update bcmdhd wifi driver to 1.201.59
[firefly-linux-kernel-4.4.55.git] / drivers / net / wireless / rockchip_wlan / rkwifi / bcmdhd / dhd_pcie.c
1 /*
2  * DHD Bus Module for PCIE
3  *
4  * $Copyright Open Broadcom Corporation$
5  *
6  * $Id: dhd_pcie.c 506043 2014-10-02 12:29:45Z $
7  */
8
9
10 /* include files */
11 #include <typedefs.h>
12 #include <bcmutils.h>
13 #include <bcmdevs.h>
14 #include <siutils.h>
15 #include <hndsoc.h>
16 #include <hndpmu.h>
17 #include <sbchipc.h>
18 #if defined(DHD_DEBUG)
19 #include <hnd_armtrap.h>
20 #include <hnd_cons.h>
21 #endif /* defined(DHD_DEBUG) */
22 #include <dngl_stats.h>
23 #include <pcie_core.h>
24 #include <dhd.h>
25 #include <dhd_bus.h>
26 #include <dhd_flowring.h>
27 #include <dhd_proto.h>
28 #include <dhd_dbg.h>
29 #include <dhdioctl.h>
30 #include <sdiovar.h>
31 #include <bcmmsgbuf.h>
32 #include <pcicfg.h>
33 #include <dhd_pcie.h>
34 #include <bcmpcie.h>
35 #include <bcmendian.h>
36 #ifdef DHDTCPACK_SUPPRESS
37 #include <dhd_ip.h>
38 #endif /* DHDTCPACK_SUPPRESS */
39 #include <dhd_config.h>
40
41 #ifdef BCMEMBEDIMAGE
42 #include BCMEMBEDIMAGE
43 #endif /* BCMEMBEDIMAGE */
44
45 #define MEMBLOCK        2048            /* Block size used for downloading of dongle image */
46 #define MAX_NVRAMBUF_SIZE       6144    /* max nvram buf size */
47
48 #define ARMCR4REG_BANKIDX       (0x40/sizeof(uint32))
49 #define ARMCR4REG_BANKPDA       (0x4C/sizeof(uint32))
50 /* Temporary war to fix precommit till sync issue between trunk & precommit branch is resolved */
51
52 #if defined(SUPPORT_MULTIPLE_BOARD_REV)
53         extern unsigned int system_rev;
54 #endif /* SUPPORT_MULTIPLE_BOARD_REV */
55
56 int dhd_dongle_memsize;
57 int dhd_dongle_ramsize;
58 #ifdef DHD_DEBUG
59 static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size);
60 static int dhdpcie_bus_readconsole(dhd_bus_t *bus);
61 #endif
62 static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size);
63 static int dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid,
64         const char *name, void *params,
65         int plen, void *arg, int len, int val_size);
66 static int dhdpcie_bus_lpback_req(struct  dhd_bus *bus, uint32 intval);
67 static int dhdpcie_bus_dmaxfer_req(struct  dhd_bus *bus,
68         uint32 len, uint32 srcdelay, uint32 destdelay);
69 static int dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter);
70 static int _dhdpcie_download_firmware(struct dhd_bus *bus);
71 static int dhdpcie_download_firmware(dhd_bus_t *bus, osl_t *osh);
72 static int dhdpcie_bus_write_vars(dhd_bus_t *bus);
73 static bool dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus);
74 static bool dhdpci_bus_read_frames(dhd_bus_t *bus);
75 static int dhdpcie_readshared(dhd_bus_t *bus);
76 static void dhdpcie_init_shared_addr(dhd_bus_t *bus);
77 static bool dhdpcie_dongle_attach(dhd_bus_t *bus);
78 static void dhdpcie_bus_intr_enable(dhd_bus_t *bus);
79 static void dhdpcie_bus_dongle_setmemsize(dhd_bus_t *bus, int mem_size);
80 static void dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh,
81         bool dongle_isolation, bool reset_flag);
82 static void dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh);
83 static int dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len);
84 static uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset);
85 static void dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data);
86 static void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data);
87 static uint16 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset);
88 static void dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data);
89 static uint32 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset);
90 static void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data);
91 static uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset);
92 static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data);
93 #ifdef CONFIG_ARCH_MSM8994
94 static void dhdpcie_bus_cfg_set_bar1_win(dhd_bus_t *bus, uint32 data);
95 static ulong dhd_bus_cmn_check_offset(dhd_bus_t *bus, ulong offset);
96 #endif
97 static void dhdpcie_bus_reg_unmap(osl_t *osh, ulong addr, int size);
98 static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b);
99 static void dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data);
100 static void dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info);
101 extern void dhd_dpc_kill(dhd_pub_t *dhdp);
102
103 #ifdef BCMEMBEDIMAGE
104 static int dhdpcie_download_code_array(dhd_bus_t *bus);
105 #endif /* BCMEMBEDIMAGE */
106
107
108
109 #define     PCI_VENDOR_ID_BROADCOM          0x14e4
110
111 /* IOVar table */
112 enum {
113         IOV_INTR = 1,
114         IOV_MEMBYTES,
115         IOV_MEMSIZE,
116         IOV_SET_DOWNLOAD_STATE,
117         IOV_DEVRESET,
118         IOV_VARS,
119         IOV_MSI_SIM,
120         IOV_PCIE_LPBK,
121         IOV_CC_NVMSHADOW,
122         IOV_RAMSIZE,
123         IOV_RAMSTART,
124         IOV_SLEEP_ALLOWED,
125         IOV_PCIE_DMAXFER,
126         IOV_PCIE_SUSPEND,
127         IOV_PCIEREG,
128         IOV_PCIECFGREG,
129         IOV_PCIECOREREG,
130         IOV_PCIESERDESREG,
131         IOV_BAR0_SECWIN_REG,
132         IOV_SBREG,
133         IOV_DONGLEISOLATION,
134         IOV_LTRSLEEPON_UNLOOAD,
135         IOV_RX_METADATALEN,
136         IOV_TX_METADATALEN,
137         IOV_TXP_THRESHOLD,
138         IOV_BUZZZ_DUMP,
139         IOV_DUMP_RINGUPD_BLOCK,
140         IOV_DMA_RINGINDICES,
141         IOV_DB1_FOR_MB,
142         IOV_FLOW_PRIO_MAP,
143         IOV_RXBOUND,
144         IOV_TXBOUND
145 };
146
147
148 const bcm_iovar_t dhdpcie_iovars[] = {
149         {"intr",        IOV_INTR,       0,      IOVT_BOOL,      0 },
150         {"membytes",    IOV_MEMBYTES,   0,      IOVT_BUFFER,    2 * sizeof(int) },
151         {"memsize",     IOV_MEMSIZE,    0,      IOVT_UINT32,    0 },
152         {"dwnldstate",  IOV_SET_DOWNLOAD_STATE, 0,      IOVT_BOOL,      0 },
153         {"vars",        IOV_VARS,       0,      IOVT_BUFFER,    0 },
154         {"devreset",    IOV_DEVRESET,   0,      IOVT_BOOL,      0 },
155         {"pcie_lpbk",   IOV_PCIE_LPBK,  0,      IOVT_UINT32,    0 },
156         {"cc_nvmshadow", IOV_CC_NVMSHADOW, 0, IOVT_BUFFER, 0 },
157         {"ramsize",     IOV_RAMSIZE,    0,      IOVT_UINT32,    0 },
158         {"ramstart",    IOV_RAMSTART,   0,      IOVT_UINT32,    0 },
159         {"pciereg",     IOV_PCIEREG,    0,      IOVT_BUFFER,    2 * sizeof(int32) },
160         {"pciecfgreg",  IOV_PCIECFGREG, 0,      IOVT_BUFFER,    2 * sizeof(int32) },
161         {"pciecorereg", IOV_PCIECOREREG,        0,      IOVT_BUFFER,    2 * sizeof(int32) },
162         {"pcieserdesreg",       IOV_PCIESERDESREG,      0,      IOVT_BUFFER,    3 * sizeof(int32) },
163         {"bar0secwinreg",       IOV_BAR0_SECWIN_REG,    0,      IOVT_BUFFER,    2 * sizeof(int32) },
164         {"sbreg",       IOV_SBREG,      0,      IOVT_BUFFER,    sizeof(sdreg_t) },
165         {"pcie_dmaxfer",        IOV_PCIE_DMAXFER,       0,      IOVT_BUFFER,    3 * sizeof(int32) },
166         {"pcie_suspend", IOV_PCIE_SUSPEND,      0,      IOVT_UINT32,    0 },
167         {"sleep_allowed",       IOV_SLEEP_ALLOWED,      0,      IOVT_BOOL,      0 },
168         {"dngl_isolation", IOV_DONGLEISOLATION, 0,      IOVT_UINT32,    0 },
169         {"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD,  0,      IOVT_UINT32,    0 },
170         {"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK,     0,      IOVT_BUFFER,    0 },
171         {"dma_ring_indices", IOV_DMA_RINGINDICES,       0,      IOVT_UINT32,    0},
172         {"rx_metadata_len", IOV_RX_METADATALEN, 0,      IOVT_UINT32,    0 },
173         {"tx_metadata_len", IOV_TX_METADATALEN, 0,      IOVT_UINT32,    0 },
174         {"db1_for_mb", IOV_DB1_FOR_MB,  0,      IOVT_UINT32,    0 },
175         {"txp_thresh", IOV_TXP_THRESHOLD,       0,      IOVT_UINT32,    0 },
176         {"buzzz_dump", IOV_BUZZZ_DUMP,          0,      IOVT_UINT32,    0 },
177         {"flow_prio_map", IOV_FLOW_PRIO_MAP,    0,      IOVT_UINT32,    0 },
178         {"rxbound",     IOV_RXBOUND,    0,      IOVT_UINT32,    0 },
179         {"txbound",     IOV_TXBOUND,    0,      IOVT_UINT32,    0 },
180         {NULL, 0, 0, 0, 0 }
181 };
182
183 #define MAX_READ_TIMEOUT        5 * 1000 * 1000
184
185 #ifndef DHD_RXBOUND
186 #define DHD_RXBOUND             64
187 #endif
188 #ifndef DHD_TXBOUND
189 #define DHD_TXBOUND             64
190 #endif
191 uint dhd_rxbound = DHD_RXBOUND;
192 uint dhd_txbound = DHD_TXBOUND;
193
194 /* Register/Unregister functions are called by the main DHD entry
195  * point (e.g. module insertion) to link with the bus driver, in
196  * order to look for or await the device.
197  */
198
/* Hook this bus module into the PCIe bus driver (called at module load). */
int
dhd_bus_register(void)
{
	int err;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	err = dhdpcie_bus_register();
	return err;
}
206
/* Detach this bus module from the PCIe bus driver (called at module unload). */
void
dhd_bus_unregister(void)
{
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
	dhdpcie_bus_unregister();
}
215
216
217 /** returns a host virtual address */
218 uint32 *
219 dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size)
220 {
221         return (uint32 *)REG_MAP(addr, size);
222 }
223
224 void
225 dhdpcie_bus_reg_unmap(osl_t *osh, ulong addr, int size)
226 {
227         REG_UNMAP((void*)(uintptr)addr);
228         return;
229 }
230
231 /**
232  * 'regs' is the host virtual address that maps to the start of the PCIe BAR0 window. The first 4096
233  * bytes in this window are mapped to the backplane address in the PCIEBAR0Window register. The
234  * precondition is that the PCIEBAR0Window register 'points' at the PCIe core.
235  *
236  * 'tcm' is the *host* virtual address at which tcm is mapped.
237  */
/*
 * Allocate and initialize the bus control block, attach the dongle and the
 * DHD software layer. Returns the bus handle on success, NULL on failure
 * (all allocations made here are freed on the failure path).
 */
dhd_bus_t* dhdpcie_bus_attach(osl_t *osh, volatile char* regs, volatile char* tcm, uint32 tcm_size)
{
	dhd_bus_t *bus;

	DHD_TRACE(("%s: ENTER\n", __FUNCTION__));

	do {
		/* Allocate and zero the bus control block. */
		if (!(bus = MALLOC(osh, sizeof(dhd_bus_t)))) {
			DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
			break;
		}
		bzero(bus, sizeof(dhd_bus_t));
		/* Record the BAR0/BAR1 mappings established by the caller. */
		bus->regs = regs;
		bus->tcm = tcm;
		bus->tcm_size = tcm_size;
		bus->osh = osh;

		/* Start with an empty list of constructed flow rings. */
		dll_init(&bus->const_flowring);

		/* Attach pcie shared structure */
		bus->pcie_sh = MALLOC(osh, sizeof(pciedev_shared_t));
		if (!bus->pcie_sh) {
			DHD_ERROR(("%s: MALLOC of bus->pcie_sh failed\n", __FUNCTION__));
			break;
		}

		/* dhd_common_init(osh); */
		/* Bring up the dongle side (si_attach, core/RAM discovery). */
		if (dhdpcie_dongle_attach(bus)) {
			DHD_ERROR(("%s: dhdpcie_probe_attach failed\n", __FUNCTION__));
			break;
		}

		/* software resources */
		/* NOTE(review): if dhd_attach() fails here, bus->sih acquired by
		 * dhdpcie_dongle_attach() is not detached on this path — verify
		 * si_detach handling against the caller. TODO confirm.
		 */
		if (!(bus->dhd = dhd_attach(osh, bus, PCMSGBUF_HDRLEN))) {
			DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));

			break;
		}
		bus->dhd->busstate = DHD_BUS_DOWN;
		/* Use doorbell 1 for host->dongle mailbox signalling by default. */
		bus->db1_for_mb = TRUE;
		bus->dhd->hang_report  = TRUE;

		DHD_TRACE(("%s: EXIT SUCCESS\n",
			__FUNCTION__));

		return bus;
	} while (0);

	/* Failure: release whatever was allocated above. */
	DHD_TRACE(("%s: EXIT FAILURE\n", __FUNCTION__));

	if (bus && bus->pcie_sh)
		MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));

	if (bus)
		MFREE(osh, bus, sizeof(dhd_bus_t));

	return NULL;
}
296
297 uint
298 dhd_bus_chip(struct dhd_bus *bus)
299 {
300         ASSERT(bus->sih != NULL);
301         return bus->sih->chip;
302 }
303
304 uint
305 dhd_bus_chiprev(struct dhd_bus *bus)
306 {
307         ASSERT(bus);
308         ASSERT(bus->sih != NULL);
309         return bus->sih->chiprev;
310 }
311
312 void *
313 dhd_bus_pub(struct dhd_bus *bus)
314 {
315         return bus->dhd;
316 }
317
318 void *
319 dhd_bus_sih(struct dhd_bus *bus)
320 {
321         return (void *)bus->sih;
322 }
323
324 void *
325 dhd_bus_txq(struct dhd_bus *bus)
326 {
327         return &bus->txq;
328 }
329
330 /* Get Chip ID version */
331 uint dhd_bus_chip_id(dhd_pub_t *dhdp)
332 {
333         dhd_bus_t *bus = dhdp->bus;
334         return  bus->sih->chip;
335 }
336
337 /* Get Chip Rev ID version */
338 uint dhd_bus_chiprev_id(dhd_pub_t *dhdp)
339 {
340         dhd_bus_t *bus = dhdp->bus;
341         return bus->sih->chiprev;
342 }
343
344 /* Get Chip Pkg ID version */
345 uint dhd_bus_chippkg_id(dhd_pub_t *dhdp)
346 {
347         dhd_bus_t *bus = dhdp->bus;
348         return bus->sih->chippkg;
349 }
350
351
352 /*
353
354 Name:  dhdpcie_bus_isr
355
Parameters:
357
358 1: IN int irq   -- interrupt vector
359 2: IN void *arg      -- handle to private data structure
360
361 Return value:
362
363 Status (TRUE or FALSE)
364
365 Description:
366 Interrupt Service routine checks for the status register,
367 disable interrupt and queue DPC if mail box interrupts are raised.
368 */
369
370
371 int32
372 dhdpcie_bus_isr(dhd_bus_t *bus)
373 {
374
375         do {
376                         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
377                         /* verify argument */
378                         if (!bus) {
379                                 DHD_ERROR(("%s : bus is null pointer , exit \n", __FUNCTION__));
380                                 break;
381                         }
382
383                         if (bus->dhd->busstate == DHD_BUS_DOWN) {
384                                 DHD_TRACE(("%s : bus is down. we have nothing to do\n",
385                                         __FUNCTION__));
386                                 break;
387                         }
388
389                         /*  Overall operation:
390                          *    - Mask further interrupts
391                          *    - Read/ack intstatus
392                          *    - Take action based on bits and state
393                          *    - Reenable interrupts (as per state)
394                          */
395
396                         /* Count the interrupt call */
397                         bus->intrcount++;
398
399                         /* read interrupt status register!! Status bits will be cleared in DPC !! */
400                         bus->ipend = TRUE;
401                         dhdpcie_bus_intr_disable(bus); /* Disable interrupt!! */
402                         bus->intdis = TRUE;
403
404 #if defined(PCIE_ISR_THREAD)
405
406                         DHD_TRACE(("Calling dhd_bus_dpc() from %s\n", __FUNCTION__));
407                         DHD_OS_WAKE_LOCK(bus->dhd);
408                         while (dhd_bus_dpc(bus));
409                         DHD_OS_WAKE_UNLOCK(bus->dhd);
410 #else
411                         bus->dpc_sched = TRUE;
412                         dhd_sched_dpc(bus->dhd);     /* queue DPC now!! */
413 #endif /* defined(SDIO_ISR_THREAD) */
414
415                         DHD_TRACE(("%s: Exit Success DPC Queued\n", __FUNCTION__));
416                         return TRUE;
417
418         } while (0);
419
420         DHD_TRACE(("%s: Exit Failure\n", __FUNCTION__));
421         return FALSE;
422 }
423
/*
 * Bring up the dongle side of the bus: program the BAR0 window, attach the
 * SI layer, locate the ARM core and its RAM, and record the RAM base/size.
 * Returns 0 on success and -1 on failure (note the declared return type is
 * bool; the caller only tests for non-zero — TODO confirm intent).
 */
static bool
dhdpcie_dongle_attach(dhd_bus_t *bus)
{

	osl_t *osh = bus->osh;
	void *regsva = (void*)bus->regs;
	uint16 devid = bus->cl_devid;
	uint32 val;
	sbpcieregs_t *sbpcieregs;

	DHD_TRACE(("%s: ENTER\n",
		__FUNCTION__));


	/* Only ALP clock is available this early in bring-up. */
	bus->alp_only = TRUE;
	bus->sih = NULL;

	/* Set bar0 window to si_enum_base */
	dhdpcie_bus_cfg_set_bar0_win(bus, SI_ENUM_BASE);

#ifdef CONFIG_ARCH_MSM8994
	/* Read bar1 window */
	bus->bar1_win_base = OSL_PCI_READ_CONFIG(bus->osh, PCI_BAR1_WIN, 4);
	DHD_ERROR(("%s: PCI_BAR1_WIN = %x\n", __FUNCTION__, bus->bar1_win_base));
#endif

	/* si_attach() will provide an SI handle and scan the backplane */
	if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus,
	                           &bus->vars, &bus->varsz))) {
		DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
		goto fail;
	}


	si_setcore(bus->sih, PCIE2_CORE_ID, 0);
	sbpcieregs = (sbpcieregs_t*)(bus->regs);

	/* WAR where the BAR1 window may not be sized properly */
	/* Read-modify-write of indirect config reg 0x4e0 forces a resize. */
	W_REG(osh, &sbpcieregs->configaddr, 0x4e0);
	val = R_REG(osh, &sbpcieregs->configdata);
#ifdef CONFIG_ARCH_MSM8994
	/* Mask assumes tcm_size is a power of two — TODO confirm. */
	bus->bar1_win_mask = 0xffffffff - (bus->tcm_size - 1);
	DHD_ERROR(("%s: BAR1 window val=%d mask=%x\n", __FUNCTION__, val, bus->bar1_win_mask));
#endif
	W_REG(osh, &sbpcieregs->configdata, val);

	/* Get info on the ARM and SOCRAM cores... */
	/* Should really be qualified by device id */
	if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
	    (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
	    (si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
		bus->armrev = si_corerev(bus->sih);
	} else {
		DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
		goto fail;
	}

	if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
		/* Non-CR4 ARM cores use a separate SOCRAM core for RAM. */
		if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
			DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
			goto fail;
		}
	} else {
		/* cr4 has a different way to find the RAM size from TCM's */
		if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) {
			DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__));
			goto fail;
		}
		/* also populate base address */
		switch ((uint16)bus->sih->chip) {
		case BCM4339_CHIP_ID:
		case BCM4335_CHIP_ID:
			bus->dongle_ram_base = CR4_4335_RAM_BASE;
			break;
		case BCM4358_CHIP_ID:
		case BCM4356_CHIP_ID:
		case BCM4354_CHIP_ID:
		case BCM43567_CHIP_ID:
		case BCM43569_CHIP_ID:
		case BCM4350_CHIP_ID:
		case BCM43570_CHIP_ID:
			bus->dongle_ram_base = CR4_4350_RAM_BASE;
			break;
		case BCM4360_CHIP_ID:
			bus->dongle_ram_base = CR4_4360_RAM_BASE;
			break;
		case BCM4345_CHIP_ID:
			bus->dongle_ram_base = CR4_4345_RAM_BASE;
			break;
		case BCM43602_CHIP_ID:
			bus->dongle_ram_base = CR4_43602_RAM_BASE;
			break;
		case BCM4349_CHIP_GRPID:
			bus->dongle_ram_base = CR4_4349_RAM_BASE;
			break;
		default:
			bus->dongle_ram_base = 0;
			DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
			           __FUNCTION__, bus->dongle_ram_base));
		}
	}
	bus->ramsize = bus->orig_ramsize;
	/* Optionally clamp RAM size to the module-parameter override. */
	if (dhd_dongle_memsize)
		dhdpcie_bus_dongle_setmemsize(bus, dhd_dongle_memsize);

	DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n",
	           bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base));

	bus->srmemsize = si_socram_srmem_size(bus->sih);


	/* Default interrupt mask: D2H mailbox plus function-0 doorbells. */
	bus->def_intmask = PCIE_MB_D2H_MB_MASK | PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1;

	/* Set the poll and/or interrupt flags */
	bus->intr = (bool)dhd_intr;

	bus->wait_for_d3_ack = 1;
	bus->suspended = FALSE;
	DHD_TRACE(("%s: EXIT: SUCCESS\n",
		__FUNCTION__));
	return 0;

fail:
	/* Release the SI handle if it was attached before the failure. */
	if (bus->sih != NULL)
		si_detach(bus->sih);
	DHD_TRACE(("%s: EXIT: FAILURE\n",
		__FUNCTION__));
	return -1;
}
553
554 int
555 dhpcie_bus_unmask_interrupt(dhd_bus_t *bus)
556 {
557         dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, I_MB);
558         return 0;
559 }
560 int
561 dhpcie_bus_mask_interrupt(dhd_bus_t *bus)
562 {
563         dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, 0x0);
564         return 0;
565 }
566
567 void
568 dhdpcie_bus_intr_enable(dhd_bus_t *bus)
569 {
570         DHD_TRACE(("%s: enable interrupts\n", __FUNCTION__));
571
572         if (!bus || !bus->sih)
573                 return;
574
575         if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
576                 (bus->sih->buscorerev == 4)) {
577                 dhpcie_bus_unmask_interrupt(bus);
578         }
579         else if (bus->sih) {
580                 si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask,
581                         bus->def_intmask, bus->def_intmask);
582         }
583 }
584
585 void
586 dhdpcie_bus_intr_disable(dhd_bus_t *bus)
587 {
588
589         DHD_TRACE(("%s Enter\n", __FUNCTION__));
590
591         if (!bus || !bus->sih)
592                 return;
593
594         if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
595                 (bus->sih->buscorerev == 4)) {
596                 dhpcie_bus_mask_interrupt(bus);
597         }
598         else if (bus->sih) {
599                 si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask,
600                         bus->def_intmask, 0);
601         }
602
603         DHD_TRACE(("%s Exit\n", __FUNCTION__));
604 }
605
/*
 * Prepare the bus for removal: mark it down, mask interrupts, and reset the
 * dongle through the PCIe watchdog, all under the sdlock.
 */
void
dhdpcie_bus_remove_prep(dhd_bus_t *bus)
{
	DHD_TRACE(("%s Enter\n", __FUNCTION__));

	dhd_os_sdlock(bus->dhd);

	/* Down first so the ISR/DPC stop processing before the reset. */
	bus->dhd->busstate = DHD_BUS_DOWN;
	dhdpcie_bus_intr_disable(bus);
	// terence 20150406: fix for null pointer handle
	if (bus->sih)
		pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *)(bus->regs));

	dhd_os_sdunlock(bus->dhd);

	DHD_TRACE(("%s Exit\n", __FUNCTION__));
}
623
624
625 /* Detach and free everything */
void
dhdpcie_bus_release(dhd_bus_t *bus)
{
	bool dongle_isolation = FALSE;
	osl_t *osh = NULL;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (bus) {

		osh = bus->osh;
		ASSERT(osh);

		/* Tear down the software layer first: interrupts off, IRQ
		 * freed, then dhd detach/release/free.
		 */
		if (bus->dhd) {
			dongle_isolation = bus->dhd->dongle_isolation;
			if (bus->intr) {
				dhdpcie_bus_intr_disable(bus);
				dhdpcie_free_irq(bus);
			}
			dhd_detach(bus->dhd);
			dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
			dhd_free(bus->dhd);
			bus->dhd = NULL;
		}

		/* unmap the regs and tcm here!! */
		if (bus->regs) {
			dhdpcie_bus_reg_unmap(osh, (ulong)bus->regs, DONGLE_REG_MAP_SIZE);
			bus->regs = NULL;
		}
		if (bus->tcm) {
			dhdpcie_bus_reg_unmap(osh, (ulong)bus->tcm, bus->tcm_size);
			bus->tcm = NULL;
		}

		/* Free nvram vars (unless a dongle reset is pending). */
		dhdpcie_bus_release_malloc(bus, osh);
		/* Detach pcie shared structure */
		if (bus->pcie_sh)
			MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));

#ifdef DHD_DEBUG
		/* Console buffer is only allocated in debug builds. */
		if (bus->console.buf != NULL)
			MFREE(osh, bus->console.buf, bus->console.bufsize);
#endif


		/* Finally free bus info */
		MFREE(osh, bus, sizeof(dhd_bus_t));

	}

	DHD_TRACE(("%s: Exit\n", __FUNCTION__));

}
681
682
683 void
684 dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
685 {
686
687         DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__,
688                 bus->dhd, bus->dhd->dongle_reset));
689
690         if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag) {
691                 DHD_TRACE(("%s Exit\n", __FUNCTION__));
692                 return;
693         }
694
695         if (bus->sih) {
696
697                 if (!dongle_isolation)
698                         pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *)(bus->regs));
699
700                 if (bus->ltrsleep_on_unload) {
701                         si_corereg(bus->sih, bus->sih->buscoreidx,
702                                 OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0);
703                 }
704                 si_detach(bus->sih);
705                 // terence 20150420: fix for sih incorrectly handled in other function
706                 bus->sih = NULL;
707                 if (bus->vars && bus->varsz)
708                         MFREE(osh, bus->vars, bus->varsz);
709                 bus->vars = NULL;
710         }
711
712         DHD_TRACE(("%s Exit\n", __FUNCTION__));
713 }
714
715 uint32
716 dhdpcie_bus_cfg_read_dword(dhd_bus_t *bus, uint32 addr, uint32 size)
717 {
718         uint32 data = OSL_PCI_READ_CONFIG(bus->osh, addr, size);
719         return data;
720 }
721
722 /* 32 bit config write */
723 void
724 dhdpcie_bus_cfg_write_dword(dhd_bus_t *bus, uint32 addr, uint32 size, uint32 data)
725 {
726         OSL_PCI_WRITE_CONFIG(bus->osh, addr, size, data);
727 }
728
729 void
730 dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data)
731 {
732         OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, 4, data);
733 }
734
735 #ifdef CONFIG_ARCH_MSM8994
736 void
737 dhdpcie_bus_cfg_set_bar1_win(dhd_bus_t *bus, uint32 data)
738 {
739         OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR1_WIN, 4, data);
740 }
741 #endif
742
743 void
744 dhdpcie_bus_dongle_setmemsize(struct dhd_bus *bus, int mem_size)
745 {
746         int32 min_size =  DONGLE_MIN_MEMSIZE;
747         /* Restrict the memsize to user specified limit */
748         DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n",
749                 dhd_dongle_memsize, min_size));
750         if ((dhd_dongle_memsize > min_size) &&
751                 (dhd_dongle_memsize < (int32)bus->orig_ramsize))
752                 bus->ramsize = dhd_dongle_memsize;
753 }
754
755 void
756 dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh)
757 {
758         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
759
760         if (bus->dhd && bus->dhd->dongle_reset)
761                 return;
762
763         if (bus->vars && bus->varsz) {
764                 MFREE(osh, bus->vars, bus->varsz);
765                 bus->vars = NULL;
766         }
767
768         DHD_TRACE(("%s: Exit\n", __FUNCTION__));
769         return;
770
771 }
772
/* Stop bus module: clear pending frames, disable data flow */
/* NOTE(review): the enforce_mutex parameter is unused here — confirm
 * against other bus implementations that share this entry point.
 */
void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
{
	uint32 status;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (!bus->dhd)
		return;

	if (bus->dhd->busstate == DHD_BUS_DOWN) {
		DHD_ERROR(("%s: already down by net_dev_reset\n", __FUNCTION__));
		goto done;
	}

	/* Mark down first, then mask and ack any pending interrupt status. */
	bus->dhd->busstate = DHD_BUS_DOWN;
	dhdpcie_bus_intr_disable(bus);
	status =  dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
	dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status);
	/* When fw is loaded on-demand, the DPC thread must be stopped too. */
	if (!dhd_download_fw_on_driverload)
		dhd_dpc_kill(bus->dhd);

	/* Clear rx control and wake any waiters */
	bus->rxlen = 0;
	dhd_os_ioctl_resp_wake(bus->dhd);

done:
	return;
}
802
/* Watchdog timer function */
/*
 * Periodic watchdog tick. In debug builds, polls the dongle console every
 * dhd_console_ms. Always returns FALSE (no rescheduling request).
 */
bool dhd_bus_watchdog(dhd_pub_t *dhd)
{
#ifdef DHD_DEBUG
	dhd_bus_t *bus;
	bus = dhd->bus;



	/* Poll for console output periodically */
	if (dhd->busstate == DHD_BUS_DATA && dhd_console_ms != 0) {
		/* Accumulate elapsed time in watchdog-tick units. */
		bus->console.count += dhd_watchdog_ms;
		if (bus->console.count >= dhd_console_ms) {
			bus->console.count -= dhd_console_ms;
			/* Make sure backplane clock is on */
			if (dhdpcie_bus_readconsole(bus) < 0)
				dhd_console_ms = 0;	/* On error, stop trying */
		}
	}
#endif /* DHD_DEBUG */

	return FALSE;
}
826
827
828
829 /* Download firmware image and nvram image */
830 int
831 dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
832                           char *pfw_path, char *pnv_path, char *pconf_path)
833 {
834         int ret;
835
836         bus->fw_path = pfw_path;
837         bus->nv_path = pnv_path;
838         bus->dhd->conf_path = pconf_path;
839
840         ret = dhdpcie_download_firmware(bus, osh);
841
842         return ret;
843 }
844
/*
 * Resolve the final firmware/nvram/config paths (external config file takes
 * precedence) and download the firmware while holding a wake lock.
 */
static int
dhdpcie_download_firmware(struct dhd_bus *bus, osl_t *osh)
{
	int ret = 0;

	DHD_TRACE_HW4(("%s: firmware path=%s, nvram path=%s\n",
		__FUNCTION__, bus->fw_path, bus->nv_path));

	/* Keep the host awake for the duration of the download. */
	DHD_OS_WAKE_LOCK(bus->dhd);

	/* External conf takes precedence if specified */
	dhd_conf_preinit(bus->dhd);
	dhd_conf_read_config(bus->dhd, bus->dhd->conf_path);
	dhd_conf_set_fw_name_by_chip(bus->dhd, bus->fw_path);

	printk("Final fw_path=%s\n", bus->fw_path);
	printk("Final nv_path=%s\n", bus->nv_path);
	printk("Final conf_path=%s\n", bus->dhd->conf_path);

	ret = _dhdpcie_download_firmware(bus);

	DHD_OS_WAKE_UNLOCK(bus->dhd);
	return ret;
}
869
870 static int
871 dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path)
872 {
873         int bcmerror = -1;
874         int offset = 0;
875         int len;
876         void *image = NULL;
877         uint8 *memblock = NULL, *memptr;
878
879         DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
880
881         /* Should succeed in opening image if it is actually given through registry
882          * entry or in module param.
883          */
884         image = dhd_os_open_image(pfw_path);
885         if (image == NULL) {
886                 printk("%s: Open firmware file failed %s\n", __FUNCTION__, pfw_path);
887                 goto err;
888         }
889
890         memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
891         if (memblock == NULL) {
892                 DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
893                 goto err;
894         }
895         if ((uint32)(uintptr)memblock % DHD_SDALIGN)
896                 memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
897
898         /* Download image */
899         while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, image))) {
900                 if (len < 0) {
901                         DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
902                         bcmerror = BCME_ERROR;
903                         goto err;
904                 }
905                 /* check if CR4 */
906                 if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
907                         /* if address is 0, store the reset instruction to be written in 0 */
908
909                         if (offset == 0) {
910                                 bus->resetinstr = *(((uint32*)memptr));
911                                 /* Add start of RAM address to the address given by user */
912                                 offset += bus->dongle_ram_base;
913                         }
914                 }
915
916                 bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, memptr, len);
917                 if (bcmerror) {
918                         DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
919                                 __FUNCTION__, bcmerror, MEMBLOCK, offset));
920                         goto err;
921                 }
922
923                 offset += MEMBLOCK;
924         }
925
926 err:
927         if (memblock)
928                 MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
929
930         if (image)
931                 dhd_os_close_image(image);
932
933         return bcmerror;
934 }
935
936
937 static int
938 dhdpcie_download_nvram(struct dhd_bus *bus)
939 {
940         int bcmerror = -1;
941         uint len;
942         void * image = NULL;
943         char * memblock = NULL;
944         char *bufp;
945         char *pnv_path;
946         bool nvram_file_exists;
947
948         pnv_path = bus->nv_path;
949
950         nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
951         if (!nvram_file_exists && (bus->nvram_params == NULL))
952                 return (0);
953
954         if (nvram_file_exists) {
955                 image = dhd_os_open_image(pnv_path);
956                 if (image == NULL) {
957                         printk("%s: Open nvram file failed %s\n", __FUNCTION__, pnv_path);
958                         goto err;
959                 }
960         }
961
962         memblock = MALLOC(bus->dhd->osh, MAX_NVRAMBUF_SIZE);
963         if (memblock == NULL) {
964                 DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
965                            __FUNCTION__, MAX_NVRAMBUF_SIZE));
966                 goto err;
967         }
968
969         /* Download variables */
970         if (nvram_file_exists) {
971                 len = dhd_os_get_image_block(memblock, MAX_NVRAMBUF_SIZE, image);
972         }
973         else {
974
975                 /* nvram is string with null terminated. cannot use strlen */
976                 len = bus->nvram_params_len;
977                 ASSERT(len <= MAX_NVRAMBUF_SIZE);
978                 memcpy(memblock, bus->nvram_params, len);
979         }
980         if (len > 0 && len < MAX_NVRAMBUF_SIZE) {
981                 bufp = (char *)memblock;
982                 bufp[len] = 0;
983
984                 if (nvram_file_exists)
985                         len = process_nvram_vars(bufp, len);
986
987                 if (len % 4) {
988                         len += 4 - (len % 4);
989                 }
990                 bufp += len;
991                 *bufp++ = 0;
992                 if (len)
993                         bcmerror = dhdpcie_downloadvars(bus, memblock, len + 1);
994                 if (bcmerror) {
995                         DHD_ERROR(("%s: error downloading vars: %d\n",
996                                    __FUNCTION__, bcmerror));
997                 }
998         }
999         else {
1000                 DHD_ERROR(("%s: error reading nvram file: %d\n",
1001                            __FUNCTION__, len));
1002                 bcmerror = BCME_ERROR;
1003         }
1004
1005 err:
1006         if (memblock)
1007                 MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
1008
1009         if (image)
1010                 dhd_os_close_image(image);
1011
1012         return bcmerror;
1013 }
1014
1015
#ifdef BCMEMBEDIMAGE
/*
 * Download the firmware image that was compiled into the driver
 * (BCMEMBEDIMAGE builds) from the 'dlarray' byte array into dongle RAM,
 * MEMBLOCK bytes at a time.  Mirrors dhdpcie_download_code_file(), but the
 * source is the embedded array rather than a file.
 *
 * In DHD_DEBUG builds the image is read back and compared against the
 * array to verify the download.
 *
 * Returns BCME_OK (0) on success, negative on failure.
 */
int
dhdpcie_download_code_array(struct dhd_bus *bus)
{
	int bcmerror = -1;
	int offset = 0;
	unsigned char *p_dlarray  = NULL;
	unsigned int dlarray_size = 0;
	unsigned int downloded_len, remaining_len, len;
	char *p_dlimagename, *p_dlimagever, *p_dlimagedate;
	uint8 *memblock = NULL, *memptr;

	downloded_len = 0;
	remaining_len = 0;
	len = 0;

	/* dlarray/dlimage* come from the embedded-image header (BCMEMBEDIMAGE) */
	p_dlarray = dlarray;
	dlarray_size = sizeof(dlarray);
	p_dlimagename = dlimagename;
	p_dlimagever  = dlimagever;
	p_dlimagedate = dlimagedate;

	/* Sanity-check the embedded image and that it fits in dongle RAM */
	if ((p_dlarray == 0) || (dlarray_size == 0) ||(dlarray_size > bus->ramsize) ||
		(p_dlimagename == 0) || (p_dlimagever  == 0) || (p_dlimagedate == 0))
		goto err;

	/* Over-allocate so the working pointer can be rounded up to DHD_SDALIGN */
	memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
	if (memblock == NULL) {
		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
		goto err;
	}
	if ((uint32)(uintptr)memblock % DHD_SDALIGN)
		memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));

	/* Copy the array into dongle RAM one MEMBLOCK chunk at a time */
	while (downloded_len  < dlarray_size) {
		remaining_len = dlarray_size - downloded_len;
		if (remaining_len >= MEMBLOCK)
			len = MEMBLOCK;
		else
			len = remaining_len;

		memcpy(memptr, (p_dlarray + downloded_len), len);
		/* check if CR4 */
		if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
			/* if address is 0, store the reset instruction to be written in 0 */
			if (offset == 0) {
				bus->resetinstr = *(((uint32*)memptr));
				/* Add start of RAM address to the address given by user */
				offset += bus->dongle_ram_base;
			}
		}
		bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len);
		downloded_len += len;
		if (bcmerror) {
			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
				__FUNCTION__, bcmerror, MEMBLOCK, offset));
			goto err;
		}
		offset += MEMBLOCK;
	}

#ifdef DHD_DEBUG
	/* Upload and compare the downloaded code */
	{
		unsigned char *ularray = NULL;
		unsigned int uploded_len;
		uploded_len = 0;
		bcmerror = -1;
		ularray = MALLOC(bus->dhd->osh, dlarray_size);
		if (ularray == NULL)
			goto upload_err;
		/* Upload image to verify downloaded contents. */
		offset = bus->dongle_ram_base;
		/* Pre-fill with a recognizable pattern so short reads are visible */
		memset(ularray, 0xaa, dlarray_size);
		while (uploded_len  < dlarray_size) {
			remaining_len = dlarray_size - uploded_len;
			if (remaining_len >= MEMBLOCK)
				len = MEMBLOCK;
			else
				len = remaining_len;
			bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset,
				(uint8 *)(ularray + uploded_len), len);
			if (bcmerror) {
				DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
					__FUNCTION__, bcmerror, MEMBLOCK, offset));
				goto upload_err;
			}

			uploded_len += len;
			offset += MEMBLOCK;
		}

		if (memcmp(p_dlarray, ularray, dlarray_size)) {
			DHD_ERROR(("%s: Downloaded image is corrupted (%s, %s, %s).\n",
				__FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate));
			goto upload_err;

		} else
			DHD_ERROR(("%s: Download, Upload and compare succeeded (%s, %s, %s).\n",
				__FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate));
upload_err:
		if (ularray)
			MFREE(bus->dhd->osh, ularray, dlarray_size);
	}
#endif /* DHD_DEBUG */
err:

	if (memblock)
		MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);

	return bcmerror;
}
#endif /* BCMEMBEDIMAGE */
1129
1130
1131 static int
1132 _dhdpcie_download_firmware(struct dhd_bus *bus)
1133 {
1134         int bcmerror = -1;
1135
1136         bool embed = FALSE;     /* download embedded firmware */
1137         bool dlok = FALSE;      /* download firmware succeeded */
1138
1139         /* Out immediately if no image to download */
1140         if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
1141 #ifdef BCMEMBEDIMAGE
1142                 embed = TRUE;
1143 #else
1144                 DHD_ERROR(("%s: no fimrware file\n", __FUNCTION__));
1145                 return 0;
1146 #endif
1147         }
1148
1149         /* Keep arm in reset */
1150         if (dhdpcie_bus_download_state(bus, TRUE)) {
1151                 DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__));
1152                 goto err;
1153         }
1154
1155         /* External image takes precedence if specified */
1156         if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
1157                 if (dhdpcie_download_code_file(bus, bus->fw_path)) {
1158                         DHD_ERROR(("%s: dongle image file download failed\n", __FUNCTION__));
1159 #ifdef BCMEMBEDIMAGE
1160                         embed = TRUE;
1161 #else
1162                         goto err;
1163 #endif
1164                 }
1165                 else {
1166                         embed = FALSE;
1167                         dlok = TRUE;
1168                 }
1169         }
1170
1171 #ifdef BCMEMBEDIMAGE
1172         if (embed) {
1173                 if (dhdpcie_download_code_array(bus)) {
1174                         DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__));
1175                         goto err;
1176                 }
1177                 else {
1178                         dlok = TRUE;
1179                 }
1180         }
1181 #else
1182         BCM_REFERENCE(embed);
1183 #endif
1184         if (!dlok) {
1185                 DHD_ERROR(("%s: dongle image download failed\n", __FUNCTION__));
1186                 goto err;
1187         }
1188
1189         /* EXAMPLE: nvram_array */
1190         /* If a valid nvram_arry is specified as above, it can be passed down to dongle */
1191         /* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
1192
1193
1194         /* External nvram takes precedence if specified */
1195         if (dhdpcie_download_nvram(bus)) {
1196                 DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__));
1197                 goto err;
1198         }
1199
1200         /* Take arm out of reset */
1201         if (dhdpcie_bus_download_state(bus, FALSE)) {
1202                 DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__));
1203                 goto err;
1204         }
1205
1206         bcmerror = 0;
1207
1208 err:
1209         return bcmerror;
1210 }
1211
1212 int dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen)
1213 {
1214         int timeleft;
1215         uint rxlen = 0;
1216         bool pending;
1217
1218         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
1219
1220         if (bus->dhd->dongle_reset)
1221                 return -EIO;
1222
1223         /* Wait until control frame is available */
1224         timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, &pending);
1225         rxlen = bus->rxlen;
1226         bcopy(&bus->ioct_resp, msg, MIN(rxlen, sizeof(ioctl_comp_resp_msg_t)));
1227         bus->rxlen = 0;
1228
1229         if (rxlen) {
1230                 DHD_CTL(("%s: resumed on rxctl frame, got %d\n", __FUNCTION__, rxlen));
1231         } else if (timeleft == 0) {
1232                 DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__));
1233                 bus->ioct_resp.cmn_hdr.request_id = 0;
1234                 bus->ioct_resp.compl_hdr.status = 0xffff;
1235                 bus->dhd->rxcnt_timeout++;
1236                 DHD_ERROR(("%s: rxcnt_timeout=%d\n", __FUNCTION__, bus->dhd->rxcnt_timeout));
1237         } else if (pending == TRUE) {
1238                 DHD_CTL(("%s: canceled\n", __FUNCTION__));
1239                 return -ERESTARTSYS;
1240         } else {
1241                 DHD_CTL(("%s: resumed for unknown reason?\n", __FUNCTION__));
1242         }
1243
1244         if (timeleft != 0)
1245                 bus->dhd->rxcnt_timeout = 0;
1246
1247         if (rxlen)
1248                 bus->dhd->rx_ctlpkts++;
1249         else
1250                 bus->dhd->rx_ctlerrs++;
1251
1252         if (bus->dhd->rxcnt_timeout >= MAX_CNTL_TX_TIMEOUT)
1253                 return -ETIMEDOUT;
1254
1255         if (bus->dhd->dongle_trap_occured)
1256                 return -EREMOTEIO;
1257
1258         return rxlen ? (int)rxlen : -EIO;
1259
1260 }
1261
1262 #define CONSOLE_LINE_MAX        192
1263
1264 #ifdef DHD_DEBUG
/*
 * Drain the dongle's in-memory console ring buffer and print each complete
 * line to the host log with a "CONSOLE: " prefix.
 *
 * Reads the hnd_cons_t log descriptor from dongle memory, lazily allocates
 * a host-side shadow buffer on first use, and remembers the last-consumed
 * position in c->last so repeated calls only emit new output.
 *
 * Returns BCME_OK, a negative value from dhdpcie_bus_membytes() on read
 * failure, BCME_NOMEM on allocation failure, BCME_ERROR on a corrupt
 * index, or -1 before the console address is known.
 */
static int
dhdpcie_bus_readconsole(dhd_bus_t *bus)
{
	dhd_console_t *c = &bus->console;
	uint8 line[CONSOLE_LINE_MAX], ch;
	uint32 n, idx, addr;
	int rv;

	/* Don't do anything until FWREADY updates console address */
	if (bus->console_addr == 0)
		return -1;

	/* Read console log struct */
	addr = bus->console_addr + OFFSETOF(hnd_cons_t, log);

	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0)
		return rv;

	/* Allocate console buffer (one time only) */
	if (c->buf == NULL) {
		c->bufsize = ltoh32(c->log.buf_size);
		if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL)
			return BCME_NOMEM;
	}
	idx = ltoh32(c->log.idx);

	/* Protect against corrupt value */
	if (idx > c->bufsize)
		return BCME_ERROR;

	/* Skip reading the console buffer if the index pointer has not moved */
	if (idx == c->last)
		return BCME_OK;

	/* Read the console buffer */
	addr = ltoh32(c->log.buf);
	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, c->buf, c->bufsize)) < 0)
		return rv;

	/* Walk from the last-consumed offset to the dongle's write index,
	 * extracting one newline-terminated line per pass of the inner loop.
	 */
	while (c->last != idx) {
		for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
			if (c->last == idx) {
				/* This would output a partial line.  Instead, back up
				 * the buffer pointer and output this line next time around.
				 */
				if (c->last >= n)
					c->last -= n;
				else
					c->last = c->bufsize - n;
				goto break2;
			}
			ch = c->buf[c->last];
			c->last = (c->last + 1) % c->bufsize;
			if (ch == '\n')
				break;
			line[n] = ch;
		}

		if (n > 0) {
			/* Strip a trailing carriage return before printing */
			if (line[n - 1] == '\r')
				n--;
			line[n] = 0;
			printf("CONSOLE: %s\n", line);
		}
	}
break2:

	return BCME_OK;
}
1334
/*
 * After an ioctl timeout (or on demand), inspect the dongle's shared area
 * for assert/trap state and dump it — together with the dongle console
 * contents — to the host log.
 *
 * 'data' may be NULL (the rx-ctrl timeout path); a local scratch buffer of
 * 'msize' bytes is then allocated.  Returns 0 or a negative BCME_xxx code.
 *
 * NOTE(review): console read failures inside the trap branch jump to
 * 'printbuf' without updating bcmerror, so the trap dump is still printed
 * and the function can return 0 — this looks like a deliberate best-effort
 * console dump; confirm before changing.
 */
static int
dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size)
{
	int bcmerror = 0;
	uint msize = 512;
	char *mbuffer = NULL;
	char *console_buffer = NULL;
	uint maxstrlen = 256;
	char *str = NULL;
	trap_t tr;
	pciedev_shared_t *pciedev_shared = bus->pcie_sh;
	struct bcmstrbuf strbuf;
	uint32 console_ptr, console_size, console_index;
	uint8 line[CONSOLE_LINE_MAX], ch;
	uint32 n, i, addr;
	int rv;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	/* Debug knob: skip the post-mortem entirely */
	if (DHD_NOCHECKDIED_ON())
		return 0;

	if (data == NULL) {
		/*
		 * Called after a rx ctrl timeout. "data" is NULL.
		 * allocate memory to trace the trap or assert.
		 */
		size = msize;
		mbuffer = data = MALLOC(bus->dhd->osh, msize);

		if (mbuffer == NULL) {
			DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
			bcmerror = BCME_NOMEM;
			goto done;
		}
	}

	/* Scratch string for assert expression/file names read from the dongle */
	if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
		DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
		bcmerror = BCME_NOMEM;
		goto done;
	}

	/* Refresh the host copy of the dongle's shared structure */
	if ((bcmerror = dhdpcie_readshared(bus)) < 0)
		goto done;

	bcm_binit(&strbuf, data, size);

	bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address  : 0x%08X\n",
	            pciedev_shared->msgtrace_addr, pciedev_shared->console_addr);

	if ((pciedev_shared->flags & PCIE_SHARED_ASSERT_BUILT) == 0) {
		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
		 * (Avoids conflict with real asserts for programmatic parsing of output.)
		 */
		bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
	}

	if ((bus->pcie_sh->flags & (PCIE_SHARED_ASSERT|PCIE_SHARED_TRAP)) == 0) {
		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
		 * (Avoids conflict with real asserts for programmatic parsing of output.)
		 */
		bcm_bprintf(&strbuf, "No trap%s in dongle",
		          (bus->pcie_sh->flags & PCIE_SHARED_ASSERT_BUILT)
		          ?"/assrt" :"");
	} else {
		if (bus->pcie_sh->flags & PCIE_SHARED_ASSERT) {
			/* Download assert */
			bcm_bprintf(&strbuf, "Dongle assert");
			if (bus->pcie_sh->assert_exp_addr != 0) {
				str[0] = '\0';
				if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
				                                  bus->pcie_sh->assert_exp_addr,
				                                 (uint8 *)str, maxstrlen)) < 0)
					goto done;

				str[maxstrlen - 1] = '\0';
				bcm_bprintf(&strbuf, " expr \"%s\"", str);
			}

			if (bus->pcie_sh->assert_file_addr != 0) {
				str[0] = '\0';
				if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
				                                  bus->pcie_sh->assert_file_addr,
				                                 (uint8 *)str, maxstrlen)) < 0)
					goto done;

				str[maxstrlen - 1] = '\0';
				bcm_bprintf(&strbuf, " file \"%s\"", str);
			}

			bcm_bprintf(&strbuf, " line %d ",  bus->pcie_sh->assert_line);
		}

		if (bus->pcie_sh->flags & PCIE_SHARED_TRAP) {
			bus->dhd->dongle_trap_occured = TRUE;
			/* Pull the saved register file from the trap area */
			if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
			                                  bus->pcie_sh->trap_addr,
			                                 (uint8*)&tr, sizeof(trap_t))) < 0)
				goto done;

			bcm_bprintf(&strbuf,
			"Dongle trap type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
			            "lp 0x%x, rpc 0x%x Trap offset 0x%x, "
			"r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
			"r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n",
			ltoh32(tr.type), ltoh32(tr.epc), ltoh32(tr.cpsr), ltoh32(tr.spsr),
			ltoh32(tr.r13), ltoh32(tr.r14), ltoh32(tr.pc),
			ltoh32(bus->pcie_sh->trap_addr),
			ltoh32(tr.r0), ltoh32(tr.r1), ltoh32(tr.r2), ltoh32(tr.r3),
			ltoh32(tr.r4), ltoh32(tr.r5), ltoh32(tr.r6), ltoh32(tr.r7));

			/* Read the console descriptor fields individually */
			addr =  bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log);
			if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
				(uint8 *)&console_ptr, sizeof(console_ptr))) < 0)
				goto printbuf;

			addr =  bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.buf_size);
			if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
				(uint8 *)&console_size, sizeof(console_size))) < 0)
				goto printbuf;

			addr =  bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.idx);
			if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
				(uint8 *)&console_index, sizeof(console_index))) < 0)
				goto printbuf;

			console_ptr = ltoh32(console_ptr);
			console_size = ltoh32(console_size);
			console_index = ltoh32(console_index);

			/* Bound the console size before trusting it for MALLOC */
			if (console_size > CONSOLE_BUFFER_MAX ||
				!(console_buffer = MALLOC(bus->dhd->osh, console_size)))
				goto printbuf;

			if ((rv = dhdpcie_bus_membytes(bus, FALSE, console_ptr,
				(uint8 *)console_buffer, console_size)) < 0)
				goto printbuf;

			/* Replay the ring buffer starting from the dongle's index */
			for (i = 0, n = 0; i < console_size; i += n + 1) {
				for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
					ch = console_buffer[(console_index + i + n) % console_size];
					if (ch == '\n')
						break;
					line[n] = ch;
				}


				if (n > 0) {
					if (line[n - 1] == '\r')
						n--;
					line[n] = 0;
					/* Don't use DHD_ERROR macro since we print
					 * a lot of information quickly. The macro
					 * will truncate a lot of the printfs
					 */

					if (dhd_msg_level & DHD_ERROR_VAL)
						printf("CONSOLE: %s\n", line);
				}
			}
		}
	}

printbuf:
	if (bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) {
		DHD_ERROR(("%s: %s\n", __FUNCTION__, strbuf.origbuf));
	}

done:
	if (mbuffer)
		MFREE(bus->dhd->osh, mbuffer, msize);
	if (str)
		MFREE(bus->dhd->osh, str, maxstrlen);

	if (console_buffer)
		MFREE(bus->dhd->osh, console_buffer, console_size);

	return bcmerror;
}
1515 #endif /* DHD_DEBUG */
1516
1517
/**
 * Transfers bytes between host and dongle memory using pio mode.
 * Parameter 'address' is a backplane address; 'write' selects direction
 * (TRUE: host 'data' -> dongle, FALSE: dongle -> host 'data').
 *
 * While at least 8 bytes remain and the host is little-endian, 64-bit
 * accesses are used; the tail (or a big-endian host) falls back to byte
 * accesses.  On MSM8994 builds, 32-bit accesses are used until 'address'
 * reaches 8-byte alignment.  Always returns 0.
 */
static int
dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size)
{
	int bcmerror = 0;
	uint dsize;
	int detect_endian_flag = 0x01;
	bool little_endian;
#ifdef CONFIG_ARCH_MSM8994
	bool is_64bit_unaligned;
#endif

	/* Detect endianness. */
	little_endian = *(char *)&detect_endian_flag;

#ifdef CONFIG_ARCH_MSM8994
	/* Check 64bit aligned or not. */
	is_64bit_unaligned = (address & 0x7);
#endif
	/* In remap mode, adjust address beyond socram and redirect
	 * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize
	 * is not backplane accessible
	 */

	/* Determine initial transfer parameters */
	dsize = sizeof(uint64);

	/* Do the transfer(s) */
	if (write) {
		while (size) {
			if (size >= sizeof(uint64) && little_endian) {
#ifdef CONFIG_ARCH_MSM8994
				/* 32-bit writes until the address is 8-byte aligned */
				if (is_64bit_unaligned) {
					DHD_INFO(("%s: write unaligned %lx\n",
					    __FUNCTION__, address));
					dhdpcie_bus_wtcm32(bus, address, *((uint32 *)data));
					data += 4;
					size -= 4;
					address += 4;
					is_64bit_unaligned = (address & 0x7);
					continue;
				}
				else
#endif
				dhdpcie_bus_wtcm64(bus, address, *((uint64 *)data));
			} else {
				/* Tail bytes (or big-endian host): one byte at a time.
				 * Once dsize drops to 1 it stays 1; size is then < 8
				 * so the 64-bit branch is never re-entered.
				 */
				dsize = sizeof(uint8);
				dhdpcie_bus_wtcm8(bus, address, *data);
			}

			/* Adjust for next transfer (if any) */
			if ((size -= dsize)) {
				data += dsize;
				address += dsize;
			}
		}
	} else {
		while (size) {
			if (size >= sizeof(uint64) && little_endian) {
#ifdef CONFIG_ARCH_MSM8994
				/* 32-bit reads until the address is 8-byte aligned */
				if (is_64bit_unaligned) {
					DHD_INFO(("%s: read unaligned %lx\n",
					    __FUNCTION__, address));
					*(uint32 *)data = dhdpcie_bus_rtcm32(bus, address);
					data += 4;
					size -= 4;
					address += 4;
					is_64bit_unaligned = (address & 0x7);
					continue;
				}
				else
#endif
				*(uint64 *)data = dhdpcie_bus_rtcm64(bus, address);
			} else {
				dsize = sizeof(uint8);
				*data = dhdpcie_bus_rtcm8(bus, address);
			}

			/* Adjust for next transfer (if any) */
			if ((size -= dsize) > 0) {
				data += dsize;
				address += dsize;
			}
		}
	}
	return bcmerror;
}
1608
/*
 * Drain the backlog queue associated with 'flow_id', handing each packet to
 * the protocol layer for transmission over the matching flow ring.
 *
 * Packets that cannot be posted (ring full) are re-inserted at the head of
 * the queue and the function returns BCME_OK, treating a successful requeue
 * as success.  Returns 0 for an invalid flow_id, BCME_NOTREADY when the
 * ring is not open, otherwise the last dhd_prot_txdata() result.
 *
 * NOTE(review): the 'txs' parameter is not used in this function body —
 * presumably reserved for the tx-status path; confirm against callers.
 */
int BCMFASTPATH
dhd_bus_schedule_queue(struct dhd_bus  *bus, uint16 flow_id, bool txs)
{
	flow_ring_node_t *flow_ring_node;
	int ret = BCME_OK;

	DHD_INFO(("%s: flow_id is %d\n", __FUNCTION__, flow_id));
	/* ASSERT on flow_id */
	if (flow_id >= bus->max_sub_queues) {
		DHD_ERROR(("%s: flow_id is invalid %d, max %d\n", __FUNCTION__,
			flow_id, bus->max_sub_queues));
		return 0;
	}

	flow_ring_node = DHD_FLOW_RING(bus->dhd, flow_id);

	{
		unsigned long flags;
		void *txp = NULL;
		flow_queue_t *queue;

		queue = &flow_ring_node->queue; /* queue associated with flow ring */

		/* Per-flow-ring lock held across the whole dequeue/post loop */
		DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);

		if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
			return BCME_NOTREADY;
		}

		while ((txp = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
			PKTORPHAN(txp);

#ifdef DHDTCPACK_SUPPRESS
		if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_HOLD) {
			dhd_tcpack_check_xmit(bus->dhd, txp);
		}
#endif /* DHDTCPACK_SUPPRESS */
			/* Attempt to transfer packet over flow ring */

			ret = dhd_prot_txdata(bus->dhd, txp, flow_ring_node->flow_info.ifindex);
			if (ret != BCME_OK) { /* may not have resources in flow ring */
				DHD_INFO(("%s: Reinserrt %d\n", __FUNCTION__, ret));
				dhd_prot_txdata_write_flush(bus->dhd, flow_id, FALSE);
				/* reinsert at head */
				dhd_flow_queue_reinsert(bus->dhd, queue, txp);
				DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);

				/* If we are able to requeue back, return success */
				return BCME_OK;
			}
		}

		/* Flush any posted descriptors to the dongle */
		dhd_prot_txdata_write_flush(bus->dhd, flow_id, FALSE);

		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
	}

	return ret;
}
1669
1670 #ifndef PCIE_TX_DEFERRAL
1671 /* Send a data frame to the dongle.  Callee disposes of txp. */
1672 int BCMFASTPATH
1673 dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx)
1674 {
1675         unsigned long flags;
1676         int ret = BCME_OK;
1677         void *txp_pend = NULL;
1678         if (!bus->txmode_push) {
1679                 uint16 flowid;
1680                 flow_queue_t *queue;
1681                 flow_ring_node_t *flow_ring_node;
1682                 if (!bus->dhd->flowid_allocator) {
1683                         DHD_ERROR(("%s: Flow ring not intited yet  \n", __FUNCTION__));
1684                         goto toss;
1685                 }
1686
1687                 flowid = DHD_PKTTAG_FLOWID((dhd_pkttag_fr_t*)PKTTAG(txp));
1688
1689                 flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
1690
1691                 DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n",
1692                         __FUNCTION__, flowid, flow_ring_node->status,
1693                         flow_ring_node->active));
1694
1695                 if ((flowid >= bus->dhd->num_flow_rings) ||
1696                         (!flow_ring_node->active) ||
1697                         (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING)) {
1698                         DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n",
1699                                 __FUNCTION__, flowid, flow_ring_node->status,
1700                                 flow_ring_node->active));
1701                         ret = BCME_ERROR;
1702                         goto toss;
1703                 }
1704
1705                 queue = &flow_ring_node->queue; /* queue associated with flow ring */
1706
1707                 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
1708
1709                 if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK)
1710                         txp_pend = txp;
1711
1712                 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
1713
1714                 if (flow_ring_node->status) {
1715                         DHD_INFO(("%s: Enq pkt flowid %d, status %d active %d\n",
1716                             __FUNCTION__, flowid, flow_ring_node->status,
1717                             flow_ring_node->active));
1718                         if (txp_pend) {
1719                                 txp = txp_pend;
1720                                 goto toss;
1721                         }
1722                         return BCME_OK;
1723                 }
1724                 ret = dhd_bus_schedule_queue(bus, flowid, FALSE);
1725
1726                 /* If we have anything pending, try to push into q */
1727                 if (txp_pend) {
1728                         DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
1729
1730                         if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp_pend)) != BCME_OK) {
1731                                 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
1732                                 txp = txp_pend;
1733                                 goto toss;
1734                         }
1735
1736                         DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
1737                 }
1738
1739                 return ret;
1740
1741         } else { /* bus->txmode_push */
1742                 return dhd_prot_txdata(bus->dhd, txp, ifidx);
1743         }
1744
1745 toss:
1746         DHD_INFO(("%s: Toss %d\n", __FUNCTION__, ret));
1747         PKTCFREE(bus->dhd->osh, txp, TRUE);
1748         return ret;
1749 }
1750 #else /* PCIE_TX_DEFERRAL */
1751 int BCMFASTPATH
1752 dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx)
1753 {
1754         unsigned long flags;
1755         int ret = BCME_OK;
1756         uint16 flowid;
1757         flow_queue_t *queue;
1758         flow_ring_node_t *flow_ring_node;
1759         uint8 *pktdata = (uint8 *)PKTDATA(bus->dhd->osh, txp);
1760         struct ether_header *eh = (struct ether_header *)pktdata;
1761
1762         if (!bus->dhd->flowid_allocator) {
1763                 DHD_ERROR(("%s: Flow ring not intited yet  \n", __FUNCTION__));
1764                 goto toss;
1765         }
1766
1767         flowid = dhd_flowid_find(bus->dhd, ifidx,
1768                 bus->dhd->flow_prio_map[(PKTPRIO(txp))],
1769                 eh->ether_shost, eh->ether_dhost);
1770         if (flowid == FLOWID_INVALID) {
1771                 DHD_PKTTAG_SET_FLOWID((dhd_pkttag_fr_t *)PKTTAG(txp), ifidx);
1772                 skb_queue_tail(&bus->orphan_list, txp);
1773                 queue_work(bus->tx_wq, &bus->create_flow_work);
1774                 return BCME_OK;
1775         }
1776
1777         DHD_PKTTAG_SET_FLOWID((dhd_pkttag_fr_t *)PKTTAG(txp), flowid);
1778         flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
1779         queue = &flow_ring_node->queue; /* queue associated with flow ring */
1780
1781         DHD_DATA(("%s: pkt flowid %d, status %d active %d\n",
1782                 __FUNCTION__, flowid, flow_ring_node->status,
1783                 flow_ring_node->active));
1784
1785         DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
1786         if ((flowid >= bus->dhd->num_flow_rings) ||
1787                 (!flow_ring_node->active) ||
1788                 (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING)) {
1789                 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
1790                 DHD_DATA(("%s: Dropping pkt flowid %d, status %d active %d\n",
1791                         __FUNCTION__, flowid, flow_ring_node->status,
1792                         flow_ring_node->active));
1793                 ret = BCME_ERROR;
1794                 goto toss;
1795         }
1796
1797         if (flow_ring_node->status == FLOW_RING_STATUS_PENDING) {
1798                 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
1799                 DHD_PKTTAG_SET_FLOWID((dhd_pkttag_fr_t *)PKTTAG(txp), ifidx);
1800                 skb_queue_tail(&bus->orphan_list, txp);
1801                 queue_work(bus->tx_wq, &bus->create_flow_work);
1802                 return BCME_OK;
1803         }
1804
1805         if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK) {
1806                 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
1807                 goto toss;
1808         }
1809
1810         DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
1811
1812         ret = dhd_bus_schedule_queue(bus, flowid, FALSE);
1813
1814         return ret;
1815
1816 toss:
1817         DHD_DATA(("%s: Toss %d\n", __FUNCTION__, ret));
1818         PKTCFREE(bus->dhd->osh, txp, TRUE);
1819         return ret;
1820 }
1821 #endif /* !PCIE_TX_DEFERRAL */
1822
1823
/* Assert TX flow control: stop transmission on all host network interfaces
 * and mark the bus as flow-controlled.
 */
void
dhd_bus_stop_queue(struct dhd_bus *bus)
{
        dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
        bus->bus_flowctrl = TRUE;
}
1830
1831 void
1832 dhd_bus_start_queue(struct dhd_bus *bus)
1833 {
1834         dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
1835         bus->bus_flowctrl = TRUE;
1836 }
1837
/* Record an ioctl completion received from the dongle on the bus object so
 * the thread waiting on the ioctl path can pick up the response.
 */
void
dhd_bus_update_retlen(dhd_bus_t *bus, uint32 retlen, uint32 pkt_id, uint16 status,
        uint32 resp_len)
{
        bus->rxlen = retlen;                            /* bytes of response available */
        bus->ioct_resp.cmn_hdr.request_id = pkt_id;     /* id echoed from the request */
        bus->ioct_resp.compl_hdr.status = status;       /* dongle completion status */
        bus->ioct_resp.resp_len = (uint16)resp_len;     /* truncated to 16 bits */
}
1847
#if defined(DHD_DEBUG)
/**
 * Device console input function: write a console command into the dongle's
 * console input buffer (hnd_cons_t) via TCM, then raise a host-to-dongle
 * mailbox interrupt so the dongle processes it.
 *
 * @param dhd     public dhd context
 * @param msg     command bytes to write (caller owns the buffer)
 * @param msglen  number of bytes in 'msg'
 * @return BCME_UNSUPPORTED if the dongle has no console, BCME_NOTREADY if the
 *         dongle is in reset, otherwise the result of the last TCM write
 *         (negative on error).
 */
int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
{
        dhd_bus_t *bus = dhd->bus;
        uint32 addr, val;
        int rv;

        /* Address could be zero if CONSOLE := 0 in dongle Makefile */
        if (bus->console_addr == 0)
                return BCME_UNSUPPORTED;

        /* Don't allow input if dongle is in reset */
        if (bus->dhd->dongle_reset) {
                /* BUG FIX: removed a stray dhd_os_sdunlock() call here (copy-
                 * paste from the SDIO bus layer); no SD lock is taken on this
                 * PCIe path, so unlocking was unbalanced.
                 */
                return BCME_NOTREADY;
        }

        /* Zero cbuf_index */
        addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx);
        val = htol32(0);
        if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
                goto done;

        /* Write message into cbuf.
         * NOTE(review): msglen is not bounded against the dongle's cbuf size;
         * callers must guarantee the command fits — confirm against hnd_cons_t.
         */
        addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf);
        if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0)
                goto done;

        /* Write length into vcons_in */
        addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in);
        val = htol32(msglen);
        if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
                goto done;

        /* Generate an interrupt to the dongle to indicate that it needs to
         * process the console command.
         */
        dhdpcie_send_mb_data(bus, H2D_HOST_CONS_INT);
done:
        return rv;
}
#endif /* defined(DHD_DEBUG) */
1888
1889 /* Process rx frame , Send up the layer to netif */
1890 void BCMFASTPATH
1891 dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count)
1892 {
1893         dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, 0);
1894 }
1895
#ifdef CONFIG_ARCH_MSM8994
/* Translate a full TCM backplane offset into an offset relative to the
 * current BAR1 window, retargeting the window first if the requested offset
 * falls outside of it. Needed on MSM8994, where the BAR1 aperture is smaller
 * than the dongle's TCM.
 */
static ulong dhd_bus_cmn_check_offset(dhd_bus_t *bus, ulong offset)
{
        uint new_bar1_wbase = 0;
        ulong address = 0;

        /* Window base covering 'offset' (in-window bits masked off) */
        new_bar1_wbase = (uint)offset & bus->bar1_win_mask;
        if (bus->bar1_win_base != new_bar1_wbase) {
                /* Move the BAR1 window via PCI config space */
                bus->bar1_win_base = new_bar1_wbase;
                dhdpcie_bus_cfg_set_bar1_win(bus, bus->bar1_win_base);
                DHD_ERROR(("%s: offset=%lx, switch bar1_win_base to %x\n",
                    __FUNCTION__, offset, bus->bar1_win_base));
        }

        /* Residual offset inside the (possibly just moved) window */
        address = offset - bus->bar1_win_base;

        return address;
}
#else
/* On other platforms BAR1 maps all of TCM: offsets pass through unchanged */
#define dhd_bus_cmn_check_offset(x, y) y
#endif /* CONFIG_ARCH_MSM8994 */
1917
1918 /** 'offset' is a backplane address */
1919 void
1920 dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data)
1921 {
1922         *(volatile uint8 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)) = (uint8)data;
1923 }
1924
1925 uint8
1926 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset)
1927 {
1928         volatile uint8 data;
1929 #ifdef BCM47XX_ACP_WAR
1930         data = R_REG(bus->dhd->osh,
1931             (volatile uint8 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)));
1932 #else
1933         data = *(volatile uint8 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset));
1934 #endif
1935         return data;
1936 }
1937
1938 void
1939 dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data)
1940 {
1941         *(volatile uint32 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)) = (uint32)data;
1942 }
1943 void
1944 dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data)
1945 {
1946         *(volatile uint16 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)) = (uint16)data;
1947 }
1948 void
1949 dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
1950 {
1951         *(volatile uint64 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)) = (uint64)data;
1952 }
1953
1954 uint16
1955 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset)
1956 {
1957         volatile uint16 data;
1958 #ifdef BCM47XX_ACP_WAR
1959         data = R_REG(bus->dhd->osh,
1960             (volatile uint16 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)));
1961 #else
1962         data = *(volatile uint16 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset));
1963 #endif
1964         return data;
1965 }
1966
1967 uint32
1968 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset)
1969 {
1970         volatile uint32 data;
1971 #ifdef BCM47XX_ACP_WAR
1972         data = R_REG(bus->dhd->osh,
1973             (volatile uint32 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)));
1974 #else
1975         data = *(volatile uint32 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset));
1976 #endif
1977         return data;
1978 }
1979
1980 uint64
1981 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset)
1982 {
1983         volatile uint64 data;
1984 #ifdef BCM47XX_ACP_WAR
1985         data = R_REG(bus->dhd->osh,
1986             (volatile uint64 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)));
1987 #else
1988         data = *(volatile uint64 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset));
1989 #endif
1990         return data;
1991 }
1992
/**
 * Write a host value into the dongle/host shared structure or per-ring state
 * in dongle memory (TCM).
 *
 * @param bus    bus instance
 * @param data   host source value (uint16/uint32/uint64 depending on 'type')
 * @param len    byte length for the membytes-based (64-bit address) writes
 * @param type   which shared field to update
 * @param ringid ring index; used only by the per-ring types
 *
 * NOTE(review): 'sh'/'shmem' hold dongle-side addresses, so expressions such
 * as '(ulong)&(sh->field)' compute the dongle address of 'field' via pointer
 * arithmetic rather than dereferencing host memory — confirm this matches
 * how bus->shared_addr / bus->pcie_sh are initialized.
 */
void
dhd_bus_cmn_writeshared(dhd_bus_t *bus, void * data, uint32 len, uint8 type, uint16 ringid)
{
        uint64 long_data;
        ulong tcm_offset;
        pciedev_shared_t *sh;
        pciedev_shared_t *shmem = NULL;

        sh = (pciedev_shared_t*)bus->shared_addr;

        DHD_INFO(("%s: writing to msgbuf type %d, len %d\n", __FUNCTION__, type, len));

        switch (type) {
                /* 64-bit host address of the D2H DMA scratch buffer */
                case DNGL_TO_HOST_DMA_SCRATCH_BUFFER:
                        long_data = HTOL64(*(uint64 *)data);
                        tcm_offset = (ulong)&(sh->host_dma_scratch_buffer);
                        dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
                        prhex(__FUNCTION__, data, len);
                        break;

                /* Length of the D2H DMA scratch buffer */
                case DNGL_TO_HOST_DMA_SCRATCH_BUFFER_LEN :
                        tcm_offset = (ulong)&(sh->host_dma_scratch_buffer_len);
                        dhdpcie_bus_wtcm32(bus, tcm_offset, (uint32) HTOL32(*(uint32 *)data));
                        prhex(__FUNCTION__, data, len);
                        break;

                /* Host address of the H2D write-index DMA buffer (in ring_info) */
                case HOST_TO_DNGL_DMA_WRITEINDX_BUFFER:
                        /* ring_info_ptr stored in pcie_sh */
                        shmem = (pciedev_shared_t *)bus->pcie_sh;

                        long_data = HTOL64(*(uint64 *)data);
                        tcm_offset = (ulong)shmem->rings_info_ptr;
                        tcm_offset += OFFSETOF(ring_info_t, h2d_w_idx_hostaddr);
                        dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
                        prhex(__FUNCTION__, data, len);
                        break;

                /* Host address of the H2D read-index DMA buffer (in ring_info) */
                case HOST_TO_DNGL_DMA_READINDX_BUFFER:
                        /* ring_info_ptr stored in pcie_sh */
                        shmem = (pciedev_shared_t *)bus->pcie_sh;

                        long_data = HTOL64(*(uint64 *)data);
                        tcm_offset = (ulong)shmem->rings_info_ptr;
                        tcm_offset += OFFSETOF(ring_info_t, h2d_r_idx_hostaddr);
                        dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
                        prhex(__FUNCTION__, data, len);
                        break;

                /* Host address of the D2H write-index DMA buffer (in ring_info) */
                case DNGL_TO_HOST_DMA_WRITEINDX_BUFFER:
                        /* ring_info_ptr stored in pcie_sh */
                        shmem = (pciedev_shared_t *)bus->pcie_sh;

                        long_data = HTOL64(*(uint64 *)data);
                        tcm_offset = (ulong)shmem->rings_info_ptr;
                        tcm_offset += OFFSETOF(ring_info_t, d2h_w_idx_hostaddr);
                        dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
                        prhex(__FUNCTION__, data, len);
                        break;

                /* Host address of the D2H read-index DMA buffer (in ring_info) */
                case DNGL_TO_HOST_DMA_READINDX_BUFFER:
                        /* ring_info_ptr stored in pcie_sh */
                        shmem = (pciedev_shared_t *)bus->pcie_sh;

                        long_data = HTOL64(*(uint64 *)data);
                        tcm_offset = (ulong)shmem->rings_info_ptr;
                        tcm_offset += OFFSETOF(ring_info_t, d2h_r_idx_hostaddr);
                        dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
                        prhex(__FUNCTION__, data, len);
                        break;

                /* Per-ring: size of one work item */
                case RING_LEN_ITEMS :
                        tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
                        tcm_offset += OFFSETOF(ring_mem_t, len_items);
                        dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data));
                        break;

                /* Per-ring: ring depth (max number of items) */
                case RING_MAX_ITEM :
                        tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
                        tcm_offset += OFFSETOF(ring_mem_t, max_item);
                        dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data));
                        break;

                /* Per-ring: 64-bit host base address of the ring buffer */
                case RING_BUF_ADDR :
                        long_data = HTOL64(*(uint64 *)data);
                        tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
                        tcm_offset += OFFSETOF(ring_mem_t, base_addr);
                        dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8 *) &long_data, len);
                        prhex(__FUNCTION__, data, len);
                        break;

                /* Per-ring: write index in ring state */
                case RING_WRITE_PTR :
                        tcm_offset = bus->ring_sh[ringid].ring_state_w;
                        dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data));
                        break;
                /* Per-ring: read index in ring state */
                case RING_READ_PTR :
                        tcm_offset = bus->ring_sh[ringid].ring_state_r;
                        dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data));
                        break;

                /* Dongle-to-host mailbox data word */
                case DTOH_MB_DATA:
                        dhdpcie_bus_wtcm32(bus, bus->d2h_mb_data_ptr_addr,
                                (uint32) HTOL32(*(uint32 *)data));
                        break;

                /* Host-to-dongle mailbox data word */
                case HTOD_MB_DATA:
                        dhdpcie_bus_wtcm32(bus, bus->h2d_mb_data_ptr_addr,
                                (uint32) HTOL32(*(uint32 *)data));
                        break;
                default:
                        break;
        }
}
2105
2106
2107 void
2108 dhd_bus_cmn_readshared(dhd_bus_t *bus, void* data, uint8 type, uint16 ringid)
2109 {
2110         pciedev_shared_t *sh;
2111         ulong tcm_offset;
2112
2113         sh = (pciedev_shared_t*)bus->shared_addr;
2114
2115         switch (type) {
2116                 case RING_WRITE_PTR :
2117                         tcm_offset = bus->ring_sh[ringid].ring_state_w;
2118                         *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, tcm_offset));
2119                         break;
2120                 case RING_READ_PTR :
2121                         tcm_offset = bus->ring_sh[ringid].ring_state_r;
2122                         *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, tcm_offset));
2123                         break;
2124                 case TOTAL_LFRAG_PACKET_CNT :
2125                         *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus,
2126                                 (ulong) &sh->total_lfrag_pkt_cnt));
2127                         break;
2128                 case HTOD_MB_DATA:
2129                         *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, bus->h2d_mb_data_ptr_addr));
2130                         break;
2131                 case DTOH_MB_DATA:
2132                         *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, bus->d2h_mb_data_ptr_addr));
2133                         break;
2134                 case MAX_HOST_RXBUFS :
2135                         *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus,
2136                                 (ulong) &sh->max_host_rxbufs));
2137                         break;
2138                 default :
2139                         break;
2140         }
2141 }
2142
2143 uint32 dhd_bus_get_sharedflags(dhd_bus_t *bus)
2144 {
2145         return ((pciedev_shared_t*)bus->pcie_sh)->flags;
2146 }
2147
/* Clear bus-layer counters. Intentionally a no-op: the PCIe bus layer keeps
 * no counters; the function exists to satisfy the common dhd bus interface.
 */
void
dhd_bus_clearcounts(dhd_pub_t *dhdp)
{
}
2152
2153 int
2154 dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
2155                  void *params, int plen, void *arg, int len, bool set)
2156 {
2157         dhd_bus_t *bus = dhdp->bus;
2158         const bcm_iovar_t *vi = NULL;
2159         int bcmerror = 0;
2160         int val_size;
2161         uint32 actionid;
2162
2163         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2164
2165         ASSERT(name);
2166         ASSERT(len >= 0);
2167
2168         /* Get MUST have return space */
2169         ASSERT(set || (arg && len));
2170
2171         /* Set does NOT take qualifiers */
2172         ASSERT(!set || (!params && !plen));
2173
2174         DHD_INFO(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
2175                  name, (set ? "set" : "get"), len, plen));
2176
2177         /* Look up var locally; if not found pass to host driver */
2178         if ((vi = bcm_iovar_lookup(dhdpcie_iovars, name)) == NULL) {
2179                 goto exit;
2180         }
2181
2182
2183         /* set up 'params' pointer in case this is a set command so that
2184          * the convenience int and bool code can be common to set and get
2185          */
2186         if (params == NULL) {
2187                 params = arg;
2188                 plen = len;
2189         }
2190
2191         if (vi->type == IOVT_VOID)
2192                 val_size = 0;
2193         else if (vi->type == IOVT_BUFFER)
2194                 val_size = len;
2195         else
2196                 /* all other types are integer sized */
2197                 val_size = sizeof(int);
2198
2199         actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
2200         bcmerror = dhdpcie_bus_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
2201
2202 exit:
2203         return bcmerror;
2204 }
2205
2206 #ifdef BCM_BUZZZ
2207 #include <bcm_buzzz.h>
2208
2209 int dhd_buzzz_dump_cntrs3(char *p, uint32 *core, uint32 * ovhd, uint32 *log)
2210 {
2211         int bytes = 0;
2212         uint32 ctr, curr[3], prev[3], delta[3];
2213
2214         /* Compute elapsed counter values per counter event type */
2215         for (ctr = 0U; ctr < 3; ctr++) {
2216                 prev[ctr] = core[ctr];
2217                 curr[ctr] = *log++;
2218                 core[ctr] = curr[ctr];  /* saved for next log */
2219
2220                 if (curr[ctr] < prev[ctr])
2221                         delta[ctr] = curr[ctr] + (~0U - prev[ctr]);
2222                 else
2223                         delta[ctr] = (curr[ctr] - prev[ctr]);
2224
2225                 /* Adjust for instrumentation overhead */
2226                 if (delta[ctr] >= ovhd[ctr])
2227                         delta[ctr] -= ovhd[ctr];
2228                 else
2229                         delta[ctr] = 0;
2230
2231                 bytes += sprintf(p + bytes, "%12u ", delta[ctr]);
2232         }
2233
2234         return bytes;
2235 }
2236
/* Packed view of the four 8-bit Cortex-M3 profiling counters carried in a
 * single 32-bit log word; accessible as one word, four bytes, or named fields.
 */
typedef union cm3_cnts { /* export this in bcm_buzzz.h */
        uint32 u32;             /* all four counters as one 32-bit word */
        uint8  u8[4];           /* byte-indexed access */
        struct {
                uint8 cpicnt;   /* CPI counter */
                uint8 exccnt;   /* exception overhead counter */
                uint8 sleepcnt; /* sleep counter */
                uint8 lsucnt;   /* load/store unit counter */
        };
} cm3_cnts_t;
2247
2248 int dhd_buzzz_dump_cntrs6(char *p, uint32 *core, uint32 * ovhd, uint32 *log)
2249 {
2250         int bytes = 0;
2251
2252         uint32 cyccnt, instrcnt;
2253         cm3_cnts_t cm3_cnts;
2254         uint8 foldcnt;
2255
2256         {   /* 32bit cyccnt */
2257                 uint32 curr, prev, delta;
2258                 prev = core[0]; curr = *log++; core[0] = curr;
2259                 if (curr < prev)
2260                         delta = curr + (~0U - prev);
2261                 else
2262                         delta = (curr - prev);
2263                 if (delta >= ovhd[0])
2264                         delta -= ovhd[0];
2265                 else
2266                         delta = 0;
2267
2268                 bytes += sprintf(p + bytes, "%12u ", delta);
2269                 cyccnt = delta;
2270         }
2271
2272         {       /* Extract the 4 cnts: cpi, exc, sleep and lsu */
2273                 int i;
2274                 uint8 max8 = ~0;
2275                 cm3_cnts_t curr, prev, delta;
2276                 prev.u32 = core[1]; curr.u32 = * log++; core[1] = curr.u32;
2277                 for (i = 0; i < 4; i++) {
2278                         if (curr.u8[i] < prev.u8[i])
2279                                 delta.u8[i] = curr.u8[i] + (max8 - prev.u8[i]);
2280                         else
2281                                 delta.u8[i] = (curr.u8[i] - prev.u8[i]);
2282                         if (delta.u8[i] >= ovhd[i + 1])
2283                                 delta.u8[i] -= ovhd[i + 1];
2284                         else
2285                                 delta.u8[i] = 0;
2286                         bytes += sprintf(p + bytes, "%4u ", delta.u8[i]);
2287                 }
2288                 cm3_cnts.u32 = delta.u32;
2289         }
2290
2291         {   /* Extract the foldcnt from arg0 */
2292                 uint8 curr, prev, delta, max8 = ~0;
2293                 buzzz_arg0_t arg0; arg0.u32 = *log;
2294                 prev = core[2]; curr = arg0.klog.cnt; core[2] = curr;
2295                 if (curr < prev)
2296                         delta = curr + (max8 - prev);
2297                 else
2298                         delta = (curr - prev);
2299                 if (delta >= ovhd[5])
2300                         delta -= ovhd[5];
2301                 else
2302                         delta = 0;
2303                 bytes += sprintf(p + bytes, "%4u ", delta);
2304                 foldcnt = delta;
2305         }
2306
2307         instrcnt = cyccnt - (cm3_cnts.u8[0] + cm3_cnts.u8[1] + cm3_cnts.u8[2]
2308                                  + cm3_cnts.u8[3]) + foldcnt;
2309         if (instrcnt > 0xFFFFFF00)
2310                 bytes += sprintf(p + bytes, "[%10s] ", "~");
2311         else
2312                 bytes += sprintf(p + bytes, "[%10u] ", instrcnt);
2313         return bytes;
2314 }
2315
/* Decode and format one BUZZZ log entry into 'p': first the elapsed counter
 * values (CM3 or CR4 layout, selected by buzzz->counters), then the event
 * text rendered through the registered format-string table.
 *
 * Returns the number of characters written into 'p'.
 */
int dhd_buzzz_dump_log(char * p, uint32 * core, uint32 * log, buzzz_t * buzzz)
{
        int bytes = 0;
        buzzz_arg0_t arg0;
        static uint8 * fmt[] = BUZZZ_FMT_STRINGS;

        if (buzzz->counters == 6) {
                bytes += dhd_buzzz_dump_cntrs6(p, core, buzzz->ovhd, log);
                log += 2; /* 32bit cyccnt + (4 x 8bit) CM3 */
        } else {
                bytes += dhd_buzzz_dump_cntrs3(p, core, buzzz->ovhd, log);
                log += 3; /* (3 x 32bit) CR4 */
        }

        /* Dump the logged arguments using the registered formats */
        arg0.u32 = *log++;

        switch (arg0.klog.args) {
                case 0:
                        /* NOTE(review): non-literal format string; safe only as
                         * long as BUZZZ_FMT_STRINGS entries for 0-arg events
                         * contain no conversion specifiers — confirm.
                         */
                        bytes += sprintf(p + bytes, fmt[arg0.klog.id]);
                        break;
                case 1:
                {
                        uint32 arg1 = *log++;
                        bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1);
                        break;
                }
                default:
                        printf("%s: Maximum one argument supported\n", __FUNCTION__);
                        break;
        }
        bytes += sprintf(p + bytes, "\n");

        return bytes;
}
2351
/* Walk the fetched BUZZZ trace buffer and print every log entry, handling a
 * wrapped circular buffer by dumping the older (cursor..end) entries first,
 * then the (start..cursor) entries. 'p' is a caller-provided scratch page
 * used to format one entry at a time.
 */
void dhd_buzzz_dump(buzzz_t * buzzz_p, void * buffer_p, char * p)
{
        int i;
        uint32 total, part1, part2, log_sz, core[BUZZZ_COUNTERS_MAX];
        void * log;

        /* Reset the per-counter running state used for delta computation */
        for (i = 0; i < BUZZZ_COUNTERS_MAX; i++)
                core[i] = 0;

        log_sz = buzzz_p->log_sz;

        /* Entries from the start of the buffer up to the write cursor.
         * NOTE(review): 'cur' and 'log' are narrowed to uint32 — this assumes
         * they hold 32-bit dongle addresses rather than host pointers (a
         * 64-bit host pointer would be truncated); confirm against buzzz_t.
         */
        part1 = ((uint32)buzzz_p->cur - (uint32)buzzz_p->log) / log_sz;

        if (buzzz_p->wrap == TRUE) {
                /* Buffer wrapped: also dump the older entries after the cursor */
                part2 = ((uint32)buzzz_p->end - (uint32)buzzz_p->cur) / log_sz;
                total = (buzzz_p->buffer_sz - BUZZZ_LOGENTRY_MAXSZ) / log_sz;
        } else {
                part2 = 0U;
                total = buzzz_p->count;
        }

        if (total == 0U) {
                printf("%s: buzzz_dump total<%u> done\n", __FUNCTION__, total);
                return;
        } else {
                printf("%s: buzzz_dump total<%u> : part2<%u> + part1<%u>\n", __FUNCTION__,
                       total, part2, part1);
        }

        if (part2) {   /* with wrap */
                log = (void*)((size_t)buffer_p + (buzzz_p->cur - buzzz_p->log));
                while (part2--) {   /* from cur to end : part2 */
                        p[0] = '\0';
                        dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
                        printf("%s", p);
                        log = (void*)((size_t)log + buzzz_p->log_sz);
                }
        }

        log = (void*)buffer_p;
        while (part1--) {
                p[0] = '\0';
                dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
                printf("%s", p);
                log = (void*)((size_t)log + buzzz_p->log_sz);
        }

        printf("%s: buzzz_dump done.\n", __FUNCTION__);
}
2401
/* Fetch the dongle's BUZZZ trace state and buffer over the bus and print the
 * decoded trace to the console.
 *
 * NOTE(review): always returns BCME_OK — allocation failures and a failed
 * dhdpcie_readshared() fall through to 'done' and still report success;
 * confirm whether callers should see an error instead.
 */
int dhd_buzzz_dump_dngl(dhd_bus_t *bus)
{
        buzzz_t * buzzz_p = NULL;
        void * buffer_p = NULL;
        char * page_p = NULL;
        pciedev_shared_t *sh;
        int ret = 0;

        /* Only possible once the bus is up and passing data */
        if (bus->dhd->busstate != DHD_BUS_DATA) {
                return BCME_UNSUPPORTED;
        }
        /* Scratch page used to format one log entry at a time */
        if ((page_p = (char *)MALLOC(bus->dhd->osh, 4096)) == NULL) {
                printf("%s: Page memory allocation failure\n", __FUNCTION__);
                goto done;
        }
        /* Host copy of the dongle's buzzz_t bookkeeping structure */
        if ((buzzz_p = MALLOC(bus->dhd->osh, sizeof(buzzz_t))) == NULL) {
                printf("%s: Buzzz memory allocation failure\n", __FUNCTION__);
                goto done;
        }

        /* Refresh the shared area to get the current buzzz pointer */
        ret = dhdpcie_readshared(bus);
        if (ret < 0) {
                DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
                goto done;
        }

        sh = bus->pcie_sh;

        DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__, sh->buzzz));

        if (sh->buzzz != 0U) {  /* Fetch and display dongle BUZZZ Trace */
                dhdpcie_bus_membytes(bus, FALSE, (ulong)sh->buzzz,
                                     (uint8 *)buzzz_p, sizeof(buzzz_t));
                if (buzzz_p->count == 0) {
                        printf("%s: Empty dongle BUZZZ trace\n\n", __FUNCTION__);
                        goto done;
                }
                if (buzzz_p->counters != 3) { /* 3 counters for CR4 */
                        printf("%s: Counters<%u> mismatch\n", __FUNCTION__, buzzz_p->counters);
                        goto done;
                }
                /* Allocate memory for trace buffer and format strings */
                buffer_p = MALLOC(bus->dhd->osh, buzzz_p->buffer_sz);
                if (buffer_p == NULL) {
                        printf("%s: Buffer memory allocation failure\n", __FUNCTION__);
                        goto done;
                }
                /* Fetch the trace and format strings */
                dhdpcie_bus_membytes(bus, FALSE, (uint32)buzzz_p->log,   /* Trace */
                                     (uint8 *)buffer_p, buzzz_p->buffer_sz);
                /* Process and display the trace using formatted output */
                printf("%s: <#cycle> <#instruction> <#ctr3> <event information>\n", __FUNCTION__);
                dhd_buzzz_dump(buzzz_p, buffer_p, page_p);
                printf("%s: ----- End of dongle BUZZZ Trace -----\n\n", __FUNCTION__);
                MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); buffer_p = NULL;
        }

done:
        /* buffer_p can only be non-NULL when buzzz_p is valid, so using
         * buzzz_p->buffer_sz for the free below is safe.
         */
        if (page_p)   MFREE(bus->dhd->osh, page_p, 4096);
        if (buzzz_p)  MFREE(bus->dhd->osh, buzzz_p, sizeof(buzzz_t));
        if (buffer_p) MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz);

        return BCME_OK;
}
2467 #endif /* BCM_BUZZZ */
2468
2469 #define PCIE_GEN2(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) && \
2470         ((sih)->buscoretype == PCIE2_CORE_ID))
2471
2472 static bool
2473 pcie2_mdiosetblock(dhd_bus_t *bus, uint blk)
2474 {
2475         uint mdiodata, mdioctrl, i = 0;
2476         uint pcie_serdes_spinwait = 200;
2477
2478         mdioctrl = MDIOCTL2_DIVISOR_VAL | (0x1F << MDIOCTL2_REGADDR_SHF);
2479         mdiodata = (blk << MDIODATA2_DEVADDR_SHF) | MDIODATA2_DONE;
2480
2481         si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_CONTROL, ~0, mdioctrl);
2482         si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, ~0, mdiodata);
2483
2484         OSL_DELAY(10);
2485         /* retry till the transaction is complete */
2486         while (i < pcie_serdes_spinwait) {
2487                 uint mdioctrl_read = si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA,
2488                         0, 0);
2489                 if (!(mdioctrl_read & MDIODATA2_DONE)) {
2490                         break;
2491                 }
2492                 OSL_DELAY(1000);
2493                 i++;
2494         }
2495
2496         if (i >= pcie_serdes_spinwait) {
2497                 DHD_ERROR(("%s: pcie_mdiosetblock: timed out\n", __FUNCTION__));
2498                 return FALSE;
2499         }
2500
2501         return TRUE;
2502 }
2503
2504
/* Perform one MDIO register access to the PCIe Gen2 SERDES.
 *
 * physmedia:    MDIO block id, selected first via pcie2_mdiosetblock()
 * regaddr:      register address within the block
 * write:        TRUE = write *val, FALSE = read into *val
 * val:          in (write) / out (read) data; masked with MDIODATA2_MASK on read
 * slave_bypass: set MDIOCTL2_SLAVE_BYPASS in the control word
 *
 * Returns 0 on success, -1 on timeout (note: plain 0/-1, not BCME_* codes;
 * callers test the result for truthiness).
 */
static int
pcie2_mdioop(dhd_bus_t *bus, uint physmedia, uint regaddr, bool write, uint *val,
        bool slave_bypass)
{
        uint pcie_serdes_spinwait = 200, i = 0, mdio_ctrl;
        uint32 reg32;

        pcie2_mdiosetblock(bus, physmedia);

        /* enable mdio access to SERDES */
        mdio_ctrl = MDIOCTL2_DIVISOR_VAL;
        mdio_ctrl |= (regaddr << MDIOCTL2_REGADDR_SHF);

        if (slave_bypass)
                mdio_ctrl |= MDIOCTL2_SLAVE_BYPASS;

        if (!write)
                mdio_ctrl |= MDIOCTL2_READ;

        si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_CONTROL, ~0, mdio_ctrl);

        if (write) {
                /* For writes the WR_DATA register doubles as the completion
                 * status register; DONE is written along with the data.
                 */
                reg32 =  PCIE2_MDIO_WR_DATA;
                si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, ~0,
                        *val | MDIODATA2_DONE);
        }
        else
                reg32 =  PCIE2_MDIO_RD_DATA;

        /* retry till the transaction is complete */
        while (i < pcie_serdes_spinwait) {
                /* DONE bit clearing signals that hardware finished the op */
                uint done_val =  si_corereg(bus->sih, bus->sih->buscoreidx, reg32, 0, 0);
                if (!(done_val & MDIODATA2_DONE)) {
                        if (!write) {
                                *val = si_corereg(bus->sih, bus->sih->buscoreidx,
                                        PCIE2_MDIO_RD_DATA, 0, 0);
                                *val = *val & MDIODATA2_MASK;
                        }
                        return 0;
                }
                OSL_DELAY(1000);
                i++;
        }
        return -1;
}
2550
2551 int
2552 dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
2553 {
2554         dhd_bus_t *bus = dhdp->bus;
2555         int bcmerror = 0;
2556 #ifdef CONFIG_ARCH_MSM
2557         int retry = POWERUP_MAX_RETRY;
2558 #endif /* CONFIG_ARCH_MSM */
2559
2560         if (dhd_download_fw_on_driverload) {
2561                 bcmerror = dhd_bus_start(dhdp);
2562         } else {
2563                 if (flag == TRUE) { /* Turn off WLAN */
2564                         /* Removing Power */
2565                         DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));
2566                         bus->dhd->up = FALSE;
2567                         if (bus->dhd->busstate != DHD_BUS_DOWN) {
2568                                 if (bus->intr) {
2569                                         dhdpcie_bus_intr_disable(bus);
2570                                         dhdpcie_free_irq(bus);
2571                                 }
2572 #ifdef BCMPCIE_OOB_HOST_WAKE
2573                                 /* Clean up any pending host wake IRQ */
2574                                 dhd_bus_oob_intr_set(bus->dhd, FALSE);
2575                                 dhd_bus_oob_intr_unregister(bus->dhd);
2576 #endif /* BCMPCIE_OOB_HOST_WAKE */
2577                                 dhd_os_wd_timer(dhdp, 0);
2578                                 dhd_bus_stop(bus, TRUE);
2579                                 dhd_prot_clear(dhdp);
2580                                 dhd_clear(dhdp);
2581                                 dhd_bus_release_dongle(bus);
2582                                 dhdpcie_bus_free_resource(bus);
2583                                 bcmerror = dhdpcie_bus_disable_device(bus);
2584                                 if (bcmerror) {
2585                                         DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
2586                                                 __FUNCTION__, bcmerror));
2587                                         goto done;
2588                                 }
2589 #ifdef CONFIG_ARCH_MSM
2590                                 bcmerror = dhdpcie_bus_clock_stop(bus);
2591                                 if (bcmerror) {
2592                                         DHD_ERROR(("%s: host clock stop failed: %d\n",
2593                                                 __FUNCTION__, bcmerror));
2594                                         goto done;
2595                                 }
2596 #endif /* CONFIG_ARCH_MSM */
2597                                 bus->dhd->busstate = DHD_BUS_DOWN;
2598                         } else {
2599                                 if (bus->intr) {
2600                                         dhdpcie_bus_intr_disable(bus);
2601                                         dhdpcie_free_irq(bus);
2602                                 }
2603 #ifdef BCMPCIE_OOB_HOST_WAKE
2604                                 /* Clean up any pending host wake IRQ */
2605                                 dhd_bus_oob_intr_set(bus->dhd, FALSE);
2606                                 dhd_bus_oob_intr_unregister(bus->dhd);
2607 #endif /* BCMPCIE_OOB_HOST_WAKE */
2608                                 dhd_prot_clear(dhdp);
2609                                 dhd_clear(dhdp);
2610                                 dhd_bus_release_dongle(bus);
2611                                 dhdpcie_bus_free_resource(bus);
2612                                 bcmerror = dhdpcie_bus_disable_device(bus);
2613                                 if (bcmerror) {
2614                                         DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
2615                                                 __FUNCTION__, bcmerror));
2616                                         goto done;
2617                                 }
2618
2619 #ifdef CONFIG_ARCH_MSM
2620                                 bcmerror = dhdpcie_bus_clock_stop(bus);
2621                                 if (bcmerror) {
2622                                         DHD_ERROR(("%s: host clock stop failed: %d\n",
2623                                                 __FUNCTION__, bcmerror));
2624                                         goto done;
2625                                 }
2626 #endif  /* CONFIG_ARCH_MSM */
2627                         }
2628
2629                         bus->dhd->dongle_reset = TRUE;
2630                         DHD_ERROR(("%s:  WLAN OFF Done\n", __FUNCTION__));
2631
2632                 } else { /* Turn on WLAN */
2633                         if (bus->dhd->busstate == DHD_BUS_DOWN) {
2634                                 /* Powering On */
2635                                 DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__));
2636 #ifdef CONFIG_ARCH_MSM
2637                                 while (--retry) {
2638                                         bcmerror = dhdpcie_bus_clock_start(bus);
2639                                         if (!bcmerror) {
2640                                                 DHD_ERROR(("%s: dhdpcie_bus_clock_start OK\n",
2641                                                         __FUNCTION__));
2642                                                 break;
2643                                         }
2644                                         else
2645                                                 OSL_SLEEP(10);
2646                                 }
2647
2648                                 if (bcmerror && !retry) {
2649                                         DHD_ERROR(("%s: host pcie clock enable failed: %d\n",
2650                                                 __FUNCTION__, bcmerror));
2651                                         goto done;
2652                                 }
2653 #endif /* CONFIG_ARCH_MSM */
2654                                 bcmerror = dhdpcie_bus_enable_device(bus);
2655                                 if (bcmerror) {
2656                                         DHD_ERROR(("%s: host configuration restore failed: %d\n",
2657                                                 __FUNCTION__, bcmerror));
2658                                         goto done;
2659                                 }
2660
2661                                 bcmerror = dhdpcie_bus_alloc_resource(bus);
2662                                 if (bcmerror) {
2663                                         DHD_ERROR(("%s: dhdpcie_bus_resource_alloc failed: %d\n",
2664                                                 __FUNCTION__, bcmerror));
2665                                         goto done;
2666                                 }
2667
2668                                 bcmerror = dhdpcie_bus_dongle_attach(bus);
2669                                 if (bcmerror) {
2670                                         DHD_ERROR(("%s: dhdpcie_bus_dongle_attach failed: %d\n",
2671                                                 __FUNCTION__, bcmerror));
2672                                         goto done;
2673                                 }
2674
2675                                 bcmerror = dhd_bus_request_irq(bus);
2676                                 if (bcmerror) {
2677                                         DHD_ERROR(("%s: dhd_bus_request_irq failed: %d\n",
2678                                                 __FUNCTION__, bcmerror));
2679                                         goto done;
2680                                 }
2681
2682                                 bus->dhd->dongle_reset = FALSE;
2683
2684                                 bcmerror = dhd_bus_start(dhdp);
2685                                 if (bcmerror) {
2686                                         DHD_ERROR(("%s: dhd_bus_start: %d\n",
2687                                                 __FUNCTION__, bcmerror));
2688                                         goto done;
2689                                 }
2690
2691                                 bus->dhd->up = TRUE;
2692                                 DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__));
2693                         } else {
2694                                 DHD_ERROR(("%s: what should we do here\n", __FUNCTION__));
2695                                 goto done;
2696                         }
2697                 }
2698         }
2699 done:
2700         if (bcmerror)
2701                 bus->dhd->busstate = DHD_BUS_DOWN;
2702
2703         return bcmerror;
2704 }
2705
2706 static int
2707 dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
2708                 void *params, int plen, void *arg, int len, int val_size)
2709 {
2710         int bcmerror = 0;
2711         int32 int_val = 0;
2712         int32 int_val2 = 0;
2713         int32 int_val3 = 0;
2714         bool bool_val = 0;
2715
2716         DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
2717                    __FUNCTION__, actionid, name, params, plen, arg, len, val_size));
2718
2719         if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
2720                 goto exit;
2721
2722         if (plen >= (int)sizeof(int_val))
2723                 bcopy(params, &int_val, sizeof(int_val));
2724
2725         if (plen >= (int)sizeof(int_val) * 2)
2726                 bcopy((void*)((uintptr)params + sizeof(int_val)), &int_val2, sizeof(int_val2));
2727
2728         if (plen >= (int)sizeof(int_val) * 3)
2729                 bcopy((void*)((uintptr)params + 2 * sizeof(int_val)), &int_val3, sizeof(int_val3));
2730
2731         bool_val = (int_val != 0) ? TRUE : FALSE;
2732
2733         /* Check if dongle is in reset. If so, only allow DEVRESET iovars */
2734         if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
2735                                         actionid == IOV_GVAL(IOV_DEVRESET))) {
2736                 bcmerror = BCME_NOTREADY;
2737                 goto exit;
2738         }
2739
2740         switch (actionid) {
2741
2742
2743         case IOV_SVAL(IOV_VARS):
2744                 bcmerror = dhdpcie_downloadvars(bus, arg, len);
2745                 break;
2746
2747         case IOV_SVAL(IOV_PCIEREG):
2748                 si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
2749                         int_val);
2750                 si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0,
2751                         int_val2);
2752                 break;
2753
2754         case IOV_GVAL(IOV_PCIEREG):
2755                 si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
2756                         int_val);
2757                 int_val = si_corereg(bus->sih, bus->sih->buscoreidx,
2758                         OFFSETOF(sbpcieregs_t, configdata), 0, 0);
2759                 bcopy(&int_val, arg, val_size);
2760                 break;
2761
2762         case IOV_GVAL(IOV_BAR0_SECWIN_REG):
2763         {
2764                 uint32 cur_base, base;
2765                 uchar *bar0;
2766                 volatile uint32 *offset;
2767                 /* set the bar0 secondary window to this */
2768                 /* write the register value */
2769                 cur_base = dhdpcie_bus_cfg_read_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint));
2770                 base = int_val & 0xFFFFF000;
2771                 dhdpcie_bus_cfg_write_dword(bus, PCIE2_BAR0_CORE2_WIN,  sizeof(uint32), base);
2772                 bar0 = (uchar *)bus->regs;
2773                 offset = (uint32 *)(bar0 + 0x4000 + (int_val & 0xFFF));
2774                 int_val = *offset;
2775                 bcopy(&int_val, arg, val_size);
2776                 dhdpcie_bus_cfg_write_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint32), cur_base);
2777         }
2778                 break;
2779         case IOV_SVAL(IOV_BAR0_SECWIN_REG):
2780         {
2781                 uint32 cur_base, base;
2782                 uchar *bar0;
2783                 volatile uint32 *offset;
2784                 /* set the bar0 secondary window to this */
2785                 /* write the register value */
2786                 cur_base = dhdpcie_bus_cfg_read_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint));
2787                 base = int_val & 0xFFFFF000;
2788                 dhdpcie_bus_cfg_write_dword(bus, PCIE2_BAR0_CORE2_WIN,  sizeof(uint32), base);
2789                 bar0 = (uchar *)bus->regs;
2790                 offset = (uint32 *)(bar0 + 0x4000 + (int_val & 0xFFF));
2791                 *offset = int_val2;
2792                 bcopy(&int_val2, arg, val_size);
2793                 dhdpcie_bus_cfg_write_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint32), cur_base);
2794         }
2795                 break;
2796
2797         case IOV_SVAL(IOV_PCIECOREREG):
2798                 si_corereg(bus->sih, bus->sih->buscoreidx, int_val, ~0, int_val2);
2799                 break;
2800         case IOV_GVAL(IOV_SBREG):
2801         {
2802                 sdreg_t sdreg;
2803                 uint32 addr, coreidx;
2804
2805                 bcopy(params, &sdreg, sizeof(sdreg));
2806
2807                 addr = sdreg.offset;
2808                 coreidx =  (addr & 0xF000) >> 12;
2809
2810                 int_val = si_corereg(bus->sih, coreidx, (addr & 0xFFF), 0, 0);
2811                 bcopy(&int_val, arg, sizeof(int32));
2812                 break;
2813         }
2814
2815         case IOV_SVAL(IOV_SBREG):
2816         {
2817                 sdreg_t sdreg;
2818                 uint32 addr, coreidx;
2819
2820                 bcopy(params, &sdreg, sizeof(sdreg));
2821
2822                 addr = sdreg.offset;
2823                 coreidx =  (addr & 0xF000) >> 12;
2824
2825                 si_corereg(bus->sih, coreidx, (addr & 0xFFF), ~0, sdreg.value);
2826
2827                 break;
2828         }
2829
2830         case IOV_GVAL(IOV_PCIESERDESREG):
2831         {
2832                 uint val;
2833                 if (!PCIE_GEN2(bus->sih)) {
2834                         DHD_ERROR(("%s: supported only in pcie gen2\n", __FUNCTION__));
2835                         bcmerror = BCME_ERROR;
2836                         break;
2837                 }
2838                 if (!pcie2_mdioop(bus, int_val, int_val2, FALSE, &val, FALSE)) {
2839                         bcopy(&val, arg, sizeof(int32));
2840                 }
2841                 else {
2842                         DHD_ERROR(("%s: pcie2_mdioop failed.\n", __FUNCTION__));
2843                         bcmerror = BCME_ERROR;
2844                 }
2845                 break;
2846         }
2847         case IOV_SVAL(IOV_PCIESERDESREG):
2848                 if (!PCIE_GEN2(bus->sih)) {
2849                         DHD_ERROR(("%s: supported only in pcie gen2\n", __FUNCTION__));
2850                         bcmerror = BCME_ERROR;
2851                         break;
2852                 }
2853                 if (pcie2_mdioop(bus, int_val, int_val2, TRUE, &int_val3, FALSE)) {
2854                         DHD_ERROR(("%s: pcie2_mdioop failed.\n", __FUNCTION__));
2855                         bcmerror = BCME_ERROR;
2856                 }
2857                 break;
2858         case IOV_GVAL(IOV_PCIECOREREG):
2859                 int_val = si_corereg(bus->sih, bus->sih->buscoreidx, int_val, 0, 0);
2860                 bcopy(&int_val, arg, val_size);
2861                 break;
2862
2863         case IOV_SVAL(IOV_PCIECFGREG):
2864                 OSL_PCI_WRITE_CONFIG(bus->osh, int_val, 4, int_val2);
2865                 break;
2866
2867         case IOV_GVAL(IOV_PCIECFGREG):
2868                 int_val = OSL_PCI_READ_CONFIG(bus->osh, int_val, 4);
2869                 bcopy(&int_val, arg, val_size);
2870                 break;
2871
2872         case IOV_SVAL(IOV_PCIE_LPBK):
2873                 bcmerror = dhdpcie_bus_lpback_req(bus, int_val);
2874                 break;
2875
2876         case IOV_SVAL(IOV_PCIE_DMAXFER):
2877                 bcmerror = dhdpcie_bus_dmaxfer_req(bus, int_val, int_val2, int_val3);
2878                 break;
2879
2880         case IOV_GVAL(IOV_PCIE_SUSPEND):
2881                 int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 1 : 0;
2882                 bcopy(&int_val, arg, val_size);
2883                 break;
2884
2885         case IOV_SVAL(IOV_PCIE_SUSPEND):
2886                 dhdpcie_bus_suspend(bus, bool_val);
2887                 break;
2888
2889         case IOV_GVAL(IOV_MEMSIZE):
2890                 int_val = (int32)bus->ramsize;
2891                 bcopy(&int_val, arg, val_size);
2892                 break;
2893         case IOV_SVAL(IOV_MEMBYTES):
2894         case IOV_GVAL(IOV_MEMBYTES):
2895         {
2896                 uint32 address;         /* absolute backplane address */
2897                 uint size, dsize;
2898                 uint8 *data;
2899
2900                 bool set = (actionid == IOV_SVAL(IOV_MEMBYTES));
2901
2902                 ASSERT(plen >= 2*sizeof(int));
2903
2904                 address = (uint32)int_val;
2905                 bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val));
2906                 size = (uint)int_val;
2907
2908                 /* Do some validation */
2909                 dsize = set ? plen - (2 * sizeof(int)) : len;
2910                 if (dsize < size) {
2911                         DHD_ERROR(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n",
2912                                    __FUNCTION__, (set ? "set" : "get"), address, size, dsize));
2913                         bcmerror = BCME_BADARG;
2914                         break;
2915                 }
2916
2917                 DHD_INFO(("%s: Request to %s %d bytes at address 0x%08x\n dsize %d ", __FUNCTION__,
2918                           (set ? "write" : "read"), size, address, dsize));
2919
2920                 /* check if CR4 */
2921                 if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
2922                         /* if address is 0, store the reset instruction to be written in 0 */
2923                         if (set && address == bus->dongle_ram_base) {
2924                                 bus->resetinstr = *(((uint32*)params) + 2);
2925                         }
2926                 } else {
2927                 /* If we know about SOCRAM, check for a fit */
2928                 if ((bus->orig_ramsize) &&
2929                     ((address > bus->orig_ramsize) || (address + size > bus->orig_ramsize)))
2930                 {
2931                         uint8 enable, protect, remap;
2932                         si_socdevram(bus->sih, FALSE, &enable, &protect, &remap);
2933                         if (!enable || protect) {
2934                                 DHD_ERROR(("%s: ramsize 0x%08x doesn't have %d bytes at 0x%08x\n",
2935                                         __FUNCTION__, bus->orig_ramsize, size, address));
2936                                 DHD_ERROR(("%s: socram enable %d, protect %d\n",
2937                                         __FUNCTION__, enable, protect));
2938                                 bcmerror = BCME_BADARG;
2939                                 break;
2940                         }
2941
2942                         if (!REMAP_ENAB(bus) && (address >= SOCDEVRAM_ARM_ADDR)) {
2943                                 uint32 devramsize = si_socdevram_size(bus->sih);
2944                                 if ((address < SOCDEVRAM_ARM_ADDR) ||
2945                                         (address + size > (SOCDEVRAM_ARM_ADDR + devramsize))) {
2946                                         DHD_ERROR(("%s: bad address 0x%08x, size 0x%08x\n",
2947                                                 __FUNCTION__, address, size));
2948                                         DHD_ERROR(("%s: socram range 0x%08x,size 0x%08x\n",
2949                                                 __FUNCTION__, SOCDEVRAM_ARM_ADDR, devramsize));
2950                                         bcmerror = BCME_BADARG;
2951                                         break;
2952                                 }
2953                                 /* move it such that address is real now */
2954                                 address -= SOCDEVRAM_ARM_ADDR;
2955                                 address += SOCDEVRAM_BP_ADDR;
2956                                 DHD_INFO(("%s: Request to %s %d bytes @ Mapped address 0x%08x\n",
2957                                         __FUNCTION__, (set ? "write" : "read"), size, address));
2958                         } else if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address) && remap) {
2959                                 /* Can not access remap region while devram remap bit is set
2960                                  * ROM content would be returned in this case
2961                                  */
2962                                 DHD_ERROR(("%s: Need to disable remap for address 0x%08x\n",
2963                                         __FUNCTION__, address));
2964                                 bcmerror = BCME_ERROR;
2965                                 break;
2966                         }
2967                 }
2968                 }
2969
2970                 /* Generate the actual data pointer */
2971                 data = set ? (uint8*)params + 2 * sizeof(int): (uint8*)arg;
2972
2973                 /* Call to do the transfer */
2974                 bcmerror = dhdpcie_bus_membytes(bus, set, address, data, size);
2975
2976                 break;
2977         }
2978
2979 #ifdef BCM_BUZZZ
2980         case IOV_GVAL(IOV_BUZZZ_DUMP):
2981                 bcmerror = dhd_buzzz_dump_dngl(bus);
2982                 break;
2983 #endif /* BCM_BUZZZ */
2984
2985         case IOV_SVAL(IOV_SET_DOWNLOAD_STATE):
2986                 bcmerror = dhdpcie_bus_download_state(bus, bool_val);
2987                 break;
2988
2989         case IOV_GVAL(IOV_RAMSIZE):
2990                 int_val = (int32)bus->ramsize;
2991                 bcopy(&int_val, arg, val_size);
2992                 break;
2993
2994         case IOV_GVAL(IOV_RAMSTART):
2995                 int_val = (int32)bus->dongle_ram_base;
2996                 bcopy(&int_val, arg, val_size);
2997                 break;
2998
2999         case IOV_GVAL(IOV_CC_NVMSHADOW):
3000         {
3001                 struct bcmstrbuf dump_b;
3002
3003                 bcm_binit(&dump_b, arg, len);
3004                 bcmerror = dhdpcie_cc_nvmshadow(bus, &dump_b);
3005                 break;
3006         }
3007
3008         case IOV_GVAL(IOV_SLEEP_ALLOWED):
3009                 bool_val = bus->sleep_allowed;
3010                 bcopy(&bool_val, arg, val_size);
3011                 break;
3012
3013         case IOV_SVAL(IOV_SLEEP_ALLOWED):
3014                 bus->sleep_allowed = bool_val;
3015                 break;
3016
3017         case IOV_GVAL(IOV_DONGLEISOLATION):
3018                 int_val = bus->dhd->dongle_isolation;
3019                 bcopy(&int_val, arg, val_size);
3020                 break;
3021
3022         case IOV_SVAL(IOV_DONGLEISOLATION):
3023                 bus->dhd->dongle_isolation = bool_val;
3024                 break;
3025
3026         case IOV_GVAL(IOV_LTRSLEEPON_UNLOOAD):
3027                 int_val = bus->ltrsleep_on_unload;
3028                 bcopy(&int_val, arg, val_size);
3029                 break;
3030
3031         case IOV_SVAL(IOV_LTRSLEEPON_UNLOOAD):
3032                 bus->ltrsleep_on_unload = bool_val;
3033                 break;
3034
3035         case IOV_GVAL(IOV_DUMP_RINGUPD_BLOCK):
3036         {
3037                 struct bcmstrbuf dump_b;
3038                 bcm_binit(&dump_b, arg, len);
3039                 bcmerror = dhd_prot_ringupd_dump(bus->dhd, &dump_b);
3040                 break;
3041         }
3042         case IOV_GVAL(IOV_DMA_RINGINDICES):
3043         {       int h2d_support, d2h_support;
3044
3045                 d2h_support = DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support) ? 1 : 0;
3046                 h2d_support = DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support) ? 1 : 0;
3047                 int_val = d2h_support | (h2d_support << 1);
3048                 bcopy(&int_val, arg, val_size);
3049                 break;
3050         }
3051         case IOV_SVAL(IOV_DMA_RINGINDICES):
3052                 /* Can change it only during initialization/FW download */
3053                 if (bus->dhd->busstate == DHD_BUS_DOWN) {
3054                         if ((int_val > 3) || (int_val < 0)) {
3055                                 DHD_ERROR(("%s: Bad argument. Possible values: 0, 1, 2 & 3\n", __FUNCTION__));
3056                                 bcmerror = BCME_BADARG;
3057                         } else {
3058                                 bus->dhd->dma_d2h_ring_upd_support = (int_val & 1) ? TRUE : FALSE;
3059                                 bus->dhd->dma_h2d_ring_upd_support = (int_val & 2) ? TRUE : FALSE;
3060                         }
3061                 } else {
3062                         DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
3063                                 __FUNCTION__));
3064                         bcmerror = BCME_NOTDOWN;
3065                 }
3066                 break;
3067
3068         case IOV_GVAL(IOV_RX_METADATALEN):
3069                 int_val = dhd_prot_metadatalen_get(bus->dhd, TRUE);
3070                 bcopy(&int_val, arg, val_size);
3071                 break;
3072
3073                 case IOV_SVAL(IOV_RX_METADATALEN):
3074                 if (int_val > 64) {
3075                         bcmerror = BCME_BUFTOOLONG;
3076                         break;
3077                 }
3078                 dhd_prot_metadatalen_set(bus->dhd, int_val, TRUE);
3079                 break;
3080
3081         case IOV_SVAL(IOV_TXP_THRESHOLD):
3082                 dhd_prot_txp_threshold(bus->dhd, TRUE, int_val);
3083                 break;
3084
3085         case IOV_GVAL(IOV_TXP_THRESHOLD):
3086                 int_val = dhd_prot_txp_threshold(bus->dhd, FALSE, int_val);
3087                 bcopy(&int_val, arg, val_size);
3088                 break;
3089
3090         case IOV_SVAL(IOV_DB1_FOR_MB):
3091                 if (int_val)
3092                         bus->db1_for_mb = TRUE;
3093                 else
3094                         bus->db1_for_mb = FALSE;
3095                 break;
3096
3097         case IOV_GVAL(IOV_DB1_FOR_MB):
3098                 if (bus->db1_for_mb)
3099                         int_val = 1;
3100                 else
3101                         int_val = 0;
3102                 bcopy(&int_val, arg, val_size);
3103                 break;
3104
3105         case IOV_GVAL(IOV_TX_METADATALEN):
3106                 int_val = dhd_prot_metadatalen_get(bus->dhd, FALSE);
3107                 bcopy(&int_val, arg, val_size);
3108                 break;
3109
3110         case IOV_SVAL(IOV_TX_METADATALEN):
3111                 if (int_val > 64) {
3112                         bcmerror = BCME_BUFTOOLONG;
3113                         break;
3114                 }
3115                 dhd_prot_metadatalen_set(bus->dhd, int_val, FALSE);
3116                 break;
3117
3118         case IOV_GVAL(IOV_FLOW_PRIO_MAP):
3119                 int_val = bus->dhd->flow_prio_map_type;
3120                 bcopy(&int_val, arg, val_size);
3121                 break;
3122
3123         case IOV_SVAL(IOV_FLOW_PRIO_MAP):
3124                 int_val = (int32)dhd_update_flow_prio_map(bus->dhd, (uint8)int_val);
3125                 bcopy(&int_val, arg, val_size);
3126                 break;
3127
3128         case IOV_GVAL(IOV_TXBOUND):
3129                 int_val = (int32)dhd_txbound;
3130                 bcopy(&int_val, arg, val_size);
3131                 break;
3132
3133         case IOV_SVAL(IOV_TXBOUND):
3134                 dhd_txbound = (uint)int_val;
3135                 break;
3136
3137         case IOV_GVAL(IOV_RXBOUND):
3138                 int_val = (int32)dhd_rxbound;
3139                 bcopy(&int_val, arg, val_size);
3140                 break;
3141
3142         case IOV_SVAL(IOV_RXBOUND):
3143                 dhd_rxbound = (uint)int_val;
3144                 break;
3145
3146         default:
3147                 bcmerror = BCME_UNSUPPORTED;
3148                 break;
3149         }
3150
3151 exit:
3152         return bcmerror;
3153 }
3154
3155 /* Transfers bytes from host to dongle using pio mode */
3156 static int
3157 dhdpcie_bus_lpback_req(struct  dhd_bus *bus, uint32 len)
3158 {
3159         if (bus->dhd == NULL) {
3160                 DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
3161                 return 0;
3162         }
3163         if (bus->dhd->prot == NULL) {
3164                 DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
3165                 return 0;
3166         }
3167         if (bus->dhd->busstate != DHD_BUS_DATA) {
3168                 DHD_ERROR(("%s: not in a readystate to LPBK  is not inited\n", __FUNCTION__));
3169                 return 0;
3170         }
3171         dhdmsgbuf_lpbk_req(bus->dhd, len);
3172         return 0;
3173 }
3174
3175 void
3176 dhd_bus_set_suspend_resume(dhd_pub_t *dhdp, bool state)
3177 {
3178         struct  dhd_bus *bus = dhdp->bus;
3179         if (bus) {
3180                 dhdpcie_bus_suspend(bus, state);
3181         }
3182 }
3183
/*
 * Suspend (state == TRUE) or resume (state == FALSE) the PCIe bus.
 *
 * Suspend path: send H2D_HOST_D3_INFORM to the dongle, wait for the D3 ACK
 * mailbox reply, then hand off to dhdpcie_pci_suspend_resume().  If a
 * wakelock is still held, or the ACK never arrives, the device is forced
 * back to D0 and the bus state is restored to DHD_BUS_DATA.
 *
 * Returns BCME_OK / BCME_ERROR, -EIO when the dongle is held in reset, or
 * -ETIMEDOUT when the D3 ACK times out.
 */
int
dhdpcie_bus_suspend(struct dhd_bus *bus, bool state)
{

        int timeleft;
        bool pending;
        int rc = 0;

        if (bus->dhd == NULL) {
                DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
                return BCME_ERROR;
        }
        if (bus->dhd->prot == NULL) {
                DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
                return BCME_ERROR;
        }
        if (bus->dhd->busstate != DHD_BUS_DATA && bus->dhd->busstate != DHD_BUS_SUSPEND) {
                DHD_ERROR(("%s: not in a readystate to LPBK  is not inited\n", __FUNCTION__));
                return BCME_ERROR;
        }
        if (bus->dhd->dongle_reset)
                return -EIO;

        if (bus->suspended == state) /* Set to same state */
                return BCME_OK;

        if (state) {
                /* Suspend: flag the handshake as pending and mark the bus suspended
                 * BEFORE informing the dongle, so the D3 ACK interrupt handler
                 * (dhdpcie_handle_mb_data) sees a consistent state.
                 */
                bus->wait_for_d3_ack = 0;
                bus->suspended = TRUE;
                bus->dhd->busstate = DHD_BUS_SUSPEND;
                /* Waive the wakelock so the handshake itself cannot block suspend */
                DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
                dhd_os_set_ioctl_resp_timeout(DEFAULT_IOCTL_RESP_TIMEOUT);
                dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
                /* Block until the D3 ACK arrives or the ioctl-resp timeout fires */
                timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->wait_for_d3_ack, &pending);
                dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
                DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
                if (bus->wait_for_d3_ack) {
                        /* Got D3 Ack. Suspend the bus */
                        if (dhd_os_check_wakelock_all(bus->dhd)) {
                                /* A wakelock is held: abort the suspend and force the
                                 * device back to full power (D0).
                                 */
                                DHD_ERROR(("%s: Suspend failed because of wakelock\n", __FUNCTION__));
                                bus->dev->current_state = PCI_D3hot;
                                pci_set_master(bus->dev);
                                rc = pci_set_power_state(bus->dev, PCI_D0);
                                if (rc) {
                                        DHD_ERROR(("%s: pci_set_power_state failed:"
                                                " current_state[%d], ret[%d]\n",
                                                __FUNCTION__, bus->dev->current_state, rc));
                                }
                                bus->suspended = FALSE;
                                bus->dhd->busstate = DHD_BUS_DATA;
                                rc = BCME_ERROR;
                        } else {
                                /* Safe to sleep: mask the device interrupt, then do the
                                 * platform PCI suspend.
                                 */
                                dhdpcie_bus_intr_disable(bus);
                                rc = dhdpcie_pci_suspend_resume(bus, state);
                        }
                } else if (timeleft == 0) {
                        /* No D3 ACK within the timeout: undo the suspend and bring the
                         * device back to D0.
                         */
                        DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__));
                        bus->dev->current_state = PCI_D3hot;
                        pci_set_master(bus->dev);
                        rc = pci_set_power_state(bus->dev, PCI_D0);
                        if (rc) {
                                DHD_ERROR(("%s: pci_set_power_state failed:"
                                        " current_state[%d], ret[%d]\n",
                                        __FUNCTION__, bus->dev->current_state, rc));
                        }
                        bus->suspended = FALSE;
                        bus->dhd->busstate = DHD_BUS_DATA;
                        rc = -ETIMEDOUT;
                }
                /* Re-arm the flag so a late D3 ACK is not treated as pending */
                bus->wait_for_d3_ack = 1;
        } else {
                /* Resume */
#ifdef BCMPCIE_OOB_HOST_WAKE
                DHD_OS_OOB_IRQ_WAKE_UNLOCK(bus->dhd);
#endif /* BCMPCIE_OOB_HOST_WAKE */
                rc = dhdpcie_pci_suspend_resume(bus, state);
                bus->suspended = FALSE;
                bus->dhd->busstate = DHD_BUS_DATA;
                dhdpcie_bus_intr_enable(bus);
        }
        return rc;
}
3266
3267 /* Transfers bytes from host to dongle and to host again using DMA */
3268 static int
3269 dhdpcie_bus_dmaxfer_req(struct  dhd_bus *bus, uint32 len, uint32 srcdelay, uint32 destdelay)
3270 {
3271         if (bus->dhd == NULL) {
3272                 DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
3273                 return BCME_ERROR;
3274         }
3275         if (bus->dhd->prot == NULL) {
3276                 DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
3277                 return BCME_ERROR;
3278         }
3279         if (bus->dhd->busstate != DHD_BUS_DATA) {
3280                 DHD_ERROR(("%s: not in a readystate to LPBK  is not inited\n", __FUNCTION__));
3281                 return BCME_ERROR;
3282         }
3283
3284         if (len < 5 || len > 4194296) {
3285                 DHD_ERROR(("%s: len is too small or too large\n", __FUNCTION__));
3286                 return BCME_ERROR;
3287         }
3288         return dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay);
3289 }
3290
3291
3292
/*
 * Put the dongle into (enter == TRUE) or take it out of (enter == FALSE)
 * firmware-download state.
 *
 * Enter: halt the ARM core and reset SOCRAM (or, on CR4 chips, halt the CR4
 * with the chip's RAM left accessible) so the host can write the image.
 * Exit: write the NVRAM vars, populate the CR4 reset vector where needed,
 * and release the ARM from reset so it boots the downloaded image.
 *
 * Always returns through 'fail', which restores the PCIe core as the
 * current backplane core.  Returns BCME_OK or BCME_ERROR.
 */
static int
dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter)
{
        int bcmerror = 0;
        uint32 *cr4_regs;

        if (!bus->sih)
                return BCME_ERROR;
        /* To enter download state, disable ARM and reset SOCRAM.
         * To exit download state, simply reset ARM (default is RAM boot).
         */
        if (enter) {
                /* Restrict the backplane to ALP clock while downloading */
                bus->alp_only = TRUE;

                /* some chips (e.g. 43602) have two ARM cores, the CR4 receives the firmware. */
                cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0);

                /* No CR4 and no CM3/7S either: nothing to download to */
                if (cr4_regs == NULL && !(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
                    !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
                        DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
                        bcmerror = BCME_ERROR;
                        goto fail;
                }

                if (cr4_regs == NULL) { /* no CR4 present on chip */
                        /* Disable the ARM core found above, then reset SOCRAM
                         * so the host can write the firmware image into it.
                         */
                        si_core_disable(bus->sih, 0);

                        if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
                                DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
                                bcmerror = BCME_ERROR;
                                goto fail;
                        }

                        si_core_reset(bus->sih, 0, 0);


                        /* Clear the top bit of memory */
                        if (bus->ramsize) {
                                uint32 zeros = 0;
                                if (dhdpcie_bus_membytes(bus, TRUE, bus->ramsize - 4,
                                                     (uint8*)&zeros, 4) < 0) {
                                        bcmerror = BCME_ERROR;
                                        goto fail;
                                }
                        }
                } else {
                        /* For CR4,
                         * Halt ARM
                         * Remove ARM reset
                         * Read RAM base address [0x18_0000]
                         * [next] Download firmware
                         * [done at else] Populate the reset vector
                         * [done at else] Remove ARM halt
                        */
                        /* Halt ARM & remove reset */
                        si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
                        if (bus->sih->chip == BCM43602_CHIP_ID) {
                                /* 43602-specific: zero the bank PDA registers for
                                 * banks 5 and 7 (exact purpose not visible here —
                                 * NOTE(review): confirm against chip documentation).
                                 */
                                W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 5);
                                W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
                                W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 7);
                                W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
                        }
                        /* reset last 4 bytes of RAM address. to be used for shared area */
                        dhdpcie_init_shared_addr(bus);
                }
        } else {
                if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
                        /* Non-CR4 exit path: SOCRAM must be up after the download */
                        if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
                                DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
                                bcmerror = BCME_ERROR;
                                goto fail;
                        }

                        if (!si_iscoreup(bus->sih)) {
                                DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__));
                                bcmerror = BCME_ERROR;
                                goto fail;
                        }


                        /* Enable remap before ARM reset but after vars.
                         * No backplane access in remap mode
                         */

                        if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
                            !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
                                DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
                                bcmerror = BCME_ERROR;
                                goto fail;
                        }


                        /* Select the ARM core so the common si_core_reset below boots it */
                        if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
                            !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
                                DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
                                bcmerror = BCME_ERROR;
                                goto fail;
                        }
                } else {
                        if (bus->sih->chip == BCM43602_CHIP_ID) {
                                /* Firmware crashes on SOCSRAM access when core is in reset */
                                if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
                                        DHD_ERROR(("%s: Failed to find SOCRAM core!\n",
                                                __FUNCTION__));
                                        bcmerror = BCME_ERROR;
                                        goto fail;
                                }
                                si_core_reset(bus->sih, 0, 0);
                                si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
                        }

                        /* write vars */
                        if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
                                DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
                                goto fail;
                        }


                        /* switch back to arm core again */
                        if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
                                DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__));
                                bcmerror = BCME_ERROR;
                                goto fail;
                        }

                        /* write address 0 with reset instruction */
                        bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
                                (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));

                        /* now remove reset and halt and continue to run CR4 */
                }

                /* Release the selected ARM core from reset: it starts executing */
                si_core_reset(bus->sih, 0, 0);

                /* Allow HT Clock now that the ARM is running. */
                bus->alp_only = FALSE;

                bus->dhd->busstate = DHD_BUS_LOAD;
        }

fail:
        /* Always return to PCIE core */
        si_setcore(bus->sih, PCIE2_CORE_ID, 0);

        return bcmerror;
}
3439
3440 static int
3441 dhdpcie_bus_write_vars(dhd_bus_t *bus)
3442 {
3443         int bcmerror = 0;
3444         uint32 varsize, phys_size;
3445         uint32 varaddr;
3446         uint8 *vbuffer;
3447         uint32 varsizew;
3448 #ifdef DHD_DEBUG
3449         uint8 *nvram_ularray;
3450 #endif /* DHD_DEBUG */
3451
3452         /* Even if there are no vars are to be written, we still need to set the ramsize. */
3453         varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0;
3454         varaddr = (bus->ramsize - 4) - varsize;
3455
3456         varaddr += bus->dongle_ram_base;
3457
3458         if (bus->vars) {
3459
3460                 vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize);
3461                 if (!vbuffer)
3462                         return BCME_NOMEM;
3463
3464                 bzero(vbuffer, varsize);
3465                 bcopy(bus->vars, vbuffer, bus->varsz);
3466                 /* Write the vars list */
3467                 bcmerror = dhdpcie_bus_membytes(bus, TRUE, varaddr, vbuffer, varsize);
3468
3469                 /* Implement read back and verify later */
3470 #ifdef DHD_DEBUG
3471                 /* Verify NVRAM bytes */
3472                 DHD_INFO(("%s: Compare NVRAM dl & ul; varsize=%d\n", __FUNCTION__, varsize));
3473                 nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize);
3474                 if (!nvram_ularray)
3475                         return BCME_NOMEM;
3476
3477                 /* Upload image to verify downloaded contents. */
3478                 memset(nvram_ularray, 0xaa, varsize);
3479
3480                 /* Read the vars list to temp buffer for comparison */
3481                 bcmerror = dhdpcie_bus_membytes(bus, FALSE, varaddr, nvram_ularray, varsize);
3482                 if (bcmerror) {
3483                                 DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
3484                                         __FUNCTION__, bcmerror, varsize, varaddr));
3485                 }
3486
3487                 /* Compare the org NVRAM with the one read from RAM */
3488                 if (memcmp(vbuffer, nvram_ularray, varsize)) {
3489                         DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__));
3490                 } else
3491                         DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",
3492                         __FUNCTION__));
3493
3494                 MFREE(bus->dhd->osh, nvram_ularray, varsize);
3495 #endif /* DHD_DEBUG */
3496
3497                 MFREE(bus->dhd->osh, vbuffer, varsize);
3498         }
3499
3500         phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize;
3501
3502         phys_size += bus->dongle_ram_base;
3503
3504         /* adjust to the user specified RAM */
3505         DHD_INFO(("%s: Physical memory size: %d, usable memory size: %d\n", __FUNCTION__,
3506                 phys_size, bus->ramsize));
3507         DHD_INFO(("%s: Vars are at %d, orig varsize is %d\n", __FUNCTION__,
3508                 varaddr, varsize));
3509         varsize = ((phys_size - 4) - varaddr);
3510
3511         /*
3512          * Determine the length token:
3513          * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
3514          */
3515         if (bcmerror) {
3516                 varsizew = 0;
3517                 bus->nvram_csm = varsizew;
3518         } else {
3519                 varsizew = varsize / 4;
3520                 varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
3521                 bus->nvram_csm = varsizew;
3522                 varsizew = htol32(varsizew);
3523         }
3524
3525         DHD_INFO(("%s: New varsize is %d, length token=0x%08x\n", __FUNCTION__, varsize, varsizew));
3526
3527         /* Write the length token to the last word */
3528         bcmerror = dhdpcie_bus_membytes(bus, TRUE, (phys_size - 4),
3529                 (uint8*)&varsizew, 4);
3530
3531         return bcmerror;
3532 }
3533
3534 int
3535 dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len)
3536 {
3537         int bcmerror = BCME_OK;
3538
3539         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3540
3541         /* Basic sanity checks */
3542         if (bus->dhd->up) {
3543                 bcmerror = BCME_NOTDOWN;
3544                 goto err;
3545         }
3546         if (!len) {
3547                 bcmerror = BCME_BUFTOOSHORT;
3548                 goto err;
3549         }
3550
3551         /* Free the old ones and replace with passed variables */
3552         if (bus->vars)
3553                 MFREE(bus->dhd->osh, bus->vars, bus->varsz);
3554
3555         bus->vars = MALLOC(bus->dhd->osh, len);
3556         bus->varsz = bus->vars ? len : 0;
3557         if (bus->vars == NULL) {
3558                 bcmerror = BCME_NOMEM;
3559                 goto err;
3560         }
3561
3562         /* Copy the passed variables, which should include the terminating double-null */
3563         bcopy(arg, bus->vars, bus->varsz);
3564 err:
3565         return bcmerror;
3566 }
3567
3568 #ifndef BCMPCIE_OOB_HOST_WAKE
3569 /* loop through the capability list and see if the pcie capabilty exists */
3570 uint8
3571 dhdpcie_find_pci_capability(osl_t *osh, uint8 req_cap_id)
3572 {
3573         uint8 cap_id;
3574         uint8 cap_ptr = 0;
3575         uint8 byte_val;
3576
3577         /* check for Header type 0 */
3578         byte_val = read_pci_cfg_byte(PCI_CFG_HDR);
3579         if ((byte_val & 0x7f) != PCI_HEADER_NORMAL) {
3580                 DHD_ERROR(("%s : PCI config header not normal.\n", __FUNCTION__));
3581                 goto end;
3582         }
3583
3584         /* check if the capability pointer field exists */
3585         byte_val = read_pci_cfg_byte(PCI_CFG_STAT);
3586         if (!(byte_val & PCI_CAPPTR_PRESENT)) {
3587                 DHD_ERROR(("%s : PCI CAP pointer not present.\n", __FUNCTION__));
3588                 goto end;
3589         }
3590
3591         cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
3592         /* check if the capability pointer is 0x00 */
3593         if (cap_ptr == 0x00) {
3594                 DHD_ERROR(("%s : PCI CAP pointer is 0x00.\n", __FUNCTION__));
3595                 goto end;
3596         }
3597
3598         /* loop thr'u the capability list and see if the pcie capabilty exists */
3599
3600         cap_id = read_pci_cfg_byte(cap_ptr);
3601
3602         while (cap_id != req_cap_id) {
3603                 cap_ptr = read_pci_cfg_byte((cap_ptr + 1));
3604                 if (cap_ptr == 0x00) break;
3605                 cap_id = read_pci_cfg_byte(cap_ptr);
3606         }
3607
3608 end:
3609         return cap_ptr;
3610 }
3611
3612 void
3613 dhdpcie_pme_active(osl_t *osh, bool enable)
3614 {
3615         uint8 cap_ptr;
3616         uint32 pme_csr;
3617
3618         cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
3619
3620         if (!cap_ptr) {
3621                 DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__));
3622                 return;
3623         }
3624
3625         pme_csr = OSL_PCI_READ_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32));
3626         DHD_ERROR(("%s : pme_sts_ctrl 0x%x\n", __FUNCTION__, pme_csr));
3627
3628         pme_csr |= PME_CSR_PME_STAT;
3629         if (enable) {
3630                 pme_csr |= PME_CSR_PME_EN;
3631         } else {
3632                 pme_csr &= ~PME_CSR_PME_EN;
3633         }
3634
3635         OSL_PCI_WRITE_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32), pme_csr);
3636 }
3637 #endif /* BCMPCIE_OOB_HOST_WAKE */
3638
3639 /* Add bus dump output to a buffer */
3640 void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
3641 {
3642         uint16 flowid;
3643         flow_ring_node_t *flow_ring_node;
3644
3645         dhd_prot_print_info(dhdp, strbuf);
3646         for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) {
3647                 flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
3648                 if (flow_ring_node->active) {
3649                         bcm_bprintf(strbuf, "Flow:%d IF %d Prio %d  Qlen %d ",
3650                                 flow_ring_node->flowid, flow_ring_node->flow_info.ifindex,
3651                                 flow_ring_node->flow_info.tid, flow_ring_node->queue.len);
3652                         dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, strbuf);
3653                 }
3654         }
3655 }
3656
3657 static void
3658 dhd_update_txflowrings(dhd_pub_t *dhd)
3659 {
3660         dll_t *item, *next;
3661         flow_ring_node_t *flow_ring_node;
3662         struct dhd_bus *bus = dhd->bus;
3663
3664         for (item = dll_head_p(&bus->const_flowring);
3665                  !dll_end(&bus->const_flowring, item); item = next) {
3666                 next = dll_next_p(item);
3667
3668                 flow_ring_node = dhd_constlist_to_flowring(item);
3669                 dhd_prot_update_txflowring(dhd, flow_ring_node->flowid, flow_ring_node->prot_info);
3670         }
3671 }
3672
3673
3674 /* Mailbox ringbell Function */
3675 static void
3676 dhd_bus_gen_devmb_intr(struct dhd_bus *bus)
3677 {
3678         if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
3679                 (bus->sih->buscorerev == 4)) {
3680                 DHD_ERROR(("%s: mailbox communication not supported\n", __FUNCTION__));
3681                 return;
3682         }
3683         if (bus->db1_for_mb)  {
3684                 /* this is a pcie core register, not the config regsiter */
3685                 DHD_INFO(("%s: writing a mail box interrupt to the device, through doorbell 1\n", __FUNCTION__));
3686                 si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_DB1, ~0, 0x12345678);
3687         }
3688         else {
3689                 DHD_INFO(("%s: writing a mail box interrupt to the device, through config space\n", __FUNCTION__));
3690                 dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
3691                 dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
3692         }
3693 }
3694
3695 /* doorbell ring Function */
3696 void
3697 dhd_bus_ringbell(struct dhd_bus *bus, uint32 value)
3698 {
3699         if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
3700                 (bus->sih->buscorerev == 4)) {
3701                 si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, PCIE_INTB, PCIE_INTB);
3702         } else {
3703                 /* this is a pcie core register, not the config regsiter */
3704                 DHD_INFO(("%s: writing a door bell to the device\n", __FUNCTION__));
3705                 si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_MailBox, ~0, 0x12345678);
3706         }
3707 }
3708
3709 static void
3710 dhd_bus_ringbell_fast(struct dhd_bus *bus, uint32 value)
3711 {
3712         W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value);
3713 }
3714
3715 static void
3716 dhd_bus_ringbell_oldpcie(struct dhd_bus *bus, uint32 value)
3717 {
3718         uint32 w;
3719         w = (R_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr) & ~PCIE_INTB) | PCIE_INTB;
3720         W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, w);
3721 }
3722
3723 dhd_mb_ring_t
3724 dhd_bus_get_mbintr_fn(struct dhd_bus *bus)
3725 {
3726         if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
3727                 (bus->sih->buscorerev == 4)) {
3728                 bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
3729                         PCIMailBoxInt);
3730                 if (bus->pcie_mb_intr_addr) {
3731                         bus->pcie_mb_intr_osh = si_osh(bus->sih);
3732                         return dhd_bus_ringbell_oldpcie;
3733                 }
3734         } else {
3735                 bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
3736                         PCIH2D_MailBox);
3737                 if (bus->pcie_mb_intr_addr) {
3738                         bus->pcie_mb_intr_osh = si_osh(bus->sih);
3739                         return dhd_bus_ringbell_fast;
3740                 }
3741         }
3742         return dhd_bus_ringbell;
3743 }
3744
/*
 * Bus DPC (bottom half): read and acknowledge the device interrupt status,
 * dispatch mailbox/message processing, and re-enable the device interrupt
 * unless more work is pending.  Returns TRUE when a reschedule is wanted.
 */
bool BCMFASTPATH
dhd_bus_dpc(struct dhd_bus *bus)
{
        uint32 intstatus = 0;
        uint32 newstatus = 0;
        bool resched = FALSE;     /* Flag indicating resched wanted */

        DHD_TRACE(("%s: Enter\n", __FUNCTION__));

        if (bus->dhd->busstate == DHD_BUS_DOWN) {
                DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
                bus->intstatus = 0;
                return 0;
        }

        /* Start from whatever status the ISR already latched */
        intstatus = bus->intstatus;

        if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
                (bus->sih->buscorerev == 2)) {
                /* Old PCIe cores: interrupt status lives in config space;
                 * writing the value back acknowledges (clears) those bits.
                 */
                newstatus =  dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
                dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, newstatus);
                /* Merge new bits with previous */
                intstatus |= newstatus;
                bus->intstatus = 0;
                if (intstatus & I_MB) {
                        resched = dhdpcie_bus_process_mailbox_intr(bus, intstatus);
                }
        } else {
                /* this is a PCIE core register..not a config register... */
                newstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
                /* Keep only the interrupts we actually enabled */
                intstatus |= (newstatus & bus->def_intmask);
                /* Write the bits back to acknowledge them in the core register */
                si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, newstatus, newstatus);
                if (intstatus & bus->def_intmask) {
                        resched = dhdpcie_bus_process_mailbox_intr(bus, intstatus);
                        intstatus &= ~bus->def_intmask;
                }
        }

        if (!resched) {
                // terence 20150420: no need to enable interrupt if busstate is down
                if (bus->dhd->busstate) {
                        dhdpcie_bus_intr_enable(bus);
                }
        }
        return resched;

}
3792
3793
3794 static void
3795 dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data)
3796 {
3797         uint32 cur_h2d_mb_data = 0;
3798
3799         dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, HTOD_MB_DATA, 0);
3800
3801         if (cur_h2d_mb_data != 0) {
3802                 uint32 i = 0;
3803                 DHD_INFO(("%s: GRRRRRRR: MB transaction is already pending 0x%04x\n", __FUNCTION__, cur_h2d_mb_data));
3804                 while ((i++ < 100) && cur_h2d_mb_data) {
3805                         OSL_DELAY(10);
3806                         dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, HTOD_MB_DATA, 0);
3807                 }
3808                 if (i >= 100)
3809                         DHD_ERROR(("%s: waited 1ms for the dngl to ack the previous mb transaction\n", __FUNCTION__));
3810         }
3811
3812         dhd_bus_cmn_writeshared(bus, &h2d_mb_data, sizeof(uint32), HTOD_MB_DATA, 0);
3813         dhd_bus_gen_devmb_intr(bus);
3814
3815         if (h2d_mb_data == H2D_HOST_D3_INFORM)
3816                 DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__));
3817 }
3818
3819 static void
3820 dhdpcie_handle_mb_data(dhd_bus_t *bus)
3821 {
3822         uint32 d2h_mb_data = 0;
3823         uint32 zero = 0;
3824         dhd_bus_cmn_readshared(bus, &d2h_mb_data, DTOH_MB_DATA, 0);
3825         if (!d2h_mb_data)
3826                 return;
3827
3828         dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), DTOH_MB_DATA, 0);
3829
3830         DHD_INFO(("%s: D2H_MB_DATA: 0x%04x\n", __FUNCTION__, d2h_mb_data));
3831         if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ)  {
3832                 /* what should we do */
3833                 DHD_INFO(("%s: D2H_MB_DATA: DEEP SLEEP REQ\n", __FUNCTION__));
3834                 dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
3835                 DHD_INFO(("%s: D2H_MB_DATA: sent DEEP SLEEP ACK\n", __FUNCTION__));
3836         }
3837         if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE)  {
3838                 /* what should we do */
3839                 DHD_INFO(("%s: D2H_MB_DATA: DEEP SLEEP EXIT\n", __FUNCTION__));
3840         }
3841         if (d2h_mb_data & D2H_DEV_D3_ACK)  {
3842                 /* what should we do */
3843                 DHD_INFO_HW4(("%s D2H_MB_DATA: Received D3 ACK\n", __FUNCTION__));
3844                 if (!bus->wait_for_d3_ack) {
3845                         bus->wait_for_d3_ack = 1;
3846                         dhd_os_ioctl_resp_wake(bus->dhd);
3847                 }
3848         }
3849         if (d2h_mb_data & D2H_DEV_FWHALT)  {
3850                 DHD_INFO(("%s: FW trap has happened\n", __FUNCTION__));
3851 #ifdef DHD_DEBUG
3852                 dhdpcie_checkdied(bus, NULL, 0);
3853 #endif
3854                 bus->dhd->busstate = DHD_BUS_DOWN;
3855         }
3856 }
3857
/* Dispatch a mailbox interrupt according to the PCIe core revision.
 * Returns TRUE if frame processing was bounded and more work remains
 * (caller should reschedule), FALSE otherwise.
 */
static bool
dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus)
{
        bool resched = FALSE;

        /* Older PCIe core revisions (2, 4, 6) signal the message stream on I_BIT1 */
        if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
                (bus->sih->buscorerev == 4)) {
                /* Msg stream interrupt */
                if (intstatus & I_BIT1) {
                        resched = dhdpci_bus_read_frames(bus);
                } else if (intstatus & I_BIT0) {
                        /* do nothing for Now */
                }
        }
        else {
                /* Newer cores: function-0 mailbox bits carry device mailbox events */
                if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1))
                        dhdpcie_handle_mb_data(bus);

                /* Do not touch the message rings while the bus is suspended */
                if (bus->dhd->busstate == DHD_BUS_SUSPEND) {
                        goto exit;
                }

                if (intstatus & PCIE_MB_D2H_MB_MASK) {
                        resched = dhdpci_bus_read_frames(bus);
                }
        }
exit:
        return resched;
}
3887
/* Decode dongle to host message stream: drain control completions first,
 * then TX/RX completions (TX bounded by dhd_txbound). Returns TRUE if the
 * bound was hit and more completions may be pending.
 */
static bool
dhdpci_bus_read_frames(dhd_bus_t *bus)
{
        bool more = FALSE;

        /* There may be frames in both ctrl buf and data buf; check ctrl buf first */
        DHD_PERIM_LOCK(bus->dhd); /* Take the perimeter lock */
        dhd_prot_process_ctrlbuf(bus->dhd);
        /* Unlock to give chance for resp to be handled */
        DHD_PERIM_UNLOCK(bus->dhd); /* Release the perimeter lock */

        DHD_PERIM_LOCK(bus->dhd); /* Take the perimeter lock */
        /* update the flow ring cpls */
        dhd_update_txflowrings(bus->dhd);

        /* With heavy TX traffic, we could get a lot of TxStatus
         * so add bound
         */
        more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound);

        /* With heavy RX traffic, this routine potentially could spend some time
         * processing RX frames without RX bound
         */
        more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound);
        DHD_PERIM_UNLOCK(bus->dhd); /* Release the perimeter lock */

        return more;
}
3917
/* Locate and parse the pciedev_shared structure the dongle publishes in its
 * RAM, then initialize host-side state from it: rx data offset, tx-push mode,
 * DMA'd read/write index blocks, ring memory/state pointers, D2H sync mode.
 * Returns BCME_OK on success or a BCME_xxx error code.
 */
static int
dhdpcie_readshared(dhd_bus_t *bus)
{
        uint32 addr = 0;
        int rv, w_init, r_init;
        uint32 shaddr = 0;
        pciedev_shared_t *sh = bus->pcie_sh;
        dhd_timeout_t tmo;

        /* The last 32-bit word of dongle RAM holds the pciedev_shared address */
        shaddr = bus->dongle_ram_base + bus->ramsize - 4;
        /* start a timer for 5 seconds */
        dhd_timeout_start(&tmo, MAX_READ_TIMEOUT);

        /* Poll until the dongle replaces the initial value (0 or the nvram
         * checksum written at download time) with the real address, or timeout.
         */
        while (((addr == 0) || (addr == bus->nvram_csm)) && !dhd_timeout_expired(&tmo)) {
                /* Read last word in memory to determine address of sdpcm_shared structure */
                addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
        }

        /* Sanity: the address must lie inside dongle RAM, below the pointer word */
        if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
                (addr > shaddr)) {
                DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n",
                        __FUNCTION__, addr));
                DHD_ERROR(("%s: Waited %u usec, dongle is not ready\n", __FUNCTION__, tmo.elapsed));
                return BCME_ERROR;
        } else {
                bus->shared_addr = (ulong)addr;
                DHD_ERROR(("%s: PCIe shared addr read took %u usec "
                        "before dongle is ready\n", __FUNCTION__, tmo.elapsed));
        }

        /* Read hndrte_shared structure */
        if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)sh,
                sizeof(pciedev_shared_t))) < 0) {
                DHD_ERROR(("%s: Failed to read PCIe shared struct,"
                        "size read %d < %d\n", __FUNCTION__, rv, (int)sizeof(pciedev_shared_t)));
                return rv;
        }

        /* Endianness: shared struct fields are little-endian on the wire */
        sh->flags = ltoh32(sh->flags);
        sh->trap_addr = ltoh32(sh->trap_addr);
        sh->assert_exp_addr = ltoh32(sh->assert_exp_addr);
        sh->assert_file_addr = ltoh32(sh->assert_file_addr);
        sh->assert_line = ltoh32(sh->assert_line);
        sh->console_addr = ltoh32(sh->console_addr);
        sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
        sh->dma_rxoffset = ltoh32(sh->dma_rxoffset);
        sh->rings_info_ptr = ltoh32(sh->rings_info_ptr);
        /* load bus console address */

#ifdef DHD_DEBUG
        bus->console_addr = sh->console_addr;
#endif

        /* Read the dma rx offset and propagate it to the protocol layer */
        bus->dma_rxoffset = bus->pcie_sh->dma_rxoffset;
        dhd_prot_rx_dataoffset(bus->dhd, bus->dma_rxoffset);

        DHD_ERROR(("%s: DMA RX offset from shared Area %d\n", __FUNCTION__, bus->dma_rxoffset));

        /* Reject a dongle whose shared-structure layout is newer than this host */
        if ((sh->flags & PCIE_SHARED_VERSION_MASK) > PCIE_SHARED_VERSION) {
                DHD_ERROR(("%s: pcie_shared version %d in dhd "
                           "is older than pciedev_shared version %d in dongle\n",
                           __FUNCTION__, PCIE_SHARED_VERSION,
                           sh->flags & PCIE_SHARED_VERSION_MASK));
                return BCME_ERROR;
        }
        /* TX-push mode advertisement appears in shared-struct version >= 4 */
        if ((sh->flags & PCIE_SHARED_VERSION_MASK) >= 4) {
                if (sh->flags & PCIE_SHARED_TXPUSH_SPRT) {
#ifdef DHDTCPACK_SUPPRESS
                        /* Do not use tcpack suppress as packets don't stay in queue */
                        dhd_tcpack_suppress_set(bus->dhd, TCPACK_SUP_OFF);
#endif
                        bus->txmode_push = TRUE;
                } else
                        bus->txmode_push = FALSE;
        }
        DHD_ERROR(("%s: bus->txmode_push is set to %d\n", __FUNCTION__, bus->txmode_push));

        /* Does the FW support DMA'ing r/w indices */
        if (sh->flags & PCIE_SHARED_DMA_INDEX) {

                DHD_ERROR(("%s: Host support DMAing indices: H2D:%d - D2H:%d. FW supports it\n",
                        __FUNCTION__,
                        (DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support) ? 1 : 0),
                        (DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support) ? 1 : 0)));

        } else if (DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support) ||
                   DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support)) {

#ifdef BCM_INDX_DMA
                /* With BCM_INDX_DMA, DMA'd indices are mandatory: fail hard */
                DHD_ERROR(("%s: Incompatible FW. FW does not support DMAing indices\n",
                        __FUNCTION__));
                return BCME_ERROR;
#endif
                /* Otherwise fall back to reading/writing indices in TCM */
                DHD_ERROR(("%s: Host supports DMAing indices but FW does not\n",
                        __FUNCTION__));
                bus->dhd->dma_d2h_ring_upd_support = FALSE;
                bus->dhd->dma_h2d_ring_upd_support = FALSE;
        }


        /* get ring_info, ring_state and mb data ptrs and store the addresses in bus structure */
        {
                ring_info_t  ring_info;

                if ((rv = dhdpcie_bus_membytes(bus, FALSE, sh->rings_info_ptr,
                        (uint8 *)&ring_info, sizeof(ring_info_t))) < 0)
                        return rv;

                bus->h2d_mb_data_ptr_addr = ltoh32(sh->h2d_mb_data_ptr);
                bus->d2h_mb_data_ptr_addr = ltoh32(sh->d2h_mb_data_ptr);


                bus->max_sub_queues = ltoh16(ring_info.max_sub_queues);

                /* If both FW and Host support DMA'ing indices, allocate memory and notify FW
                 * The max_sub_queues is read from FW initialized ring_info
                 */
                if (DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support)) {
                        w_init = dhd_prot_init_index_dma_block(bus->dhd,
                                HOST_TO_DNGL_DMA_WRITEINDX_BUFFER,
                                bus->max_sub_queues);
                        r_init = dhd_prot_init_index_dma_block(bus->dhd,
                                DNGL_TO_HOST_DMA_READINDX_BUFFER,
                                BCMPCIE_D2H_COMMON_MSGRINGS);

                        /* Allocation failure is non-fatal: fall back to TCM indices */
                        if ((w_init != BCME_OK) || (r_init != BCME_OK)) {
                                DHD_ERROR(("%s: Failed to allocate memory for dma'ing h2d indices"
                                                "Host will use w/r indices in TCM\n",
                                                __FUNCTION__));
                                bus->dhd->dma_h2d_ring_upd_support = FALSE;
                        }
                }

                if (DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support)) {
                        w_init = dhd_prot_init_index_dma_block(bus->dhd,
                                DNGL_TO_HOST_DMA_WRITEINDX_BUFFER,
                                BCMPCIE_D2H_COMMON_MSGRINGS);
                        r_init = dhd_prot_init_index_dma_block(bus->dhd,
                                HOST_TO_DNGL_DMA_READINDX_BUFFER,
                                bus->max_sub_queues);

                        /* Allocation failure is non-fatal: fall back to TCM indices */
                        if ((w_init != BCME_OK) || (r_init != BCME_OK)) {
                                DHD_ERROR(("%s: Failed to allocate memory for dma'ing d2h indices"
                                                "Host will use w/r indices in TCM\n",
                                                __FUNCTION__));
                                bus->dhd->dma_d2h_ring_upd_support = FALSE;
                        }
                }

                /* read ringmem and ringstate ptrs from shared area and store in host variables */
                dhd_fillup_ring_sharedptr_info(bus, &ring_info);

                bcm_print_bytes("ring_info_raw", (uchar *)&ring_info, sizeof(ring_info_t));
                DHD_INFO(("%s: ring_info\n", __FUNCTION__));

                DHD_ERROR(("%s: max H2D queues %d\n", __FUNCTION__, ltoh16(ring_info.max_sub_queues)));

                DHD_INFO(("%s: mail box address\n", __FUNCTION__));
                DHD_INFO(("%s: h2d_mb_data_ptr_addr 0x%04x\n", __FUNCTION__, bus->h2d_mb_data_ptr_addr));
                DHD_INFO(("%s: d2h_mb_data_ptr_addr 0x%04x\n", __FUNCTION__, bus->d2h_mb_data_ptr_addr));
        }

        /* D2H completion-sync mode (e.g. seqnum/xorcsum) advertised by the FW */
        bus->dhd->d2h_sync_mode = sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK;
        DHD_INFO(("%s: d2h_sync_mode 0x%08x\n", __FUNCTION__, bus->dhd->d2h_sync_mode));

        return BCME_OK;
}
/* Read ring mem and ring state ptr info from shared area in TCM.
 * Walks the FW-provided ring_info block and records, per ring, the TCM
 * addresses of its ring_mem_t descriptor and its write/read index words
 * into bus->ring_sh[].
 */
static void
dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info)
{
        uint16 i = 0;
        uint16 j = 0;
        uint32 tcm_memloc;
        uint32  d2h_w_idx_ptr, d2h_r_idx_ptr, h2d_w_idx_ptr, h2d_r_idx_ptr;

        /* Ring mem ptr info */
        /* Allocated in the order
                H2D_MSGRING_CONTROL_SUBMIT              0
                H2D_MSGRING_RXPOST_SUBMIT               1
                D2H_MSGRING_CONTROL_COMPLETE            2
                D2H_MSGRING_TX_COMPLETE                 3
                D2H_MSGRING_RX_COMPLETE                 4
                TX_FLOW_RING                            5
        */

        {
                /* ringmemptr holds start of the mem block address space */
                tcm_memloc = ltoh32(ring_info->ringmem_ptr);

                /* Find out ringmem ptr for each ring common  ring */
                for (i = 0; i <= BCMPCIE_COMMON_MSGRING_MAX_ID; i++) {
                        bus->ring_sh[i].ring_mem_addr = tcm_memloc;
                        /* Update mem block: descriptors are laid out back to back */
                        tcm_memloc = tcm_memloc + sizeof(ring_mem_t);
                        DHD_INFO(("%s: ring id %d ring mem addr 0x%04x \n", __FUNCTION__,
                                i, bus->ring_sh[i].ring_mem_addr));
                }

                /* Tx flow Ring: in push mode a single TX ring follows the common ones */
                if (bus->txmode_push) {
                        bus->ring_sh[i].ring_mem_addr = tcm_memloc;
                        DHD_INFO(("%s: TX ring ring id %d ring mem addr 0x%04x \n", __FUNCTION__,
                                i, bus->ring_sh[i].ring_mem_addr));
                }
        }

        /* Ring state mem ptr info: arrays of uint32 w/r indices, one per ring */
        {
                d2h_w_idx_ptr = ltoh32(ring_info->d2h_w_idx_ptr);
                d2h_r_idx_ptr = ltoh32(ring_info->d2h_r_idx_ptr);
                h2d_w_idx_ptr = ltoh32(ring_info->h2d_w_idx_ptr);
                h2d_r_idx_ptr = ltoh32(ring_info->h2d_r_idx_ptr);
                /* Store h2d common ring write/read pointers */
                for (i = 0; i < BCMPCIE_H2D_COMMON_MSGRINGS; i++) {
                        bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
                        bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;

                        /* update mem block */
                        h2d_w_idx_ptr = h2d_w_idx_ptr + sizeof(uint32);
                        h2d_r_idx_ptr = h2d_r_idx_ptr + sizeof(uint32);

                        DHD_INFO(("%s: h2d w/r : idx %d write %x read %x \n", __FUNCTION__, i,
                                bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
                }
                /* Store d2h common ring write/read pointers (i continues past h2d rings) */
                for (j = 0; j < BCMPCIE_D2H_COMMON_MSGRINGS; j++, i++) {
                        bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
                        bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;

                        /* update mem block */
                        d2h_w_idx_ptr = d2h_w_idx_ptr + sizeof(uint32);
                        d2h_r_idx_ptr = d2h_r_idx_ptr + sizeof(uint32);

                        DHD_INFO(("%s: d2h w/r : idx %d write %x read %x \n", __FUNCTION__, i,
                                bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
                }

                /* Store txflow ring write/read pointers */
                if (bus->txmode_push) {
                        /* Push mode: single TX ring, reuse the next h2d index slots */
                        bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
                        bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;

                        DHD_INFO(("%s: txflow : idx %d write %x read %x \n", __FUNCTION__, i,
                                bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
                } else {
                        /* Flow-ring mode: one index pair per remaining H2D submission queue */
                        for (j = 0; j < (bus->max_sub_queues - BCMPCIE_H2D_COMMON_MSGRINGS);
                                i++, j++)
                        {
                                bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
                                bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;

                                /* update mem block */
                                h2d_w_idx_ptr = h2d_w_idx_ptr + sizeof(uint32);
                                h2d_r_idx_ptr = h2d_r_idx_ptr + sizeof(uint32);

                                DHD_INFO(("%s: FLOW Rings h2d w/r : idx %d write %x read %x \n",
                                        __FUNCTION__, i,
                                        bus->ring_sh[i].ring_state_w,
                                        bus->ring_sh[i].ring_state_r));
                        }
                }
        }
}
4184
/* Initialize bus module: prepare for communication w/dongle.
 * Verifies the dongle has published its shared area, then marks the bus
 * DHD_BUS_DATA and enables interrupts. Returns 0 on success or a negative
 * BCME_xxx error from the shared-area read.
 */
int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
{
        dhd_bus_t *bus = dhdp->bus;
        int  ret = 0;

        DHD_TRACE(("%s: Enter\n", __FUNCTION__));

        ASSERT(bus->dhd);
        if (!bus->dhd)
                return 0;

        /* Make sure we're talking to the core. */
        bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
        ASSERT(bus->reg != NULL);

        /* before opening up bus for data transfer, check if shared area is intact */
        ret = dhdpcie_readshared(bus);
        if (ret < 0) {
                DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
                return ret;
        }


        /* Make sure we're talking to the core. */
        /* NOTE(review): this repeats the si_setcore() done above; it looks
         * redundant unless dhdpcie_readshared() can change the current core —
         * confirm before removing.
         */
        bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
        ASSERT(bus->reg != NULL);

        /* Set bus state according to enable result */
        dhdp->busstate = DHD_BUS_DATA;

        /* Enable the interrupt after device is up */
        dhdpcie_bus_intr_enable(bus);

        /* bcmsdh_intr_unmask(bus->sdh); */

        return ret;

}
4224
4225
4226 static void
4227 dhdpcie_init_shared_addr(dhd_bus_t *bus)
4228 {
4229         uint32 addr = 0;
4230         uint32 val = 0;
4231         addr = bus->dongle_ram_base + bus->ramsize - 4;
4232         dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val));
4233 }
4234
4235
4236 bool
4237 dhdpcie_chipmatch(uint16 vendor, uint16 device)
4238 {
4239         if (vendor != PCI_VENDOR_ID_BROADCOM) {
4240                 DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__,
4241                         vendor, device));
4242                 return (-ENODEV);
4243         }
4244
4245         if ((device == BCM4350_D11AC_ID) || (device == BCM4350_D11AC2G_ID) ||
4246                 (device == BCM4350_D11AC5G_ID) || BCM4350_CHIP(device))
4247                 return 0;
4248
4249         if ((device == BCM4354_D11AC_ID) || (device == BCM4354_D11AC2G_ID) ||
4250                 (device == BCM4354_D11AC5G_ID) || (device == BCM4354_CHIP_ID))
4251                 return 0;
4252
4253         if ((device == BCM4356_D11AC_ID) || (device == BCM4356_D11AC2G_ID) ||
4254                 (device == BCM4356_D11AC5G_ID) || (device == BCM4356_CHIP_ID))
4255                 return 0;
4256
4257         if ((device == BCM4345_D11AC_ID) || (device == BCM4345_D11AC2G_ID) ||
4258                 (device == BCM4345_D11AC5G_ID) || (device == BCM4345_CHIP_ID))
4259                 return 0;
4260
4261         if ((device == BCM4335_D11AC_ID) || (device == BCM4335_D11AC2G_ID) ||
4262                 (device == BCM4335_D11AC5G_ID) || (device == BCM4335_CHIP_ID))
4263                 return 0;
4264
4265         if ((device == BCM43602_D11AC_ID) || (device == BCM43602_D11AC2G_ID) ||
4266                 (device == BCM43602_D11AC5G_ID) || (device == BCM43602_CHIP_ID))
4267                 return 0;
4268
4269         if ((device == BCM43569_D11AC_ID) || (device == BCM43569_D11AC2G_ID) ||
4270                 (device == BCM43569_D11AC5G_ID) || (device == BCM43569_CHIP_ID))
4271                 return 0;
4272
4273         if ((device == BCM4358_D11AC_ID) || (device == BCM4358_D11AC2G_ID) ||
4274                 (device == BCM4358_D11AC5G_ID) || (device == BCM4358_CHIP_ID))
4275                 return 0;
4276
4277         if ((device == BCM4349_D11AC_ID) || (device == BCM4349_D11AC2G_ID) ||
4278                 (device == BCM4349_D11AC5G_ID) || (device == BCM4349_CHIP_ID))
4279                 return 0;
4280         if ((device == BCM4355_D11AC_ID) || (device == BCM4355_D11AC2G_ID) ||
4281                 (device == BCM4355_D11AC5G_ID) || (device == BCM4355_CHIP_ID))
4282                 return 0;
4283         if ((device == BCM4359_D11AC_ID) || (device == BCM4359_D11AC2G_ID) ||
4284                 (device == BCM4359_D11AC5G_ID) || (device == BCM4359_CHIP_ID))
4285                 return 0;
4286
4287
4288         DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, vendor, device));
4289         return (-ENODEV);
4290 }
4291
4292
/*

Name:  dhdpcie_cc_nvmshadow

Description:
A shadow of the OTP/SPROM contents exists in the ChipCommon region
between 0x800 and 0xBFF (backplane addresses 0x1800_0800 to 0x1800_0BFF).
The strapping option (SPROM vs. OTP), the presence of OTP/SPROM, and its
size can also be read from the ChipCommon registers.
*/
4303
4304 static int
4305 dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b)
4306 {
4307         uint16 dump_offset = 0;
4308         uint32 dump_size = 0, otp_size = 0, sprom_size = 0;
4309
4310         /* Table for 65nm OTP Size (in bits) */
4311         int  otp_size_65nm[8] = {0, 2048, 4096, 8192, 4096, 6144, 512, 1024};
4312
4313         volatile uint16 *nvm_shadow;
4314
4315         uint cur_coreid;
4316         uint chipc_corerev;
4317         chipcregs_t *chipcregs;
4318
4319
4320         /* Save the current core */
4321         cur_coreid = si_coreid(bus->sih);
4322         /* Switch to ChipC */
4323         chipcregs = (chipcregs_t *)si_setcore(bus->sih, CC_CORE_ID, 0);
4324         chipc_corerev = si_corerev(bus->sih);
4325
4326         /* Check ChipcommonCore Rev */
4327         if (chipc_corerev < 44) {
4328                 DHD_ERROR(("%s: ChipcommonCore Rev %d < 44\n", __FUNCTION__, chipc_corerev));
4329                 return BCME_UNSUPPORTED;
4330         }
4331
4332         /* Check ChipID */
4333         if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) &&
4334                 ((uint16)bus->sih->chip != BCM4345_CHIP_ID)) {
4335                 DHD_ERROR(("%s: cc_nvmdump cmd. supported for 4350/4345 only\n",
4336                         __FUNCTION__));
4337                 return BCME_UNSUPPORTED;
4338         }
4339
4340         /* Check if SRC_PRESENT in SpromCtrl(0x190 in ChipCommon Regs) is set */
4341         if (chipcregs->sromcontrol & SRC_PRESENT) {
4342                 /* SPROM Size: 1Kbits (0x0), 4Kbits (0x1), 16Kbits(0x2) */
4343                 sprom_size = (1 << (2 * ((chipcregs->sromcontrol & SRC_SIZE_MASK)
4344                                         >> SRC_SIZE_SHIFT))) * 1024;
4345                 bcm_bprintf(b, "\nSPROM Present (Size %d bits)\n", sprom_size);
4346         }
4347
4348         if (chipcregs->sromcontrol & SRC_OTPPRESENT) {
4349                 bcm_bprintf(b, "\nOTP Present");
4350
4351                 if (((chipcregs->otplayout & OTPL_WRAP_TYPE_MASK) >> OTPL_WRAP_TYPE_SHIFT)
4352                         == OTPL_WRAP_TYPE_40NM) {
4353                         /* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */
4354                         otp_size =  (((chipcregs->capabilities & CC_CAP_OTPSIZE)
4355                                         >> CC_CAP_OTPSIZE_SHIFT) + 1) * 1024;
4356                         bcm_bprintf(b, "(Size %d bits)\n", otp_size);
4357                 } else {
4358                         /* This part is untested since newer chips have 40nm OTP */
4359                         otp_size = otp_size_65nm[(chipcregs->capabilities & CC_CAP_OTPSIZE)
4360                                         >> CC_CAP_OTPSIZE_SHIFT];
4361                         bcm_bprintf(b, "(Size %d bits)\n", otp_size);
4362                         DHD_INFO(("%s: 65nm/130nm OTP Size not tested. \n",
4363                                 __FUNCTION__));
4364                 }
4365         }
4366
4367         if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
4368                 ((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) {
4369                 DHD_ERROR(("%s: SPROM and OTP could not be found \n",
4370                         __FUNCTION__));
4371                 return BCME_NOTFOUND;
4372         }
4373
4374         /* Check the strapping option in SpromCtrl: Set = OTP otherwise SPROM */
4375         if ((chipcregs->sromcontrol & SRC_OTPSEL) &&
4376                 (chipcregs->sromcontrol & SRC_OTPPRESENT)) {
4377
4378                 bcm_bprintf(b, "OTP Strap selected.\n"
4379                                "\nOTP Shadow in ChipCommon:\n");
4380
4381                 dump_size = otp_size / 16 ; /* 16bit words */
4382
4383         } else if (((chipcregs->sromcontrol & SRC_OTPSEL) == 0) &&
4384                 (chipcregs->sromcontrol & SRC_PRESENT)) {
4385
4386                 bcm_bprintf(b, "SPROM Strap selected\n"
4387                                 "\nSPROM Shadow in ChipCommon:\n");
4388
4389                 /* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */
4390                 /* dump_size in 16bit words */
4391                 dump_size = sprom_size > 8 ? (8 * 1024) / 16 : sprom_size / 16;
4392         }
4393         else {
4394                 DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n",
4395                         __FUNCTION__));
4396                 return BCME_NOTFOUND;
4397         }
4398
4399         if (bus->regs == NULL) {
4400                 DHD_ERROR(("ChipCommon Regs. not initialized\n"));
4401                 return BCME_NOTREADY;
4402         } else {
4403             bcm_bprintf(b, "\n OffSet:");
4404
4405             /* Point to the SPROM/OTP shadow in ChipCommon */
4406             nvm_shadow = chipcregs->sromotp;
4407
4408            /*
4409             * Read 16 bits / iteration.
4410             * dump_size & dump_offset in 16-bit words
4411             */
4412             while (dump_offset < dump_size) {
4413                 if (dump_offset % 2 == 0)
4414                         /* Print the offset in the shadow space in Bytes */
4415                         bcm_bprintf(b, "\n 0x%04x", dump_offset * 2);
4416
4417                 bcm_bprintf(b, "\t0x%04x", *(nvm_shadow + dump_offset));
4418                 dump_offset += 0x1;
4419             }
4420         }
4421
4422         /* Switch back to the original core */
4423         si_setcore(bus->sih, cur_coreid, 0);
4424
4425         return BCME_OK;
4426 }
4427
4428
4429 uint8 BCMFASTPATH
4430 dhd_bus_is_txmode_push(dhd_bus_t *bus)
4431 {
4432         return bus->txmode_push;
4433 }
4434
/* Tear down all host-side state for a flow ring: free every queued packet,
 * mark the ring CLOSED, unlink it from the active list, and release the
 * protocol-layer ring resources and the flow id.
 */
void dhd_bus_clean_flow_ring(dhd_bus_t *bus, void *node)
{
        void *pkt;
        flow_queue_t *queue;
        flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)node;
        unsigned long flags;

        queue = &flow_ring_node->queue;

#ifdef DHDTCPACK_SUPPRESS
        /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
         * when there is a newly coming packet from network stack.
         */
        dhd_tcpack_info_tbl_clean(bus->dhd);
#endif /* DHDTCPACK_SUPPRESS */

        /* clean up BUS level info */
        DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);

        /* Flush all pending packets in the queue, if any */
        while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
                PKTFREE(bus->dhd->osh, pkt, TRUE);
        }
        ASSERT(flow_queue_empty(queue));

        /* Mark closed and unlink while still holding the ring lock */
        flow_ring_node->status = FLOW_RING_STATUS_CLOSED;
        flow_ring_node->active = FALSE;
        dll_delete(&flow_ring_node->list);

        DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);

        /* Call Flow ring clean up (protocol-layer resources), then free the id */
        dhd_prot_clean_flow_ring(bus->dhd, flow_ring_node->prot_info);
        dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex,
                        flow_ring_node->flowid);

}
4472
4473 /*
4474  * Allocate a Flow ring buffer,
4475  * Init Ring buffer,
4476  * Send Msg to device about flow ring creation
4477 */
4478 int
4479 dhd_bus_flow_ring_create_request(dhd_bus_t *bus, void *arg)
4480 {
4481         flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;
4482
4483         DHD_INFO(("%s :Flow create\n", __FUNCTION__));
4484
4485         /* Send Msg to device about flow ring creation */
4486         if (dhd_prot_flow_ring_create(bus->dhd, flow_ring_node) != BCME_OK)
4487                 return BCME_NOMEM;
4488
4489         return BCME_OK;
4490 }
4491
/* Handle the device's response to a flow-ring create request: on success
 * mark the ring OPEN and schedule any queued traffic; on failure tear down
 * the host-side ring state.
 */
void
dhd_bus_flow_ring_create_response(dhd_bus_t *bus, uint16 flowid, int32 status)
{
        flow_ring_node_t *flow_ring_node;
        unsigned long flags;

        DHD_INFO(("%s :Flow Response %d \n", __FUNCTION__, flowid));

        flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
        ASSERT(flow_ring_node->flowid == flowid);

        if (status != BCME_OK) {
                DHD_ERROR(("%s Flow create Response failure error status = %d \n",
                     __FUNCTION__, status));
                /* Call Flow clean up */
                dhd_bus_clean_flow_ring(bus, flow_ring_node);
                return;
        }

        DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
        flow_ring_node->status = FLOW_RING_STATUS_OPEN;
        DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);

        /* Kick transmission of anything queued while the ring was pending */
        dhd_bus_schedule_queue(bus, flowid, FALSE);

        return;
}
4519
/* Request deletion of a flow ring: drop all queued packets, mark the ring
 * DELETE_PENDING under its lock, and post the delete message to the device.
 * Returns BCME_ERROR if a delete is already pending, BCME_OK otherwise.
 */
int
dhd_bus_flow_ring_delete_request(dhd_bus_t *bus, void *arg)
{
        void * pkt;
        flow_queue_t *queue;
        flow_ring_node_t *flow_ring_node;
        unsigned long flags;

        DHD_INFO(("%s :Flow Delete\n", __FUNCTION__));

        flow_ring_node = (flow_ring_node_t *)arg;

        DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
        /* Reject a second delete while the first is still outstanding */
        if (flow_ring_node->status & FLOW_RING_STATUS_DELETE_PENDING) {
                DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
                DHD_ERROR(("%s :Delete Pending\n", __FUNCTION__));
                return BCME_ERROR;
        }
        /* Mark pending under the lock, before the message is posted */
        flow_ring_node->status = FLOW_RING_STATUS_DELETE_PENDING;

        queue = &flow_ring_node->queue; /* queue associated with flow ring */

#ifdef DHDTCPACK_SUPPRESS
        /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
         * when there is a newly coming packet from network stack.
         */
        dhd_tcpack_info_tbl_clean(bus->dhd);
#endif /* DHDTCPACK_SUPPRESS */
        /* Flush all pending packets in the queue, if any */
        while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
                PKTFREE(bus->dhd->osh, pkt, TRUE);
        }
        ASSERT(flow_queue_empty(queue));

        DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);

        /* Send Msg to device about flow ring deletion */
        dhd_prot_flow_ring_delete(bus->dhd, flow_ring_node);

        return BCME_OK;
}
4561
4562 void
4563 dhd_bus_flow_ring_delete_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
4564 {
4565         flow_ring_node_t *flow_ring_node;
4566
4567         DHD_INFO(("%s :Flow Delete Response %d \n", __FUNCTION__, flowid));
4568
4569         flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
4570         ASSERT(flow_ring_node->flowid == flowid);
4571
4572         if (status != BCME_OK) {
4573                 DHD_ERROR(("%s Flow Delete Response failure error status = %d \n",
4574                     __FUNCTION__, status));
4575                 return;
4576         }
4577         /* Call Flow clean up */
4578         dhd_bus_clean_flow_ring(bus, flow_ring_node);
4579
4580         return;
4581
4582 }
4583
4584 int dhd_bus_flow_ring_flush_request(dhd_bus_t *bus, void *arg)
4585 {
4586         void *pkt;
4587         flow_queue_t *queue;
4588         flow_ring_node_t *flow_ring_node;
4589         unsigned long flags;
4590
4591         DHD_INFO(("%s :Flow Delete\n", __FUNCTION__));
4592
4593         flow_ring_node = (flow_ring_node_t *)arg;
4594         queue = &flow_ring_node->queue; /* queue associated with flow ring */
4595
4596         DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
4597
4598 #ifdef DHDTCPACK_SUPPRESS
4599         /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
4600          * when there is a newly coming packet from network stack.
4601          */
4602         dhd_tcpack_info_tbl_clean(bus->dhd);
4603 #endif /* DHDTCPACK_SUPPRESS */
4604         /* Flush all pending packets in the queue, if any */
4605         while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
4606                 PKTFREE(bus->dhd->osh, pkt, TRUE);
4607         }
4608         ASSERT(flow_queue_empty(queue));
4609
4610         DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4611
4612         /* Send Msg to device about flow ring flush */
4613         dhd_prot_flow_ring_flush(bus->dhd, flow_ring_node);
4614
4615         flow_ring_node->status = FLOW_RING_STATUS_FLUSH_PENDING;
4616         return BCME_OK;
4617 }
4618
4619 void
4620 dhd_bus_flow_ring_flush_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
4621 {
4622         flow_ring_node_t *flow_ring_node;
4623
4624         if (status != BCME_OK) {
4625                 DHD_ERROR(("%s Flow flush Response failure error status = %d \n",
4626                     __FUNCTION__, status));
4627                 return;
4628         }
4629
4630         flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
4631         ASSERT(flow_ring_node->flowid == flowid);
4632
4633         flow_ring_node->status = FLOW_RING_STATUS_OPEN;
4634         return;
4635 }
4636
4637 uint32
4638 dhd_bus_max_h2d_queues(struct dhd_bus *bus, uint8 *txpush)
4639 {
4640         if (bus->txmode_push)
4641                 *txpush = 1;
4642         else
4643                 *txpush = 0;
4644         return bus->max_sub_queues;
4645 }
4646
/* Start the host-side PCIe clock; thin wrapper over the OS-specific layer. */
int
dhdpcie_bus_clock_start(struct dhd_bus *bus)
{
	int err;

	err = dhdpcie_start_host_pcieclock(bus);
	return err;
}
4652
/* Stop the host-side PCIe clock; thin wrapper over the OS-specific layer. */
int
dhdpcie_bus_clock_stop(struct dhd_bus *bus)
{
	int err;

	err = dhdpcie_stop_host_pcieclock(bus);
	return err;
}
4658
/* Disable the PCIe device; thin wrapper over the OS-specific layer. */
int
dhdpcie_bus_disable_device(struct dhd_bus *bus)
{
	int err;

	err = dhdpcie_disable_device(bus);
	return err;
}
4664
/* Enable the PCIe device; thin wrapper over the OS-specific layer. */
int
dhdpcie_bus_enable_device(struct dhd_bus *bus)
{
	int err;

	err = dhdpcie_enable_device(bus);
	return err;
}
4670
/* Allocate bus resources; thin wrapper over the OS-specific layer. */
int
dhdpcie_bus_alloc_resource(struct dhd_bus *bus)
{
	int err;

	err = dhdpcie_alloc_resource(bus);
	return err;
}
4676
/* Free bus resources; thin wrapper delegating to the OS-specific layer. */
void
dhdpcie_bus_free_resource(struct dhd_bus *bus)
{
	dhdpcie_free_resource(bus);
}
4682
/* Request the bus interrupt line; thin wrapper over the OS-specific layer. */
int
dhd_bus_request_irq(struct dhd_bus *bus)
{
	int err;

	err = dhdpcie_bus_request_irq(bus);
	return err;
}
4688
4689 bool
4690 dhdpcie_bus_dongle_attach(struct dhd_bus *bus)
4691 {
4692         return dhdpcie_dongle_attach(bus);
4693 }
4694
4695 int
4696 dhd_bus_release_dongle(struct dhd_bus *bus)
4697 {
4698         bool dongle_isolation;
4699         osl_t           *osh;
4700
4701         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4702
4703         if (bus) {
4704                 osh = bus->osh;
4705                 ASSERT(osh);
4706
4707                 if (bus->dhd) {
4708                         dongle_isolation = bus->dhd->dongle_isolation;
4709                         dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
4710                 }
4711         }
4712
4713         return 0;
4714 }
4715
4716 #ifdef BCMPCIE_OOB_HOST_WAKE
4717 int dhd_bus_oob_intr_register(dhd_pub_t *dhdp)
4718 {
4719         return dhdpcie_oob_intr_register(dhdp->bus);
4720 }
4721
/* Unregister the out-of-band host-wake interrupt for this bus. */
void dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp)
{
	dhdpcie_oob_intr_unregister(dhdp->bus);
}
4726
/* Enable or disable the out-of-band host-wake interrupt for this bus. */
void dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable)
{
	dhdpcie_oob_intr_set(dhdp->bus, enable);
}
4731 #endif /* BCMPCIE_OOB_HOST_WAKE */