2 * DHD Bus Module for PCIE
4 * $Copyright Open Broadcom Corporation$
6 * $Id: dhd_pcie.c 506043 2014-10-02 12:29:45Z $
18 #if defined(DHD_DEBUG)
19 #include <hnd_armtrap.h>
21 #endif /* defined(DHD_DEBUG) */
22 #include <dngl_stats.h>
23 #include <pcie_core.h>
26 #include <dhd_flowring.h>
27 #include <dhd_proto.h>
31 #include <bcmmsgbuf.h>
35 #include <bcmendian.h>
36 #ifdef DHDTCPACK_SUPPRESS
38 #endif /* DHDTCPACK_SUPPRESS */
39 #include <dhd_config.h>
42 #include BCMEMBEDIMAGE
43 #endif /* BCMEMBEDIMAGE */
45 #define MEMBLOCK 2048 /* Block size used for downloading of dongle image */
46 #define MAX_NVRAMBUF_SIZE 6144 /* max nvram buf size */
48 #define ARMCR4REG_BANKIDX (0x40/sizeof(uint32))
49 #define ARMCR4REG_BANKPDA (0x4C/sizeof(uint32))
50 /* Temporary war to fix precommit till sync issue between trunk & precommit branch is resolved */
52 #if defined(SUPPORT_MULTIPLE_BOARD_REV)
53 extern unsigned int system_rev;
54 #endif /* SUPPORT_MULTIPLE_BOARD_REV */
56 int dhd_dongle_memsize;
57 int dhd_dongle_ramsize;
59 static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size);
60 static int dhdpcie_bus_readconsole(dhd_bus_t *bus);
62 static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size);
63 static int dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid,
64 const char *name, void *params,
65 int plen, void *arg, int len, int val_size);
66 static int dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 intval);
67 static int dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus,
68 uint32 len, uint32 srcdelay, uint32 destdelay);
69 static int dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter);
70 static int _dhdpcie_download_firmware(struct dhd_bus *bus);
71 static int dhdpcie_download_firmware(dhd_bus_t *bus, osl_t *osh);
72 static int dhdpcie_bus_write_vars(dhd_bus_t *bus);
73 static bool dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus);
74 static bool dhdpci_bus_read_frames(dhd_bus_t *bus);
75 static int dhdpcie_readshared(dhd_bus_t *bus);
76 static void dhdpcie_init_shared_addr(dhd_bus_t *bus);
77 static bool dhdpcie_dongle_attach(dhd_bus_t *bus);
78 static void dhdpcie_bus_intr_enable(dhd_bus_t *bus);
79 static void dhdpcie_bus_dongle_setmemsize(dhd_bus_t *bus, int mem_size);
80 static void dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh,
81 bool dongle_isolation, bool reset_flag);
82 static void dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh);
83 static int dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len);
84 static uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset);
85 static void dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data);
86 static void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data);
87 static uint16 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset);
88 static void dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data);
89 static uint32 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset);
90 static void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data);
91 static uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset);
92 static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data);
93 #ifdef CONFIG_ARCH_MSM8994
94 static void dhdpcie_bus_cfg_set_bar1_win(dhd_bus_t *bus, uint32 data);
95 static ulong dhd_bus_cmn_check_offset(dhd_bus_t *bus, ulong offset);
97 static void dhdpcie_bus_reg_unmap(osl_t *osh, ulong addr, int size);
98 static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b);
99 static void dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data);
100 static void dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info);
101 extern void dhd_dpc_kill(dhd_pub_t *dhdp);
104 static int dhdpcie_download_code_array(dhd_bus_t *bus);
105 #endif /* BCMEMBEDIMAGE */
109 #define PCI_VENDOR_ID_BROADCOM 0x14e4
116 IOV_SET_DOWNLOAD_STATE,
134 IOV_LTRSLEEPON_UNLOOAD,
139 IOV_DUMP_RINGUPD_BLOCK,
/* IOVAR dispatch table: maps iovar name strings to an ID, flags, value type
 * and minimum buffer length, consumed by dhdpcie_bus_doiovar().
 * NOTE(review): this chunk is a sampled extract -- the terminating sentinel
 * entry and closing "};" (original lines 180-181) are not visible here.
 * NOTE(review): IOV_LTRSLEEPON_UNLOOAD is misspelled ("UNLOOAD") but is used
 * consistently with the enum at original line 134 -- renaming would require
 * touching both sites.
 */
148 const bcm_iovar_t dhdpcie_iovars[] = {
149 {"intr", IOV_INTR, 0, IOVT_BOOL, 0 },
150 {"membytes", IOV_MEMBYTES, 0, IOVT_BUFFER, 2 * sizeof(int) },
151 {"memsize", IOV_MEMSIZE, 0, IOVT_UINT32, 0 },
152 {"dwnldstate", IOV_SET_DOWNLOAD_STATE, 0, IOVT_BOOL, 0 },
153 {"vars", IOV_VARS, 0, IOVT_BUFFER, 0 },
154 {"devreset", IOV_DEVRESET, 0, IOVT_BOOL, 0 },
155 {"pcie_lpbk", IOV_PCIE_LPBK, 0, IOVT_UINT32, 0 },
156 {"cc_nvmshadow", IOV_CC_NVMSHADOW, 0, IOVT_BUFFER, 0 },
157 {"ramsize", IOV_RAMSIZE, 0, IOVT_UINT32, 0 },
158 {"ramstart", IOV_RAMSTART, 0, IOVT_UINT32, 0 },
159 {"pciereg", IOV_PCIEREG, 0, IOVT_BUFFER, 2 * sizeof(int32) },
160 {"pciecfgreg", IOV_PCIECFGREG, 0, IOVT_BUFFER, 2 * sizeof(int32) },
161 {"pciecorereg", IOV_PCIECOREREG, 0, IOVT_BUFFER, 2 * sizeof(int32) },
162 {"pcieserdesreg", IOV_PCIESERDESREG, 0, IOVT_BUFFER, 3 * sizeof(int32) },
163 {"bar0secwinreg", IOV_BAR0_SECWIN_REG, 0, IOVT_BUFFER, 2 * sizeof(int32) },
164 {"sbreg", IOV_SBREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
165 {"pcie_dmaxfer", IOV_PCIE_DMAXFER, 0, IOVT_BUFFER, 3 * sizeof(int32) },
166 {"pcie_suspend", IOV_PCIE_SUSPEND, 0, IOVT_UINT32, 0 },
167 {"sleep_allowed", IOV_SLEEP_ALLOWED, 0, IOVT_BOOL, 0 },
168 {"dngl_isolation", IOV_DONGLEISOLATION, 0, IOVT_UINT32, 0 },
169 {"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD, 0, IOVT_UINT32, 0 },
170 {"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK, 0, IOVT_BUFFER, 0 },
171 {"dma_ring_indices", IOV_DMA_RINGINDICES, 0, IOVT_UINT32, 0},
172 {"rx_metadata_len", IOV_RX_METADATALEN, 0, IOVT_UINT32, 0 },
173 {"tx_metadata_len", IOV_TX_METADATALEN, 0, IOVT_UINT32, 0 },
174 {"db1_for_mb", IOV_DB1_FOR_MB, 0, IOVT_UINT32, 0 },
175 {"txp_thresh", IOV_TXP_THRESHOLD, 0, IOVT_UINT32, 0 },
176 {"buzzz_dump", IOV_BUZZZ_DUMP, 0, IOVT_UINT32, 0 },
177 {"flow_prio_map", IOV_FLOW_PRIO_MAP, 0, IOVT_UINT32, 0 },
178 {"rxbound", IOV_RXBOUND, 0, IOVT_UINT32, 0 },
179 {"txbound", IOV_TXBOUND, 0, IOVT_UINT32, 0 },
/* NOTE(review): unparenthesized macro expansion -- usable only where operator
 * precedence cannot bite; (5 * 1000 * 1000) would be safer. Left as-is to
 * keep code bytes unchanged in this truncated extract.
 */
183 #define MAX_READ_TIMEOUT 5 * 1000 * 1000
/* Default per-DPC bounds on RX/TX frame processing (tunable via the
 * "rxbound"/"txbound" iovars in dhdpcie_iovars[]).
 */
186 #define DHD_RXBOUND 64
189 #define DHD_TXBOUND 64
191 uint dhd_rxbound = DHD_RXBOUND;
192 uint dhd_txbound = DHD_TXBOUND;
194 /* Register/Unregister functions are called by the main DHD entry
195 * point (e.g. module insertion) to link with the bus driver, in
196 * order to look for or await the device.
/* Register this bus driver with the host PCIe stack (called at module
 * insertion); returns the result of dhdpcie_bus_register().
 * NOTE(review): return-type line and braces are missing from this extract.
 */
200 dhd_bus_register(void)
202 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
204 return dhdpcie_bus_register();
/* Unregister the bus driver at module removal; counterpart of
 * dhd_bus_register().
 */
208 dhd_bus_unregister(void)
210 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
212 dhdpcie_bus_unregister();
217 /** returns a host virtual address */
/* Map the physical register window [addr, addr+size) into host virtual
 * address space via the OS REG_MAP() abstraction.
 */
219 dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size)
221 return (uint32 *)REG_MAP(addr, size);
/* Undo dhdpcie_bus_reg_map(); 'osh' and 'size' are unused by REG_UNMAP()
 * but kept for interface symmetry with the map call.
 */
225 dhdpcie_bus_reg_unmap(osl_t *osh, ulong addr, int size)
227 REG_UNMAP((void*)(uintptr)addr);
232 * 'regs' is the host virtual address that maps to the start of the PCIe BAR0 window. The first 4096
233 * bytes in this window are mapped to the backplane address in the PCIEBAR0Window register. The
234 * precondition is that the PCIEBAR0Window register 'points' at the PCIe core.
236 * 'tcm' is the *host* virtual address at which tcm is mapped.
/* Allocate and initialize the dhd_bus_t: zero it, init the flow-ring list,
 * allocate the pcie shared struct, attach the dongle (si_attach etc.) and
 * then the common DHD layer. Returns the new bus on success; on failure the
 * tail lines below free pcie_sh and the bus struct.
 * NOTE(review): the goto labels / return statements between the success and
 * failure paths (original lines ~279-295) are missing from this extract.
 */
238 dhd_bus_t* dhdpcie_bus_attach(osl_t *osh, volatile char* regs, volatile char* tcm, uint32 tcm_size)
242 DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
245 if (!(bus = MALLOC(osh, sizeof(dhd_bus_t)))) {
246 DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
249 bzero(bus, sizeof(dhd_bus_t));
252 bus->tcm_size = tcm_size;
255 dll_init(&bus->const_flowring);
257 /* Attach pcie shared structure */
258 bus->pcie_sh = MALLOC(osh, sizeof(pciedev_shared_t));
260 DHD_ERROR(("%s: MALLOC of bus->pcie_sh failed\n", __FUNCTION__));
264 /* dhd_common_init(osh); */
265 if (dhdpcie_dongle_attach(bus)) {
266 DHD_ERROR(("%s: dhdpcie_probe_attach failed\n", __FUNCTION__));
270 /* software resources */
271 if (!(bus->dhd = dhd_attach(osh, bus, PCMSGBUF_HDRLEN))) {
272 DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
276 bus->dhd->busstate = DHD_BUS_DOWN;
277 bus->db1_for_mb = TRUE;
278 bus->dhd->hang_report = TRUE;
280 DHD_TRACE(("%s: EXIT SUCCESS\n",
/* --- failure cleanup path --- */
286 DHD_TRACE(("%s: EXIT FAILURE\n", __FUNCTION__));
288 if (bus && bus->pcie_sh)
289 MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
292 MFREE(osh, bus, sizeof(dhd_bus_t));
/* Return the chip ID from the SI handle; requires dongle attach done. */
298 dhd_bus_chip(struct dhd_bus *bus)
300 ASSERT(bus->sih != NULL);
301 return bus->sih->chip;
/* Return the chip revision from the SI handle; requires dongle attach done. */
305 dhd_bus_chiprev(struct dhd_bus *bus)
308 ASSERT(bus->sih != NULL);
309 return bus->sih->chiprev;
/* Accessor -- presumably returns bus->dhd (the public dhd_pub_t); body not
 * visible in this extract, TODO confirm against full source.
 */
313 dhd_bus_pub(struct dhd_bus *bus)
/* Return the opaque SI (backplane) handle for this bus. */
319 dhd_bus_sih(struct dhd_bus *bus)
321 return (void *)bus->sih;
/* Accessor -- presumably returns the bus TX queue; body not visible in this
 * extract, TODO confirm against full source.
 */
325 dhd_bus_txq(struct dhd_bus *bus)
330 /* Get Chip ID version */
/* Same as dhd_bus_chip() but keyed off the public dhd_pub_t handle. */
331 uint dhd_bus_chip_id(dhd_pub_t *dhdp)
333 dhd_bus_t *bus = dhdp->bus;
334 return bus->sih->chip;
337 /* Get Chip Rev ID version */
/* Same as dhd_bus_chiprev() but keyed off the public dhd_pub_t handle. */
338 uint dhd_bus_chiprev_id(dhd_pub_t *dhdp)
340 dhd_bus_t *bus = dhdp->bus;
341 return bus->sih->chiprev;
344 /* Get Chip Pkg ID version */
/* Return the chip package option from the SI handle. */
345 uint dhd_bus_chippkg_id(dhd_pub_t *dhdp)
347 dhd_bus_t *bus = dhdp->bus;
348 return bus->sih->chippkg;
354 Name: dhdpcie_bus_isr
358 1: IN int irq -- interrupt vector
359 2: IN void *arg -- handle to private data structure
363 Status (TRUE or FALSE)
366 Interrupt Service routine checks for the status register,
367 disable interrupt and queue DPC if mail box interrupts are raised.
/* Top-half ISR: validate args/bus state, mask further interrupts, then
 * either run the DPC inline (PCIE_ISR_THREAD) or schedule it.
 * NOTE(review): the null-check "if" for 'bus' (original line 378) and the
 * intstatus read (original line 400) are missing from this extract.
 */
372 dhdpcie_bus_isr(dhd_bus_t *bus)
376 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
377 /* verify argument */
379 DHD_ERROR(("%s : bus is null pointer , exit \n", __FUNCTION__));
383 if (bus->dhd->busstate == DHD_BUS_DOWN) {
384 DHD_TRACE(("%s : bus is down. we have nothing to do\n",
389 /* Overall operation:
390 * - Mask further interrupts
391 * - Read/ack intstatus
392 * - Take action based on bits and state
393 * - Reenable interrupts (as per state)
396 /* Count the interrupt call */
399 /* read interrupt status register!! Status bits will be cleared in DPC !! */
401 dhdpcie_bus_intr_disable(bus); /* Disable interrupt!! */
404 #if defined(PCIE_ISR_THREAD)
406 DHD_TRACE(("Calling dhd_bus_dpc() from %s\n", __FUNCTION__));
407 DHD_OS_WAKE_LOCK(bus->dhd);
408 while (dhd_bus_dpc(bus));
409 DHD_OS_WAKE_UNLOCK(bus->dhd);
411 bus->dpc_sched = TRUE;
412 dhd_sched_dpc(bus->dhd); /* queue DPC now!! */
/* NOTE(review): the #endif comment below says SDIO_ISR_THREAD but the
 * conditional above is PCIE_ISR_THREAD -- stale copy/paste from the SDIO
 * bus file; comment only, no functional impact.
 */
413 #endif /* defined(SDIO_ISR_THREAD) */
415 DHD_TRACE(("%s: Exit Success DPC Queued\n", __FUNCTION__));
420 DHD_TRACE(("%s: Exit Failure\n", __FUNCTION__));
/* Probe/attach the dongle: program BAR0 to the chipcommon enum base, attach
 * the SI backplane, apply a BAR1-sizing WAR, locate the ARM core, size RAM
 * (SOCRAM or CR4 TCM) and set the per-chip RAM base, then set default
 * interrupt masks and flags. Returns 0/FALSE-style success indicator at the
 * (not visible) tail; failure path detaches sih.
 * NOTE(review): this extract is heavily sampled -- braces, 'break's in the
 * switch, goto labels and several declarations (e.g. 'val') are missing.
 */
425 dhdpcie_dongle_attach(dhd_bus_t *bus)
428 osl_t *osh = bus->osh;
429 void *regsva = (void*)bus->regs;
430 uint16 devid = bus->cl_devid;
432 sbpcieregs_t *sbpcieregs;
434 DHD_TRACE(("%s: ENTER\n",
438 bus->alp_only = TRUE;
441 /* Set bar0 window to si_enum_base */
442 dhdpcie_bus_cfg_set_bar0_win(bus, SI_ENUM_BASE);
444 #ifdef CONFIG_ARCH_MSM8994
445 /* Read bar1 window */
446 bus->bar1_win_base = OSL_PCI_READ_CONFIG(bus->osh, PCI_BAR1_WIN, 4);
447 DHD_ERROR(("%s: PCI_BAR1_WIN = %x\n", __FUNCTION__, bus->bar1_win_base));
450 /* si_attach() will provide an SI handle and scan the backplane */
451 if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus,
452 &bus->vars, &bus->varsz))) {
453 DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
458 si_setcore(bus->sih, PCIE2_CORE_ID, 0);
459 sbpcieregs = (sbpcieregs_t*)(bus->regs);
461 /* WAR where the BAR1 window may not be sized properly */
462 W_REG(osh, &sbpcieregs->configaddr, 0x4e0);
463 val = R_REG(osh, &sbpcieregs->configdata);
464 #ifdef CONFIG_ARCH_MSM8994
/* Mask assumes tcm_size is a power of two -- TODO confirm. */
465 bus->bar1_win_mask = 0xffffffff - (bus->tcm_size - 1);
466 DHD_ERROR(("%s: BAR1 window val=%d mask=%x\n", __FUNCTION__, val, bus->bar1_win_mask));
468 W_REG(osh, &sbpcieregs->configdata, val);
470 /* Get info on the ARM and SOCRAM cores... */
471 /* Should really be qualified by device id */
472 if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
473 (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
474 (si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
475 bus->armrev = si_corerev(bus->sih);
477 DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
481 if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
482 if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
483 DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
487 /* cr4 has a different way to find the RAM size from TCM's */
488 if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) {
489 DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__));
492 /* also populate base address */
493 switch ((uint16)bus->sih->chip) {
494 case BCM4339_CHIP_ID:
495 case BCM4335_CHIP_ID:
496 bus->dongle_ram_base = CR4_4335_RAM_BASE;
498 case BCM4358_CHIP_ID:
499 case BCM4356_CHIP_ID:
500 case BCM4354_CHIP_ID:
501 case BCM43567_CHIP_ID:
502 case BCM43569_CHIP_ID:
503 case BCM4350_CHIP_ID:
504 case BCM43570_CHIP_ID:
505 bus->dongle_ram_base = CR4_4350_RAM_BASE;
507 case BCM4360_CHIP_ID:
508 bus->dongle_ram_base = CR4_4360_RAM_BASE;
510 case BCM4345_CHIP_ID:
511 bus->dongle_ram_base = CR4_4345_RAM_BASE;
513 case BCM43602_CHIP_ID:
514 bus->dongle_ram_base = CR4_43602_RAM_BASE;
516 case BCM4349_CHIP_GRPID:
517 bus->dongle_ram_base = CR4_4349_RAM_BASE;
/* default case: unknown chip, fall back to base 0 with a warning */
520 bus->dongle_ram_base = 0;
521 DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
522 __FUNCTION__, bus->dongle_ram_base));
525 bus->ramsize = bus->orig_ramsize;
526 if (dhd_dongle_memsize)
527 dhdpcie_bus_dongle_setmemsize(bus, dhd_dongle_memsize);
529 DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n",
530 bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base));
532 bus->srmemsize = si_socram_srmem_size(bus->sih);
/* Default mailbox interrupt mask used by intr_enable/disable below. */
535 bus->def_intmask = PCIE_MB_D2H_MB_MASK | PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1;
537 /* Set the poll and/or interrupt flags */
538 bus->intr = (bool)dhd_intr;
540 bus->wait_for_d3_ack = 1;
541 bus->suspended = FALSE;
542 DHD_TRACE(("%s: EXIT: SUCCESS\n",
/* --- failure path: detach sih if it was attached --- */
547 if (bus->sih != NULL)
549 DHD_TRACE(("%s: EXIT: FAILURE\n",
/* Unmask mailbox interrupts via PCI config space (old buscorerevs only;
 * see dhdpcie_bus_intr_enable()).
 */
555 dhpcie_bus_unmask_interrupt(dhd_bus_t *bus)
557 dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, I_MB);
/* Mask all interrupts via PCI config space; counterpart of
 * dhpcie_bus_unmask_interrupt().
 */
561 dhpcie_bus_mask_interrupt(dhd_bus_t *bus)
563 dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, 0x0);
/* Enable dongle->host interrupts. Old PCIe core revs (2/4/6) use the PCI
 * config-space intmask; newer revs use the core's PCIMailBoxMask register
 * with the def_intmask computed in dhdpcie_dongle_attach().
 */
568 dhdpcie_bus_intr_enable(dhd_bus_t *bus)
570 DHD_TRACE(("%s: enable interrupts\n", __FUNCTION__));
572 if (!bus || !bus->sih)
575 if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
576 (bus->sih->buscorerev == 4)) {
577 dhpcie_bus_unmask_interrupt(bus);
580 si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask,
581 bus->def_intmask, bus->def_intmask);
/* Disable dongle->host interrupts; mirror image of
 * dhdpcie_bus_intr_enable() (writes 0 into the mailbox mask bits).
 */
586 dhdpcie_bus_intr_disable(dhd_bus_t *bus)
589 DHD_TRACE(("%s Enter\n", __FUNCTION__));
591 if (!bus || !bus->sih)
594 if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
595 (bus->sih->buscorerev == 4)) {
596 dhpcie_bus_mask_interrupt(bus);
599 si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask,
600 bus->def_intmask, 0);
603 DHD_TRACE(("%s Exit\n", __FUNCTION__));
/* Prepare for device removal: under the sd lock, mark the bus down, disable
 * interrupts and reset the dongle via the PCIe watchdog.
 */
607 dhdpcie_bus_remove_prep(dhd_bus_t *bus)
609 DHD_TRACE(("%s Enter\n", __FUNCTION__));
611 dhd_os_sdlock(bus->dhd);
613 bus->dhd->busstate = DHD_BUS_DOWN;
614 dhdpcie_bus_intr_disable(bus);
615 // terence 20150406: fix for null pointer handle
617 pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *)(bus->regs));
619 dhd_os_sdunlock(bus->dhd);
621 DHD_TRACE(("%s Exit\n", __FUNCTION__));
625 /* Detach and free everything */
/* Tear down in reverse order of dhdpcie_bus_attach(): disable/free the IRQ,
 * detach the DHD layer and dongle, unmap regs/tcm, free vars, pcie_sh, the
 * console buffer and finally the bus struct itself.
 * NOTE(review): the guard conditions around several frees/unmaps (original
 * lines not shown) are missing from this extract.
 */
627 dhdpcie_bus_release(dhd_bus_t *bus)
629 bool dongle_isolation = FALSE;
632 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
640 dongle_isolation = bus->dhd->dongle_isolation;
642 dhdpcie_bus_intr_disable(bus);
643 dhdpcie_free_irq(bus);
645 dhd_detach(bus->dhd);
646 dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
651 /* unmap the regs and tcm here!! */
653 dhdpcie_bus_reg_unmap(osh, (ulong)bus->regs, DONGLE_REG_MAP_SIZE);
657 dhdpcie_bus_reg_unmap(osh, (ulong)bus->tcm, bus->tcm_size);
661 dhdpcie_bus_release_malloc(bus, osh);
662 /* Detach pcie shared structure */
664 MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
668 if (bus->console.buf != NULL)
669 MFREE(osh, bus->console.buf, bus->console.bufsize);
673 /* Finally free bus info */
674 MFREE(osh, bus, sizeof(dhd_bus_t));
678 DHD_TRACE(("%s: Exit\n", __FUNCTION__));
/* Release the dongle: skip entirely if a dongle reset is already pending and
 * reset_flag is set; otherwise watchdog-reset it (unless isolated), clear
 * LTR sleep state if requested, then free the NVRAM vars buffer.
 */
684 dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
687 DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__,
688 bus->dhd, bus->dhd->dongle_reset));
690 if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag) {
691 DHD_TRACE(("%s Exit\n", __FUNCTION__));
697 if (!dongle_isolation)
698 pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *)(bus->regs));
700 if (bus->ltrsleep_on_unload) {
701 si_corereg(bus->sih, bus->sih->buscoreidx,
702 OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0);
705 // terence 20150420: fix for sih incorrectly handled in other function
707 if (bus->vars && bus->varsz)
708 MFREE(osh, bus->vars, bus->varsz);
712 DHD_TRACE(("%s Exit\n", __FUNCTION__));
/* Read 'size' bytes from PCI config space at 'addr'; thin OSL wrapper. */
716 dhdpcie_bus_cfg_read_dword(dhd_bus_t *bus, uint32 addr, uint32 size)
718 uint32 data = OSL_PCI_READ_CONFIG(bus->osh, addr, size);
722 /* 32 bit config write */
/* Write 'data' ('size' bytes) to PCI config space at 'addr'. */
724 dhdpcie_bus_cfg_write_dword(dhd_bus_t *bus, uint32 addr, uint32 size, uint32 data)
726 OSL_PCI_WRITE_CONFIG(bus->osh, addr, size, data);
/* Point the BAR0 backplane window at 'data' (a backplane address). */
730 dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data)
732 OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, 4, data);
735 #ifdef CONFIG_ARCH_MSM8994
/* Point the BAR1 (TCM) window at 'data'; MSM8994-only BAR1 WAR path. */
737 dhdpcie_bus_cfg_set_bar1_win(dhd_bus_t *bus, uint32 data)
739 OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR1_WIN, 4, data);
/* Clamp the advertised dongle RAM size to the user-specified
 * dhd_dongle_memsize, but only if it lies in (DONGLE_MIN_MEMSIZE,
 * orig_ramsize); otherwise ramsize is left unchanged.
 */
744 dhdpcie_bus_dongle_setmemsize(struct dhd_bus *bus, int mem_size)
746 int32 min_size = DONGLE_MIN_MEMSIZE;
747 /* Restrict the memsize to user specified limit */
748 DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n",
749 dhd_dongle_memsize, min_size));
750 if ((dhd_dongle_memsize > min_size) &&
751 (dhd_dongle_memsize < (int32)bus->orig_ramsize))
752 bus->ramsize = dhd_dongle_memsize;
/* Free bus-owned allocations (NVRAM vars) unless a dongle reset is pending,
 * in which case vars must survive for the re-download.
 */
756 dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh)
758 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
760 if (bus->dhd && bus->dhd->dongle_reset)
763 if (bus->vars && bus->varsz) {
764 MFREE(osh, bus->vars, bus->varsz);
768 DHD_TRACE(("%s: Exit\n", __FUNCTION__));
773 /* Stop bus module: clear pending frames, disable data flow */
/* Mark bus down, disable and ack interrupts (read-then-write-back of
 * PCIIntstatus clears the asserted bits), kill the DPC when firmware wasn't
 * loaded at driver load, and wake anyone blocked on an ioctl response.
 */
774 void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
778 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
783 if (bus->dhd->busstate == DHD_BUS_DOWN) {
784 DHD_ERROR(("%s: already down by net_dev_reset\n", __FUNCTION__));
788 bus->dhd->busstate = DHD_BUS_DOWN;
789 dhdpcie_bus_intr_disable(bus);
790 status = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
791 dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status);
792 if (!dhd_download_fw_on_driverload)
793 dhd_dpc_kill(bus->dhd);
795 /* Clear rx control and wake any waiters */
797 dhd_os_ioctl_resp_wake(bus->dhd);
803 /* Watchdog timer function */
/* Periodic watchdog: when the bus is up and console polling is enabled
 * (dhd_console_ms != 0), accumulate elapsed time and drain the dongle
 * console via dhdpcie_bus_readconsole(); on read error, disable polling.
 */
804 bool dhd_bus_watchdog(dhd_pub_t *dhd)
812 /* Poll for console output periodically */
813 if (dhd->busstate == DHD_BUS_DATA && dhd_console_ms != 0) {
814 bus->console.count += dhd_watchdog_ms;
815 if (bus->console.count >= dhd_console_ms) {
816 bus->console.count -= dhd_console_ms;
817 /* Make sure backplane clock is on */
818 if (dhdpcie_bus_readconsole(bus) < 0)
819 dhd_console_ms = 0; /* On error, stop trying */
822 #endif /* DHD_DEBUG */
829 /* Download firmware image and nvram image */
/* Public entry: record the fw/nvram/config paths on the bus and delegate to
 * dhdpcie_download_firmware(); returns its status.
 */
831 dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
832 char *pfw_path, char *pnv_path, char *pconf_path)
836 bus->fw_path = pfw_path;
837 bus->nv_path = pnv_path;
838 bus->dhd->conf_path = pconf_path;
840 ret = dhdpcie_download_firmware(bus, osh);
/* Wake-locked wrapper: apply external config (which may override the
 * firmware name per chip), log the final paths, then run the actual
 * download via _dhdpcie_download_firmware().
 */
846 dhdpcie_download_firmware(struct dhd_bus *bus, osl_t *osh)
850 DHD_TRACE_HW4(("%s: firmware path=%s, nvram path=%s\n",
851 __FUNCTION__, bus->fw_path, bus->nv_path));
853 DHD_OS_WAKE_LOCK(bus->dhd);
855 /* External conf takes precedence if specified */
856 dhd_conf_preinit(bus->dhd);
857 dhd_conf_read_config(bus->dhd, bus->dhd->conf_path);
858 dhd_conf_set_fw_name_by_chip(bus->dhd, bus->fw_path);
860 printk("Final fw_path=%s\n", bus->fw_path);
861 printk("Final nv_path=%s\n", bus->nv_path);
862 printk("Final conf_path=%s\n", bus->dhd->conf_path);
864 ret = _dhdpcie_download_firmware(bus);
866 DHD_OS_WAKE_UNLOCK(bus->dhd);
/* Stream the firmware image file into dongle RAM in MEMBLOCK-sized chunks
 * via dhdpcie_bus_membytes(). For CR4 chips the first word (reset vector at
 * address 0) is stashed in bus->resetinstr and chunk offsets are rebased to
 * dongle_ram_base. Buffer is over-allocated by DHD_SDALIGN and the write
 * pointer aligned up. Cleanup frees the block and closes the image.
 * NOTE(review): the offset-advance and several braces/guards are missing
 * from this sampled extract.
 */
871 dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path)
877 uint8 *memblock = NULL, *memptr;
879 DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
881 /* Should succeed in opening image if it is actually given through registry
882 * entry or in module param.
884 image = dhd_os_open_image(pfw_path);
886 printk("%s: Open firmware file failed %s\n", __FUNCTION__, pfw_path);
890 memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
891 if (memblock == NULL) {
892 DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
895 if ((uint32)(uintptr)memblock % DHD_SDALIGN)
896 memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
899 while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, image))) {
901 DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
902 bcmerror = BCME_ERROR;
906 if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
907 /* if address is 0, store the reset instruction to be written in 0 */
910 bus->resetinstr = *(((uint32*)memptr));
911 /* Add start of RAM address to the address given by user */
912 offset += bus->dongle_ram_base;
916 bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, memptr, len);
918 DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
919 __FUNCTION__, bcmerror, MEMBLOCK, offset));
928 MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
931 dhd_os_close_image(image);
/* Download NVRAM variables to the dongle. Source is either the nvram file
 * (bus->nv_path) or embedded params (bus->nvram_params); file contents are
 * run through process_nvram_vars() and the length padded to a 4-byte
 * multiple before dhdpcie_downloadvars(). Returns a BCME_* status.
 * NOTE(review): several guards/braces and the early-return for "no nvram at
 * all" are missing from this sampled extract.
 */
938 dhdpcie_download_nvram(struct dhd_bus *bus)
943 char * memblock = NULL;
946 bool nvram_file_exists;
948 pnv_path = bus->nv_path;
950 nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
951 if (!nvram_file_exists && (bus->nvram_params == NULL))
954 if (nvram_file_exists) {
955 image = dhd_os_open_image(pnv_path);
957 printk("%s: Open nvram file failed %s\n", __FUNCTION__, pnv_path);
962 memblock = MALLOC(bus->dhd->osh, MAX_NVRAMBUF_SIZE);
963 if (memblock == NULL) {
964 DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
965 __FUNCTION__, MAX_NVRAMBUF_SIZE));
969 /* Download variables */
970 if (nvram_file_exists) {
971 len = dhd_os_get_image_block(memblock, MAX_NVRAMBUF_SIZE, image);
975 /* nvram is string with null terminated. cannot use strlen */
976 len = bus->nvram_params_len;
977 ASSERT(len <= MAX_NVRAMBUF_SIZE);
978 memcpy(memblock, bus->nvram_params, len);
980 if (len > 0 && len < MAX_NVRAMBUF_SIZE) {
981 bufp = (char *)memblock;
984 if (nvram_file_exists)
985 len = process_nvram_vars(bufp, len);
/* pad to 4-byte boundary for the dongle-side parser */
988 len += 4 - (len % 4);
993 bcmerror = dhdpcie_downloadvars(bus, memblock, len + 1);
995 DHD_ERROR(("%s: error downloading vars: %d\n",
996 __FUNCTION__, bcmerror));
1000 DHD_ERROR(("%s: error reading nvram file: %d\n",
1001 __FUNCTION__, len));
1002 bcmerror = BCME_ERROR;
1007 MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
1010 dhd_os_close_image(image);
1016 #ifdef BCMEMBEDIMAGE
/* Download the compiled-in firmware image (dlarray[]) to dongle RAM in
 * MEMBLOCK chunks, mirroring dhdpcie_download_code_file(); under DHD_DEBUG
 * it then uploads the image back and memcmp-verifies it.
 * NOTE(review): 'downloded_len'/'uploded_len' appear to be used before a
 * visible initialization -- presumably zeroed on lines omitted from this
 * sampled extract; TODO confirm against full source.
 */
1018 dhdpcie_download_code_array(struct dhd_bus *bus)
1022 unsigned char *p_dlarray = NULL;
1023 unsigned int dlarray_size = 0;
1024 unsigned int downloded_len, remaining_len, len;
1025 char *p_dlimagename, *p_dlimagever, *p_dlimagedate;
1026 uint8 *memblock = NULL, *memptr;
1032 p_dlarray = dlarray;
1033 dlarray_size = sizeof(dlarray);
1034 p_dlimagename = dlimagename;
1035 p_dlimagever = dlimagever;
1036 p_dlimagedate = dlimagedate;
1038 if ((p_dlarray == 0) || (dlarray_size == 0) ||(dlarray_size > bus->ramsize) ||
1039 (p_dlimagename == 0) || (p_dlimagever == 0) || (p_dlimagedate == 0))
1042 memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
1043 if (memblock == NULL) {
1044 DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
1047 if ((uint32)(uintptr)memblock % DHD_SDALIGN)
1048 memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
1050 while (downloded_len < dlarray_size) {
1051 remaining_len = dlarray_size - downloded_len;
1052 if (remaining_len >= MEMBLOCK)
1055 len = remaining_len;
1057 memcpy(memptr, (p_dlarray + downloded_len), len);
1059 if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
1060 /* if address is 0, store the reset instruction to be written in 0 */
1062 bus->resetinstr = *(((uint32*)memptr));
1063 /* Add start of RAM address to the address given by user */
1064 offset += bus->dongle_ram_base;
1067 bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len);
1068 downloded_len += len;
1070 DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
1071 __FUNCTION__, bcmerror, MEMBLOCK, offset));
1078 /* Upload and compare the downloaded code */
1080 unsigned char *ularray = NULL;
1081 unsigned int uploded_len;
1084 ularray = MALLOC(bus->dhd->osh, dlarray_size);
1085 if (ularray == NULL)
1087 /* Upload image to verify downloaded contents. */
1088 offset = bus->dongle_ram_base;
1089 memset(ularray, 0xaa, dlarray_size);
1090 while (uploded_len < dlarray_size) {
1091 remaining_len = dlarray_size - uploded_len;
1092 if (remaining_len >= MEMBLOCK)
1095 len = remaining_len;
1096 bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset,
1097 (uint8 *)(ularray + uploded_len), len);
1099 DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
1100 __FUNCTION__, bcmerror, MEMBLOCK, offset));
1108 if (memcmp(p_dlarray, ularray, dlarray_size)) {
1109 DHD_ERROR(("%s: Downloaded image is corrupted (%s, %s, %s).\n",
1110 __FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate));
1114 DHD_ERROR(("%s: Download, Upload and compare succeeded (%s, %s, %s).\n",
1115 __FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate));
1118 MFREE(bus->dhd->osh, ularray, dlarray_size);
1120 #endif /* DHD_DEBUG */
1124 MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
1128 #endif /* BCMEMBEDIMAGE */
/* Core download sequence: hold ARM in reset, download the external image
 * (falling back to the embedded image under BCMEMBEDIMAGE), then NVRAM,
 * then release ARM from reset. Returns BCME_*-style status.
 * NOTE(review): error string at original line 1144 misspells "firmware" as
 * "fimrware" -- runtime text, left unchanged in this doc-only pass.
 */
1132 _dhdpcie_download_firmware(struct dhd_bus *bus)
1136 bool embed = FALSE; /* download embedded firmware */
1137 bool dlok = FALSE; /* download firmware succeeded */
1139 /* Out immediately if no image to download */
1140 if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
1141 #ifdef BCMEMBEDIMAGE
1144 DHD_ERROR(("%s: no fimrware file\n", __FUNCTION__));
1149 /* Keep arm in reset */
1150 if (dhdpcie_bus_download_state(bus, TRUE)) {
1151 DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__));
1155 /* External image takes precedence if specified */
1156 if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
1157 if (dhdpcie_download_code_file(bus, bus->fw_path)) {
1158 DHD_ERROR(("%s: dongle image file download failed\n", __FUNCTION__));
1159 #ifdef BCMEMBEDIMAGE
1171 #ifdef BCMEMBEDIMAGE
1173 if (dhdpcie_download_code_array(bus)) {
1174 DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__));
1182 BCM_REFERENCE(embed);
1185 DHD_ERROR(("%s: dongle image download failed\n", __FUNCTION__));
1189 /* EXAMPLE: nvram_array */
1190 /* If a valid nvram_arry is specified as above, it can be passed down to dongle */
1191 /* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
1194 /* External nvram takes precedence if specified */
1195 if (dhdpcie_download_nvram(bus)) {
1196 DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__));
1200 /* Take arm out of reset */
1201 if (dhdpcie_bus_download_state(bus, FALSE)) {
1202 DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__));
/* Block until an ioctl/control response arrives (or times out), copy it
 * into 'msg', and maintain rx control counters. Returns the response length,
 * -ERESTARTSYS on signal, -ETIMEDOUT after MAX_CNTL_TX_TIMEOUT timeouts or
 * on dongle trap (per the guards below), or -EIO when no data.
 * NOTE(review): rxlen assignment and some returns are on lines omitted from
 * this sampled extract.
 */
1212 int dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen)
1218 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
1220 if (bus->dhd->dongle_reset)
1223 /* Wait until control frame is available */
1224 timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, &pending);
1226 bcopy(&bus->ioct_resp, msg, MIN(rxlen, sizeof(ioctl_comp_resp_msg_t)));
1230 DHD_CTL(("%s: resumed on rxctl frame, got %d\n", __FUNCTION__, rxlen));
1231 } else if (timeleft == 0) {
1232 DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__));
1233 bus->ioct_resp.cmn_hdr.request_id = 0;
1234 bus->ioct_resp.compl_hdr.status = 0xffff;
1235 bus->dhd->rxcnt_timeout++;
1236 DHD_ERROR(("%s: rxcnt_timeout=%d\n", __FUNCTION__, bus->dhd->rxcnt_timeout));
1237 } else if (pending == TRUE) {
1238 DHD_CTL(("%s: canceled\n", __FUNCTION__));
1239 return -ERESTARTSYS;
1241 DHD_CTL(("%s: resumed for unknown reason?\n", __FUNCTION__));
1245 bus->dhd->rxcnt_timeout = 0;
1248 bus->dhd->rx_ctlpkts++;
1250 bus->dhd->rx_ctlerrs++;
1252 if (bus->dhd->rxcnt_timeout >= MAX_CNTL_TX_TIMEOUT)
1255 if (bus->dhd->dongle_trap_occured)
1258 return rxlen ? (int)rxlen : -EIO;
1262 #define CONSOLE_LINE_MAX 192
/* Pull the dongle's in-memory console ring into the host and print complete
 * lines via printf("CONSOLE: ..."). Reads the hnd_cons_t log header, lazily
 * allocates a host-side shadow buffer of log.buf_size, validates the write
 * index, copies the ring, then emits text between c->last and idx, deferring
 * partial lines to the next poll. Returns <0 on membytes failure.
 * NOTE(review): line-termination handling ('\0' store, idx==c->last skip)
 * sits on lines omitted from this sampled extract.
 */
1266 dhdpcie_bus_readconsole(dhd_bus_t *bus)
1268 dhd_console_t *c = &bus->console;
1269 uint8 line[CONSOLE_LINE_MAX], ch;
1270 uint32 n, idx, addr;
1273 /* Don't do anything until FWREADY updates console address */
1274 if (bus->console_addr == 0)
1277 /* Read console log struct */
1278 addr = bus->console_addr + OFFSETOF(hnd_cons_t, log);
1280 if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0)
1283 /* Allocate console buffer (one time only) */
1284 if (c->buf == NULL) {
1285 c->bufsize = ltoh32(c->log.buf_size);
1286 if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL)
1289 idx = ltoh32(c->log.idx);
1291 /* Protect against corrupt value */
1292 if (idx > c->bufsize)
1295 /* Skip reading the console buffer if the index pointer has not moved */
1299 /* Read the console buffer */
1300 addr = ltoh32(c->log.buf);
1301 if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, c->buf, c->bufsize)) < 0)
1304 while (c->last != idx) {
1305 for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
1306 if (c->last == idx) {
1307 /* This would output a partial line. Instead, back up
1308 * the buffer pointer and output this line next time around.
1313 c->last = c->bufsize - n;
1316 ch = c->buf[c->last];
1317 c->last = (c->last + 1) % c->bufsize;
/* strip trailing carriage return before printing */
1324 if (line[n - 1] == '\r')
1327 printf("CONSOLE: %s\n", line);
1336 dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size)
1340 char *mbuffer = NULL;
1341 char *console_buffer = NULL;
1342 uint maxstrlen = 256;
1345 pciedev_shared_t *pciedev_shared = bus->pcie_sh;
1346 struct bcmstrbuf strbuf;
1347 uint32 console_ptr, console_size, console_index;
1348 uint8 line[CONSOLE_LINE_MAX], ch;
1352 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
1354 if (DHD_NOCHECKDIED_ON())
1359 * Called after a rx ctrl timeout. "data" is NULL.
1360 * allocate memory to trace the trap or assert.
1363 mbuffer = data = MALLOC(bus->dhd->osh, msize);
1365 if (mbuffer == NULL) {
1366 DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
1367 bcmerror = BCME_NOMEM;
1372 if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
1373 DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
1374 bcmerror = BCME_NOMEM;
1378 if ((bcmerror = dhdpcie_readshared(bus)) < 0)
1381 bcm_binit(&strbuf, data, size);
1383 bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address : 0x%08X\n",
1384 pciedev_shared->msgtrace_addr, pciedev_shared->console_addr);
1386 if ((pciedev_shared->flags & PCIE_SHARED_ASSERT_BUILT) == 0) {
1387 /* NOTE: Misspelled assert is intentional - DO NOT FIX.
1388 * (Avoids conflict with real asserts for programmatic parsing of output.)
1390 bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
1393 if ((bus->pcie_sh->flags & (PCIE_SHARED_ASSERT|PCIE_SHARED_TRAP)) == 0) {
1394 /* NOTE: Misspelled assert is intentional - DO NOT FIX.
1395 * (Avoids conflict with real asserts for programmatic parsing of output.)
1397 bcm_bprintf(&strbuf, "No trap%s in dongle",
1398 (bus->pcie_sh->flags & PCIE_SHARED_ASSERT_BUILT)
1401 if (bus->pcie_sh->flags & PCIE_SHARED_ASSERT) {
1402 /* Download assert */
1403 bcm_bprintf(&strbuf, "Dongle assert");
1404 if (bus->pcie_sh->assert_exp_addr != 0) {
1406 if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
1407 bus->pcie_sh->assert_exp_addr,
1408 (uint8 *)str, maxstrlen)) < 0)
1411 str[maxstrlen - 1] = '\0';
1412 bcm_bprintf(&strbuf, " expr \"%s\"", str);
1415 if (bus->pcie_sh->assert_file_addr != 0) {
1417 if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
1418 bus->pcie_sh->assert_file_addr,
1419 (uint8 *)str, maxstrlen)) < 0)
1422 str[maxstrlen - 1] = '\0';
1423 bcm_bprintf(&strbuf, " file \"%s\"", str);
1426 bcm_bprintf(&strbuf, " line %d ", bus->pcie_sh->assert_line);
1429 if (bus->pcie_sh->flags & PCIE_SHARED_TRAP) {
1430 bus->dhd->dongle_trap_occured = TRUE;
1431 if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
1432 bus->pcie_sh->trap_addr,
1433 (uint8*)&tr, sizeof(trap_t))) < 0)
1436 bcm_bprintf(&strbuf,
1437 "Dongle trap type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
1438 "lp 0x%x, rpc 0x%x Trap offset 0x%x, "
1439 "r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
1440 "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n",
1441 ltoh32(tr.type), ltoh32(tr.epc), ltoh32(tr.cpsr), ltoh32(tr.spsr),
1442 ltoh32(tr.r13), ltoh32(tr.r14), ltoh32(tr.pc),
1443 ltoh32(bus->pcie_sh->trap_addr),
1444 ltoh32(tr.r0), ltoh32(tr.r1), ltoh32(tr.r2), ltoh32(tr.r3),
1445 ltoh32(tr.r4), ltoh32(tr.r5), ltoh32(tr.r6), ltoh32(tr.r7));
1447 addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log);
1448 if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
1449 (uint8 *)&console_ptr, sizeof(console_ptr))) < 0)
1452 addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.buf_size);
1453 if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
1454 (uint8 *)&console_size, sizeof(console_size))) < 0)
1457 addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.idx);
1458 if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
1459 (uint8 *)&console_index, sizeof(console_index))) < 0)
1462 console_ptr = ltoh32(console_ptr);
1463 console_size = ltoh32(console_size);
1464 console_index = ltoh32(console_index);
1466 if (console_size > CONSOLE_BUFFER_MAX ||
1467 !(console_buffer = MALLOC(bus->dhd->osh, console_size)))
1470 if ((rv = dhdpcie_bus_membytes(bus, FALSE, console_ptr,
1471 (uint8 *)console_buffer, console_size)) < 0)
1474 for (i = 0, n = 0; i < console_size; i += n + 1) {
1475 for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
1476 ch = console_buffer[(console_index + i + n) % console_size];
1484 if (line[n - 1] == '\r')
1487 /* Don't use DHD_ERROR macro since we print
1488 * a lot of information quickly. The macro
1489 * will truncate a lot of the printfs
1492 if (dhd_msg_level & DHD_ERROR_VAL)
1493 printf("CONSOLE: %s\n", line);
1500 if (bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) {
1501 DHD_ERROR(("%s: %s\n", __FUNCTION__, strbuf.origbuf));
1506 MFREE(bus->dhd->osh, mbuffer, msize);
1508 MFREE(bus->dhd->osh, str, maxstrlen);
1511 MFREE(bus->dhd->osh, console_buffer, console_size);
1515 #endif /* DHD_DEBUG */
/*
 * Read or write 'size' bytes of dongle TCM over the backplane using
 * programmed I/O.  write=TRUE copies host->dongle, FALSE dongle->host.
 * Uses 64-bit accesses while size allows (little-endian hosts only),
 * then falls back to byte accesses for the remainder.
 */
1519 * Transfers bytes from host to dongle using pio mode.
1520 * Parameter 'address' is a backplane address.
1523 dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size)
1527 int detect_endian_flag = 0x01;
1529 #ifdef CONFIG_ARCH_MSM8994
1530 bool is_64bit_unaligned;
/* Run-time endianness probe: on a little-endian host the low byte of
 * 0x01 is the first byte in memory, so little_endian becomes 1.
 */
1533 /* Detect endianness. */
1534 little_endian = *(char *)&detect_endian_flag;
1536 #ifdef CONFIG_ARCH_MSM8994
1537 /* Check 64bit aligned or not. */
1538 is_64bit_unaligned = (address & 0x7);
1540 /* In remap mode, adjust address beyond socram and redirect
1541 * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize
1542 * is not backplane accessible
1545 /* Determine initial transfer parameters */
1546 dsize = sizeof(uint64);
1548 /* Do the transfer(s) */
1551 if (size >= sizeof(uint64) && little_endian) {
1552 #ifdef CONFIG_ARCH_MSM8994
/* MSM8994: a 64-bit access at a non-8-byte-aligned address is replaced
 * by a 32-bit access; alignment is re-evaluated for the next pass.
 */
1553 if (is_64bit_unaligned) {
1554 DHD_INFO(("%s: write unaligned %lx\n",
1555 __FUNCTION__, address));
1556 dhdpcie_bus_wtcm32(bus, address, *((uint32 *)data));
1560 is_64bit_unaligned = (address & 0x7);
1565 dhdpcie_bus_wtcm64(bus, address, *((uint64 *)data));
1567 dsize = sizeof(uint8);
1568 dhdpcie_bus_wtcm8(bus, address, *data);
1571 /* Adjust for next transfer (if any) */
1572 if ((size -= dsize)) {
/* Read path: mirrors the write path above, direction reversed. */
1579 if (size >= sizeof(uint64) && little_endian) {
1580 #ifdef CONFIG_ARCH_MSM8994
1581 if (is_64bit_unaligned) {
1582 DHD_INFO(("%s: read unaligned %lx\n",
1583 __FUNCTION__, address));
1584 *(uint32 *)data = dhdpcie_bus_rtcm32(bus, address);
1588 is_64bit_unaligned = (address & 0x7);
1593 *(uint64 *)data = dhdpcie_bus_rtcm64(bus, address);
1595 dsize = sizeof(uint8);
1596 *data = dhdpcie_bus_rtcm8(bus, address);
1599 /* Adjust for next transfer (if any) */
1600 if ((size -= dsize) > 0) {
/*
 * Drain packets queued on flow ring 'flow_id' into the dongle via the
 * message-buffer protocol, under the flow-ring lock.  Returns
 * BCME_NOTREADY if the ring is not OPEN.  A packet the protocol layer
 * cannot take is re-inserted at the head of the queue for a later retry.
 */
1610 dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs)
1612 flow_ring_node_t *flow_ring_node;
1615 DHD_INFO(("%s: flow_id is %d\n", __FUNCTION__, flow_id));
1616 /* ASSERT on flow_id */
1617 if (flow_id >= bus->max_sub_queues) {
1618 DHD_ERROR(("%s: flow_id is invalid %d, max %d\n", __FUNCTION__,
1619 flow_id, bus->max_sub_queues));
1623 flow_ring_node = DHD_FLOW_RING(bus->dhd, flow_id);
1626 unsigned long flags;
1628 flow_queue_t *queue;
1630 queue = &flow_ring_node->queue; /* queue associated with flow ring */
1632 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
/* Only an OPEN ring may transmit. */
1634 if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
1635 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
1636 return BCME_NOTREADY;
1639 while ((txp = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
1642 #ifdef DHDTCPACK_SUPPRESS
1643 if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_HOLD) {
1644 dhd_tcpack_check_xmit(bus->dhd, txp);
1646 #endif /* DHDTCPACK_SUPPRESS */
1647 /* Attempt to transfer packet over flow ring */
1649 ret = dhd_prot_txdata(bus->dhd, txp, flow_ring_node->flow_info.ifindex);
/* Ring has no room: flush what was already posted, put this packet back
 * at the head of the queue and stop draining.
 * ("Reinserrt" typo left as-is: it is a runtime log string.)
 */
1650 if (ret != BCME_OK) { /* may not have resources in flow ring */
1651 DHD_INFO(("%s: Reinserrt %d\n", __FUNCTION__, ret));
1652 dhd_prot_txdata_write_flush(bus->dhd, flow_id, FALSE);
1653 /* reinsert at head */
1654 dhd_flow_queue_reinsert(bus->dhd, queue, txp);
1655 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
1657 /* If we are able to requeue back, return success */
1662 dhd_prot_txdata_write_flush(bus->dhd, flow_id, FALSE);
1664 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
1670 #ifndef PCIE_TX_DEFERRAL
/*
 * Enqueue 'txp' on its flow ring (flowid taken from the packet tag) and
 * kick the ring.  This function consumes txp: failure paths free it at
 * the bottom.  In txmode_push mode the packet bypasses flow rings and
 * goes straight to the protocol layer.
 */
1671 /* Send a data frame to the dongle. Callee disposes of txp. */
1673 dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx)
1675 unsigned long flags;
1677 void *txp_pend = NULL;
1678 if (!bus->txmode_push) {
1680 flow_queue_t *queue;
1681 flow_ring_node_t *flow_ring_node;
1682 if (!bus->dhd->flowid_allocator) {
1683 DHD_ERROR(("%s: Flow ring not intited yet \n", __FUNCTION__));
1687 flowid = DHD_PKTTAG_FLOWID((dhd_pkttag_fr_t*)PKTTAG(txp));
/* NOTE(review): the flow ring table is indexed here BEFORE the
 * flowid >= num_flow_rings bounds check below; an out-of-range flowid
 * would read past the table.  Confirm and hoist the range check.
 */
1689 flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
1691 DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n",
1692 __FUNCTION__, flowid, flow_ring_node->status,
1693 flow_ring_node->active));
1695 if ((flowid >= bus->dhd->num_flow_rings) ||
1696 (!flow_ring_node->active) ||
1697 (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING)) {
1698 DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n",
1699 __FUNCTION__, flowid, flow_ring_node->status,
1700 flow_ring_node->active));
1705 queue = &flow_ring_node->queue; /* queue associated with flow ring */
1707 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
1709 if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK)
1712 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
/* Ring not yet OPEN (status non-zero): leave the packet queued; it will
 * be drained when the ring opens.
 */
1714 if (flow_ring_node->status) {
1715 DHD_INFO(("%s: Enq pkt flowid %d, status %d active %d\n",
1716 __FUNCTION__, flowid, flow_ring_node->status,
1717 flow_ring_node->active));
1724 ret = dhd_bus_schedule_queue(bus, flowid, FALSE);
1726 /* If we have anything pending, try to push into q */
1728 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
1730 if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp_pend)) != BCME_OK) {
1731 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
1736 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
1741 } else { /* bus->txmode_push */
1742 return dhd_prot_txdata(bus->dhd, txp, ifidx);
/* Common failure exit: drop and free the packet. */
1746 DHD_INFO(("%s: Toss %d\n", __FUNCTION__, ret));
1747 PKTCFREE(bus->dhd->osh, txp, TRUE);
1750 #else /* PCIE_TX_DEFERRAL */
/*
 * PCIE_TX_DEFERRAL variant: look up the flow by (ifidx, priority,
 * src/dst MAC).  If no flow ring exists yet (or one is still being
 * created), park the packet on orphan_list and schedule flow-ring
 * creation on the tx workqueue; otherwise enqueue and kick the ring.
 * Failure paths free txp at the bottom.
 */
1752 dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx)
1754 unsigned long flags;
1757 flow_queue_t *queue;
1758 flow_ring_node_t *flow_ring_node;
1759 uint8 *pktdata = (uint8 *)PKTDATA(bus->dhd->osh, txp);
1760 struct ether_header *eh = (struct ether_header *)pktdata;
1762 if (!bus->dhd->flowid_allocator) {
1763 DHD_ERROR(("%s: Flow ring not intited yet \n", __FUNCTION__));
1767 flowid = dhd_flowid_find(bus->dhd, ifidx,
1768 bus->dhd->flow_prio_map[(PKTPRIO(txp))],
1769 eh->ether_shost, eh->ether_dhost);
/* No ring for this flow yet: tag the packet with its ifidx, defer it to
 * the orphan list and let the workqueue create the ring.
 */
1770 if (flowid == FLOWID_INVALID) {
1771 DHD_PKTTAG_SET_FLOWID((dhd_pkttag_fr_t *)PKTTAG(txp), ifidx);
1772 skb_queue_tail(&bus->orphan_list, txp);
1773 queue_work(bus->tx_wq, &bus->create_flow_work);
1777 DHD_PKTTAG_SET_FLOWID((dhd_pkttag_fr_t *)PKTTAG(txp), flowid);
/* NOTE(review): as in the non-deferral variant, the ring table is
 * indexed before the flowid range check below -- confirm ordering.
 */
1778 flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
1779 queue = &flow_ring_node->queue; /* queue associated with flow ring */
1781 DHD_DATA(("%s: pkt flowid %d, status %d active %d\n",
1782 __FUNCTION__, flowid, flow_ring_node->status,
1783 flow_ring_node->active));
1785 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
1786 if ((flowid >= bus->dhd->num_flow_rings) ||
1787 (!flow_ring_node->active) ||
1788 (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING)) {
1789 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
1790 DHD_DATA(("%s: Dropping pkt flowid %d, status %d active %d\n",
1791 __FUNCTION__, flowid, flow_ring_node->status,
1792 flow_ring_node->active));
/* Ring creation still in flight: defer this packet too. */
1797 if (flow_ring_node->status == FLOW_RING_STATUS_PENDING) {
1798 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
1799 DHD_PKTTAG_SET_FLOWID((dhd_pkttag_fr_t *)PKTTAG(txp), ifidx);
1800 skb_queue_tail(&bus->orphan_list, txp);
1801 queue_work(bus->tx_wq, &bus->create_flow_work);
1805 if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK) {
1806 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
1810 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
1812 ret = dhd_bus_schedule_queue(bus, flowid, FALSE);
/* Common failure exit: drop and free the packet. */
1817 DHD_DATA(("%s: Toss %d\n", __FUNCTION__, ret));
1818 PKTCFREE(bus->dhd->osh, txp, TRUE);
1821 #endif /* !PCIE_TX_DEFERRAL */
/* Assert tx flow control: stop host net queues on all interfaces and
 * record bus-level flow control as active.
 */
1825 dhd_bus_stop_queue(struct dhd_bus *bus)
1827 dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
1828 bus->bus_flowctrl = TRUE;
1832 dhd_bus_start_queue(struct dhd_bus *bus)
1834 dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
1835 bus->bus_flowctrl = TRUE;
/* Record an ioctl completion reported by the dongle -- total returned
 * length, the request id it answers, its completion status and response
 * length -- for the thread waiting on the ioctl response.
 */
1839 dhd_bus_update_retlen(dhd_bus_t *bus, uint32 retlen, uint32 pkt_id, uint16 status,
1842 bus->rxlen = retlen;
1843 bus->ioct_resp.cmn_hdr.request_id = pkt_id;
1844 bus->ioct_resp.compl_hdr.status = status;
1845 bus->ioct_resp.resp_len = (uint16)resp_len;
1848 #if defined(DHD_DEBUG)
1849 /* Device console input function */
/*
 * Copy 'msg' into the dongle console input buffer (cbuf) in TCM, write
 * its length to vcons_in and interrupt the dongle so it processes the
 * command.  Returns BCME_UNSUPPORTED when the dongle image was built
 * without a console, BCME_NOTREADY while the dongle is in reset.
 */
1850 int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
1852 dhd_bus_t *bus = dhd->bus;
1855 /* Address could be zero if CONSOLE := 0 in dongle Makefile */
1856 if (bus->console_addr == 0)
1857 return BCME_UNSUPPORTED;
1859 /* Don't allow input if dongle is in reset */
/* NOTE(review): dhd_os_sdunlock() without a visible matching sdlock on
 * this path looks like an SDIO-bus leftover -- confirm it is a no-op
 * for PCIe builds.
 */
1860 if (bus->dhd->dongle_reset) {
1861 dhd_os_sdunlock(bus->dhd);
1862 return BCME_NOTREADY;
1865 /* Zero cbuf_index */
1866 addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx);
1868 if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
1871 /* Write message into cbuf */
1872 addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf);
1873 if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0)
1876 /* Write length into vcons_in */
1877 addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in);
1878 val = htol32(msglen);
1879 if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
1882 /* generate an interrupt to dongle to indicate that it needs to process cons command */
1883 dhdpcie_send_mb_data(bus, H2D_HOST_CONS_INT);
1887 #endif /* defined(DHD_DEBUG) */
1889 /* Process rx frame , Send up the layer to netif */
/* Hand a received packet (chain of pkt_count) up to the network stack. */
1891 dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count)
1893 dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, 0);
1896 #ifdef CONFIG_ARCH_MSM8994
/*
 * Map a TCM offset into the currently programmed BAR1 window.  When the
 * offset falls outside the window in use, reprogram the window base via
 * a PCI config-space write and return the offset relative to that base.
 */
1897 static ulong dhd_bus_cmn_check_offset(dhd_bus_t *bus, ulong offset)
1899 uint new_bar1_wbase = 0;
1902 new_bar1_wbase = (uint)offset & bus->bar1_win_mask;
1903 if (bus->bar1_win_base != new_bar1_wbase) {
1904 bus->bar1_win_base = new_bar1_wbase;
1905 dhdpcie_bus_cfg_set_bar1_win(bus, bus->bar1_win_base);
1906 DHD_ERROR(("%s: offset=%lx, switch bar1_win_base to %x\n",
1907 __FUNCTION__, offset, bus->bar1_win_base));
1910 address = offset - bus->bar1_win_base;
/* Non-MSM8994 builds: BAR1 is assumed to cover all of TCM, so the
 * offset is used unchanged.
 */
1915 #define dhd_bus_cmn_check_offset(x, y) y
1916 #endif /* CONFIG_ARCH_MSM8994 */
1918 /** 'offset' is a backplane address */
/* 8-bit write into dongle TCM through the host's BAR1 mapping. */
1920 dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data)
1922 *(volatile uint8 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)) = (uint8)data;
/* 8-bit read from dongle TCM.  Under BCM47XX_ACP_WAR the access goes
 * through R_REG -- a platform workaround; exact rationale not visible
 * here.
 */
1926 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset)
1928 volatile uint8 data;
1929 #ifdef BCM47XX_ACP_WAR
1930 data = R_REG(bus->dhd->osh,
1931 (volatile uint8 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)));
1933 data = *(volatile uint8 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset));
/* 32-bit write into dongle TCM through the host's BAR1 mapping. */
1939 dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data)
1941 *(volatile uint32 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)) = (uint32)data;
/* 16-bit write into dongle TCM through the host's BAR1 mapping. */
1944 dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data)
1946 *(volatile uint16 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)) = (uint16)data;
/* 64-bit write into dongle TCM through the host's BAR1 mapping. */
1949 dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
1951 *(volatile uint64 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)) = (uint64)data;
/* 16-bit read from dongle TCM (R_REG under BCM47XX_ACP_WAR). */
1955 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset)
1957 volatile uint16 data;
1958 #ifdef BCM47XX_ACP_WAR
1959 data = R_REG(bus->dhd->osh,
1960 (volatile uint16 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)));
1962 data = *(volatile uint16 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset));
/* 32-bit read from dongle TCM (R_REG under BCM47XX_ACP_WAR). */
1968 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset)
1970 volatile uint32 data;
1971 #ifdef BCM47XX_ACP_WAR
1972 data = R_REG(bus->dhd->osh,
1973 (volatile uint32 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)));
1975 data = *(volatile uint32 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset));
/* 64-bit read from dongle TCM (R_REG under BCM47XX_ACP_WAR). */
1981 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset)
1983 volatile uint64 data;
1984 #ifdef BCM47XX_ACP_WAR
1985 data = R_REG(bus->dhd->osh,
1986 (volatile uint64 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)));
1988 data = *(volatile uint64 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset));
/*
 * Write a host-side value into the host/dongle shared area in TCM.
 * 'type' selects the shared field, 'ringid' the ring for per-ring
 * fields.  Values are converted to little-endian (HTOL64/32/16) before
 * the TCM write.  'sh' holds the dongle-side TCM address of the shared
 * structure; taking a member's address yields that member's TCM offset
 * -- it is never dereferenced as a host pointer.
 */
1994 dhd_bus_cmn_writeshared(dhd_bus_t *bus, void * data, uint32 len, uint8 type, uint16 ringid)
1998 pciedev_shared_t *sh;
1999 pciedev_shared_t *shmem = NULL;
2001 sh = (pciedev_shared_t*)bus->shared_addr;
2003 DHD_INFO(("%s: writing to msgbuf type %d, len %d\n", __FUNCTION__, type, len));
2006 case DNGL_TO_HOST_DMA_SCRATCH_BUFFER:
2007 long_data = HTOL64(*(uint64 *)data);
2008 tcm_offset = (ulong)&(sh->host_dma_scratch_buffer);
2009 dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
2010 prhex(__FUNCTION__, data, len);
2013 case DNGL_TO_HOST_DMA_SCRATCH_BUFFER_LEN :
2014 tcm_offset = (ulong)&(sh->host_dma_scratch_buffer_len);
2015 dhdpcie_bus_wtcm32(bus, tcm_offset, (uint32) HTOL32(*(uint32 *)data));
2016 prhex(__FUNCTION__, data, len);
/* The four DMA index-buffer cases below are structurally identical:
 * locate ring_info_t via the shared area's rings_info_ptr and write the
 * 64-bit host address of the corresponding index array.
 */
2019 case HOST_TO_DNGL_DMA_WRITEINDX_BUFFER:
2020 /* ring_info_ptr stored in pcie_sh */
2021 shmem = (pciedev_shared_t *)bus->pcie_sh;
2023 long_data = HTOL64(*(uint64 *)data);
2024 tcm_offset = (ulong)shmem->rings_info_ptr;
2025 tcm_offset += OFFSETOF(ring_info_t, h2d_w_idx_hostaddr);
2026 dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
2027 prhex(__FUNCTION__, data, len);
2030 case HOST_TO_DNGL_DMA_READINDX_BUFFER:
2031 /* ring_info_ptr stored in pcie_sh */
2032 shmem = (pciedev_shared_t *)bus->pcie_sh;
2034 long_data = HTOL64(*(uint64 *)data);
2035 tcm_offset = (ulong)shmem->rings_info_ptr;
2036 tcm_offset += OFFSETOF(ring_info_t, h2d_r_idx_hostaddr);
2037 dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
2038 prhex(__FUNCTION__, data, len);
2041 case DNGL_TO_HOST_DMA_WRITEINDX_BUFFER:
2042 /* ring_info_ptr stored in pcie_sh */
2043 shmem = (pciedev_shared_t *)bus->pcie_sh;
2045 long_data = HTOL64(*(uint64 *)data);
2046 tcm_offset = (ulong)shmem->rings_info_ptr;
2047 tcm_offset += OFFSETOF(ring_info_t, d2h_w_idx_hostaddr);
2048 dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
2049 prhex(__FUNCTION__, data, len);
2052 case DNGL_TO_HOST_DMA_READINDX_BUFFER:
2053 /* ring_info_ptr stored in pcie_sh */
2054 shmem = (pciedev_shared_t *)bus->pcie_sh;
2056 long_data = HTOL64(*(uint64 *)data);
2057 tcm_offset = (ulong)shmem->rings_info_ptr;
2058 tcm_offset += OFFSETOF(ring_info_t, d2h_r_idx_hostaddr);
2059 dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
2060 prhex(__FUNCTION__, data, len);
/* Per-ring ring_mem_t fields, addressed via the cached ring_mem_addr. */
2063 case RING_LEN_ITEMS :
2064 tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
2065 tcm_offset += OFFSETOF(ring_mem_t, len_items);
2066 dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data));
2069 case RING_MAX_ITEM :
2070 tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
2071 tcm_offset += OFFSETOF(ring_mem_t, max_item);
2072 dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data));
2075 case RING_BUF_ADDR :
2076 long_data = HTOL64(*(uint64 *)data);
2077 tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
2078 tcm_offset += OFFSETOF(ring_mem_t, base_addr);
2079 dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8 *) &long_data, len);
2080 prhex(__FUNCTION__, data, len);
/* Per-ring read/write index words in TCM. */
2083 case RING_WRITE_PTR :
2084 tcm_offset = bus->ring_sh[ringid].ring_state_w;
2085 dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data));
2087 case RING_READ_PTR :
2088 tcm_offset = bus->ring_sh[ringid].ring_state_r;
2089 dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data));
/* Host<->dongle mailbox data words. */
2093 dhdpcie_bus_wtcm32(bus, bus->d2h_mb_data_ptr_addr,
2094 (uint32) HTOL32(*(uint32 *)data));
2098 dhdpcie_bus_wtcm32(bus, bus->h2d_mb_data_ptr_addr,
2099 (uint32) HTOL32(*(uint32 *)data));
/*
 * Read a value from the host/dongle shared area in TCM into 'data'.
 * 'type' selects the field, 'ringid' the ring for per-ring indices.
 * Values are converted from little-endian (LTOH16/32) after the read.
 */
2108 dhd_bus_cmn_readshared(dhd_bus_t *bus, void* data, uint8 type, uint16 ringid)
2110 pciedev_shared_t *sh;
2113 sh = (pciedev_shared_t*)bus->shared_addr;
2116 case RING_WRITE_PTR :
2117 tcm_offset = bus->ring_sh[ringid].ring_state_w;
2118 *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, tcm_offset));
2120 case RING_READ_PTR :
2121 tcm_offset = bus->ring_sh[ringid].ring_state_r;
2122 *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, tcm_offset));
2124 case TOTAL_LFRAG_PACKET_CNT :
2125 *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus,
2126 (ulong) &sh->total_lfrag_pkt_cnt));
/* Mailbox data words (h2d / d2h). */
2129 *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, bus->h2d_mb_data_ptr_addr));
2132 *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, bus->d2h_mb_data_ptr_addr));
2134 case MAX_HOST_RXBUFS :
2135 *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus,
2136 (ulong) &sh->max_host_rxbufs));
/* Return the flags word from the cached pcie shared structure. */
2143 uint32 dhd_bus_get_sharedflags(dhd_bus_t *bus)
2145 return ((pciedev_shared_t*)bus->pcie_sh)->flags;
/* Clear bus statistics counters.  NOTE(review): body not visible here;
 * presumably empty/no-op for PCIe -- confirm.
 */
2149 dhd_bus_clearcounts(dhd_pub_t *dhdp)
/*
 * Entry point for bus-level iovars: look up 'name' in the PCIe iovar
 * table and dispatch the get/set to dhdpcie_bus_doiovar().  Gets must
 * supply return space in arg; sets carry no params/plen qualifiers.
 */
2154 dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
2155 void *params, int plen, void *arg, int len, bool set)
2157 dhd_bus_t *bus = dhdp->bus;
2158 const bcm_iovar_t *vi = NULL;
2163 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2168 /* Get MUST have return space */
2169 ASSERT(set || (arg && len));
2171 /* Set does NOT take qualifiers */
2172 ASSERT(!set || (!params && !plen));
2174 DHD_INFO(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
2175 name, (set ? "set" : "get"), len, plen));
2177 /* Look up var locally; if not found pass to host driver */
2178 if ((vi = bcm_iovar_lookup(dhdpcie_iovars, name)) == NULL) {
2183 /* set up 'params' pointer in case this is a set command so that
2184 * the convenience int and bool code can be common to set and get
2186 if (params == NULL) {
/* Determine the value size doiovar should copy for this iovar type. */
2191 if (vi->type == IOVT_VOID)
2193 else if (vi->type == IOVT_BUFFER)
2196 /* all other types are integer sized */
2197 val_size = sizeof(int);
2199 actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
2200 bcmerror = dhdpcie_bus_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
2207 #include <bcm_buzzz.h>
/*
 * Format the three CR4 performance-counter deltas for one BUZZZ log
 * entry into 'p'.  'core' carries the previous raw counter values and
 * is updated in place; 'ovhd' is the per-counter instrumentation
 * overhead subtracted from each delta.  Returns bytes written.
 */
2209 int dhd_buzzz_dump_cntrs3(char *p, uint32 *core, uint32 * ovhd, uint32 *log)
2212 uint32 ctr, curr[3], prev[3], delta[3];
2214 /* Compute elapsed counter values per counter event type */
2215 for (ctr = 0U; ctr < 3; ctr++) {
2216 prev[ctr] = core[ctr];
2218 core[ctr] = curr[ctr]; /* saved for next log */
/* NOTE(review): the wrap branch yields curr + (~0U - prev), which is
 * one LESS than the true modular difference; plain unsigned
 * (curr - prev) already wraps correctly.  Confirm intent.
 */
2220 if (curr[ctr] < prev[ctr])
2221 delta[ctr] = curr[ctr] + (~0U - prev[ctr]);
2223 delta[ctr] = (curr[ctr] - prev[ctr]);
2225 /* Adjust for instrumentation overhead */
2226 if (delta[ctr] >= ovhd[ctr])
2227 delta[ctr] -= ovhd[ctr];
2231 bytes += sprintf(p + bytes, "%12u ", delta[ctr]);
2237 typedef union cm3_cnts { /* export this in bcm_buzzz.h */
/*
 * Format the CM3 counter set for one BUZZZ log entry: a 32-bit cycle
 * count, four packed 8-bit counters (cpi/exc/sleep/lsu) and a fold
 * count from arg0.  Derives an approximate instruction count as
 * cyccnt - (cpi+exc+sleep+lsu) + foldcnt.  Updates 'core' with the raw
 * values for the next entry; returns bytes written to 'p'.
 */
2248 int dhd_buzzz_dump_cntrs6(char *p, uint32 *core, uint32 * ovhd, uint32 *log)
2252 uint32 cyccnt, instrcnt;
2253 cm3_cnts_t cm3_cnts;
2256 { /* 32bit cyccnt */
2257 uint32 curr, prev, delta;
2258 prev = core[0]; curr = *log++; core[0] = curr;
/* NOTE(review): wrap arithmetic here (and in the 8-bit cases below) is
 * one short of the true modular difference -- see dump_cntrs3.
 */
2260 delta = curr + (~0U - prev);
2262 delta = (curr - prev);
2263 if (delta >= ovhd[0])
2268 bytes += sprintf(p + bytes, "%12u ", delta);
2272 { /* Extract the 4 cnts: cpi, exc, sleep and lsu */
2275 cm3_cnts_t curr, prev, delta;
2276 prev.u32 = core[1]; curr.u32 = * log++; core[1] = curr.u32;
2277 for (i = 0; i < 4; i++) {
2278 if (curr.u8[i] < prev.u8[i])
2279 delta.u8[i] = curr.u8[i] + (max8 - prev.u8[i]);
2281 delta.u8[i] = (curr.u8[i] - prev.u8[i]);
2282 if (delta.u8[i] >= ovhd[i + 1])
2283 delta.u8[i] -= ovhd[i + 1];
2286 bytes += sprintf(p + bytes, "%4u ", delta.u8[i]);
2288 cm3_cnts.u32 = delta.u32;
2291 { /* Extract the foldcnt from arg0 */
2292 uint8 curr, prev, delta, max8 = ~0;
2293 buzzz_arg0_t arg0; arg0.u32 = *log;
2294 prev = core[2]; curr = arg0.klog.cnt; core[2] = curr;
2296 delta = curr + (max8 - prev);
2298 delta = (curr - prev);
2299 if (delta >= ovhd[5])
2303 bytes += sprintf(p + bytes, "%4u ", delta);
2307 instrcnt = cyccnt - (cm3_cnts.u8[0] + cm3_cnts.u8[1] + cm3_cnts.u8[2]
2308 + cm3_cnts.u8[3]) + foldcnt;
/* Underflowed (wrapped-negative) instruction counts print as "~". */
2309 if (instrcnt > 0xFFFFFF00)
2310 bytes += sprintf(p + bytes, "[%10s] ", "~");
2312 bytes += sprintf(p + bytes, "[%10u] ", instrcnt);
/*
 * Format one BUZZZ log entry into 'p': counter deltas first (CM3 6-ctr
 * or CR4 3-ctr layout, chosen by buzzz->counters), then the event text
 * via the registered format string with 0 or 1 arguments.  Returns
 * bytes written.
 */
2316 int dhd_buzzz_dump_log(char * p, uint32 * core, uint32 * log, buzzz_t * buzzz)
2320 static uint8 * fmt[] = BUZZZ_FMT_STRINGS;
2322 if (buzzz->counters == 6) {
2323 bytes += dhd_buzzz_dump_cntrs6(p, core, buzzz->ovhd, log);
2324 log += 2; /* 32bit cyccnt + (4 x 8bit) CM3 */
2326 bytes += dhd_buzzz_dump_cntrs3(p, core, buzzz->ovhd, log);
2327 log += 3; /* (3 x 32bit) CR4 */
2330 /* Dump the logged arguments using the registered formats */
/* NOTE(review): fmt[arg0.klog.id] is a non-literal sprintf format taken
 * from the dongle image -- acceptable only if BUZZZ_FMT_STRINGS is
 * trusted; confirm id is range-checked upstream.
 */
2333 switch (arg0.klog.args) {
2335 bytes += sprintf(p + bytes, fmt[arg0.klog.id]);
2339 uint32 arg1 = *log++;
2340 bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1);
2344 printf("%s: Maximum one argument supported\n", __FUNCTION__);
2347 bytes += sprintf(p + bytes, "\n");
/*
 * Walk the fetched BUZZZ trace buffer and print every entry.  When the
 * ring has wrapped, entries run cur->end (part2) then start->cur
 * (part1); otherwise only 'count' entries from the start.
 */
2352 void dhd_buzzz_dump(buzzz_t * buzzz_p, void * buffer_p, char * p)
2355 uint32 total, part1, part2, log_sz, core[BUZZZ_COUNTERS_MAX];
2358 for (i = 0; i < BUZZZ_COUNTERS_MAX; i++)
2361 log_sz = buzzz_p->log_sz;
/* NOTE(review): casting the cur/log/end pointers to uint32 truncates on
 * 64-bit hosts; the differences happen to survive truncation only when
 * both pointers share the same upper bits -- confirm or use uintptr.
 */
2363 part1 = ((uint32)buzzz_p->cur - (uint32)buzzz_p->log) / log_sz;
2365 if (buzzz_p->wrap == TRUE) {
2366 part2 = ((uint32)buzzz_p->end - (uint32)buzzz_p->cur) / log_sz;
2367 total = (buzzz_p->buffer_sz - BUZZZ_LOGENTRY_MAXSZ) / log_sz;
2370 total = buzzz_p->count;
2374 printf("%s: buzzz_dump total<%u> done\n", __FUNCTION__, total);
2377 printf("%s: buzzz_dump total<%u> : part2<%u> + part1<%u>\n", __FUNCTION__,
2378 total, part2, part1);
2381 if (part2) { /* with wrap */
2382 log = (void*)((size_t)buffer_p + (buzzz_p->cur - buzzz_p->log));
2383 while (part2--) { /* from cur to end : part2 */
2385 dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
2387 log = (void*)((size_t)log + buzzz_p->log_sz);
2391 log = (void*)buffer_p;
2394 dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
2396 log = (void*)((size_t)log + buzzz_p->log_sz);
2399 printf("%s: buzzz_dump done.\n", __FUNCTION__);
/*
 * Fetch the dongle's BUZZZ trace over the bus and print it: read the
 * shared area for the trace descriptor, copy the buzzz_t header, then
 * the trace buffer, and format it via dhd_buzzz_dump().  All three
 * allocations are released on exit.
 */
2402 int dhd_buzzz_dump_dngl(dhd_bus_t *bus)
2404 buzzz_t * buzzz_p = NULL;
2405 void * buffer_p = NULL;
2406 char * page_p = NULL;
2407 pciedev_shared_t *sh;
2410 if (bus->dhd->busstate != DHD_BUS_DATA) {
2411 return BCME_UNSUPPORTED;
2413 if ((page_p = (char *)MALLOC(bus->dhd->osh, 4096)) == NULL) {
2414 printf("%s: Page memory allocation failure\n", __FUNCTION__);
2417 if ((buzzz_p = MALLOC(bus->dhd->osh, sizeof(buzzz_t))) == NULL) {
2418 printf("%s: Buzzz memory allocation failure\n", __FUNCTION__);
2422 ret = dhdpcie_readshared(bus);
2424 DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
2430 DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__, sh->buzzz));
2432 if (sh->buzzz != 0U) { /* Fetch and display dongle BUZZZ Trace */
2433 dhdpcie_bus_membytes(bus, FALSE, (ulong)sh->buzzz,
2434 (uint8 *)buzzz_p, sizeof(buzzz_t));
2435 if (buzzz_p->count == 0) {
2436 printf("%s: Empty dongle BUZZZ trace\n\n", __FUNCTION__);
2439 if (buzzz_p->counters != 3) { /* 3 counters for CR4 */
2440 printf("%s: Counters<%u> mismatch\n", __FUNCTION__, buzzz_p->counters);
2443 /* Allocate memory for trace buffer and format strings */
2444 buffer_p = MALLOC(bus->dhd->osh, buzzz_p->buffer_sz);
2445 if (buffer_p == NULL) {
2446 printf("%s: Buffer memory allocation failure\n", __FUNCTION__);
2449 /* Fetch the trace and format strings */
2450 dhdpcie_bus_membytes(bus, FALSE, (uint32)buzzz_p->log, /* Trace */
2451 (uint8 *)buffer_p, buzzz_p->buffer_sz);
2452 /* Process and display the trace using formatted output */
2453 printf("%s: <#cycle> <#instruction> <#ctr3> <event information>\n", __FUNCTION__);
2454 dhd_buzzz_dump(buzzz_p, buffer_p, page_p);
2455 printf("%s: ----- End of dongle BUZZZ Trace -----\n\n", __FUNCTION__);
2456 MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); buffer_p = NULL;
/* NOTE(review): use-after-free -- buzzz_p is freed on the line above
 * the buffer_p free, yet buzzz_p->buffer_sz is then read to size that
 * free.  buffer_p must be released BEFORE buzzz_p (or the size cached).
 */
2461 if (page_p) MFREE(bus->dhd->osh, page_p, 4096);
2462 if (buzzz_p) MFREE(bus->dhd->osh, buzzz_p, sizeof(buzzz_t));
2463 if (buffer_p) MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz);
2467 #endif /* BCM_BUZZZ */
/* TRUE when the chip's bus core is a PCIe Gen2 core on a PCI bus. */
2469 #define PCIE_GEN2(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) && \
2470 ((sih)->buscoretype == PCIE2_CORE_ID))
/*
 * Select the MDIO register block 'blk' on the PCIe Gen2 SERDES: write
 * the block address through the MDIO control/data registers, then spin
 * (up to 200 iterations) until the hardware clears the DONE bit in the
 * write-data register.
 */
2473 pcie2_mdiosetblock(dhd_bus_t *bus, uint blk)
2475 uint mdiodata, mdioctrl, i = 0;
2476 uint pcie_serdes_spinwait = 200;
2478 mdioctrl = MDIOCTL2_DIVISOR_VAL | (0x1F << MDIOCTL2_REGADDR_SHF);
2479 mdiodata = (blk << MDIODATA2_DEVADDR_SHF) | MDIODATA2_DONE;
2481 si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_CONTROL, ~0, mdioctrl);
2482 si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, ~0, mdiodata);
2485 /* retry till the transaction is complete */
2486 while (i < pcie_serdes_spinwait) {
2487 uint mdioctrl_read = si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA,
/* DONE cleared by hardware => transaction complete. */
2489 if (!(mdioctrl_read & MDIODATA2_DONE)) {
2496 if (i >= pcie_serdes_spinwait) {
2497 DHD_ERROR(("%s: pcie_mdiosetblock: timed out\n", __FUNCTION__));
/*
 * Perform one MDIO read or write on the PCIe Gen2 SERDES.  Selects the
 * 'physmedia' block, programs the control register with the target
 * register address and direction, then polls (up to 200 iterations) for
 * the hardware to clear DONE.  On a read, *val receives the masked
 * result from the read-data register.
 */
2506 pcie2_mdioop(dhd_bus_t *bus, uint physmedia, uint regaddr, bool write, uint *val,
2509 uint pcie_serdes_spinwait = 200, i = 0, mdio_ctrl;
2512 pcie2_mdiosetblock(bus, physmedia);
2514 /* enable mdio access to SERDES */
2515 mdio_ctrl = MDIOCTL2_DIVISOR_VAL;
2516 mdio_ctrl |= (regaddr << MDIOCTL2_REGADDR_SHF);
2519 mdio_ctrl |= MDIOCTL2_SLAVE_BYPASS;
2522 mdio_ctrl |= MDIOCTL2_READ;
2524 si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_CONTROL, ~0, mdio_ctrl);
/* For a write, kick the transaction by writing data with DONE set;
 * completion is then polled on the same register.  For a read, poll
 * the read-data register instead.
 */
2527 reg32 = PCIE2_MDIO_WR_DATA;
2528 si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, ~0,
2529 *val | MDIODATA2_DONE);
2532 reg32 = PCIE2_MDIO_RD_DATA;
2534 /* retry till the transaction is complete */
2535 while (i < pcie_serdes_spinwait) {
2536 uint done_val = si_corereg(bus->sih, bus->sih->buscoreidx, reg32, 0, 0);
2537 if (!(done_val & MDIODATA2_DONE)) {
2539 *val = si_corereg(bus->sih, bus->sih->buscoreidx,
2540 PCIE2_MDIO_RD_DATA, 0, 0);
2541 *val = *val & MDIODATA2_MASK;
/*
 * Power the WLAN dongle off (flag==TRUE) or back on (flag==FALSE).
 * Power-off: disable/free interrupts, stop timers and protocol, release
 * dongle and bus resources, stop host PCIe clocks (MSM), mark the bus
 * DOWN and set dongle_reset.  Power-on: restart clocks, re-enable the
 * device, re-allocate resources, re-attach the dongle, re-arm IRQs and
 * restart the bus.  Returns a bcmerror code.
 */
2552 dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
2554 dhd_bus_t *bus = dhdp->bus;
2556 #ifdef CONFIG_ARCH_MSM
2557 int retry = POWERUP_MAX_RETRY;
2558 #endif /* CONFIG_ARCH_MSM */
/* When firmware is downloaded at driver load, a devreset is just a bus
 * (re)start.
 */
2560 if (dhd_download_fw_on_driverload) {
2561 bcmerror = dhd_bus_start(dhdp);
2563 if (flag == TRUE) { /* Turn off WLAN */
2564 /* Removing Power */
2565 DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));
2566 bus->dhd->up = FALSE;
/* Two teardown branches: bus still up (full stop incl. watchdog and
 * dhd_bus_stop) vs. already DOWN (skip the stop steps).  NOTE(review):
 * the branches duplicate most of the teardown sequence -- candidates
 * for a common helper.
 */
2567 if (bus->dhd->busstate != DHD_BUS_DOWN) {
2569 dhdpcie_bus_intr_disable(bus);
2570 dhdpcie_free_irq(bus);
2572 #ifdef BCMPCIE_OOB_HOST_WAKE
2573 /* Clean up any pending host wake IRQ */
2574 dhd_bus_oob_intr_set(bus->dhd, FALSE);
2575 dhd_bus_oob_intr_unregister(bus->dhd);
2576 #endif /* BCMPCIE_OOB_HOST_WAKE */
2577 dhd_os_wd_timer(dhdp, 0);
2578 dhd_bus_stop(bus, TRUE);
2579 dhd_prot_clear(dhdp);
2581 dhd_bus_release_dongle(bus);
2582 dhdpcie_bus_free_resource(bus);
2583 bcmerror = dhdpcie_bus_disable_device(bus);
2585 DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
2586 __FUNCTION__, bcmerror));
2589 #ifdef CONFIG_ARCH_MSM
2590 bcmerror = dhdpcie_bus_clock_stop(bus);
2592 DHD_ERROR(("%s: host clock stop failed: %d\n",
2593 __FUNCTION__, bcmerror));
2596 #endif /* CONFIG_ARCH_MSM */
2597 bus->dhd->busstate = DHD_BUS_DOWN;
2600 dhdpcie_bus_intr_disable(bus);
2601 dhdpcie_free_irq(bus);
2603 #ifdef BCMPCIE_OOB_HOST_WAKE
2604 /* Clean up any pending host wake IRQ */
2605 dhd_bus_oob_intr_set(bus->dhd, FALSE);
2606 dhd_bus_oob_intr_unregister(bus->dhd);
2607 #endif /* BCMPCIE_OOB_HOST_WAKE */
2608 dhd_prot_clear(dhdp);
2610 dhd_bus_release_dongle(bus);
2611 dhdpcie_bus_free_resource(bus);
2612 bcmerror = dhdpcie_bus_disable_device(bus);
2614 DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
2615 __FUNCTION__, bcmerror));
2619 #ifdef CONFIG_ARCH_MSM
2620 bcmerror = dhdpcie_bus_clock_stop(bus);
2622 DHD_ERROR(("%s: host clock stop failed: %d\n",
2623 __FUNCTION__, bcmerror));
2626 #endif /* CONFIG_ARCH_MSM */
2629 bus->dhd->dongle_reset = TRUE;
2630 DHD_ERROR(("%s: WLAN OFF Done\n", __FUNCTION__));
2632 } else { /* Turn on WLAN */
2633 if (bus->dhd->busstate == DHD_BUS_DOWN) {
2635 DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__));
2636 #ifdef CONFIG_ARCH_MSM
/* MSM: clock start is retried up to POWERUP_MAX_RETRY times. */
2638 bcmerror = dhdpcie_bus_clock_start(bus);
2640 DHD_ERROR(("%s: dhdpcie_bus_clock_start OK\n",
2648 if (bcmerror && !retry) {
2649 DHD_ERROR(("%s: host pcie clock enable failed: %d\n",
2650 __FUNCTION__, bcmerror));
2653 #endif /* CONFIG_ARCH_MSM */
2654 bcmerror = dhdpcie_bus_enable_device(bus);
2656 DHD_ERROR(("%s: host configuration restore failed: %d\n",
2657 __FUNCTION__, bcmerror));
2661 bcmerror = dhdpcie_bus_alloc_resource(bus);
2663 DHD_ERROR(("%s: dhdpcie_bus_resource_alloc failed: %d\n",
2664 __FUNCTION__, bcmerror));
2668 bcmerror = dhdpcie_bus_dongle_attach(bus);
2670 DHD_ERROR(("%s: dhdpcie_bus_dongle_attach failed: %d\n",
2671 __FUNCTION__, bcmerror));
2675 bcmerror = dhd_bus_request_irq(bus);
2677 DHD_ERROR(("%s: dhd_bus_request_irq failed: %d\n",
2678 __FUNCTION__, bcmerror));
2682 bus->dhd->dongle_reset = FALSE;
2684 bcmerror = dhd_bus_start(dhdp);
2686 DHD_ERROR(("%s: dhd_bus_start: %d\n",
2687 __FUNCTION__, bcmerror));
2691 bus->dhd->up = TRUE;
2692 DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__));
/* Power-on requested while the bus is not DOWN: unexpected state. */
2694 DHD_ERROR(("%s: what should we do here\n", __FUNCTION__));
2701 bus->dhd->busstate = DHD_BUS_DOWN;
/*
 * Central IOVAR (I/O variable) get/set dispatcher for the PCIe bus layer.
 * Decodes up to three 32-bit parameters from 'params', validates the request
 * length, then switches on 'actionid' (IOV_GVAL/IOV_SVAL of the iovar id).
 * Results of GET operations are copied back into 'arg' (val_size bytes).
 * Returns a BCME_* status code via 'bcmerror'.
 * NOTE(review): this chunk is a stripped excerpt; break/goto lines between
 * cases are not visible here.
 */
2707 dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
2708 void *params, int plen, void *arg, int len, int val_size)
2716 DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
2717 __FUNCTION__, actionid, name, params, plen, arg, len, val_size));
2719 if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
/* Stage up to three int-sized parameters from the (possibly unaligned) params buffer */
2722 if (plen >= (int)sizeof(int_val))
2723 bcopy(params, &int_val, sizeof(int_val));
2725 if (plen >= (int)sizeof(int_val) * 2)
2726 bcopy((void*)((uintptr)params + sizeof(int_val)), &int_val2, sizeof(int_val2));
2728 if (plen >= (int)sizeof(int_val) * 3)
2729 bcopy((void*)((uintptr)params + 2 * sizeof(int_val)), &int_val3, sizeof(int_val3));
2731 bool_val = (int_val != 0) ? TRUE : FALSE;
2733 /* Check if dongle is in reset. If so, only allow DEVRESET iovars */
2734 if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
2735 actionid == IOV_GVAL(IOV_DEVRESET))) {
2736 bcmerror = BCME_NOTREADY;
2743 case IOV_SVAL(IOV_VARS):
2744 bcmerror = dhdpcie_downloadvars(bus, arg, len);
/* Indirect PCIe config-space access through the core's configaddr/configdata pair */
2747 case IOV_SVAL(IOV_PCIEREG):
2748 si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
2750 si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0,
2754 case IOV_GVAL(IOV_PCIEREG):
2755 si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
2757 int_val = si_corereg(bus->sih, bus->sih->buscoreidx,
2758 OFFSETOF(sbpcieregs_t, configdata), 0, 0);
2759 bcopy(&int_val, arg, val_size);
2762 case IOV_GVAL(IOV_BAR0_SECWIN_REG):
2764 uint32 cur_base, base;
2766 volatile uint32 *offset;
2767 /* set the bar0 secondary window to this */
2768 /* write the register value */
/* Save current window base so it can be restored after the access */
2769 cur_base = dhdpcie_bus_cfg_read_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint));
2770 base = int_val & 0xFFFFF000;
2771 dhdpcie_bus_cfg_write_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint32), base);
2772 bar0 = (uchar *)bus->regs;
/* 0x4000 is the secondary-window offset inside BAR0; low 12 bits address within it */
2773 offset = (uint32 *)(bar0 + 0x4000 + (int_val & 0xFFF));
2775 bcopy(&int_val, arg, val_size);
2776 dhdpcie_bus_cfg_write_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint32), cur_base);
2779 case IOV_SVAL(IOV_BAR0_SECWIN_REG):
2781 uint32 cur_base, base;
2783 volatile uint32 *offset;
2784 /* set the bar0 secondary window to this */
2785 /* write the register value */
2786 cur_base = dhdpcie_bus_cfg_read_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint));
2787 base = int_val & 0xFFFFF000;
2788 dhdpcie_bus_cfg_write_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint32), base);
2789 bar0 = (uchar *)bus->regs;
2790 offset = (uint32 *)(bar0 + 0x4000 + (int_val & 0xFFF));
2792 bcopy(&int_val2, arg, val_size);
2793 dhdpcie_bus_cfg_write_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint32), cur_base);
2797 case IOV_SVAL(IOV_PCIECOREREG):
2798 si_corereg(bus->sih, bus->sih->buscoreidx, int_val, ~0, int_val2);
/* Backplane register access: core index encoded in bits [15:12] of the offset */
2800 case IOV_GVAL(IOV_SBREG):
2803 uint32 addr, coreidx;
2805 bcopy(params, &sdreg, sizeof(sdreg));
2807 addr = sdreg.offset;
2808 coreidx = (addr & 0xF000) >> 12;
2810 int_val = si_corereg(bus->sih, coreidx, (addr & 0xFFF), 0, 0);
2811 bcopy(&int_val, arg, sizeof(int32));
2815 case IOV_SVAL(IOV_SBREG):
2818 uint32 addr, coreidx;
2820 bcopy(params, &sdreg, sizeof(sdreg));
2822 addr = sdreg.offset;
2823 coreidx = (addr & 0xF000) >> 12;
2825 si_corereg(bus->sih, coreidx, (addr & 0xFFF), ~0, sdreg.value);
/* SerDes MDIO register access -- only available on PCIe gen2 cores */
2830 case IOV_GVAL(IOV_PCIESERDESREG):
2833 if (!PCIE_GEN2(bus->sih)) {
2834 DHD_ERROR(("%s: supported only in pcie gen2\n", __FUNCTION__));
2835 bcmerror = BCME_ERROR;
2838 if (!pcie2_mdioop(bus, int_val, int_val2, FALSE, &val, FALSE)) {
2839 bcopy(&val, arg, sizeof(int32));
2842 DHD_ERROR(("%s: pcie2_mdioop failed.\n", __FUNCTION__));
2843 bcmerror = BCME_ERROR;
2847 case IOV_SVAL(IOV_PCIESERDESREG):
2848 if (!PCIE_GEN2(bus->sih)) {
2849 DHD_ERROR(("%s: supported only in pcie gen2\n", __FUNCTION__));
2850 bcmerror = BCME_ERROR;
2853 if (pcie2_mdioop(bus, int_val, int_val2, TRUE, &int_val3, FALSE)) {
2854 DHD_ERROR(("%s: pcie2_mdioop failed.\n", __FUNCTION__));
2855 bcmerror = BCME_ERROR;
2858 case IOV_GVAL(IOV_PCIECOREREG):
2859 int_val = si_corereg(bus->sih, bus->sih->buscoreidx, int_val, 0, 0);
2860 bcopy(&int_val, arg, val_size);
2863 case IOV_SVAL(IOV_PCIECFGREG):
2864 OSL_PCI_WRITE_CONFIG(bus->osh, int_val, 4, int_val2);
2867 case IOV_GVAL(IOV_PCIECFGREG):
2868 int_val = OSL_PCI_READ_CONFIG(bus->osh, int_val, 4);
2869 bcopy(&int_val, arg, val_size);
2872 case IOV_SVAL(IOV_PCIE_LPBK):
2873 bcmerror = dhdpcie_bus_lpback_req(bus, int_val);
2876 case IOV_SVAL(IOV_PCIE_DMAXFER):
2877 bcmerror = dhdpcie_bus_dmaxfer_req(bus, int_val, int_val2, int_val3);
2880 case IOV_GVAL(IOV_PCIE_SUSPEND):
2881 int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 1 : 0;
2882 bcopy(&int_val, arg, val_size);
2885 case IOV_SVAL(IOV_PCIE_SUSPEND):
2886 dhdpcie_bus_suspend(bus, bool_val);
2889 case IOV_GVAL(IOV_MEMSIZE):
2890 int_val = (int32)bus->ramsize;
2891 bcopy(&int_val, arg, val_size);
/* Raw dongle memory read/write: params = [address, size, data...] for SET */
2893 case IOV_SVAL(IOV_MEMBYTES):
2894 case IOV_GVAL(IOV_MEMBYTES):
2896 uint32 address; /* absolute backplane address */
2900 bool set = (actionid == IOV_SVAL(IOV_MEMBYTES));
2902 ASSERT(plen >= 2*sizeof(int));
2904 address = (uint32)int_val;
2905 bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val));
2906 size = (uint)int_val;
2908 /* Do some validation */
2909 dsize = set ? plen - (2 * sizeof(int)) : len;
2911 DHD_ERROR(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n",
2912 __FUNCTION__, (set ? "set" : "get"), address, size, dsize));
2913 bcmerror = BCME_BADARG;
2917 DHD_INFO(("%s: Request to %s %d bytes at address 0x%08x\n dsize %d ", __FUNCTION__,
2918 (set ? "write" : "read"), size, address, dsize));
2921 if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
2922 /* if address is 0, store the reset instruction to be written in 0 */
2923 if (set && address == bus->dongle_ram_base) {
2924 bus->resetinstr = *(((uint32*)params) + 2);
2927 /* If we know about SOCRAM, check for a fit */
2928 if ((bus->orig_ramsize) &&
2929 ((address > bus->orig_ramsize) || (address + size > bus->orig_ramsize)))
2931 uint8 enable, protect, remap;
2932 si_socdevram(bus->sih, FALSE, &enable, &protect, &remap);
2933 if (!enable || protect) {
2934 DHD_ERROR(("%s: ramsize 0x%08x doesn't have %d bytes at 0x%08x\n",
2935 __FUNCTION__, bus->orig_ramsize, size, address));
2936 DHD_ERROR(("%s: socram enable %d, protect %d\n",
2937 __FUNCTION__, enable, protect));
2938 bcmerror = BCME_BADARG;
2942 if (!REMAP_ENAB(bus) && (address >= SOCDEVRAM_ARM_ADDR)) {
2943 uint32 devramsize = si_socdevram_size(bus->sih);
2944 if ((address < SOCDEVRAM_ARM_ADDR) ||
2945 (address + size > (SOCDEVRAM_ARM_ADDR + devramsize))) {
2946 DHD_ERROR(("%s: bad address 0x%08x, size 0x%08x\n",
2947 __FUNCTION__, address, size));
2948 DHD_ERROR(("%s: socram range 0x%08x,size 0x%08x\n",
2949 __FUNCTION__, SOCDEVRAM_ARM_ADDR, devramsize));
2950 bcmerror = BCME_BADARG;
2953 /* move it such that address is real now */
2954 address -= SOCDEVRAM_ARM_ADDR;
2955 address += SOCDEVRAM_BP_ADDR;
2956 DHD_INFO(("%s: Request to %s %d bytes @ Mapped address 0x%08x\n",
2957 __FUNCTION__, (set ? "write" : "read"), size, address));
2958 } else if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address) && remap) {
2959 /* Can not access remap region while devram remap bit is set
2960 * ROM content would be returned in this case
2962 DHD_ERROR(("%s: Need to disable remap for address 0x%08x\n",
2963 __FUNCTION__, address));
2964 bcmerror = BCME_ERROR;
2970 /* Generate the actual data pointer */
2971 data = set ? (uint8*)params + 2 * sizeof(int): (uint8*)arg;
2973 /* Call to do the transfer */
2974 bcmerror = dhdpcie_bus_membytes(bus, set, address, data, size);
2980 case IOV_GVAL(IOV_BUZZZ_DUMP):
2981 bcmerror = dhd_buzzz_dump_dngl(bus);
2983 #endif /* BCM_BUZZZ */
2985 case IOV_SVAL(IOV_SET_DOWNLOAD_STATE):
2986 bcmerror = dhdpcie_bus_download_state(bus, bool_val);
2989 case IOV_GVAL(IOV_RAMSIZE):
2990 int_val = (int32)bus->ramsize;
2991 bcopy(&int_val, arg, val_size);
2994 case IOV_GVAL(IOV_RAMSTART):
2995 int_val = (int32)bus->dongle_ram_base;
2996 bcopy(&int_val, arg, val_size);
2999 case IOV_GVAL(IOV_CC_NVMSHADOW):
3001 struct bcmstrbuf dump_b;
3003 bcm_binit(&dump_b, arg, len);
3004 bcmerror = dhdpcie_cc_nvmshadow(bus, &dump_b);
3008 case IOV_GVAL(IOV_SLEEP_ALLOWED):
3009 bool_val = bus->sleep_allowed;
3010 bcopy(&bool_val, arg, val_size);
3013 case IOV_SVAL(IOV_SLEEP_ALLOWED):
3014 bus->sleep_allowed = bool_val;
3017 case IOV_GVAL(IOV_DONGLEISOLATION):
3018 int_val = bus->dhd->dongle_isolation;
3019 bcopy(&int_val, arg, val_size);
3022 case IOV_SVAL(IOV_DONGLEISOLATION):
3023 bus->dhd->dongle_isolation = bool_val;
3026 case IOV_GVAL(IOV_LTRSLEEPON_UNLOOAD):
3027 int_val = bus->ltrsleep_on_unload;
3028 bcopy(&int_val, arg, val_size);
3031 case IOV_SVAL(IOV_LTRSLEEPON_UNLOOAD):
3032 bus->ltrsleep_on_unload = bool_val;
3035 case IOV_GVAL(IOV_DUMP_RINGUPD_BLOCK):
3037 struct bcmstrbuf dump_b;
3038 bcm_binit(&dump_b, arg, len);
3039 bcmerror = dhd_prot_ringupd_dump(bus->dhd, &dump_b);
/* Bit 0 = D2H DMA-index support, bit 1 = H2D DMA-index support */
3042 case IOV_GVAL(IOV_DMA_RINGINDICES):
3043 { int h2d_support, d2h_support;
3045 d2h_support = DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support) ? 1 : 0;
3046 h2d_support = DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support) ? 1 : 0;
3047 int_val = d2h_support | (h2d_support << 1);
3048 bcopy(&int_val, arg, val_size);
3051 case IOV_SVAL(IOV_DMA_RINGINDICES):
3052 /* Can change it only during initialization/FW download */
3053 if (bus->dhd->busstate == DHD_BUS_DOWN) {
3054 if ((int_val > 3) || (int_val < 0)) {
3055 DHD_ERROR(("%s: Bad argument. Possible values: 0, 1, 2 & 3\n", __FUNCTION__));
3056 bcmerror = BCME_BADARG;
3058 bus->dhd->dma_d2h_ring_upd_support = (int_val & 1) ? TRUE : FALSE;
3059 bus->dhd->dma_h2d_ring_upd_support = (int_val & 2) ? TRUE : FALSE;
3062 DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
3064 bcmerror = BCME_NOTDOWN;
3068 case IOV_GVAL(IOV_RX_METADATALEN):
3069 int_val = dhd_prot_metadatalen_get(bus->dhd, TRUE);
3070 bcopy(&int_val, arg, val_size);
3073 case IOV_SVAL(IOV_RX_METADATALEN):
3075 bcmerror = BCME_BUFTOOLONG;
3078 dhd_prot_metadatalen_set(bus->dhd, int_val, TRUE);
3081 case IOV_SVAL(IOV_TXP_THRESHOLD):
3082 dhd_prot_txp_threshold(bus->dhd, TRUE, int_val);
3085 case IOV_GVAL(IOV_TXP_THRESHOLD):
3086 int_val = dhd_prot_txp_threshold(bus->dhd, FALSE, int_val);
3087 bcopy(&int_val, arg, val_size);
/* db1_for_mb selects doorbell-1 (vs. config-space mailbox) for H2D mailbox intrs */
3090 case IOV_SVAL(IOV_DB1_FOR_MB):
3092 bus->db1_for_mb = TRUE;
3094 bus->db1_for_mb = FALSE;
3097 case IOV_GVAL(IOV_DB1_FOR_MB):
3098 if (bus->db1_for_mb)
3102 bcopy(&int_val, arg, val_size);
3105 case IOV_GVAL(IOV_TX_METADATALEN):
3106 int_val = dhd_prot_metadatalen_get(bus->dhd, FALSE);
3107 bcopy(&int_val, arg, val_size);
3110 case IOV_SVAL(IOV_TX_METADATALEN):
3112 bcmerror = BCME_BUFTOOLONG;
3115 dhd_prot_metadatalen_set(bus->dhd, int_val, FALSE);
3118 case IOV_GVAL(IOV_FLOW_PRIO_MAP):
3119 int_val = bus->dhd->flow_prio_map_type;
3120 bcopy(&int_val, arg, val_size);
3123 case IOV_SVAL(IOV_FLOW_PRIO_MAP):
3124 int_val = (int32)dhd_update_flow_prio_map(bus->dhd, (uint8)int_val);
3125 bcopy(&int_val, arg, val_size);
/* DPC bounds: max tx/rx completions processed per dispatch */
3128 case IOV_GVAL(IOV_TXBOUND):
3129 int_val = (int32)dhd_txbound;
3130 bcopy(&int_val, arg, val_size);
3133 case IOV_SVAL(IOV_TXBOUND):
3134 dhd_txbound = (uint)int_val;
3137 case IOV_GVAL(IOV_RXBOUND):
3138 int_val = (int32)dhd_rxbound;
3139 bcopy(&int_val, arg, val_size);
3142 case IOV_SVAL(IOV_RXBOUND):
3143 dhd_rxbound = (uint)int_val;
3147 bcmerror = BCME_UNSUPPORTED;
3155 /* Transfers bytes from host to dongle using pio mode */
/*
 * Issue a PCIe loopback request of 'len' bytes to the dongle.
 * Requires a fully initialized bus (dhd, prot) in the DATA state;
 * NOTE(review): error returns on the guard paths are on stripped lines.
 */
3157 dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 len)
3159 if (bus->dhd == NULL) {
3160 DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
3163 if (bus->dhd->prot == NULL) {
3164 DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
3167 if (bus->dhd->busstate != DHD_BUS_DATA) {
3168 DHD_ERROR(("%s: not in a readystate to LPBK is not inited\n", __FUNCTION__));
/* Delegate the actual loopback to the message-buffer protocol layer */
3171 dhdmsgbuf_lpbk_req(bus->dhd, len);
/*
 * Thin public wrapper: forward a suspend (state=TRUE) or resume (state=FALSE)
 * request from the common DHD layer to the PCIe bus implementation.
 */
3176 dhd_bus_set_suspend_resume(dhd_pub_t *dhdp, bool state)
3178 struct dhd_bus *bus = dhdp->bus;
3180 dhdpcie_bus_suspend(bus, state);
/*
 * Suspend or resume the PCIe bus.
 * Suspend path: mark the bus SUSPEND, send H2D_HOST_D3_INFORM to the dongle,
 * then wait (ioctl-resp timeout) for the D3 ACK mailbox reply. Only if the
 * ACK arrives and no wakelock is held is the link actually suspended; on
 * wakelock conflict or timeout the device is forced back to D0 and the bus
 * returns to the DATA state. Resume path (visible at the tail) re-runs
 * dhdpcie_pci_suspend_resume and re-enables interrupts.
 * NOTE(review): stripped excerpt -- the suspend/resume branch structure and
 * some returns are on lines not shown here.
 */
3185 dhdpcie_bus_suspend(struct dhd_bus *bus, bool state)
3192 if (bus->dhd == NULL) {
3193 DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
3196 if (bus->dhd->prot == NULL) {
3197 DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
3200 if (bus->dhd->busstate != DHD_BUS_DATA && bus->dhd->busstate != DHD_BUS_SUSPEND) {
3201 DHD_ERROR(("%s: not in a readystate to LPBK is not inited\n", __FUNCTION__));
3204 if (bus->dhd->dongle_reset)
3207 if (bus->suspended == state) /* Set to same state */
/* --- Suspend: D3-inform handshake with the dongle --- */
3211 bus->wait_for_d3_ack = 0;
3212 bus->suspended = TRUE;
3213 bus->dhd->busstate = DHD_BUS_SUSPEND;
/* Waive wakelocks while waiting so the host may sleep during the handshake */
3214 DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
3215 dhd_os_set_ioctl_resp_timeout(DEFAULT_IOCTL_RESP_TIMEOUT);
3216 dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
3217 timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->wait_for_d3_ack, &pending);
3218 dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
3219 DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
3220 if (bus->wait_for_d3_ack) {
3221 /* Got D3 Ack. Suspend the bus */
3222 if (dhd_os_check_wakelock_all(bus->dhd)) {
/* A wakelock is held: abort the suspend and force the device back to D0 */
3223 DHD_ERROR(("%s: Suspend failed because of wakelock\n", __FUNCTION__));
3224 bus->dev->current_state = PCI_D3hot;
3225 pci_set_master(bus->dev);
3226 rc = pci_set_power_state(bus->dev, PCI_D0);
3228 DHD_ERROR(("%s: pci_set_power_state failed:"
3229 " current_state[%d], ret[%d]\n",
3230 __FUNCTION__, bus->dev->current_state, rc));
3232 bus->suspended = FALSE;
3233 bus->dhd->busstate = DHD_BUS_DATA;
/* ACK received and no wakelock: mask interrupts and suspend the link */
3236 dhdpcie_bus_intr_disable(bus);
3237 rc = dhdpcie_pci_suspend_resume(bus, state);
3239 } else if (timeleft == 0) {
/* No D3 ACK within the timeout: recover to D0 / DATA state */
3240 DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__));
3241 bus->dev->current_state = PCI_D3hot;
3242 pci_set_master(bus->dev);
3243 rc = pci_set_power_state(bus->dev, PCI_D0);
3245 DHD_ERROR(("%s: pci_set_power_state failed:"
3246 " current_state[%d], ret[%d]\n",
3247 __FUNCTION__, bus->dev->current_state, rc));
3249 bus->suspended = FALSE;
3250 bus->dhd->busstate = DHD_BUS_DATA;
/* --- Resume path --- */
3253 bus->wait_for_d3_ack = 1;
3256 #ifdef BCMPCIE_OOB_HOST_WAKE
3257 DHD_OS_OOB_IRQ_WAKE_UNLOCK(bus->dhd);
3258 #endif /* BCMPCIE_OOB_HOST_WAKE */
3259 rc = dhdpcie_pci_suspend_resume(bus, state);
3260 bus->suspended = FALSE;
3261 bus->dhd->busstate = DHD_BUS_DATA;
3262 dhdpcie_bus_intr_enable(bus);
3267 /* Transfers bytes from host to dongle and to host again using DMA */
/*
 * Request a host->dongle->host DMA loopback test of 'len' bytes, with
 * optional source/destination delays. Validates bus readiness and the
 * length bounds, then hands off to the msgbuf protocol layer.
 * NOTE(review): 4194296 = 4 MB - 8; upper bound presumably tied to the
 * max DMA transfer size -- stripped error returns not visible here.
 */
3269 dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus, uint32 len, uint32 srcdelay, uint32 destdelay)
3271 if (bus->dhd == NULL) {
3272 DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
3275 if (bus->dhd->prot == NULL) {
3276 DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
3279 if (bus->dhd->busstate != DHD_BUS_DATA) {
3280 DHD_ERROR(("%s: not in a readystate to LPBK is not inited\n", __FUNCTION__));
3284 if (len < 5 || len > 4194296) {
3285 DHD_ERROR(("%s: len is too small or too large\n", __FUNCTION__));
3288 return dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay);
/*
 * Put the dongle into, or take it out of, firmware-download state.
 * enter=TRUE: halt the ARM (CR4) or disable ARM + reset SOCRAM (CM3/7S)
 * so RAM can be written. enter=FALSE: write NVRAM vars, plant the reset
 * vector at address 0 (CR4), release the ARM from reset so the firmware
 * boots, and move busstate to DHD_BUS_LOAD.
 * Always returns with the PCIe core selected as the active core.
 * NOTE(review): stripped excerpt -- several if/else boundaries and gotos
 * are on lines not shown.
 */
3294 dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter)
3301 /* To enter download state, disable ARM and reset SOCRAM.
3302 * To exit download state, simply reset ARM (default is RAM boot).
/* ALP clock only while the ARM is halted for download */
3305 bus->alp_only = TRUE;
3307 /* some chips (e.g. 43602) have two ARM cores, the CR4 is receives the firmware. */
3308 cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
3310 if (cr4_regs == NULL && !(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
3311 !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
3312 DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
3313 bcmerror = BCME_ERROR;
3317 if (cr4_regs == NULL) { /* no CR4 present on chip */
3318 si_core_disable(bus->sih, 0);
3320 if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
3321 DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
3322 bcmerror = BCME_ERROR;
3326 si_core_reset(bus->sih, 0, 0);
3329 /* Clear the top bit of memory */
3332 if (dhdpcie_bus_membytes(bus, TRUE, bus->ramsize - 4,
3333 (uint8*)&zeros, 4) < 0) {
3334 bcmerror = BCME_ERROR;
3342 * Read RAM base address [0x18_0000]
3343 * [next] Download firmware
3344 * [done at else] Populate the reset vector
3345 * [done at else] Remove ARM halt
3347 /* Halt ARM & remove reset */
3348 si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT)
3349 if (bus->sih->chip == BCM43602_CHIP_ID) {
/* 43602: zero the PDA for banks 5 and 7 via the bank index/PDA register pair */
3350 W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 5);
3351 W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
3352 W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 7);
3353 W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
3355 /* reset last 4 bytes of RAM address. to be used for shared area */
3356 dhdpcie_init_shared_addr(bus);
/* --- exit download state (enter == FALSE) --- */
3359 if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
3360 if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
3361 DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
3362 bcmerror = BCME_ERROR;
3366 if (!si_iscoreup(bus->sih)) {
3367 DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__));
3368 bcmerror = BCME_ERROR;
3373 /* Enable remap before ARM reset but after vars.
3374 * No backplane access in remap mode
3377 if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
3378 !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
3379 DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
3380 bcmerror = BCME_ERROR;
3385 if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
3386 !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
3387 DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
3388 bcmerror = BCME_ERROR;
3392 if (bus->sih->chip == BCM43602_CHIP_ID) {
3393 /* Firmware crashes on SOCSRAM access when core is in reset */
3394 if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
3395 DHD_ERROR(("%s: Failed to find SOCRAM core!\n",
3397 bcmerror = BCME_ERROR;
3400 si_core_reset(bus->sih, 0, 0);
3401 si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
/* NVRAM vars must land in RAM before the ARM is released */
3405 if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
3406 DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
3411 /* switch back to arm core again */
3412 if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
3413 DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__));
3414 bcmerror = BCME_ERROR;
3418 /* write address 0 with reset instruction */
3419 bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
3420 (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
3422 /* now remove reset and halt and continue to run CR4 */
3425 si_core_reset(bus->sih, 0, 0);
3427 /* Allow HT Clock now that the ARM is running. */
3428 bus->alp_only = FALSE;
3430 bus->dhd->busstate = DHD_BUS_LOAD;
3434 /* Always return to PCIE core */
3435 si_setcore(bus->sih, PCIE2_CORE_ID, 0);
/*
 * Write the NVRAM variables block to the top of dongle RAM and stamp the
 * length token in the last word. Layout: vars end 4 bytes below the top of
 * RAM; the final word holds the varsize-in-words token (low 16 bits) with
 * its bitwise complement in the high 16 bits, used by the firmware (and by
 * dhdpcie_readshared) as a checksum. Under DHD_DEBUG the written image is
 * read back and compared.
 */
3441 dhdpcie_bus_write_vars(dhd_bus_t *bus)
3444 uint32 varsize, phys_size;
3449 uint8 *nvram_ularray;
3450 #endif /* DHD_DEBUG */
3452 /* Even if there are no vars are to be written, we still need to set the ramsize. */
3453 varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0;
3454 varaddr = (bus->ramsize - 4) - varsize;
3456 varaddr += bus->dongle_ram_base;
/* Stage vars in a zeroed bounce buffer so the rounded-up tail is 0-padded */
3460 vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize);
3464 bzero(vbuffer, varsize);
3465 bcopy(bus->vars, vbuffer, bus->varsz);
3466 /* Write the vars list */
3467 bcmerror = dhdpcie_bus_membytes(bus, TRUE, varaddr, vbuffer, varsize);
3469 /* Implement read back and verify later */
3471 /* Verify NVRAM bytes */
3472 DHD_INFO(("%s: Compare NVRAM dl & ul; varsize=%d\n", __FUNCTION__, varsize));
3473 nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize);
3477 /* Upload image to verify downloaded contents. */
3478 memset(nvram_ularray, 0xaa, varsize);
3480 /* Read the vars list to temp buffer for comparison */
3481 bcmerror = dhdpcie_bus_membytes(bus, FALSE, varaddr, nvram_ularray, varsize);
3483 DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
3484 __FUNCTION__, bcmerror, varsize, varaddr));
3487 /* Compare the org NVRAM with the one read from RAM */
3488 if (memcmp(vbuffer, nvram_ularray, varsize)) {
3489 DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__));
3491 DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",
3494 MFREE(bus->dhd->osh, nvram_ularray, varsize);
3495 #endif /* DHD_DEBUG */
3497 MFREE(bus->dhd->osh, vbuffer, varsize);
3500 phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize;
3502 phys_size += bus->dongle_ram_base;
3504 /* adjust to the user specified RAM */
3505 DHD_INFO(("%s: Physical memory size: %d, usable memory size: %d\n", __FUNCTION__,
3506 phys_size, bus->ramsize));
3507 DHD_INFO(("%s: Vars are at %d, orig varsize is %d\n", __FUNCTION__,
3509 varsize = ((phys_size - 4) - varaddr);
3512 * Determine the length token:
3513 * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
3517 bus->nvram_csm = varsizew;
3519 varsizew = varsize / 4;
3520 varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
3521 bus->nvram_csm = varsizew;
/* Token is written little-endian, matching the dongle's byte order */
3522 varsizew = htol32(varsizew);
3525 DHD_INFO(("%s: New varsize is %d, length token=0x%08x\n", __FUNCTION__, varsize, varsizew));
3527 /* Write the length token to the last word */
3528 bcmerror = dhdpcie_bus_membytes(bus, TRUE, (phys_size - 4),
3529 (uint8*)&varsizew, 4);
/*
 * Replace the cached NVRAM variables buffer (bus->vars / bus->varsz) with
 * the caller-supplied image of 'len' bytes. Only permitted while the bus is
 * down (before firmware download); the actual write to dongle RAM happens
 * later in dhdpcie_bus_write_vars(). Returns BCME_OK or an error code.
 */
3535 dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len)
3537 int bcmerror = BCME_OK;
3539 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3541 /* Basic sanity checks */
3543 bcmerror = BCME_NOTDOWN;
3547 bcmerror = BCME_BUFTOOSHORT;
3551 /* Free the old ones and replace with passed variables */
3553 MFREE(bus->dhd->osh, bus->vars, bus->varsz);
3555 bus->vars = MALLOC(bus->dhd->osh, len);
/* varsz stays 0 when the allocation failed, keeping the pair consistent */
3556 bus->varsz = bus->vars ? len : 0;
3557 if (bus->vars == NULL) {
3558 bcmerror = BCME_NOMEM;
3562 /* Copy the passed variables, which should include the terminating double-null */
3563 bcopy(arg, bus->vars, bus->varsz);
3568 #ifndef BCMPCIE_OOB_HOST_WAKE
3569 /* loop through the capability list and see if the pcie capabilty exists */
/*
 * Walk the PCI config-space capability linked list looking for capability
 * 'req_cap_id'. Validates header type 0, the STATUS CAP_LIST bit, and a
 * non-NULL capability pointer before walking. NOTE(review): the return of
 * the found cap_ptr (or 0) is on stripped lines.
 */
3571 dhdpcie_find_pci_capability(osl_t *osh, uint8 req_cap_id)
3577 /* check for Header type 0 */
3578 byte_val = read_pci_cfg_byte(PCI_CFG_HDR);
3579 if ((byte_val & 0x7f) != PCI_HEADER_NORMAL) {
3580 DHD_ERROR(("%s : PCI config header not normal.\n", __FUNCTION__));
3584 /* check if the capability pointer field exists */
3585 byte_val = read_pci_cfg_byte(PCI_CFG_STAT);
3586 if (!(byte_val & PCI_CAPPTR_PRESENT)) {
3587 DHD_ERROR(("%s : PCI CAP pointer not present.\n", __FUNCTION__));
3591 cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
3592 /* check if the capability pointer is 0x00 */
3593 if (cap_ptr == 0x00) {
3594 DHD_ERROR(("%s : PCI CAP pointer is 0x00.\n", __FUNCTION__));
3598 /* loop thr'u the capability list and see if the pcie capabilty exists */
3600 cap_id = read_pci_cfg_byte(cap_ptr);
/* Each capability: byte 0 = cap id, byte 1 = next-capability pointer (0 ends list) */
3602 while (cap_id != req_cap_id) {
3603 cap_ptr = read_pci_cfg_byte((cap_ptr + 1));
3604 if (cap_ptr == 0x00) break;
3605 cap_id = read_pci_cfg_byte(cap_ptr);
/*
 * Enable or disable PME (Power Management Event) generation for the device.
 * Locates the PCI Power Management capability, then sets PME_EN (enable=TRUE)
 * or clears it (enable=FALSE) in the PME control/status register. The sticky
 * PME_STAT bit is written back as 1 to clear any pending event.
 */
3613 dhdpcie_pme_active(osl_t *osh, bool enable)
3618 cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
3621 DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__));
3625 pme_csr = OSL_PCI_READ_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32));
3626 DHD_ERROR(("%s : pme_sts_ctrl 0x%x\n", __FUNCTION__, pme_csr));
/* PME_STAT is write-1-to-clear */
3628 pme_csr |= PME_CSR_PME_STAT;
3630 pme_csr |= PME_CSR_PME_EN;
3632 pme_csr &= ~PME_CSR_PME_EN;
3635 OSL_PCI_WRITE_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32), pme_csr);
3639 /* Add bus dump output to a buffer */
/*
 * Append bus-level debug state to 'strbuf': protocol-layer info followed by
 * one line per active TX flow ring (flow id, interface index, TID/priority,
 * backup-queue length) plus each ring's protocol details.
 */
3640 void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
3643 flow_ring_node_t *flow_ring_node;
3645 dhd_prot_print_info(dhdp, strbuf);
3646 for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) {
3647 flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
3648 if (flow_ring_node->active) {
3649 bcm_bprintf(strbuf, "Flow:%d IF %d Prio %d Qlen %d ",
3650 flow_ring_node->flowid, flow_ring_node->flow_info.ifindex,
3651 flow_ring_node->flow_info.tid, flow_ring_node->queue.len);
3652 dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, strbuf);
/*
 * Walk the bus's constructed flow-ring list and let the protocol layer
 * service each TX flow ring. 'next' is captured before the callback in
 * case the current node is unlinked during processing.
 */
3658 dhd_update_txflowrings(dhd_pub_t *dhd)
3661 flow_ring_node_t *flow_ring_node;
3662 struct dhd_bus *bus = dhd->bus;
3664 for (item = dll_head_p(&bus->const_flowring);
3665 !dll_end(&bus->const_flowring, item); item = next) {
3666 next = dll_next_p(item);
3668 flow_ring_node = dhd_constlist_to_flowring(item);
3669 dhd_prot_update_txflowring(dhd, flow_ring_node->flowid, flow_ring_node->prot_info);
3674 /* Mailbox ringbell Function */
/*
 * Raise a host-to-dongle mailbox interrupt. Not supported on old PCIe core
 * revisions (2/4/6). Uses doorbell 1 (PCIe core register) when db1_for_mb
 * is set, otherwise the PCISBMbx config-space mailbox register.
 */
3676 dhd_bus_gen_devmb_intr(struct dhd_bus *bus)
3678 if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
3679 (bus->sih->buscorerev == 4)) {
3680 DHD_ERROR(("%s: mailbox communication not supported\n", __FUNCTION__));
3683 if (bus->db1_for_mb) {
3684 /* this is a pcie core register, not the config regsiter */
3685 DHD_INFO(("%s: writing a mail box interrupt to the device, through doorbell 1\n", __FUNCTION__));
3686 si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_DB1, ~0, 0x12345678);
3689 DHD_INFO(("%s: writing a mail box interrupt to the device, through config space\n", __FUNCTION__));
/* NOTE(review): the mailbox config register is written twice here -- presumably
 * intentional (original code), verify against the PCIe core errata before changing */
3690 dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
3691 dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
3695 /* doorbell ring Function */
/*
 * Generic H2D doorbell: on legacy PCIe core revs (2/4/6) ring via the
 * PCIMailBoxInt INTB bit; on newer cores write the PCIH2D_MailBox core
 * register. 'value' is unused in this generic path.
 */
3697 dhd_bus_ringbell(struct dhd_bus *bus, uint32 value)
3699 if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
3700 (bus->sih->buscorerev == 4)) {
3701 si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, PCIE_INTB, PCIE_INTB);
3703 /* this is a pcie core register, not the config regsiter */
3704 DHD_INFO(("%s: writing a door bell to the device\n", __FUNCTION__));
3705 si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_MailBox, ~0, 0x12345678);
/* Fast doorbell: write 'value' straight to the pre-resolved mailbox
 * register address (set up in dhd_bus_get_mbintr_fn), skipping si_corereg. */
3710 dhd_bus_ringbell_fast(struct dhd_bus *bus, uint32 value)
3712 W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value);
/* Doorbell for legacy PCIe cores: read-modify-write the cached mailbox
 * interrupt register, setting PCIE_INTB while preserving the other bits.
 * 'value' is ignored on this path. */
3716 dhd_bus_ringbell_oldpcie(struct dhd_bus *bus, uint32 value)
3719 w = (R_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr) & ~PCIE_INTB) | PCIE_INTB;
3720 W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, w);
/*
 * Select the doorbell implementation for this PCIe core revision and cache
 * the direct register address + osh handle for it. Legacy cores (2/4/6)
 * get the read-modify-write variant; newer cores get the fast direct-write
 * variant. Falls back to the generic dhd_bus_ringbell (via si_corereg)
 * when no direct address could be resolved.
 */
3724 dhd_bus_get_mbintr_fn(struct dhd_bus *bus)
3726 if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
3727 (bus->sih->buscorerev == 4)) {
3728 bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
3730 if (bus->pcie_mb_intr_addr) {
3731 bus->pcie_mb_intr_osh = si_osh(bus->sih);
3732 return dhd_bus_ringbell_oldpcie;
3735 bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
3737 if (bus->pcie_mb_intr_addr) {
3738 bus->pcie_mb_intr_osh = si_osh(bus->sih);
3739 return dhd_bus_ringbell_fast;
3742 return dhd_bus_ringbell;
/*
 * Deferred procedure call (bottom half) for the PCIe interrupt.
 * Merges freshly latched interrupt status with any carried-over status,
 * acks it, and dispatches mailbox/message processing. Legacy cores
 * (rev 2/4/6) read/ack status via config space and react to I_MB; newer
 * cores use the PCIMailBoxInt core register masked by def_intmask.
 * Returns 'resched' (TRUE when more work remains and the DPC should run
 * again). Interrupts are re-enabled on exit unless the bus went down.
 */
3746 dhd_bus_dpc(struct dhd_bus *bus)
3748 uint32 intstatus = 0;
3749 uint32 newstatus = 0;
3750 bool resched = FALSE; /* Flag indicating resched wanted */
3752 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3754 if (bus->dhd->busstate == DHD_BUS_DOWN) {
3755 DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
/* Pick up status the ISR (or a previous pass) already captured */
3760 intstatus = bus->intstatus;
3762 if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
3763 (bus->sih->buscorerev == 2)) {
3764 newstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
/* Writing the status back acks (clears) the latched bits */
3765 dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, newstatus);
3766 /* Merge new bits with previous */
3767 intstatus |= newstatus;
3769 if (intstatus & I_MB) {
3770 resched = dhdpcie_bus_process_mailbox_intr(bus, intstatus);
3773 /* this is a PCIE core register..not a config register... */
3774 newstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
3775 intstatus |= (newstatus & bus->def_intmask);
3776 si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, newstatus, newstatus);
3777 if (intstatus & bus->def_intmask) {
3778 resched = dhdpcie_bus_process_mailbox_intr(bus, intstatus);
3779 intstatus &= ~bus->def_intmask;
3784 // terence 20150420: no need to enable interrupt if busstate is down
3785 if (bus->dhd->busstate) {
3786 dhdpcie_bus_intr_enable(bus);
/*
 * Post a host-to-dongle mailbox word (HTOD_MB_DATA in shared memory) and
 * ring the mailbox interrupt. If a previous mailbox word is still pending
 * (dongle hasn't consumed it), busy-wait up to ~100 iterations for it to
 * clear before overwriting.
 */
3795 dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data)
3797 uint32 cur_h2d_mb_data = 0;
3799 dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, HTOD_MB_DATA, 0);
3801 if (cur_h2d_mb_data != 0) {
3803 DHD_INFO(("%s: GRRRRRRR: MB transaction is already pending 0x%04x\n", __FUNCTION__, cur_h2d_mb_data));
3804 while ((i++ < 100) && cur_h2d_mb_data) {
3806 dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, HTOD_MB_DATA, 0);
/* NOTE(review): proceeds to overwrite even if the dongle never acked */
3809 DHD_ERROR(("%s: waited 1ms for the dngl to ack the previous mb transaction\n", __FUNCTION__));
3812 dhd_bus_cmn_writeshared(bus, &h2d_mb_data, sizeof(uint32), HTOD_MB_DATA, 0);
3813 dhd_bus_gen_devmb_intr(bus);
3815 if (h2d_mb_data == H2D_HOST_D3_INFORM)
3816 DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__));
/*
 * Consume and act on a dongle-to-host mailbox word (DTOH_MB_DATA):
 *  - DS_ENTER_REQ: acknowledge the dongle's deep-sleep request.
 *  - DS_EXIT_NOTE: informational only.
 *  - D3_ACK: complete the suspend handshake, waking the waiter in
 *    dhdpcie_bus_suspend().
 *  - FWHALT: firmware trapped; collect crash info and mark the bus down.
 * The word is cleared in shared memory before being processed.
 */
3820 dhdpcie_handle_mb_data(dhd_bus_t *bus)
3822 uint32 d2h_mb_data = 0;
3824 dhd_bus_cmn_readshared(bus, &d2h_mb_data, DTOH_MB_DATA, 0);
/* Zero the mailbox so the dongle may post the next event */
3828 dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), DTOH_MB_DATA, 0);
3830 DHD_INFO(("%s: D2H_MB_DATA: 0x%04x\n", __FUNCTION__, d2h_mb_data));
3831 if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) {
3832 /* what should we do */
3833 DHD_INFO(("%s: D2H_MB_DATA: DEEP SLEEP REQ\n", __FUNCTION__));
3834 dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
3835 DHD_INFO(("%s: D2H_MB_DATA: sent DEEP SLEEP ACK\n", __FUNCTION__));
3837 if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE) {
3838 /* what should we do */
3839 DHD_INFO(("%s: D2H_MB_DATA: DEEP SLEEP EXIT\n", __FUNCTION__));
3841 if (d2h_mb_data & D2H_DEV_D3_ACK) {
3842 /* what should we do */
3843 DHD_INFO_HW4(("%s D2H_MB_DATA: Received D3 ACK\n", __FUNCTION__));
3844 if (!bus->wait_for_d3_ack) {
3845 bus->wait_for_d3_ack = 1;
3846 dhd_os_ioctl_resp_wake(bus->dhd);
3849 if (d2h_mb_data & D2H_DEV_FWHALT) {
3850 DHD_INFO(("%s: FW trap has happened\n", __FUNCTION__));
3852 dhdpcie_checkdied(bus, NULL, 0);
3854 bus->dhd->busstate = DHD_BUS_DOWN;
/*
 * Route a mailbox-class interrupt to the right handler.
 * Legacy cores (rev 2/4/6): I_BIT1 = message-stream interrupt (read frames),
 * I_BIT0 is currently ignored. Newer cores: FN0 mailbox bits trigger D2H
 * mailbox processing; D2H ring bits trigger frame processing, but frame
 * reads are skipped while the bus is suspended. Returns TRUE when frame
 * processing wants the DPC rescheduled.
 */
3859 dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus)
3861 bool resched = FALSE;
3863 if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
3864 (bus->sih->buscorerev == 4)) {
3865 /* Msg stream interrupt */
3866 if (intstatus & I_BIT1) {
3867 resched = dhdpci_bus_read_frames(bus);
3868 } else if (intstatus & I_BIT0) {
3869 /* do nothing for Now */
3873 if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1))
3874 dhdpcie_handle_mb_data(bus);
/* Don't touch rings while suspended; D3 handshake owns the bus */
3876 if (bus->dhd->busstate == DHD_BUS_SUSPEND) {
3880 if (intstatus & PCIE_MB_D2H_MB_MASK) {
3881 resched = dhdpci_bus_read_frames(bus);
3888 /* Decode dongle to host message stream */
/*
 * Drain dongle-to-host message rings under the perimeter lock:
 * control completions first (with a lock release in between so ioctl
 * responses can be delivered), then TX flow-ring updates, then TX and RX
 * completions bounded by dhd_txbound/dhd_rxbound. Returns 'more' -- TRUE
 * when a bound was hit and the caller should reschedule.
 */
3890 dhdpci_bus_read_frames(dhd_bus_t *bus)
3894 /* There may be frames in both ctrl buf and data buf; check ctrl buf first */
3895 DHD_PERIM_LOCK(bus->dhd); /* Take the perimeter lock */
3896 dhd_prot_process_ctrlbuf(bus->dhd);
3897 /* Unlock to give chance for resp to be handled */
3898 DHD_PERIM_UNLOCK(bus->dhd); /* Release the perimeter lock */
3900 DHD_PERIM_LOCK(bus->dhd); /* Take the perimeter lock */
3901 /* update the flow ring cpls */
3902 dhd_update_txflowrings(bus->dhd);
3904 /* With heavy TX traffic, we could get a lot of TxStatus
3907 more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound);
3909 /* With heavy RX traffic, this routine potentially could spend some time
3910 * processing RX frames without RX bound
3912 more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound);
3913 DHD_PERIM_UNLOCK(bus->dhd); /* Release the perimeter lock */
/*
 * dhdpcie_readshared: locate the pciedev_shared_t structure that the dongle
 * publishes at the top of its RAM, copy it to the host, byte-swap its fields,
 * and initialize host-side state from it (console address, DMA rx offset,
 * DMA'd read/write index blocks, ring info and mailbox pointer addresses).
 * Returns a BCME_xxx status code -- presumably BCME_OK on success; several
 * error-path lines are not visible in this view, so confirm exact codes.
 */
3919 dhdpcie_readshared(dhd_bus_t *bus)
3922 int rv, w_init, r_init;
3924 pciedev_shared_t *sh = bus->pcie_sh;
/* The dongle posts the address of pciedev_shared in the last 32-bit word
 * of its RAM once firmware is up.
 */
3927 shaddr = bus->dongle_ram_base + bus->ramsize - 4;
3928 /* start a timer for 5 seconds */
3929 dhd_timeout_start(&tmo, MAX_READ_TIMEOUT);
/* Poll the last RAM word until it holds something other than 0 or the
 * nvram checksum left behind by the download, or until the timeout fires.
 */
3931 while (((addr == 0) || (addr == bus->nvram_csm)) && !dhd_timeout_expired(&tmo)) {
3932 /* Read last word in memory to determine address of sdpcm_shared structure */
3933 addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
/* Validate the address: non-zero, not the leftover nvram checksum, and
 * within dongle RAM bounds (upper-bound check is on an elided line).
 */
3936 if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
3938 DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n",
3939 __FUNCTION__, addr));
3940 DHD_ERROR(("%s: Waited %u usec, dongle is not ready\n", __FUNCTION__, tmo.elapsed));
3943 bus->shared_addr = (ulong)addr;
3944 DHD_ERROR(("%s: PCIe shared addr read took %u usec "
3945 "before dongle is ready\n", __FUNCTION__, tmo.elapsed));
3948 /* Read hndrte_shared structure */
3949 if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)sh,
3950 sizeof(pciedev_shared_t))) < 0) {
3951 DHD_ERROR(("%s: Failed to read PCIe shared struct,"
3952 "size read %d < %d\n", __FUNCTION__, rv, (int)sizeof(pciedev_shared_t)));
/* Convert the shared-struct fields from dongle (little-endian) to host order */
3957 sh->flags = ltoh32(sh->flags);
3958 sh->trap_addr = ltoh32(sh->trap_addr);
3959 sh->assert_exp_addr = ltoh32(sh->assert_exp_addr);
3960 sh->assert_file_addr = ltoh32(sh->assert_file_addr);
3961 sh->assert_line = ltoh32(sh->assert_line);
3962 sh->console_addr = ltoh32(sh->console_addr);
3963 sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
3964 sh->dma_rxoffset = ltoh32(sh->dma_rxoffset);
3965 sh->rings_info_ptr = ltoh32(sh->rings_info_ptr);
3966 /* load bus console address */
3969 bus->console_addr = sh->console_addr;
3972 /* Read the dma rx offset */
3973 bus->dma_rxoffset = bus->pcie_sh->dma_rxoffset;
3974 dhd_prot_rx_dataoffset(bus->dhd, bus->dma_rxoffset);
3976 DHD_ERROR(("%s: DMA RX offset from shared Area %d\n", __FUNCTION__, bus->dma_rxoffset));
/* Fail if the dongle speaks a newer shared-structure revision than this host */
3978 if ((sh->flags & PCIE_SHARED_VERSION_MASK) > PCIE_SHARED_VERSION) {
3979 DHD_ERROR(("%s: pcie_shared version %d in dhd "
3980 "is older than pciedev_shared version %d in dongle\n",
3981 __FUNCTION__, PCIE_SHARED_VERSION,
3982 sh->flags & PCIE_SHARED_VERSION_MASK));
/* TX-push mode (firmware drains a single push ring) exists from rev 4 on */
3985 if ((sh->flags & PCIE_SHARED_VERSION_MASK) >= 4) {
3986 if (sh->flags & PCIE_SHARED_TXPUSH_SPRT) {
3987 #ifdef DHDTCPACK_SUPPRESS
3988 /* Do not use tcpack suppress as packets don't stay in queue */
3989 dhd_tcpack_suppress_set(bus->dhd, TCPACK_SUP_OFF);
3991 bus->txmode_push = TRUE;
3993 bus->txmode_push = FALSE;
3995 DHD_ERROR(("%s: bus->txmode_push is set to %d\n", __FUNCTION__, bus->txmode_push));
3997 /* Does the FW support DMA'ing r/w indices */
3998 if (sh->flags & PCIE_SHARED_DMA_INDEX) {
4000 DHD_ERROR(("%s: Host support DMAing indices: H2D:%d - D2H:%d. FW supports it\n",
4002 (DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support) ? 1 : 0),
4003 (DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support) ? 1 : 0)));
/* Host wants DMA'd indices but FW cannot do it: fall back to TCM indices */
4005 } else if (DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support) ||
4006 DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support)) {
4009 DHD_ERROR(("%s: Incompatible FW. FW does not support DMAing indices\n",
4013 DHD_ERROR(("%s: Host supports DMAing indices but FW does not\n",
4015 bus->dhd->dma_d2h_ring_upd_support = FALSE;
4016 bus->dhd->dma_h2d_ring_upd_support = FALSE;
4020 /* get ring_info, ring_state and mb data ptrs and store the addresses in bus structure */
4022 ring_info_t ring_info;
/* Pull the ring_info block the firmware placed at sh->rings_info_ptr */
4024 if ((rv = dhdpcie_bus_membytes(bus, FALSE, sh->rings_info_ptr,
4025 (uint8 *)&ring_info, sizeof(ring_info_t))) < 0)
/* TCM addresses of the host<->dongle mailbox data words */
4028 bus->h2d_mb_data_ptr_addr = ltoh32(sh->h2d_mb_data_ptr);
4029 bus->d2h_mb_data_ptr_addr = ltoh32(sh->d2h_mb_data_ptr);
4032 bus->max_sub_queues = ltoh16(ring_info.max_sub_queues);
4034 /* If both FW and Host support DMA'ing indices, allocate memory and notify FW
4035 * The max_sub_queues is read from FW initialized ring_info
4037 if (DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support)) {
4038 w_init = dhd_prot_init_index_dma_block(bus->dhd,
4039 HOST_TO_DNGL_DMA_WRITEINDX_BUFFER,
4040 bus->max_sub_queues);
4041 r_init = dhd_prot_init_index_dma_block(bus->dhd,
4042 DNGL_TO_HOST_DMA_READINDX_BUFFER,
4043 BCMPCIE_D2H_COMMON_MSGRINGS);
/* On any allocation failure, disable H2D index DMA and use TCM indices */
4045 if ((w_init != BCME_OK) || (r_init != BCME_OK)) {
4046 DHD_ERROR(("%s: Failed to allocate memory for dma'ing h2d indices"
4047 "Host will use w/r indices in TCM\n",
4049 bus->dhd->dma_h2d_ring_upd_support = FALSE;
4053 if (DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support)) {
4054 w_init = dhd_prot_init_index_dma_block(bus->dhd,
4055 DNGL_TO_HOST_DMA_WRITEINDX_BUFFER,
4056 BCMPCIE_D2H_COMMON_MSGRINGS);
4057 r_init = dhd_prot_init_index_dma_block(bus->dhd,
4058 HOST_TO_DNGL_DMA_READINDX_BUFFER,
4059 bus->max_sub_queues);
/* Same fallback for the D2H direction */
4061 if ((w_init != BCME_OK) || (r_init != BCME_OK)) {
4062 DHD_ERROR(("%s: Failed to allocate memory for dma'ing d2h indices"
4063 "Host will use w/r indices in TCM\n",
4065 bus->dhd->dma_d2h_ring_upd_support = FALSE;
4069 /* read ringmem and ringstate ptrs from shared area and store in host variables */
4070 dhd_fillup_ring_sharedptr_info(bus, &ring_info);
4072 bcm_print_bytes("ring_info_raw", (uchar *)&ring_info, sizeof(ring_info_t));
4073 DHD_INFO(("%s: ring_info\n", __FUNCTION__));
4075 DHD_ERROR(("%s: max H2D queues %d\n", __FUNCTION__, ltoh16(ring_info.max_sub_queues)));
4077 DHD_INFO(("%s: mail box address\n", __FUNCTION__));
4078 DHD_INFO(("%s: h2d_mb_data_ptr_addr 0x%04x\n", __FUNCTION__, bus->h2d_mb_data_ptr_addr));
4079 DHD_INFO(("%s: d2h_mb_data_ptr_addr 0x%04x\n", __FUNCTION__, bus->d2h_mb_data_ptr_addr));
/* Remember which D2H completion-sync scheme (seqnum/xorcsum/none) FW selected */
4082 bus->dhd->d2h_sync_mode = sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK;
4083 DHD_INFO(("%s: d2h_sync_mode 0x%08x\n", __FUNCTION__, bus->dhd->d2h_sync_mode))
4087 /* Read ring mem and ring state ptr info from shared area in TCM */
/*
 * dhd_fillup_ring_sharedptr_info: walk the firmware-provided ring_info and
 * record, per ring, the TCM addresses of its ring_mem_t descriptor and of its
 * write/read index words in bus->ring_sh[].  Common rings come first, then
 * either the single TX-push ring or the per-flow TX rings.
 */
4089 dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info)
4094 uint32 d2h_w_idx_ptr, d2h_r_idx_ptr, h2d_w_idx_ptr, h2d_r_idx_ptr;
4096 /* Ring mem ptr info */
4097 /* Allocated in the order
4098 H2D_MSGRING_CONTROL_SUBMIT 0
4099 H2D_MSGRING_RXPOST_SUBMIT 1
4100 D2H_MSGRING_CONTROL_COMPLETE 2
4101 D2H_MSGRING_TX_COMPLETE 3
4102 D2H_MSGRING_RX_COMPLETE 4
4107 /* ringmemptr holds start of the mem block address space */
4108 tcm_memloc = ltoh32(ring_info->ringmem_ptr);
4110 /* Find out ringmem ptr for each ring common ring */
4111 for (i = 0; i <= BCMPCIE_COMMON_MSGRING_MAX_ID; i++) {
4112 bus->ring_sh[i].ring_mem_addr = tcm_memloc;
4113 /* Update mem block */
4114 tcm_memloc = tcm_memloc + sizeof(ring_mem_t);
4115 DHD_INFO(("%s: ring id %d ring mem addr 0x%04x \n", __FUNCTION__,
4116 i, bus->ring_sh[i].ring_mem_addr));
/* In TX-push mode there is one extra ring descriptor right after the
 * common rings; `i` continues from the loop above.
 */
4120 if (bus->txmode_push) {
4121 bus->ring_sh[i].ring_mem_addr = tcm_memloc;
4122 DHD_INFO(("%s: TX ring ring id %d ring mem addr 0x%04x \n", __FUNCTION__,
4123 i, bus->ring_sh[i].ring_mem_addr));
4127 /* Ring state mem ptr info */
4129 d2h_w_idx_ptr = ltoh32(ring_info->d2h_w_idx_ptr);
4130 d2h_r_idx_ptr = ltoh32(ring_info->d2h_r_idx_ptr);
4131 h2d_w_idx_ptr = ltoh32(ring_info->h2d_w_idx_ptr);
4132 h2d_r_idx_ptr = ltoh32(ring_info->h2d_r_idx_ptr);
4133 /* Store h2d common ring write/read pointers */
4134 for (i = 0; i < BCMPCIE_H2D_COMMON_MSGRINGS; i++) {
4135 bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
4136 bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
4138 /* update mem block */
4139 h2d_w_idx_ptr = h2d_w_idx_ptr + sizeof(uint32);
4140 h2d_r_idx_ptr = h2d_r_idx_ptr + sizeof(uint32);
4142 DHD_INFO(("%s: h2d w/r : idx %d write %x read %x \n", __FUNCTION__, i,
4143 bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
4145 /* Store d2h common ring write/read pointers */
4146 for (j = 0; j < BCMPCIE_D2H_COMMON_MSGRINGS; j++, i++) {
4147 bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
4148 bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
4150 /* update mem block */
4151 d2h_w_idx_ptr = d2h_w_idx_ptr + sizeof(uint32);
4152 d2h_r_idx_ptr = d2h_r_idx_ptr + sizeof(uint32);
4154 DHD_INFO(("%s: d2h w/r : idx %d write %x read %x \n", __FUNCTION__, i,
4155 bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
4158 /* Store txflow ring write/read pointers */
/* TX-push mode: single extra ring; otherwise one entry per flow ring,
 * up to max_sub_queues minus the common H2D rings.
 */
4159 if (bus->txmode_push) {
4160 bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
4161 bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
4163 DHD_INFO(("%s: txflow : idx %d write %x read %x \n", __FUNCTION__, i,
4164 bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
4166 for (j = 0; j < (bus->max_sub_queues - BCMPCIE_H2D_COMMON_MSGRINGS);
4169 bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
4170 bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
4172 /* update mem block */
4173 h2d_w_idx_ptr = h2d_w_idx_ptr + sizeof(uint32);
4174 h2d_r_idx_ptr = h2d_r_idx_ptr + sizeof(uint32);
4176 DHD_INFO(("%s: FLOW Rings h2d w/r : idx %d write %x read %x \n",
4178 bus->ring_sh[i].ring_state_w,
4179 bus->ring_sh[i].ring_state_r))
4185 /* Initialize bus module: prepare for communication w/dongle */
/*
 * dhd_bus_init: bring the bus to the DATA state after firmware download.
 * Verifies the shared area written by the dongle, selects the PCIE2 core,
 * marks the bus up and enables device interrupts.  Returns a status code
 * (error paths are on elided lines; presumably BCME_ERROR on readshared
 * failure -- confirm against the full source).  `enforce_mutex` is unused
 * in the visible portion.
 */
4186 int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
4188 dhd_bus_t *bus = dhdp->bus;
4191 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4197 /* Make sure we're talking to the core. */
4198 bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
4199 ASSERT(bus->reg != NULL);
4201 /* before opening up bus for data transfer, check if shared area is intact */
4202 ret = dhdpcie_readshared(bus);
4204 DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
4209 /* Make sure we're talking to the core. */
4210 bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
4211 ASSERT(bus->reg != NULL);
4213 /* Set bus state according to enable result */
4214 dhdp->busstate = DHD_BUS_DATA;
4216 /* Enable the interrupt after device is up */
4217 dhdpcie_bus_intr_enable(bus);
4219 /* bcmsdh_intr_unmask(bus->sdh); */
/*
 * dhdpcie_init_shared_addr: overwrite the last word of dongle RAM (the slot
 * where the dongle later posts the pciedev_shared address) so that a stale
 * value from a previous run cannot be mistaken for a fresh one.
 * NOTE(review): `val` is declared/initialized on an elided line -- presumably
 * zero; confirm against the full source.
 */
4227 dhdpcie_init_shared_addr(dhd_bus_t *bus)
4231 addr = bus->dongle_ram_base + bus->ramsize - 4;
4232 dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val));
/*
 * dhdpcie_chipmatch: decide whether a PCI vendor/device ID pair is a
 * supported Broadcom WLAN chip.  Rejects non-Broadcom vendors outright,
 * then accepts each known chip family via its D11AC / D11AC2G / D11AC5G
 * interface IDs or its chip ID.  Return values are on elided lines
 * (conventionally 0 for a match, negative otherwise -- confirm).
 */
4237 dhdpcie_chipmatch(uint16 vendor, uint16 device)
4239 if (vendor != PCI_VENDOR_ID_BROADCOM) {
4240 DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__,
/* 4350 family uses a range macro rather than a single chip ID */
4245 if ((device == BCM4350_D11AC_ID) || (device == BCM4350_D11AC2G_ID) ||
4246 (device == BCM4350_D11AC5G_ID) || BCM4350_CHIP(device))
4249 if ((device == BCM4354_D11AC_ID) || (device == BCM4354_D11AC2G_ID) ||
4250 (device == BCM4354_D11AC5G_ID) || (device == BCM4354_CHIP_ID))
4253 if ((device == BCM4356_D11AC_ID) || (device == BCM4356_D11AC2G_ID) ||
4254 (device == BCM4356_D11AC5G_ID) || (device == BCM4356_CHIP_ID))
4257 if ((device == BCM4345_D11AC_ID) || (device == BCM4345_D11AC2G_ID) ||
4258 (device == BCM4345_D11AC5G_ID) || (device == BCM4345_CHIP_ID))
4261 if ((device == BCM4335_D11AC_ID) || (device == BCM4335_D11AC2G_ID) ||
4262 (device == BCM4335_D11AC5G_ID) || (device == BCM4335_CHIP_ID))
4265 if ((device == BCM43602_D11AC_ID) || (device == BCM43602_D11AC2G_ID) ||
4266 (device == BCM43602_D11AC5G_ID) || (device == BCM43602_CHIP_ID))
4269 if ((device == BCM43569_D11AC_ID) || (device == BCM43569_D11AC2G_ID) ||
4270 (device == BCM43569_D11AC5G_ID) || (device == BCM43569_CHIP_ID))
4273 if ((device == BCM4358_D11AC_ID) || (device == BCM4358_D11AC2G_ID) ||
4274 (device == BCM4358_D11AC5G_ID) || (device == BCM4358_CHIP_ID))
4277 if ((device == BCM4349_D11AC_ID) || (device == BCM4349_D11AC2G_ID) ||
4278 (device == BCM4349_D11AC5G_ID) || (device == BCM4349_CHIP_ID))
4280 if ((device == BCM4355_D11AC_ID) || (device == BCM4355_D11AC2G_ID) ||
4281 (device == BCM4355_D11AC5G_ID) || (device == BCM4355_CHIP_ID))
4283 if ((device == BCM4359_D11AC_ID) || (device == BCM4359_D11AC2G_ID) ||
4284 (device == BCM4359_D11AC5G_ID) || (device == BCM4359_CHIP_ID))
/* Known Broadcom vendor but unrecognized device ID */
4288 DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, vendor, device));
4295 Name: dhdpcie_cc_nvmshadow
4298 A shadow of OTP/SPROM exists in ChipCommon Region
4299 between 0x800 and 0xBFF (Backplane Addr. 0x1800_0800 and 0x1800_0BFF).
4300 Strapping option (SPROM vs. OTP), presence of OTP/SPROM and its size
4301 can also be read from ChipCommon Registers.
/*
 * Dumps the OTP/SPROM shadow from ChipCommon into the caller's bcmstrbuf.
 * Requires ChipCommon core rev >= 44 and a 4350/4345 chip.  Switches the
 * backplane to the ChipCommon core for the duration and restores the
 * original core before returning.  Returns BCME_UNSUPPORTED / BCME_NOTFOUND /
 * BCME_NOTREADY on the respective failures.
 */
4305 dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b)
4307 uint16 dump_offset = 0;
4308 uint32 dump_size = 0, otp_size = 0, sprom_size = 0;
4310 /* Table for 65nm OTP Size (in bits) */
4311 int otp_size_65nm[8] = {0, 2048, 4096, 8192, 4096, 6144, 512, 1024};
4313 volatile uint16 *nvm_shadow;
4317 chipcregs_t *chipcregs;
4320 /* Save the current core */
4321 cur_coreid = si_coreid(bus->sih);
4322 /* Switch to ChipC */
4323 chipcregs = (chipcregs_t *)si_setcore(bus->sih, CC_CORE_ID, 0);
4324 chipc_corerev = si_corerev(bus->sih);
4326 /* Check ChipcommonCore Rev */
4327 if (chipc_corerev < 44) {
4328 DHD_ERROR(("%s: ChipcommonCore Rev %d < 44\n", __FUNCTION__, chipc_corerev));
4329 return BCME_UNSUPPORTED;
/* Only 4350 and 4345 have been validated for this dump path */
4333 if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) &&
4334 ((uint16)bus->sih->chip != BCM4345_CHIP_ID)) {
4335 DHD_ERROR(("%s: cc_nvmdump cmd. supported for 4350/4345 only\n",
4337 return BCME_UNSUPPORTED;
4340 /* Check if SRC_PRESENT in SpromCtrl(0x190 in ChipCommon Regs) is set */
4341 if (chipcregs->sromcontrol & SRC_PRESENT) {
4342 /* SPROM Size: 1Kbits (0x0), 4Kbits (0x1), 16Kbits(0x2) */
4343 sprom_size = (1 << (2 * ((chipcregs->sromcontrol & SRC_SIZE_MASK)
4344 >> SRC_SIZE_SHIFT))) * 1024;
4345 bcm_bprintf(b, "\nSPROM Present (Size %d bits)\n", sprom_size);
4348 if (chipcregs->sromcontrol & SRC_OTPPRESENT) {
4349 bcm_bprintf(b, "\nOTP Present");
4351 if (((chipcregs->otplayout & OTPL_WRAP_TYPE_MASK) >> OTPL_WRAP_TYPE_SHIFT)
4352 == OTPL_WRAP_TYPE_40NM) {
4353 /* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */
4354 otp_size = (((chipcregs->capabilities & CC_CAP_OTPSIZE)
4355 >> CC_CAP_OTPSIZE_SHIFT) + 1) * 1024;
4356 bcm_bprintf(b, "(Size %d bits)\n", otp_size);
4358 /* This part is untested since newer chips have 40nm OTP */
4359 otp_size = otp_size_65nm[(chipcregs->capabilities & CC_CAP_OTPSIZE)
4360 >> CC_CAP_OTPSIZE_SHIFT];
4361 bcm_bprintf(b, "(Size %d bits)\n", otp_size);
4362 DHD_INFO(("%s: 65nm/130nm OTP Size not tested. \n",
/* Neither SPROM present nor any OTP capacity reported */
4367 if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
4368 ((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) {
4369 DHD_ERROR(("%s: SPROM and OTP could not be found \n",
4371 return BCME_NOTFOUND;
4374 /* Check the strapping option in SpromCtrl: Set = OTP otherwise SPROM */
4375 if ((chipcregs->sromcontrol & SRC_OTPSEL) &&
4376 (chipcregs->sromcontrol & SRC_OTPPRESENT)) {
4378 bcm_bprintf(b, "OTP Strap selected.\n"
4379 "\nOTP Shadow in ChipCommon:\n");
4381 dump_size = otp_size / 16 ; /* 16bit words */
4383 } else if (((chipcregs->sromcontrol & SRC_OTPSEL) == 0) &&
4384 (chipcregs->sromcontrol & SRC_PRESENT)) {
4386 bcm_bprintf(b, "SPROM Strap selected\n"
4387 "\nSPROM Shadow in ChipCommon:\n");
4389 /* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */
4390 /* dump_size in 16bit words */
/* NOTE(review): sprom_size is in bits (>= 1024 above), so `sprom_size > 8`
 * is always true and dump_size is always (8*1024)/16; the intent per the
 * comment above looks like `sprom_size > 8 * 1024` -- confirm upstream.
 */
4391 dump_size = sprom_size > 8 ? (8 * 1024) / 16 : sprom_size / 16;
4394 DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n",
4396 return BCME_NOTFOUND;
4399 if (bus->regs == NULL) {
4400 DHD_ERROR(("ChipCommon Regs. not initialized\n"));
4401 return BCME_NOTREADY;
4403 bcm_bprintf(b, "\n OffSet:");
4405 /* Point to the SPROM/OTP shadow in ChipCommon */
4406 nvm_shadow = chipcregs->sromotp;
4409 * Read 16 bits / iteration.
4410 * dump_size & dump_offset in 16-bit words
4412 while (dump_offset < dump_size) {
4413 if (dump_offset % 2 == 0)
4414 /* Print the offset in the shadow space in Bytes */
4415 bcm_bprintf(b, "\n 0x%04x", dump_offset * 2);
4417 bcm_bprintf(b, "\t0x%04x", *(nvm_shadow + dump_offset));
4422 /* Switch back to the original core */
4423 si_setcore(bus->sih, cur_coreid, 0);
/* Accessor: TRUE when the bus is operating in single-ring TX-push mode. */
4430 dhd_bus_is_txmode_push(dhd_bus_t *bus)
4432 return bus->txmode_push;
/*
 * dhd_bus_clean_flow_ring: tear down a flow ring node -- drop any queued
 * packets, mark the ring CLOSED/inactive and unlink it (under the flowring
 * lock), then release protocol-layer resources and free the flow ID.
 * `node` is a flow_ring_node_t passed as void* to keep the bus API generic.
 */
4435 void dhd_bus_clean_flow_ring(dhd_bus_t *bus, void *node)
4438 flow_queue_t *queue;
4439 flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)node;
4440 unsigned long flags;
4442 queue = &flow_ring_node->queue;
4444 #ifdef DHDTCPACK_SUPPRESS
4445 /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
4446 * when there is a newly coming packet from network stack.
4448 dhd_tcpack_info_tbl_clean(bus->dhd);
4449 #endif /* DHDTCPACK_SUPPRESS */
4451 /* clean up BUS level info */
4452 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
4454 /* Flush all pending packets in the queue, if any */
4455 while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
4456 PKTFREE(bus->dhd->osh, pkt, TRUE);
4458 ASSERT(flow_queue_empty(queue));
4460 flow_ring_node->status = FLOW_RING_STATUS_CLOSED;
4461 flow_ring_node->active = FALSE;
4462 dll_delete(&flow_ring_node->list);
4464 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4466 /* Call Flow ring clean up */
4467 dhd_prot_clean_flow_ring(bus->dhd, flow_ring_node->prot_info);
4468 dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex,
4469 flow_ring_node->flowid);
4474 * Allocate a Flow ring buffer,
4476 * Send Msg to device about flow ring creation
/*
 * dhd_bus_flow_ring_create_request: ask the dongle to create the flow ring
 * described by `arg` (a flow_ring_node_t).  Error/success returns are on
 * elided lines; completion arrives asynchronously via
 * dhd_bus_flow_ring_create_response().
 */
4479 dhd_bus_flow_ring_create_request(dhd_bus_t *bus, void *arg)
4481 flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;
4483 DHD_INFO(("%s :Flow create\n", __FUNCTION__));
4485 /* Send Msg to device about flow ring creation */
4486 if (dhd_prot_flow_ring_create(bus->dhd, flow_ring_node) != BCME_OK)
/*
 * dhd_bus_flow_ring_create_response: completion handler for a flow ring
 * create request.  On failure, tears the ring down; on success, marks the
 * ring OPEN (under the flowring lock) and kicks the TX queue scheduler so
 * packets queued while the ring was pending get sent.
 */
4493 dhd_bus_flow_ring_create_response(dhd_bus_t *bus, uint16 flowid, int32 status)
4495 flow_ring_node_t *flow_ring_node;
4496 unsigned long flags;
4498 DHD_INFO(("%s :Flow Response %d \n", __FUNCTION__, flowid));
4500 flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
4501 ASSERT(flow_ring_node->flowid == flowid);
4503 if (status != BCME_OK) {
4504 DHD_ERROR(("%s Flow create Response failure error status = %d \n",
4505 __FUNCTION__, status));
4506 /* Call Flow clean up */
4507 dhd_bus_clean_flow_ring(bus, flow_ring_node);
4511 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
4512 flow_ring_node->status = FLOW_RING_STATUS_OPEN;
4513 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
/* FALSE: not called from the tx completion path */
4515 dhd_bus_schedule_queue(bus, flowid, FALSE);
/*
 * dhd_bus_flow_ring_delete_request: start deletion of a flow ring.  Bails
 * out if a delete is already pending; otherwise marks the ring
 * DELETE_PENDING, drops all queued packets, and sends the delete message to
 * the dongle.  Completion arrives via dhd_bus_flow_ring_delete_response().
 */
4521 dhd_bus_flow_ring_delete_request(dhd_bus_t *bus, void *arg)
4524 flow_queue_t *queue;
4525 flow_ring_node_t *flow_ring_node;
4526 unsigned long flags;
4528 DHD_INFO(("%s :Flow Delete\n", __FUNCTION__));
4530 flow_ring_node = (flow_ring_node_t *)arg;
4532 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
/* Duplicate delete request: nothing to do */
4533 if (flow_ring_node->status & FLOW_RING_STATUS_DELETE_PENDING) {
4534 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4535 DHD_ERROR(("%s :Delete Pending\n", __FUNCTION__));
4538 flow_ring_node->status = FLOW_RING_STATUS_DELETE_PENDING;
4540 queue = &flow_ring_node->queue; /* queue associated with flow ring */
4542 #ifdef DHDTCPACK_SUPPRESS
4543 /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
4544 * when there is a newly coming packet from network stack.
4546 dhd_tcpack_info_tbl_clean(bus->dhd);
4547 #endif /* DHDTCPACK_SUPPRESS */
4548 /* Flush all pending packets in the queue, if any */
4549 while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
4550 PKTFREE(bus->dhd->osh, pkt, TRUE);
4552 ASSERT(flow_queue_empty(queue));
4554 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4556 /* Send Msg to device about flow ring deletion */
4557 dhd_prot_flow_ring_delete(bus->dhd, flow_ring_node);
/*
 * dhd_bus_flow_ring_delete_response: completion handler for a flow ring
 * delete.  On success, finishes the teardown via dhd_bus_clean_flow_ring();
 * on failure it only logs (the early-return is on an elided line).
 */
4563 dhd_bus_flow_ring_delete_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
4565 flow_ring_node_t *flow_ring_node;
4567 DHD_INFO(("%s :Flow Delete Response %d \n", __FUNCTION__, flowid));
4569 flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
4570 ASSERT(flow_ring_node->flowid == flowid);
4572 if (status != BCME_OK) {
4573 DHD_ERROR(("%s Flow Delete Response failure error status = %d \n",
4574 __FUNCTION__, status));
4577 /* Call Flow clean up */
4578 dhd_bus_clean_flow_ring(bus, flow_ring_node);
/*
 * dhd_bus_flow_ring_flush_request: flush a flow ring without deleting it --
 * drop all host-queued packets, tell the dongle to flush its side, and mark
 * the ring FLUSH_PENDING until dhd_bus_flow_ring_flush_response() arrives.
 * NOTE(review): the DHD_INFO log string says "Flow Delete" -- looks
 * copy-pasted from the delete path (runtime string left untouched here).
 * NOTE(review): status is set to FLUSH_PENDING after the lock is released
 * and after the flush message is sent -- confirm ordering is intentional.
 */
4584 int dhd_bus_flow_ring_flush_request(dhd_bus_t *bus, void *arg)
4587 flow_queue_t *queue;
4588 flow_ring_node_t *flow_ring_node;
4589 unsigned long flags;
4591 DHD_INFO(("%s :Flow Delete\n", __FUNCTION__));
4593 flow_ring_node = (flow_ring_node_t *)arg;
4594 queue = &flow_ring_node->queue; /* queue associated with flow ring */
4596 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
4598 #ifdef DHDTCPACK_SUPPRESS
4599 /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
4600 * when there is a newly coming packet from network stack.
4602 dhd_tcpack_info_tbl_clean(bus->dhd);
4603 #endif /* DHDTCPACK_SUPPRESS */
4604 /* Flush all pending packets in the queue, if any */
4605 while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
4606 PKTFREE(bus->dhd->osh, pkt, TRUE);
4608 ASSERT(flow_queue_empty(queue));
4610 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4612 /* Send Msg to device about flow ring flush */
4613 dhd_prot_flow_ring_flush(bus->dhd, flow_ring_node);
4615 flow_ring_node->status = FLOW_RING_STATUS_FLUSH_PENDING;
/*
 * dhd_bus_flow_ring_flush_response: completion handler for a flow ring
 * flush.  On failure only logs (early return on an elided line); on success
 * returns the ring to the OPEN state.
 */
4620 dhd_bus_flow_ring_flush_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
4622 flow_ring_node_t *flow_ring_node;
4624 if (status != BCME_OK) {
4625 DHD_ERROR(("%s Flow flush Response failure error status = %d \n",
4626 __FUNCTION__, status));
4630 flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
4631 ASSERT(flow_ring_node->flowid == flowid);
4633 flow_ring_node->status = FLOW_RING_STATUS_OPEN;
/*
 * dhd_bus_max_h2d_queues: report the number of host-to-dongle submission
 * queues; also reports TX-push mode via *txpush (the txmode_push branch body
 * is on elided lines -- presumably sets *txpush and returns a fixed count;
 * confirm against the full source).
 */
4638 dhd_bus_max_h2d_queues(struct dhd_bus *bus, uint8 *txpush)
4640 if (bus->txmode_push)
4644 return bus->max_sub_queues;
/* Thin wrapper: start the host-side PCIe clock for this bus. */
4648 dhdpcie_bus_clock_start(struct dhd_bus *bus)
4650 return dhdpcie_start_host_pcieclock(bus);
/* Thin wrapper: stop the host-side PCIe clock for this bus. */
4654 dhdpcie_bus_clock_stop(struct dhd_bus *bus)
4656 return dhdpcie_stop_host_pcieclock(bus);
/* Thin wrapper: disable the PCIe device (OS-layer dhdpcie_disable_device). */
4660 dhdpcie_bus_disable_device(struct dhd_bus *bus)
4662 return dhdpcie_disable_device(bus);
/* Thin wrapper: enable the PCIe device (OS-layer dhdpcie_enable_device). */
4666 dhdpcie_bus_enable_device(struct dhd_bus *bus)
4668 return dhdpcie_enable_device(bus);
/* Thin wrapper: allocate OS-layer PCIe resources (BARs, mappings, etc.). */
4672 dhdpcie_bus_alloc_resource(struct dhd_bus *bus)
4674 return dhdpcie_alloc_resource(bus);
/* Thin wrapper: release the resources acquired by dhdpcie_bus_alloc_resource(). */
4678 dhdpcie_bus_free_resource(struct dhd_bus *bus)
4680 dhdpcie_free_resource(bus);
/* Thin wrapper: register the bus interrupt handler with the OS layer. */
4684 dhd_bus_request_irq(struct dhd_bus *bus)
4686 return dhdpcie_bus_request_irq(bus);
/* Thin wrapper: (re)attach the dongle to this bus instance. */
4690 dhdpcie_bus_dongle_attach(struct dhd_bus *bus)
4692 return dhdpcie_dongle_attach(bus);
/*
 * dhd_bus_release_dongle: release the dongle, honoring the configured
 * dongle-isolation setting (when isolated, the dongle is not reset on
 * release).  Final TRUE requests bus reset on detach -- confirm meaning
 * against dhdpcie_bus_release_dongle(); guard conditions are elided here.
 */
4696 dhd_bus_release_dongle(struct dhd_bus *bus)
4698 bool dongle_isolation;
4701 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4708 dongle_isolation = bus->dhd->dongle_isolation;
4709 dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
#ifdef BCMPCIE_OOB_HOST_WAKE
/* Out-of-band host-wake interrupt plumbing: each function forwards to the
 * OS-specific dhdpcie_oob_intr_* implementation for dhdp->bus.
 */
/* Register the OOB host-wake interrupt handler. */
4717 int dhd_bus_oob_intr_register(dhd_pub_t *dhdp)
4719 return dhdpcie_oob_intr_register(dhdp->bus);
/* Unregister the OOB host-wake interrupt handler. */
4722 void dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp)
4724 dhdpcie_oob_intr_unregister(dhdp->bus);
/* Enable or disable the OOB host-wake interrupt. */
4727 void dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable)
4729 dhdpcie_oob_intr_set(dhdp->bus, enable);
4731 #endif /* BCMPCIE_OOB_HOST_WAKE */