net: wifi: rockchip: update broadcom drivers for kernel4.4
[firefly-linux-kernel-4.4.55.git] / drivers / net / wireless / rockchip_wlan / rkwifi / bcmdhd / dhd_pcie.c
1 /*
2  * DHD Bus Module for PCIE
3  *
4  * Copyright (C) 1999-2016, Broadcom Corporation
5  * 
6  *      Unless you and Broadcom execute a separate written software license
7  * agreement governing use of this software, this software is licensed to you
8  * under the terms of the GNU General Public License version 2 (the "GPL"),
9  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10  * following added to such license:
11  * 
12  *      As a special exception, the copyright holders of this software give you
13  * permission to link this software with independent modules, and to copy and
14  * distribute the resulting executable under terms of your choice, provided that
15  * you also meet, for each linked independent module, the terms and conditions of
16  * the license of that module.  An independent module is a module which is not
17  * derived from this software.  The special exception does not apply to any
18  * modifications of the software.
19  * 
20  *      Notwithstanding the above, under no circumstances may you combine this
21  * software in any way with any other Broadcom software provided under a license
22  * other than the GPL, without Broadcom's express prior written consent.
23  *
24  *
25  * <<Broadcom-WL-IPTag/Open:>>
26  *
27  * $Id: dhd_pcie.c 609007 2015-12-30 07:44:52Z $
28  */
29
30
31 /* include files */
32 #include <typedefs.h>
33 #include <bcmutils.h>
34 #include <bcmdevs.h>
35 #include <siutils.h>
36 #include <hndsoc.h>
37 #include <hndpmu.h>
38 #include <sbchipc.h>
39 #include <hnd_armtrap.h>
40 #if defined(DHD_DEBUG)
41 #include <hnd_cons.h>
42 #endif /* defined(DHD_DEBUG) */
43 #include <dngl_stats.h>
44 #include <pcie_core.h>
45 #include <dhd.h>
46 #include <dhd_bus.h>
47 #include <dhd_flowring.h>
48 #include <dhd_proto.h>
49 #include <dhd_dbg.h>
50 #include <dhdioctl.h>
51 #include <sdiovar.h>
52 #include <bcmmsgbuf.h>
53 #include <pcicfg.h>
54 #include <dhd_pcie.h>
55 #include <bcmpcie.h>
56 #include <bcmendian.h>
57 #ifdef DHDTCPACK_SUPPRESS
58 #include <dhd_ip.h>
59 #endif /* DHDTCPACK_SUPPRESS */
60 #include <dhd_config.h>
61
62 #ifdef BCMEMBEDIMAGE
63 #include BCMEMBEDIMAGE
64 #endif /* BCMEMBEDIMAGE */
65
66 #ifdef PCIE_OOB
67 #include "ftdi_sio_external.h"
68 #endif /* PCIE_OOB */
69
70 #define MEMBLOCK        2048            /* Block size used for downloading of dongle image */
71 #define MAX_WKLK_IDLE_CHECK     3       /* times wake_lock checked before deciding not to suspend */
72
73 #define ARMCR4REG_BANKIDX       (0x40/sizeof(uint32))
74 #define ARMCR4REG_BANKPDA       (0x4C/sizeof(uint32))
75 /* Temporary war to fix precommit till sync issue between trunk & precommit branch is resolved */
76
77 #if defined(SUPPORT_MULTIPLE_BOARD_REV)
78         extern unsigned int system_rev;
79 #endif /* SUPPORT_MULTIPLE_BOARD_REV */
80
81 int dhd_dongle_memsize;
82 int dhd_dongle_ramsize;
83 static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size);
84 #ifdef DHD_DEBUG
85 static int dhdpcie_bus_readconsole(dhd_bus_t *bus);
86 #endif /* DHD_DEBUG */
87 #if defined(DHD_FW_COREDUMP)
88 static int dhdpcie_mem_dump(dhd_bus_t *bus);
89 #endif /* DHD_FW_COREDUMP */
90
91 static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size);
92 static int dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid,
93         const char *name, void *params,
94         int plen, void *arg, int len, int val_size);
95 static int dhdpcie_bus_lpback_req(struct  dhd_bus *bus, uint32 intval);
96 static int dhdpcie_bus_dmaxfer_req(struct  dhd_bus *bus,
97         uint32 len, uint32 srcdelay, uint32 destdelay);
98 static int dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter);
99 static int _dhdpcie_download_firmware(struct dhd_bus *bus);
100 static int dhdpcie_download_firmware(dhd_bus_t *bus, osl_t *osh);
101 static int dhdpcie_bus_write_vars(dhd_bus_t *bus);
102 static bool dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus);
103 static bool dhdpci_bus_read_frames(dhd_bus_t *bus);
104 static int dhdpcie_readshared(dhd_bus_t *bus);
105 static void dhdpcie_init_shared_addr(dhd_bus_t *bus);
106 static bool dhdpcie_dongle_attach(dhd_bus_t *bus);
107 static void dhdpcie_bus_dongle_setmemsize(dhd_bus_t *bus, int mem_size);
108 static void dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh,
109         bool dongle_isolation, bool reset_flag);
110 static void dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh);
111 static int dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len);
112 static uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset);
113 static void dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data);
114 static void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data);
115 static uint16 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset);
116 static void dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data);
117 static uint32 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset);
118 static void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data);
119 static uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset);
120 static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data);
121 static void dhdpcie_bus_reg_unmap(osl_t *osh, ulong addr, int size);
122 static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b);
123 static void dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data);
124 static void dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info);
125 extern void dhd_dpc_kill(dhd_pub_t *dhdp);
126
127 #ifdef BCMEMBEDIMAGE
128 static int dhdpcie_download_code_array(dhd_bus_t *bus);
129 #endif /* BCMEMBEDIMAGE */
130
131
132 #ifdef EXYNOS_PCIE_DEBUG
133 extern void exynos_pcie_register_dump(int ch_num);
134 #endif /* EXYNOS_PCIE_DEBUG */
135
136 #define     PCI_VENDOR_ID_BROADCOM          0x14e4
137
138 static void dhd_bus_set_device_wake(struct dhd_bus *bus, bool val);
139 extern void wl_nddbg_wpp_log(const char *format, ...);
140 #ifdef PCIE_OOB
141 static void dhd_bus_doorbell_timeout_reset(struct dhd_bus *bus);
142
143 #define DHD_DEFAULT_DOORBELL_TIMEOUT 200        /* ms */
144 static uint dhd_doorbell_timeout = DHD_DEFAULT_DOORBELL_TIMEOUT;
145
146 #define HOST_WAKE 4   /* GPIO_0 (HOST_WAKE) - Output from WLAN */
147 #define DEVICE_WAKE 5  /* GPIO_1 (DEVICE_WAKE) - Input to WLAN */
148 #define BIT_WL_REG_ON 6
149 #define BIT_BT_REG_ON 7
150
151 int gpio_handle_val = 0;
152 unsigned char gpio_port = 0;
153 unsigned char gpio_direction = 0;
154 #define OOB_PORT "ttyUSB0"
155 #endif /* PCIE_OOB */
156 static bool dhdpcie_check_firmware_compatible(uint32 f_api_version, uint32 h_api_version);
157
158 /* IOVar table */
159 enum {
160         IOV_INTR = 1,
161         IOV_MEMBYTES,
162         IOV_MEMSIZE,
163         IOV_SET_DOWNLOAD_STATE,
164         IOV_DEVRESET,
165         IOV_VARS,
166         IOV_MSI_SIM,
167         IOV_PCIE_LPBK,
168         IOV_CC_NVMSHADOW,
169         IOV_RAMSIZE,
170         IOV_RAMSTART,
171         IOV_SLEEP_ALLOWED,
172         IOV_PCIE_DMAXFER,
173         IOV_PCIE_SUSPEND,
174         IOV_PCIEREG,
175         IOV_PCIECFGREG,
176         IOV_PCIECOREREG,
177         IOV_PCIESERDESREG,
178         IOV_BAR0_SECWIN_REG,
179         IOV_SBREG,
180         IOV_DONGLEISOLATION,
181         IOV_LTRSLEEPON_UNLOOAD,
182         IOV_METADATA_DBG,
183         IOV_RX_METADATALEN,
184         IOV_TX_METADATALEN,
185         IOV_TXP_THRESHOLD,
186         IOV_BUZZZ_DUMP,
187         IOV_DUMP_RINGUPD_BLOCK,
188         IOV_DMA_RINGINDICES,
189         IOV_DB1_FOR_MB,
190         IOV_FLOW_PRIO_MAP,
191 #ifdef DHD_PCIE_RUNTIMEPM
192         IOV_IDLETIME,
193 #endif /* DHD_PCIE_RUNTIMEPM */
194         IOV_RXBOUND,
195         IOV_TXBOUND,
196         IOV_HANGREPORT,
197 #ifdef PCIE_OOB
198         IOV_OOB_BT_REG_ON,
199         IOV_OOB_ENABLE
200 #endif /* PCIE_OOB */
201 };
202
203
204 const bcm_iovar_t dhdpcie_iovars[] = {
205         {"intr",        IOV_INTR,       0,      IOVT_BOOL,      0 },
206         {"membytes",    IOV_MEMBYTES,   0,      IOVT_BUFFER,    2 * sizeof(int) },
207         {"memsize",     IOV_MEMSIZE,    0,      IOVT_UINT32,    0 },
208         {"dwnldstate",  IOV_SET_DOWNLOAD_STATE, 0,      IOVT_BOOL,      0 },
209         {"vars",        IOV_VARS,       0,      IOVT_BUFFER,    0 },
210         {"devreset",    IOV_DEVRESET,   0,      IOVT_BOOL,      0 },
211         {"pcie_lpbk",   IOV_PCIE_LPBK,  0,      IOVT_UINT32,    0 },
212         {"cc_nvmshadow", IOV_CC_NVMSHADOW, 0, IOVT_BUFFER, 0 },
213         {"ramsize",     IOV_RAMSIZE,    0,      IOVT_UINT32,    0 },
214         {"ramstart",    IOV_RAMSTART,   0,      IOVT_UINT32,    0 },
215         {"pciereg",     IOV_PCIEREG,    0,      IOVT_BUFFER,    2 * sizeof(int32) },
216         {"pciecfgreg",  IOV_PCIECFGREG, 0,      IOVT_BUFFER,    2 * sizeof(int32) },
217         {"pciecorereg", IOV_PCIECOREREG,        0,      IOVT_BUFFER,    2 * sizeof(int32) },
218         {"pcieserdesreg",       IOV_PCIESERDESREG,      0,      IOVT_BUFFER,    3 * sizeof(int32) },
219         {"bar0secwinreg",       IOV_BAR0_SECWIN_REG,    0,      IOVT_BUFFER,    sizeof(sdreg_t) },
220         {"sbreg",       IOV_SBREG,      0,      IOVT_BUFFER,    sizeof(sdreg_t) },
221         {"pcie_dmaxfer",        IOV_PCIE_DMAXFER,       0,      IOVT_BUFFER,    3 * sizeof(int32) },
222         {"pcie_suspend", IOV_PCIE_SUSPEND,      0,      IOVT_UINT32,    0 },
223 #ifdef PCIE_OOB
224         {"oob_bt_reg_on", IOV_OOB_BT_REG_ON,    0,  IOVT_UINT32,    0 },
225         {"oob_enable",   IOV_OOB_ENABLE,    0,  IOVT_UINT32,    0 },
226 #endif /* PCIE_OOB */
227         {"sleep_allowed",       IOV_SLEEP_ALLOWED,      0,      IOVT_BOOL,      0 },
228         {"dngl_isolation", IOV_DONGLEISOLATION, 0,      IOVT_UINT32,    0 },
229         {"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD,  0,      IOVT_UINT32,    0 },
230         {"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK,     0,      IOVT_BUFFER,    0 },
231         {"dma_ring_indices", IOV_DMA_RINGINDICES,       0,      IOVT_UINT32,    0},
232         {"metadata_dbg", IOV_METADATA_DBG,      0,      IOVT_BOOL,      0 },
233         {"rx_metadata_len", IOV_RX_METADATALEN, 0,      IOVT_UINT32,    0 },
234         {"tx_metadata_len", IOV_TX_METADATALEN, 0,      IOVT_UINT32,    0 },
235         {"db1_for_mb", IOV_DB1_FOR_MB,  0,      IOVT_UINT32,    0 },
236         {"txp_thresh", IOV_TXP_THRESHOLD,       0,      IOVT_UINT32,    0 },
237         {"buzzz_dump", IOV_BUZZZ_DUMP,          0,      IOVT_UINT32,    0 },
238         {"flow_prio_map", IOV_FLOW_PRIO_MAP,    0,      IOVT_UINT32,    0 },
239 #ifdef DHD_PCIE_RUNTIMEPM
240         {"idletime",    IOV_IDLETIME,   0,      IOVT_INT32,     0 },
241 #endif /* DHD_PCIE_RUNTIMEPM */
242         {"rxbound",     IOV_RXBOUND,    0,      IOVT_UINT32,    0 },
243         {"txbound",     IOV_TXBOUND,    0,      IOVT_UINT32,    0 },
244         {"fw_hang_report", IOV_HANGREPORT,      0,      IOVT_BOOL,      0 },
245         {NULL, 0, 0, 0, 0 }
246 };
247
248
249 #define MAX_READ_TIMEOUT        5 * 1000 * 1000
250
251 #ifndef DHD_RXBOUND
252 #define DHD_RXBOUND             64
253 #endif
254 #ifndef DHD_TXBOUND
255 #define DHD_TXBOUND             64
256 #endif
257 uint dhd_rxbound = DHD_RXBOUND;
258 uint dhd_txbound = DHD_TXBOUND;
259
260 /* Register/Unregister functions are called by the main DHD entry
261  * point (e.g. module insertion) to link with the bus driver, in
262  * order to look for or await the device.
263  */
264
265 int
266 dhd_bus_register(void)
267 {
268         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
269
270         return dhdpcie_bus_register();
271 }
272
273 void
274 dhd_bus_unregister(void)
275 {
276         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
277
278         dhdpcie_bus_unregister();
279         return;
280 }
281
282
283 /** returns a host virtual address */
284 uint32 *
285 dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size)
286 {
287         return (uint32 *)REG_MAP(addr, size);
288 }
289
290 void
291 dhdpcie_bus_reg_unmap(osl_t *osh, ulong addr, int size)
292 {
293         REG_UNMAP((void*)(uintptr)addr);
294         return;
295 }
296
297 /**
298  * 'regs' is the host virtual address that maps to the start of the PCIe BAR0 window. The first 4096
299  * bytes in this window are mapped to the backplane address in the PCIEBAR0Window register. The
300  * precondition is that the PCIEBAR0Window register 'points' at the PCIe core.
301  *
302  * 'tcm' is the *host* virtual address at which tcm is mapped.
303  */
304 dhd_bus_t* dhdpcie_bus_attach(osl_t *osh,
305         volatile char *regs, volatile char *tcm, void *pci_dev)
306 {
307         dhd_bus_t *bus;
308
309         DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
310
311         do {
312                 if (!(bus = MALLOCZ(osh, sizeof(dhd_bus_t)))) {
313                         DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
314                         break;
315                 }
316
317                 bus->regs = regs;
318                 bus->tcm = tcm;
319                 bus->osh = osh;
320                 /* Save pci_dev into dhd_bus, as it may be needed in dhd_attach */
321                 bus->dev = (struct pci_dev *)pci_dev;
322
323                 dll_init(&bus->const_flowring);
324
325                 /* Attach pcie shared structure */
326                 if (!(bus->pcie_sh = MALLOCZ(osh, sizeof(pciedev_shared_t)))) {
327                         DHD_ERROR(("%s: MALLOC of bus->pcie_sh failed\n", __FUNCTION__));
328                         break;
329                 }
330
331                 /* dhd_common_init(osh); */
332
333                 if (dhdpcie_dongle_attach(bus)) {
334                         DHD_ERROR(("%s: dhdpcie_probe_attach failed\n", __FUNCTION__));
335                         break;
336                 }
337
338                 /* software resources */
339                 if (!(bus->dhd = dhd_attach(osh, bus, PCMSGBUF_HDRLEN))) {
340                         DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
341
342                         break;
343                 }
344                 bus->dhd->busstate = DHD_BUS_DOWN;
345                 bus->db1_for_mb = TRUE;
346                 bus->dhd->hang_report = TRUE;
347                 bus->irq_registered = FALSE;
348
349                 bus->d3_ack_war_cnt = 0;
350
351                 DHD_TRACE(("%s: EXIT SUCCESS\n",
352                         __FUNCTION__));
353
354                 return bus;
355         } while (0);
356
357         DHD_TRACE(("%s: EXIT FAILURE\n", __FUNCTION__));
358
359         if (bus && bus->pcie_sh) {
360                 MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
361         }
362
363         if (bus) {
364                 MFREE(osh, bus, sizeof(dhd_bus_t));
365         }
366         return NULL;
367 }
368
369 uint
370 dhd_bus_chip(struct dhd_bus *bus)
371 {
372         ASSERT(bus->sih != NULL);
373         return bus->sih->chip;
374 }
375
376 uint
377 dhd_bus_chiprev(struct dhd_bus *bus)
378 {
379         ASSERT(bus);
380         ASSERT(bus->sih != NULL);
381         return bus->sih->chiprev;
382 }
383
384 void *
385 dhd_bus_pub(struct dhd_bus *bus)
386 {
387         return bus->dhd;
388 }
389
390 void *
391 dhd_bus_sih(struct dhd_bus *bus)
392 {
393         return (void *)bus->sih;
394 }
395
396 void *
397 dhd_bus_txq(struct dhd_bus *bus)
398 {
399         return &bus->txq;
400 }
401
402 /** Get Chip ID version */
403 uint dhd_bus_chip_id(dhd_pub_t *dhdp)
404 {
405         dhd_bus_t *bus = dhdp->bus;
406         return  bus->sih->chip;
407 }
408
409 /** Get Chip Rev ID version */
410 uint dhd_bus_chiprev_id(dhd_pub_t *dhdp)
411 {
412         dhd_bus_t *bus = dhdp->bus;
413         return bus->sih->chiprev;
414 }
415
416 /** Get Chip Pkg ID version */
417 uint dhd_bus_chippkg_id(dhd_pub_t *dhdp)
418 {
419         dhd_bus_t *bus = dhdp->bus;
420         return bus->sih->chippkg;
421 }
422
423 /** Read and clear intstatus. This should be called with interupts disabled or inside isr */
424 uint32
425 dhdpcie_bus_intstatus(dhd_bus_t *bus)
426 {
427         uint32 intstatus = 0;
428         uint32 intmask = 0;
429
430         if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
431                 (bus->sih->buscorerev == 2)) {
432                 intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
433                 dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus);
434                 intstatus &= I_MB;
435         } else {
436                 /* this is a PCIE core register..not a config register... */
437                 intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
438
439                 /* this is a PCIE core register..not a config register... */
440                 intmask = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask, 0, 0);
441
442                 /*
443                  * The fourth argument to si_corereg is the "mask" fields of the register to update
444                  * and the fifth field is the "value" to update. Now if we are interested in only
445                  * few fields of the "mask" bit map, we should not be writing back what we read
446                  * By doing so, we might clear/ack interrupts that are not handled yet.
447                  */
448                 si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, bus->def_intmask,
449                         intstatus);
450
451                 intstatus &= intmask;
452
453                 /* Is device removed. intstatus & intmask read 0xffffffff */
454                 if (intstatus == (uint32)-1) {
455                         DHD_ERROR(("%s: !!!!!!Device Removed or dead chip.\n", __FUNCTION__));
456                         intstatus = 0;
457 #ifdef CUSTOMER_HW4_DEBUG
458 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
459                         bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
460                         dhd_os_send_hang_message(bus->dhd);
461 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
462 #endif /* CUSTOMER_HW4_DEBUG */
463                 }
464
465                 intstatus &= bus->def_intmask;
466         }
467
468         return intstatus;
469 }
470
471 /**
472  * Name:  dhdpcie_bus_isr
473  * Parameters:
474  * 1: IN int irq   -- interrupt vector
475  * 2: IN void *arg      -- handle to private data structure
476  * Return value:
477  * Status (TRUE or FALSE)
478  *
479  * Description:
480  * Interrupt Service routine checks for the status register,
481  * disable interrupt and queue DPC if mail box interrupts are raised.
482  */
483 int32
484 dhdpcie_bus_isr(dhd_bus_t *bus)
485 {
486         uint32 intstatus = 0;
487
488         do {
489                 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
490                 /* verify argument */
491                 if (!bus) {
492                         DHD_ERROR(("%s : bus is null pointer, exit \n", __FUNCTION__));
493                         break;
494                 }
495
496                 if (bus->dhd->dongle_reset) {
497                         break;
498                 }
499
500                 if (bus->dhd->busstate == DHD_BUS_DOWN) {
501                         DHD_ERROR(("%s: BUS is down, not processing the interrupt \r\n",
502                                 __FUNCTION__));
503                         break;
504                 }
505
506                 intstatus = dhdpcie_bus_intstatus(bus);
507
508                 /* Check if the interrupt is ours or not */
509                 if (intstatus == 0) {
510                         break;
511                 }
512
513                 /* save the intstatus */
514                 bus->intstatus = intstatus;
515
516                 /*  Overall operation:
517                  *    - Mask further interrupts
518                  *    - Read/ack intstatus
519                  *    - Take action based on bits and state
520                  *    - Reenable interrupts (as per state)
521                  */
522
523                 /* Count the interrupt call */
524                 bus->intrcount++;
525
526                 /* read interrupt status register!! Status bits will be cleared in DPC !! */
527                 bus->ipend = TRUE;
528                 dhdpcie_bus_intr_disable(bus); /* Disable interrupt!! */
529                 bus->intdis = TRUE;
530
531 #if defined(PCIE_ISR_THREAD)
532
533                 DHD_TRACE(("Calling dhd_bus_dpc() from %s\n", __FUNCTION__));
534                 DHD_OS_WAKE_LOCK(bus->dhd);
535                 while (dhd_bus_dpc(bus));
536                 DHD_OS_WAKE_UNLOCK(bus->dhd);
537 #else
538                 bus->dpc_sched = TRUE;
539                 dhd_sched_dpc(bus->dhd);     /* queue DPC now!! */
540 #endif /* defined(SDIO_ISR_THREAD) */
541
542                 DHD_TRACE(("%s: Exit Success DPC Queued\n", __FUNCTION__));
543                 return TRUE;
544
545         } while (0);
546
547         DHD_TRACE(("%s: Exit Failure\n", __FUNCTION__));
548         return FALSE;
549 }
550
551 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
552 dhd_pub_t *link_recovery = NULL;
553 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
554 static bool
555 dhdpcie_dongle_attach(dhd_bus_t *bus)
556 {
557
558         osl_t *osh = bus->osh;
559         void *regsva = (void*)bus->regs;
560         uint16 devid = bus->cl_devid;
561         uint32 val;
562         sbpcieregs_t *sbpcieregs;
563
564         DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
565
566 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
567         link_recovery = bus->dhd;
568 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
569
570         bus->alp_only = TRUE;
571         bus->sih = NULL;
572
573         /* Set bar0 window to si_enum_base */
574         dhdpcie_bus_cfg_set_bar0_win(bus, SI_ENUM_BASE);
575
576         /* Checking PCIe bus status with reading configuration space */
577         val = OSL_PCI_READ_CONFIG(osh, PCI_CFG_VID, sizeof(uint32));
578         if ((val & 0xFFFF) != VENDOR_BROADCOM) {
579                 DHD_ERROR(("%s : failed to read PCI configuration space!\n", __FUNCTION__));
580                 goto fail;
581         }
582
583         /* si_attach() will provide an SI handle and scan the backplane */
584         if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus,
585                                    &bus->vars, &bus->varsz))) {
586                 DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
587                 goto fail;
588         }
589
590
591         si_setcore(bus->sih, PCIE2_CORE_ID, 0);
592         sbpcieregs = (sbpcieregs_t*)(bus->regs);
593
594         /* WAR where the BAR1 window may not be sized properly */
595         W_REG(osh, &sbpcieregs->configaddr, 0x4e0);
596         val = R_REG(osh, &sbpcieregs->configdata);
597         W_REG(osh, &sbpcieregs->configdata, val);
598
599         /* Get info on the ARM and SOCRAM cores... */
600         /* Should really be qualified by device id */
601         if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
602             (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
603             (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) ||
604             (si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
605                 bus->armrev = si_corerev(bus->sih);
606         } else {
607                 DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
608                 goto fail;
609         }
610
611         if (si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
612                 if (!(bus->orig_ramsize = si_sysmem_size(bus->sih))) {
613                         DHD_ERROR(("%s: failed to find SYSMEM memory!\n", __FUNCTION__));
614                         goto fail;
615                 }
616                 /* also populate base address */
617                 bus->dongle_ram_base = CA7_4365_RAM_BASE;
618         } else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
619                 if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
620                         DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
621                         goto fail;
622                 }
623         } else {
624                 /* cr4 has a different way to find the RAM size from TCM's */
625                 if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) {
626                         DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__));
627                         goto fail;
628                 }
629                 /* also populate base address */
630                 switch ((uint16)bus->sih->chip) {
631                 case BCM4339_CHIP_ID:
632                 case BCM4335_CHIP_ID:
633                         bus->dongle_ram_base = CR4_4335_RAM_BASE;
634                         break;
635                 case BCM4358_CHIP_ID:
636                 case BCM4356_CHIP_ID:
637                 case BCM4354_CHIP_ID:
638                 case BCM43567_CHIP_ID:
639                 case BCM43569_CHIP_ID:
640                 case BCM4350_CHIP_ID:
641                 case BCM43570_CHIP_ID:
642                         bus->dongle_ram_base = CR4_4350_RAM_BASE;
643                         break;
644                 case BCM4360_CHIP_ID:
645                         bus->dongle_ram_base = CR4_4360_RAM_BASE;
646                         break;
647                 CASE_BCM4345_CHIP:
648                         bus->dongle_ram_base = (bus->sih->chiprev < 6)  /* changed at 4345C0 */
649                                 ? CR4_4345_LT_C0_RAM_BASE : CR4_4345_GE_C0_RAM_BASE;
650                         break;
651                 CASE_BCM43602_CHIP:
652                         bus->dongle_ram_base = CR4_43602_RAM_BASE;
653                         break;
654                 case BCM4349_CHIP_GRPID:
655                         /* RAM base changed from 4349c0(revid=9) onwards */
656                         bus->dongle_ram_base = ((bus->sih->chiprev < 9) ?
657                         CR4_4349_RAM_BASE : CR4_4349_RAM_BASE_FROM_REV_9);
658                         break;
659                 default:
660                         bus->dongle_ram_base = 0;
661                         DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
662                                    __FUNCTION__, bus->dongle_ram_base));
663                 }
664         }
665         bus->ramsize = bus->orig_ramsize;
666         if (dhd_dongle_memsize)
667                 dhdpcie_bus_dongle_setmemsize(bus, dhd_dongle_memsize);
668
669         DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n",
670                    bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base));
671
672         bus->srmemsize = si_socram_srmem_size(bus->sih);
673
674
675         bus->def_intmask = PCIE_MB_D2H_MB_MASK | PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1;
676
677         /* Set the poll and/or interrupt flags */
678         bus->intr = (bool)dhd_intr;
679
680         bus->wait_for_d3_ack = 1;
681         bus->suspended = FALSE;
682
683 #ifdef PCIE_OOB
684         gpio_handle_val = get_handle(OOB_PORT);
685         if (gpio_handle_val < 0)
686         {
687                 DHD_ERROR(("%s: Could not get GPIO handle.\n", __FUNCTION__));
688                 ASSERT(FALSE);
689         }
690
691         gpio_direction = 0;
692         ftdi_set_bitmode(gpio_handle_val, 0, BITMODE_BITBANG);
693
694         /* Note BT core is also enabled here */
695         gpio_port = 1 << BIT_WL_REG_ON | 1 << BIT_BT_REG_ON | 1 << DEVICE_WAKE;
696         gpio_write_port(gpio_handle_val, gpio_port);
697
698         gpio_direction = 1 << BIT_WL_REG_ON | 1 << BIT_BT_REG_ON | 1 << DEVICE_WAKE;
699         ftdi_set_bitmode(gpio_handle_val, gpio_direction, BITMODE_BITBANG);
700
701         bus->oob_enabled = TRUE;
702
703         /* drive the Device_Wake GPIO low on startup */
704         bus->device_wake_state = TRUE;
705         dhd_bus_set_device_wake(bus, FALSE);
706         dhd_bus_doorbell_timeout_reset(bus);
707 #endif /* PCIE_OOB */
708
709         DHD_TRACE(("%s: EXIT: SUCCESS\n", __FUNCTION__));
710         return 0;
711
712 fail:
713         if (bus->sih != NULL) {
714                 si_detach(bus->sih);
715                 bus->sih = NULL;
716         }
717         DHD_TRACE(("%s: EXIT: FAILURE\n", __FUNCTION__));
718         return -1;
719 }
720
721 int
722 dhpcie_bus_unmask_interrupt(dhd_bus_t *bus)
723 {
724         dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, I_MB);
725         return 0;
726 }
727 int
728 dhpcie_bus_mask_interrupt(dhd_bus_t *bus)
729 {
730         dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, 0x0);
731         return 0;
732 }
733
734 void
735 dhdpcie_bus_intr_enable(dhd_bus_t *bus)
736 {
737         DHD_TRACE(("%s: enable interrupts\n", __FUNCTION__));
738         if (bus && bus->sih && !bus->is_linkdown) {
739                 if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
740                         (bus->sih->buscorerev == 4)) {
741                         dhpcie_bus_unmask_interrupt(bus);
742                 } else {
743                 si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask,
744                         bus->def_intmask, bus->def_intmask);
745                 }
746         } else {
747                 DHD_ERROR(("****** %s: failed ******\n", __FUNCTION__));
748                 DHD_ERROR(("bus: %p sih: %p bus->is_linkdown %d\n",
749                                 bus, bus ? bus->sih : NULL, bus ? bus->is_linkdown: -1));
750         }
751 }
752
753 void
754 dhdpcie_bus_intr_disable(dhd_bus_t *bus)
755 {
756
757         DHD_TRACE(("%s Enter\n", __FUNCTION__));
758
759         if (bus && bus->sih && !bus->is_linkdown) {
760                 if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
761                         (bus->sih->buscorerev == 4)) {
762                         dhpcie_bus_mask_interrupt(bus);
763                 } else {
764                         si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask,
765                                 bus->def_intmask, 0);
766                 }
767         } else {
768                 DHD_ERROR(("****** %s: failed ******\n", __FUNCTION__));
769                 DHD_ERROR(("bus: %p sih: %p bus->is_linkdown %d\n",
770                                 bus, bus ? bus->sih : NULL, bus ? bus->is_linkdown: -1));
771         }
772
773         DHD_TRACE(("%s Exit\n", __FUNCTION__));
774 }
775
776 /*
777  *  dhdpcie_advertise_bus_cleanup advertises that clean up is under progress
778  * to other bus user contexts like Tx, Rx, IOVAR, WD etc and it waits for other contexts
779  * to gracefully exit. All the bus usage contexts before marking busstate as busy, will check for
780  * whether the busstate is DHD_BUS_DOWN or DHD_BUS_DOWN_IN_PROGRESS, if so
781  * they will exit from there itself without marking dhd_bus_busy_state as BUSY.
782  */
783 static void
784 dhdpcie_advertise_bus_cleanup(dhd_pub_t  *dhdp)
785 {
786         unsigned long flags;
787         int timeleft;
788
789         DHD_GENERAL_LOCK(dhdp, flags);
790         dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS;
791         DHD_GENERAL_UNLOCK(dhdp, flags);
792
793         timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
794         if (timeleft == 0) {
795                 DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
796                                 __FUNCTION__, dhdp->dhd_bus_busy_state));
797                 BUG_ON(1);
798         }
799
800         return;
801 }
802
803 static void
804 dhdpcie_bus_remove_prep(dhd_bus_t *bus)
805 {
806         unsigned long flags;
807         DHD_TRACE(("%s Enter\n", __FUNCTION__));
808
809         DHD_GENERAL_LOCK(bus->dhd, flags);
810         bus->dhd->busstate = DHD_BUS_DOWN;
811         DHD_GENERAL_UNLOCK(bus->dhd, flags);
812
813         dhd_os_sdlock(bus->dhd);
814
815         dhdpcie_bus_intr_disable(bus);
816         if (!bus->dhd->dongle_isolation) {
817                 pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *)(bus->regs));
818         }
819
820         dhd_os_sdunlock(bus->dhd);
821
822         DHD_TRACE(("%s Exit\n", __FUNCTION__));
823 }
824
825 /** Detach and free everything */
826 void
827 dhdpcie_bus_release(dhd_bus_t *bus)
828 {
829         bool dongle_isolation = FALSE;
830         osl_t *osh = NULL;
831
832         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
833
834         if (bus) {
835
836                 osh = bus->osh;
837                 ASSERT(osh);
838
839                 if (bus->dhd) {
840                         dhdpcie_advertise_bus_cleanup(bus->dhd);
841                         dongle_isolation = bus->dhd->dongle_isolation;
842                         dhdpcie_bus_remove_prep(bus);
843
844                         if (bus->intr) {
845                                 dhdpcie_bus_intr_disable(bus);
846                                 dhdpcie_free_irq(bus);
847                         }
848                         dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
849                         dhd_detach(bus->dhd);
850                         dhd_free(bus->dhd);
851                         bus->dhd = NULL;
852                 }
853
854                 /* unmap the regs and tcm here!! */
855                 if (bus->regs) {
856                         dhdpcie_bus_reg_unmap(osh, (ulong)bus->regs, DONGLE_REG_MAP_SIZE);
857                         bus->regs = NULL;
858                 }
859                 if (bus->tcm) {
860                         dhdpcie_bus_reg_unmap(osh, (ulong)bus->tcm, DONGLE_TCM_MAP_SIZE);
861                         bus->tcm = NULL;
862                 }
863
864                 dhdpcie_bus_release_malloc(bus, osh);
865                 /* Detach pcie shared structure */
866                 if (bus->pcie_sh) {
867                         MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
868                         bus->pcie_sh = NULL;
869                 }
870
871 #ifdef DHD_DEBUG
872
873                 if (bus->console.buf != NULL)
874                         MFREE(osh, bus->console.buf, bus->console.bufsize);
875 #endif
876
877
878                 /* Finally free bus info */
879                 MFREE(osh, bus, sizeof(dhd_bus_t));
880
881         }
882
883         DHD_TRACE(("%s: Exit\n", __FUNCTION__));
884 } /* dhdpcie_bus_release */
885
886
887 void
888 dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
889 {
890         DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__,
891                 bus->dhd, bus->dhd->dongle_reset));
892
893         if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag) {
894                 DHD_TRACE(("%s Exit\n", __FUNCTION__));
895                 return;
896         }
897
898         if (bus->sih) {
899
900                 if (!dongle_isolation)
901                         pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *)(bus->regs));
902
903                 if (bus->ltrsleep_on_unload) {
904                         si_corereg(bus->sih, bus->sih->buscoreidx,
905                                 OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0);
906                 }
907
908                 if (bus->sih->buscorerev == 13)
909                          pcie_serdes_iddqdisable(bus->osh, bus->sih, (sbpcieregs_t *)(bus->regs));
910
911                 if (bus->sih != NULL) {
912                         si_detach(bus->sih);
913                         bus->sih = NULL;
914                 }
915                 if (bus->vars && bus->varsz)
916                         MFREE(osh, bus->vars, bus->varsz);
917                 bus->vars = NULL;
918         }
919
920         DHD_TRACE(("%s Exit\n", __FUNCTION__));
921 }
922
923 uint32
924 dhdpcie_bus_cfg_read_dword(dhd_bus_t *bus, uint32 addr, uint32 size)
925 {
926         uint32 data = OSL_PCI_READ_CONFIG(bus->osh, addr, size);
927         return data;
928 }
929
930 /** 32 bit config write */
931 void
932 dhdpcie_bus_cfg_write_dword(dhd_bus_t *bus, uint32 addr, uint32 size, uint32 data)
933 {
934         OSL_PCI_WRITE_CONFIG(bus->osh, addr, size, data);
935 }
936
937 void
938 dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data)
939 {
940         OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, 4, data);
941 }
942
943 void
944 dhdpcie_bus_dongle_setmemsize(struct dhd_bus *bus, int mem_size)
945 {
946         int32 min_size =  DONGLE_MIN_MEMSIZE;
947         /* Restrict the memsize to user specified limit */
948         DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n",
949                 dhd_dongle_memsize, min_size));
950         if ((dhd_dongle_memsize > min_size) &&
951                 (dhd_dongle_memsize < (int32)bus->orig_ramsize))
952                 bus->ramsize = dhd_dongle_memsize;
953 }
954
955 void
956 dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh)
957 {
958         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
959
960         if (bus->dhd && bus->dhd->dongle_reset)
961                 return;
962
963         if (bus->vars && bus->varsz) {
964                 MFREE(osh, bus->vars, bus->varsz);
965                 bus->vars = NULL;
966         }
967
968         DHD_TRACE(("%s: Exit\n", __FUNCTION__));
969         return;
970
971 }
972
973 /** Stop bus module: clear pending frames, disable data flow */
974 void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
975 {
976         uint32 status;
977         unsigned long flags;
978
979         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
980
981         if (!bus->dhd)
982                 return;
983
984         if (bus->dhd->busstate == DHD_BUS_DOWN) {
985                 DHD_ERROR(("%s: already down by net_dev_reset\n", __FUNCTION__));
986                 goto done;
987         }
988
989         DHD_DISABLE_RUNTIME_PM(bus->dhd);
990
991         DHD_GENERAL_LOCK(bus->dhd, flags);
992         bus->dhd->busstate = DHD_BUS_DOWN;
993         DHD_GENERAL_UNLOCK(bus->dhd, flags);
994
995         dhdpcie_bus_intr_disable(bus);
996         status =  dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
997         dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status);
998
999         if (!dhd_download_fw_on_driverload) {
1000                 dhd_dpc_kill(bus->dhd);
1001         }
1002
1003         /* Clear rx control and wake any waiters */
1004         dhd_os_set_ioctl_resp_timeout(IOCTL_DISABLE_TIMEOUT);
1005         dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_BUS_STOP);
1006
1007 done:
1008         return;
1009 }
1010
1011 /** Watchdog timer function */
1012 bool dhd_bus_watchdog(dhd_pub_t *dhd)
1013 {
1014         unsigned long flags;
1015 #ifdef DHD_DEBUG
1016         dhd_bus_t *bus;
1017         bus = dhd->bus;
1018
1019         DHD_GENERAL_LOCK(dhd, flags);
1020         if (dhd->busstate == DHD_BUS_DOWN ||
1021                         dhd->busstate == DHD_BUS_DOWN_IN_PROGRESS) {
1022                 DHD_GENERAL_UNLOCK(dhd, flags);
1023                 return FALSE;
1024         }
1025         dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_WD;
1026         DHD_GENERAL_UNLOCK(dhd, flags);
1027
1028 #ifdef DHD_PCIE_RUNTIMEPM
1029         dhdpcie_runtime_bus_wake(dhd, TRUE, __builtin_return_address(0));
1030 #endif /* DHD_PCIE_RUNTIMEPM */
1031
1032
1033
1034         /* Poll for console output periodically */
1035         if (dhd->busstate == DHD_BUS_DATA && dhd_console_ms != 0) {
1036                 bus->console.count += dhd_watchdog_ms;
1037                 if (bus->console.count >= dhd_console_ms) {
1038                         bus->console.count -= dhd_console_ms;
1039                         /* Make sure backplane clock is on */
1040                         if (dhdpcie_bus_readconsole(bus) < 0)
1041                                 dhd_console_ms = 0;     /* On error, stop trying */
1042                 }
1043         }
1044 #endif /* DHD_DEBUG */
1045
1046 #ifdef PCIE_OOB
1047         /* If haven't communicated with device for a while, deassert the Device_Wake GPIO */
1048         if (dhd_doorbell_timeout != 0 && !(bus->dhd->busstate == DHD_BUS_SUSPEND) &&
1049                 dhd_timeout_expired(&bus->doorbell_timer)) {
1050                 dhd_bus_set_device_wake(bus, FALSE);
1051         }
1052 #endif /* PCIE_OOB */
1053
1054         DHD_GENERAL_LOCK(dhd, flags);
1055         dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_WD;
1056         DHD_GENERAL_UNLOCK(dhd, flags);
1057
1058         return TRUE;
1059 } /* dhd_bus_watchdog */
1060
1061
1062 #define DEADBEEF_PATTERN 0xADDEADDE     // "DeadDead"
1063 #define MEMCHECKINFO "/data/.memcheck.info"
1064
1065 static int
1066 dhd_get_memcheck_info(void)
1067 {
1068         struct file *fp = NULL;
1069         uint32 mem_val = 0;
1070         int ret = 0;
1071         char *filepath = MEMCHECKINFO;
1072
1073         fp = filp_open(filepath, O_RDONLY, 0);
1074         if (IS_ERR(fp)) {
1075                 DHD_ERROR(("[WIFI_SEC] %s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
1076                 goto done;
1077         } else {
1078                 ret = kernel_read(fp, 0, (char *)&mem_val, 4);
1079                 if (ret < 0) {
1080                         DHD_ERROR(("[WIFI_SEC] %s: File read error, ret=%d\n", __FUNCTION__, ret));
1081                         filp_close(fp, NULL);
1082                         goto done;
1083                 }
1084
1085                 mem_val = bcm_atoi((char *)&mem_val);
1086
1087                 DHD_ERROR(("[WIFI_SEC]%s: MEMCHECK ENABLED = %d\n", __FUNCTION__, mem_val));
1088                 filp_close(fp, NULL);
1089         }
1090 done:
1091         return mem_val;
1092 }
1093
1094 static int
1095 dhdpcie_mem_check(struct dhd_bus *bus)
1096 {
1097         int bcmerror = BCME_OK;
1098         int offset = 0;
1099         int len = 0;
1100         uint8 *memblock = NULL, *memptr;
1101         int size = bus->ramsize;
1102         int i;
1103         uint32 memcheck_enabled;
1104
1105         /* Read memcheck info from the file */
1106         /* 0 : Disable */
1107         /* 1 : "Dead Beef" pattern write */
1108         /* 2 : "Dead Beef" pattern write and checking the pattern value */
1109
1110         memcheck_enabled = dhd_get_memcheck_info();
1111
1112         DHD_ERROR(("%s: memcheck_enabled: %d \n", __FUNCTION__, memcheck_enabled));
1113
1114         if (memcheck_enabled == 0) {
1115                 return bcmerror;
1116         }
1117
1118         memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
1119         if (memblock == NULL) {
1120                 DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
1121                 goto err;
1122         }
1123
1124         if ((ulong)memblock % DHD_SDALIGN) {
1125                 memptr += (DHD_SDALIGN - ((ulong)memblock % DHD_SDALIGN));
1126         }
1127
1128         for (i = 0; i < MEMBLOCK; i = i + 4) {
1129                 *(ulong*)(memptr + i) = DEADBEEF_PATTERN;
1130         }
1131
1132         if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
1133                         si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
1134                 if (offset == 0) {
1135                         /* Add start of RAM address to the address given by user */
1136                         offset += bus->dongle_ram_base;
1137                 }
1138         }
1139
1140         /* Write  "DeadBeef" pattern with MEMBLOCK size */
1141         while (size) {
1142                 len = MIN(MEMBLOCK, size);
1143
1144                 bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len);
1145                 if (bcmerror) {
1146                         DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
1147                                 __FUNCTION__, bcmerror, MEMBLOCK, offset));
1148                         goto err;
1149                 }
1150
1151                 if (memcheck_enabled == 2) {
1152                         bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset, (uint8 *)memptr, len);
1153                         if (bcmerror) {
1154                                 DHD_ERROR(("%s: error %d on read %d membytes at 0x%08x\n",
1155                                         __FUNCTION__, bcmerror, MEMBLOCK, offset));
1156                                 goto err;
1157                         } else {
1158                                 for (i = 0; i < len; i = i+4) {
1159                                         if ((*(uint32*)(memptr + i)) != DEADBEEF_PATTERN) {
1160                                                 DHD_ERROR(("%s: error on reading pattern at "
1161                                                         "0x%08x\n", __FUNCTION__, (offset + i)));
1162                                                 bcmerror = BCME_ERROR;
1163                                                 goto err;
1164                                         }
1165                                 }
1166                         }
1167                 }
1168                 offset += MEMBLOCK;
1169                 size -= MEMBLOCK;
1170         }
1171
1172         DHD_ERROR(("%s: Writing the Dead Beef pattern is Done \n", __FUNCTION__));
1173
1174 err:
1175         if (memblock) {
1176                 MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
1177         }
1178
1179         return bcmerror;
1180 }
1181
1182 /* Download firmware image and nvram image */
1183 int
1184 dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
1185                           char *pfw_path, char *pnv_path, char *pconf_path)
1186 {
1187         int ret;
1188
1189         bus->fw_path = pfw_path;
1190         bus->nv_path = pnv_path;
1191         bus->dhd->conf_path = pconf_path;
1192
1193         DHD_ERROR(("%s: firmware path=%s, nvram path=%s\n",
1194                 __FUNCTION__, bus->fw_path, bus->nv_path));
1195
1196         dhdpcie_mem_check(bus);
1197
1198         ret = dhdpcie_download_firmware(bus, osh);
1199
1200         return ret;
1201 }
1202
1203 static int
1204 dhdpcie_download_firmware(struct dhd_bus *bus, osl_t *osh)
1205 {
1206         int ret = 0;
1207 #if defined(BCM_REQUEST_FW)
1208         uint chipid = bus->sih->chip;
1209         uint revid = bus->sih->chiprev;
1210         char fw_path[64] = "/lib/firmware/brcm/bcm";    /* path to firmware image */
1211         char nv_path[64];               /* path to nvram vars file */
1212         bus->fw_path = fw_path;
1213         bus->nv_path = nv_path;
1214         switch (chipid) {
1215         case BCM43570_CHIP_ID:
1216                 bcmstrncat(fw_path, "43570", 5);
1217                 switch (revid) {
1218                 case 0:
1219                         bcmstrncat(fw_path, "a0", 2);
1220                         break;
1221                 case 2:
1222                         bcmstrncat(fw_path, "a2", 2);
1223                         break;
1224                 default:
1225                         DHD_ERROR(("%s: revid is not found %x\n", __FUNCTION__,
1226                         revid));
1227                         break;
1228                 }
1229                 break;
1230         default:
1231                 DHD_ERROR(("%s: unsupported device %x\n", __FUNCTION__,
1232                 chipid));
1233                 return 0;
1234         }
1235         /* load board specific nvram file */
1236         snprintf(bus->nv_path, sizeof(nv_path), "%s.nvm", fw_path);
1237         /* load firmware */
1238         snprintf(bus->fw_path, sizeof(fw_path), "%s-firmware.bin", fw_path);
1239 #endif /* BCM_REQUEST_FW */
1240
1241         DHD_OS_WAKE_LOCK(bus->dhd);
1242
1243         /* External conf takes precedence if specified */
1244         dhd_conf_preinit(bus->dhd);
1245         dhd_conf_read_config(bus->dhd, bus->dhd->conf_path);
1246         dhd_conf_set_fw_name_by_chip(bus->dhd, bus->fw_path);
1247         dhd_conf_set_nv_name_by_chip(bus->dhd, bus->nv_path);
1248
1249         printf("Final fw_path=%s\n", bus->fw_path);
1250         printf("Final nv_path=%s\n", bus->nv_path);
1251         printf("Final conf_path=%s\n", bus->dhd->conf_path);
1252
1253         ret = _dhdpcie_download_firmware(bus);
1254
1255         DHD_OS_WAKE_UNLOCK(bus->dhd);
1256         return ret;
1257 }
1258
1259 static int
1260 dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path)
1261 {
1262         int bcmerror = BCME_ERROR;
1263         int offset = 0;
1264         int len = 0;
1265         char *imgbuf = NULL;
1266         uint8 *memblock = NULL, *memptr;
1267         uint8 *memptr_tmp = NULL; // terence: check downloaded firmware is correct
1268
1269         int offset_end = bus->ramsize;
1270
1271         DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
1272
1273         /* Should succeed in opening image if it is actually given through registry
1274          * entry or in module param.
1275          */
1276         imgbuf = dhd_os_open_image(pfw_path);
1277         if (imgbuf == NULL) {
1278                 printf("%s: Open firmware file failed %s\n", __FUNCTION__, pfw_path);
1279                 goto err;
1280         }
1281
1282         memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
1283         if (memblock == NULL) {
1284                 DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
1285                 goto err;
1286         }
1287         if (dhd_msg_level & DHD_TRACE_VAL) {
1288                 memptr_tmp = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
1289                 if (memptr_tmp == NULL) {
1290                         DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
1291                         goto err;
1292                 }
1293         }
1294         if ((uint32)(uintptr)memblock % DHD_SDALIGN)
1295                 memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
1296
1297         DHD_INFO_HW4(("%s: dongle_ram_base: 0x%x ramsize: 0x%x tcm: %p\n",
1298                         __FUNCTION__, bus->dongle_ram_base, bus->ramsize, bus->tcm));
1299         /* Download image with MEMBLOCK size */
1300         while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, imgbuf))) {
1301                 if (len < 0) {
1302                         DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
1303                         bcmerror = BCME_ERROR;
1304                         goto err;
1305                 }
1306                 /* check if CR4/CA7 */
1307                 if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
1308                         si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
1309                         /* if address is 0, store the reset instruction to be written in 0 */
1310                         if (offset == 0) {
1311                                 bus->resetinstr = *(((uint32*)memptr));
1312                                 /* Add start of RAM address to the address given by user */
1313                                 offset += bus->dongle_ram_base;
1314                                 offset_end += offset;
1315                         }
1316                 }
1317                 bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len);
1318                 if (bcmerror) {
1319                         DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
1320                                 __FUNCTION__, bcmerror, MEMBLOCK, offset));
1321                         goto err;
1322                 }
1323
1324                 if (dhd_msg_level & DHD_TRACE_VAL) {
1325                         bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset, memptr_tmp, len);
1326                         if (bcmerror) {
1327                                 DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
1328                                         __FUNCTION__, bcmerror, MEMBLOCK, offset));
1329                                 goto err;
1330                         }
1331                         if (memcmp(memptr_tmp, memptr, len)) {
1332                                 DHD_ERROR(("%s: Downloaded image is corrupted.\n", __FUNCTION__));
1333                                 goto err;
1334                         } else
1335                                 DHD_INFO(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__));
1336                 }
1337                 offset += MEMBLOCK;
1338
1339                 if (offset >= offset_end) {
1340                         DHD_ERROR(("%s: invalid address access to %x (offset end: %x)\n",
1341                                 __FUNCTION__, offset, offset_end));
1342                         bcmerror = BCME_ERROR;
1343                         goto err;
1344                 }
1345         }
1346
1347 err:
1348         if (memblock)
1349                 MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
1350         if (dhd_msg_level & DHD_TRACE_VAL) {
1351                 if (memptr_tmp)
1352                         MFREE(bus->dhd->osh, memptr_tmp, MEMBLOCK + DHD_SDALIGN);
1353         }
1354
1355         if (imgbuf)
1356                 dhd_os_close_image(imgbuf);
1357
1358         return bcmerror;
1359 } /* dhdpcie_download_code_file */
1360
1361 #ifdef CUSTOMER_HW4_DEBUG
1362 #define MIN_NVRAMVARS_SIZE 128
1363 #endif /* CUSTOMER_HW4_DEBUG */
1364
1365 static int
1366 dhdpcie_download_nvram(struct dhd_bus *bus)
1367 {
1368         int bcmerror = BCME_ERROR;
1369         uint len;
1370         char * memblock = NULL;
1371         char *bufp;
1372         char *pnv_path;
1373         bool nvram_file_exists;
1374         bool nvram_uefi_exists = FALSE;
1375         bool local_alloc = FALSE;
1376         pnv_path = bus->nv_path;
1377
1378         nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
1379
1380         /* First try UEFI */
1381         len = MAX_NVRAMBUF_SIZE;
1382         dhd_get_download_buffer(bus->dhd, NULL, NVRAM, &memblock, &len);
1383
1384         /* If UEFI empty, then read from file system */
1385         if ((len == 0) || (memblock[0] == '\0')) {
1386
1387                 if (nvram_file_exists) {
1388                         len = MAX_NVRAMBUF_SIZE;
1389                         dhd_get_download_buffer(bus->dhd, pnv_path, NVRAM, &memblock, &len);
1390                         if ((len <= 0 || len > MAX_NVRAMBUF_SIZE)) {
1391                                 goto err;
1392                         }
1393                 }
1394                 else {
1395                         /* For SROM OTP no external file or UEFI required */
1396                         bcmerror = BCME_OK;
1397                 }
1398         } else {
1399                 nvram_uefi_exists = TRUE;
1400         }
1401
1402         DHD_ERROR(("%s: dhd_get_download_buffer len %d\n", __FUNCTION__, len));
1403
1404         if (len > 0 && len <= MAX_NVRAMBUF_SIZE) {
1405                 bufp = (char *) memblock;
1406
1407 #ifdef CACHE_FW_IMAGES
1408                 if (bus->processed_nvram_params_len) {
1409                         len = bus->processed_nvram_params_len;
1410                 }
1411
1412                 if (!bus->processed_nvram_params_len) {
1413                         bufp[len] = 0;
1414                         if (nvram_uefi_exists || nvram_file_exists) {
1415                                 len = process_nvram_vars(bufp, len);
1416                                 bus->processed_nvram_params_len = len;
1417                         }
1418                 } else
1419 #else
1420                 {
1421                         bufp[len] = 0;
1422                         if (nvram_uefi_exists || nvram_file_exists) {
1423                                 len = process_nvram_vars(bufp, len);
1424                         }
1425                 }
1426 #endif /* CACHE_FW_IMAGES */
1427
1428                 DHD_ERROR(("%s: process_nvram_vars len %d\n", __FUNCTION__, len));
1429 #ifdef CUSTOMER_HW4_DEBUG
1430                 if (len < MIN_NVRAMVARS_SIZE) {
1431                         DHD_ERROR(("%s: invalid nvram size in process_nvram_vars \n",
1432                                 __FUNCTION__));
1433                         bcmerror = BCME_ERROR;
1434                         goto err;
1435                 }
1436 #endif /* CUSTOMER_HW4_DEBUG */
1437
1438                 if (len % 4) {
1439                         len += 4 - (len % 4);
1440                 }
1441                 bufp += len;
1442                 *bufp++ = 0;
1443                 if (len)
1444                         bcmerror = dhdpcie_downloadvars(bus, memblock, len + 1);
1445                 if (bcmerror) {
1446                         DHD_ERROR(("%s: error downloading vars: %d\n",
1447                                 __FUNCTION__, bcmerror));
1448                 }
1449         }
1450
1451
1452 err:
1453         if (memblock) {
1454                 if (local_alloc) {
1455                         MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
1456                 } else {
1457                         dhd_free_download_buffer(bus->dhd, memblock, MAX_NVRAMBUF_SIZE);
1458                 }
1459         }
1460
1461         return bcmerror;
1462 }
1463
1464
1465 #ifdef BCMEMBEDIMAGE
1466 int
1467 dhdpcie_download_code_array(struct dhd_bus *bus)
1468 {
1469         int bcmerror = -1;
1470         int offset = 0;
1471         unsigned char *p_dlarray  = NULL;
1472         unsigned int dlarray_size = 0;
1473         unsigned int downloded_len, remaining_len, len;
1474         char *p_dlimagename, *p_dlimagever, *p_dlimagedate;
1475         uint8 *memblock = NULL, *memptr;
1476
1477         downloded_len = 0;
1478         remaining_len = 0;
1479         len = 0;
1480
1481         p_dlarray = dlarray;
1482         dlarray_size = sizeof(dlarray);
1483         p_dlimagename = dlimagename;
1484         p_dlimagever  = dlimagever;
1485         p_dlimagedate = dlimagedate;
1486
1487         if ((p_dlarray == 0) || (dlarray_size == 0) ||(dlarray_size > bus->ramsize) ||
1488                 (p_dlimagename == 0) || (p_dlimagever  == 0) || (p_dlimagedate == 0))
1489                 goto err;
1490
1491         memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
1492         if (memblock == NULL) {
1493                 DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
1494                 goto err;
1495         }
1496         if ((uint32)(uintptr)memblock % DHD_SDALIGN)
1497                 memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
1498
1499         while (downloded_len  < dlarray_size) {
1500                 remaining_len = dlarray_size - downloded_len;
1501                 if (remaining_len >= MEMBLOCK)
1502                         len = MEMBLOCK;
1503                 else
1504                         len = remaining_len;
1505
1506                 memcpy(memptr, (p_dlarray + downloded_len), len);
1507                 /* check if CR4/CA7 */
1508                 if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
1509                         si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
1510                         /* if address is 0, store the reset instruction to be written in 0 */
1511                         if (offset == 0) {
1512                                 bus->resetinstr = *(((uint32*)memptr));
1513                                 /* Add start of RAM address to the address given by user */
1514                                 offset += bus->dongle_ram_base;
1515                         }
1516                 }
1517                 bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len);
1518                 downloded_len += len;
1519                 if (bcmerror) {
1520                         DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
1521                                 __FUNCTION__, bcmerror, MEMBLOCK, offset));
1522                         goto err;
1523                 }
1524                 offset += MEMBLOCK;
1525         }
1526
1527 #ifdef DHD_DEBUG
1528         /* Upload and compare the downloaded code */
1529         {
1530                 unsigned char *ularray = NULL;
1531                 unsigned int uploded_len;
1532                 uploded_len = 0;
1533                 bcmerror = -1;
1534                 ularray = MALLOC(bus->dhd->osh, dlarray_size);
1535                 if (ularray == NULL)
1536                         goto upload_err;
1537                 /* Upload image to verify downloaded contents. */
1538                 offset = bus->dongle_ram_base;
1539                 memset(ularray, 0xaa, dlarray_size);
1540                 while (uploded_len  < dlarray_size) {
1541                         remaining_len = dlarray_size - uploded_len;
1542                         if (remaining_len >= MEMBLOCK)
1543                                 len = MEMBLOCK;
1544                         else
1545                                 len = remaining_len;
1546                         bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset,
1547                                 (uint8 *)(ularray + uploded_len), len);
1548                         if (bcmerror) {
1549                                 DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
1550                                         __FUNCTION__, bcmerror, MEMBLOCK, offset));
1551                                 goto upload_err;
1552                         }
1553
1554                         uploded_len += len;
1555                         offset += MEMBLOCK;
1556                 }
1557
1558                 if (memcmp(p_dlarray, ularray, dlarray_size)) {
1559                         DHD_ERROR(("%s: Downloaded image is corrupted (%s, %s, %s).\n",
1560                                 __FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate));
1561                         goto upload_err;
1562
1563                 } else
1564                         DHD_ERROR(("%s: Download, Upload and compare succeeded (%s, %s, %s).\n",
1565                                 __FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate));
1566 upload_err:
1567                 if (ularray)
1568                         MFREE(bus->dhd->osh, ularray, dlarray_size);
1569         }
1570 #endif /* DHD_DEBUG */
1571 err:
1572
1573         if (memblock)
1574                 MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
1575
1576         return bcmerror;
1577 } /* dhdpcie_download_code_array */
1578 #endif /* BCMEMBEDIMAGE */
1579
1580
1581 static int
1582 _dhdpcie_download_firmware(struct dhd_bus *bus)
1583 {
1584         int bcmerror = -1;
1585
1586         bool embed = FALSE;     /* download embedded firmware */
1587         bool dlok = FALSE;      /* download firmware succeeded */
1588
1589         /* Out immediately if no image to download */
1590         if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
1591 #ifdef BCMEMBEDIMAGE
1592                 embed = TRUE;
1593 #else
1594                 DHD_ERROR(("%s: no fimrware file\n", __FUNCTION__));
1595                 return 0;
1596 #endif
1597         }
1598
1599         /* Keep arm in reset */
1600         if (dhdpcie_bus_download_state(bus, TRUE)) {
1601                 DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__));
1602                 goto err;
1603         }
1604
1605         /* External image takes precedence if specified */
1606         if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
1607                 if (dhdpcie_download_code_file(bus, bus->fw_path)) {
1608                         DHD_ERROR(("%s: dongle image file download failed\n", __FUNCTION__));
1609 #ifdef BCMEMBEDIMAGE
1610                         embed = TRUE;
1611 #else
1612                         goto err;
1613 #endif
1614                 } else {
1615                         embed = FALSE;
1616                         dlok = TRUE;
1617                 }
1618         }
1619
1620 #ifdef BCMEMBEDIMAGE
1621         if (embed) {
1622                 if (dhdpcie_download_code_array(bus)) {
1623                         DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__));
1624                         goto err;
1625                 } else {
1626                         dlok = TRUE;
1627                 }
1628         }
1629 #else
1630         BCM_REFERENCE(embed);
1631 #endif
1632         if (!dlok) {
1633                 DHD_ERROR(("%s: dongle image download failed\n", __FUNCTION__));
1634                 goto err;
1635         }
1636
1637         /* EXAMPLE: nvram_array */
1638         /* If a valid nvram_arry is specified as above, it can be passed down to dongle */
1639         /* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
1640
1641
1642         /* External nvram takes precedence if specified */
1643         if (dhdpcie_download_nvram(bus)) {
1644                 DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__));
1645                 goto err;
1646         }
1647
1648         /* Take arm out of reset */
1649         if (dhdpcie_bus_download_state(bus, FALSE)) {
1650                 DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__));
1651                 goto err;
1652         }
1653
1654         bcmerror = 0;
1655
1656 err:
1657         return bcmerror;
1658 } /* _dhdpcie_download_firmware */
1659
1660 #define CONSOLE_LINE_MAX        192
1661
1662 #ifdef DHD_DEBUG
1663 static int
1664 dhdpcie_bus_readconsole(dhd_bus_t *bus)
1665 {
1666         dhd_console_t *c = &bus->console;
1667         uint8 line[CONSOLE_LINE_MAX], ch;
1668         uint32 n, idx, addr;
1669         int rv;
1670
1671         /* Don't do anything until FWREADY updates console address */
1672         if (bus->console_addr == 0)
1673                 return -1;
1674
1675         /* Read console log struct */
1676         addr = bus->console_addr + OFFSETOF(hnd_cons_t, log);
1677
1678         if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0)
1679                 return rv;
1680
1681         /* Allocate console buffer (one time only) */
1682         if (c->buf == NULL) {
1683                 c->bufsize = ltoh32(c->log.buf_size);
1684                 if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL)
1685                         return BCME_NOMEM;
1686         }
1687         idx = ltoh32(c->log.idx);
1688
1689         /* Protect against corrupt value */
1690         if (idx > c->bufsize)
1691                 return BCME_ERROR;
1692
1693         /* Skip reading the console buffer if the index pointer has not moved */
1694         if (idx == c->last)
1695                 return BCME_OK;
1696
1697         /* Read the console buffer */
1698         addr = ltoh32(c->log.buf);
1699         if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, c->buf, c->bufsize)) < 0)
1700                 return rv;
1701
1702         while (c->last != idx) {
1703                 for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
1704                         if (c->last == idx) {
1705                                 /* This would output a partial line.  Instead, back up
1706                                  * the buffer pointer and output this line next time around.
1707                                  */
1708                                 if (c->last >= n)
1709                                         c->last -= n;
1710                                 else
1711                                         c->last = c->bufsize - n;
1712                                 goto break2;
1713                         }
1714                         ch = c->buf[c->last];
1715                         c->last = (c->last + 1) % c->bufsize;
1716                         if (ch == '\n')
1717                                 break;
1718                         line[n] = ch;
1719                 }
1720
1721                 if (n > 0) {
1722                         if (line[n - 1] == '\r')
1723                                 n--;
1724                         line[n] = 0;
1725                         printf("CONSOLE: %s\n", line);
1726
1727                 }
1728         }
1729 break2:
1730
1731         return BCME_OK;
1732 } /* dhdpcie_bus_readconsole */
1733 #endif /* DHD_DEBUG */
1734
1735 static int
1736 dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size)
1737 {
1738         int bcmerror = 0;
1739         uint msize = 512;
1740         char *mbuffer = NULL;
1741         char *console_buffer = NULL;
1742         uint maxstrlen = 256;
1743         char *str = NULL;
1744         trap_t tr;
1745         pciedev_shared_t *pciedev_shared = bus->pcie_sh;
1746         struct bcmstrbuf strbuf;
1747         uint32 console_ptr, console_size, console_index;
1748         uint8 line[CONSOLE_LINE_MAX], ch;
1749         uint32 n, i, addr;
1750         int rv;
1751
1752         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
1753
1754         if (DHD_NOCHECKDIED_ON()) {
1755                 return 0;
1756         }
1757
1758         if (data == NULL) {
1759                 /*
1760                  * Called after a rx ctrl timeout. "data" is NULL.
1761                  * allocate memory to trace the trap or assert.
1762                  */
1763                 size = msize;
1764                 mbuffer = data = MALLOC(bus->dhd->osh, msize);
1765
1766                 if (mbuffer == NULL) {
1767                         DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
1768                         bcmerror = BCME_NOMEM;
1769                         goto done;
1770                 }
1771         }
1772
1773         if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
1774                 DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
1775                 bcmerror = BCME_NOMEM;
1776                 goto done;
1777         }
1778
1779         if ((bcmerror = dhdpcie_readshared(bus)) < 0) {
1780                 goto done;
1781         }
1782
1783         bcm_binit(&strbuf, data, size);
1784
1785         bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address  : 0x%08X\n",
1786                     pciedev_shared->msgtrace_addr, pciedev_shared->console_addr);
1787
1788         if ((pciedev_shared->flags & PCIE_SHARED_ASSERT_BUILT) == 0) {
1789                 /* NOTE: Misspelled assert is intentional - DO NOT FIX.
1790                  * (Avoids conflict with real asserts for programmatic parsing of output.)
1791                  */
1792                 bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
1793         }
1794
1795         if ((bus->pcie_sh->flags & (PCIE_SHARED_ASSERT|PCIE_SHARED_TRAP)) == 0) {
1796                 /* NOTE: Misspelled assert is intentional - DO NOT FIX.
1797                  * (Avoids conflict with real asserts for programmatic parsing of output.)
1798                  */
1799                 bcm_bprintf(&strbuf, "No trap%s in dongle",
1800                           (bus->pcie_sh->flags & PCIE_SHARED_ASSERT_BUILT)
1801                           ?"/assrt" :"");
1802         } else {
1803                 if (bus->pcie_sh->flags & PCIE_SHARED_ASSERT) {
1804                         /* Download assert */
1805                         bcm_bprintf(&strbuf, "Dongle assert");
1806                         if (bus->pcie_sh->assert_exp_addr != 0) {
1807                                 str[0] = '\0';
1808                                 if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
1809                                         bus->pcie_sh->assert_exp_addr,
1810                                         (uint8 *)str, maxstrlen)) < 0) {
1811                                         goto done;
1812                                 }
1813
1814                                 str[maxstrlen - 1] = '\0';
1815                                 bcm_bprintf(&strbuf, " expr \"%s\"", str);
1816                         }
1817
1818                         if (bus->pcie_sh->assert_file_addr != 0) {
1819                                 str[0] = '\0';
1820                                 if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
1821                                         bus->pcie_sh->assert_file_addr,
1822                                         (uint8 *)str, maxstrlen)) < 0) {
1823                                         goto done;
1824                                 }
1825
1826                                 str[maxstrlen - 1] = '\0';
1827                                 bcm_bprintf(&strbuf, " file \"%s\"", str);
1828                         }
1829
1830                         bcm_bprintf(&strbuf, " line %d ",  bus->pcie_sh->assert_line);
1831                 }
1832
1833                 if (bus->pcie_sh->flags & PCIE_SHARED_TRAP) {
1834                         bus->dhd->dongle_trap_occured = TRUE;
1835                         if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
1836                                 bus->pcie_sh->trap_addr, (uint8*)&tr, sizeof(trap_t))) < 0) {
1837                                 goto done;
1838                         }
1839
1840                         bcm_bprintf(&strbuf,
1841                         "\nTRAP type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
1842                         " lp 0x%x, rpc 0x%x"
1843                         "\nTrap offset 0x%x, r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
1844                         "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n",
1845                         ltoh32(tr.type), ltoh32(tr.epc), ltoh32(tr.cpsr), ltoh32(tr.spsr),
1846                         ltoh32(tr.r13), ltoh32(tr.r14), ltoh32(tr.pc),
1847                         ltoh32(bus->pcie_sh->trap_addr),
1848                         ltoh32(tr.r0), ltoh32(tr.r1), ltoh32(tr.r2), ltoh32(tr.r3),
1849                         ltoh32(tr.r4), ltoh32(tr.r5), ltoh32(tr.r6), ltoh32(tr.r7));
1850
1851                         addr =  bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log);
1852                         if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
1853                                 (uint8 *)&console_ptr, sizeof(console_ptr))) < 0) {
1854                                 goto printbuf;
1855                         }
1856
1857                         addr =  bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.buf_size);
1858                         if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
1859                                 (uint8 *)&console_size, sizeof(console_size))) < 0) {
1860                                 goto printbuf;
1861                         }
1862
1863                         addr =  bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.idx);
1864                         if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
1865                                 (uint8 *)&console_index, sizeof(console_index))) < 0) {
1866                                 goto printbuf;
1867                         }
1868
1869                         console_ptr = ltoh32(console_ptr);
1870                         console_size = ltoh32(console_size);
1871                         console_index = ltoh32(console_index);
1872
1873                         if (console_size > CONSOLE_BUFFER_MAX ||
1874                                 !(console_buffer = MALLOC(bus->dhd->osh, console_size))) {
1875                                 goto printbuf;
1876                         }
1877
1878                         if ((rv = dhdpcie_bus_membytes(bus, FALSE, console_ptr,
1879                                 (uint8 *)console_buffer, console_size)) < 0) {
1880                                 goto printbuf;
1881                         }
1882
1883                         for (i = 0, n = 0; i < console_size; i += n + 1) {
1884                                 for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
1885                                         ch = console_buffer[(console_index + i + n) % console_size];
1886                                         if (ch == '\n')
1887                                                 break;
1888                                         line[n] = ch;
1889                                 }
1890
1891
1892                                 if (n > 0) {
1893                                         if (line[n - 1] == '\r')
1894                                                 n--;
1895                                         line[n] = 0;
1896                                         /* Don't use DHD_ERROR macro since we print
1897                                          * a lot of information quickly. The macro
1898                                          * will truncate a lot of the printfs
1899                                          */
1900
1901                                         printf("CONSOLE: %s\n", line);
1902                                 }
1903                         }
1904                 }
1905         }
1906
1907 printbuf:
1908         if (bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) {
1909                 printf("%s: %s\n", __FUNCTION__, strbuf.origbuf);
1910
1911                 /* wake up IOCTL wait event */
1912                 dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_TRAP);
1913
1914 #if defined(DHD_FW_COREDUMP)
1915                 /* save core dump or write to a file */
1916                 if (bus->dhd->memdump_enabled) {
1917                         bus->dhd->memdump_type = DUMP_TYPE_DONGLE_TRAP;
1918                         dhdpcie_mem_dump(bus);
1919                 }
1920 #endif /* DHD_FW_COREDUMP */
1921
1922
1923         }
1924
1925 done:
1926         if (mbuffer)
1927                 MFREE(bus->dhd->osh, mbuffer, msize);
1928         if (str)
1929                 MFREE(bus->dhd->osh, str, maxstrlen);
1930
1931         if (console_buffer)
1932                 MFREE(bus->dhd->osh, console_buffer, console_size);
1933
1934         return bcmerror;
1935 } /* dhdpcie_checkdied */
1936
1937
1938 /* Custom copy of dhdpcie_mem_dump() that can be called at interrupt level */
1939 void dhdpcie_mem_dump_bugcheck(dhd_bus_t *bus, uint8 *buf)
1940 {
1941         int ret = 0;
1942         int size; /* Full mem size */
1943         int start; /* Start address */
1944         int read_size = 0; /* Read size of each iteration */
1945         uint8 *databuf = buf;
1946
1947         if (bus == NULL) {
1948                 return;
1949         }
1950
1951         start = bus->dongle_ram_base;
1952         /* Get full mem size */
1953         size = bus->ramsize;
1954         /* Read mem content */
1955         while (size)
1956         {
1957                 read_size = MIN(MEMBLOCK, size);
1958                 if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size))) {
1959                         return;
1960                 }
1961
1962                 /* Decrement size and increment start address */
1963                 size -= read_size;
1964                 start += read_size;
1965                 databuf += read_size;
1966         }
1967         bus->dhd->soc_ram = buf;
1968         bus->dhd->soc_ram_length = bus->ramsize;
1969         return;
1970 }
1971
1972
1973 #if defined(DHD_FW_COREDUMP)
1974 static int
1975 dhdpcie_mem_dump(dhd_bus_t *bus)
1976 {
1977         int ret = 0;
1978         int size; /* Full mem size */
1979         int start = bus->dongle_ram_base; /* Start address */
1980         int read_size = 0; /* Read size of each iteration */
1981         uint8 *buf = NULL, *databuf = NULL;
1982
1983 #ifdef EXYNOS_PCIE_DEBUG
1984         exynos_pcie_register_dump(1);
1985 #endif /* EXYNOS_PCIE_DEBUG */
1986
1987 #ifdef SUPPORT_LINKDOWN_RECOVERY
1988         if (bus->is_linkdown) {
1989                 DHD_ERROR(("%s: PCIe link was down so skip\n", __FUNCTION__));
1990                 return BCME_ERROR;
1991         }
1992 #endif /* SUPPORT_LINKDOWN_RECOVERY */
1993
1994         /* Get full mem size */
1995         size = bus->ramsize;
1996 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
1997         buf = DHD_OS_PREALLOC(bus->dhd, DHD_PREALLOC_MEMDUMP_BUF, size);
1998         bzero(buf, size);
1999 #else
2000         buf = MALLOC(bus->dhd->osh, size);
2001 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
2002         if (!buf) {
2003                 DHD_ERROR(("%s: Out of memory (%d bytes)\n", __FUNCTION__, size));
2004                 return BCME_ERROR;
2005         }
2006
2007         /* Read mem content */
2008         DHD_TRACE_HW4(("Dump dongle memory"));
2009         databuf = buf;
2010         while (size)
2011         {
2012                 read_size = MIN(MEMBLOCK, size);
2013                 if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size)))
2014                 {
2015                         DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret));
2016                         if (buf) {
2017                                 MFREE(bus->dhd->osh, buf, size);
2018                         }
2019                         return BCME_ERROR;
2020                 }
2021                 DHD_TRACE(("."));
2022
2023                 /* Decrement size and increment start address */
2024                 size -= read_size;
2025                 start += read_size;
2026                 databuf += read_size;
2027         }
2028
2029         DHD_TRACE_HW4(("%s FUNC: Copy fw image to the embedded buffer \n", __FUNCTION__));
2030
2031         dhd_save_fwdump(bus->dhd, buf, bus->ramsize);
2032         dhd_schedule_memdump(bus->dhd, buf, bus->ramsize);
2033
2034         return ret;
2035 }
2036
2037 int
2038 dhd_bus_mem_dump(dhd_pub_t *dhdp)
2039 {
2040         dhd_bus_t *bus = dhdp->bus;
2041
2042         if (bus->suspended) {
2043                 DHD_ERROR(("%s: Bus is suspend so skip\n", __FUNCTION__));
2044                 return 0;
2045         }
2046
2047         return dhdpcie_mem_dump(bus);
2048 }
2049 #endif /* DHD_FW_COREDUMP */
2050
2051 int
2052 dhd_socram_dump(dhd_bus_t *bus)
2053 {
2054 #if defined(DHD_FW_COREDUMP)
2055         return (dhdpcie_mem_dump(bus));
2056 #else
2057         return -1;
2058 #endif
2059 }
2060
2061 /**
2062  * Transfers bytes from host to dongle using pio mode.
2063  * Parameter 'address' is a backplane address.
2064  */
2065 static int
2066 dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size)
2067 {
2068         uint dsize;
2069         int detect_endian_flag = 0x01;
2070         bool little_endian;
2071
2072         if (write && bus->is_linkdown) {
2073                 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
2074                 return BCME_ERROR;
2075         }
2076
2077         /* Detect endianness. */
2078         little_endian = *(char *)&detect_endian_flag;
2079
2080         /* In remap mode, adjust address beyond socram and redirect
2081          * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize
2082          * is not backplane accessible
2083          */
2084
2085         /* Determine initial transfer parameters */
2086         dsize = sizeof(uint64);
2087
2088         /* Do the transfer(s) */
2089         DHD_INFO(("%s: %s %d bytes in window 0x%08x\n",
2090                   __FUNCTION__, (write ? "write" : "read"), size, address));
2091         if (write) {
2092                 while (size) {
2093                         if (size >= sizeof(uint64) && little_endian &&
2094 #ifdef CONFIG_64BIT
2095                                 !(address % 8) &&
2096 #endif /* CONFIG_64BIT */
2097                                 1) {
2098                                 dhdpcie_bus_wtcm64(bus, address, *((uint64 *)data));
2099                         } else {
2100                                 dsize = sizeof(uint8);
2101                                 dhdpcie_bus_wtcm8(bus, address, *data);
2102                         }
2103
2104                         /* Adjust for next transfer (if any) */
2105                         if ((size -= dsize)) {
2106                                 data += dsize;
2107                                 address += dsize;
2108                         }
2109                 }
2110         } else {
2111                 while (size) {
2112                         if (size >= sizeof(uint64) && little_endian &&
2113 #ifdef CONFIG_64BIT
2114                                 !(address % 8) &&
2115 #endif /* CONFIG_64BIT */
2116                                 1) {
2117                                 *(uint64 *)data = dhdpcie_bus_rtcm64(bus, address);
2118                         } else {
2119                                 dsize = sizeof(uint8);
2120                                 *data = dhdpcie_bus_rtcm8(bus, address);
2121                         }
2122
2123                         /* Adjust for next transfer (if any) */
2124                         if ((size -= dsize) > 0) {
2125                                 data += dsize;
2126                                 address += dsize;
2127                         }
2128                 }
2129         }
2130         return BCME_OK;
2131 } /* dhdpcie_bus_membytes */
2132
2133 /**
2134  * Transfers one transmit (ethernet) packet that was queued in the (flow controlled) flow ring queue
2135  * to the (non flow controlled) flow ring.
2136  */
2137 int BCMFASTPATH
2138 dhd_bus_schedule_queue(struct dhd_bus  *bus, uint16 flow_id, bool txs)
2139 {
2140         flow_ring_node_t *flow_ring_node;
2141         int ret = BCME_OK;
2142 #ifdef DHD_LOSSLESS_ROAMING
2143         dhd_pub_t *dhdp = bus->dhd;
2144 #endif
2145         DHD_INFO(("%s: flow_id is %d\n", __FUNCTION__, flow_id));
2146
2147         /* ASSERT on flow_id */
2148         if (flow_id >= bus->max_sub_queues) {
2149                 DHD_ERROR(("%s: flow_id is invalid %d, max %d\n", __FUNCTION__,
2150                         flow_id, bus->max_sub_queues));
2151                 return 0;
2152         }
2153
2154         flow_ring_node = DHD_FLOW_RING(bus->dhd, flow_id);
2155
2156 #ifdef DHD_LOSSLESS_ROAMING
2157         if ((dhdp->dequeue_prec_map & (1 << flow_ring_node->flow_info.tid)) == 0) {
2158                 DHD_INFO(("%s: tid %d is not in precedence map. block scheduling\n",
2159                         __FUNCTION__, flow_ring_node->flow_info.tid));
2160                 return BCME_OK;
2161         }
2162 #endif /* DHD_LOSSLESS_ROAMING */
2163
2164         {
2165                 unsigned long flags;
2166                 void *txp = NULL;
2167                 flow_queue_t *queue;
2168 #ifdef DHD_LOSSLESS_ROAMING
2169                 struct ether_header *eh;
2170                 uint8 *pktdata;
2171 #endif /* DHD_LOSSLESS_ROAMING */
2172
2173                 queue = &flow_ring_node->queue; /* queue associated with flow ring */
2174
2175                 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
2176
2177                 if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
2178                         DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
2179                         return BCME_NOTREADY;
2180                 }
2181
2182                 while ((txp = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
2183                         PKTORPHAN(txp);
2184
2185                         /*
2186                          * Modifying the packet length caused P2P cert failures.
2187                          * Specifically on test cases where a packet of size 52 bytes
2188                          * was injected, the sniffer capture showed 62 bytes because of
2189                          * which the cert tests failed. So making the below change
2190                          * only Router specific.
2191                          */
2192
2193 #ifdef DHDTCPACK_SUPPRESS
2194                         if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_HOLD) {
2195                                 ret = dhd_tcpack_check_xmit(bus->dhd, txp);
2196                                 if (ret != BCME_OK) {
2197                                         DHD_ERROR(("%s: dhd_tcpack_check_xmit() error.\n",
2198                                                 __FUNCTION__));
2199                                 }
2200                         }
2201 #endif /* DHDTCPACK_SUPPRESS */
2202 #ifdef DHD_LOSSLESS_ROAMING
2203                         pktdata = (uint8 *)PKTDATA(OSH_NULL, txp);
2204                         eh = (struct ether_header *) pktdata;
2205                         if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
2206                                 uint8 prio = (uint8)PKTPRIO(txp);
2207
2208                                 /* Restore to original priority for 802.1X packet */
2209                                 if (prio == PRIO_8021D_NC) {
2210                                         PKTSETPRIO(txp, PRIO_8021D_BE);
2211                                 }
2212                         }
2213 #endif /* DHD_LOSSLESS_ROAMING */
2214
2215                         /* Attempt to transfer packet over flow ring */
2216                         ret = dhd_prot_txdata(bus->dhd, txp, flow_ring_node->flow_info.ifindex);
2217                         if (ret != BCME_OK) { /* may not have resources in flow ring */
2218                                 DHD_INFO(("%s: Reinserrt %d\n", __FUNCTION__, ret));
2219                                 dhd_prot_txdata_write_flush(bus->dhd, flow_id, FALSE);
2220                                 /* reinsert at head */
2221                                 dhd_flow_queue_reinsert(bus->dhd, queue, txp);
2222                                 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
2223
2224                                 /* If we are able to requeue back, return success */
2225                                 return BCME_OK;
2226                         }
2227                 }
2228
2229                 dhd_prot_txdata_write_flush(bus->dhd, flow_id, FALSE);
2230
2231                 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
2232         }
2233
2234         return ret;
2235 } /* dhd_bus_schedule_queue */
2236
2237 /** Sends an (ethernet) data frame (in 'txp') to the dongle. Callee disposes of txp. */
2238 int BCMFASTPATH
2239 dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx)
2240 {
2241         uint16 flowid;
2242         flow_queue_t *queue;
2243         flow_ring_node_t *flow_ring_node;
2244         unsigned long flags;
2245         int ret = BCME_OK;
2246         void *txp_pend = NULL;
2247
2248         if (!bus->dhd->flowid_allocator) {
2249                 DHD_ERROR(("%s: Flow ring not intited yet  \n", __FUNCTION__));
2250                 goto toss;
2251         }
2252
2253         flowid = DHD_PKT_GET_FLOWID(txp);
2254
2255         flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
2256
2257         DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n",
2258                 __FUNCTION__, flowid, flow_ring_node->status,
2259                 flow_ring_node->active));
2260
2261         DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
2262         if ((flowid >= bus->dhd->num_flow_rings) ||
2263                 (!flow_ring_node->active) ||
2264                 (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) ||
2265                 (flow_ring_node->status == FLOW_RING_STATUS_STA_FREEING)) {
2266                 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
2267                 DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n",
2268                         __FUNCTION__, flowid, flow_ring_node->status,
2269                         flow_ring_node->active));
2270                 ret = BCME_ERROR;
2271                 goto toss;
2272         }
2273
2274         queue = &flow_ring_node->queue; /* queue associated with flow ring */
2275
2276         if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK) {
2277                 txp_pend = txp;
2278         }
2279
2280         DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
2281
2282         if (flow_ring_node->status) {
2283                 DHD_INFO(("%s: Enq pkt flowid %d, status %d active %d\n",
2284                         __FUNCTION__, flowid, flow_ring_node->status,
2285                         flow_ring_node->active));
2286                 if (txp_pend) {
2287                         txp = txp_pend;
2288                         goto toss;
2289                 }
2290                 return BCME_OK;
2291         }
2292         ret = dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */
2293
2294         /* If we have anything pending, try to push into q */
2295         if (txp_pend) {
2296                 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
2297
2298                 if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp_pend)) != BCME_OK) {
2299                         DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
2300                         txp = txp_pend;
2301                         goto toss;
2302                 }
2303
2304                 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
2305         }
2306
2307         return ret;
2308
2309 toss:
2310         DHD_INFO(("%s: Toss %d\n", __FUNCTION__, ret));
2311         PKTCFREE(bus->dhd->osh, txp, TRUE);
2312         return ret;
2313 } /* dhd_bus_txdata */
2314
2315
2316 void
2317 dhd_bus_stop_queue(struct dhd_bus *bus)
2318 {
2319         dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
2320         bus->bus_flowctrl = TRUE;
2321 }
2322
2323 void
2324 dhd_bus_start_queue(struct dhd_bus *bus)
2325 {
2326         dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
2327         bus->bus_flowctrl = TRUE;
2328 }
2329
2330 #if defined(DHD_DEBUG)
2331 /* Device console input function */
2332 int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
2333 {
2334         dhd_bus_t *bus = dhd->bus;
2335         uint32 addr, val;
2336         int rv;
2337         /* Address could be zero if CONSOLE := 0 in dongle Makefile */
2338         if (bus->console_addr == 0)
2339                 return BCME_UNSUPPORTED;
2340
2341         /* Don't allow input if dongle is in reset */
2342         if (bus->dhd->dongle_reset) {
2343                 dhd_os_sdunlock(bus->dhd);
2344                 return BCME_NOTREADY;
2345         }
2346
2347         /* Zero cbuf_index */
2348         addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx);
2349         val = htol32(0);
2350         if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
2351                 goto done;
2352
2353         /* Write message into cbuf */
2354         addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf);
2355         if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0)
2356                 goto done;
2357
2358         /* Write length into vcons_in */
2359         addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in);
2360         val = htol32(msglen);
2361         if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
2362                 goto done;
2363
2364         /* generate an interrupt to dongle to indicate that it needs to process cons command */
2365         dhdpcie_send_mb_data(bus, H2D_HOST_CONS_INT);
2366 done:
2367         return rv;
2368 } /* dhd_bus_console_in */
2369 #endif /* defined(DHD_DEBUG) */
2370
2371 /**
2372  * Called on frame reception, the frame was received from the dongle on interface 'ifidx' and is
2373  * contained in 'pkt'. Processes rx frame, forwards up the layer to netif.
2374  */
2375 void BCMFASTPATH
2376 dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count)
2377 {
2378         dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, 0);
2379 }
2380
2381 /** 'offset' is a backplane address */
2382 void
2383 dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data)
2384 {
2385         *(volatile uint8 *)(bus->tcm + offset) = (uint8)data;
2386 }
2387
2388 uint8
2389 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset)
2390 {
2391         volatile uint8 data;
2392
2393                 data = *(volatile uint8 *)(bus->tcm + offset);
2394
2395         return data;
2396 }
2397
2398 void
2399 dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data)
2400 {
2401         *(volatile uint32 *)(bus->tcm + offset) = (uint32)data;
2402 }
2403 void
2404 dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data)
2405 {
2406         *(volatile uint16 *)(bus->tcm + offset) = (uint16)data;
2407 }
2408 void
2409 dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
2410 {
2411         *(volatile uint64 *)(bus->tcm + offset) = (uint64)data;
2412 }
2413
2414 uint16
2415 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset)
2416 {
2417         volatile uint16 data;
2418
2419                 data = *(volatile uint16 *)(bus->tcm + offset);
2420
2421         return data;
2422 }
2423
2424 uint32
2425 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset)
2426 {
2427         volatile uint32 data;
2428
2429                 data = *(volatile uint32 *)(bus->tcm + offset);
2430
2431         return data;
2432 }
2433
2434 uint64
2435 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset)
2436 {
2437         volatile uint64 data;
2438
2439                 data = *(volatile uint64 *)(bus->tcm + offset);
2440
2441         return data;
2442 }
2443
2444 /** A snippet of dongle memory is shared between host and dongle */
2445 void
2446 dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 type, uint16 ringid)
2447 {
2448         uint64 long_data;
2449         ulong tcm_offset;
2450
2451         DHD_INFO(("%s: writing to dongle type %d len %d\n", __FUNCTION__, type, len));
2452
2453         if (bus->is_linkdown) {
2454                 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
2455                 return;
2456         }
2457
2458         switch (type) {
2459                 case D2H_DMA_SCRATCH_BUF:
2460                 {
2461                         pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr;
2462                         long_data = HTOL64(*(uint64 *)data);
2463                         tcm_offset = (ulong)&(sh->host_dma_scratch_buffer);
2464                         dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
2465                         prhex(__FUNCTION__, data, len);
2466                         break;
2467                 }
2468
2469                 case D2H_DMA_SCRATCH_BUF_LEN:
2470                 {
2471                         pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr;
2472                         tcm_offset = (ulong)&(sh->host_dma_scratch_buffer_len);
2473                         dhdpcie_bus_wtcm32(bus, tcm_offset, (uint32) HTOL32(*(uint32 *)data));
2474                         prhex(__FUNCTION__, data, len);
2475                         break;
2476                 }
2477
2478                 case H2D_DMA_INDX_WR_BUF:
2479                 {
2480                         pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh;
2481
2482                         long_data = HTOL64(*(uint64 *)data);
2483                         tcm_offset = (ulong)shmem->rings_info_ptr;
2484                         tcm_offset += OFFSETOF(ring_info_t, h2d_w_idx_hostaddr);
2485                         dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
2486                         prhex(__FUNCTION__, data, len);
2487                         break;
2488                 }
2489
2490                 case H2D_DMA_INDX_RD_BUF:
2491                 {
2492                         pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh;
2493                         long_data = HTOL64(*(uint64 *)data);
2494                         tcm_offset = (ulong)shmem->rings_info_ptr;
2495                         tcm_offset += OFFSETOF(ring_info_t, h2d_r_idx_hostaddr);
2496                         dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
2497                         prhex(__FUNCTION__, data, len);
2498                         break;
2499                 }
2500
2501                 case D2H_DMA_INDX_WR_BUF:
2502                 {
2503                         pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh;
2504                         long_data = HTOL64(*(uint64 *)data);
2505                         tcm_offset = (ulong)shmem->rings_info_ptr;
2506                         tcm_offset += OFFSETOF(ring_info_t, d2h_w_idx_hostaddr);
2507                         dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
2508                         prhex(__FUNCTION__, data, len);
2509                         break;
2510                 }
2511
2512                 case D2H_DMA_INDX_RD_BUF:
2513                 {
2514                         pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh;
2515                         long_data = HTOL64(*(uint64 *)data);
2516                         tcm_offset = (ulong)shmem->rings_info_ptr;
2517                         tcm_offset += OFFSETOF(ring_info_t, d2h_r_idx_hostaddr);
2518                         dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
2519                         prhex(__FUNCTION__, data, len);
2520                         break;
2521                 }
2522
2523                 case RING_ITEM_LEN:
2524                         tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
2525                         tcm_offset += OFFSETOF(ring_mem_t, len_items);
2526                         dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data));
2527                         break;
2528
2529                 case RING_MAX_ITEMS:
2530                         tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
2531                         tcm_offset += OFFSETOF(ring_mem_t, max_item);
2532                         dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data));
2533                         break;
2534
2535                 case RING_BUF_ADDR:
2536                         long_data = HTOL64(*(uint64 *)data);
2537                         tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
2538                         tcm_offset += OFFSETOF(ring_mem_t, base_addr);
2539                         dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8 *) &long_data, len);
2540                         prhex(__FUNCTION__, data, len);
2541                         break;
2542
2543                 case RING_WR_UPD:
2544                         tcm_offset = bus->ring_sh[ringid].ring_state_w;
2545                         dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data));
2546                         break;
2547
2548                 case RING_RD_UPD:
2549                         tcm_offset = bus->ring_sh[ringid].ring_state_r;
2550                         dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data));
2551                         break;
2552
2553                 case D2H_MB_DATA:
2554                         dhdpcie_bus_wtcm32(bus, bus->d2h_mb_data_ptr_addr,
2555                                 (uint32) HTOL32(*(uint32 *)data));
2556                         break;
2557
2558                 case H2D_MB_DATA:
2559                         dhdpcie_bus_wtcm32(bus, bus->h2d_mb_data_ptr_addr,
2560                                 (uint32) HTOL32(*(uint32 *)data));
2561                         break;
2562
2563                 default:
2564                         break;
2565         }
2566 } /* dhd_bus_cmn_writeshared */
2567
2568 /** A snippet of dongle memory is shared between host and dongle */
2569 void
2570 dhd_bus_cmn_readshared(dhd_bus_t *bus, void* data, uint8 type, uint16 ringid)
2571 {
2572         ulong tcm_offset;
2573
2574         switch (type) {
2575                 case RING_WR_UPD:
2576                         tcm_offset = bus->ring_sh[ringid].ring_state_w;
2577                         *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, tcm_offset));
2578                         break;
2579                 case RING_RD_UPD:
2580                         tcm_offset = bus->ring_sh[ringid].ring_state_r;
2581                         *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, tcm_offset));
2582                         break;
2583                 case TOTAL_LFRAG_PACKET_CNT:
2584                 {
2585                         pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr;
2586                         *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus,
2587                                 (ulong) &sh->total_lfrag_pkt_cnt));
2588                         break;
2589                 }
2590                 case H2D_MB_DATA:
2591                         *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, bus->h2d_mb_data_ptr_addr));
2592                         break;
2593                 case D2H_MB_DATA:
2594                         *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, bus->d2h_mb_data_ptr_addr));
2595                         break;
2596                 case MAX_HOST_RXBUFS:
2597                 {
2598                         pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr;
2599                         *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus,
2600                                 (ulong) &sh->max_host_rxbufs));
2601                         break;
2602                 }
2603                 default :
2604                         break;
2605         }
2606 }
2607
2608 uint32 dhd_bus_get_sharedflags(dhd_bus_t *bus)
2609 {
2610         return ((pciedev_shared_t*)bus->pcie_sh)->flags;
2611 }
2612
2613 void
2614 dhd_bus_clearcounts(dhd_pub_t *dhdp)
2615 {
2616 }
2617
2618 int
2619 dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
2620                  void *params, int plen, void *arg, int len, bool set)
2621 {
2622         dhd_bus_t *bus = dhdp->bus;
2623         const bcm_iovar_t *vi = NULL;
2624         int bcmerror = 0;
2625         int val_size;
2626         uint32 actionid;
2627
2628         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2629
2630         ASSERT(name);
2631         ASSERT(len >= 0);
2632
2633         /* Get MUST have return space */
2634         ASSERT(set || (arg && len));
2635
2636         /* Set does NOT take qualifiers */
2637         ASSERT(!set || (!params && !plen));
2638
2639         DHD_INFO(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
2640                  name, (set ? "set" : "get"), len, plen));
2641
2642         /* Look up var locally; if not found pass to host driver */
2643         if ((vi = bcm_iovar_lookup(dhdpcie_iovars, name)) == NULL) {
2644                 goto exit;
2645         }
2646
2647
2648         /* set up 'params' pointer in case this is a set command so that
2649          * the convenience int and bool code can be common to set and get
2650          */
2651         if (params == NULL) {
2652                 params = arg;
2653                 plen = len;
2654         }
2655
2656         if (vi->type == IOVT_VOID)
2657                 val_size = 0;
2658         else if (vi->type == IOVT_BUFFER)
2659                 val_size = len;
2660         else
2661                 /* all other types are integer sized */
2662                 val_size = sizeof(int);
2663
2664         actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
2665         bcmerror = dhdpcie_bus_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
2666
2667 exit:
2668         return bcmerror;
2669 } /* dhd_bus_iovar_op */
2670
2671 #ifdef BCM_BUZZZ
2672 #include <bcm_buzzz.h>
2673
2674 int
2675 dhd_buzzz_dump_cntrs(char *p, uint32 *core, uint32 *log,
2676         const int num_counters)
2677 {
2678         int bytes = 0;
2679         uint32 ctr;
2680         uint32 curr[BCM_BUZZZ_COUNTERS_MAX], prev[BCM_BUZZZ_COUNTERS_MAX];
2681         uint32 delta[BCM_BUZZZ_COUNTERS_MAX];
2682
2683         /* Compute elapsed counter values per counter event type */
2684         for (ctr = 0U; ctr < num_counters; ctr++) {
2685                 prev[ctr] = core[ctr];
2686                 curr[ctr] = *log++;
2687                 core[ctr] = curr[ctr];  /* saved for next log */
2688
2689                 if (curr[ctr] < prev[ctr])
2690                         delta[ctr] = curr[ctr] + (~0U - prev[ctr]);
2691                 else
2692                         delta[ctr] = (curr[ctr] - prev[ctr]);
2693
2694                 bytes += sprintf(p + bytes, "%12u ", delta[ctr]);
2695         }
2696
2697         return bytes;
2698 }
2699
2700 typedef union cm3_cnts { /* export this in bcm_buzzz.h */
2701         uint32 u32;
2702         uint8  u8[4];
2703         struct {
2704                 uint8 cpicnt;
2705                 uint8 exccnt;
2706                 uint8 sleepcnt;
2707                 uint8 lsucnt;
2708         };
2709 } cm3_cnts_t;
2710
2711 int
2712 dhd_bcm_buzzz_dump_cntrs6(char *p, uint32 *core, uint32 *log)
2713 {
2714         int bytes = 0;
2715
2716         uint32 cyccnt, instrcnt;
2717         cm3_cnts_t cm3_cnts;
2718         uint8 foldcnt;
2719
2720         {   /* 32bit cyccnt */
2721                 uint32 curr, prev, delta;
2722                 prev = core[0]; curr = *log++; core[0] = curr;
2723                 if (curr < prev)
2724                         delta = curr + (~0U - prev);
2725                 else
2726                         delta = (curr - prev);
2727
2728                 bytes += sprintf(p + bytes, "%12u ", delta);
2729                 cyccnt = delta;
2730         }
2731
2732         {       /* Extract the 4 cnts: cpi, exc, sleep and lsu */
2733                 int i;
2734                 uint8 max8 = ~0;
2735                 cm3_cnts_t curr, prev, delta;
2736                 prev.u32 = core[1]; curr.u32 = * log++; core[1] = curr.u32;
2737                 for (i = 0; i < 4; i++) {
2738                         if (curr.u8[i] < prev.u8[i])
2739                                 delta.u8[i] = curr.u8[i] + (max8 - prev.u8[i]);
2740                         else
2741                                 delta.u8[i] = (curr.u8[i] - prev.u8[i]);
2742                         bytes += sprintf(p + bytes, "%4u ", delta.u8[i]);
2743                 }
2744                 cm3_cnts.u32 = delta.u32;
2745         }
2746
2747         {   /* Extract the foldcnt from arg0 */
2748                 uint8 curr, prev, delta, max8 = ~0;
2749                 bcm_buzzz_arg0_t arg0; arg0.u32 = *log;
2750                 prev = core[2]; curr = arg0.klog.cnt; core[2] = curr;
2751                 if (curr < prev)
2752                         delta = curr + (max8 - prev);
2753                 else
2754                         delta = (curr - prev);
2755                 bytes += sprintf(p + bytes, "%4u ", delta);
2756                 foldcnt = delta;
2757         }
2758
2759         instrcnt = cyccnt - (cm3_cnts.u8[0] + cm3_cnts.u8[1] + cm3_cnts.u8[2]
2760                                  + cm3_cnts.u8[3]) + foldcnt;
2761         if (instrcnt > 0xFFFFFF00)
2762                 bytes += sprintf(p + bytes, "[%10s] ", "~");
2763         else
2764                 bytes += sprintf(p + bytes, "[%10u] ", instrcnt);
2765         return bytes;
2766 }
2767
2768 int
2769 dhd_buzzz_dump_log(char *p, uint32 *core, uint32 *log, bcm_buzzz_t *buzzz)
2770 {
2771         int bytes = 0;
2772         bcm_buzzz_arg0_t arg0;
2773         static uint8 * fmt[] = BCM_BUZZZ_FMT_STRINGS;
2774
2775         if (buzzz->counters == 6) {
2776                 bytes += dhd_bcm_buzzz_dump_cntrs6(p, core, log);
2777                 log += 2; /* 32bit cyccnt + (4 x 8bit) CM3 */
2778         } else {
2779                 bytes += dhd_buzzz_dump_cntrs(p, core, log, buzzz->counters);
2780                 log += buzzz->counters; /* (N x 32bit) CR4=3, CA7=4 */
2781         }
2782
2783         /* Dump the logged arguments using the registered formats */
2784         arg0.u32 = *log++;
2785
2786         switch (arg0.klog.args) {
2787                 case 0:
2788                         bytes += sprintf(p + bytes, fmt[arg0.klog.id]);
2789                         break;
2790                 case 1:
2791                 {
2792                         uint32 arg1 = *log++;
2793                         bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1);
2794                         break;
2795                 }
2796                 case 2:
2797                 {
2798                         uint32 arg1, arg2;
2799                         arg1 = *log++; arg2 = *log++;
2800                         bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2);
2801                         break;
2802                 }
2803                 case 3:
2804                 {
2805                         uint32 arg1, arg2, arg3;
2806                         arg1 = *log++; arg2 = *log++; arg3 = *log++;
2807                         bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3);
2808                         break;
2809                 }
2810                 case 4:
2811                 {
2812                         uint32 arg1, arg2, arg3, arg4;
2813                         arg1 = *log++; arg2 = *log++;
2814                         arg3 = *log++; arg4 = *log++;
2815                         bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3, arg4);
2816                         break;
2817                 }
2818                 default:
2819                         printf("%s: Maximum one argument supported\n", __FUNCTION__);
2820                         break;
2821         }
2822
2823         bytes += sprintf(p + bytes, "\n");
2824
2825         return bytes;
2826 }
2827
2828 void dhd_buzzz_dump(bcm_buzzz_t *buzzz_p, void *buffer_p, char *p)
2829 {
2830         int i;
2831         uint32 total, part1, part2, log_sz, core[BCM_BUZZZ_COUNTERS_MAX];
2832         void * log;
2833
2834         for (i = 0; i < BCM_BUZZZ_COUNTERS_MAX; i++) {
2835                 core[i] = 0;
2836         }
2837
2838         log_sz = buzzz_p->log_sz;
2839
2840         part1 = ((uint32)buzzz_p->cur - (uint32)buzzz_p->log) / log_sz;
2841
2842         if (buzzz_p->wrap == TRUE) {
2843                 part2 = ((uint32)buzzz_p->end - (uint32)buzzz_p->cur) / log_sz;
2844                 total = (buzzz_p->buffer_sz - BCM_BUZZZ_LOGENTRY_MAXSZ) / log_sz;
2845         } else {
2846                 part2 = 0U;
2847                 total = buzzz_p->count;
2848         }
2849
2850         if (total == 0U) {
2851                 printf("%s: bcm_buzzz_dump total<%u> done\n", __FUNCTION__, total);
2852                 return;
2853         } else {
2854                 printf("%s: bcm_buzzz_dump total<%u> : part2<%u> + part1<%u>\n", __FUNCTION__,
2855                        total, part2, part1);
2856         }
2857
2858         if (part2) {   /* with wrap */
2859                 log = (void*)((size_t)buffer_p + (buzzz_p->cur - buzzz_p->log));
2860                 while (part2--) {   /* from cur to end : part2 */
2861                         p[0] = '\0';
2862                         dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
2863                         printf("%s", p);
2864                         log = (void*)((size_t)log + buzzz_p->log_sz);
2865                 }
2866         }
2867
2868         log = (void*)buffer_p;
2869         while (part1--) {
2870                 p[0] = '\0';
2871                 dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
2872                 printf("%s", p);
2873                 log = (void*)((size_t)log + buzzz_p->log_sz);
2874         }
2875
2876         printf("%s: bcm_buzzz_dump done.\n", __FUNCTION__);
2877 }
2878
2879 int dhd_buzzz_dump_dngl(dhd_bus_t *bus)
2880 {
2881         bcm_buzzz_t * buzzz_p = NULL;
2882         void * buffer_p = NULL;
2883         char * page_p = NULL;
2884         pciedev_shared_t *sh;
2885         int ret = 0;
2886
2887         if (bus->dhd->busstate != DHD_BUS_DATA) {
2888                 return BCME_UNSUPPORTED;
2889         }
2890         if ((page_p = (char *)MALLOC(bus->dhd->osh, 4096)) == NULL) {
2891                 printf("%s: Page memory allocation failure\n", __FUNCTION__);
2892                 goto done;
2893         }
2894         if ((buzzz_p = MALLOC(bus->dhd->osh, sizeof(bcm_buzzz_t))) == NULL) {
2895                 printf("%s: BCM BUZZZ memory allocation failure\n", __FUNCTION__);
2896                 goto done;
2897         }
2898
2899         ret = dhdpcie_readshared(bus);
2900         if (ret < 0) {
2901                 DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
2902                 goto done;
2903         }
2904
2905         sh = bus->pcie_sh;
2906
2907         DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__, sh->buzzz));
2908
2909         if (sh->buzzz != 0U) {  /* Fetch and display dongle BUZZZ Trace */
2910
2911                 dhdpcie_bus_membytes(bus, FALSE, (ulong)sh->buzzz,
2912                                      (uint8 *)buzzz_p, sizeof(bcm_buzzz_t));
2913
2914                 printf("BUZZZ[0x%08x]: log<0x%08x> cur<0x%08x> end<0x%08x> "
2915                         "count<%u> status<%u> wrap<%u>\n"
2916                         "cpu<0x%02X> counters<%u> group<%u> buffer_sz<%u> log_sz<%u>\n",
2917                         (int)sh->buzzz,
2918                         (int)buzzz_p->log, (int)buzzz_p->cur, (int)buzzz_p->end,
2919                         buzzz_p->count, buzzz_p->status, buzzz_p->wrap,
2920                         buzzz_p->cpu_idcode, buzzz_p->counters, buzzz_p->group,
2921                         buzzz_p->buffer_sz, buzzz_p->log_sz);
2922
2923                 if (buzzz_p->count == 0) {
2924                         printf("%s: Empty dongle BUZZZ trace\n\n", __FUNCTION__);
2925                         goto done;
2926                 }
2927
2928                 /* Allocate memory for trace buffer and format strings */
2929                 buffer_p = MALLOC(bus->dhd->osh, buzzz_p->buffer_sz);
2930                 if (buffer_p == NULL) {
2931                         printf("%s: Buffer memory allocation failure\n", __FUNCTION__);
2932                         goto done;
2933                 }
2934
2935                 /* Fetch the trace. format strings are exported via bcm_buzzz.h */
2936                 dhdpcie_bus_membytes(bus, FALSE, (uint32)buzzz_p->log,   /* Trace */
2937                                      (uint8 *)buffer_p, buzzz_p->buffer_sz);
2938
2939                 /* Process and display the trace using formatted output */
2940
2941                 {
2942                         int ctr;
2943                         for (ctr = 0; ctr < buzzz_p->counters; ctr++) {
2944                                 printf("<Evt[%02X]> ", buzzz_p->eventid[ctr]);
2945                         }
2946                         printf("<code execution point>\n");
2947                 }
2948
2949                 dhd_buzzz_dump(buzzz_p, buffer_p, page_p);
2950
2951                 printf("%s: ----- End of dongle BCM BUZZZ Trace -----\n\n", __FUNCTION__);
2952
2953                 MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); buffer_p = NULL;
2954         }
2955
2956 done:
2957
2958         if (page_p)   MFREE(bus->dhd->osh, page_p, 4096);
2959         if (buzzz_p)  MFREE(bus->dhd->osh, buzzz_p, sizeof(bcm_buzzz_t));
2960         if (buffer_p) MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz);
2961
2962         return BCME_OK;
2963 }
2964 #endif /* BCM_BUZZZ */
2965
2966 #define PCIE_GEN2(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) && \
2967         ((sih)->buscoretype == PCIE2_CORE_ID))
2968
2969 static bool
2970 pcie2_mdiosetblock(dhd_bus_t *bus, uint blk)
2971 {
2972         uint mdiodata, mdioctrl, i = 0;
2973         uint pcie_serdes_spinwait = 200;
2974
2975         mdioctrl = MDIOCTL2_DIVISOR_VAL | (0x1F << MDIOCTL2_REGADDR_SHF);
2976         mdiodata = (blk << MDIODATA2_DEVADDR_SHF) | MDIODATA2_DONE;
2977
2978         si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_CONTROL, ~0, mdioctrl);
2979         si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, ~0, mdiodata);
2980
2981         OSL_DELAY(10);
2982         /* retry till the transaction is complete */
2983         while (i < pcie_serdes_spinwait) {
2984                 uint mdioctrl_read = si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA,
2985                         0, 0);
2986                 if (!(mdioctrl_read & MDIODATA2_DONE)) {
2987                         break;
2988                 }
2989                 OSL_DELAY(1000);
2990                 i++;
2991         }
2992
2993         if (i >= pcie_serdes_spinwait) {
2994                 DHD_ERROR(("%s: pcie_mdiosetblock: timed out\n", __FUNCTION__));
2995                 return FALSE;
2996         }
2997
2998         return TRUE;
2999 }
3000
3001
3002 int
3003 dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
3004 {
3005         dhd_bus_t *bus = dhdp->bus;
3006         int bcmerror = 0;
3007         unsigned long flags;
3008 #ifdef CONFIG_ARCH_MSM
3009         int retry = POWERUP_MAX_RETRY;
3010 #endif /* CONFIG_ARCH_MSM */
3011
3012         if (dhd_download_fw_on_driverload) {
3013                 bcmerror = dhd_bus_start(dhdp);
3014         } else {
3015                 if (flag == TRUE) { /* Turn off WLAN */
3016                         /* Removing Power */
3017                         DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));
3018
3019                         bus->dhd->up = FALSE;
3020
3021                         if (bus->dhd->busstate != DHD_BUS_DOWN) {
3022                                 dhdpcie_advertise_bus_cleanup(bus->dhd);
3023                                 if (bus->intr) {
3024                                         dhdpcie_bus_intr_disable(bus);
3025                                         dhdpcie_free_irq(bus);
3026                                 }
3027 #ifdef BCMPCIE_OOB_HOST_WAKE
3028                                 /* Clean up any pending host wake IRQ */
3029                                 dhd_bus_oob_intr_set(bus->dhd, FALSE);
3030                                 dhd_bus_oob_intr_unregister(bus->dhd);
3031 #endif /* BCMPCIE_OOB_HOST_WAKE */
3032                                 dhd_os_wd_timer(dhdp, 0);
3033                                 dhd_bus_stop(bus, TRUE);
3034                                 dhd_prot_reset(dhdp);
3035                                 dhd_clear(dhdp);
3036                                 dhd_bus_release_dongle(bus);
3037                                 dhdpcie_bus_free_resource(bus);
3038                                 bcmerror = dhdpcie_bus_disable_device(bus);
3039                                 if (bcmerror) {
3040                                         DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
3041                                                 __FUNCTION__, bcmerror));
3042                                         goto done;
3043                                 }
3044 #ifdef CONFIG_ARCH_MSM
3045                                 bcmerror = dhdpcie_bus_clock_stop(bus);
3046                                 if (bcmerror) {
3047                                         DHD_ERROR(("%s: host clock stop failed: %d\n",
3048                                                 __FUNCTION__, bcmerror));
3049                                         goto done;
3050                                 }
3051 #endif /* CONFIG_ARCH_MSM */
3052                                 DHD_GENERAL_LOCK(bus->dhd, flags);
3053                                 bus->dhd->busstate = DHD_BUS_DOWN;
3054                                 DHD_GENERAL_UNLOCK(bus->dhd, flags);
3055                         } else {
3056                                 if (bus->intr) {
3057                                         dhdpcie_free_irq(bus);
3058                                 }
3059 #ifdef BCMPCIE_OOB_HOST_WAKE
3060                                 /* Clean up any pending host wake IRQ */
3061                                 dhd_bus_oob_intr_set(bus->dhd, FALSE);
3062                                 dhd_bus_oob_intr_unregister(bus->dhd);
3063 #endif /* BCMPCIE_OOB_HOST_WAKE */
3064                                 dhd_prot_reset(dhdp);
3065                                 dhd_clear(dhdp);
3066                                 dhd_bus_release_dongle(bus);
3067                                 dhdpcie_bus_free_resource(bus);
3068                                 bcmerror = dhdpcie_bus_disable_device(bus);
3069                                 if (bcmerror) {
3070                                         DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
3071                                                 __FUNCTION__, bcmerror));
3072                                         goto done;
3073                                 }
3074
3075 #ifdef CONFIG_ARCH_MSM
3076                                 bcmerror = dhdpcie_bus_clock_stop(bus);
3077                                 if (bcmerror) {
3078                                         DHD_ERROR(("%s: host clock stop failed: %d\n",
3079                                                 __FUNCTION__, bcmerror));
3080                                         goto done;
3081                                 }
3082 #endif  /* CONFIG_ARCH_MSM */
3083                         }
3084
3085                         bus->dhd->dongle_reset = TRUE;
3086                         DHD_ERROR(("%s:  WLAN OFF Done\n", __FUNCTION__));
3087
3088                 } else { /* Turn on WLAN */
3089                         if (bus->dhd->busstate == DHD_BUS_DOWN) {
3090                                 /* Powering On */
3091                                 DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__));
3092 #ifdef CONFIG_ARCH_MSM
3093                                 while (--retry) {
3094                                         bcmerror = dhdpcie_bus_clock_start(bus);
3095                                         if (!bcmerror) {
3096                                                 DHD_ERROR(("%s: dhdpcie_bus_clock_start OK\n",
3097                                                         __FUNCTION__));
3098                                                 break;
3099                                         } else {
3100                                                 OSL_SLEEP(10);
3101                                         }
3102                                 }
3103
3104                                 if (bcmerror && !retry) {
3105                                         DHD_ERROR(("%s: host pcie clock enable failed: %d\n",
3106                                                 __FUNCTION__, bcmerror));
3107                                         goto done;
3108                                 }
3109 #endif /* CONFIG_ARCH_MSM */
3110                                 bus->is_linkdown = 0;
3111                                 bus->pci_d3hot_done = 0;
3112                                 bcmerror = dhdpcie_bus_enable_device(bus);
3113                                 if (bcmerror) {
3114                                         DHD_ERROR(("%s: host configuration restore failed: %d\n",
3115                                                 __FUNCTION__, bcmerror));
3116                                         goto done;
3117                                 }
3118
3119                                 bcmerror = dhdpcie_bus_alloc_resource(bus);
3120                                 if (bcmerror) {
3121                                         DHD_ERROR(("%s: dhdpcie_bus_resource_alloc failed: %d\n",
3122                                                 __FUNCTION__, bcmerror));
3123                                         goto done;
3124                                 }
3125
3126                                 bcmerror = dhdpcie_bus_dongle_attach(bus);
3127                                 if (bcmerror) {
3128                                         DHD_ERROR(("%s: dhdpcie_bus_dongle_attach failed: %d\n",
3129                                                 __FUNCTION__, bcmerror));
3130                                         goto done;
3131                                 }
3132
3133                                 bcmerror = dhd_bus_request_irq(bus);
3134                                 if (bcmerror) {
3135                                         DHD_ERROR(("%s: dhd_bus_request_irq failed: %d\n",
3136                                                 __FUNCTION__, bcmerror));
3137                                         goto done;
3138                                 }
3139
3140                                 bus->dhd->dongle_reset = FALSE;
3141
3142                                 bcmerror = dhd_bus_start(dhdp);
3143                                 if (bcmerror) {
3144                                         DHD_ERROR(("%s: dhd_bus_start: %d\n",
3145                                                 __FUNCTION__, bcmerror));
3146                                         goto done;
3147                                 }
3148
3149                                 bus->dhd->up = TRUE;
3150                                 DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__));
3151                         } else {
3152                                 DHD_ERROR(("%s: what should we do here\n", __FUNCTION__));
3153                                 goto done;
3154                         }
3155                 }
3156         }
3157
3158 done:
3159         if (bcmerror) {
3160                 DHD_GENERAL_LOCK(bus->dhd, flags);
3161                 bus->dhd->busstate = DHD_BUS_DOWN;
3162                 DHD_GENERAL_UNLOCK(bus->dhd, flags);
3163         }
3164
3165         return bcmerror;
3166 }
3167
3168 static int
3169 pcie2_mdioop(dhd_bus_t *bus, uint physmedia, uint regaddr, bool write, uint *val,
3170         bool slave_bypass)
3171 {
3172         uint pcie_serdes_spinwait = 200, i = 0, mdio_ctrl;
3173         uint32 reg32;
3174
3175         pcie2_mdiosetblock(bus, physmedia);
3176
3177         /* enable mdio access to SERDES */
3178         mdio_ctrl = MDIOCTL2_DIVISOR_VAL;
3179         mdio_ctrl |= (regaddr << MDIOCTL2_REGADDR_SHF);
3180
3181         if (slave_bypass)
3182                 mdio_ctrl |= MDIOCTL2_SLAVE_BYPASS;
3183
3184         if (!write)
3185                 mdio_ctrl |= MDIOCTL2_READ;
3186
3187         si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_CONTROL, ~0, mdio_ctrl);
3188
3189         if (write) {
3190                 reg32 =  PCIE2_MDIO_WR_DATA;
3191                 si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, ~0,
3192                         *val | MDIODATA2_DONE);
3193         } else
3194                 reg32 =  PCIE2_MDIO_RD_DATA;
3195
3196         /* retry till the transaction is complete */
3197         while (i < pcie_serdes_spinwait) {
3198                 uint done_val =  si_corereg(bus->sih, bus->sih->buscoreidx, reg32, 0, 0);
3199                 if (!(done_val & MDIODATA2_DONE)) {
3200                         if (!write) {
3201                                 *val = si_corereg(bus->sih, bus->sih->buscoreidx,
3202                                         PCIE2_MDIO_RD_DATA, 0, 0);
3203                                 *val = *val & MDIODATA2_MASK;
3204                         }
3205                         return 0;
3206                 }
3207                 OSL_DELAY(1000);
3208                 i++;
3209         }
3210         return -1;
3211 }
3212
3213 static int
3214 dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
3215                 void *params, int plen, void *arg, int len, int val_size)
3216 {
3217         int bcmerror = 0;
3218         int32 int_val = 0;
3219         int32 int_val2 = 0;
3220         int32 int_val3 = 0;
3221         bool bool_val = 0;
3222
3223         DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
3224                    __FUNCTION__, actionid, name, params, plen, arg, len, val_size));
3225
3226         if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
3227                 goto exit;
3228
3229         if (plen >= (int)sizeof(int_val))
3230                 bcopy(params, &int_val, sizeof(int_val));
3231
3232         if (plen >= (int)sizeof(int_val) * 2)
3233                 bcopy((void*)((uintptr)params + sizeof(int_val)), &int_val2, sizeof(int_val2));
3234
3235         if (plen >= (int)sizeof(int_val) * 3)
3236                 bcopy((void*)((uintptr)params + 2 * sizeof(int_val)), &int_val3, sizeof(int_val3));
3237
3238         bool_val = (int_val != 0) ? TRUE : FALSE;
3239
3240         /* Check if dongle is in reset. If so, only allow DEVRESET iovars */
3241         if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
3242                                         actionid == IOV_GVAL(IOV_DEVRESET))) {
3243                 bcmerror = BCME_NOTREADY;
3244                 goto exit;
3245         }
3246
3247         switch (actionid) {
3248
3249
3250         case IOV_SVAL(IOV_VARS):
3251                 bcmerror = dhdpcie_downloadvars(bus, arg, len);
3252                 break;
3253
3254         case IOV_SVAL(IOV_PCIEREG):
3255                 si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
3256                         int_val);
3257                 si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0,
3258                         int_val2);
3259                 break;
3260
3261         case IOV_GVAL(IOV_PCIEREG):
3262                 si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
3263                         int_val);
3264                 int_val = si_corereg(bus->sih, bus->sih->buscoreidx,
3265                         OFFSETOF(sbpcieregs_t, configdata), 0, 0);
3266                 bcopy(&int_val, arg, sizeof(int_val));
3267                 break;
3268
3269         case IOV_SVAL(IOV_PCIECOREREG):
3270                 si_corereg(bus->sih, bus->sih->buscoreidx, int_val, ~0, int_val2);
3271                 break;
3272         case IOV_GVAL(IOV_BAR0_SECWIN_REG):
3273         {
3274                 sdreg_t sdreg;
3275                 uint32 addr, size;
3276
3277                 bcopy(params, &sdreg, sizeof(sdreg));
3278
3279                 addr = sdreg.offset;
3280                 size = sdreg.func;
3281
3282                 if (si_backplane_access(bus->sih, addr, size, &int_val, TRUE) != BCME_OK) {
3283                         DHD_ERROR(("Invalid size/addr combination \n"));
3284                         bcmerror = BCME_ERROR;
3285                         break;
3286                 }
3287                 bcopy(&int_val, arg, sizeof(int32));
3288                 break;
3289         }
3290
3291         case IOV_SVAL(IOV_BAR0_SECWIN_REG):
3292         {
3293                 sdreg_t sdreg;
3294                 uint32 addr, size;
3295
3296                 bcopy(params, &sdreg, sizeof(sdreg));
3297
3298                 addr = sdreg.offset;
3299                 size = sdreg.func;
3300                 if (si_backplane_access(bus->sih, addr, size, &sdreg.value, FALSE) != BCME_OK) {
3301                         DHD_ERROR(("Invalid size/addr combination \n"));
3302                         bcmerror = BCME_ERROR;
3303                 }
3304                 break;
3305         }
3306
3307         case IOV_GVAL(IOV_SBREG):
3308         {
3309                 sdreg_t sdreg;
3310                 uint32 addr, size;
3311
3312                 bcopy(params, &sdreg, sizeof(sdreg));
3313
3314                 addr = sdreg.offset | SI_ENUM_BASE;
3315                 size = sdreg.func;
3316
3317                 if (si_backplane_access(bus->sih, addr, size, &int_val, TRUE) != BCME_OK) {
3318                         DHD_ERROR(("Invalid size/addr combination \n"));
3319                         bcmerror = BCME_ERROR;
3320                         break;
3321                 }
3322                 bcopy(&int_val, arg, sizeof(int32));
3323                 break;
3324         }
3325
3326         case IOV_SVAL(IOV_SBREG):
3327         {
3328                 sdreg_t sdreg;
3329                 uint32 addr, size;
3330
3331                 bcopy(params, &sdreg, sizeof(sdreg));
3332
3333                 addr = sdreg.offset | SI_ENUM_BASE;
3334                 size = sdreg.func;
3335                 if (si_backplane_access(bus->sih, addr, size, &sdreg.value, FALSE) != BCME_OK) {
3336                         DHD_ERROR(("Invalid size/addr combination \n"));
3337                         bcmerror = BCME_ERROR;
3338                 }
3339                 break;
3340         }
3341
3342         case IOV_GVAL(IOV_PCIESERDESREG):
3343         {
3344                 uint val;
3345                 if (!PCIE_GEN2(bus->sih)) {
3346                         DHD_ERROR(("%s: supported only in pcie gen2\n", __FUNCTION__));
3347                         bcmerror = BCME_ERROR;
3348                         break;
3349                 }
3350
3351                 if (!pcie2_mdioop(bus, int_val, int_val2, FALSE, &val, FALSE)) {
3352                         bcopy(&val, arg, sizeof(int32));
3353                 } else {
3354                         DHD_ERROR(("%s: pcie2_mdioop failed.\n", __FUNCTION__));
3355                         bcmerror = BCME_ERROR;
3356                 }
3357                 break;
3358         }
3359
3360         case IOV_SVAL(IOV_PCIESERDESREG):
3361                 if (!PCIE_GEN2(bus->sih)) {
3362                         DHD_ERROR(("%s: supported only in pcie gen2\n", __FUNCTION__));
3363                         bcmerror = BCME_ERROR;
3364                         break;
3365                 }
3366                 if (pcie2_mdioop(bus, int_val, int_val2, TRUE, &int_val3, FALSE)) {
3367                         DHD_ERROR(("%s: pcie2_mdioop failed.\n", __FUNCTION__));
3368                         bcmerror = BCME_ERROR;
3369                 }
3370                 break;
3371         case IOV_GVAL(IOV_PCIECOREREG):
3372                 int_val = si_corereg(bus->sih, bus->sih->buscoreidx, int_val, 0, 0);
3373                 bcopy(&int_val, arg, sizeof(int_val));
3374                 break;
3375
3376         case IOV_SVAL(IOV_PCIECFGREG):
3377                 OSL_PCI_WRITE_CONFIG(bus->osh, int_val, 4, int_val2);
3378                 break;
3379
3380         case IOV_GVAL(IOV_PCIECFGREG):
3381                 int_val = OSL_PCI_READ_CONFIG(bus->osh, int_val, 4);
3382                 bcopy(&int_val, arg, sizeof(int_val));
3383                 break;
3384
3385         case IOV_SVAL(IOV_PCIE_LPBK):
3386                 bcmerror = dhdpcie_bus_lpback_req(bus, int_val);
3387                 break;
3388
3389         case IOV_SVAL(IOV_PCIE_DMAXFER):
3390                 bcmerror = dhdpcie_bus_dmaxfer_req(bus, int_val, int_val2, int_val3);
3391                 break;
3392
3393         case IOV_GVAL(IOV_PCIE_SUSPEND):
3394                 int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 1 : 0;
3395                 bcopy(&int_val, arg, val_size);
3396                 break;
3397
3398         case IOV_SVAL(IOV_PCIE_SUSPEND):
3399                 dhdpcie_bus_suspend(bus, bool_val);
3400                 break;
3401
3402         case IOV_GVAL(IOV_MEMSIZE):
3403                 int_val = (int32)bus->ramsize;
3404                 bcopy(&int_val, arg, val_size);
3405                 break;
3406         case IOV_SVAL(IOV_MEMBYTES):
3407         case IOV_GVAL(IOV_MEMBYTES):
3408         {
3409                 uint32 address;         /* absolute backplane address */
3410                 uint size, dsize;
3411                 uint8 *data;
3412
3413                 bool set = (actionid == IOV_SVAL(IOV_MEMBYTES));
3414
3415                 ASSERT(plen >= 2*sizeof(int));
3416
3417                 address = (uint32)int_val;
3418                 bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val));
3419                 size = (uint)int_val;
3420
3421                 /* Do some validation */
3422                 dsize = set ? plen - (2 * sizeof(int)) : len;
3423                 if (dsize < size) {
3424                         DHD_ERROR(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n",
3425                                    __FUNCTION__, (set ? "set" : "get"), address, size, dsize));
3426                         bcmerror = BCME_BADARG;
3427                         break;
3428                 }
3429
3430                 DHD_INFO(("%s: Request to %s %d bytes at address 0x%08x\n dsize %d ", __FUNCTION__,
3431                           (set ? "write" : "read"), size, address, dsize));
3432
3433                 /* check if CR4 */
3434                 if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
3435                     si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
3436                         /* if address is 0, store the reset instruction to be written in 0 */
3437                         if (set && address == bus->dongle_ram_base) {
3438                                 bus->resetinstr = *(((uint32*)params) + 2);
3439                         }
3440                 } else {
3441                 /* If we know about SOCRAM, check for a fit */
3442                 if ((bus->orig_ramsize) &&
3443                     ((address > bus->orig_ramsize) || (address + size > bus->orig_ramsize)))
3444                 {
3445                         uint8 enable, protect, remap;
3446                         si_socdevram(bus->sih, FALSE, &enable, &protect, &remap);
3447                         if (!enable || protect) {
3448                                 DHD_ERROR(("%s: ramsize 0x%08x doesn't have %d bytes at 0x%08x\n",
3449                                         __FUNCTION__, bus->orig_ramsize, size, address));
3450                                 DHD_ERROR(("%s: socram enable %d, protect %d\n",
3451                                         __FUNCTION__, enable, protect));
3452                                 bcmerror = BCME_BADARG;
3453                                 break;
3454                         }
3455
3456                         if (!REMAP_ENAB(bus) && (address >= SOCDEVRAM_ARM_ADDR)) {
3457                                 uint32 devramsize = si_socdevram_size(bus->sih);
3458                                 if ((address < SOCDEVRAM_ARM_ADDR) ||
3459                                         (address + size > (SOCDEVRAM_ARM_ADDR + devramsize))) {
3460                                         DHD_ERROR(("%s: bad address 0x%08x, size 0x%08x\n",
3461                                                 __FUNCTION__, address, size));
3462                                         DHD_ERROR(("%s: socram range 0x%08x,size 0x%08x\n",
3463                                                 __FUNCTION__, SOCDEVRAM_ARM_ADDR, devramsize));
3464                                         bcmerror = BCME_BADARG;
3465                                         break;
3466                                 }
3467                                 /* move it such that address is real now */
3468                                 address -= SOCDEVRAM_ARM_ADDR;
3469                                 address += SOCDEVRAM_BP_ADDR;
3470                                 DHD_INFO(("%s: Request to %s %d bytes @ Mapped address 0x%08x\n",
3471                                         __FUNCTION__, (set ? "write" : "read"), size, address));
3472                         } else if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address) && remap) {
3473                                 /* Can not access remap region while devram remap bit is set
3474                                  * ROM content would be returned in this case
3475                                  */
3476                                 DHD_ERROR(("%s: Need to disable remap for address 0x%08x\n",
3477                                         __FUNCTION__, address));
3478                                 bcmerror = BCME_ERROR;
3479                                 break;
3480                         }
3481                 }
3482                 }
3483
3484                 /* Generate the actual data pointer */
3485                 data = set ? (uint8*)params + 2 * sizeof(int): (uint8*)arg;
3486
3487                 /* Call to do the transfer */
3488                 bcmerror = dhdpcie_bus_membytes(bus, set, address, data, size);
3489
3490                 break;
3491         }
3492
3493 #ifdef BCM_BUZZZ
3494         /* Dump dongle side buzzz trace to console */
3495         case IOV_GVAL(IOV_BUZZZ_DUMP):
3496                 bcmerror = dhd_buzzz_dump_dngl(bus);
3497                 break;
3498 #endif /* BCM_BUZZZ */
3499
3500         case IOV_SVAL(IOV_SET_DOWNLOAD_STATE):
3501                 bcmerror = dhdpcie_bus_download_state(bus, bool_val);
3502                 break;
3503
3504         case IOV_GVAL(IOV_RAMSIZE):
3505                 int_val = (int32)bus->ramsize;
3506                 bcopy(&int_val, arg, val_size);
3507                 break;
3508
3509         case IOV_GVAL(IOV_RAMSTART):
3510                 int_val = (int32)bus->dongle_ram_base;
3511                 bcopy(&int_val, arg, val_size);
3512                 break;
3513
3514         case IOV_GVAL(IOV_CC_NVMSHADOW):
3515         {
3516                 struct bcmstrbuf dump_b;
3517
3518                 bcm_binit(&dump_b, arg, len);
3519                 bcmerror = dhdpcie_cc_nvmshadow(bus, &dump_b);
3520                 break;
3521         }
3522
3523         case IOV_GVAL(IOV_SLEEP_ALLOWED):
3524                 bool_val = bus->sleep_allowed;
3525                 bcopy(&bool_val, arg, val_size);
3526                 break;
3527
3528         case IOV_SVAL(IOV_SLEEP_ALLOWED):
3529                 bus->sleep_allowed = bool_val;
3530                 break;
3531
3532         case IOV_GVAL(IOV_DONGLEISOLATION):
3533                 int_val = bus->dhd->dongle_isolation;
3534                 bcopy(&int_val, arg, val_size);
3535                 break;
3536
3537         case IOV_SVAL(IOV_DONGLEISOLATION):
3538                 bus->dhd->dongle_isolation = bool_val;
3539                 break;
3540
3541         case IOV_GVAL(IOV_LTRSLEEPON_UNLOOAD):
3542                 int_val = bus->ltrsleep_on_unload;
3543                 bcopy(&int_val, arg, val_size);
3544                 break;
3545
3546         case IOV_SVAL(IOV_LTRSLEEPON_UNLOOAD):
3547                 bus->ltrsleep_on_unload = bool_val;
3548                 break;
3549
3550         case IOV_GVAL(IOV_DUMP_RINGUPD_BLOCK):
3551         {
3552                 struct bcmstrbuf dump_b;
3553                 bcm_binit(&dump_b, arg, len);
3554                 bcmerror = dhd_prot_ringupd_dump(bus->dhd, &dump_b);
3555                 break;
3556         }
3557         case IOV_GVAL(IOV_DMA_RINGINDICES):
3558         {       int h2d_support, d2h_support;
3559
3560                 d2h_support = DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support) ? 1 : 0;
3561                 h2d_support = DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support) ? 1 : 0;
3562                 int_val = d2h_support | (h2d_support << 1);
3563                 bcopy(&int_val, arg, sizeof(int_val));
3564                 break;
3565         }
3566         case IOV_SVAL(IOV_DMA_RINGINDICES):
3567                 /* Can change it only during initialization/FW download */
3568                 if (bus->dhd->busstate == DHD_BUS_DOWN) {
3569                         if ((int_val > 3) || (int_val < 0)) {
3570                                 DHD_ERROR(("%s: Bad argument. Possible values: 0, 1, 2 & 3\n", __FUNCTION__));
3571                                 bcmerror = BCME_BADARG;
3572                         } else {
3573                                 bus->dhd->dma_d2h_ring_upd_support = (int_val & 1) ? TRUE : FALSE;
3574                                 bus->dhd->dma_h2d_ring_upd_support = (int_val & 2) ? TRUE : FALSE;
3575                         }
3576                 } else {
3577                         DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
3578                                 __FUNCTION__));
3579                         bcmerror = BCME_NOTDOWN;
3580                 }
3581                 break;
3582
3583         case IOV_GVAL(IOV_METADATA_DBG):
3584                 int_val = dhd_prot_metadata_dbg_get(bus->dhd);
3585                 bcopy(&int_val, arg, val_size);
3586                 break;
3587         case IOV_SVAL(IOV_METADATA_DBG):
3588                 dhd_prot_metadata_dbg_set(bus->dhd, (int_val != 0));
3589                 break;
3590
3591         case IOV_GVAL(IOV_RX_METADATALEN):
3592                 int_val = dhd_prot_metadatalen_get(bus->dhd, TRUE);
3593                 bcopy(&int_val, arg, val_size);
3594                 break;
3595
3596         case IOV_SVAL(IOV_RX_METADATALEN):
3597                 if (int_val > 64) {
3598                         bcmerror = BCME_BUFTOOLONG;
3599                         break;
3600                 }
3601                 dhd_prot_metadatalen_set(bus->dhd, int_val, TRUE);
3602                 break;
3603
3604         case IOV_SVAL(IOV_TXP_THRESHOLD):
3605                 dhd_prot_txp_threshold(bus->dhd, TRUE, int_val);
3606                 break;
3607
3608         case IOV_GVAL(IOV_TXP_THRESHOLD):
3609                 int_val = dhd_prot_txp_threshold(bus->dhd, FALSE, int_val);
3610                 bcopy(&int_val, arg, val_size);
3611                 break;
3612
3613         case IOV_SVAL(IOV_DB1_FOR_MB):
3614                 if (int_val)
3615                         bus->db1_for_mb = TRUE;
3616                 else
3617                         bus->db1_for_mb = FALSE;
3618                 break;
3619
3620         case IOV_GVAL(IOV_DB1_FOR_MB):
3621                 if (bus->db1_for_mb)
3622                         int_val = 1;
3623                 else
3624                         int_val = 0;
3625                 bcopy(&int_val, arg, val_size);
3626                 break;
3627
3628         case IOV_GVAL(IOV_TX_METADATALEN):
3629                 int_val = dhd_prot_metadatalen_get(bus->dhd, FALSE);
3630                 bcopy(&int_val, arg, val_size);
3631                 break;
3632
3633         case IOV_SVAL(IOV_TX_METADATALEN):
3634                 if (int_val > 64) {
3635                         bcmerror = BCME_BUFTOOLONG;
3636                         break;
3637                 }
3638                 dhd_prot_metadatalen_set(bus->dhd, int_val, FALSE);
3639                 break;
3640
3641         case IOV_SVAL(IOV_DEVRESET):
3642                 dhd_bus_devreset(bus->dhd, (uint8)bool_val);
3643                 break;
3644
3645         case IOV_GVAL(IOV_FLOW_PRIO_MAP):
3646                 int_val = bus->dhd->flow_prio_map_type;
3647                 bcopy(&int_val, arg, val_size);
3648                 break;
3649
3650         case IOV_SVAL(IOV_FLOW_PRIO_MAP):
3651                 int_val = (int32)dhd_update_flow_prio_map(bus->dhd, (uint8)int_val);
3652                 bcopy(&int_val, arg, val_size);
3653                 break;
3654
3655 #ifdef DHD_PCIE_RUNTIMEPM
3656         case IOV_GVAL(IOV_IDLETIME):
3657                 int_val = bus->idletime;
3658                 bcopy(&int_val, arg, val_size);
3659                 break;
3660
3661         case IOV_SVAL(IOV_IDLETIME):
3662                 if (int_val < 0) {
3663                         bcmerror = BCME_BADARG;
3664                 } else {
3665                         bus->idletime = int_val;
3666                 }
3667                 break;
3668 #endif /* DHD_PCIE_RUNTIMEPM */
3669
3670         case IOV_GVAL(IOV_TXBOUND):
3671                 int_val = (int32)dhd_txbound;
3672                 bcopy(&int_val, arg, val_size);
3673                 break;
3674
3675         case IOV_SVAL(IOV_TXBOUND):
3676                 dhd_txbound = (uint)int_val;
3677                 break;
3678
3679         case IOV_GVAL(IOV_RXBOUND):
3680                 int_val = (int32)dhd_rxbound;
3681                 bcopy(&int_val, arg, val_size);
3682                 break;
3683
3684         case IOV_SVAL(IOV_RXBOUND):
3685                 dhd_rxbound = (uint)int_val;
3686                 break;
3687
3688         case IOV_SVAL(IOV_HANGREPORT):
3689                 bus->dhd->hang_report = bool_val;
3690                 DHD_ERROR(("%s: Set hang_report as %d\n",
3691                         __FUNCTION__, bus->dhd->hang_report));
3692                 break;
3693
3694         case IOV_GVAL(IOV_HANGREPORT):
3695                 int_val = (int32)bus->dhd->hang_report;
3696                 bcopy(&int_val, arg, val_size);
3697                 break;
3698
3699         default:
3700                 bcmerror = BCME_UNSUPPORTED;
3701                 break;
3702         }
3703
3704 exit:
3705         return bcmerror;
3706 } /* dhdpcie_bus_doiovar */
3707
3708 /** Transfers bytes from host to dongle using pio mode */
3709 static int
3710 dhdpcie_bus_lpback_req(struct  dhd_bus *bus, uint32 len)
3711 {
3712         if (bus->dhd == NULL) {
3713                 DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
3714                 return 0;
3715         }
3716         if (bus->dhd->prot == NULL) {
3717                 DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
3718                 return 0;
3719         }
3720         if (bus->dhd->busstate != DHD_BUS_DATA) {
3721                 DHD_ERROR(("%s: not in a readystate to LPBK  is not inited\n", __FUNCTION__));
3722                 return 0;
3723         }
3724         dhdmsgbuf_lpbk_req(bus->dhd, len);
3725         return 0;
3726 }
3727
3728 int
3729 dhdpcie_bus_suspend(struct dhd_bus *bus, bool state)
3730 {
3731         int timeleft;
3732         unsigned long flags;
3733         int rc = 0;
3734
3735         if (bus->dhd == NULL) {
3736                 DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
3737                 return BCME_ERROR;
3738         }
3739         if (bus->dhd->prot == NULL) {
3740                 DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
3741                 return BCME_ERROR;
3742         }
3743         DHD_GENERAL_LOCK(bus->dhd, flags);
3744         if (bus->dhd->busstate != DHD_BUS_DATA && bus->dhd->busstate != DHD_BUS_SUSPEND) {
3745                 DHD_ERROR(("%s: not in a readystate to LPBK  is not inited\n", __FUNCTION__));
3746                 DHD_GENERAL_UNLOCK(bus->dhd, flags);
3747                 return BCME_ERROR;
3748         }
3749         DHD_GENERAL_UNLOCK(bus->dhd, flags);
3750         if (bus->dhd->dongle_reset) {
3751                 DHD_ERROR(("Dongle is in reset state.\n"));
3752                 return -EIO;
3753         }
3754
3755         if (bus->suspended == state) { /* Set to same state */
3756                 DHD_ERROR(("Bus is already in SUSPEND state.\n"));
3757                 return BCME_OK;
3758         }
3759
3760         if (state) {
3761                 int idle_retry = 0;
3762                 int active;
3763
3764                 if (bus->is_linkdown) {
3765                         DHD_ERROR(("%s: PCIe link was down, state=%d\n",
3766                                 __FUNCTION__, state));
3767                         return BCME_ERROR;
3768                 }
3769
3770                 /* Suspend */
3771                 DHD_ERROR(("%s: Entering suspend state\n", __FUNCTION__));
3772                 bus->wait_for_d3_ack = 0;
3773                 bus->suspended = TRUE;
3774
3775
3776                 DHD_GENERAL_LOCK(bus->dhd, flags);
3777                 /* stop all interface network queue. */
3778                 dhd_bus_stop_queue(bus);
3779                 bus->dhd->busstate = DHD_BUS_SUSPEND;
3780                 if (bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_IN_TX) {
3781                         DHD_ERROR(("Tx Request is not ended\n"));
3782                         bus->dhd->busstate = DHD_BUS_DATA;
3783                         /* resume all interface network queue. */
3784                         dhd_bus_start_queue(bus);
3785                         DHD_GENERAL_UNLOCK(bus->dhd, flags);
3786                         bus->suspended = FALSE;
3787                         return -EBUSY;
3788                 }
3789
3790                 bus->dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SUSPEND;
3791                 DHD_GENERAL_UNLOCK(bus->dhd, flags);
3792
3793                 DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
3794                 dhd_os_set_ioctl_resp_timeout(D3_ACK_RESP_TIMEOUT);
3795                 dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
3796                 timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
3797                 dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
3798                 DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
3799
3800                 {
3801                         uint32 d2h_mb_data = 0;
3802                         uint32 zero = 0;
3803
3804                         /* If wait_for_d3_ack was not updated because D2H MB was not received */
3805                         if (bus->wait_for_d3_ack == 0) {
3806                                 /* Read the Mb data to see if the Dongle has actually sent D3 ACK */
3807                                 dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
3808
3809                                 if (d2h_mb_data & D2H_DEV_D3_ACK) {
3810                                         DHD_ERROR(("*** D3 WAR for missing interrupt ***\r\n"));
3811                                         /* Clear the MB Data */
3812                                         dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32),
3813                                                 D2H_MB_DATA, 0);
3814
3815                                         /* Consider that D3 ACK is received */
3816                                         bus->wait_for_d3_ack = 1;
3817                                         bus->d3_ack_war_cnt++;
3818
3819                                 } /* d2h_mb_data & D2H_DEV_D3_ACK */
3820                         } /* bus->wait_for_d3_ack was 0 */
3821                 }
3822
3823                 /* To allow threads that got pre-empted to complete.
3824                  */
3825                 while ((active = dhd_os_check_wakelock_all(bus->dhd)) &&
3826                         (idle_retry < MAX_WKLK_IDLE_CHECK)) {
3827                         msleep(1);
3828                         idle_retry++;
3829                 }
3830
3831                 if (bus->wait_for_d3_ack) {
3832                         DHD_ERROR(("%s: Got D3 Ack \n", __FUNCTION__));
3833                         /* Got D3 Ack. Suspend the bus */
3834                         if (active) {
3835                                 DHD_ERROR(("%s():Suspend failed because of wakelock restoring "
3836                                         "Dongle to D0\n", __FUNCTION__));
3837
3838                                 /*
3839                                  * Dongle still thinks that it has to be in D3 state
3840                                  * until gets a D0 Inform, but we are backing off from suspend.
3841                                  * Ensure that Dongle is brought back to D0.
3842                                  *
3843                                  * Bringing back Dongle from D3 Ack state to D0 state
3844                                  * is a 2 step process. Dongle would want to know that D0 Inform
3845                                  * would be sent as a MB interrupt
3846                                  * to bring it out of D3 Ack state to D0 state.
3847                                  * So we have to send both this message.
3848                                  */
3849                                 DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
3850                                 dhdpcie_send_mb_data(bus,
3851                                         (H2D_HOST_D0_INFORM_IN_USE | H2D_HOST_D0_INFORM));
3852                                 DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
3853
3854                                 bus->suspended = FALSE;
3855                                 DHD_GENERAL_LOCK(bus->dhd, flags);
3856                                 bus->dhd->busstate = DHD_BUS_DATA;
3857                                 /* resume all interface network queue. */
3858                                 dhd_bus_start_queue(bus);
3859                                 DHD_GENERAL_UNLOCK(bus->dhd, flags);
3860                                 rc = BCME_ERROR;
3861                         } else {
3862                                 DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
3863                                 dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM_IN_USE));
3864                                 DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
3865                                 dhdpcie_bus_intr_disable(bus);
3866                                 rc = dhdpcie_pci_suspend_resume(bus, state);
3867                                 dhd_bus_set_device_wake(bus, FALSE);
3868                         }
3869                         bus->dhd->d3ackcnt_timeout = 0;
3870 #if defined(BCMPCIE_OOB_HOST_WAKE)
3871                         dhdpcie_oob_intr_set(bus, TRUE);
3872 #endif /* BCMPCIE_OOB_HOST_WAKE */
3873                 } else if (timeleft == 0) {
3874                         bus->dhd->d3ackcnt_timeout++;
3875                         DHD_ERROR(("%s: resumed on timeout for D3 ACK d3_inform_cnt %d \n",
3876                                 __FUNCTION__, bus->dhd->d3ackcnt_timeout));
3877                         dhd_prot_debug_info_print(bus->dhd);
3878 #ifdef DHD_FW_COREDUMP
3879                         if (bus->dhd->memdump_enabled) {
3880                                 /* write core dump to file */
3881                                 bus->dhd->memdump_type = DUMP_TYPE_D3_ACK_TIMEOUT;
3882                                 dhdpcie_mem_dump(bus);
3883                         }
3884 #endif /* DHD_FW_COREDUMP */
3885                         bus->suspended = FALSE;
3886                         DHD_GENERAL_LOCK(bus->dhd, flags);
3887                         bus->dhd->busstate = DHD_BUS_DATA;
3888                         /* resume all interface network queue. */
3889                         dhd_bus_start_queue(bus);
3890                         DHD_GENERAL_UNLOCK(bus->dhd, flags);
3891                         if (bus->dhd->d3ackcnt_timeout >= MAX_CNTL_D3ACK_TIMEOUT) {
3892                                 DHD_ERROR(("%s: Event HANG send up "
3893                                         "due to PCIe linkdown\n", __FUNCTION__));
3894 #ifdef SUPPORT_LINKDOWN_RECOVERY
3895 #ifdef CONFIG_ARCH_MSM
3896                                 bus->no_cfg_restore = 1;
3897 #endif /* CONFIG_ARCH_MSM */
3898 #endif /* SUPPORT_LINKDOWN_RECOVERY */
3899                                 dhd_os_check_hang(bus->dhd, 0, -ETIMEDOUT);
3900                         }
3901                         rc = -ETIMEDOUT;
3902
3903                 }
3904
3905                 bus->wait_for_d3_ack = 1;
3906                 DHD_GENERAL_LOCK(bus->dhd, flags);
3907                 bus->dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_SUSPEND;
3908                 dhd_os_busbusy_wake(bus->dhd);
3909                 DHD_GENERAL_UNLOCK(bus->dhd, flags);
3910         } else {
3911                 /* Resume */
3912 #if defined(BCMPCIE_OOB_HOST_WAKE)
3913                 DHD_OS_OOB_IRQ_WAKE_UNLOCK(bus->dhd);
3914 #endif /* BCMPCIE_OOB_HOST_WAKE */
3915                 DHD_GENERAL_LOCK(bus->dhd, flags);
3916                 bus->dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_RESUME;
3917                 DHD_GENERAL_UNLOCK(bus->dhd, flags);
3918                 rc = dhdpcie_pci_suspend_resume(bus, state);
3919                 if (bus->dhd->busstate == DHD_BUS_SUSPEND) {
3920                         DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
3921                         dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM));
3922                         DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
3923                         dhd_bus_set_device_wake(bus, TRUE);
3924                 }
3925                 bus->suspended = FALSE;
3926                 DHD_GENERAL_LOCK(bus->dhd, flags);
3927                 bus->dhd->busstate = DHD_BUS_DATA;
3928                 bus->dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_RESUME;
3929 #ifdef DHD_PCIE_RUNTIMEPM
3930                 if (bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_SUSPEND_DONE) {
3931                         bus->bus_wake = 1;
3932                         OSL_SMP_WMB();
3933                         wake_up_interruptible(&bus->rpm_queue);
3934                 }
3935 #endif /* DHD_PCIE_RUNTIMEPM */
3936                 /* resume all interface network queue. */
3937                 dhd_bus_start_queue(bus);
3938                 dhd_os_busbusy_wake(bus->dhd);
3939                 DHD_GENERAL_UNLOCK(bus->dhd, flags);
3940                 dhdpcie_bus_intr_enable(bus);
3941         }
3942         return rc;
3943 }
3944
3945 /** Transfers bytes from host to dongle and to host again using DMA */
3946 static int
3947 dhdpcie_bus_dmaxfer_req(struct  dhd_bus *bus, uint32 len, uint32 srcdelay, uint32 destdelay)
3948 {
3949         if (bus->dhd == NULL) {
3950                 DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
3951                 return BCME_ERROR;
3952         }
3953         if (bus->dhd->prot == NULL) {
3954                 DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
3955                 return BCME_ERROR;
3956         }
3957         if (bus->dhd->busstate != DHD_BUS_DATA) {
3958                 DHD_ERROR(("%s: not in a readystate to LPBK  is not inited\n", __FUNCTION__));
3959                 return BCME_ERROR;
3960         }
3961
3962         if (len < 5 || len > 4194296) {
3963                 DHD_ERROR(("%s: len is too small or too large\n", __FUNCTION__));
3964                 return BCME_ERROR;
3965         }
3966         return dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay);
3967 }
3968
3969
3970
3971 static int
3972 dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter)
3973 {
3974         int bcmerror = 0;
3975         uint32 *cr4_regs;
3976
3977         if (!bus->sih) {
3978                 DHD_ERROR(("%s: NULL sih!!\n", __FUNCTION__));
3979                 return BCME_ERROR;
3980         }
3981         /* To enter download state, disable ARM and reset SOCRAM.
3982          * To exit download state, simply reset ARM (default is RAM boot).
3983          */
3984         if (enter) {
3985                 bus->alp_only = TRUE;
3986
3987                 /* some chips (e.g. 43602) have two ARM cores, the CR4 is receives the firmware. */
3988                 cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
3989
3990                 if (cr4_regs == NULL && !(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
3991                     !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) &&
3992                     !(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
3993                         DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
3994                         bcmerror = BCME_ERROR;
3995                         goto fail;
3996                 }
3997
3998                 if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
3999                         /* Halt ARM & remove reset */
4000                         si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
4001                         if (!(si_setcore(bus->sih, SYSMEM_CORE_ID, 0))) {
4002                                 DHD_ERROR(("%s: Failed to find SYSMEM core!\n", __FUNCTION__));
4003                                 bcmerror = BCME_ERROR;
4004                                 goto fail;
4005                         }
4006                         si_core_reset(bus->sih, 0, 0);
4007                         /* reset last 4 bytes of RAM address. to be used for shared area */
4008                         dhdpcie_init_shared_addr(bus);
4009                 } else if (cr4_regs == NULL) { /* no CR4 present on chip */
4010                         si_core_disable(bus->sih, 0);
4011
4012                         if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
4013                                 DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
4014                                 bcmerror = BCME_ERROR;
4015                                 goto fail;
4016                         }
4017
4018                         si_core_reset(bus->sih, 0, 0);
4019
4020                         /* Clear the top bit of memory */
4021                         if (bus->ramsize) {
4022                                 uint32 zeros = 0;
4023                                 if (dhdpcie_bus_membytes(bus, TRUE, bus->ramsize - 4,
4024                                                      (uint8*)&zeros, 4) < 0) {
4025                                         bcmerror = BCME_ERROR;
4026                                         goto fail;
4027                                 }
4028                         }
4029                 } else {
4030                         /* For CR4,
4031                          * Halt ARM
4032                          * Remove ARM reset
4033                          * Read RAM base address [0x18_0000]
4034                          * [next] Download firmware
4035                          * [done at else] Populate the reset vector
4036                          * [done at else] Remove ARM halt
4037                         */
4038                         /* Halt ARM & remove reset */
4039                         si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
4040                         if (BCM43602_CHIP(bus->sih->chip)) {
4041                                 W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 5);
4042                                 W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
4043                                 W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 7);
4044                                 W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
4045                         }
4046                         /* reset last 4 bytes of RAM address. to be used for shared area */
4047                         dhdpcie_init_shared_addr(bus);
4048                 }
4049         } else {
4050                 if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
4051                         /* write vars */
4052                         if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
4053                                 DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
4054                                 goto fail;
4055                         }
4056                         /* switch back to arm core again */
4057                         if (!(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
4058                                 DHD_ERROR(("%s: Failed to find ARM CA7 core!\n", __FUNCTION__));
4059                                 bcmerror = BCME_ERROR;
4060                                 goto fail;
4061                         }
4062                         /* write address 0 with reset instruction */
4063                         bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
4064                                 (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
4065                         /* now remove reset and halt and continue to run CA7 */
4066                 } else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
4067                         if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
4068                                 DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
4069                                 bcmerror = BCME_ERROR;
4070                                 goto fail;
4071                         }
4072
4073                         if (!si_iscoreup(bus->sih)) {
4074                                 DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__));
4075                                 bcmerror = BCME_ERROR;
4076                                 goto fail;
4077                         }
4078
4079                         /* Enable remap before ARM reset but after vars.
4080                          * No backplane access in remap mode
4081                          */
4082                         if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
4083                             !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
4084                                 DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
4085                                 bcmerror = BCME_ERROR;
4086                                 goto fail;
4087                         }
4088
4089
4090                         if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
4091                             !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
4092                                 DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
4093                                 bcmerror = BCME_ERROR;
4094                                 goto fail;
4095                         }
4096                 } else {
4097                         if (BCM43602_CHIP(bus->sih->chip)) {
4098                                 /* Firmware crashes on SOCSRAM access when core is in reset */
4099                                 if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
4100                                         DHD_ERROR(("%s: Failed to find SOCRAM core!\n",
4101                                                 __FUNCTION__));
4102                                         bcmerror = BCME_ERROR;
4103                                         goto fail;
4104                                 }
4105                                 si_core_reset(bus->sih, 0, 0);
4106                                 si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
4107                         }
4108
4109                         /* write vars */
4110                         if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
4111                                 DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
4112                                 goto fail;
4113                         }
4114
4115                         /* switch back to arm core again */
4116                         if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
4117                                 DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__));
4118                                 bcmerror = BCME_ERROR;
4119                                 goto fail;
4120                         }
4121
4122                         /* write address 0 with reset instruction */
4123                         bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
4124                                 (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
4125
4126                         if (bcmerror == BCME_OK) {
4127                                 uint32 tmp;
4128
4129                                 bcmerror = dhdpcie_bus_membytes(bus, FALSE, 0,
4130                                                                 (uint8 *)&tmp, sizeof(tmp));
4131
4132                                 if (bcmerror == BCME_OK && tmp != bus->resetinstr) {
4133                                         DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n",
4134                                                   __FUNCTION__, bus->resetinstr));
4135                                         DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n",
4136                                                   __FUNCTION__, tmp));
4137                                         bcmerror = BCME_ERROR;
4138                                         goto fail;
4139                                 }
4140                         }
4141
4142                         /* now remove reset and halt and continue to run CR4 */
4143                 }
4144
4145                 si_core_reset(bus->sih, 0, 0);
4146
4147                 /* Allow HT Clock now that the ARM is running. */
4148                 bus->alp_only = FALSE;
4149
4150                 bus->dhd->busstate = DHD_BUS_LOAD;
4151         }
4152
4153 fail:
4154         /* Always return to PCIE core */
4155         si_setcore(bus->sih, PCIE2_CORE_ID, 0);
4156
4157         return bcmerror;
4158 } /* dhdpcie_bus_download_state */
4159
4160 static int
4161 dhdpcie_bus_write_vars(dhd_bus_t *bus)
4162 {
4163         int bcmerror = 0;
4164         uint32 varsize, phys_size;
4165         uint32 varaddr;
4166         uint8 *vbuffer;
4167         uint32 varsizew;
4168 #ifdef DHD_DEBUG
4169         uint8 *nvram_ularray;
4170 #endif /* DHD_DEBUG */
4171
4172         /* Even if there are no vars are to be written, we still need to set the ramsize. */
4173         varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0;
4174         varaddr = (bus->ramsize - 4) - varsize;
4175
4176         varaddr += bus->dongle_ram_base;
4177
4178         if (bus->vars) {
4179
4180                 vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize);
4181                 if (!vbuffer)
4182                         return BCME_NOMEM;
4183
4184                 bzero(vbuffer, varsize);
4185                 bcopy(bus->vars, vbuffer, bus->varsz);
4186                 /* Write the vars list */
4187                 DHD_INFO_HW4(("%s: tcm: %p varaddr: 0x%x varsize: %d\n",
4188                         __FUNCTION__, bus->tcm, varaddr, varsize));
4189                 bcmerror = dhdpcie_bus_membytes(bus, TRUE, varaddr, vbuffer, varsize);
4190
4191                 /* Implement read back and verify later */
4192 #ifdef DHD_DEBUG
4193                 /* Verify NVRAM bytes */
4194                 DHD_INFO(("%s: Compare NVRAM dl & ul; varsize=%d\n", __FUNCTION__, varsize));
4195                 nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize);
4196                 if (!nvram_ularray)
4197                         return BCME_NOMEM;
4198
4199                 /* Upload image to verify downloaded contents. */
4200                 memset(nvram_ularray, 0xaa, varsize);
4201
4202                 /* Read the vars list to temp buffer for comparison */
4203                 bcmerror = dhdpcie_bus_membytes(bus, FALSE, varaddr, nvram_ularray, varsize);
4204                 if (bcmerror) {
4205                                 DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
4206                                         __FUNCTION__, bcmerror, varsize, varaddr));
4207                 }
4208
4209                 /* Compare the org NVRAM with the one read from RAM */
4210                 if (memcmp(vbuffer, nvram_ularray, varsize)) {
4211                         DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__));
4212                 } else
4213                         DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",
4214                         __FUNCTION__));
4215
4216                 MFREE(bus->dhd->osh, nvram_ularray, varsize);
4217 #endif /* DHD_DEBUG */
4218
4219                 MFREE(bus->dhd->osh, vbuffer, varsize);
4220         }
4221
4222         phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize;
4223
4224         phys_size += bus->dongle_ram_base;
4225
4226         /* adjust to the user specified RAM */
4227         DHD_INFO(("%s: Physical memory size: %d, usable memory size: %d\n", __FUNCTION__,
4228                 phys_size, bus->ramsize));
4229         DHD_INFO(("%s: Vars are at %d, orig varsize is %d\n", __FUNCTION__,
4230                 varaddr, varsize));
4231         varsize = ((phys_size - 4) - varaddr);
4232
4233         /*
4234          * Determine the length token:
4235          * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
4236          */
4237         if (bcmerror) {
4238                 varsizew = 0;
4239                 bus->nvram_csm = varsizew;
4240         } else {
4241                 varsizew = varsize / 4;
4242                 varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
4243                 bus->nvram_csm = varsizew;
4244                 varsizew = htol32(varsizew);
4245         }
4246
4247         DHD_INFO(("%s: New varsize is %d, length token=0x%08x\n", __FUNCTION__, varsize, varsizew));
4248
4249         /* Write the length token to the last word */
4250         DHD_INFO_HW4(("%s: tcm: %p phys_size: 0x%x varsizew: %x\n",
4251                         __FUNCTION__, bus->tcm, phys_size, varsizew));
4252         bcmerror = dhdpcie_bus_membytes(bus, TRUE, (phys_size - 4),
4253                 (uint8*)&varsizew, 4);
4254
4255         return bcmerror;
4256 } /* dhdpcie_bus_write_vars */
4257
4258 int
4259 dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len)
4260 {
4261         int bcmerror = BCME_OK;
4262
4263         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4264
4265         /* Basic sanity checks */
4266         if (bus->dhd->up) {
4267                 bcmerror = BCME_NOTDOWN;
4268                 goto err;
4269         }
4270         if (!len) {
4271                 bcmerror = BCME_BUFTOOSHORT;
4272                 goto err;
4273         }
4274
4275         /* Free the old ones and replace with passed variables */
4276         if (bus->vars)
4277                 MFREE(bus->dhd->osh, bus->vars, bus->varsz);
4278
4279         bus->vars = MALLOC(bus->dhd->osh, len);
4280         bus->varsz = bus->vars ? len : 0;
4281         if (bus->vars == NULL) {
4282                 bcmerror = BCME_NOMEM;
4283                 goto err;
4284         }
4285
4286         /* Copy the passed variables, which should include the terminating double-null */
4287         bcopy(arg, bus->vars, bus->varsz);
4288
4289
4290 err:
4291         return bcmerror;
4292 }
4293
4294 #ifndef BCMPCIE_OOB_HOST_WAKE
4295 /* loop through the capability list and see if the pcie capabilty exists */
4296 uint8
4297 dhdpcie_find_pci_capability(osl_t *osh, uint8 req_cap_id)
4298 {
4299         uint8 cap_id;
4300         uint8 cap_ptr = 0;
4301         uint8 byte_val;
4302
4303         /* check for Header type 0 */
4304         byte_val = read_pci_cfg_byte(PCI_CFG_HDR);
4305         if ((byte_val & 0x7f) != PCI_HEADER_NORMAL) {
4306                 DHD_ERROR(("%s : PCI config header not normal.\n", __FUNCTION__));
4307                 goto end;
4308         }
4309
4310         /* check if the capability pointer field exists */
4311         byte_val = read_pci_cfg_byte(PCI_CFG_STAT);
4312         if (!(byte_val & PCI_CAPPTR_PRESENT)) {
4313                 DHD_ERROR(("%s : PCI CAP pointer not present.\n", __FUNCTION__));
4314                 goto end;
4315         }
4316
4317         cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
4318         /* check if the capability pointer is 0x00 */
4319         if (cap_ptr == 0x00) {
4320                 DHD_ERROR(("%s : PCI CAP pointer is 0x00.\n", __FUNCTION__));
4321                 goto end;
4322         }
4323
4324         /* loop thr'u the capability list and see if the pcie capabilty exists */
4325
4326         cap_id = read_pci_cfg_byte(cap_ptr);
4327
4328         while (cap_id != req_cap_id) {
4329                 cap_ptr = read_pci_cfg_byte((cap_ptr + 1));
4330                 if (cap_ptr == 0x00) break;
4331                 cap_id = read_pci_cfg_byte(cap_ptr);
4332         }
4333
4334 end:
4335         return cap_ptr;
4336 }
4337
4338 void
4339 dhdpcie_pme_active(osl_t *osh, bool enable)
4340 {
4341         uint8 cap_ptr;
4342         uint32 pme_csr;
4343
4344         cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
4345
4346         if (!cap_ptr) {
4347                 DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__));
4348                 return;
4349         }
4350
4351         pme_csr = OSL_PCI_READ_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32));
4352         DHD_ERROR(("%s : pme_sts_ctrl 0x%x\n", __FUNCTION__, pme_csr));
4353
4354         pme_csr |= PME_CSR_PME_STAT;
4355         if (enable) {
4356                 pme_csr |= PME_CSR_PME_EN;
4357         } else {
4358                 pme_csr &= ~PME_CSR_PME_EN;
4359         }
4360
4361         OSL_PCI_WRITE_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32), pme_csr);
4362 }
4363
4364 bool
4365 dhdpcie_pme_cap(osl_t *osh)
4366 {
4367         uint8 cap_ptr;
4368         uint32 pme_cap;
4369
4370         cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
4371
4372         if (!cap_ptr) {
4373                 DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__));
4374                 return FALSE;
4375         }
4376
4377         pme_cap = OSL_PCI_READ_CONFIG(osh, cap_ptr, sizeof(uint32));
4378
4379         DHD_ERROR(("%s : pme_cap 0x%x\n", __FUNCTION__, pme_cap));
4380
4381         return ((pme_cap & PME_CAP_PM_STATES) != 0);
4382 }
4383 #endif /* !BCMPCIE_OOB_HOST_WAKE */
4384
4385 void dhd_dump_intr_registers(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
4386 {
4387         uint32 intstatus = 0;
4388         uint32 intmask = 0;
4389         uint32 mbintstatus = 0;
4390         uint32 d2h_mb_data = 0;
4391
4392         intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
4393         intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxMask, 0, 0);
4394         mbintstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0);
4395         dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
4396
4397         bcm_bprintf(strbuf, "intstatus=0x%x intmask=0x%x mbintstatus=0x%x\n",
4398                 intstatus, intmask, mbintstatus);
4399         bcm_bprintf(strbuf, "d2h_mb_data=0x%x def_intmask=0x%x\n",
4400                 d2h_mb_data, dhd->bus->def_intmask);
4401 }
4402
4403 /** Add bus dump output to a buffer */
4404 void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
4405 {
4406         uint16 flowid;
4407         int ix = 0;
4408         flow_ring_node_t *flow_ring_node;
4409         flow_info_t *flow_info;
4410         char eabuf[ETHER_ADDR_STR_LEN];
4411
4412         if (dhdp->busstate != DHD_BUS_DATA)
4413                 return;
4414
4415         dhd_prot_print_info(dhdp, strbuf);
4416         dhd_dump_intr_registers(dhdp, strbuf);
4417         bcm_bprintf(strbuf, "h2d_mb_data_ptr_addr 0x%x, d2h_mb_data_ptr_addr 0x%x\n",
4418                 dhdp->bus->h2d_mb_data_ptr_addr, dhdp->bus->d2h_mb_data_ptr_addr);
4419         bcm_bprintf(strbuf, "dhd cumm_ctr %d\n", DHD_CUMM_CTR_READ(&dhdp->cumm_ctr));
4420         bcm_bprintf(strbuf,
4421                 "%s %4s %2s %4s %17s %4s %4s %10s %4s %4s ",
4422                 "Num:", "Flow", "If", "Prio", ":Dest_MacAddress:", "Qlen", "CLen",
4423                 "Overflows", "RD", "WR");
4424         bcm_bprintf(strbuf, "%5s %6s %5s \n", "Acked", "tossed", "noack");
4425
4426         for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) {
4427                 flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
4428                 if (flow_ring_node->active) {
4429                         flow_info = &flow_ring_node->flow_info;
4430                         bcm_bprintf(strbuf,
4431                                 "%3d. %4d %2d %4d %17s %4d %4d %10u ", ix++,
4432                                 flow_ring_node->flowid, flow_info->ifindex, flow_info->tid,
4433                                 bcm_ether_ntoa((struct ether_addr *)&flow_info->da, eabuf),
4434                                 DHD_FLOW_QUEUE_LEN(&flow_ring_node->queue),
4435                                 DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_CLEN_PTR(&flow_ring_node->queue)),
4436                                 DHD_FLOW_QUEUE_FAILURES(&flow_ring_node->queue));
4437                         dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, strbuf,
4438                                 "%4d %4d ");
4439                         bcm_bprintf(strbuf,
4440                                 "%5s %6s %5s\n", "NA", "NA", "NA");
4441                 }
4442         }
4443         bcm_bprintf(strbuf, "D3 inform cnt %d\n", dhdp->bus->d3_inform_cnt);
4444         bcm_bprintf(strbuf, "D0 inform cnt %d\n", dhdp->bus->d0_inform_cnt);
4445         bcm_bprintf(strbuf, "D0 inform in use cnt %d\n", dhdp->bus->d0_inform_in_use_cnt);
4446         bcm_bprintf(strbuf, "D3 Ack WAR cnt %d\n", dhdp->bus->d3_ack_war_cnt);
4447 }
4448
4449 /**
4450  * Brings transmit packets on all flow rings closer to the dongle, by moving (a subset) from their
4451  * flow queue to their flow ring.
4452  */
4453 static void
4454 dhd_update_txflowrings(dhd_pub_t *dhd)
4455 {
4456         unsigned long flags;
4457         dll_t *item, *next;
4458         flow_ring_node_t *flow_ring_node;
4459         struct dhd_bus *bus = dhd->bus;
4460
4461         DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
4462         for (item = dll_head_p(&bus->const_flowring);
4463                 (!dhd_is_device_removed(dhd) && !dll_end(&bus->const_flowring, item));
4464                 item = next) {
4465                 if (dhd->hang_was_sent) {
4466                         break;
4467                 }
4468
4469                 next = dll_next_p(item);
4470                 flow_ring_node = dhd_constlist_to_flowring(item);
4471
4472                 /* Ensure that flow_ring_node in the list is Not Null */
4473                 ASSERT(flow_ring_node != NULL);
4474
4475                 /* Ensure that the flowring node has valid contents */
4476                 ASSERT(flow_ring_node->prot_info != NULL);
4477
4478                 dhd_prot_update_txflowring(dhd, flow_ring_node->flowid, flow_ring_node->prot_info);
4479         }
4480         DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
4481 }
4482
4483 /** Mailbox ringbell Function */
4484 static void
4485 dhd_bus_gen_devmb_intr(struct dhd_bus *bus)
4486 {
4487         if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
4488                 (bus->sih->buscorerev == 4)) {
4489                 DHD_ERROR(("%s: mailbox communication not supported\n", __FUNCTION__));
4490                 return;
4491         }
4492         if (bus->db1_for_mb)  {
4493                 /* this is a pcie core register, not the config register */
4494                 DHD_INFO(("%s: writing a mail box interrupt to the device, through doorbell 1\n", __FUNCTION__));
4495                 si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_DB1, ~0, 0x12345678);
4496         } else {
4497                 DHD_INFO(("%s: writing a mail box interrupt to the device, through config space\n", __FUNCTION__));
4498                 dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
4499                 dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
4500         }
4501 }
4502
4503 static void
4504 dhd_bus_set_device_wake(struct dhd_bus *bus, bool val)
4505 {
4506         if (bus->device_wake_state != val)
4507         {
4508                 DHD_INFO(("Set Device_Wake to %d\n", val));
4509 #ifdef PCIE_OOB
4510                 if (bus->oob_enabled)
4511                 {
4512                         if (val)
4513                         {
4514                                 gpio_port = gpio_port | (1 << DEVICE_WAKE);
4515                                 gpio_write_port_non_block(gpio_handle_val, gpio_port);
4516                         } else {
4517                                 gpio_port = gpio_port & (0xff ^ (1 << DEVICE_WAKE));
4518                                 gpio_write_port_non_block(gpio_handle_val, gpio_port);
4519                         }
4520                 }
4521 #endif /* PCIE_OOB */
4522                 bus->device_wake_state = val;
4523         }
4524 }
4525
4526 #ifdef PCIE_OOB
4527 void
4528 dhd_oob_set_bt_reg_on(struct dhd_bus *bus, bool val)
4529 {
4530         DHD_INFO(("Set Device_Wake to %d\n", val));
4531         if (val)
4532         {
4533                 gpio_port = gpio_port | (1 << BIT_BT_REG_ON);
4534                 gpio_write_port(gpio_handle_val, gpio_port);
4535         } else {
4536                 gpio_port = gpio_port & (0xff ^ (1 << BIT_BT_REG_ON));
4537                 gpio_write_port(gpio_handle_val, gpio_port);
4538         }
4539 }
4540
4541 int
4542 dhd_oob_get_bt_reg_on(struct dhd_bus *bus)
4543 {
4544         int ret;
4545         uint8 val;
4546         ret = gpio_read_port(gpio_handle_val, &val);
4547
4548         if (ret < 0) {
4549                 DHD_ERROR(("gpio_read_port returns %d\n", ret));
4550                 return ret;
4551         }
4552
4553         if (val & (1 << BIT_BT_REG_ON))
4554         {
4555                 ret = 1;
4556         } else {
4557                 ret = 0;
4558         }
4559
4560         return ret;
4561 }
4562
4563 static void
4564 dhd_bus_doorbell_timeout_reset(struct dhd_bus *bus)
4565 {
4566         if (dhd_doorbell_timeout)
4567                 dhd_timeout_start(&bus->doorbell_timer,
4568                         (dhd_doorbell_timeout * 1000) / dhd_watchdog_ms);
4569         else if (!(bus->dhd->busstate == DHD_BUS_SUSPEND))
4570                 dhd_bus_set_device_wake(bus, FALSE);
4571 }
4572 #endif /* PCIE_OOB */
4573
4574 /** mailbox doorbell ring function */
4575 void
4576 dhd_bus_ringbell(struct dhd_bus *bus, uint32 value)
4577 {
4578         if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
4579                 (bus->sih->buscorerev == 4)) {
4580                 si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, PCIE_INTB, PCIE_INTB);
4581         } else {
4582                 /* this is a pcie core register, not the config regsiter */
4583                 DHD_INFO(("%s: writing a door bell to the device\n", __FUNCTION__));
4584                 si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_MailBox, ~0, 0x12345678);
4585         }
4586 }
4587
4588 void
4589 dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value)
4590 {
4591 #ifdef PCIE_OOB
4592         dhd_bus_set_device_wake(bus, TRUE);
4593         dhd_bus_doorbell_timeout_reset(bus);
4594 #endif
4595         W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value);
4596 }
4597
4598 static void
4599 dhd_bus_ringbell_oldpcie(struct dhd_bus *bus, uint32 value)
4600 {
4601         uint32 w;
4602         w = (R_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr) & ~PCIE_INTB) | PCIE_INTB;
4603         W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, w);
4604 }
4605
4606 dhd_mb_ring_t
4607 dhd_bus_get_mbintr_fn(struct dhd_bus *bus)
4608 {
4609         if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
4610                 (bus->sih->buscorerev == 4)) {
4611                 bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
4612                         PCIMailBoxInt);
4613                 if (bus->pcie_mb_intr_addr) {
4614                         bus->pcie_mb_intr_osh = si_osh(bus->sih);
4615                         return dhd_bus_ringbell_oldpcie;
4616                 }
4617         } else {
4618                 bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
4619                         PCIH2D_MailBox);
4620                 if (bus->pcie_mb_intr_addr) {
4621                         bus->pcie_mb_intr_osh = si_osh(bus->sih);
4622                         return dhdpcie_bus_ringbell_fast;
4623                 }
4624         }
4625         return dhd_bus_ringbell;
4626 }
4627
4628 bool BCMFASTPATH
4629 dhd_bus_dpc(struct dhd_bus *bus)
4630 {
4631         bool resched = FALSE;     /* Flag indicating resched wanted */
4632         unsigned long flags;
4633
4634         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4635
4636         DHD_GENERAL_LOCK(bus->dhd, flags);
4637         /* Check for only DHD_BUS_DOWN and not for DHD_BUS_DOWN_IN_PROGRESS
4638          * to avoid IOCTL Resumed On timeout when ioctl is waiting for response
4639          * and rmmod is fired in parallel, which will make DHD_BUS_DOWN_IN_PROGRESS
4640          * and if we return from here, then IOCTL response will never be handled
4641          */
4642         if (bus->dhd->busstate == DHD_BUS_DOWN) {
4643                 DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
4644                 bus->intstatus = 0;
4645                 DHD_GENERAL_UNLOCK(bus->dhd, flags);
4646                 return 0;
4647         }
4648         bus->dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_DPC;
4649         DHD_GENERAL_UNLOCK(bus->dhd, flags);
4650
4651         resched = dhdpcie_bus_process_mailbox_intr(bus, bus->intstatus);
4652         if (!resched) {
4653                 bus->intstatus = 0;
4654                 if (!bus->pci_d3hot_done) {
4655                         dhdpcie_bus_intr_enable(bus);
4656                 } else {
4657                         DHD_ERROR(("%s: dhdpcie_bus_intr_enable skip in pci D3hot state \n",
4658                                         __FUNCTION__));
4659                 }
4660         }
4661
4662         DHD_GENERAL_LOCK(bus->dhd, flags);
4663         bus->dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_DPC;
4664         dhd_os_busbusy_wake(bus->dhd);
4665         DHD_GENERAL_UNLOCK(bus->dhd, flags);
4666
4667         return resched;
4668
4669 }
4670
4671
4672 static void
4673 dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data)
4674 {
4675         uint32 cur_h2d_mb_data = 0;
4676
4677         DHD_INFO_HW4(("%s: H2D_MB_DATA: 0x%08X\n", __FUNCTION__, h2d_mb_data));
4678
4679         if (bus->is_linkdown) {
4680                 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
4681                 return;
4682         }
4683
4684         dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);
4685
4686         if (cur_h2d_mb_data != 0) {
4687                 uint32 i = 0;
4688                 DHD_INFO(("%s: GRRRRRRR: MB transaction is already pending 0x%04x\n", __FUNCTION__, cur_h2d_mb_data));
4689                 while ((i++ < 100) && cur_h2d_mb_data) {
4690                         OSL_DELAY(10);
4691                         dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);
4692                 }
4693                 if (i >= 100) {
4694                         DHD_ERROR(("%s : waited 1ms for the dngl "
4695                                 "to ack the previous mb transaction\n", __FUNCTION__));
4696                         DHD_ERROR(("%s : MB transaction is still pending 0x%04x\n",
4697                                 __FUNCTION__, cur_h2d_mb_data));
4698                 }
4699         }
4700
4701         dhd_bus_cmn_writeshared(bus, &h2d_mb_data, sizeof(uint32), H2D_MB_DATA, 0);
4702         dhd_bus_gen_devmb_intr(bus);
4703
4704         if (h2d_mb_data == H2D_HOST_D3_INFORM) {
4705                 DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__));
4706                 bus->d3_inform_cnt++;
4707         }
4708         if (h2d_mb_data == H2D_HOST_D0_INFORM_IN_USE) {
4709                 DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM_IN_USE to dongle\n", __FUNCTION__));
4710                 bus->d0_inform_in_use_cnt++;
4711         }
4712         if (h2d_mb_data == H2D_HOST_D0_INFORM) {
4713                 DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM to dongle\n", __FUNCTION__));
4714                 bus->d0_inform_cnt++;
4715         }
4716 }
4717
4718 static void
4719 dhdpcie_handle_mb_data(dhd_bus_t *bus)
4720 {
4721         uint32 d2h_mb_data = 0;
4722         uint32 zero = 0;
4723         dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
4724         if (!d2h_mb_data) {
4725                 DHD_INFO_HW4(("%s: Invalid D2H_MB_DATA: 0x%08x\n",
4726                         __FUNCTION__, d2h_mb_data));
4727                 return;
4728         }
4729
4730         dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);
4731
4732         DHD_INFO_HW4(("%s: D2H_MB_DATA: 0x%08x\n", __FUNCTION__, d2h_mb_data));
4733         if (d2h_mb_data & D2H_DEV_FWHALT)  {
4734                 DHD_ERROR(("FW trap has happened\n"));
4735                 dhdpcie_checkdied(bus, NULL, 0);
4736                 /* not ready yet dhd_os_ind_firmware_stall(bus->dhd); */
4737                 bus->dhd->busstate = DHD_BUS_DOWN;
4738                 return;
4739         }
4740         if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ)  {
4741                 /* what should we do */
4742                 DHD_INFO(("%s: D2H_MB_DATA: DEEP SLEEP REQ\n", __FUNCTION__));
4743                 dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
4744                 DHD_INFO(("%s: D2H_MB_DATA: sent DEEP SLEEP ACK\n", __FUNCTION__));
4745         }
4746         if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE)  {
4747                 /* what should we do */
4748                 DHD_INFO(("%s: D2H_MB_DATA: DEEP SLEEP EXIT\n", __FUNCTION__));
4749         }
4750         if (d2h_mb_data & D2H_DEV_D3_ACK)  {
4751                 /* what should we do */
4752                 DHD_INFO_HW4(("%s: D2H_MB_DATA: D3 ACK\n", __FUNCTION__));
4753                 if (!bus->wait_for_d3_ack) {
4754                         bus->wait_for_d3_ack = 1;
4755                         dhd_os_d3ack_wake(bus->dhd);
4756                 }
4757         }
4758 }
4759
4760 /* Inform Dongle to print HW Registers for Livelock Debug */
4761 void dhdpcie_bus_dongle_print_hwregs(struct dhd_bus *bus)
4762 {
4763         dhdpcie_send_mb_data(bus, H2D_FW_TRAP);
4764 }
4765
4766 static bool
4767 dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus)
4768 {
4769         bool resched = FALSE;
4770
4771         if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
4772                 (bus->sih->buscorerev == 4)) {
4773                 /* Msg stream interrupt */
4774                 if (intstatus & I_BIT1) {
4775                         resched = dhdpci_bus_read_frames(bus);
4776                 } else if (intstatus & I_BIT0) {
4777                         /* do nothing for Now */
4778                 }
4779         } else {
4780                 if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1))
4781                         dhdpcie_handle_mb_data(bus);
4782
4783                 if (bus->dhd->busstate == DHD_BUS_SUSPEND) {
4784                         goto exit;
4785                 }
4786
4787                 if (intstatus & PCIE_MB_D2H_MB_MASK) {
4788                         resched = dhdpci_bus_read_frames(bus);
4789                 }
4790         }
4791
4792 exit:
4793         return resched;
4794 }
4795
4796 static bool
4797 dhdpci_bus_read_frames(dhd_bus_t *bus)
4798 {
4799         bool more = FALSE;
4800
4801         /* There may be frames in both ctrl buf and data buf; check ctrl buf first */
4802         DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
4803         dhd_prot_process_ctrlbuf(bus->dhd);
4804         /* Unlock to give chance for resp to be handled */
4805         DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
4806
4807         DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
4808         /* update the flow ring cpls */
4809         dhd_update_txflowrings(bus->dhd);
4810
4811         /* With heavy TX traffic, we could get a lot of TxStatus
4812          * so add bound
4813          */
4814         more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound);
4815
4816         /* With heavy RX traffic, this routine potentially could spend some time
4817          * processing RX frames without RX bound
4818          */
4819         more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound);
4820
4821         /* don't talk to the dongle if fw is about to be reloaded */
4822         if (bus->dhd->hang_was_sent) {
4823                 more = FALSE;
4824         }
4825         DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
4826
4827         return more;
4828 }
4829
4830 bool
4831 dhdpcie_tcm_valid(dhd_bus_t *bus)
4832 {
4833         uint32 addr = 0;
4834         int rv;
4835         uint32 shaddr = 0;
4836         pciedev_shared_t sh;
4837
4838         shaddr = bus->dongle_ram_base + bus->ramsize - 4;
4839
4840         /* Read last word in memory to determine address of pciedev_shared structure */
4841         addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
4842
4843         if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
4844                 (addr > shaddr)) {
4845                 DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid addr\n",
4846                         __FUNCTION__, addr));
4847                 return FALSE;
4848         }
4849
4850         /* Read hndrte_shared structure */
4851         if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&sh,
4852                 sizeof(pciedev_shared_t))) < 0) {
4853                 DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv));
4854                 return FALSE;
4855         }
4856
4857         /* Compare any field in pciedev_shared_t */
4858         if (sh.console_addr != bus->pcie_sh->console_addr) {
4859                 DHD_ERROR(("Contents of pciedev_shared_t structure are not matching.\n"));
4860                 return FALSE;
4861         }
4862
4863         return TRUE;
4864 }
4865
4866 static bool
4867 dhdpcie_check_firmware_compatible(uint32 firmware_api_version, uint32 host_api_version)
4868 {
4869         DHD_INFO(("firmware api revision %d, host api revision %d\n",
4870                 firmware_api_version, host_api_version));
4871         if (firmware_api_version <= host_api_version)
4872                 return TRUE;
4873         if ((firmware_api_version == 6) && (host_api_version == 5))
4874                 return TRUE;
4875         if ((firmware_api_version == 5) && (host_api_version == 6))
4876                 return TRUE;
4877         return FALSE;
4878 }
4879
4880 static int
4881 dhdpcie_readshared(dhd_bus_t *bus)
4882 {
4883         uint32 addr = 0;
4884         int rv, dma_indx_wr_buf, dma_indx_rd_buf;
4885         uint32 shaddr = 0;
4886         pciedev_shared_t *sh = bus->pcie_sh;
4887         dhd_timeout_t tmo;
4888
4889         shaddr = bus->dongle_ram_base + bus->ramsize - 4;
4890
4891         DHD_INFO_HW4(("%s: ram_base: 0x%x ramsize 0x%x tcm: %p shaddr: 0x%x nvram_csm: 0x%x\n",
4892                 __FUNCTION__, bus->dongle_ram_base, bus->ramsize,
4893                 bus->tcm, shaddr, bus->nvram_csm));
4894         /* start a timer for 5 seconds */
4895         dhd_timeout_start(&tmo, MAX_READ_TIMEOUT);
4896
4897         while (((addr == 0) || (addr == bus->nvram_csm)) && !dhd_timeout_expired(&tmo)) {
4898                 /* Read last word in memory to determine address of pciedev_shared structure */
4899                 addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
4900         }
4901
4902         if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
4903                 (addr > shaddr)) {
4904                 DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n",
4905                         __FUNCTION__, addr));
4906                 DHD_ERROR(("%s: Waited %u usec, dongle is not ready\n", __FUNCTION__, tmo.elapsed));
4907                 return BCME_ERROR;
4908         } else {
4909                 bus->shared_addr = (ulong)addr;
4910                 DHD_ERROR(("%s: PCIe shared addr (0x%08x) read took %u usec "
4911                         "before dongle is ready\n", __FUNCTION__, addr, tmo.elapsed));
4912         }
4913
4914         /* Read hndrte_shared structure */
4915         if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)sh,
4916                 sizeof(pciedev_shared_t))) < 0) {
4917                 DHD_ERROR(("%s: Failed to read PCIe shared struct with %d\n", __FUNCTION__, rv));
4918                 return rv;
4919         }
4920
4921         /* Endianness */
4922         sh->flags = ltoh32(sh->flags);
4923         sh->trap_addr = ltoh32(sh->trap_addr);
4924         sh->assert_exp_addr = ltoh32(sh->assert_exp_addr);
4925         sh->assert_file_addr = ltoh32(sh->assert_file_addr);
4926         sh->assert_line = ltoh32(sh->assert_line);
4927         sh->console_addr = ltoh32(sh->console_addr);
4928         sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
4929         sh->dma_rxoffset = ltoh32(sh->dma_rxoffset);
4930         sh->rings_info_ptr = ltoh32(sh->rings_info_ptr);
4931
4932 #ifdef DHD_DEBUG
4933         /* load bus console address */
4934         bus->console_addr = sh->console_addr;
4935 #endif
4936
4937         /* Read the dma rx offset */
4938         bus->dma_rxoffset = bus->pcie_sh->dma_rxoffset;
4939         dhd_prot_rx_dataoffset(bus->dhd, bus->dma_rxoffset);
4940
4941         DHD_ERROR(("%s: DMA RX offset from shared Area %d\n", __FUNCTION__, bus->dma_rxoffset));
4942
4943         if (!(dhdpcie_check_firmware_compatible(sh->flags & PCIE_SHARED_VERSION_MASK,
4944                 PCIE_SHARED_VERSION)))
4945         {
4946                 DHD_ERROR(("%s: pcie_shared version %d in dhd "
4947                            "is older than pciedev_shared version %d in dongle\n",
4948                            __FUNCTION__, PCIE_SHARED_VERSION,
4949                            sh->flags & PCIE_SHARED_VERSION_MASK));
4950                 return BCME_ERROR;
4951         }
4952
4953         bus->rw_index_sz = (sh->flags & PCIE_SHARED_2BYTE_INDICES) ?
4954                 sizeof(uint16) : sizeof(uint32);
4955         DHD_ERROR(("%s: Dongle advertizes %d size indices\n",
4956                 __FUNCTION__, bus->rw_index_sz));
4957
4958         /* Does the FW support DMA'ing r/w indices */
4959         if (sh->flags & PCIE_SHARED_DMA_INDEX) {
4960
4961
4962                 DHD_ERROR(("%s: Host support DMAing indices: H2D:%d - D2H:%d. FW supports it\n",
4963                         __FUNCTION__,
4964                         (DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support) ? 1 : 0),
4965                         (DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support) ? 1 : 0)));
4966
4967         } else if (DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support) ||
4968                    DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support)) {
4969
4970 #ifdef BCM_INDX_DMA
4971                 DHD_ERROR(("%s: Incompatible FW. FW does not support DMAing indices\n",
4972                         __FUNCTION__));
4973                 return BCME_ERROR;
4974 #endif
4975                 DHD_ERROR(("%s: Host supports DMAing indices but FW does not\n",
4976                         __FUNCTION__));
4977                 bus->dhd->dma_d2h_ring_upd_support = FALSE;
4978                 bus->dhd->dma_h2d_ring_upd_support = FALSE;
4979         }
4980
4981
4982         /* get ring_info, ring_state and mb data ptrs and store the addresses in bus structure */
4983         {
4984                 ring_info_t  ring_info;
4985
4986                 if ((rv = dhdpcie_bus_membytes(bus, FALSE, sh->rings_info_ptr,
4987                         (uint8 *)&ring_info, sizeof(ring_info_t))) < 0)
4988                         return rv;
4989
4990                 bus->h2d_mb_data_ptr_addr = ltoh32(sh->h2d_mb_data_ptr);
4991                 bus->d2h_mb_data_ptr_addr = ltoh32(sh->d2h_mb_data_ptr);
4992
4993
4994                 bus->max_sub_queues = ltoh16(ring_info.max_sub_queues);
4995
4996                 /* If both FW and Host support DMA'ing indices, allocate memory and notify FW
4997                  * The max_sub_queues is read from FW initialized ring_info
4998                  */
4999                 if (DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support)) {
5000                         dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
5001                                 H2D_DMA_INDX_WR_BUF, bus->max_sub_queues);
5002                         dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
5003                                 D2H_DMA_INDX_RD_BUF, BCMPCIE_D2H_COMMON_MSGRINGS);
5004
5005                         if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) {
5006                                 DHD_ERROR(("%s: Failed to allocate memory for dma'ing h2d indices"
5007                                         "Host will use w/r indices in TCM\n",
5008                                         __FUNCTION__));
5009                                 bus->dhd->dma_h2d_ring_upd_support = FALSE;
5010                         }
5011                 }
5012
5013                 if (DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support)) {
5014                         dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
5015                                 D2H_DMA_INDX_WR_BUF, BCMPCIE_D2H_COMMON_MSGRINGS);
5016                         dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
5017                                 H2D_DMA_INDX_RD_BUF, bus->max_sub_queues);
5018
5019                         if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) {
5020                                 DHD_ERROR(("%s: Failed to allocate memory for dma'ing d2h indices"
5021                                         "Host will use w/r indices in TCM\n",
5022                                         __FUNCTION__));
5023                                 bus->dhd->dma_d2h_ring_upd_support = FALSE;
5024                         }
5025                 }
5026
5027                 /* read ringmem and ringstate ptrs from shared area and store in host variables */
5028                 dhd_fillup_ring_sharedptr_info(bus, &ring_info);
5029
5030                 bcm_print_bytes("ring_info_raw", (uchar *)&ring_info, sizeof(ring_info_t));
5031                 DHD_INFO(("%s: ring_info\n", __FUNCTION__));
5032
5033                 DHD_ERROR(("%s: max H2D queues %d\n",
5034                         __FUNCTION__, ltoh16(ring_info.max_sub_queues)));
5035
5036                 DHD_INFO(("mail box address\n"));
5037                 DHD_INFO(("%s: h2d_mb_data_ptr_addr 0x%04x\n",
5038                         __FUNCTION__, bus->h2d_mb_data_ptr_addr));
5039                 DHD_INFO(("%s: d2h_mb_data_ptr_addr 0x%04x\n",
5040                         __FUNCTION__, bus->d2h_mb_data_ptr_addr));
5041         }
5042
5043         bus->dhd->d2h_sync_mode = sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK;
5044         DHD_INFO(("%s: d2h_sync_mode 0x%08x\n",
5045                 __FUNCTION__, bus->dhd->d2h_sync_mode));
5046
5047         return BCME_OK;
5048 } /* dhdpcie_readshared */
5049
5050 /** Read ring mem and ring state ptr info from shared memory area in device memory */
5051 static void
5052 dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info)
5053 {
5054         uint16 i = 0;
5055         uint16 j = 0;
5056         uint32 tcm_memloc;
5057         uint32  d2h_w_idx_ptr, d2h_r_idx_ptr, h2d_w_idx_ptr, h2d_r_idx_ptr;
5058
5059         /* Ring mem ptr info */
5060         /* Alloated in the order
5061                 H2D_MSGRING_CONTROL_SUBMIT              0
5062                 H2D_MSGRING_RXPOST_SUBMIT               1
5063                 D2H_MSGRING_CONTROL_COMPLETE            2
5064                 D2H_MSGRING_TX_COMPLETE                 3
5065                 D2H_MSGRING_RX_COMPLETE                 4
5066         */
5067
5068         {
5069                 /* ringmemptr holds start of the mem block address space */
5070                 tcm_memloc = ltoh32(ring_info->ringmem_ptr);
5071
5072                 /* Find out ringmem ptr for each ring common  ring */
5073                 for (i = 0; i <= BCMPCIE_COMMON_MSGRING_MAX_ID; i++) {
5074                         bus->ring_sh[i].ring_mem_addr = tcm_memloc;
5075                         /* Update mem block */
5076                         tcm_memloc = tcm_memloc + sizeof(ring_mem_t);
5077                         DHD_INFO(("%s: ring id %d ring mem addr 0x%04x \n", __FUNCTION__,
5078                                 i, bus->ring_sh[i].ring_mem_addr));
5079                 }
5080         }
5081
5082         /* Ring state mem ptr info */
5083         {
5084                 d2h_w_idx_ptr = ltoh32(ring_info->d2h_w_idx_ptr);
5085                 d2h_r_idx_ptr = ltoh32(ring_info->d2h_r_idx_ptr);
5086                 h2d_w_idx_ptr = ltoh32(ring_info->h2d_w_idx_ptr);
5087                 h2d_r_idx_ptr = ltoh32(ring_info->h2d_r_idx_ptr);
5088
5089                 /* Store h2d common ring write/read pointers */
5090                 for (i = 0; i < BCMPCIE_H2D_COMMON_MSGRINGS; i++) {
5091                         bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
5092                         bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
5093
5094                         /* update mem block */
5095                         h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz;
5096                         h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz;
5097
5098                         DHD_INFO(("%s: h2d w/r : idx %d write %x read %x \n", __FUNCTION__, i,
5099                                 bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
5100                 }
5101
5102                 /* Store d2h common ring write/read pointers */
5103                 for (j = 0; j < BCMPCIE_D2H_COMMON_MSGRINGS; j++, i++) {
5104                         bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
5105                         bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
5106
5107                         /* update mem block */
5108                         d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
5109                         d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;
5110
5111                         DHD_INFO(("%s: d2h w/r : idx %d write %x read %x \n", __FUNCTION__, i,
5112                                 bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
5113                 }
5114
5115                 /* Store txflow ring write/read pointers */
5116                 for (j = 0; j < (bus->max_sub_queues - BCMPCIE_H2D_COMMON_MSGRINGS);
5117                         i++, j++)
5118                 {
5119                         bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
5120                         bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
5121
5122                         /* update mem block */
5123                         h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz;
5124                         h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz;
5125
5126                         DHD_INFO(("%s: FLOW Rings h2d w/r : idx %d write %x read %x \n",
5127                                 __FUNCTION__, i,
5128                                 bus->ring_sh[i].ring_state_w,
5129                                 bus->ring_sh[i].ring_state_r));
5130                 }
5131         }
5132 } /* dhd_fillup_ring_sharedptr_info */
5133
5134 /**
5135  * Initialize bus module: prepare for communication with the dongle. Called after downloading
5136  * firmware into the dongle.
5137  */
5138 int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
5139 {
5140         dhd_bus_t *bus = dhdp->bus;
5141         int  ret = 0;
5142
5143         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5144
5145         ASSERT(bus->dhd);
5146         if (!bus->dhd)
5147                 return 0;
5148
5149         /* Make sure we're talking to the core. */
5150         bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
5151         ASSERT(bus->reg != NULL);
5152
5153         /* before opening up bus for data transfer, check if shared are is intact */
5154         ret = dhdpcie_readshared(bus);
5155         if (ret < 0) {
5156                 DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
5157                 return ret;
5158         }
5159
5160         /* Make sure we're talking to the core. */
5161         bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
5162         ASSERT(bus->reg != NULL);
5163
5164         /* Set bus state according to enable result */
5165         dhdp->busstate = DHD_BUS_DATA;
5166
5167         if (!dhd_download_fw_on_driverload)
5168                 dhd_dpc_enable(bus->dhd);
5169
5170         /* Enable the interrupt after device is up */
5171         dhdpcie_bus_intr_enable(bus);
5172
5173         /* bcmsdh_intr_unmask(bus->sdh); */
5174
5175 #ifdef DHD_PCIE_RUNTIMEPM
5176         bus->idlecount = 0;
5177         bus->idletime = (int32)MAX_IDLE_COUNT;
5178         init_waitqueue_head(&bus->rpm_queue);
5179         mutex_init(&bus->pm_lock);
5180 #endif /* DHD_PCIE_RUNTIMEPM */
5181
5182         bus->d3_ack_war_cnt = 0;
5183
5184         return ret;
5185 }
5186
5187 static void
5188 dhdpcie_init_shared_addr(dhd_bus_t *bus)
5189 {
5190         uint32 addr = 0;
5191         uint32 val = 0;
5192         addr = bus->dongle_ram_base + bus->ramsize - 4;
5193 #ifdef DHD_PCIE_RUNTIMEPM
5194         dhdpcie_runtime_bus_wake(bus->dhd, TRUE, __builtin_return_address(0));
5195 #endif /* DHD_PCIE_RUNTIMEPM */
5196         DHD_INFO_HW4(("%s: tcm: %p, addr: 0x%x val: 0x%x\n", __FUNCTION__, bus->tcm, addr, val));
5197         dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val));
5198 }
5199
5200
5201 bool
5202 dhdpcie_chipmatch(uint16 vendor, uint16 device)
5203 {
5204         if (vendor != PCI_VENDOR_ID_BROADCOM) {
5205                 DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__,
5206                         vendor, device));
5207                 return (-ENODEV);
5208         }
5209
5210         if ((device == BCM4350_D11AC_ID) || (device == BCM4350_D11AC2G_ID) ||
5211                 (device == BCM4350_D11AC5G_ID) || (device == BCM4350_CHIP_ID) ||
5212                 (device == BCM43569_CHIP_ID))
5213                 return 0;
5214
5215         if ((device == BCM4354_D11AC_ID) || (device == BCM4354_D11AC2G_ID) ||
5216                 (device == BCM4354_D11AC5G_ID) || (device == BCM4354_CHIP_ID))
5217                 return 0;
5218
5219         if ((device == BCM4356_D11AC_ID) || (device == BCM4356_D11AC2G_ID) ||
5220                 (device == BCM4356_D11AC5G_ID) || (device == BCM4356_CHIP_ID))
5221                 return 0;
5222
5223         if ((device == BCM4345_D11AC_ID) || (device == BCM4345_D11AC2G_ID) ||
5224                 (device == BCM4345_D11AC5G_ID) || BCM4345_CHIP(device))
5225                 return 0;
5226
5227         if ((device == BCM4335_D11AC_ID) || (device == BCM4335_D11AC2G_ID) ||
5228                 (device == BCM4335_D11AC5G_ID) || (device == BCM4335_CHIP_ID))
5229                 return 0;
5230
5231         if ((device == BCM43602_D11AC_ID) || (device == BCM43602_D11AC2G_ID) ||
5232                 (device == BCM43602_D11AC5G_ID) || (device == BCM43602_CHIP_ID))
5233                 return 0;
5234
5235         if ((device == BCM43569_D11AC_ID) || (device == BCM43569_D11AC2G_ID) ||
5236                 (device == BCM43569_D11AC5G_ID) || (device == BCM43569_CHIP_ID))
5237                 return 0;
5238
5239         if ((device == BCM4358_D11AC_ID) || (device == BCM4358_D11AC2G_ID) ||
5240                 (device == BCM4358_D11AC5G_ID))
5241                 return 0;
5242
5243         if ((device == BCM4349_D11AC_ID) || (device == BCM4349_D11AC2G_ID) ||
5244                 (device == BCM4349_D11AC5G_ID) || (device == BCM4349_CHIP_ID))
5245                 return 0;
5246
5247         if ((device == BCM4355_D11AC_ID) || (device == BCM4355_D11AC2G_ID) ||
5248                 (device == BCM4355_D11AC5G_ID) || (device == BCM4355_CHIP_ID))
5249                 return 0;
5250
5251         if ((device == BCM4359_D11AC_ID) || (device == BCM4359_D11AC2G_ID) ||
5252                 (device == BCM4359_D11AC5G_ID))
5253                 return 0;
5254
5255         if ((device == BCM43596_D11AC_ID) || (device == BCM43596_D11AC2G_ID) ||
5256                 (device == BCM43596_D11AC5G_ID))
5257                 return 0;
5258
5259
5260         if ((device == BCM4365_D11AC_ID) || (device == BCM4365_D11AC2G_ID) ||
5261                 (device == BCM4365_D11AC5G_ID) || (device == BCM4365_CHIP_ID))
5262                 return 0;
5263
5264         if ((device == BCM4366_D11AC_ID) || (device == BCM4366_D11AC2G_ID) ||
5265                 (device == BCM4366_D11AC5G_ID) || (device == BCM4366_CHIP_ID))
5266                 return 0;
5267
5268         DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, vendor, device));
5269         return (-ENODEV);
5270 } /* dhdpcie_chipmatch */
5271
5272 /**
5273  * Name:  dhdpcie_cc_nvmshadow
5274  *
5275  * Description:
5276  * A shadow of OTP/SPROM exists in ChipCommon Region
5277  * betw. 0x800 and 0xBFF (Backplane Addr. 0x1800_0800 and 0x1800_0BFF).
5278  * Strapping option (SPROM vs. OTP), presence of OTP/SPROM and its size
5279  * can also be read from ChipCommon Registers.
5280  */
5281 static int
5282 dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b)
5283 {
5284         uint16 dump_offset = 0;
5285         uint32 dump_size = 0, otp_size = 0, sprom_size = 0;
5286
5287         /* Table for 65nm OTP Size (in bits) */
5288         int  otp_size_65nm[8] = {0, 2048, 4096, 8192, 4096, 6144, 512, 1024};
5289
5290         volatile uint16 *nvm_shadow;
5291
5292         uint cur_coreid;
5293         uint chipc_corerev;
5294         chipcregs_t *chipcregs;
5295
5296         /* Save the current core */
5297         cur_coreid = si_coreid(bus->sih);
5298         /* Switch to ChipC */
5299         chipcregs = (chipcregs_t *)si_setcore(bus->sih, CC_CORE_ID, 0);
5300         ASSERT(chipcregs != NULL);
5301
5302         chipc_corerev = si_corerev(bus->sih);
5303
5304         /* Check ChipcommonCore Rev */
5305         if (chipc_corerev < 44) {
5306                 DHD_ERROR(("%s: ChipcommonCore Rev %d < 44\n", __FUNCTION__, chipc_corerev));
5307                 return BCME_UNSUPPORTED;
5308         }
5309
5310         /* Check ChipID */
5311         if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) && !BCM4345_CHIP((uint16)bus->sih->chip)) {
5312                 DHD_ERROR(("%s: cc_nvmdump cmd. supported for 4350/4345 only\n",
5313                         __FUNCTION__));
5314                 return BCME_UNSUPPORTED;
5315         }
5316
5317         /* Check if SRC_PRESENT in SpromCtrl(0x190 in ChipCommon Regs) is set */
5318         if (chipcregs->sromcontrol & SRC_PRESENT) {
5319                 /* SPROM Size: 1Kbits (0x0), 4Kbits (0x1), 16Kbits(0x2) */
5320                 sprom_size = (1 << (2 * ((chipcregs->sromcontrol & SRC_SIZE_MASK)
5321                                         >> SRC_SIZE_SHIFT))) * 1024;
5322                 bcm_bprintf(b, "\nSPROM Present (Size %d bits)\n", sprom_size);
5323         }
5324
5325         if (chipcregs->sromcontrol & SRC_OTPPRESENT) {
5326                 bcm_bprintf(b, "\nOTP Present");
5327
5328                 if (((chipcregs->otplayout & OTPL_WRAP_TYPE_MASK) >> OTPL_WRAP_TYPE_SHIFT)
5329                         == OTPL_WRAP_TYPE_40NM) {
5330                         /* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */
5331                         otp_size =  (((chipcregs->capabilities & CC_CAP_OTPSIZE)
5332                                         >> CC_CAP_OTPSIZE_SHIFT) + 1) * 1024;
5333                         bcm_bprintf(b, "(Size %d bits)\n", otp_size);
5334                 } else {
5335                         /* This part is untested since newer chips have 40nm OTP */
5336                         otp_size = otp_size_65nm[(chipcregs->capabilities & CC_CAP_OTPSIZE)
5337                                         >> CC_CAP_OTPSIZE_SHIFT];
5338                         bcm_bprintf(b, "(Size %d bits)\n", otp_size);
5339                         DHD_INFO(("%s: 65nm/130nm OTP Size not tested. \n",
5340                                 __FUNCTION__));
5341                 }
5342         }
5343
5344         if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
5345                 ((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) {
5346                 DHD_ERROR(("%s: SPROM and OTP could not be found \n",
5347                         __FUNCTION__));
5348                 return BCME_NOTFOUND;
5349         }
5350
5351         /* Check the strapping option in SpromCtrl: Set = OTP otherwise SPROM */
5352         if ((chipcregs->sromcontrol & SRC_OTPSEL) &&
5353                 (chipcregs->sromcontrol & SRC_OTPPRESENT)) {
5354
5355                 bcm_bprintf(b, "OTP Strap selected.\n"
5356                                "\nOTP Shadow in ChipCommon:\n");
5357
5358                 dump_size = otp_size / 16 ; /* 16bit words */
5359
5360         } else if (((chipcregs->sromcontrol & SRC_OTPSEL) == 0) &&
5361                 (chipcregs->sromcontrol & SRC_PRESENT)) {
5362
5363                 bcm_bprintf(b, "SPROM Strap selected\n"
5364                                 "\nSPROM Shadow in ChipCommon:\n");
5365
5366                 /* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */
5367                 /* dump_size in 16bit words */
5368                 dump_size = sprom_size > 8 ? (8 * 1024) / 16 : sprom_size / 16;
5369         } else {
5370                 DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n",
5371                         __FUNCTION__));
5372                 return BCME_NOTFOUND;
5373         }
5374
5375         if (bus->regs == NULL) {
5376                 DHD_ERROR(("ChipCommon Regs. not initialized\n"));
5377                 return BCME_NOTREADY;
5378         } else {
5379             bcm_bprintf(b, "\n OffSet:");
5380
5381             /* Point to the SPROM/OTP shadow in ChipCommon */
5382             nvm_shadow = chipcregs->sromotp;
5383
5384            /*
5385             * Read 16 bits / iteration.
5386             * dump_size & dump_offset in 16-bit words
5387             */
5388             while (dump_offset < dump_size) {
5389                 if (dump_offset % 2 == 0)
5390                         /* Print the offset in the shadow space in Bytes */
5391                         bcm_bprintf(b, "\n 0x%04x", dump_offset * 2);
5392
5393                 bcm_bprintf(b, "\t0x%04x", *(nvm_shadow + dump_offset));
5394                 dump_offset += 0x1;
5395             }
5396         }
5397
5398         /* Switch back to the original core */
5399         si_setcore(bus->sih, cur_coreid, 0);
5400
5401         return BCME_OK;
5402 } /* dhdpcie_cc_nvmshadow */
5403
5404 /** Flow rings are dynamically created and destroyed */
5405 void dhd_bus_clean_flow_ring(dhd_bus_t *bus, void *node)
5406 {
5407         void *pkt;
5408         flow_queue_t *queue;
5409         flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)node;
5410         unsigned long flags;
5411
5412         queue = &flow_ring_node->queue;
5413
5414 #ifdef DHDTCPACK_SUPPRESS
5415         /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
5416          * when there is a newly coming packet from network stack.
5417          */
5418         dhd_tcpack_info_tbl_clean(bus->dhd);
5419 #endif /* DHDTCPACK_SUPPRESS */
5420
5421         /* clean up BUS level info */
5422         DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
5423
5424         /* Flush all pending packets in the queue, if any */
5425         while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
5426                 PKTFREE(bus->dhd->osh, pkt, TRUE);
5427         }
5428         ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
5429
5430         flow_ring_node->status = FLOW_RING_STATUS_CLOSED;
5431         flow_ring_node->active = FALSE;
5432
5433         DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
5434
5435         DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
5436         dll_delete(&flow_ring_node->list);
5437         DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
5438
5439         /* Release the flowring object back into the pool */
5440         dhd_prot_flowrings_pool_release(bus->dhd,
5441                 flow_ring_node->flowid, flow_ring_node->prot_info);
5442
5443         /* Free the flowid back to the flowid allocator */
5444         dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex,
5445                 flow_ring_node->flowid);
5446 }
5447
5448 /**
5449  * Allocate a Flow ring buffer,
5450  * Init Ring buffer, send Msg to device about flow ring creation
5451 */
5452 int
5453 dhd_bus_flow_ring_create_request(dhd_bus_t *bus, void *arg)
5454 {
5455         flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;
5456
5457         DHD_INFO(("%s :Flow create\n", __FUNCTION__));
5458
5459         /* Send Msg to device about flow ring creation */
5460         if (dhd_prot_flow_ring_create(bus->dhd, flow_ring_node) != BCME_OK)
5461                 return BCME_NOMEM;
5462
5463         return BCME_OK;
5464 }
5465
5466 /** Handle response from dongle on a 'flow ring create' request */
5467 void
5468 dhd_bus_flow_ring_create_response(dhd_bus_t *bus, uint16 flowid, int32 status)
5469 {
5470         flow_ring_node_t *flow_ring_node;
5471         unsigned long flags;
5472
5473         DHD_INFO(("%s :Flow Response %d \n", __FUNCTION__, flowid));
5474
5475         flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
5476         ASSERT(flow_ring_node->flowid == flowid);
5477
5478         if (status != BCME_OK) {
5479                 DHD_ERROR(("%s Flow create Response failure error status = %d \n",
5480                      __FUNCTION__, status));
5481                 /* Call Flow clean up */
5482                 dhd_bus_clean_flow_ring(bus, flow_ring_node);
5483                 return;
5484         }
5485
5486         DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
5487         flow_ring_node->status = FLOW_RING_STATUS_OPEN;
5488         DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
5489
5490         /* Now add the Flow ring node into the active list
5491          * Note that this code to add the newly created node to the active
5492          * list was living in dhd_flowid_lookup. But note that after
5493          * adding the node to the active list the contents of node is being
5494          * filled in dhd_prot_flow_ring_create.
5495          * If there is a D2H interrupt after the node gets added to the
5496          * active list and before the node gets populated with values
5497          * from the Bottom half dhd_update_txflowrings would be called.
5498          * which will then try to walk through the active flow ring list,
5499          * pickup the nodes and operate on them. Now note that since
5500          * the function dhd_prot_flow_ring_create is not finished yet
5501          * the contents of flow_ring_node can still be NULL leading to
5502          * crashes. Hence the flow_ring_node should be added to the
5503          * active list only after its truely created, which is after
5504          * receiving the create response message from the Host.
5505          */
5506
5507         DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
5508         dll_prepend(&bus->const_flowring, &flow_ring_node->list);
5509         DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
5510
5511         dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */
5512
5513         return;
5514 }
5515
5516 int
5517 dhd_bus_flow_ring_delete_request(dhd_bus_t *bus, void *arg)
5518 {
5519         void * pkt;
5520         flow_queue_t *queue;
5521         flow_ring_node_t *flow_ring_node;
5522         unsigned long flags;
5523
5524         DHD_INFO(("%s :Flow Delete\n", __FUNCTION__));
5525
5526         flow_ring_node = (flow_ring_node_t *)arg;
5527
5528         DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
5529         if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) {
5530                 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
5531                 DHD_ERROR(("%s :Delete Pending Flow %d\n",
5532                         __FUNCTION__, flow_ring_node->flowid));
5533                 return BCME_ERROR;
5534         }
5535         flow_ring_node->status = FLOW_RING_STATUS_DELETE_PENDING;
5536
5537         queue = &flow_ring_node->queue; /* queue associated with flow ring */
5538
5539 #ifdef DHDTCPACK_SUPPRESS
5540         /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
5541          * when there is a newly coming packet from network stack.
5542          */
5543         dhd_tcpack_info_tbl_clean(bus->dhd);
5544 #endif /* DHDTCPACK_SUPPRESS */
5545         /* Flush all pending packets in the queue, if any */
5546         while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
5547                 PKTFREE(bus->dhd->osh, pkt, TRUE);
5548         }
5549         ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
5550
5551         DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
5552
5553         /* Send Msg to device about flow ring deletion */
5554         dhd_prot_flow_ring_delete(bus->dhd, flow_ring_node);
5555
5556         return BCME_OK;
5557 }
5558
5559 void
5560 dhd_bus_flow_ring_delete_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
5561 {
5562         flow_ring_node_t *flow_ring_node;
5563
5564         DHD_INFO(("%s :Flow Delete Response %d \n", __FUNCTION__, flowid));
5565
5566         flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
5567         ASSERT(flow_ring_node->flowid == flowid);
5568
5569         if (status != BCME_OK) {
5570                 DHD_ERROR(("%s Flow Delete Response failure error status = %d \n",
5571                     __FUNCTION__, status));
5572                 return;
5573         }
5574         /* Call Flow clean up */
5575         dhd_bus_clean_flow_ring(bus, flow_ring_node);
5576
5577         return;
5578
5579 }
5580
5581 /** This function is not called. Obsolete ? */
5582 int dhd_bus_flow_ring_flush_request(dhd_bus_t *bus, void *arg)
5583 {
5584         void *pkt;
5585         flow_queue_t *queue;
5586         flow_ring_node_t *flow_ring_node;
5587         unsigned long flags;
5588
5589         DHD_INFO(("%s :Flow Delete\n", __FUNCTION__));
5590
5591         flow_ring_node = (flow_ring_node_t *)arg;
5592         queue = &flow_ring_node->queue; /* queue associated with flow ring */
5593
5594         DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
5595
5596 #ifdef DHDTCPACK_SUPPRESS
5597         /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
5598          * when there is a newly coming packet from network stack.
5599          */
5600         dhd_tcpack_info_tbl_clean(bus->dhd);
5601 #endif /* DHDTCPACK_SUPPRESS */
5602
5603         /* Flush all pending packets in the queue, if any */
5604         while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
5605                 PKTFREE(bus->dhd->osh, pkt, TRUE);
5606         }
5607         ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
5608
5609         DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
5610
5611         /* Send Msg to device about flow ring flush */
5612         dhd_prot_flow_ring_flush(bus->dhd, flow_ring_node);
5613
5614         flow_ring_node->status = FLOW_RING_STATUS_FLUSH_PENDING;
5615         return BCME_OK;
5616 }
5617
5618 void
5619 dhd_bus_flow_ring_flush_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
5620 {
5621         flow_ring_node_t *flow_ring_node;
5622
5623         if (status != BCME_OK) {
5624                 DHD_ERROR(("%s Flow flush Response failure error status = %d \n",
5625                     __FUNCTION__, status));
5626                 return;
5627         }
5628
5629         flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
5630         ASSERT(flow_ring_node->flowid == flowid);
5631
5632         flow_ring_node->status = FLOW_RING_STATUS_OPEN;
5633         return;
5634 }
5635
5636 uint32
5637 dhd_bus_max_h2d_queues(struct dhd_bus *bus)
5638 {
5639         return bus->max_sub_queues;
5640 }
5641
5642 /* To be symmetric with SDIO */
5643 void
5644 dhd_bus_pktq_flush(dhd_pub_t *dhdp)
5645 {
5646         return;
5647 }
5648
5649 void
5650 dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val)
5651 {
5652         dhdp->bus->is_linkdown = val;
5653 }
5654
5655 int
5656 dhdpcie_bus_clock_start(struct dhd_bus *bus)
5657 {
5658         return dhdpcie_start_host_pcieclock(bus);
5659 }
5660
5661 int
5662 dhdpcie_bus_clock_stop(struct dhd_bus *bus)
5663 {
5664         return dhdpcie_stop_host_pcieclock(bus);
5665 }
5666
5667 int
5668 dhdpcie_bus_disable_device(struct dhd_bus *bus)
5669 {
5670         return dhdpcie_disable_device(bus);
5671 }
5672
5673 int
5674 dhdpcie_bus_enable_device(struct dhd_bus *bus)
5675 {
5676         return dhdpcie_enable_device(bus);
5677 }
5678
5679 int
5680 dhdpcie_bus_alloc_resource(struct dhd_bus *bus)
5681 {
5682         return dhdpcie_alloc_resource(bus);
5683 }
5684
5685 void
5686 dhdpcie_bus_free_resource(struct dhd_bus *bus)
5687 {
5688         dhdpcie_free_resource(bus);
5689 }
5690
5691 int
5692 dhd_bus_request_irq(struct dhd_bus *bus)
5693 {
5694         return dhdpcie_bus_request_irq(bus);
5695 }
5696
5697 bool
5698 dhdpcie_bus_dongle_attach(struct dhd_bus *bus)
5699 {
5700         return dhdpcie_dongle_attach(bus);
5701 }
5702
5703 int
5704 dhd_bus_release_dongle(struct dhd_bus *bus)
5705 {
5706         bool dongle_isolation;
5707         osl_t *osh;
5708
5709         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5710
5711         if (bus) {
5712                 osh = bus->osh;
5713                 ASSERT(osh);
5714
5715                 if (bus->dhd) {
5716                         dongle_isolation = bus->dhd->dongle_isolation;
5717                         dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
5718                 }
5719         }
5720
5721         return 0;
5722 }
5723
5724 #ifdef BCMPCIE_OOB_HOST_WAKE
5725 int
5726 dhd_bus_oob_intr_register(dhd_pub_t *dhdp)
5727 {
5728         return dhdpcie_oob_intr_register(dhdp->bus);
5729 }
5730
5731 void
5732 dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp)
5733 {
5734         dhdpcie_oob_intr_unregister(dhdp->bus);
5735 }
5736
5737 void
5738 dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable)
5739 {
5740         dhdpcie_oob_intr_set(dhdp->bus, enable);
5741 }
5742 #endif /* BCMPCIE_OOB_HOST_WAKE */