2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 #include <linux/pci.h>
19 #include <linux/module.h>
20 #include <linux/interrupt.h>
21 #include <linux/spinlock.h>
22 #include <linux/bitops.h>
27 #include "targaddrs.h"
/* Interrupt and reset policy knobs, exposed as writable module params.
 * NOTE(review): this listing is decimated — the closing braces of both
 * enums are missing from this view; code left byte-identical.
 */
36 enum ath10k_pci_irq_mode {
37 ATH10K_PCI_IRQ_AUTO = 0,
38 ATH10K_PCI_IRQ_LEGACY = 1,
39 ATH10K_PCI_IRQ_MSI = 2,
42 enum ath10k_pci_reset_mode {
43 ATH10K_PCI_RESET_AUTO = 0,
44 ATH10K_PCI_RESET_WARM_ONLY = 1,
/* Defaults: auto-detect irq delivery and reset strategy. */
47 static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
48 static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
50 module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
51 MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
53 module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
54 MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
56 /* how long to wait for target to initialise, in ms */
57 #define ATH10K_PCI_TARGET_WAIT 3000
58 #define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
60 #define QCA988X_2_0_DEVICE_ID (0x003c)
62 static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
63 { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
/* Forward declarations for helpers defined later in this file.
 * NOTE(review): several prototypes are truncated mid-signature in this
 * decimated view; code left byte-identical.
 */
67 static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
70 static int ath10k_pci_post_rx(struct ath10k *ar);
71 static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
73 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
74 static int ath10k_pci_cold_reset(struct ath10k *ar);
75 static int ath10k_pci_warm_reset(struct ath10k *ar);
76 static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
77 static int ath10k_pci_init_irq(struct ath10k *ar);
78 static int ath10k_pci_deinit_irq(struct ath10k *ar);
79 static int ath10k_pci_request_irq(struct ath10k *ar);
80 static void ath10k_pci_free_irq(struct ath10k *ar);
81 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
82 struct ath10k_ce_pipe *rx_pipe,
83 struct bmi_xfer *xfer);
/* Host-side Copy Engine attributes, one entry per CE id.
 * CE4 (host->target HTT) disables per-send interrupts for throughput.
 * NOTE(review): most initializer fields (src/dest entry counts, sizes)
 * were lost in extraction; only surviving lines shown, byte-identical.
 */
85 static const struct ce_attr host_ce_config_wlan[] = {
86 /* CE0: host->target HTC control and raw streams */
88 .flags = CE_ATTR_FLAGS,
94 /* CE1: target->host HTT + HTC control */
96 .flags = CE_ATTR_FLAGS,
102 /* CE2: target->host WMI */
104 .flags = CE_ATTR_FLAGS,
110 /* CE3: host->target WMI */
112 .flags = CE_ATTR_FLAGS,
118 /* CE4: host->target HTT */
120 .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
121 .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
/* (CE5 entry — its header comment was lost in extraction) */
128 .flags = CE_ATTR_FLAGS,
134 /* CE6: target autonomous hif_memcpy */
136 .flags = CE_ATTR_FLAGS,
142 /* CE7: ce_diag, the Diagnostic Window */
144 .flags = CE_ATTR_FLAGS,
146 .src_sz_max = DIAG_TRANSFER_LIMIT,
151 /* Target firmware's Copy Engine configuration. */
/* Downloaded to the target in ath10k_pci_init_config(); must stay in
 * sync with host_ce_config_wlan above.
 * NOTE(review): pipenum/nentries/nbytes_max fields lost in extraction;
 * code left byte-identical.
 */
152 static const struct ce_pipe_config target_ce_config_wlan[] = {
153 /* CE0: host->target HTC control and raw streams */
156 .pipedir = PIPEDIR_OUT,
159 .flags = CE_ATTR_FLAGS,
163 /* CE1: target->host HTT + HTC control */
166 .pipedir = PIPEDIR_IN,
169 .flags = CE_ATTR_FLAGS,
173 /* CE2: target->host WMI */
176 .pipedir = PIPEDIR_IN,
179 .flags = CE_ATTR_FLAGS,
183 /* CE3: host->target WMI */
186 .pipedir = PIPEDIR_OUT,
189 .flags = CE_ATTR_FLAGS,
193 /* CE4: host->target HTT */
196 .pipedir = PIPEDIR_OUT,
199 .flags = CE_ATTR_FLAGS,
203 /* NB: 50% of src nentries, since tx has 2 frags */
208 .pipedir = PIPEDIR_OUT,
211 .flags = CE_ATTR_FLAGS,
215 /* CE6: Reserved for target autonomous hif_memcpy */
218 .pipedir = PIPEDIR_INOUT,
221 .flags = CE_ATTR_FLAGS,
225 /* CE7 used only by Host */
/* Return true if the shared legacy PCIe interrupt was raised by this
 * device (firmware indication or any CE cause bit set).
 * NOTE(review): braces/return lost in extraction; byte-identical.
 */
228 static bool ath10k_pci_irq_pending(struct ath10k *ar)
232 /* Check if the shared legacy irq is for us */
233 cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
234 PCIE_INTR_CAUSE_ADDRESS);
235 if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
/* Mask (ENABLE=0) then ack (CLR) the legacy interrupt sources, with a
 * readback to flush posted PCIe writes. Paired with the enable helper
 * below, which restores the same FIRMWARE|CE_ALL mask.
 */
241 static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
243 /* IMPORTANT: INTR_CLR register has to be set after
244 * INTR_ENABLE is set to 0, otherwise interrupt can not be
246 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
248 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
249 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
251 /* IMPORTANT: this extra read transaction is required to
252 * flush the posted write buffer. */
253 (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
254 PCIE_INTR_ENABLE_ADDRESS);
/* Re-enable firmware + all CE legacy interrupt sources. */
257 static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
259 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
260 PCIE_INTR_ENABLE_ADDRESS,
261 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
263 /* IMPORTANT: this extra read transaction is required to
264 * flush the posted write buffer. */
265 (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
266 PCIE_INTR_ENABLE_ADDRESS);
/* "Early" irq path: installed before the full per-CE handlers so that
 * firmware crash indications during boot are not lost. In legacy-irq
 * mode the handler first checks the shared line is really ours, masks
 * it, then defers to the early tasklet.
 */
269 static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg)
271 struct ath10k *ar = arg;
272 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
/* num_msi_intrs == 0 means legacy (shared) interrupts */
274 if (ar_pci->num_msi_intrs == 0) {
275 if (!ath10k_pci_irq_pending(ar))
278 ath10k_pci_disable_and_clear_legacy_irq(ar);
281 tasklet_schedule(&ar_pci->early_irq_tasklet);
286 static int ath10k_pci_request_early_irq(struct ath10k *ar)
288 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
291 /* Regardless whether MSI-X/MSI/legacy irqs have been set up the first
292 * interrupt from irq vector is triggered in all cases for FW
293 * indication/errors */
294 ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler,
295 IRQF_SHARED, "ath10k_pci (early)", ar);
297 ath10k_warn("failed to request early irq: %d\n", ret);
/* Counterpart of request_early_irq(). */
304 static void ath10k_pci_free_early_irq(struct ath10k *ar)
306 free_irq(ath10k_pci_priv(ar)->pdev->irq, ar);
310 * Diagnostic read/write access is provided for startup/config/debug usage.
311 * Caller must guarantee proper alignment, when applicable, and single user
/* Read nbytes from target memory at `address` into `data` via the
 * diagnostic CE. Register-space reads (< DRAM_BASE_ADDRESS) are
 * redirected word-by-word through ath10k_pci_diag_read_access().
 * DRAM reads bounce through a DMA-coherent buffer and loop in
 * DIAG_TRANSFER_LIMIT chunks, busy-polling send/recv completion.
 * NOTE(review): many lines (error paths, closing braces) were lost in
 * extraction; code left byte-identical.
 */
314 static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
317 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
320 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
323 struct ath10k_ce_pipe *ce_diag;
324 /* Host buffer address in CE space */
326 dma_addr_t ce_data_base = 0;
327 void *data_buf = NULL;
331 * This code cannot handle reads to non-memory space. Redirect to the
332 * register read fn but preserve the multi word read capability of
335 if (address < DRAM_BASE_ADDRESS) {
336 if (!IS_ALIGNED(address, 4) ||
337 !IS_ALIGNED((unsigned long)data, 4))
340 while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
341 ar, address, (u32 *)data)) == 0)) {
342 nbytes -= sizeof(u32);
343 address += sizeof(u32);
349 ce_diag = ar_pci->ce_diag;
352 * Allocate a temporary bounce buffer to hold caller's data
353 * to be DMA'ed from Target. This guarantees
354 * 1) 4-byte alignment
355 * 2) Buffer in DMA-able space
357 orig_nbytes = nbytes;
358 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
367 memset(data_buf, 0, orig_nbytes);
369 remaining_bytes = orig_nbytes;
370 ce_data = ce_data_base;
371 while (remaining_bytes) {
372 nbytes = min_t(unsigned int, remaining_bytes,
373 DIAG_TRANSFER_LIMIT);
375 ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
379 /* Request CE to send from Target(!) address to Host buffer */
381 * The address supplied by the caller is in the
382 * Target CPU virtual address space.
384 * In order to use this address with the diagnostic CE,
385 * convert it from Target CPU virtual address space
386 * to CE address space
388 address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
391 ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
/* Busy-poll for send completion, bounded by DIAG_ACCESS_CE_TIMEOUT_MS */
397 while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
401 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
407 if (nbytes != completed_nbytes) {
412 if (buf != (u32) address) {
/* Busy-poll for recv completion of the same chunk */
418 while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
423 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
429 if (nbytes != completed_nbytes) {
434 if (buf != ce_data) {
439 remaining_bytes -= nbytes;
446 /* Copy data from allocated DMA buf to caller's buf */
447 WARN_ON_ONCE(orig_nbytes & 3);
448 for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
450 __le32_to_cpu(((__le32 *)data_buf)[i]);
453 ath10k_warn("failed to read diag value at 0x%x: %d\n",
457 dma_free_coherent(ar->dev, orig_nbytes, data_buf,
/* Convenience wrapper: read a single 32-bit target word. */
463 static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
465 return ath10k_pci_diag_read_mem(ar, address, value, sizeof(u32));
/* Dereference a host-interest item: read the pointer stored at the
 * HI item, then copy `len` bytes from that target address into dest.
 */
468 static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
474 host_addr = host_interest_item_address(src);
476 ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
478 ath10k_warn("failed to get memcpy hi address for firmware address %d: %d\n",
483 ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
485 ath10k_warn("failed to memcpy firmware memory from %d (%d B): %d\n",
/* Read a firmware host-interest item (hi_*) into dest.
 * Expands the HI_ITEM() offset lookup and delegates to the helper.
 * No trailing semicolon in the expansion: the original macro ended in
 * ';', which both duplicates the caller's terminator and breaks use
 * inside an if/else branch (`if (x) ath10k_pci_diag_read_hi(...); else`
 * would not compile). Callers terminate the statement themselves.
 */
#define ath10k_pci_diag_read_hi(ar, dest, src, len) \
	__ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
496 /* Read 4-byte aligned data from Target memory or register */
/* DRAM addresses go via the diagnostic CE; anything below
 * DRAM_BASE_ADDRESS is treated as register space and read directly.
 */
497 static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
500 /* Assume range doesn't cross this boundary */
501 if (address >= DRAM_BASE_ADDRESS)
502 return ath10k_pci_diag_read32(ar, address, data);
504 *data = ath10k_pci_read32(ar, address);
/* Mirror of diag_read_mem: copy `nbytes` from host `data` to target
 * `address`. Caller data is staged in a DMA-coherent bounce buffer
 * (guaranteeing alignment and DMA-ability), converted to CE address
 * space, and pushed in DIAG_TRANSFER_LIMIT chunks with polled
 * completions. NOTE(review): error/cleanup lines lost in extraction;
 * code left byte-identical.
 */
508 static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
509 const void *data, int nbytes)
511 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
514 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
517 struct ath10k_ce_pipe *ce_diag;
518 void *data_buf = NULL;
519 u32 ce_data; /* Host buffer address in CE space */
520 dma_addr_t ce_data_base = 0;
523 ce_diag = ar_pci->ce_diag;
526 * Allocate a temporary bounce buffer to hold caller's data
527 * to be DMA'ed to Target. This guarantees
528 * 1) 4-byte alignment
529 * 2) Buffer in DMA-able space
531 orig_nbytes = nbytes;
532 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
541 /* Copy caller's data to allocated DMA buf */
542 WARN_ON_ONCE(orig_nbytes & 3);
543 for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
544 ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);
547 * The address supplied by the caller is in the
548 * Target CPU virtual address space.
550 * In order to use this address with the diagnostic CE,
552 * Target CPU virtual address space
556 address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
558 remaining_bytes = orig_nbytes;
559 ce_data = ce_data_base;
560 while (remaining_bytes) {
561 /* FIXME: check cast */
562 nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
564 /* Set up to receive directly into Target(!) address */
565 ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
570 * Request CE to send caller-supplied data that
571 * was copied to bounce buffer to Target(!) address.
573 ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
/* Poll send completion, bounded by DIAG_ACCESS_CE_TIMEOUT_MS */
579 while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
584 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
590 if (nbytes != completed_nbytes) {
595 if (buf != ce_data) {
/* Poll recv completion of the same chunk */
601 while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
606 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
612 if (nbytes != completed_nbytes) {
617 if (buf != address) {
622 remaining_bytes -= nbytes;
629 dma_free_coherent(ar->dev, orig_nbytes, data_buf,
634 ath10k_warn("failed to write diag value at 0x%x: %d\n",
640 /* Write 4B data to Target memory or register */
/* Mirror of diag_read_access: DRAM via the diagnostic CE, register
 * space written directly.
 */
641 static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
644 /* Assume range doesn't cross this boundary */
645 if (address >= DRAM_BASE_ADDRESS)
646 return ath10k_pci_diag_write_mem(ar, address, &data,
649 ath10k_pci_write32(ar, address, data);
/* SoC power-state helpers: request wake via PCIE_SOC_WAKE, poll the
 * RTC state register until the chip reports ON, and the counterpart
 * to let it sleep again.
 */
653 static bool ath10k_pci_is_awake(struct ath10k *ar)
655 u32 val = ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS);
657 return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
/* Poll until awake or PCIE_WAKE_TIMEOUT total delay elapses. */
660 static int ath10k_pci_wake_wait(struct ath10k *ar)
665 while (tot_delay < PCIE_WAKE_TIMEOUT) {
666 if (ath10k_pci_is_awake(ar))
670 tot_delay += curr_delay;
/* Assert the wake request, then wait for the SoC to come up. */
679 static int ath10k_pci_wake(struct ath10k *ar)
681 ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
682 PCIE_SOC_WAKE_V_MASK);
683 return ath10k_pci_wake_wait(ar);
/* Release the wake request; the SoC may power down. */
686 static void ath10k_pci_sleep(struct ath10k *ar)
688 ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
689 PCIE_SOC_WAKE_RESET);
692 /* Called by lower (CE) layer when a send to Target completes. */
/* Drains all completed sends on this CE and invokes the HIF layer's
 * tx_completion callback for each non-NULL transfer context.
 */
693 static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
695 struct ath10k *ar = ce_state->ar;
696 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
697 struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
698 void *transfer_context;
701 unsigned int transfer_id;
703 while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
705 &transfer_id) == 0) {
706 /* no need to call tx completion for NULL pointers */
707 if (transfer_context == NULL)
710 cb->tx_completion(ar, transfer_context, transfer_id);
714 /* Called by lower (CE) layer when data is received from the Target. */
/* For each completed recv: unmap the skb, sanity-check the reported
 * length against the skb capacity (drop oversized frames), hand the
 * skb to rx_completion, then replenish the ring with fresh buffers.
 */
715 static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
717 struct ath10k *ar = ce_state->ar;
718 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
719 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
720 struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
722 void *transfer_context;
724 unsigned int nbytes, max_nbytes;
725 unsigned int transfer_id;
727 int err, num_replenish = 0;
729 while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
730 &ce_data, &nbytes, &transfer_id,
733 skb = transfer_context;
734 max_nbytes = skb->len + skb_tailroom(skb);
735 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
736 max_nbytes, DMA_FROM_DEVICE);
/* Target claimed more bytes than the skb can hold — drop it */
738 if (unlikely(max_nbytes < nbytes)) {
739 ath10k_warn("rxed more than expected (nbytes %d, max %d)",
741 dev_kfree_skb_any(skb);
745 skb_put(skb, nbytes);
746 cb->rx_completion(ar, skb, pipe_info->pipe_num);
/* Put back as many buffers as were consumed above */
749 err = ath10k_pci_post_rx_pipe(pipe_info, num_replenish);
752 ath10k_warn("failed to replenish CE rx ring %d (%d bufs): %d\n",
753 pipe_info->pipe_num, num_replenish, err);
/* Scatter-gather send on a HIF pipe: under ce_lock, verify there is
 * ring room for all n_items, enqueue the first n_items-1 fragments
 * with CE_SEND_FLAG_GATHER, then the final fragment without it (which
 * triggers the actual transfer). On failure, revert the partially
 * written ring entries. NOTE(review): several lines (room-check exit,
 * final-send flags, return) lost in extraction; byte-identical.
 */
757 static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
758 struct ath10k_hif_sg_item *items, int n_items)
760 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
761 struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
762 struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
763 struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
764 unsigned int nentries_mask;
765 unsigned int sw_index;
766 unsigned int write_index;
769 spin_lock_bh(&ar_pci->ce_lock);
771 nentries_mask = src_ring->nentries_mask;
772 sw_index = src_ring->sw_index;
773 write_index = src_ring->write_index;
/* Need room for the whole scatter list or nothing */
775 if (unlikely(CE_RING_DELTA(nentries_mask,
776 write_index, sw_index - 1) < n_items)) {
781 for (i = 0; i < n_items - 1; i++) {
782 ath10k_dbg(ATH10K_DBG_PCI,
783 "pci tx item %d paddr 0x%08x len %d n_items %d\n",
784 i, items[i].paddr, items[i].len, n_items);
785 ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
786 items[i].vaddr, items[i].len);
788 err = ath10k_ce_send_nolock(ce_pipe,
789 items[i].transfer_context,
792 items[i].transfer_id,
793 CE_SEND_FLAG_GATHER);
798 /* `i` is equal to `n_items -1` after for() */
800 ath10k_dbg(ATH10K_DBG_PCI,
801 "pci tx item %d paddr 0x%08x len %d n_items %d\n",
802 i, items[i].paddr, items[i].len, n_items);
803 ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
804 items[i].vaddr, items[i].len);
806 err = ath10k_ce_send_nolock(ce_pipe,
807 items[i].transfer_context,
810 items[i].transfer_id,
815 spin_unlock_bh(&ar_pci->ce_lock);
/* error path: undo partially enqueued gather fragments */
820 __ath10k_ce_send_revert(ce_pipe);
822 spin_unlock_bh(&ar_pci->ce_lock);
/* Number of free source-ring entries on the given HIF pipe. */
826 static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
828 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
830 ath10k_dbg(ATH10K_DBG_PCI, "pci hif get free queue number\n");
832 return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
/* Dump the firmware's register crash area to the log and record it in
 * crash_data (stored little-endian). Caller must hold ar->data_lock.
 */
835 static void ath10k_pci_dump_registers(struct ath10k *ar,
836 struct ath10k_fw_crash_data *crash_data)
838 u32 i, reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
841 lockdep_assert_held(&ar->data_lock);
/* hi item read: pulls REG_DUMP_COUNT_QCA988X words from the target */
843 ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
845 REG_DUMP_COUNT_QCA988X * sizeof(u32));
847 ath10k_err("failed to read firmware dump area: %d\n", ret);
/* log loop below prints 4 words per line */
851 BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
853 ath10k_err("target Register Dump\n");
854 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
855 ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
858 reg_dump_values[i + 1],
859 reg_dump_values[i + 2],
860 reg_dump_values[i + 3]);
862 /* crash_data is in little endian */
863 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
864 crash_data->registers[i] = cpu_to_le32(reg_dump_values[i]);
/* Firmware crash handler: log identifying info (crash uuid, hw name,
 * fw version), capture the register dump, then schedule a restart.
 */
867 static void ath10k_pci_hif_dump_area(struct ath10k *ar)
869 struct ath10k_fw_crash_data *crash_data;
872 spin_lock_bh(&ar->data_lock);
874 crash_data = ath10k_debug_get_new_fw_crash_data(ar);
877 scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid);
879 scnprintf(uuid, sizeof(uuid), "n/a");
881 ath10k_err("firmware crashed! (uuid %s)\n", uuid);
882 ath10k_err("hardware name %s version 0x%x\n",
883 ar->hw_params.name, ar->target_version);
884 ath10k_err("firmware version: %s\n", ar->hw->wiphy->fw_version);
889 ath10k_pci_dump_registers(ar, crash_data);
892 spin_unlock_bh(&ar->data_lock);
894 queue_work(ar->workqueue, &ar->restart_work);
/* Opportunistically service tx completions on a pipe, but only when
 * fewer than half the source entries remain free — reading a CE
 * register is expensive, so skip the check when resources are ample.
 */
897 static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
900 ath10k_dbg(ATH10K_DBG_PCI, "pci hif send complete check\n");
905 * Decide whether to actually poll for completions, or just
906 * wait for a later chance.
907 * If there seem to be plenty of resources left, then just wait
908 * since checking involves reading a CE register, which is a
909 * relatively expensive operation.
911 resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
914 * If at least 50% of the total resources are still available,
915 * don't bother checking again yet.
917 if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
920 ath10k_ce_per_engine_service(ar, pipe);
/* Install the HIF layer's tx/rx completion callbacks (by copy). */
923 static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
924 struct ath10k_hif_cb *callbacks)
926 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
928 ath10k_dbg(ATH10K_DBG_PCI, "pci hif set callbacks\n");
930 memcpy(&ar_pci->msg_callbacks_current, callbacks,
931 sizeof(ar_pci->msg_callbacks_current));
/* Register per-CE send/recv completion callbacks for every pipe except
 * the diagnostic CE. Pipes with src entries get the send-done callback
 * (honouring CE_ATTR_DIS_INTR); pipes with dest entries get recv-data.
 */
934 static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
936 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
937 const struct ce_attr *attr;
938 struct ath10k_pci_pipe *pipe_info;
939 int pipe_num, disable_interrupts;
941 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
942 pipe_info = &ar_pci->pipe_info[pipe_num];
944 /* Handle Diagnostic CE specially */
945 if (pipe_info->ce_hdl == ar_pci->ce_diag)
948 attr = &host_ce_config_wlan[pipe_num];
950 if (attr->src_nentries) {
951 disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
952 ath10k_ce_send_cb_register(pipe_info->ce_hdl,
953 ath10k_pci_ce_send_done,
957 if (attr->dest_nentries)
958 ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
959 ath10k_pci_ce_recv_data);
/* Synchronously kill every driver tasklet: the main intr queue, the
 * MSI firmware-error tasklet, the early-irq tasklet, and each per-pipe
 * tasklet. Safe to call during teardown.
 */
965 static void ath10k_pci_kill_tasklet(struct ath10k *ar)
967 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
970 tasklet_kill(&ar_pci->intr_tq);
971 tasklet_kill(&ar_pci->msi_fw_err);
972 tasklet_kill(&ar_pci->early_irq_tasklet);
974 for (i = 0; i < CE_COUNT; i++)
975 tasklet_kill(&ar_pci->pipe_info[i].intr);
978 /* TODO - temporary mapping while we have too few CE's */
/* Map an HTC service id to its UL (host->target) and DL pipes, and
 * report whether each direction is polled (HTT UL is; see CE4 above).
 * NOTE(review): the actual *ul_pipe/*dl_pipe assignments were lost in
 * extraction; code left byte-identical.
 */
979 static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
980 u16 service_id, u8 *ul_pipe,
981 u8 *dl_pipe, int *ul_is_polled,
986 ath10k_dbg(ATH10K_DBG_PCI, "pci hif map service\n");
988 /* polling for received messages not supported */
991 switch (service_id) {
992 case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
994 * Host->target HTT gets its own pipe, so it can be polled
995 * while other pipes are interrupt driven.
999 * Use the same target->host pipe for HTC ctrl, HTC raw
1005 case ATH10K_HTC_SVC_ID_RSVD_CTRL:
1006 case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
1008 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
1009 * HTC_CTRL_RSVD_SVC could share the same pipe as the
1010 * WMI services. So, if another CE is needed, change
1011 * this to *ul_pipe = 3, which frees up CE 0.
1018 case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
1019 case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
1020 case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
1021 case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
1023 case ATH10K_HTC_SVC_ID_WMI_CONTROL:
1029 /* pipe 6 reserved */
1030 /* pipe 7 reserved */
/* UL is polled iff the mapped CE has interrupts disabled */
1037 (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
/* Default pipes are those of the reserved-control service. */
1042 static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1043 u8 *ul_pipe, u8 *dl_pipe)
1045 int ul_is_polled, dl_is_polled;
1047 ath10k_dbg(ATH10K_DBG_PCI, "pci hif get default pipe\n");
1049 (void)ath10k_pci_hif_map_service_to_pipe(ar,
1050 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1060 struct ath10k *ar = pipe_info->hif_ce_state;
1061 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1062 struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
1063 struct sk_buff *skb;
1067 if (pipe_info->buf_sz == 0)
1070 for (i = 0; i < num; i++) {
1071 skb = dev_alloc_skb(pipe_info->buf_sz);
1073 ath10k_warn("failed to allocate skbuff for pipe %d\n",
1079 WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
1081 ce_data = dma_map_single(ar->dev, skb->data,
1082 skb->len + skb_tailroom(skb),
1085 if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
1086 ath10k_warn("failed to DMA map sk_buff\n");
1087 dev_kfree_skb_any(skb);
1092 ATH10K_SKB_CB(skb)->paddr = ce_data;
1094 pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
1096 PCI_DMA_FROMDEVICE);
1098 ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
1101 ath10k_warn("failed to enqueue to pipe %d: %d\n",
1110 ath10k_pci_rx_pipe_cleanup(pipe_info);
/* Post rx buffers on every pipe that has dest entries (dest_nentries-1
 * buffers each, keeping one ring slot free). On failure, unwind by
 * cleaning up all pipes posted so far.
 */
1114 static int ath10k_pci_post_rx(struct ath10k *ar)
1116 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1117 struct ath10k_pci_pipe *pipe_info;
1118 const struct ce_attr *attr;
1119 int pipe_num, ret = 0;
1121 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1122 pipe_info = &ar_pci->pipe_info[pipe_num];
1123 attr = &host_ce_config_wlan[pipe_num];
1125 if (attr->dest_nentries == 0)
1128 ret = ath10k_pci_post_rx_pipe(pipe_info,
1129 attr->dest_nentries - 1);
1131 ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
/* unwind: clean every pipe up to and including this one */
1134 for (; pipe_num >= 0; pipe_num--) {
1135 pipe_info = &ar_pci->pipe_info[pipe_num];
1136 ath10k_pci_rx_pipe_cleanup(pipe_info);
/* HIF start: swap the early irq for the full per-CE irq setup, post
 * initial rx buffers and mark the device started. The error path
 * tears irqs/tasklets back down and re-installs the early irq so that
 * power_down() still finds the expected state.
 */
1145 static int ath10k_pci_hif_start(struct ath10k *ar)
1147 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1150 ath10k_dbg(ATH10K_DBG_BOOT, "boot hif start\n");
1152 ath10k_pci_free_early_irq(ar);
1153 ath10k_pci_kill_tasklet(ar);
1155 ret = ath10k_pci_request_irq(ar);
/* NOTE(review): this message says "post RX buffers" but it reports the
 * request_irq() failure above — looks like a copy-paste of the warn at
 * original line 1171; should read "failed to request irq". */
1157 ath10k_warn("failed to post RX buffers for all pipes: %d\n",
1162 ret = ath10k_pci_setup_ce_irq(ar);
1164 ath10k_warn("failed to setup CE interrupts: %d\n", ret);
1168 /* Post buffers once to start things off. */
1169 ret = ath10k_pci_post_rx(ar);
1171 ath10k_warn("failed to post RX buffers for all pipes: %d\n",
1176 ar_pci->started = 1;
/* error path */
1180 ath10k_ce_disable_interrupts(ar);
1181 ath10k_pci_free_irq(ar);
1182 ath10k_pci_kill_tasklet(ar);
1184 /* Though there should be no interrupts (device was reset)
1185 * power_down() expects the early IRQ to be installed as per the
1186 * driver lifecycle. */
1187 ret_early = ath10k_pci_request_early_irq(ar);
1189 ath10k_warn("failed to re-enable early irq: %d\n", ret_early);
/* Revoke all still-enqueued rx buffers on a pipe, unmapping and freeing
 * each skb. No-op for unused pipes (buf_sz == 0) or before start.
 */
1194 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
1197 struct ath10k_pci *ar_pci;
1198 struct ath10k_ce_pipe *ce_hdl;
1200 struct sk_buff *netbuf;
1203 buf_sz = pipe_info->buf_sz;
1205 /* Unused Copy Engine */
1209 ar = pipe_info->hif_ce_state;
1210 ar_pci = ath10k_pci_priv(ar);
1212 if (!ar_pci->started)
1215 ce_hdl = pipe_info->ce_hdl;
1217 while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
1219 dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
1220 netbuf->len + skb_tailroom(netbuf),
1222 dev_kfree_skb_any(netbuf);
/* Cancel all pending sends on a pipe, reporting each non-NULL
 * transfer context to the tx completion callback.
 */
1226 static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
1229 struct ath10k_pci *ar_pci;
1230 struct ath10k_ce_pipe *ce_hdl;
1231 struct sk_buff *netbuf;
1233 unsigned int nbytes;
1237 buf_sz = pipe_info->buf_sz;
1239 /* Unused Copy Engine */
1243 ar = pipe_info->hif_ce_state;
1244 ar_pci = ath10k_pci_priv(ar);
1246 if (!ar_pci->started)
1249 ce_hdl = pipe_info->ce_hdl;
1251 while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
1252 &ce_data, &nbytes, &id) == 0) {
1253 /* no need to call tx completion for NULL pointers */
1257 ar_pci->msg_callbacks_current.tx_completion(ar,
1264 * Cleanup residual buffers for device shutdown:
1265 * buffers that were enqueued for receive
1266 * buffers that were to be sent
1267 * Note: Buffers that had completed but which were
1268 * not yet processed are on a completion queue. They
1269 * are handled when the completion thread shuts down.
1271 static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1273 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1276 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1277 struct ath10k_pci_pipe *pipe_info;
1279 pipe_info = &ar_pci->pipe_info[pipe_num];
1280 ath10k_pci_rx_pipe_cleanup(pipe_info);
1281 ath10k_pci_tx_pipe_cleanup(pipe_info);
/* Tear down every Copy Engine pipe. */
1285 static void ath10k_pci_ce_deinit(struct ath10k *ar)
1289 for (i = 0; i < CE_COUNT; i++)
1290 ath10k_ce_deinit_pipe(ar, i);
/* HIF stop: disable CE interrupts, drop the full irq setup in favour
 * of the early irq, drain leftover buffers, then warm-reset the device
 * so it can no longer DMA into host memory that is about to be freed.
 */
1293 static void ath10k_pci_hif_stop(struct ath10k *ar)
1295 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1298 ath10k_dbg(ATH10K_DBG_BOOT, "boot hif stop\n");
1300 if (WARN_ON(!ar_pci->started))
1303 ret = ath10k_ce_disable_interrupts(ar);
1305 ath10k_warn("failed to disable CE interrupts: %d\n", ret);
1307 ath10k_pci_free_irq(ar);
1308 ath10k_pci_kill_tasklet(ar);
1310 ret = ath10k_pci_request_early_irq(ar);
1312 ath10k_warn("failed to re-enable early irq: %d\n", ret);
1314 /* At this point, asynchronous threads are stopped, the target should
1315 * not DMA nor interrupt. We process the leftovers and then free
1316 * everything else up. */
1318 ath10k_pci_buffer_cleanup(ar);
1320 /* Make the sure the device won't access any structures on the host by
1321 * resetting it. The device was fed with PCI CE ringbuffer
1322 * configuration during init. If ringbuffers are freed and the device
1323 * were to access them this could lead to memory corruption on the
1325 ath10k_pci_warm_reset(ar);
1327 ar_pci->started = 0;
/* Synchronous BMI request/response over the BMI CE pair: stage the
 * request in a DMA-mapped copy, optionally post a response buffer,
 * send, then busy-wait in ath10k_pci_bmi_wait(). On timeout the send
 * is cancelled; on success the response is copied back to the caller
 * (clamped to xfer.resp_len). NOTE(review): error labels/frees were
 * lost in extraction; code left byte-identical.
 */
1330 static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1331 void *req, u32 req_len,
1332 void *resp, u32 *resp_len)
1334 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1335 struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1336 struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1337 struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
1338 struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
1339 dma_addr_t req_paddr = 0;
1340 dma_addr_t resp_paddr = 0;
1341 struct bmi_xfer xfer = {};
1342 void *treq, *tresp = NULL;
/* argument sanity: a resp pointer requires a non-zero *resp_len */
1347 if (resp && !resp_len)
1350 if (resp && resp_len && *resp_len == 0)
1353 treq = kmemdup(req, req_len, GFP_KERNEL);
1357 req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
1358 ret = dma_mapping_error(ar->dev, req_paddr);
1362 if (resp && resp_len) {
1363 tresp = kzalloc(*resp_len, GFP_KERNEL);
1369 resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
1371 ret = dma_mapping_error(ar->dev, resp_paddr);
1375 xfer.wait_for_resp = true;
1378 ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
1381 ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
1385 ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
/* timeout path: reclaim the in-flight send descriptor */
1388 unsigned int unused_nbytes;
1389 unsigned int unused_id;
1391 ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
1392 &unused_nbytes, &unused_id);
1394 /* non-zero means we did not time out */
1402 ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1403 dma_unmap_single(ar->dev, resp_paddr,
1404 *resp_len, DMA_FROM_DEVICE);
1407 dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
1409 if (ret == 0 && resp_len) {
1410 *resp_len = min(*resp_len, xfer.resp_len);
1411 memcpy(resp, tresp, xfer.resp_len);
/* BMI send-completion poll step: mark tx_done once the CE reports the
 * request descriptor completed.
 */
1420 static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
1422 struct bmi_xfer *xfer;
1424 unsigned int nbytes;
1425 unsigned int transfer_id;
1427 if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
1428 &nbytes, &transfer_id))
1431 xfer->tx_done = true;
/* BMI recv poll step: record the response length and mark rx_done;
 * unexpected data (no response awaited) is warned about and ignored.
 */
1434 static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
1436 struct bmi_xfer *xfer;
1438 unsigned int nbytes;
1439 unsigned int transfer_id;
1442 if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
1443 &nbytes, &transfer_id, &flags))
1446 if (!xfer->wait_for_resp) {
1447 ath10k_warn("unexpected: BMI data received; ignoring\n");
1451 xfer->resp_len = nbytes;
1452 xfer->rx_done = true;
/* Busy-wait (bounded by BMI_COMMUNICATION_TIMEOUT_HZ) until the send
 * completed and, when a response is expected, the response arrived.
 */
1455 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
1456 struct ath10k_ce_pipe *rx_pipe,
1457 struct bmi_xfer *xfer)
1459 unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1461 while (time_before_eq(jiffies, timeout)) {
1462 ath10k_pci_bmi_send_done(tx_pipe);
1463 ath10k_pci_bmi_recv_data(rx_pipe);
1465 if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
1475 * Map from service/endpoint to Copy Engine.
1476 * This table is derived from the CE_PCI TABLE, above.
1477 * It is passed to the Target at startup for use by firmware.
/* NOTE(review): the pipe-number column of each entry was lost in
 * extraction; code left byte-identical.
 */
1479 static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
1481 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1482 PIPEDIR_OUT, /* out = UL = host -> target */
1486 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1487 PIPEDIR_IN, /* in = DL = target -> host */
1491 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1492 PIPEDIR_OUT, /* out = UL = host -> target */
1496 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1497 PIPEDIR_IN, /* in = DL = target -> host */
1501 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1502 PIPEDIR_OUT, /* out = UL = host -> target */
1506 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1507 PIPEDIR_IN, /* in = DL = target -> host */
1511 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1512 PIPEDIR_OUT, /* out = UL = host -> target */
1516 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1517 PIPEDIR_IN, /* in = DL = target -> host */
1521 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1522 PIPEDIR_OUT, /* out = UL = host -> target */
1526 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1527 PIPEDIR_IN, /* in = DL = target -> host */
1531 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1532 PIPEDIR_OUT, /* out = UL = host -> target */
1533 0, /* could be moved to 3 (share with WMI) */
1536 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1537 PIPEDIR_IN, /* in = DL = target -> host */
1541 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
1542 PIPEDIR_OUT, /* out = UL = host -> target */
1546 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
1547 PIPEDIR_IN, /* in = DL = target -> host */
1551 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1552 PIPEDIR_OUT, /* out = UL = host -> target */
1556 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1557 PIPEDIR_IN, /* in = DL = target -> host */
1561 /* (Additions here) */
1563 { /* Must be last */
1571 * Send an interrupt to the device to wake up the Target CPU
1572 * so it has an opportunity to notice any changed state.
/* Read-modify-write CORE_CTRL via the diagnostic access helpers to
 * set the A_INUM_FIRMWARE CPU interrupt bit.
 */
1574 static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1579 ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
1583 ath10k_warn("failed to read core_ctrl: %d\n", ret);
1587 /* A_INUM_FIRMWARE interrupt to Target CPU */
1588 core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1590 ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
1594 ath10k_warn("failed to set target CPU interrupt mask: %d\n",
/*
 * Push the host's CE pipe configuration and service-to-pipe map into
 * target memory via the diagnostic window, disable PCIe L1, configure
 * early IRAM allocation, and finally set HI_OPTION_EARLY_CFG_DONE so
 * the target proceeds with initialization.
 * Returns 0 on success, negative errno on any diag access failure.
 * NOTE(review): extract is lossy -- error-return statements after each
 * ath10k_err() and some declarations were elided.
 */
1602 static int ath10k_pci_init_config(struct ath10k *ar)
1604 u32 interconnect_targ_addr;
1605 u32 pcie_state_targ_addr = 0;
1606 u32 pipe_cfg_targ_addr = 0;
1607 u32 svc_to_pipe_map = 0;
1608 u32 pcie_config_flags = 0;
1610 u32 ealloc_targ_addr;
1612 u32 flag2_targ_addr;
1615 /* Download to Target the CE Config and the service-to-CE map */
1616 interconnect_targ_addr =
1617 host_interest_item_address(HI_ITEM(hi_interconnect_state));
1619 /* Supply Target-side CE configuration */
1620 ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
1621 &pcie_state_targ_addr);
1623 ath10k_err("Failed to get pcie state addr: %d\n", ret);
1627 if (pcie_state_targ_addr == 0) {
1629 ath10k_err("Invalid pcie state addr\n");
1633 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1634 offsetof(struct pcie_state,
1636 &pipe_cfg_targ_addr);
1638 ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
1642 if (pipe_cfg_targ_addr == 0) {
1644 ath10k_err("Invalid pipe cfg addr\n");
1648 ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1649 target_ce_config_wlan,
1650 sizeof(target_ce_config_wlan));
1653 ath10k_err("Failed to write pipe cfg: %d\n", ret);
1657 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1658 offsetof(struct pcie_state,
1662 ath10k_err("Failed to get svc/pipe map: %d\n", ret);
1666 if (svc_to_pipe_map == 0) {
1668 ath10k_err("Invalid svc_to_pipe map\n");
1672 ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1673 target_service_to_ce_map_wlan,
1674 sizeof(target_service_to_ce_map_wlan));
1676 ath10k_err("Failed to write svc/pipe map: %d\n", ret);
1680 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1681 offsetof(struct pcie_state,
1683 &pcie_config_flags);
1685 ath10k_err("Failed to get pcie config_flags: %d\n", ret);
/* L1 power save is disabled on this hardware as a workaround */
1689 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1691 ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
1692 offsetof(struct pcie_state, config_flags),
1694 sizeof(pcie_config_flags));
1696 ath10k_err("Failed to write pcie config_flags: %d\n", ret);
1700 /* configure early allocation */
1701 ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1703 ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
/* NOTE(review): "Faile" typo in the runtime string below -- should read
 * "Failed"; left untouched here because it is program output, not a comment. */
1705 ath10k_err("Faile to get early alloc val: %d\n", ret);
1709 /* first bank is switched to IRAM */
1710 ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1711 HI_EARLY_ALLOC_MAGIC_MASK);
1712 ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1713 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1715 ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1717 ath10k_err("Failed to set early alloc val: %d\n", ret);
1721 /* Tell Target to proceed with initialization */
1722 flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1724 ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1726 ath10k_err("Failed to get option val: %d\n", ret);
1730 flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1732 ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1734 ath10k_err("Failed to set option val: %d\n", ret);
/*
 * Allocate all CE_COUNT copy-engine pipes from host_ce_config_wlan.
 * Returns 0 on success; on failure logs the failing pipe index
 * (cleanup/return path elided from this extract).
 */
1741 static int ath10k_pci_alloc_ce(struct ath10k *ar)
1745 for (i = 0; i < CE_COUNT; i++) {
1746 ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
1748 ath10k_err("failed to allocate copy engine pipe %d: %d\n",
/* Free every copy-engine pipe allocated by ath10k_pci_alloc_ce(). */
1757 static void ath10k_pci_free_ce(struct ath10k *ar)
1761 for (i = 0; i < CE_COUNT; i++)
1762 ath10k_ce_free_pipe(ar, i);
/*
 * Initialize per-pipe bookkeeping and hardware state for every copy
 * engine. The last CE is reserved as the diagnostic-window engine
 * (ar_pci->ce_diag). Returns 0 or a negative errno from
 * ath10k_ce_init_pipe().
 */
1765 static int ath10k_pci_ce_init(struct ath10k *ar)
1767 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1768 struct ath10k_pci_pipe *pipe_info;
1769 const struct ce_attr *attr;
1772 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1773 pipe_info = &ar_pci->pipe_info[pipe_num];
1774 pipe_info->ce_hdl = &ar_pci->ce_states[pipe_num];
1775 pipe_info->pipe_num = pipe_num;
1776 pipe_info->hif_ce_state = ar;
1777 attr = &host_ce_config_wlan[pipe_num];
1779 ret = ath10k_ce_init_pipe(ar, pipe_num, attr);
1781 ath10k_err("failed to initialize copy engine pipe %d: %d\n",
1786 if (pipe_num == CE_COUNT - 1) {
1788 * Reserve the ultimate CE for
1789 * diagnostic Window support
1791 ar_pci->ce_diag = pipe_info->ce_hdl;
/* cache max source buffer size for later RX buffer allocation */
1795 pipe_info->buf_sz = (size_t) (attr->src_sz_max);
/*
 * Service a firmware-indicator event: acknowledge the pending bit and,
 * if the HIF layer has started, dump the firmware crash area; otherwise
 * only warn (an event this early is unexpected).
 */
1801 static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1803 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1806 fw_indicator = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
1808 if (fw_indicator & FW_IND_EVENT_PENDING) {
1809 /* ACK: clear Target-side pending event */
1810 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
1811 fw_indicator & ~FW_IND_EVENT_PENDING);
1813 if (ar_pci->started) {
1814 ath10k_pci_hif_dump_area(ar);
1817 * Probable Target failure before we're prepared
1818 * to handle it. Generally unexpected.
1820 ath10k_warn("early firmware event indicated\n");
/*
 * Pulse the SI0 reset bit in SOC_RESET_CONTROL: assert, (delay elided
 * from extract), then deassert. Each write is followed by a read-back
 * to flush/post the register write.
 */
1825 /* this function effectively clears target memory controller assert line */
1826 static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
1830 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1831 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
1832 val | SOC_RESET_CONTROL_SI0_RST_MASK);
1833 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1837 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1838 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
1839 val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
1840 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
/*
 * Perform a warm (CPU/CE-level) reset of the target without a full
 * PCIe cold reset: mask and clear pending PCIe interrupts, clear the
 * firmware indicator, stop the LF timer, pulse the CE reset, clear the
 * SI0 assert line, then warm-reset the target CPU.
 * NOTE(review): extract is lossy -- msleep()s between reset phases and
 * the return statement were elided.
 */
1845 static int ath10k_pci_warm_reset(struct ath10k *ar)
1849 ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset\n");
1852 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1853 PCIE_INTR_CAUSE_ADDRESS);
1854 ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
1856 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1858 ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
1861 /* disable pending irqs */
1862 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1863 PCIE_INTR_ENABLE_ADDRESS, 0);
1865 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1866 PCIE_INTR_CLR_ADDRESS, ~0);
1870 /* clear fw indicator */
1871 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
1873 /* clear target LF timer interrupts */
1874 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1875 SOC_LF_TIMER_CONTROL0_ADDRESS);
1876 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
1877 SOC_LF_TIMER_CONTROL0_ADDRESS,
1878 val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
/* assert, then deassert, the copy-engine reset */
1881 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1882 SOC_RESET_CONTROL_ADDRESS);
1883 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1884 val | SOC_RESET_CONTROL_CE_RST_MASK);
1885 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1886 SOC_RESET_CONTROL_ADDRESS);
1890 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1891 val & ~SOC_RESET_CONTROL_CE_RST_MASK);
1892 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1893 SOC_RESET_CONTROL_ADDRESS);
1896 ath10k_pci_warm_reset_si0(ar);
/* debug: dump interrupt cause registers again after the resets */
1899 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1900 PCIE_INTR_CAUSE_ADDRESS);
1901 ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
1903 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1905 ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
1908 /* CPU warm reset */
1909 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1910 SOC_RESET_CONTROL_ADDRESS);
1911 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1912 val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
1914 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1915 SOC_RESET_CONTROL_ADDRESS);
1916 ath10k_dbg(ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", val);
1920 ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset complete\n");
/*
 * Core power-up sequence: reset the target (cold or warm per
 * @cold_reset), bring up the copy engines, wire up IRQs, wait for the
 * target to initialise, push the init config and wake the target CPU.
 * On failure, unwinds via the goto-cleanup labels at the bottom.
 * Returns 0 or a negative errno.
 */
1925 static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
1927 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1928 const char *irq_mode;
1932 * Bring the target up cleanly.
1934 * The target may be in an undefined state with an AUX-powered Target
1935 * and a Host in WoW mode. If the Host crashes, loses power, or is
1936 * restarted (without unloading the driver) then the Target is left
1937 * (aux) powered and running. On a subsequent driver load, the Target
1938 * is in an unexpected state. We try to catch that here in order to
1939 * reset the Target and retry the probe.
1942 ret = ath10k_pci_cold_reset(ar);
1944 ret = ath10k_pci_warm_reset(ar);
1947 ath10k_err("failed to reset target: %d\n", ret);
1951 ret = ath10k_pci_ce_init(ar);
1953 ath10k_err("failed to initialize CE: %d\n", ret);
1957 ret = ath10k_ce_disable_interrupts(ar);
1959 ath10k_err("failed to disable CE interrupts: %d\n", ret);
1963 ret = ath10k_pci_init_irq(ar);
1965 ath10k_err("failed to init irqs: %d\n", ret);
1969 ret = ath10k_pci_request_early_irq(ar);
1971 ath10k_err("failed to request early irq: %d\n", ret);
1972 goto err_deinit_irq;
1975 ret = ath10k_pci_wait_for_target_init(ar);
1977 ath10k_err("failed to wait for target to init: %d\n", ret);
1978 goto err_free_early_irq;
1981 ret = ath10k_pci_init_config(ar);
1983 ath10k_err("failed to setup init config: %d\n", ret);
1984 goto err_free_early_irq;
1987 ret = ath10k_pci_wake_target_cpu(ar);
1989 ath10k_err("could not wake up target CPU: %d\n", ret);
1990 goto err_free_early_irq;
/* describe the irq configuration once, on first successful boot */
1993 if (ar_pci->num_msi_intrs > 1)
1995 else if (ar_pci->num_msi_intrs == 1)
1998 irq_mode = "legacy";
2000 if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
2001 ath10k_info("pci irq %s irq_mode %d reset_mode %d\n",
2002 irq_mode, ath10k_pci_irq_mode,
2003 ath10k_pci_reset_mode);
/* error unwind labels (label names elided from extract) */
2008 ath10k_pci_free_early_irq(ar);
2010 ath10k_pci_deinit_irq(ar);
2012 ath10k_pci_ce_deinit(ar);
2013 ath10k_pci_warm_reset(ar);
/*
 * Retry warm-reset power-up a few times
 * (ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS); warm reset is flaky on this
 * hardware and sometimes succeeds only after retries. Returns the last
 * __ath10k_pci_hif_power_up() result.
 */
2018 static int ath10k_pci_hif_power_up_warm(struct ath10k *ar)
2023 * Sometime warm reset succeeds after retries.
2025 * FIXME: It might be possible to tune ath10k_pci_warm_reset() to work
2028 for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
2029 ret = __ath10k_pci_hif_power_up(ar, false);
2033 ath10k_warn("failed to warm reset (attempt %d out of %d): %d\n",
2034 i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, ret);
/*
 * HIF power_up entry point: prefer warm reset (safer on CUS232 v2
 * hardware); fall back to cold reset unless the reset_mode module
 * parameter forbids it. Returns 0 or a negative errno.
 */
2040 static int ath10k_pci_hif_power_up(struct ath10k *ar)
2044 ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power up\n");
2047 * Hardware CUS232 version 2 has some issues with cold reset and the
2048 * preferred (and safer) way to perform a device reset is through a
2051 * Warm reset doesn't always work though so fall back to cold reset may
2054 ret = ath10k_pci_hif_power_up_warm(ar);
2056 ath10k_warn("failed to power up target using warm reset: %d\n",
2059 if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
2062 ath10k_warn("trying cold reset\n");
2064 ret = __ath10k_pci_hif_power_up(ar, true);
2066 ath10k_err("failed to power up target using cold reset too (%d)\n",
/*
 * HIF power_down: tear down in reverse order of power_up -- early irq,
 * tasklets, irqs, copy engines -- then warm-reset the target so it is
 * quiesced for the next power_up.
 */
2075 static void ath10k_pci_hif_power_down(struct ath10k *ar)
2077 ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power down\n");
2079 ath10k_pci_free_early_irq(ar);
2080 ath10k_pci_kill_tasklet(ar);
2081 ath10k_pci_deinit_irq(ar);
2082 ath10k_pci_ce_deinit(ar);
2083 ath10k_pci_warm_reset(ar);
2088 #define ATH10K_PCI_PM_CONTROL 0x44
/*
 * Suspend: if the device is not already in PCI power state D3 (low
 * byte of the PM control register == 0x3), save config space, disable
 * the device and write D3 into the PM control register by hand.
 */
2090 static int ath10k_pci_hif_suspend(struct ath10k *ar)
2092 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2093 struct pci_dev *pdev = ar_pci->pdev;
2096 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2098 if ((val & 0x000000ff) != 0x3) {
2099 pci_save_state(pdev);
2100 pci_disable_device(pdev);
2101 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2102 (val & 0xffffff00) | 0x03);
/*
 * Resume: if the device is not in D0 (low PM-control byte != 0),
 * restore saved config space and force D0, then re-clear the
 * RETRY_TIMEOUT field that suspend/resume resets.
 */
2108 static int ath10k_pci_hif_resume(struct ath10k *ar)
2110 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2111 struct pci_dev *pdev = ar_pci->pdev;
2114 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2116 if ((val & 0x000000ff) != 0) {
2117 pci_restore_state(pdev);
2118 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2121 * Suspend/Resume resets the PCI configuration space,
2122 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
2123 * to keep PCI Tx retries from interfering with C3 CPU state
2125 pci_read_config_dword(pdev, 0x40, &val);
2127 if ((val & 0x0000ff00) != 0)
2128 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
/* HIF callback table registered with the ath10k core for PCI devices. */
2135 static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
2136 .tx_sg = ath10k_pci_hif_tx_sg,
2137 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
2138 .start = ath10k_pci_hif_start,
2139 .stop = ath10k_pci_hif_stop,
2140 .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
2141 .get_default_pipe = ath10k_pci_hif_get_default_pipe,
2142 .send_complete_check = ath10k_pci_hif_send_complete_check,
2143 .set_callbacks = ath10k_pci_hif_set_callbacks,
2144 .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
2145 .power_up = ath10k_pci_hif_power_up,
2146 .power_down = ath10k_pci_hif_power_down,
2148 .suspend = ath10k_pci_hif_suspend,
2149 .resume = ath10k_pci_hif_resume,
/* Per-pipe tasklet: service completions on one copy engine. @ptr is the
 * struct ath10k_pci_pipe this tasklet was registered for. */
2153 static void ath10k_pci_ce_tasklet(unsigned long ptr)
2155 struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
2156 struct ath10k_pci *ar_pci = pipe->ar_pci;
2158 ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
/* Tasklet scheduled from the MSI firmware-error vector; runs the
 * firmware-indicator handler in softirq context. */
2161 static void ath10k_msi_err_tasklet(unsigned long data)
2163 struct ath10k *ar = (struct ath10k *)data;
2165 ath10k_pci_fw_interrupt_handler(ar);
/*
 * MSI-X interrupt handler for one copy engine. Derives the CE id from
 * the irq number offset and schedules that pipe's tasklet; bounds-check
 * guards against spurious vectors. Returns IRQ_HANDLED (return lines
 * elided from this extract).
 */
2169  * Handler for a per-engine interrupt on a PARTICULAR CE.
2170  * This is used in cases where each CE has a private MSI interrupt.
2172 static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
2174 struct ath10k *ar = arg;
2175 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2176 int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
2178 if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
2179 ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
2184 * NOTE: We are able to derive ce_id from irq because we
2185 * use a one-to-one mapping for CE's 0..5.
2186 * CE's 6 & 7 do not use interrupts at all.
2188 * This mapping must be kept in sync with the mapping
2191 tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
/* MSI-X firmware-error vector: defer handling to the msi_fw_err
 * tasklet and return (IRQ_HANDLED return elided from extract). */
2195 static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2197 struct ath10k *ar = arg;
2198 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2200 tasklet_schedule(&ar_pci->msi_fw_err);
/*
 * Shared handler for single-MSI and legacy INTx modes. In legacy mode
 * (num_msi_intrs == 0) first checks whether the interrupt is really
 * ours (shared line) and masks it; the main tasklet re-enables it.
 */
2205  * Top-level interrupt handler for all PCI interrupts from a Target.
2206  * When a block of MSI interrupts is allocated, this top-level handler
2207  * is not used; instead, we directly call the correct sub-handler.
2209 static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2211 struct ath10k *ar = arg;
2212 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2214 if (ar_pci->num_msi_intrs == 0) {
2215 if (!ath10k_pci_irq_pending(ar))
2218 ath10k_pci_disable_and_clear_legacy_irq(ar);
2221 tasklet_schedule(&ar_pci->intr_tq);
/*
 * Early-boot tasklet (before the HIF is fully started): acknowledge a
 * pending firmware event, dump the crash area, and re-enable the
 * legacy interrupt that the handler masked.
 */
2226 static void ath10k_pci_early_irq_tasklet(unsigned long data)
2228 struct ath10k *ar = (struct ath10k *)data;
2231 fw_ind = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2232 if (fw_ind & FW_IND_EVENT_PENDING) {
2233 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
2234 fw_ind & ~FW_IND_EVENT_PENDING);
2235 ath10k_pci_hif_dump_area(ar);
2238 ath10k_pci_enable_legacy_irq(ar);
/*
 * Main interrupt tasklet: handle firmware events, service all copy
 * engines, then unmask the legacy irq if running in legacy mode.
 */
2241 static void ath10k_pci_tasklet(unsigned long data)
2243 struct ath10k *ar = (struct ath10k *)data;
2244 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2246 ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
2247 ath10k_ce_per_engine_service_any(ar);
2249 /* Re-enable legacy irq that was disabled in the irq handler */
2250 if (ar_pci->num_msi_intrs == 0)
2251 ath10k_pci_enable_legacy_irq(ar);
/*
 * Request the MSI-X vector block: one firmware-error vector plus one
 * vector per interrupting CE. On a mid-loop failure, frees the vectors
 * already requested (including the fw vector) before returning the
 * error.
 */
2254 static int ath10k_pci_request_irq_msix(struct ath10k *ar)
2256 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2259 ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2260 ath10k_pci_msi_fw_handler,
2261 IRQF_SHARED, "ath10k_pci", ar);
2263 ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
2264 ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
2268 for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2269 ret = request_irq(ar_pci->pdev->irq + i,
2270 ath10k_pci_per_engine_handler,
2271 IRQF_SHARED, "ath10k_pci", ar);
2273 ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
2274 ar_pci->pdev->irq + i, ret);
2276 for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2277 free_irq(ar_pci->pdev->irq + i, ar);
2279 free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
/* Request the single shared MSI vector, routed through the common
 * top-level interrupt handler. */
2287 static int ath10k_pci_request_irq_msi(struct ath10k *ar)
2289 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2292 ret = request_irq(ar_pci->pdev->irq,
2293 ath10k_pci_interrupt_handler,
2294 IRQF_SHARED, "ath10k_pci", ar);
2296 ath10k_warn("failed to request MSI irq %d: %d\n",
2297 ar_pci->pdev->irq, ret);
/* Request the shared legacy INTx line, routed through the common
 * top-level interrupt handler. */
2304 static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
2306 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2309 ret = request_irq(ar_pci->pdev->irq,
2310 ath10k_pci_interrupt_handler,
2311 IRQF_SHARED, "ath10k_pci", ar);
2313 ath10k_warn("failed to request legacy irq %d: %d\n",
2314 ar_pci->pdev->irq, ret);
/*
 * Dispatch irq registration by the number of MSI vectors chosen in
 * ath10k_pci_init_irq(): 0 -> legacy, 1 -> MSI, MSI_NUM_REQUEST ->
 * MSI-X block; anything else is a programming error.
 */
2321 static int ath10k_pci_request_irq(struct ath10k *ar)
2323 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2325 switch (ar_pci->num_msi_intrs) {
2327 return ath10k_pci_request_irq_legacy(ar);
2329 return ath10k_pci_request_irq_msi(ar);
2330 case MSI_NUM_REQUEST:
2331 return ath10k_pci_request_irq_msix(ar);
2334 ath10k_warn("unknown irq configuration upon request\n");
/* Free every irq requested by ath10k_pci_request_irq(). */
2338 static void ath10k_pci_free_irq(struct ath10k *ar)
2340 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2343 /* There's at least one interrupt regardless of whether it's legacy INTR
2344 * or MSI or MSI-X */
2345 for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2346 free_irq(ar_pci->pdev->irq + i, ar);
/*
 * Initialize the main interrupt tasklet, the MSI firmware-error
 * tasklet, the early-irq tasklet, and one per-pipe CE tasklet.
 */
2349 static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
2351 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2354 tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
2355 tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2357 tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet,
2360 for (i = 0; i < CE_COUNT; i++) {
2361 ar_pci->pipe_info[i].ar_pci = ar_pci;
2362 tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
2363 (unsigned long)&ar_pci->pipe_info[i]);
/*
 * Choose and enable the interrupt mode: try the full MSI-X block in
 * auto mode, fall back to single MSI unless legacy was forced, and
 * finally fall back to legacy INTx (num_msi_intrs = 0), in which case
 * the PCIe interrupt-enable register must be programmed directly.
 */
2367 static int ath10k_pci_init_irq(struct ath10k *ar)
2369 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2372 ath10k_pci_init_irq_tasklets(ar);
2374 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO &&
2375 !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
2376 ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode);
/* Try MSI-X first (exact vector count required) */
2379 if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) {
2380 ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
2381 ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
2382 ar_pci->num_msi_intrs);
/* Fall back to a single MSI vector */
2390 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
2391 ar_pci->num_msi_intrs = 1;
2392 ret = pci_enable_msi(ar_pci->pdev);
2401 * A potential race occurs here: The CORE_BASE write
2402 * depends on target correctly decoding AXI address but
2403 * host won't know when target writes BAR to CORE_CTRL.
2404 * This write might get lost if target has NOT written BAR.
2405 * For now, fix the race by repeating the write in below
2406 * synchronization checking. */
2407 ar_pci->num_msi_intrs = 0;
2409 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2410 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
/* Mask all target-side PCIe interrupt sources (legacy mode teardown). */
2415 static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
2417 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
/*
 * Undo ath10k_pci_init_irq(): mask legacy interrupts or disable MSI /
 * MSI-X depending on the active mode.
 */
2421 static int ath10k_pci_deinit_irq(struct ath10k *ar)
2423 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2425 switch (ar_pci->num_msi_intrs) {
2427 ath10k_pci_deinit_irq_legacy(ar);
2431 case MSI_NUM_REQUEST:
2432 pci_disable_msi(ar_pci->pdev);
2435 pci_disable_msi(ar_pci->pdev);
2438 ath10k_warn("unknown irq configuration upon deinit\n");
/*
 * Poll the firmware indicator register until FW_IND_INITIALIZED is set
 * or ATH10K_PCI_TARGET_WAIT ms elapse. Bails out early if the register
 * reads all-ones (device gone) or FW_IND_EVENT_PENDING is set (firmware
 * crashed during init, in which case the crash area is dumped).
 * In legacy-irq mode the interrupt-enable write is repeated each
 * iteration to paper over the CORE_BASE write race (see init_irq).
 * Returns 0 or a negative errno (error-return lines elided from
 * this extract).
 */
2442 static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
2444 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2445 unsigned long timeout;
2448 ath10k_dbg(ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
2450 timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
2453 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2455 ath10k_dbg(ATH10K_DBG_BOOT, "boot target indicator %x\n", val);
2457 /* target should never return this */
2458 if (val == 0xffffffff)
2461 /* the device has crashed so don't bother trying anymore */
2462 if (val & FW_IND_EVENT_PENDING)
2465 if (val & FW_IND_INITIALIZED)
2468 if (ar_pci->num_msi_intrs == 0)
2469 /* Fix potential race by repeating CORE_BASE writes */
2470 ath10k_pci_soc_write32(ar, PCIE_INTR_ENABLE_ADDRESS,
2471 PCIE_INTR_FIRMWARE_MASK |
2472 PCIE_INTR_CE_MASK_ALL);
2475 } while (time_before(jiffies, timeout));
2477 if (val == 0xffffffff) {
2478 ath10k_err("failed to read device register, device is gone\n");
2482 if (val & FW_IND_EVENT_PENDING) {
2483 ath10k_warn("device has crashed during init\n");
2484 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
2485 val & ~FW_IND_EVENT_PENDING);
2486 ath10k_pci_hif_dump_area(ar);
2490 if (!(val & FW_IND_INITIALIZED)) {
2491 ath10k_err("failed to receive initialized event from target: %08x\n",
2496 ath10k_dbg(ATH10K_DBG_BOOT, "boot target initialised\n");
/*
 * Full cold reset: assert SOC_GLOBAL_RESET, poll RTC_STATE until the
 * cold-reset bit is observed, then deassert and poll until it clears.
 * Bounded by ATH_PCI_RESET_WAIT_MAX iterations each way.
 */
2500 static int ath10k_pci_cold_reset(struct ath10k *ar)
2505 ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset\n");
2507 /* Put Target, including PCIe, into RESET. */
2508 val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
2510 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2512 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2513 if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2514 RTC_STATE_COLD_RESET_MASK)
2519 /* Pull Target, including PCIe, out of RESET. */
2521 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2523 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2524 if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2525 RTC_STATE_COLD_RESET_MASK))
2530 ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset complete\n");
/*
 * Claim the PCI device: enable it, request BAR_NUM, enforce 32-bit
 * DMA masks, enable bus mastering, apply the ASPM-disable workaround
 * in the link control register (offset 0x80), and iomap the register
 * BAR. Error paths unwind through the labels at the bottom.
 */
2535 static int ath10k_pci_claim(struct ath10k *ar)
2537 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2538 struct pci_dev *pdev = ar_pci->pdev;
2542 pci_set_drvdata(pdev, ar);
2544 ret = pci_enable_device(pdev);
2546 ath10k_err("failed to enable pci device: %d\n", ret);
2550 ret = pci_request_region(pdev, BAR_NUM, "ath");
2552 ath10k_err("failed to request region BAR%d: %d\n", BAR_NUM,
2557 /* Target expects 32 bit DMA. Enforce it. */
2558 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2560 ath10k_err("failed to set dma mask to 32-bit: %d\n", ret);
2564 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2566 ath10k_err("failed to set consistent dma mask to 32-bit: %d\n",
2571 pci_set_master(pdev);
2573 /* Workaround: Disable ASPM */
2574 pci_read_config_dword(pdev, 0x80, &lcr_val);
2575 pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2577 /* Arrange for access to Target SoC registers. */
2578 ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
2580 ath10k_err("failed to iomap BAR%d\n", BAR_NUM);
2585 ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
/* error unwind labels (label names elided from extract) */
2589 pci_clear_master(pdev);
2592 pci_release_region(pdev, BAR_NUM);
2595 pci_disable_device(pdev);
/* Release everything claimed by ath10k_pci_claim(), in reverse order. */
2600 static void ath10k_pci_release(struct ath10k *ar)
2602 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2603 struct pci_dev *pdev = ar_pci->pdev;
2605 pci_iounmap(pdev, ar_pci->mem);
2606 pci_release_region(pdev, BAR_NUM);
2607 pci_clear_master(pdev);
2608 pci_disable_device(pdev);
/*
 * PCI probe: create the ath10k core object, claim the device, wake it
 * and read the chip id (all-ones means the read failed), allocate copy
 * engines and register the device with the ath10k core. Failure paths
 * unwind via the goto labels at the bottom.
 */
2611 static int ath10k_pci_probe(struct pci_dev *pdev,
2612 const struct pci_device_id *pci_dev)
2616 struct ath10k_pci *ar_pci;
2619 ath10k_dbg(ATH10K_DBG_PCI, "pci probe\n");
2621 ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev,
2622 &ath10k_pci_hif_ops);
2624 ath10k_err("failed to allocate core\n");
2628 ar_pci = ath10k_pci_priv(ar);
2629 ar_pci->pdev = pdev;
2630 ar_pci->dev = &pdev->dev;
2633 spin_lock_init(&ar_pci->ce_lock);
2635 ret = ath10k_pci_claim(ar);
2637 ath10k_err("failed to claim device: %d\n", ret);
2638 goto err_core_destroy;
2641 ret = ath10k_pci_wake(ar);
2643 ath10k_err("failed to wake up: %d\n", ret);
2647 chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
2648 if (chip_id == 0xffffffff) {
2649 ath10k_err("failed to get chip id\n");
2653 ret = ath10k_pci_alloc_ce(ar);
2655 ath10k_err("failed to allocate copy engine pipes: %d\n", ret);
2659 ret = ath10k_core_register(ar, chip_id);
2661 ath10k_err("failed to register driver core: %d\n", ret);
/* error unwind labels (label names elided from extract) */
2668 ath10k_pci_free_ce(ar);
2671 ath10k_pci_sleep(ar);
2674 ath10k_pci_release(ar);
2677 ath10k_core_destroy(ar);
/*
 * PCI remove: unregister from the ath10k core, then free the copy
 * engines, put the device to sleep, release PCI resources and destroy
 * the core object -- the exact reverse of probe.
 */
2682 static void ath10k_pci_remove(struct pci_dev *pdev)
2684 struct ath10k *ar = pci_get_drvdata(pdev);
2685 struct ath10k_pci *ar_pci;
2687 ath10k_dbg(ATH10K_DBG_PCI, "pci remove\n");
2692 ar_pci = ath10k_pci_priv(ar);
2697 ath10k_core_unregister(ar);
2698 ath10k_pci_free_ce(ar);
2699 ath10k_pci_sleep(ar);
2700 ath10k_pci_release(ar);
2701 ath10k_core_destroy(ar);
2704 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
/* PCI driver registration structure (id table declared near the top of
 * the file). */
2706 static struct pci_driver ath10k_pci_driver = {
2707 .name = "ath10k_pci",
2708 .id_table = ath10k_pci_id_table,
2709 .probe = ath10k_pci_probe,
2710 .remove = ath10k_pci_remove,
/* Module init: register the PCI driver; logs and (return elided)
 * propagates any registration failure. */
2713 static int __init ath10k_pci_init(void)
2717 ret = pci_register_driver(&ath10k_pci_driver);
2719 ath10k_err("failed to register PCI driver: %d\n", ret);
2723 module_init(ath10k_pci_init);
/* Module exit: unregister the PCI driver. */
2725 static void __exit ath10k_pci_exit(void)
2727 pci_unregister_driver(&ath10k_pci_driver);
2732 MODULE_AUTHOR("Qualcomm Atheros");
2733 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2734 MODULE_LICENSE("Dual BSD/GPL");
2735 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_3_FILE);
2736 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);