2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 #include <linux/pci.h>
19 #include <linux/module.h>
20 #include <linux/interrupt.h>
21 #include <linux/spinlock.h>
22 #include <linux/bitops.h>
27 #include "targaddrs.h"
36 enum ath10k_pci_irq_mode {
37 ATH10K_PCI_IRQ_AUTO = 0,
38 ATH10K_PCI_IRQ_LEGACY = 1,
39 ATH10K_PCI_IRQ_MSI = 2,
42 enum ath10k_pci_reset_mode {
43 ATH10K_PCI_RESET_AUTO = 0,
44 ATH10K_PCI_RESET_WARM_ONLY = 1,
47 static unsigned int ath10k_pci_target_ps;
48 static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
49 static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
51 module_param_named(target_ps, ath10k_pci_target_ps, uint, 0644);
52 MODULE_PARM_DESC(target_ps, "Enable ath10k Target (SoC) PS option");
54 module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
55 MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
57 module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
58 MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
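/* Illustrative note (not in the original source): these parameters can be set
 * at module load time, e.g. "modprobe ath10k_pci irq_mode=1" to force legacy
 * interrupts, and since the permissions above are 0644 they can also be
 * inspected under /sys/module/ath10k_pci/parameters/ (the driver only samples
 * them during probe/init).
 */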
60 /* how long to wait for the target to initialise, in ms */
61 #define ATH10K_PCI_TARGET_WAIT 3000
62 #define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
64 #define QCA988X_2_0_DEVICE_ID (0x003c)
66 static const struct pci_device_id ath10k_pci_id_table[] = {
67 { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
71 static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
74 static int ath10k_pci_post_rx(struct ath10k *ar);
75 static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
77 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
78 static int ath10k_pci_cold_reset(struct ath10k *ar);
79 static int ath10k_pci_warm_reset(struct ath10k *ar);
80 static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
81 static int ath10k_pci_init_irq(struct ath10k *ar);
82 static int ath10k_pci_deinit_irq(struct ath10k *ar);
83 static int ath10k_pci_request_irq(struct ath10k *ar);
84 static void ath10k_pci_free_irq(struct ath10k *ar);
85 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
86 struct ath10k_ce_pipe *rx_pipe,
87 struct bmi_xfer *xfer);
89 static const struct ce_attr host_ce_config_wlan[] = {
90 /* CE0: host->target HTC control and raw streams */
92 .flags = CE_ATTR_FLAGS,
98 /* CE1: target->host HTT + HTC control */
100 .flags = CE_ATTR_FLAGS,
103 .dest_nentries = 512,
106 /* CE2: target->host WMI */
108 .flags = CE_ATTR_FLAGS,
114 /* CE3: host->target WMI */
116 .flags = CE_ATTR_FLAGS,
122 /* CE4: host->target HTT */
124 .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
125 .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
132 .flags = CE_ATTR_FLAGS,
138 /* CE6: target autonomous hif_memcpy */
140 .flags = CE_ATTR_FLAGS,
146 /* CE7: ce_diag, the Diagnostic Window */
148 .flags = CE_ATTR_FLAGS,
150 .src_sz_max = DIAG_TRANSFER_LIMIT,
155 /* Target firmware's Copy Engine configuration. */
156 static const struct ce_pipe_config target_ce_config_wlan[] = {
157 /* CE0: host->target HTC control and raw streams */
160 .pipedir = PIPEDIR_OUT,
163 .flags = CE_ATTR_FLAGS,
167 /* CE1: target->host HTT + HTC control */
170 .pipedir = PIPEDIR_IN,
173 .flags = CE_ATTR_FLAGS,
177 /* CE2: target->host WMI */
180 .pipedir = PIPEDIR_IN,
183 .flags = CE_ATTR_FLAGS,
187 /* CE3: host->target WMI */
190 .pipedir = PIPEDIR_OUT,
193 .flags = CE_ATTR_FLAGS,
197 /* CE4: host->target HTT */
200 .pipedir = PIPEDIR_OUT,
203 .flags = CE_ATTR_FLAGS,
207 /* NB: 50% of src nentries, since tx has 2 frags */
212 .pipedir = PIPEDIR_OUT,
215 .flags = CE_ATTR_FLAGS,
219 /* CE6: Reserved for target autonomous hif_memcpy */
222 .pipedir = PIPEDIR_INOUT,
225 .flags = CE_ATTR_FLAGS,
229 /* CE7 used only by Host */
232 static bool ath10k_pci_irq_pending(struct ath10k *ar)
236 /* Check if the shared legacy irq is for us */
237 cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
238 PCIE_INTR_CAUSE_ADDRESS);
239 if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
245 static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
247 /* IMPORTANT: INTR_CLR register has to be set after
248 * INTR_ENABLE is set to 0, otherwise interrupt can not be
250 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
252 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
253 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
255 /* IMPORTANT: this extra read transaction is required to
256 * flush the posted write buffer. */
257 (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
258 PCIE_INTR_ENABLE_ADDRESS);
261 static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
263 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
264 PCIE_INTR_ENABLE_ADDRESS,
265 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
267 /* IMPORTANT: this extra read transaction is required to
268 * flush the posted write buffer. */
269 (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
270 PCIE_INTR_ENABLE_ADDRESS);
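/* Illustrative sketch (assumption, not part of the driver): the write-then-
 * read-back idiom used above, packaged as a helper. The dummy read from the
 * same BAR forces the posted PCIe write out before the caller proceeds.
 */
static inline void example_write32_flushed(struct ath10k *ar, u32 offset,
					   u32 value)
{
	ath10k_pci_write32(ar, offset, value);
	(void)ath10k_pci_read32(ar, offset);	/* flush posted write buffer */
}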
273 static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg)
275 struct ath10k *ar = arg;
276 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
278 if (ar_pci->num_msi_intrs == 0) {
279 if (!ath10k_pci_irq_pending(ar))
282 ath10k_pci_disable_and_clear_legacy_irq(ar);
285 tasklet_schedule(&ar_pci->early_irq_tasklet);
290 static int ath10k_pci_request_early_irq(struct ath10k *ar)
292 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
295 /* Regardless of whether MSI-X/MSI/legacy irqs have been set up, the first
296 * interrupt from the irq vector is triggered in all cases for FW
297 * indication/errors */
298 ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler,
299 IRQF_SHARED, "ath10k_pci (early)", ar);
301 ath10k_warn("failed to request early irq: %d\n", ret);
308 static void ath10k_pci_free_early_irq(struct ath10k *ar)
310 free_irq(ath10k_pci_priv(ar)->pdev->irq, ar);
314 * Diagnostic read/write access is provided for startup/config/debug usage.
315 * Caller must guarantee proper alignment, when applicable, and single user
318 static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
321 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
324 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
327 struct ath10k_ce_pipe *ce_diag;
328 /* Host buffer address in CE space */
330 dma_addr_t ce_data_base = 0;
331 void *data_buf = NULL;
335 * This code cannot handle reads to non-memory space. Redirect to the
336 * register read fn but preserve the multi word read capability of
339 if (address < DRAM_BASE_ADDRESS) {
340 if (!IS_ALIGNED(address, 4) ||
341 !IS_ALIGNED((unsigned long)data, 4))
344 while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
345 ar, address, (u32 *)data)) == 0)) {
346 nbytes -= sizeof(u32);
347 address += sizeof(u32);
353 ce_diag = ar_pci->ce_diag;
356 * Allocate a temporary bounce buffer to hold caller's data
357 * to be DMA'ed from Target. This guarantees
358 * 1) 4-byte alignment
359 * 2) Buffer in DMA-able space
361 orig_nbytes = nbytes;
362 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
371 memset(data_buf, 0, orig_nbytes);
373 remaining_bytes = orig_nbytes;
374 ce_data = ce_data_base;
375 while (remaining_bytes) {
376 nbytes = min_t(unsigned int, remaining_bytes,
377 DIAG_TRANSFER_LIMIT);
379 ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
383 /* Request CE to send from Target(!) address to Host buffer */
385 * The address supplied by the caller is in the
386 * Target CPU virtual address space.
388 * In order to use this address with the diagnostic CE,
389 * convert it from Target CPU virtual address space
390 * to CE address space
393 address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
395 ath10k_pci_sleep(ar);
397 ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
403 while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
407 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
413 if (nbytes != completed_nbytes) {
418 if (buf != (u32) address) {
424 while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
429 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
435 if (nbytes != completed_nbytes) {
440 if (buf != ce_data) {
445 remaining_bytes -= nbytes;
452 /* Copy data from allocated DMA buf to caller's buf */
453 WARN_ON_ONCE(orig_nbytes & 3);
454 for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
456 __le32_to_cpu(((__le32 *)data_buf)[i]);
459 ath10k_warn("failed to read diag value at 0x%x: %d\n",
463 dma_free_coherent(ar->dev, orig_nbytes, data_buf,
469 /* Read 4-byte aligned data from Target memory or register */
470 static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
473 /* Assume range doesn't cross this boundary */
474 if (address >= DRAM_BASE_ADDRESS)
475 return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));
478 *data = ath10k_pci_read32(ar, address);
479 ath10k_pci_sleep(ar);
483 static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
484 const void *data, int nbytes)
486 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
489 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
492 struct ath10k_ce_pipe *ce_diag;
493 void *data_buf = NULL;
494 u32 ce_data; /* Host buffer address in CE space */
495 dma_addr_t ce_data_base = 0;
498 ce_diag = ar_pci->ce_diag;
501 * Allocate a temporary bounce buffer to hold caller's data
502 * to be DMA'ed to Target. This guarantees
503 * 1) 4-byte alignment
504 * 2) Buffer in DMA-able space
506 orig_nbytes = nbytes;
507 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
516 /* Copy caller's data to allocated DMA buf */
517 WARN_ON_ONCE(orig_nbytes & 3);
518 for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
519 ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);
522 * The address supplied by the caller is in the
523 * Target CPU virtual address space.
525 * In order to use this address with the diagnostic CE,
527 * Target CPU virtual address space
532 address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
533 ath10k_pci_sleep(ar);
535 remaining_bytes = orig_nbytes;
536 ce_data = ce_data_base;
537 while (remaining_bytes) {
538 /* FIXME: check cast */
539 nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
541 /* Set up to receive directly into Target(!) address */
542 ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
547 * Request CE to send caller-supplied data that
548 * was copied to bounce buffer to Target(!) address.
550 ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
556 while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
561 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
567 if (nbytes != completed_nbytes) {
572 if (buf != ce_data) {
578 while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
583 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
589 if (nbytes != completed_nbytes) {
594 if (buf != address) {
599 remaining_bytes -= nbytes;
606 dma_free_coherent(ar->dev, orig_nbytes, data_buf,
611 ath10k_warn("failed to write diag value at 0x%x: %d\n",
617 /* Write 4-byte data to Target memory or register */
618 static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
621 /* Assume range doesn't cross this boundary */
622 if (address >= DRAM_BASE_ADDRESS)
623 return ath10k_pci_diag_write_mem(ar, address, &data,
627 ath10k_pci_write32(ar, address, data);
628 ath10k_pci_sleep(ar);
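/* Illustrative sketch (assumption, not part of the driver): fetching a block
 * of 32-bit words from target RAM through the diagnostic helpers above. The
 * length is given in whole words because the bounce-buffer copies above
 * assume 4-byte multiples.
 */
static int example_diag_read_words(struct ath10k *ar, u32 targ_addr,
				   u32 *buf, int nwords)
{
	/* ath10k_pci_diag_read_mem() stages the data in a DMA-coherent
	 * bounce buffer, so buf only needs to hold nwords * 4 bytes */
	return ath10k_pci_diag_read_mem(ar, targ_addr, buf,
					nwords * sizeof(u32));
}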
632 static bool ath10k_pci_target_is_awake(struct ath10k *ar)
634 void __iomem *mem = ath10k_pci_priv(ar)->mem;
636 val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
638 return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
641 int ath10k_do_pci_wake(struct ath10k *ar)
643 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
644 void __iomem *pci_addr = ar_pci->mem;
648 if (atomic_read(&ar_pci->keep_awake_count) == 0) {
650 iowrite32(PCIE_SOC_WAKE_V_MASK,
651 pci_addr + PCIE_LOCAL_BASE_ADDRESS +
652 PCIE_SOC_WAKE_ADDRESS);
654 atomic_inc(&ar_pci->keep_awake_count);
656 if (ar_pci->verified_awake)
660 if (ath10k_pci_target_is_awake(ar)) {
661 ar_pci->verified_awake = true;
665 if (tot_delay > PCIE_WAKE_TIMEOUT) {
666 ath10k_warn("target took longer %d us to wake up (awake count %d)\n",
668 atomic_read(&ar_pci->keep_awake_count));
673 tot_delay += curr_delay;
680 void ath10k_do_pci_sleep(struct ath10k *ar)
682 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
683 void __iomem *pci_addr = ar_pci->mem;
685 if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
687 ar_pci->verified_awake = false;
688 iowrite32(PCIE_SOC_WAKE_RESET,
689 pci_addr + PCIE_LOCAL_BASE_ADDRESS +
690 PCIE_SOC_WAKE_ADDRESS);
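/* Illustrative sketch (assumption, not part of the driver): the wake/sleep
 * refcounting pattern used throughout this file - bracket register access
 * with a wake that bumps keep_awake_count and a sleep that drops it again.
 */
static int example_read_soc_reg(struct ath10k *ar, u32 offset, u32 *value)
{
	int ret;

	ret = ath10k_pci_wake(ar);	/* keep_awake_count++, waits for RTC on */
	if (ret)
		return ret;

	*value = ath10k_pci_read32(ar, offset);
	ath10k_pci_sleep(ar);		/* keep_awake_count--, may reset SOC_WAKE */

	return 0;
}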
694 /* Called by lower (CE) layer when a send to Target completes. */
695 static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
697 struct ath10k *ar = ce_state->ar;
698 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
699 struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
700 void *transfer_context;
703 unsigned int transfer_id;
705 while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
707 &transfer_id) == 0) {
708 /* no need to call tx completion for NULL pointers */
709 if (transfer_context == NULL)
712 cb->tx_completion(ar, transfer_context, transfer_id);
716 /* Called by lower (CE) layer when data is received from the Target. */
717 static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
719 struct ath10k *ar = ce_state->ar;
720 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
721 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
722 struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
724 void *transfer_context;
726 unsigned int nbytes, max_nbytes;
727 unsigned int transfer_id;
729 int err, num_replenish = 0;
731 while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
732 &ce_data, &nbytes, &transfer_id,
735 skb = transfer_context;
736 max_nbytes = skb->len + skb_tailroom(skb);
737 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
738 max_nbytes, DMA_FROM_DEVICE);
740 if (unlikely(max_nbytes < nbytes)) {
741 ath10k_warn("rxed more than expected (nbytes %d, max %d)",
743 dev_kfree_skb_any(skb);
747 skb_put(skb, nbytes);
748 cb->rx_completion(ar, skb, pipe_info->pipe_num);
751 err = ath10k_pci_post_rx_pipe(pipe_info, num_replenish);
754 ath10k_warn("failed to replenish CE rx ring %d (%d bufs): %d\n",
755 pipe_info->pipe_num, num_replenish, err);
759 static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
760 struct ath10k_hif_sg_item *items, int n_items)
762 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
763 struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
764 struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
765 struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
766 unsigned int nentries_mask;
767 unsigned int sw_index;
768 unsigned int write_index;
771 spin_lock_bh(&ar_pci->ce_lock);
773 nentries_mask = src_ring->nentries_mask;
774 sw_index = src_ring->sw_index;
775 write_index = src_ring->write_index;
777 if (unlikely(CE_RING_DELTA(nentries_mask,
778 write_index, sw_index - 1) < n_items)) {
783 for (i = 0; i < n_items - 1; i++) {
784 ath10k_dbg(ATH10K_DBG_PCI,
785 "pci tx item %d paddr 0x%08x len %d n_items %d\n",
786 i, items[i].paddr, items[i].len, n_items);
787 ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
788 items[i].vaddr, items[i].len);
790 err = ath10k_ce_send_nolock(ce_pipe,
791 items[i].transfer_context,
794 items[i].transfer_id,
795 CE_SEND_FLAG_GATHER);
800 /* `i` is equal to `n_items - 1` after the for() loop above */
802 ath10k_dbg(ATH10K_DBG_PCI,
803 "pci tx item %d paddr 0x%08x len %d n_items %d\n",
804 i, items[i].paddr, items[i].len, n_items);
805 ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
806 items[i].vaddr, items[i].len);
808 err = ath10k_ce_send_nolock(ce_pipe,
809 items[i].transfer_context,
812 items[i].transfer_id,
817 spin_unlock_bh(&ar_pci->ce_lock);
822 __ath10k_ce_send_revert(ce_pipe);
824 spin_unlock_bh(&ar_pci->ce_lock);
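/* Illustrative sketch (assumption; field names are taken from the usage in
 * ath10k_pci_hif_tx_sg() above): handing two pre-mapped fragments to the
 * tx_sg hif op so they are queued as a single gather transfer.
 */
static int example_tx_two_frags(struct ath10k *ar, u8 pipe_id,
				dma_addr_t paddr0, void *vaddr0, u16 len0,
				dma_addr_t paddr1, void *vaddr1, u16 len1)
{
	struct ath10k_hif_sg_item items[] = {
		{ .paddr = paddr0, .vaddr = vaddr0, .len = len0 },
		{ .paddr = paddr1, .vaddr = vaddr1, .len = len1 },
	};

	/* both items go onto the CE source ring under one ce_lock hold */
	return ath10k_pci_hif_tx_sg(ar, pipe_id, items, ARRAY_SIZE(items));
}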
828 static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
830 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
832 ath10k_dbg(ATH10K_DBG_PCI, "pci hif get free queue number\n");
834 return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
837 static void ath10k_pci_hif_dump_area(struct ath10k *ar)
839 u32 reg_dump_area = 0;
840 u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
845 ath10k_err("firmware crashed!\n");
846 ath10k_err("hardware name %s version 0x%x\n",
847 ar->hw_params.name, ar->target_version);
848 ath10k_err("firmware version: %s\n", ar->hw->wiphy->fw_version);
850 host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
851 ret = ath10k_pci_diag_read_mem(ar, host_addr,
852 &reg_dump_area, sizeof(u32));
854 ath10k_err("failed to read FW dump area address: %d\n", ret);
858 ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);
860 ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
862 REG_DUMP_COUNT_QCA988X * sizeof(u32));
864 ath10k_err("failed to read FW dump area: %d\n", ret);
868 BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
870 ath10k_err("target Register Dump\n");
871 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
872 ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
875 reg_dump_values[i + 1],
876 reg_dump_values[i + 2],
877 reg_dump_values[i + 3]);
879 queue_work(ar->workqueue, &ar->restart_work);
882 static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
885 ath10k_dbg(ATH10K_DBG_PCI, "pci hif send complete check\n");
890 * Decide whether to actually poll for completions, or just
891 * wait for a later chance.
892 * If there seem to be plenty of resources left, then just wait
893 * since checking involves reading a CE register, which is a
894 * relatively expensive operation.
896 resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
899 * If at least 50% of the total resources are still available,
900 * don't bother checking again yet.
902 if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
905 ath10k_ce_per_engine_service(ar, pipe);
908 static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
909 struct ath10k_hif_cb *callbacks)
911 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
913 ath10k_dbg(ATH10K_DBG_PCI, "pci hif set callbacks\n");
915 memcpy(&ar_pci->msg_callbacks_current, callbacks,
916 sizeof(ar_pci->msg_callbacks_current));
919 static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
921 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
922 const struct ce_attr *attr;
923 struct ath10k_pci_pipe *pipe_info;
924 int pipe_num, disable_interrupts;
926 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
927 pipe_info = &ar_pci->pipe_info[pipe_num];
929 /* Handle Diagnostic CE specially */
930 if (pipe_info->ce_hdl == ar_pci->ce_diag)
933 attr = &host_ce_config_wlan[pipe_num];
935 if (attr->src_nentries) {
936 disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
937 ath10k_ce_send_cb_register(pipe_info->ce_hdl,
938 ath10k_pci_ce_send_done,
942 if (attr->dest_nentries)
943 ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
944 ath10k_pci_ce_recv_data);
950 static void ath10k_pci_kill_tasklet(struct ath10k *ar)
952 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
955 tasklet_kill(&ar_pci->intr_tq);
956 tasklet_kill(&ar_pci->msi_fw_err);
957 tasklet_kill(&ar_pci->early_irq_tasklet);
959 for (i = 0; i < CE_COUNT; i++)
960 tasklet_kill(&ar_pci->pipe_info[i].intr);
963 /* TODO - temporary mapping while we have too few CE's */
964 static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
965 u16 service_id, u8 *ul_pipe,
966 u8 *dl_pipe, int *ul_is_polled,
971 ath10k_dbg(ATH10K_DBG_PCI, "pci hif map service\n");
973 /* polling for received messages not supported */
976 switch (service_id) {
977 case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
979 * Host->target HTT gets its own pipe, so it can be polled
980 * while other pipes are interrupt driven.
984 * Use the same target->host pipe for HTC ctrl, HTC raw
990 case ATH10K_HTC_SVC_ID_RSVD_CTRL:
991 case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
993 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
994 * HTC_CTRL_RSVD_SVC could share the same pipe as the
995 * WMI services. So, if another CE is needed, change
996 * this to *ul_pipe = 3, which frees up CE 0.
1003 case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
1004 case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
1005 case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
1006 case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
1008 case ATH10K_HTC_SVC_ID_WMI_CONTROL:
1014 /* pipe 6 reserved */
1015 /* pipe 7 reserved */
1022 (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
1027 static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1028 u8 *ul_pipe, u8 *dl_pipe)
1030 int ul_is_polled, dl_is_polled;
1032 ath10k_dbg(ATH10K_DBG_PCI, "pci hif get default pipe\n");
1034 (void)ath10k_pci_hif_map_service_to_pipe(ar,
1035 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1042 static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
1045 struct ath10k *ar = pipe_info->hif_ce_state;
1046 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1047 struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
1048 struct sk_buff *skb;
1052 if (pipe_info->buf_sz == 0)
1055 for (i = 0; i < num; i++) {
1056 skb = dev_alloc_skb(pipe_info->buf_sz);
1058 ath10k_warn("failed to allocate skbuff for pipe %d\n",
1064 WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
1066 ce_data = dma_map_single(ar->dev, skb->data,
1067 skb->len + skb_tailroom(skb),
1070 if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
1071 ath10k_warn("failed to DMA map sk_buff\n");
1072 dev_kfree_skb_any(skb);
1077 ATH10K_SKB_CB(skb)->paddr = ce_data;
1079 pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
1081 PCI_DMA_FROMDEVICE);
1083 ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
1086 ath10k_warn("failed to enqueue to pipe %d: %d\n",
1095 ath10k_pci_rx_pipe_cleanup(pipe_info);
1099 static int ath10k_pci_post_rx(struct ath10k *ar)
1101 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1102 struct ath10k_pci_pipe *pipe_info;
1103 const struct ce_attr *attr;
1104 int pipe_num, ret = 0;
1106 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1107 pipe_info = &ar_pci->pipe_info[pipe_num];
1108 attr = &host_ce_config_wlan[pipe_num];
1110 if (attr->dest_nentries == 0)
1113 ret = ath10k_pci_post_rx_pipe(pipe_info,
1114 attr->dest_nentries - 1);
1116 ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
1119 for (; pipe_num >= 0; pipe_num--) {
1120 pipe_info = &ar_pci->pipe_info[pipe_num];
1121 ath10k_pci_rx_pipe_cleanup(pipe_info);
1130 static int ath10k_pci_hif_start(struct ath10k *ar)
1132 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1135 ath10k_dbg(ATH10K_DBG_BOOT, "boot hif start\n");
1137 ath10k_pci_free_early_irq(ar);
1138 ath10k_pci_kill_tasklet(ar);
1140 ret = ath10k_pci_request_irq(ar);
1142 ath10k_warn("failed to post RX buffers for all pipes: %d\n",
1147 ret = ath10k_pci_setup_ce_irq(ar);
1149 ath10k_warn("failed to setup CE interrupts: %d\n", ret);
1153 /* Post buffers once to start things off. */
1154 ret = ath10k_pci_post_rx(ar);
1156 ath10k_warn("failed to post RX buffers for all pipes: %d\n",
1161 ar_pci->started = 1;
1165 ath10k_ce_disable_interrupts(ar);
1166 ath10k_pci_free_irq(ar);
1167 ath10k_pci_kill_tasklet(ar);
1169 /* Though there should be no interrupts (device was reset)
1170 * power_down() expects the early IRQ to be installed as per the
1171 * driver lifecycle. */
1172 ret_early = ath10k_pci_request_early_irq(ar);
1174 ath10k_warn("failed to re-enable early irq: %d\n", ret_early);
1179 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
1182 struct ath10k_pci *ar_pci;
1183 struct ath10k_ce_pipe *ce_hdl;
1185 struct sk_buff *netbuf;
1188 buf_sz = pipe_info->buf_sz;
1190 /* Unused Copy Engine */
1194 ar = pipe_info->hif_ce_state;
1195 ar_pci = ath10k_pci_priv(ar);
1197 if (!ar_pci->started)
1200 ce_hdl = pipe_info->ce_hdl;
1202 while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
1204 dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
1205 netbuf->len + skb_tailroom(netbuf),
1207 dev_kfree_skb_any(netbuf);
1211 static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
1214 struct ath10k_pci *ar_pci;
1215 struct ath10k_ce_pipe *ce_hdl;
1216 struct sk_buff *netbuf;
1218 unsigned int nbytes;
1222 buf_sz = pipe_info->buf_sz;
1224 /* Unused Copy Engine */
1228 ar = pipe_info->hif_ce_state;
1229 ar_pci = ath10k_pci_priv(ar);
1231 if (!ar_pci->started)
1234 ce_hdl = pipe_info->ce_hdl;
1236 while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
1237 &ce_data, &nbytes, &id) == 0) {
1238 /* no need to call tx completion for NULL pointers */
1242 ar_pci->msg_callbacks_current.tx_completion(ar,
1249 * Cleanup residual buffers for device shutdown:
1250 * buffers that were enqueued for receive
1251 * buffers that were to be sent
1252 * Note: Buffers that had completed but which were
1253 * not yet processed are on a completion queue. They
1254 * are handled when the completion thread shuts down.
1256 static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1258 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1261 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1262 struct ath10k_pci_pipe *pipe_info;
1264 pipe_info = &ar_pci->pipe_info[pipe_num];
1265 ath10k_pci_rx_pipe_cleanup(pipe_info);
1266 ath10k_pci_tx_pipe_cleanup(pipe_info);
1270 static void ath10k_pci_ce_deinit(struct ath10k *ar)
1274 for (i = 0; i < CE_COUNT; i++)
1275 ath10k_ce_deinit_pipe(ar, i);
1278 static void ath10k_pci_hif_stop(struct ath10k *ar)
1280 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1283 ath10k_dbg(ATH10K_DBG_BOOT, "boot hif stop\n");
1285 if (WARN_ON(!ar_pci->started))
1288 ret = ath10k_ce_disable_interrupts(ar);
1290 ath10k_warn("failed to disable CE interrupts: %d\n", ret);
1292 ath10k_pci_free_irq(ar);
1293 ath10k_pci_kill_tasklet(ar);
1295 ret = ath10k_pci_request_early_irq(ar);
1297 ath10k_warn("failed to re-enable early irq: %d\n", ret);
1299 /* At this point, asynchronous threads are stopped, the target should
1300 * not DMA nor interrupt. We process the leftovers and then free
1301 * everything else up. */
1303 ath10k_pci_buffer_cleanup(ar);
1305 /* Make sure the device won't access any structures on the host by
1306 * resetting it. The device was fed with PCI CE ringbuffer
1307 * configuration during init. If ringbuffers are freed and the device
1308 * were to access them this could lead to memory corruption on the
1310 ath10k_pci_warm_reset(ar);
1312 ar_pci->started = 0;
1315 static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1316 void *req, u32 req_len,
1317 void *resp, u32 *resp_len)
1319 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1320 struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1321 struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1322 struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
1323 struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
1324 dma_addr_t req_paddr = 0;
1325 dma_addr_t resp_paddr = 0;
1326 struct bmi_xfer xfer = {};
1327 void *treq, *tresp = NULL;
1332 if (resp && !resp_len)
1335 if (resp && resp_len && *resp_len == 0)
1338 treq = kmemdup(req, req_len, GFP_KERNEL);
1342 req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
1343 ret = dma_mapping_error(ar->dev, req_paddr);
1347 if (resp && resp_len) {
1348 tresp = kzalloc(*resp_len, GFP_KERNEL);
1354 resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
1356 ret = dma_mapping_error(ar->dev, resp_paddr);
1360 xfer.wait_for_resp = true;
1363 ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
1366 ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
1370 ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
1373 unsigned int unused_nbytes;
1374 unsigned int unused_id;
1376 ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
1377 &unused_nbytes, &unused_id);
1379 /* non-zero means we did not time out */
1387 ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1388 dma_unmap_single(ar->dev, resp_paddr,
1389 *resp_len, DMA_FROM_DEVICE);
1392 dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
1394 if (ret == 0 && resp_len) {
1395 *resp_len = min(*resp_len, xfer.resp_len);
1396 memcpy(resp, tresp, xfer.resp_len);
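/* Illustrative sketch (assumption, not part of the driver): a single BMI
 * request/response exchange through the function above. resp_len is in/out:
 * buffer capacity on entry, actual response length on return.
 */
static int example_bmi_exchange(struct ath10k *ar, void *cmd, u32 cmd_len,
				void *resp_buf, u32 resp_buf_len,
				u32 *resp_out_len)
{
	u32 resp_len = resp_buf_len;
	int ret;

	ret = ath10k_pci_hif_exchange_bmi_msg(ar, cmd, cmd_len,
					      resp_buf, &resp_len);
	if (ret)
		return ret;

	*resp_out_len = resp_len;
	return 0;
}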
1405 static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
1407 struct bmi_xfer *xfer;
1409 unsigned int nbytes;
1410 unsigned int transfer_id;
1412 if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
1413 &nbytes, &transfer_id))
1416 xfer->tx_done = true;
1419 static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
1421 struct bmi_xfer *xfer;
1423 unsigned int nbytes;
1424 unsigned int transfer_id;
1427 if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
1428 &nbytes, &transfer_id, &flags))
1431 if (!xfer->wait_for_resp) {
1432 ath10k_warn("unexpected: BMI data received; ignoring\n");
1436 xfer->resp_len = nbytes;
1437 xfer->rx_done = true;
1440 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
1441 struct ath10k_ce_pipe *rx_pipe,
1442 struct bmi_xfer *xfer)
1444 unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1446 while (time_before_eq(jiffies, timeout)) {
1447 ath10k_pci_bmi_send_done(tx_pipe);
1448 ath10k_pci_bmi_recv_data(rx_pipe);
1450 if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
1460 * Map from service/endpoint to Copy Engine.
1461 * This table is derived from the CE_PCI TABLE, above.
1462 * It is passed to the Target at startup for use by firmware.
1464 static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
1466 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1467 PIPEDIR_OUT, /* out = UL = host -> target */
1471 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1472 PIPEDIR_IN, /* in = DL = target -> host */
1476 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1477 PIPEDIR_OUT, /* out = UL = host -> target */
1481 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1482 PIPEDIR_IN, /* in = DL = target -> host */
1486 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1487 PIPEDIR_OUT, /* out = UL = host -> target */
1491 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1492 PIPEDIR_IN, /* in = DL = target -> host */
1496 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1497 PIPEDIR_OUT, /* out = UL = host -> target */
1501 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1502 PIPEDIR_IN, /* in = DL = target -> host */
1506 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1507 PIPEDIR_OUT, /* out = UL = host -> target */
1511 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1512 PIPEDIR_IN, /* in = DL = target -> host */
1516 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1517 PIPEDIR_OUT, /* out = UL = host -> target */
1518 0, /* could be moved to 3 (share with WMI) */
1521 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1522 PIPEDIR_IN, /* in = DL = target -> host */
1526 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
1527 PIPEDIR_OUT, /* out = UL = host -> target */
1531 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
1532 PIPEDIR_IN, /* in = DL = target -> host */
1536 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1537 PIPEDIR_OUT, /* out = UL = host -> target */
1541 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1542 PIPEDIR_IN, /* in = DL = target -> host */
1546 /* (Additions here) */
1548 { /* Must be last */
1556 * Send an interrupt to the device to wake up the Target CPU
1557 * so it has an opportunity to notice any changed state.
1559 static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1564 ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
1568 ath10k_warn("failed to read core_ctrl: %d\n", ret);
1572 /* A_INUM_FIRMWARE interrupt to Target CPU */
1573 core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1575 ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
1579 ath10k_warn("failed to set target CPU interrupt mask: %d\n",
1587 static int ath10k_pci_init_config(struct ath10k *ar)
1589 u32 interconnect_targ_addr;
1590 u32 pcie_state_targ_addr = 0;
1591 u32 pipe_cfg_targ_addr = 0;
1592 u32 svc_to_pipe_map = 0;
1593 u32 pcie_config_flags = 0;
1595 u32 ealloc_targ_addr;
1597 u32 flag2_targ_addr;
1600 /* Download to Target the CE Config and the service-to-CE map */
1601 interconnect_targ_addr =
1602 host_interest_item_address(HI_ITEM(hi_interconnect_state));
1604 /* Supply Target-side CE configuration */
1605 ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
1606 &pcie_state_targ_addr);
1608 ath10k_err("Failed to get pcie state addr: %d\n", ret);
1612 if (pcie_state_targ_addr == 0) {
1614 ath10k_err("Invalid pcie state addr\n");
1618 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1619 offsetof(struct pcie_state,
1621 &pipe_cfg_targ_addr);
1623 ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
1627 if (pipe_cfg_targ_addr == 0) {
1629 ath10k_err("Invalid pipe cfg addr\n");
1633 ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1634 target_ce_config_wlan,
1635 sizeof(target_ce_config_wlan));
1638 ath10k_err("Failed to write pipe cfg: %d\n", ret);
1642 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1643 offsetof(struct pcie_state,
1647 ath10k_err("Failed to get svc/pipe map: %d\n", ret);
1651 if (svc_to_pipe_map == 0) {
1653 ath10k_err("Invalid svc_to_pipe map\n");
1657 ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1658 target_service_to_ce_map_wlan,
1659 sizeof(target_service_to_ce_map_wlan));
1661 ath10k_err("Failed to write svc/pipe map: %d\n", ret);
1665 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1666 offsetof(struct pcie_state,
1668 &pcie_config_flags);
1670 ath10k_err("Failed to get pcie config_flags: %d\n", ret);
1674 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1676 ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
1677 offsetof(struct pcie_state, config_flags),
1679 sizeof(pcie_config_flags));
1681 ath10k_err("Failed to write pcie config_flags: %d\n", ret);
1685 /* configure early allocation */
1686 ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1688 ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
1690 ath10k_err("Faile to get early alloc val: %d\n", ret);
1694 /* first bank is switched to IRAM */
1695 ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1696 HI_EARLY_ALLOC_MAGIC_MASK);
1697 ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1698 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1700 ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1702 ath10k_err("Failed to set early alloc val: %d\n", ret);
1706 /* Tell Target to proceed with initialization */
1707 flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1709 ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1711 ath10k_err("Failed to get option val: %d\n", ret);
1715 flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1717 ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1719 ath10k_err("Failed to set option val: %d\n", ret);
1726 static int ath10k_pci_alloc_ce(struct ath10k *ar)
1730 for (i = 0; i < CE_COUNT; i++) {
1731 ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
1733 ath10k_err("failed to allocate copy engine pipe %d: %d\n",
1742 static void ath10k_pci_free_ce(struct ath10k *ar)
1746 for (i = 0; i < CE_COUNT; i++)
1747 ath10k_ce_free_pipe(ar, i);
1750 static int ath10k_pci_ce_init(struct ath10k *ar)
1752 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1753 struct ath10k_pci_pipe *pipe_info;
1754 const struct ce_attr *attr;
1757 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1758 pipe_info = &ar_pci->pipe_info[pipe_num];
1759 pipe_info->ce_hdl = &ar_pci->ce_states[pipe_num];
1760 pipe_info->pipe_num = pipe_num;
1761 pipe_info->hif_ce_state = ar;
1762 attr = &host_ce_config_wlan[pipe_num];
1764 ret = ath10k_ce_init_pipe(ar, pipe_num, attr);
1766 ath10k_err("failed to initialize copy engine pipe %d: %d\n",
1771 if (pipe_num == CE_COUNT - 1) {
1773 * Reserve the ultimate CE for
1774 * diagnostic Window support
1776 ar_pci->ce_diag = pipe_info->ce_hdl;
1780 pipe_info->buf_sz = (size_t) (attr->src_sz_max);
1786 static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1788 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1791 ath10k_pci_wake(ar);
1793 fw_indicator = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
1795 if (fw_indicator & FW_IND_EVENT_PENDING) {
1796 /* ACK: clear Target-side pending event */
1797 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
1798 fw_indicator & ~FW_IND_EVENT_PENDING);
1800 if (ar_pci->started) {
1801 ath10k_pci_hif_dump_area(ar);
1804 * Probable Target failure before we're prepared
1805 * to handle it. Generally unexpected.
1807 ath10k_warn("early firmware event indicated\n");
1811 ath10k_pci_sleep(ar);
1814 /* this function effectively clears target memory controller assert line */
1815 static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
1819 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1820 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
1821 val | SOC_RESET_CONTROL_SI0_RST_MASK);
1822 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1826 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1827 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
1828 val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
1829 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1834 static int ath10k_pci_warm_reset(struct ath10k *ar)
1839 ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset\n");
1841 ret = ath10k_do_pci_wake(ar);
1843 ath10k_err("failed to wake up target: %d\n", ret);
1848 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1849 PCIE_INTR_CAUSE_ADDRESS);
1850 ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
1852 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1854 ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
1857 /* disable pending irqs */
1858 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1859 PCIE_INTR_ENABLE_ADDRESS, 0);
1861 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1862 PCIE_INTR_CLR_ADDRESS, ~0);
1866 /* clear fw indicator */
1867 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
1869 /* clear target LF timer interrupts */
1870 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1871 SOC_LF_TIMER_CONTROL0_ADDRESS);
1872 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
1873 SOC_LF_TIMER_CONTROL0_ADDRESS,
1874 val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
1877 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1878 SOC_RESET_CONTROL_ADDRESS);
1879 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1880 val | SOC_RESET_CONTROL_CE_RST_MASK);
1881 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1882 SOC_RESET_CONTROL_ADDRESS);
1886 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1887 val & ~SOC_RESET_CONTROL_CE_RST_MASK);
1888 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1889 SOC_RESET_CONTROL_ADDRESS);
1892 ath10k_pci_warm_reset_si0(ar);
1895 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1896 PCIE_INTR_CAUSE_ADDRESS);
1897 ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
1899 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1901 ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
1904 /* CPU warm reset */
1905 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1906 SOC_RESET_CONTROL_ADDRESS);
1907 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1908 val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
1910 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1911 SOC_RESET_CONTROL_ADDRESS);
1912 ath10k_dbg(ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", val);
1916 ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset complete\n");
1918 ath10k_do_pci_sleep(ar);
1922 static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
1924 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1925 const char *irq_mode;
1929 * Bring the target up cleanly.
1931 * The target may be in an undefined state with an AUX-powered Target
1932 * and a Host in WoW mode. If the Host crashes, loses power, or is
1933 * restarted (without unloading the driver) then the Target is left
1934 * (aux) powered and running. On a subsequent driver load, the Target
1935 * is in an unexpected state. We try to catch that here in order to
1936 * reset the Target and retry the probe.
1939 ret = ath10k_pci_cold_reset(ar);
1941 ret = ath10k_pci_warm_reset(ar);
1944 ath10k_err("failed to reset target: %d\n", ret);
1948 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1949 /* Force AWAKE forever */
1950 ath10k_do_pci_wake(ar);
1952 ret = ath10k_pci_ce_init(ar);
1954 ath10k_err("failed to initialize CE: %d\n", ret);
1958 ret = ath10k_ce_disable_interrupts(ar);
1960 ath10k_err("failed to disable CE interrupts: %d\n", ret);
1964 ret = ath10k_pci_init_irq(ar);
1966 ath10k_err("failed to init irqs: %d\n", ret);
1970 ret = ath10k_pci_request_early_irq(ar);
1972 ath10k_err("failed to request early irq: %d\n", ret);
1973 goto err_deinit_irq;
1976 ret = ath10k_pci_wait_for_target_init(ar);
1978 ath10k_err("failed to wait for target to init: %d\n", ret);
1979 goto err_free_early_irq;
1982 ret = ath10k_pci_init_config(ar);
1984 ath10k_err("failed to setup init config: %d\n", ret);
1985 goto err_free_early_irq;
1988 ret = ath10k_pci_wake_target_cpu(ar);
1990 ath10k_err("could not wake up target CPU: %d\n", ret);
1991 goto err_free_early_irq;
1994 if (ar_pci->num_msi_intrs > 1)
1996 else if (ar_pci->num_msi_intrs == 1)
1999 irq_mode = "legacy";
2001 if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
2002 ath10k_info("pci irq %s irq_mode %d reset_mode %d\n",
2003 irq_mode, ath10k_pci_irq_mode,
2004 ath10k_pci_reset_mode);
2009 ath10k_pci_free_early_irq(ar);
2011 ath10k_pci_deinit_irq(ar);
2013 ath10k_pci_ce_deinit(ar);
2014 ath10k_pci_warm_reset(ar);
2016 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
2017 ath10k_do_pci_sleep(ar);
2022 static int ath10k_pci_hif_power_up_warm(struct ath10k *ar)
2027 * Sometimes warm reset succeeds after retries.
2029 * FIXME: It might be possible to tune ath10k_pci_warm_reset() to work
2032 for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
2033 ret = __ath10k_pci_hif_power_up(ar, false);
2037 ath10k_warn("failed to warm reset (attempt %d out of %d): %d\n",
2038 i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, ret);
2044 static int ath10k_pci_hif_power_up(struct ath10k *ar)
2048 ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power up\n");
2051 * Hardware CUS232 version 2 has some issues with cold reset and the
2052 * preferred (and safer) way to perform a device reset is through a
2055 * Warm reset doesn't always work though so fall back to cold reset may
2058 ret = ath10k_pci_hif_power_up_warm(ar);
2060 ath10k_warn("failed to power up target using warm reset: %d\n",
2063 if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
2066 ath10k_warn("trying cold reset\n");
2068 ret = __ath10k_pci_hif_power_up(ar, true);
2070 ath10k_err("failed to power up target using cold reset too (%d)\n",
2079 static void ath10k_pci_hif_power_down(struct ath10k *ar)
2081 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2083 ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power down\n");
2085 ath10k_pci_free_early_irq(ar);
2086 ath10k_pci_kill_tasklet(ar);
2087 ath10k_pci_deinit_irq(ar);
2088 ath10k_pci_ce_deinit(ar);
2089 ath10k_pci_warm_reset(ar);
2091 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
2092 ath10k_do_pci_sleep(ar);
2097 #define ATH10K_PCI_PM_CONTROL 0x44
2099 static int ath10k_pci_hif_suspend(struct ath10k *ar)
2101 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2102 struct pci_dev *pdev = ar_pci->pdev;
2105 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2107 if ((val & 0x000000ff) != 0x3) {
2108 pci_save_state(pdev);
2109 pci_disable_device(pdev);
2110 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2111 (val & 0xffffff00) | 0x03);
2117 static int ath10k_pci_hif_resume(struct ath10k *ar)
2119 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2120 struct pci_dev *pdev = ar_pci->pdev;
2123 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2125 if ((val & 0x000000ff) != 0) {
2126 pci_restore_state(pdev);
2127 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2130 * Suspend/Resume resets the PCI configuration space,
2131 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
2132 * to keep PCI Tx retries from interfering with C3 CPU state
2134 pci_read_config_dword(pdev, 0x40, &val);
2136 if ((val & 0x0000ff00) != 0)
2137 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2144 static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
2145 .tx_sg = ath10k_pci_hif_tx_sg,
2146 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
2147 .start = ath10k_pci_hif_start,
2148 .stop = ath10k_pci_hif_stop,
2149 .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
2150 .get_default_pipe = ath10k_pci_hif_get_default_pipe,
2151 .send_complete_check = ath10k_pci_hif_send_complete_check,
2152 .set_callbacks = ath10k_pci_hif_set_callbacks,
2153 .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
2154 .power_up = ath10k_pci_hif_power_up,
2155 .power_down = ath10k_pci_hif_power_down,
2157 .suspend = ath10k_pci_hif_suspend,
2158 .resume = ath10k_pci_hif_resume,
2162 static void ath10k_pci_ce_tasklet(unsigned long ptr)
2164 struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
2165 struct ath10k_pci *ar_pci = pipe->ar_pci;
2167 ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
2170 static void ath10k_msi_err_tasklet(unsigned long data)
2172 struct ath10k *ar = (struct ath10k *)data;
2174 ath10k_pci_fw_interrupt_handler(ar);
2178 * Handler for a per-engine interrupt on a PARTICULAR CE.
2179 * This is used in cases where each CE has a private MSI interrupt.
2181 static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
2183 struct ath10k *ar = arg;
2184 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2185 int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
2187 if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
2188 ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
2193 * NOTE: We are able to derive ce_id from irq because we
2194 * use a one-to-one mapping for CE's 0..5.
2195 * CE's 6 & 7 do not use interrupts at all.
2197 * This mapping must be kept in sync with the mapping
2200 tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
2204 static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2206 struct ath10k *ar = arg;
2207 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2209 tasklet_schedule(&ar_pci->msi_fw_err);
2214 * Top-level interrupt handler for all PCI interrupts from a Target.
2215 * When a block of MSI interrupts is allocated, this top-level handler
2216 * is not used; instead, we directly call the correct sub-handler.
2218 static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2220 struct ath10k *ar = arg;
2221 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2223 if (ar_pci->num_msi_intrs == 0) {
2224 if (!ath10k_pci_irq_pending(ar))
2227 ath10k_pci_disable_and_clear_legacy_irq(ar);
2230 tasklet_schedule(&ar_pci->intr_tq);
2235 static void ath10k_pci_early_irq_tasklet(unsigned long data)
2237 struct ath10k *ar = (struct ath10k *)data;
2241 ret = ath10k_pci_wake(ar);
2243 ath10k_warn("failed to wake target in early irq tasklet: %d\n",
2248 fw_ind = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2249 if (fw_ind & FW_IND_EVENT_PENDING) {
2250 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
2251 fw_ind & ~FW_IND_EVENT_PENDING);
2252 ath10k_pci_hif_dump_area(ar);
2255 ath10k_pci_sleep(ar);
2256 ath10k_pci_enable_legacy_irq(ar);
2259 static void ath10k_pci_tasklet(unsigned long data)
2261 struct ath10k *ar = (struct ath10k *)data;
2262 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2264 ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
2265 ath10k_ce_per_engine_service_any(ar);
2267 /* Re-enable legacy irq that was disabled in the irq handler */
2268 if (ar_pci->num_msi_intrs == 0)
2269 ath10k_pci_enable_legacy_irq(ar);
2272 static int ath10k_pci_request_irq_msix(struct ath10k *ar)
2274 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2277 ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2278 ath10k_pci_msi_fw_handler,
2279 IRQF_SHARED, "ath10k_pci", ar);
2281 ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
2282 ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
2286 for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2287 ret = request_irq(ar_pci->pdev->irq + i,
2288 ath10k_pci_per_engine_handler,
2289 IRQF_SHARED, "ath10k_pci", ar);
2291 ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
2292 ar_pci->pdev->irq + i, ret);
2294 for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2295 free_irq(ar_pci->pdev->irq + i, ar);
2297 free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
2305 static int ath10k_pci_request_irq_msi(struct ath10k *ar)
2307 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2310 ret = request_irq(ar_pci->pdev->irq,
2311 ath10k_pci_interrupt_handler,
2312 IRQF_SHARED, "ath10k_pci", ar);
2314 ath10k_warn("failed to request MSI irq %d: %d\n",
2315 ar_pci->pdev->irq, ret);
2322 static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
2324 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2327 ret = request_irq(ar_pci->pdev->irq,
2328 ath10k_pci_interrupt_handler,
2329 IRQF_SHARED, "ath10k_pci", ar);
2331 ath10k_warn("failed to request legacy irq %d: %d\n",
2332 ar_pci->pdev->irq, ret);
2339 static int ath10k_pci_request_irq(struct ath10k *ar)
2341 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2343 switch (ar_pci->num_msi_intrs) {
2345 return ath10k_pci_request_irq_legacy(ar);
2347 return ath10k_pci_request_irq_msi(ar);
2348 case MSI_NUM_REQUEST:
2349 return ath10k_pci_request_irq_msix(ar);
2352 ath10k_warn("unknown irq configuration upon request\n");
2356 static void ath10k_pci_free_irq(struct ath10k *ar)
2358 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2361 /* There's at least one interrupt regardless of whether it's legacy INTR,
2362 * MSI or MSI-X */
2363 for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2364 free_irq(ar_pci->pdev->irq + i, ar);
2367 static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
2369 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2372 tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
2373 tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2375 tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet,
2378 for (i = 0; i < CE_COUNT; i++) {
2379 ar_pci->pipe_info[i].ar_pci = ar_pci;
2380 tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
2381 (unsigned long)&ar_pci->pipe_info[i]);
2385 static int ath10k_pci_init_irq(struct ath10k *ar)
2387 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2388 bool msix_supported = test_bit(ATH10K_PCI_FEATURE_MSI_X,
2392 ath10k_pci_init_irq_tasklets(ar);
2394 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO &&
2395 !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
2396 ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode);
2399 if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) {
2400 ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
2401 ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
2402 ar_pci->num_msi_intrs);
2410 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
2411 ar_pci->num_msi_intrs = 1;
2412 ret = pci_enable_msi(ar_pci->pdev);
2421 * A potential race occurs here: The CORE_BASE write
2422 * depends on target correctly decoding AXI address but
2423 * host won't know when target writes BAR to CORE_CTRL.
2424 * This write might get lost if target has NOT written BAR.
2425 * For now, fix the race by repeating the write in below
2426 * synchronization checking. */
2427 ar_pci->num_msi_intrs = 0;
2429 ret = ath10k_pci_wake(ar);
2431 ath10k_warn("failed to wake target: %d\n", ret);
2435 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2436 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
2437 ath10k_pci_sleep(ar);
2442 static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
2446 ret = ath10k_pci_wake(ar);
2448 ath10k_warn("failed to wake target: %d\n", ret);
2452 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2454 ath10k_pci_sleep(ar);
2459 static int ath10k_pci_deinit_irq(struct ath10k *ar)
2461 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2463 switch (ar_pci->num_msi_intrs) {
2465 return ath10k_pci_deinit_irq_legacy(ar);
2468 case MSI_NUM_REQUEST:
2469 pci_disable_msi(ar_pci->pdev);
2472 pci_disable_msi(ar_pci->pdev);
2475 ath10k_warn("unknown irq configuration upon deinit\n");
2479 static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
2481 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2482 unsigned long timeout;
2486 ath10k_dbg(ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
2488 ret = ath10k_pci_wake(ar);
2490 ath10k_err("failed to wake up target for init: %d\n", ret);
2494 timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
2497 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2499 ath10k_dbg(ATH10K_DBG_BOOT, "boot target indicator %x\n", val);
2501 /* target should never return this */
2502 if (val == 0xffffffff)
2505 /* the device has crashed so don't bother trying anymore */
2506 if (val & FW_IND_EVENT_PENDING)
2509 if (val & FW_IND_INITIALIZED)
2512 if (ar_pci->num_msi_intrs == 0)
2513 /* Fix potential race by repeating CORE_BASE writes */
2514 ath10k_pci_soc_write32(ar, PCIE_INTR_ENABLE_ADDRESS,
2515 PCIE_INTR_FIRMWARE_MASK |
2516 PCIE_INTR_CE_MASK_ALL);
2519 } while (time_before(jiffies, timeout));
2521 if (val == 0xffffffff) {
2522 ath10k_err("failed to read device register, device is gone\n");
2527 if (val & FW_IND_EVENT_PENDING) {
2528 ath10k_warn("device has crashed during init\n");
2529 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
2530 val & ~FW_IND_EVENT_PENDING);
2531 ath10k_pci_hif_dump_area(ar);
2536 if (!(val & FW_IND_INITIALIZED)) {
2537 ath10k_err("failed to receive initialized event from target: %08x\n",
2543 ath10k_dbg(ATH10K_DBG_BOOT, "boot target initialised\n");
2546 ath10k_pci_sleep(ar);
2550 static int ath10k_pci_cold_reset(struct ath10k *ar)
2555 ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset\n");
2557 ret = ath10k_do_pci_wake(ar);
2559 ath10k_err("failed to wake up target: %d\n",
2564 /* Put Target, including PCIe, into RESET. */
2565 val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
2567 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2569 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2570 if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2571 RTC_STATE_COLD_RESET_MASK)
2576 /* Pull Target, including PCIe, out of RESET. */
2578 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2580 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2581 if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2582 RTC_STATE_COLD_RESET_MASK))
2587 ath10k_do_pci_sleep(ar);
2589 ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset complete\n");
2594 static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2598 for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
2599 if (!test_bit(i, ar_pci->features))
2603 case ATH10K_PCI_FEATURE_MSI_X:
2604 ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
2606 case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
2607 ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
2613 static int ath10k_pci_probe(struct pci_dev *pdev,
2614 const struct pci_device_id *pci_dev)
2619 struct ath10k_pci *ar_pci;
2620 u32 lcr_val, chip_id;
2622 ath10k_dbg(ATH10K_DBG_PCI, "pci probe\n");
2624 ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2628 ar_pci->pdev = pdev;
2629 ar_pci->dev = &pdev->dev;
2631 switch (pci_dev->device) {
2632 case QCA988X_2_0_DEVICE_ID:
2633 set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
2637 ath10k_err("Unknown device ID: %d\n", pci_dev->device);
2641 if (ath10k_pci_target_ps)
2642 set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
2644 ath10k_pci_dump_features(ar_pci);
2646 ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
2648 ath10k_err("failed to create driver core\n");
2654 atomic_set(&ar_pci->keep_awake_count, 0);
2656 pci_set_drvdata(pdev, ar);
2658 ret = pci_enable_device(pdev);
2660 ath10k_err("failed to enable PCI device: %d\n", ret);
2664 /* Request MMIO resources */
2665 ret = pci_request_region(pdev, BAR_NUM, "ath");
2667 ath10k_err("failed to request MMIO region: %d\n", ret);
2672 * Target structures have a limit of 32 bit DMA pointers.
2673 * DMA pointers can be wider than 32 bits by default on some systems.
2675 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2677 ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
2681 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2683 ath10k_err("failed to set consistent DMA mask to 32-bit\n");
2687 /* Set bus master bit in PCI_COMMAND to enable DMA */
2688 pci_set_master(pdev);
2691 * Temporary FIX: disable ASPM
2692 * Will be removed after the OTP is programmed
2694 pci_read_config_dword(pdev, 0x80, &lcr_val);
2695 pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2697 /* Arrange for access to Target SoC registers. */
2698 mem = pci_iomap(pdev, BAR_NUM, 0);
2700 ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
2707 spin_lock_init(&ar_pci->ce_lock);
2709 ret = ath10k_do_pci_wake(ar);
2711 ath10k_err("Failed to get chip id: %d\n", ret);
2715 chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
2717 ath10k_do_pci_sleep(ar);
2719 ret = ath10k_pci_alloc_ce(ar);
2721 ath10k_err("failed to allocate copy engine pipes: %d\n", ret);
2725 ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2727 ret = ath10k_core_register(ar, chip_id);
2729 ath10k_err("failed to register driver core: %d\n", ret);
2736 ath10k_pci_free_ce(ar);
2738 pci_iounmap(pdev, mem);
2740 pci_clear_master(pdev);
2742 pci_release_region(pdev, BAR_NUM);
2744 pci_disable_device(pdev);
2746 ath10k_core_destroy(ar);
2748 /* call HIF PCI free here */
2754 static void ath10k_pci_remove(struct pci_dev *pdev)
2756 struct ath10k *ar = pci_get_drvdata(pdev);
2757 struct ath10k_pci *ar_pci;
2759 ath10k_dbg(ATH10K_DBG_PCI, "pci remove\n");
2764 ar_pci = ath10k_pci_priv(ar);
2769 ath10k_core_unregister(ar);
2770 ath10k_pci_free_ce(ar);
2772 pci_iounmap(pdev, ar_pci->mem);
2773 pci_release_region(pdev, BAR_NUM);
2774 pci_clear_master(pdev);
2775 pci_disable_device(pdev);
2777 ath10k_core_destroy(ar);
2781 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2783 static struct pci_driver ath10k_pci_driver = {
2784 .name = "ath10k_pci",
2785 .id_table = ath10k_pci_id_table,
2786 .probe = ath10k_pci_probe,
2787 .remove = ath10k_pci_remove,
2790 static int __init ath10k_pci_init(void)
2794 ret = pci_register_driver(&ath10k_pci_driver);
2796 ath10k_err("failed to register PCI driver: %d\n", ret);
2800 module_init(ath10k_pci_init);
2802 static void __exit ath10k_pci_exit(void)
2804 pci_unregister_driver(&ath10k_pci_driver);
2807 module_exit(ath10k_pci_exit);
2809 MODULE_AUTHOR("Qualcomm Atheros");
2810 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2811 MODULE_LICENSE("Dual BSD/GPL");
2812 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_2_FILE);
2813 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);