2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 #include <linux/pci.h>
19 #include <linux/module.h>
20 #include <linux/interrupt.h>
21 #include <linux/spinlock.h>
22 #include <linux/bitops.h>
27 #include "targaddrs.h"
36 enum ath10k_pci_irq_mode {
37 ATH10K_PCI_IRQ_AUTO = 0,
38 ATH10K_PCI_IRQ_LEGACY = 1,
39 ATH10K_PCI_IRQ_MSI = 2,
42 enum ath10k_pci_reset_mode {
43 ATH10K_PCI_RESET_AUTO = 0,
44 ATH10K_PCI_RESET_WARM_ONLY = 1,
47 static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
48 static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
50 module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
51 MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
53 module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
54 MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
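/* Illustrative usage (not part of this file): both knobs can be set at
 * module load time, e.g.
 *
 *     modprobe ath10k_pci irq_mode=1 reset_mode=1
 *
 * and, because they are registered with mode 0644, they are also exposed
 * under /sys/module/ath10k_pci/parameters/.
 */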
56 /* how long to wait for the target to initialise, in ms */
57 #define ATH10K_PCI_TARGET_WAIT 3000
58 #define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
60 #define QCA988X_2_0_DEVICE_ID (0x003c)
62 static const struct pci_device_id ath10k_pci_id_table[] = {
63 { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
67 static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
70 static int ath10k_pci_post_rx(struct ath10k *ar);
71 static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
73 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
74 static int ath10k_pci_cold_reset(struct ath10k *ar);
75 static int ath10k_pci_warm_reset(struct ath10k *ar);
76 static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
77 static int ath10k_pci_init_irq(struct ath10k *ar);
78 static int ath10k_pci_deinit_irq(struct ath10k *ar);
79 static int ath10k_pci_request_irq(struct ath10k *ar);
80 static void ath10k_pci_free_irq(struct ath10k *ar);
81 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
82 struct ath10k_ce_pipe *rx_pipe,
83 struct bmi_xfer *xfer);
85 static const struct ce_attr host_ce_config_wlan[] = {
86 /* CE0: host->target HTC control and raw streams */
88 .flags = CE_ATTR_FLAGS,
94 /* CE1: target->host HTT + HTC control */
96 .flags = CE_ATTR_FLAGS,
102 /* CE2: target->host WMI */
104 .flags = CE_ATTR_FLAGS,
110 /* CE3: host->target WMI */
112 .flags = CE_ATTR_FLAGS,
118 /* CE4: host->target HTT */
120 .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
121 .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
128 .flags = CE_ATTR_FLAGS,
134 /* CE6: target autonomous hif_memcpy */
136 .flags = CE_ATTR_FLAGS,
142 /* CE7: ce_diag, the Diagnostic Window */
144 .flags = CE_ATTR_FLAGS,
146 .src_sz_max = DIAG_TRANSFER_LIMIT,
151 /* Target firmware's Copy Engine configuration. */
152 static const struct ce_pipe_config target_ce_config_wlan[] = {
153 /* CE0: host->target HTC control and raw streams */
156 .pipedir = PIPEDIR_OUT,
159 .flags = CE_ATTR_FLAGS,
163 /* CE1: target->host HTT + HTC control */
166 .pipedir = PIPEDIR_IN,
169 .flags = CE_ATTR_FLAGS,
173 /* CE2: target->host WMI */
176 .pipedir = PIPEDIR_IN,
179 .flags = CE_ATTR_FLAGS,
183 /* CE3: host->target WMI */
186 .pipedir = PIPEDIR_OUT,
189 .flags = CE_ATTR_FLAGS,
193 /* CE4: host->target HTT */
196 .pipedir = PIPEDIR_OUT,
199 .flags = CE_ATTR_FLAGS,
203 /* NB: 50% of src nentries, since tx has 2 frags */
208 .pipedir = PIPEDIR_OUT,
211 .flags = CE_ATTR_FLAGS,
215 /* CE6: Reserved for target autonomous hif_memcpy */
218 .pipedir = PIPEDIR_INOUT,
221 .flags = CE_ATTR_FLAGS,
225 /* CE7 used only by Host */
228 static bool ath10k_pci_irq_pending(struct ath10k *ar)
232 /* Check if the shared legacy irq is for us */
233 cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
234 PCIE_INTR_CAUSE_ADDRESS);
235 if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
241 static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
243 /* IMPORTANT: INTR_CLR register has to be set after
244 * INTR_ENABLE is set to 0, otherwise the interrupt cannot be
246 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
248 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
249 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
251 /* IMPORTANT: this extra read transaction is required to
252 * flush the posted write buffer. */
253 (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
254 PCIE_INTR_ENABLE_ADDRESS);
257 static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
259 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
260 PCIE_INTR_ENABLE_ADDRESS,
261 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
263 /* IMPORTANT: this extra read transaction is required to
264 * flush the posted write buffer. */
265 (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
266 PCIE_INTR_ENABLE_ADDRESS);
269 static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
271 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
273 if (ar_pci->num_msi_intrs > 1)
275 else if (ar_pci->num_msi_intrs == 1)
282 * Diagnostic read/write access is provided for startup/config/debug usage.
283 * Caller must guarantee proper alignment, when applicable, and single user
286 static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
289 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
292 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
295 struct ath10k_ce_pipe *ce_diag;
296 /* Host buffer address in CE space */
298 dma_addr_t ce_data_base = 0;
299 void *data_buf = NULL;
303 * This code cannot handle reads to non-memory space. Redirect to the
304 * register read fn but preserve the multi word read capability of
307 if (address < DRAM_BASE_ADDRESS) {
308 if (!IS_ALIGNED(address, 4) ||
309 !IS_ALIGNED((unsigned long)data, 4))
312 while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
313 ar, address, (u32 *)data)) == 0)) {
314 nbytes -= sizeof(u32);
315 address += sizeof(u32);
321 ce_diag = ar_pci->ce_diag;
324 * Allocate a temporary bounce buffer to hold caller's data
325 * to be DMA'ed from Target. This guarantees
326 * 1) 4-byte alignment
327 * 2) Buffer in DMA-able space
329 orig_nbytes = nbytes;
330 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
339 memset(data_buf, 0, orig_nbytes);
341 remaining_bytes = orig_nbytes;
342 ce_data = ce_data_base;
343 while (remaining_bytes) {
344 nbytes = min_t(unsigned int, remaining_bytes,
345 DIAG_TRANSFER_LIMIT);
347 ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
351 /* Request CE to send from Target(!) address to Host buffer */
353 * The address supplied by the caller is in the
354 * Target CPU virtual address space.
356 * In order to use this address with the diagnostic CE,
357 * convert it from Target CPU virtual address space
358 * to CE address space
360 address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
363 ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
369 while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
373 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
379 if (nbytes != completed_nbytes) {
384 if (buf != (u32) address) {
390 while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
395 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
401 if (nbytes != completed_nbytes) {
406 if (buf != ce_data) {
411 remaining_bytes -= nbytes;
418 /* Copy data from allocated DMA buf to caller's buf */
419 WARN_ON_ONCE(orig_nbytes & 3);
420 for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
422 __le32_to_cpu(((__le32 *)data_buf)[i]);
425 ath10k_warn("failed to read diag value at 0x%x: %d\n",
429 dma_free_coherent(ar->dev, orig_nbytes, data_buf,
435 static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
437 return ath10k_pci_diag_read_mem(ar, address, value, sizeof(u32));
440 static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
446 host_addr = host_interest_item_address(src);
448 ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
450 ath10k_warn("failed to get memcpy hi address for firmware address %d: %d\n",
455 ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
457 ath10k_warn("failed to memcpy firmware memory from %d (%d B): %d\n",
465 #define ath10k_pci_diag_read_hi(ar, dest, src, len) \
466 __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
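/* The helper wrapped by this macro performs a two-step read: it first
 * resolves the host-interest item to a firmware-side address via
 * ath10k_pci_diag_read32(), then copies len bytes from that address with
 * ath10k_pci_diag_read_mem().
 */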
468 /* Read 4-byte aligned data from Target memory or register */
469 static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
472 /* Assume range doesn't cross this boundary */
473 if (address >= DRAM_BASE_ADDRESS)
474 return ath10k_pci_diag_read32(ar, address, data);
476 *data = ath10k_pci_read32(ar, address);
480 static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
481 const void *data, int nbytes)
483 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
486 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
489 struct ath10k_ce_pipe *ce_diag;
490 void *data_buf = NULL;
491 u32 ce_data; /* Host buffer address in CE space */
492 dma_addr_t ce_data_base = 0;
495 ce_diag = ar_pci->ce_diag;
498 * Allocate a temporary bounce buffer to hold caller's data
499 * to be DMA'ed to Target. This guarantees
500 * 1) 4-byte alignment
501 * 2) Buffer in DMA-able space
503 orig_nbytes = nbytes;
504 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
513 /* Copy caller's data to allocated DMA buf */
514 WARN_ON_ONCE(orig_nbytes & 3);
515 for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
516 ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);
519 * The address supplied by the caller is in the
520 * Target CPU virtual address space.
522 * In order to use this address with the diagnostic CE,
524 * Target CPU virtual address space
528 address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
530 remaining_bytes = orig_nbytes;
531 ce_data = ce_data_base;
532 while (remaining_bytes) {
533 /* FIXME: check cast */
534 nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
536 /* Set up to receive directly into Target(!) address */
537 ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
542 * Request CE to send caller-supplied data that
543 * was copied to bounce buffer to Target(!) address.
545 ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
551 while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
556 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
562 if (nbytes != completed_nbytes) {
567 if (buf != ce_data) {
573 while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
578 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
584 if (nbytes != completed_nbytes) {
589 if (buf != address) {
594 remaining_bytes -= nbytes;
601 dma_free_coherent(ar->dev, orig_nbytes, data_buf,
606 ath10k_warn("failed to write diag value at 0x%x: %d\n",
612 /* Write 4B data to Target memory or register */
613 static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
616 /* Assume range doesn't cross this boundary */
617 if (address >= DRAM_BASE_ADDRESS)
618 return ath10k_pci_diag_write_mem(ar, address, &data,
621 ath10k_pci_write32(ar, address, data);
625 static bool ath10k_pci_is_awake(struct ath10k *ar)
627 u32 val = ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS);
629 return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
632 static int ath10k_pci_wake_wait(struct ath10k *ar)
637 while (tot_delay < PCIE_WAKE_TIMEOUT) {
638 if (ath10k_pci_is_awake(ar))
642 tot_delay += curr_delay;
651 static int ath10k_pci_wake(struct ath10k *ar)
653 ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
654 PCIE_SOC_WAKE_V_MASK);
655 return ath10k_pci_wake_wait(ar);
658 static void ath10k_pci_sleep(struct ath10k *ar)
660 ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
661 PCIE_SOC_WAKE_RESET);
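/* Note on the wake/sleep handshake above: ath10k_pci_wake() asserts
 * PCIE_SOC_WAKE_V_MASK and then polls RTC_STATE via
 * ath10k_pci_wake_wait() until the target reports RTC_STATE_V_ON;
 * ath10k_pci_sleep() simply clears the request with PCIE_SOC_WAKE_RESET.
 */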
664 /* Called by lower (CE) layer when a send to Target completes. */
665 static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
667 struct ath10k *ar = ce_state->ar;
668 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
669 struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
670 void *transfer_context;
673 unsigned int transfer_id;
675 while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
677 &transfer_id) == 0) {
678 /* no need to call tx completion for NULL pointers */
679 if (transfer_context == NULL)
682 cb->tx_completion(ar, transfer_context, transfer_id);
686 /* Called by lower (CE) layer when data is received from the Target. */
687 static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
689 struct ath10k *ar = ce_state->ar;
690 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
691 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
692 struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
694 void *transfer_context;
696 unsigned int nbytes, max_nbytes;
697 unsigned int transfer_id;
699 int err, num_replenish = 0;
701 while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
702 &ce_data, &nbytes, &transfer_id,
705 skb = transfer_context;
706 max_nbytes = skb->len + skb_tailroom(skb);
707 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
708 max_nbytes, DMA_FROM_DEVICE);
710 if (unlikely(max_nbytes < nbytes)) {
711 ath10k_warn("rxed more than expected (nbytes %d, max %d)",
713 dev_kfree_skb_any(skb);
717 skb_put(skb, nbytes);
718 cb->rx_completion(ar, skb, pipe_info->pipe_num);
721 err = ath10k_pci_post_rx_pipe(pipe_info, num_replenish);
724 ath10k_warn("failed to replenish CE rx ring %d (%d bufs): %d\n",
725 pipe_info->pipe_num, num_replenish, err);
729 static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
730 struct ath10k_hif_sg_item *items, int n_items)
732 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
733 struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
734 struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
735 struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
736 unsigned int nentries_mask;
737 unsigned int sw_index;
738 unsigned int write_index;
741 spin_lock_bh(&ar_pci->ce_lock);
743 nentries_mask = src_ring->nentries_mask;
744 sw_index = src_ring->sw_index;
745 write_index = src_ring->write_index;
747 if (unlikely(CE_RING_DELTA(nentries_mask,
748 write_index, sw_index - 1) < n_items)) {
753 for (i = 0; i < n_items - 1; i++) {
754 ath10k_dbg(ATH10K_DBG_PCI,
755 "pci tx item %d paddr 0x%08x len %d n_items %d\n",
756 i, items[i].paddr, items[i].len, n_items);
757 ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
758 items[i].vaddr, items[i].len);
760 err = ath10k_ce_send_nolock(ce_pipe,
761 items[i].transfer_context,
764 items[i].transfer_id,
765 CE_SEND_FLAG_GATHER);
770 /* i equals n_items - 1 after the loop above; send the final item to complete the transfer */
772 ath10k_dbg(ATH10K_DBG_PCI,
773 "pci tx item %d paddr 0x%08x len %d n_items %d\n",
774 i, items[i].paddr, items[i].len, n_items);
775 ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
776 items[i].vaddr, items[i].len);
778 err = ath10k_ce_send_nolock(ce_pipe,
779 items[i].transfer_context,
782 items[i].transfer_id,
787 spin_unlock_bh(&ar_pci->ce_lock);
792 __ath10k_ce_send_revert(ce_pipe);
794 spin_unlock_bh(&ar_pci->ce_lock);
798 static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
800 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
802 ath10k_dbg(ATH10K_DBG_PCI, "pci hif get free queue number\n");
804 return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
807 static void ath10k_pci_dump_registers(struct ath10k *ar,
808 struct ath10k_fw_crash_data *crash_data)
810 u32 i, reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
813 lockdep_assert_held(&ar->data_lock);
815 ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
817 REG_DUMP_COUNT_QCA988X * sizeof(u32));
819 ath10k_err("failed to read firmware dump area: %d\n", ret);
823 BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
825 ath10k_err("firmware register dump:\n");
826 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
827 ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
830 reg_dump_values[i + 1],
831 reg_dump_values[i + 2],
832 reg_dump_values[i + 3]);
834 /* crash_data is in little endian */
835 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
836 crash_data->registers[i] = cpu_to_le32(reg_dump_values[i]);
839 static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
841 struct ath10k_fw_crash_data *crash_data;
844 spin_lock_bh(&ar->data_lock);
846 crash_data = ath10k_debug_get_new_fw_crash_data(ar);
849 scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid);
851 scnprintf(uuid, sizeof(uuid), "n/a");
853 ath10k_err("firmware crashed! (uuid %s)\n", uuid);
854 ath10k_print_driver_info(ar);
859 ath10k_pci_dump_registers(ar, crash_data);
862 spin_unlock_bh(&ar->data_lock);
864 queue_work(ar->workqueue, &ar->restart_work);
867 static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
870 ath10k_dbg(ATH10K_DBG_PCI, "pci hif send complete check\n");
875 * Decide whether to actually poll for completions, or just
876 * wait for a later chance.
877 * If there seem to be plenty of resources left, then just wait
878 * since checking involves reading a CE register, which is a
879 * relatively expensive operation.
881 resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
884 * If at least 50% of the total resources are still available,
885 * don't bother checking again yet.
887 if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
890 ath10k_ce_per_engine_service(ar, pipe);
893 static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
894 struct ath10k_hif_cb *callbacks)
896 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
898 ath10k_dbg(ATH10K_DBG_PCI, "pci hif set callbacks\n");
900 memcpy(&ar_pci->msg_callbacks_current, callbacks,
901 sizeof(ar_pci->msg_callbacks_current));
904 static void ath10k_pci_kill_tasklet(struct ath10k *ar)
906 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
909 tasklet_kill(&ar_pci->intr_tq);
910 tasklet_kill(&ar_pci->msi_fw_err);
912 for (i = 0; i < CE_COUNT; i++)
913 tasklet_kill(&ar_pci->pipe_info[i].intr);
916 /* TODO - temporary mapping while we have too few CE's */
917 static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
918 u16 service_id, u8 *ul_pipe,
919 u8 *dl_pipe, int *ul_is_polled,
924 ath10k_dbg(ATH10K_DBG_PCI, "pci hif map service\n");
926 /* polling for received messages not supported */
929 switch (service_id) {
930 case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
932 * Host->target HTT gets its own pipe, so it can be polled
933 * while other pipes are interrupt driven.
937 * Use the same target->host pipe for HTC ctrl, HTC raw
943 case ATH10K_HTC_SVC_ID_RSVD_CTRL:
944 case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
946 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
947 * HTC_CTRL_RSVD_SVC could share the same pipe as the
948 * WMI services. So, if another CE is needed, change
949 * this to *ul_pipe = 3, which frees up CE 0.
956 case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
957 case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
958 case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
959 case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
961 case ATH10K_HTC_SVC_ID_WMI_CONTROL:
967 /* pipe 6 reserved */
968 /* pipe 7 reserved */
975 (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
980 static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
981 u8 *ul_pipe, u8 *dl_pipe)
983 int ul_is_polled, dl_is_polled;
985 ath10k_dbg(ATH10K_DBG_PCI, "pci hif get default pipe\n");
987 (void)ath10k_pci_hif_map_service_to_pipe(ar,
988 ATH10K_HTC_SVC_ID_RSVD_CTRL,
995 static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
998 struct ath10k *ar = pipe_info->hif_ce_state;
999 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1000 struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
1001 struct sk_buff *skb;
1005 if (pipe_info->buf_sz == 0)
1008 for (i = 0; i < num; i++) {
1009 skb = dev_alloc_skb(pipe_info->buf_sz);
1011 ath10k_warn("failed to allocate skbuff for pipe %d\n",
1017 WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
1019 ce_data = dma_map_single(ar->dev, skb->data,
1020 skb->len + skb_tailroom(skb),
1023 if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
1024 ath10k_warn("failed to DMA map sk_buff\n");
1025 dev_kfree_skb_any(skb);
1030 ATH10K_SKB_CB(skb)->paddr = ce_data;
1032 dma_sync_single_for_device(ar->dev, ce_data,
1034 DMA_FROM_DEVICE);
1036 ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
1039 ath10k_warn("failed to enqueue to pipe %d: %d\n",
1048 ath10k_pci_rx_pipe_cleanup(pipe_info);
1052 static int ath10k_pci_post_rx(struct ath10k *ar)
1054 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1055 struct ath10k_pci_pipe *pipe_info;
1056 const struct ce_attr *attr;
1057 int pipe_num, ret = 0;
1059 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1060 pipe_info = &ar_pci->pipe_info[pipe_num];
1061 attr = &host_ce_config_wlan[pipe_num];
1063 if (attr->dest_nentries == 0)
1066 ret = ath10k_pci_post_rx_pipe(pipe_info,
1067 attr->dest_nentries - 1);
1069 ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
1072 for (; pipe_num >= 0; pipe_num--) {
1073 pipe_info = &ar_pci->pipe_info[pipe_num];
1074 ath10k_pci_rx_pipe_cleanup(pipe_info);
1083 static void ath10k_pci_irq_disable(struct ath10k *ar)
1085 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1088 ath10k_ce_disable_interrupts(ar);
1090 /* Regardless how many interrupts were assigned for MSI the first one
1091 * is always used for firmware indications (crashes). There's no way to
1092 * mask the irq in the device so call disable_irq(). Legacy (shared)
1093 * interrupts can be masked on the device though.
1095 if (ar_pci->num_msi_intrs > 0)
1096 disable_irq(ar_pci->pdev->irq);
1098 ath10k_pci_disable_and_clear_legacy_irq(ar);
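/* num_msi_intrs == 0 means the shared legacy interrupt line is in use,
 * so the max(1, ...) below still synchronizes that single vector.
 */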
1100 for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
1101 synchronize_irq(ar_pci->pdev->irq + i);
1104 static void ath10k_pci_irq_enable(struct ath10k *ar)
1106 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1108 ath10k_ce_enable_interrupts(ar);
1110 /* See comment in ath10k_pci_irq_disable() */
1111 if (ar_pci->num_msi_intrs > 0)
1112 enable_irq(ar_pci->pdev->irq);
1114 ath10k_pci_enable_legacy_irq(ar);
1117 static int ath10k_pci_hif_start(struct ath10k *ar)
1119 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1122 ath10k_dbg(ATH10K_DBG_BOOT, "boot hif start\n");
1124 ath10k_pci_irq_enable(ar);
1126 /* Post buffers once to start things off. */
1127 ret = ath10k_pci_post_rx(ar);
1129 ath10k_warn("failed to post RX buffers for all pipes: %d\n",
1134 ar_pci->started = 1;
1138 ath10k_pci_irq_disable(ar);
1139 ath10k_pci_kill_tasklet(ar);
1144 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
1147 struct ath10k_pci *ar_pci;
1148 struct ath10k_ce_pipe *ce_hdl;
1150 struct sk_buff *netbuf;
1153 buf_sz = pipe_info->buf_sz;
1155 /* Unused Copy Engine */
1159 ar = pipe_info->hif_ce_state;
1160 ar_pci = ath10k_pci_priv(ar);
1162 if (!ar_pci->started)
1165 ce_hdl = pipe_info->ce_hdl;
1167 while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
1169 dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
1170 netbuf->len + skb_tailroom(netbuf),
1172 dev_kfree_skb_any(netbuf);
1176 static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
1179 struct ath10k_pci *ar_pci;
1180 struct ath10k_ce_pipe *ce_hdl;
1181 struct sk_buff *netbuf;
1183 unsigned int nbytes;
1187 buf_sz = pipe_info->buf_sz;
1189 /* Unused Copy Engine */
1193 ar = pipe_info->hif_ce_state;
1194 ar_pci = ath10k_pci_priv(ar);
1196 if (!ar_pci->started)
1199 ce_hdl = pipe_info->ce_hdl;
1201 while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
1202 &ce_data, &nbytes, &id) == 0) {
1203 /* no need to call tx completion for NULL pointers */
1207 ar_pci->msg_callbacks_current.tx_completion(ar,
1214 * Cleanup residual buffers for device shutdown:
1215 * buffers that were enqueued for receive
1216 * buffers that were to be sent
1217 * Note: Buffers that had completed but which were
1218 * not yet processed are on a completion queue. They
1219 * are handled when the completion thread shuts down.
1221 static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1223 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1226 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1227 struct ath10k_pci_pipe *pipe_info;
1229 pipe_info = &ar_pci->pipe_info[pipe_num];
1230 ath10k_pci_rx_pipe_cleanup(pipe_info);
1231 ath10k_pci_tx_pipe_cleanup(pipe_info);
1235 static void ath10k_pci_ce_deinit(struct ath10k *ar)
1239 for (i = 0; i < CE_COUNT; i++)
1240 ath10k_ce_deinit_pipe(ar, i);
1243 static void ath10k_pci_hif_stop(struct ath10k *ar)
1245 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1247 ath10k_dbg(ATH10K_DBG_BOOT, "boot hif stop\n");
1249 if (WARN_ON(!ar_pci->started))
1252 ath10k_pci_irq_disable(ar);
1253 ath10k_pci_kill_tasklet(ar);
1254 ath10k_pci_buffer_cleanup(ar);
1256 /* Make sure the device won't access any structures on the host by
1257 * resetting it. The device was fed with PCI CE ringbuffer
1258 * configuration during init. If ringbuffers are freed and the device
1259 * were to access them this could lead to memory corruption on the
1261 ath10k_pci_warm_reset(ar);
1263 ar_pci->started = 0;
1266 static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1267 void *req, u32 req_len,
1268 void *resp, u32 *resp_len)
1270 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1271 struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1272 struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1273 struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
1274 struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
1275 dma_addr_t req_paddr = 0;
1276 dma_addr_t resp_paddr = 0;
1277 struct bmi_xfer xfer = {};
1278 void *treq, *tresp = NULL;
1283 if (resp && !resp_len)
1286 if (resp && resp_len && *resp_len == 0)
1289 treq = kmemdup(req, req_len, GFP_KERNEL);
1293 req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
1294 ret = dma_mapping_error(ar->dev, req_paddr);
1298 if (resp && resp_len) {
1299 tresp = kzalloc(*resp_len, GFP_KERNEL);
1305 resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
1307 ret = dma_mapping_error(ar->dev, resp_paddr);
1311 xfer.wait_for_resp = true;
1314 ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
1317 ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
1321 ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
1324 unsigned int unused_nbytes;
1325 unsigned int unused_id;
1327 ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
1328 &unused_nbytes, &unused_id);
1330 /* non-zero means we did not time out */
1338 ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1339 dma_unmap_single(ar->dev, resp_paddr,
1340 *resp_len, DMA_FROM_DEVICE);
1343 dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
1345 if (ret == 0 && resp_len) {
1346 *resp_len = min(*resp_len, xfer.resp_len);
1347 memcpy(resp, tresp, xfer.resp_len);
1356 static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
1358 struct bmi_xfer *xfer;
1360 unsigned int nbytes;
1361 unsigned int transfer_id;
1363 if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
1364 &nbytes, &transfer_id))
1367 xfer->tx_done = true;
1370 static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
1372 struct bmi_xfer *xfer;
1374 unsigned int nbytes;
1375 unsigned int transfer_id;
1378 if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
1379 &nbytes, &transfer_id, &flags))
1382 if (!xfer->wait_for_resp) {
1383 ath10k_warn("unexpected: BMI data received; ignoring\n");
1387 xfer->resp_len = nbytes;
1388 xfer->rx_done = true;
1391 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
1392 struct ath10k_ce_pipe *rx_pipe,
1393 struct bmi_xfer *xfer)
1395 unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1397 while (time_before_eq(jiffies, timeout)) {
1398 ath10k_pci_bmi_send_done(tx_pipe);
1399 ath10k_pci_bmi_recv_data(rx_pipe);
1401 if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
1411 * Map from service/endpoint to Copy Engine.
1412 * This table is derived from the CE_PCI TABLE, above.
1413 * It is passed to the Target at startup for use by firmware.
1415 static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
1417 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1418 PIPEDIR_OUT, /* out = UL = host -> target */
1422 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1423 PIPEDIR_IN, /* in = DL = target -> host */
1427 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1428 PIPEDIR_OUT, /* out = UL = host -> target */
1432 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1433 PIPEDIR_IN, /* in = DL = target -> host */
1437 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1438 PIPEDIR_OUT, /* out = UL = host -> target */
1442 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1443 PIPEDIR_IN, /* in = DL = target -> host */
1447 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1448 PIPEDIR_OUT, /* out = UL = host -> target */
1452 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1453 PIPEDIR_IN, /* in = DL = target -> host */
1457 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1458 PIPEDIR_OUT, /* out = UL = host -> target */
1462 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1463 PIPEDIR_IN, /* in = DL = target -> host */
1467 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1468 PIPEDIR_OUT, /* out = UL = host -> target */
1469 0, /* could be moved to 3 (share with WMI) */
1472 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1473 PIPEDIR_IN, /* in = DL = target -> host */
1477 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
1478 PIPEDIR_OUT, /* out = UL = host -> target */
1482 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
1483 PIPEDIR_IN, /* in = DL = target -> host */
1487 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1488 PIPEDIR_OUT, /* out = UL = host -> target */
1492 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1493 PIPEDIR_IN, /* in = DL = target -> host */
1497 /* (Additions here) */
1499 { /* Must be last */
1507 * Send an interrupt to the device to wake up the Target CPU
1508 * so it has an opportunity to notice any changed state.
1510 static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1515 ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
1519 ath10k_warn("failed to read core_ctrl: %d\n", ret);
1523 /* A_INUM_FIRMWARE interrupt to Target CPU */
1524 core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1526 ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
1530 ath10k_warn("failed to set target CPU interrupt mask: %d\n",
1538 static int ath10k_pci_init_config(struct ath10k *ar)
1540 u32 interconnect_targ_addr;
1541 u32 pcie_state_targ_addr = 0;
1542 u32 pipe_cfg_targ_addr = 0;
1543 u32 svc_to_pipe_map = 0;
1544 u32 pcie_config_flags = 0;
1546 u32 ealloc_targ_addr;
1548 u32 flag2_targ_addr;
1551 /* Download to Target the CE Config and the service-to-CE map */
1552 interconnect_targ_addr =
1553 host_interest_item_address(HI_ITEM(hi_interconnect_state));
1555 /* Supply Target-side CE configuration */
1556 ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
1557 &pcie_state_targ_addr);
1559 ath10k_err("Failed to get pcie state addr: %d\n", ret);
1563 if (pcie_state_targ_addr == 0) {
1565 ath10k_err("Invalid pcie state addr\n");
1569 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1570 offsetof(struct pcie_state,
1572 &pipe_cfg_targ_addr);
1574 ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
1578 if (pipe_cfg_targ_addr == 0) {
1580 ath10k_err("Invalid pipe cfg addr\n");
1584 ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1585 target_ce_config_wlan,
1586 sizeof(target_ce_config_wlan));
1589 ath10k_err("Failed to write pipe cfg: %d\n", ret);
1593 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1594 offsetof(struct pcie_state,
1598 ath10k_err("Failed to get svc/pipe map: %d\n", ret);
1602 if (svc_to_pipe_map == 0) {
1604 ath10k_err("Invalid svc_to_pipe map\n");
1608 ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1609 target_service_to_ce_map_wlan,
1610 sizeof(target_service_to_ce_map_wlan));
1612 ath10k_err("Failed to write svc/pipe map: %d\n", ret);
1616 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1617 offsetof(struct pcie_state,
1619 &pcie_config_flags);
1621 ath10k_err("Failed to get pcie config_flags: %d\n", ret);
1625 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1627 ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
1628 offsetof(struct pcie_state, config_flags),
1630 sizeof(pcie_config_flags));
1632 ath10k_err("Failed to write pcie config_flags: %d\n", ret);
1636 /* configure early allocation */
1637 ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1639 ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
1641 ath10k_err("Faile to get early alloc val: %d\n", ret);
1645 /* first bank is switched to IRAM */
1646 ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1647 HI_EARLY_ALLOC_MAGIC_MASK);
1648 ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1649 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1651 ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1653 ath10k_err("Failed to set early alloc val: %d\n", ret);
1657 /* Tell Target to proceed with initialization */
1658 flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1660 ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1662 ath10k_err("Failed to get option val: %d\n", ret);
1666 flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1668 ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1670 ath10k_err("Failed to set option val: %d\n", ret);
1677 static int ath10k_pci_alloc_ce(struct ath10k *ar)
1681 for (i = 0; i < CE_COUNT; i++) {
1682 ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
1684 ath10k_err("failed to allocate copy engine pipe %d: %d\n",
1693 static void ath10k_pci_free_ce(struct ath10k *ar)
1697 for (i = 0; i < CE_COUNT; i++)
1698 ath10k_ce_free_pipe(ar, i);
1701 static int ath10k_pci_ce_init(struct ath10k *ar)
1703 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1704 struct ath10k_pci_pipe *pipe_info;
1705 const struct ce_attr *attr;
1708 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1709 pipe_info = &ar_pci->pipe_info[pipe_num];
1710 pipe_info->ce_hdl = &ar_pci->ce_states[pipe_num];
1711 pipe_info->pipe_num = pipe_num;
1712 pipe_info->hif_ce_state = ar;
1713 attr = &host_ce_config_wlan[pipe_num];
1715 ret = ath10k_ce_init_pipe(ar, pipe_num, attr,
1716 ath10k_pci_ce_send_done,
1717 ath10k_pci_ce_recv_data);
1719 ath10k_err("failed to initialize copy engine pipe %d: %d\n",
1724 if (pipe_num == CE_COUNT - 1) {
1726 * Reserve the ultimate CE for
1727 * diagnostic Window support
1729 ar_pci->ce_diag = pipe_info->ce_hdl;
1733 pipe_info->buf_sz = (size_t) (attr->src_sz_max);
1739 static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
1741 return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
1742 FW_IND_EVENT_PENDING;
1745 static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
1749 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
1750 val &= ~FW_IND_EVENT_PENDING;
1751 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
1754 /* this function effectively clears target memory controller assert line */
1755 static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
1759 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1760 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
1761 val | SOC_RESET_CONTROL_SI0_RST_MASK);
1762 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1766 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1767 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
1768 val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
1769 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1774 static int ath10k_pci_warm_reset(struct ath10k *ar)
1778 ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset\n");
1781 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1782 PCIE_INTR_CAUSE_ADDRESS);
1783 ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
1785 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1787 ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
1790 /* disable pending irqs */
1791 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1792 PCIE_INTR_ENABLE_ADDRESS, 0);
1794 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1795 PCIE_INTR_CLR_ADDRESS, ~0);
1799 /* clear fw indicator */
1800 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
1802 /* clear target LF timer interrupts */
1803 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1804 SOC_LF_TIMER_CONTROL0_ADDRESS);
1805 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
1806 SOC_LF_TIMER_CONTROL0_ADDRESS,
1807 val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
1810 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1811 SOC_RESET_CONTROL_ADDRESS);
1812 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1813 val | SOC_RESET_CONTROL_CE_RST_MASK);
1814 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1815 SOC_RESET_CONTROL_ADDRESS);
1819 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1820 val & ~SOC_RESET_CONTROL_CE_RST_MASK);
1821 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1822 SOC_RESET_CONTROL_ADDRESS);
1825 ath10k_pci_warm_reset_si0(ar);
1828 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1829 PCIE_INTR_CAUSE_ADDRESS);
1830 ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
1832 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1834 ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
1837 /* CPU warm reset */
1838 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1839 SOC_RESET_CONTROL_ADDRESS);
1840 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1841 val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
1843 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1844 SOC_RESET_CONTROL_ADDRESS);
1845 ath10k_dbg(ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", val);
1849 ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset complete\n");
1854 static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
1859 * Bring the target up cleanly.
1861 * The target may be in an undefined state with an AUX-powered Target
1862 * and a Host in WoW mode. If the Host crashes, loses power, or is
1863 * restarted (without unloading the driver) then the Target is left
1864 * (aux) powered and running. On a subsequent driver load, the Target
1865 * is in an unexpected state. We try to catch that here in order to
1866 * reset the Target and retry the probe.
1869 ret = ath10k_pci_cold_reset(ar);
1871 ret = ath10k_pci_warm_reset(ar);
1874 ath10k_err("failed to reset target: %d\n", ret);
1878 ret = ath10k_pci_ce_init(ar);
1880 ath10k_err("failed to initialize CE: %d\n", ret);
1884 ret = ath10k_pci_wait_for_target_init(ar);
1886 ath10k_err("failed to wait for target to init: %d\n", ret);
1890 ret = ath10k_pci_init_config(ar);
1892 ath10k_err("failed to setup init config: %d\n", ret);
1896 ret = ath10k_pci_wake_target_cpu(ar);
1898 ath10k_err("could not wake up target CPU: %d\n", ret);
1905 ath10k_pci_ce_deinit(ar);
1906 ath10k_pci_warm_reset(ar);
1911 static int ath10k_pci_hif_power_up_warm(struct ath10k *ar)
1916 * Sometimes a warm reset only succeeds after retries.
1918 * FIXME: It might be possible to tune ath10k_pci_warm_reset() to work
1921 for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
1922 ret = __ath10k_pci_hif_power_up(ar, false);
1926 ath10k_warn("failed to warm reset (attempt %d out of %d): %d\n",
1927 i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, ret);
1933 static int ath10k_pci_hif_power_up(struct ath10k *ar)
1937 ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power up\n");
1940 * Hardware CUS232 version 2 has some issues with cold reset and the
1941 * preferred (and safer) way to perform a device reset is through a
1944 * Warm reset doesn't always work though, so falling back to cold reset may
1947 ret = ath10k_pci_hif_power_up_warm(ar);
1949 ath10k_warn("failed to power up target using warm reset: %d\n",
1952 if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
1955 ath10k_warn("trying cold reset\n");
1957 ret = __ath10k_pci_hif_power_up(ar, true);
1959 ath10k_err("failed to power up target using cold reset too (%d)\n",
1968 static void ath10k_pci_hif_power_down(struct ath10k *ar)
1970 ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power down\n");
1972 ath10k_pci_warm_reset(ar);
1977 #define ATH10K_PCI_PM_CONTROL 0x44
1979 static int ath10k_pci_hif_suspend(struct ath10k *ar)
1981 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1982 struct pci_dev *pdev = ar_pci->pdev;
1985 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1987 if ((val & 0x000000ff) != 0x3) {
1988 pci_save_state(pdev);
1989 pci_disable_device(pdev);
1990 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1991 (val & 0xffffff00) | 0x03);
1997 static int ath10k_pci_hif_resume(struct ath10k *ar)
1999 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2000 struct pci_dev *pdev = ar_pci->pdev;
2003 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2005 if ((val & 0x000000ff) != 0) {
2006 pci_restore_state(pdev);
2007 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2010 * Suspend/Resume resets the PCI configuration space,
2011 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
2012 * to keep PCI Tx retries from interfering with C3 CPU state
2014 pci_read_config_dword(pdev, 0x40, &val);
2016 if ((val & 0x0000ff00) != 0)
2017 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2024 static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
2025 .tx_sg = ath10k_pci_hif_tx_sg,
2026 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
2027 .start = ath10k_pci_hif_start,
2028 .stop = ath10k_pci_hif_stop,
2029 .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
2030 .get_default_pipe = ath10k_pci_hif_get_default_pipe,
2031 .send_complete_check = ath10k_pci_hif_send_complete_check,
2032 .set_callbacks = ath10k_pci_hif_set_callbacks,
2033 .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
2034 .power_up = ath10k_pci_hif_power_up,
2035 .power_down = ath10k_pci_hif_power_down,
2037 .suspend = ath10k_pci_hif_suspend,
2038 .resume = ath10k_pci_hif_resume,
2042 static void ath10k_pci_ce_tasklet(unsigned long ptr)
2044 struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
2045 struct ath10k_pci *ar_pci = pipe->ar_pci;
2047 ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
2050 static void ath10k_msi_err_tasklet(unsigned long data)
2052 struct ath10k *ar = (struct ath10k *)data;
2054 if (!ath10k_pci_has_fw_crashed(ar)) {
2055 ath10k_warn("received unsolicited fw crash interrupt\n");
2059 ath10k_pci_fw_crashed_clear(ar);
2060 ath10k_pci_fw_crashed_dump(ar);
2064 * Handler for a per-engine interrupt on a PARTICULAR CE.
2065 * This is used in cases where each CE has a private MSI interrupt.
2067 static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
2069 struct ath10k *ar = arg;
2070 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2071 int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
2073 if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
2074 ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
2079 * NOTE: We are able to derive ce_id from irq because we
2080 * use a one-to-one mapping for CE's 0..5.
2081 * CE's 6 & 7 do not use interrupts at all.
2083 * This mapping must be kept in sync with the mapping
2086 tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
2090 static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2092 struct ath10k *ar = arg;
2093 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2095 tasklet_schedule(&ar_pci->msi_fw_err);
2100 * Top-level interrupt handler for all PCI interrupts from a Target.
2101 * When a block of MSI interrupts is allocated, this top-level handler
2102 * is not used; instead, we directly call the correct sub-handler.
2104 static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2106 struct ath10k *ar = arg;
2107 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2109 if (ar_pci->num_msi_intrs == 0) {
2110 if (!ath10k_pci_irq_pending(ar))
2113 ath10k_pci_disable_and_clear_legacy_irq(ar);
2116 tasklet_schedule(&ar_pci->intr_tq);
2121 static void ath10k_pci_tasklet(unsigned long data)
2123 struct ath10k *ar = (struct ath10k *)data;
2124 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2126 if (ath10k_pci_has_fw_crashed(ar)) {
2127 ath10k_pci_fw_crashed_clear(ar);
2128 ath10k_pci_fw_crashed_dump(ar);
2132 ath10k_ce_per_engine_service_any(ar);
2134 /* Re-enable legacy irq that was disabled in the irq handler */
2135 if (ar_pci->num_msi_intrs == 0)
2136 ath10k_pci_enable_legacy_irq(ar);
2139 static int ath10k_pci_request_irq_msix(struct ath10k *ar)
2141 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2144 ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2145 ath10k_pci_msi_fw_handler,
2146 IRQF_SHARED, "ath10k_pci", ar);
2148 ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
2149 ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
2153 for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2154 ret = request_irq(ar_pci->pdev->irq + i,
2155 ath10k_pci_per_engine_handler,
2156 IRQF_SHARED, "ath10k_pci", ar);
2158 ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
2159 ar_pci->pdev->irq + i, ret);
2161 for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2162 free_irq(ar_pci->pdev->irq + i, ar);
2164 free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
2172 static int ath10k_pci_request_irq_msi(struct ath10k *ar)
2174 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2177 ret = request_irq(ar_pci->pdev->irq,
2178 ath10k_pci_interrupt_handler,
2179 IRQF_SHARED, "ath10k_pci", ar);
2181 ath10k_warn("failed to request MSI irq %d: %d\n",
2182 ar_pci->pdev->irq, ret);
2189 static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
2191 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2194 ret = request_irq(ar_pci->pdev->irq,
2195 ath10k_pci_interrupt_handler,
2196 IRQF_SHARED, "ath10k_pci", ar);
2198 ath10k_warn("failed to request legacy irq %d: %d\n",
2199 ar_pci->pdev->irq, ret);
2206 static int ath10k_pci_request_irq(struct ath10k *ar)
2208 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2210 switch (ar_pci->num_msi_intrs) {
2212 return ath10k_pci_request_irq_legacy(ar);
2214 return ath10k_pci_request_irq_msi(ar);
2215 case MSI_NUM_REQUEST:
2216 return ath10k_pci_request_irq_msix(ar);
2219 ath10k_warn("unknown irq configuration upon request\n");
2223 static void ath10k_pci_free_irq(struct ath10k *ar)
2225 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2228 /* There's at least one interrupt regardless of whether it's a legacy INTR
2229 * or MSI or MSI-X */
2230 for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2231 free_irq(ar_pci->pdev->irq + i, ar);
2234 static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
2236 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2239 tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
2240 tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2243 for (i = 0; i < CE_COUNT; i++) {
2244 ar_pci->pipe_info[i].ar_pci = ar_pci;
2245 tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
2246 (unsigned long)&ar_pci->pipe_info[i]);
2250 static int ath10k_pci_init_irq(struct ath10k *ar)
2252 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2255 ath10k_pci_init_irq_tasklets(ar);
2257 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
2258 ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode);
2261 if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) {
2262 ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
2263 ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
2264 ar_pci->num_msi_intrs);
2272 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
2273 ar_pci->num_msi_intrs = 1;
2274 ret = pci_enable_msi(ar_pci->pdev);
2283 * A potential race occurs here: The CORE_BASE write
2284 * depends on target correctly decoding AXI address but
2285 * host won't know when target writes BAR to CORE_CTRL.
2286 * This write might get lost if target has NOT written BAR.
2287 * For now, fix the race by repeating the write in the
2288 * synchronization check below. */
2289 ar_pci->num_msi_intrs = 0;
2291 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2292 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
2297 static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
2299 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2303 static int ath10k_pci_deinit_irq(struct ath10k *ar)
2305 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2307 switch (ar_pci->num_msi_intrs) {
2309 ath10k_pci_deinit_irq_legacy(ar);
2313 case MSI_NUM_REQUEST:
2314 pci_disable_msi(ar_pci->pdev);
2317 pci_disable_msi(ar_pci->pdev);
2320 ath10k_warn("unknown irq configuration upon deinit\n");
2324 static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
2326 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2327 unsigned long timeout;
2330 ath10k_dbg(ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
2332 timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
2335 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2337 ath10k_dbg(ATH10K_DBG_BOOT, "boot target indicator %x\n", val);
2339 /* target should never return this */
2340 if (val == 0xffffffff)
2343 /* the device has crashed so don't bother trying anymore */
2344 if (val & FW_IND_EVENT_PENDING)
2347 if (val & FW_IND_INITIALIZED)
2350 if (ar_pci->num_msi_intrs == 0)
2351 /* Fix potential race by repeating CORE_BASE writes */
2352 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
2353 PCIE_INTR_ENABLE_ADDRESS,
2354 PCIE_INTR_FIRMWARE_MASK |
2355 PCIE_INTR_CE_MASK_ALL);
2358 } while (time_before(jiffies, timeout));
2360 if (val == 0xffffffff) {
2361 ath10k_err("failed to read device register, device is gone\n");
2365 if (val & FW_IND_EVENT_PENDING) {
2366 ath10k_warn("device has crashed during init\n");
2367 ath10k_pci_fw_crashed_clear(ar);
2368 ath10k_pci_fw_crashed_dump(ar);
2372 if (!(val & FW_IND_INITIALIZED)) {
2373 ath10k_err("failed to receive initialized event from target: %08x\n",
2378 ath10k_dbg(ATH10K_DBG_BOOT, "boot target initialised\n");
2382 static int ath10k_pci_cold_reset(struct ath10k *ar)
2387 ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset\n");
2389 /* Put Target, including PCIe, into RESET. */
2390 val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
2392 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2394 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2395 if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2396 RTC_STATE_COLD_RESET_MASK)
2401 /* Pull Target, including PCIe, out of RESET. */
2403 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2405 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2406 if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2407 RTC_STATE_COLD_RESET_MASK))
2412 ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset complete\n");
2417 static int ath10k_pci_claim(struct ath10k *ar)
2419 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2420 struct pci_dev *pdev = ar_pci->pdev;
2424 pci_set_drvdata(pdev, ar);
2426 ret = pci_enable_device(pdev);
2428 ath10k_err("failed to enable pci device: %d\n", ret);
2432 ret = pci_request_region(pdev, BAR_NUM, "ath");
2434 ath10k_err("failed to request region BAR%d: %d\n", BAR_NUM,
2439 /* Target expects 32 bit DMA. Enforce it. */
2440 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2442 ath10k_err("failed to set dma mask to 32-bit: %d\n", ret);
2446 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2448 ath10k_err("failed to set consistent dma mask to 32-bit: %d\n",
2453 pci_set_master(pdev);
2455 /* Workaround: Disable ASPM */
2456 pci_read_config_dword(pdev, 0x80, &lcr_val);
2457 pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2459 /* Arrange for access to Target SoC registers. */
2460 ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
2462 ath10k_err("failed to iomap BAR%d\n", BAR_NUM);
2467 ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2471 pci_clear_master(pdev);
2474 pci_release_region(pdev, BAR_NUM);
2477 pci_disable_device(pdev);
2482 static void ath10k_pci_release(struct ath10k *ar)
2484 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2485 struct pci_dev *pdev = ar_pci->pdev;
2487 pci_iounmap(pdev, ar_pci->mem);
2488 pci_release_region(pdev, BAR_NUM);
2489 pci_clear_master(pdev);
2490 pci_disable_device(pdev);
2493 static int ath10k_pci_probe(struct pci_dev *pdev,
2494 const struct pci_device_id *pci_dev)
2498 struct ath10k_pci *ar_pci;
2501 ath10k_dbg(ATH10K_DBG_PCI, "pci probe\n");
2503 ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev,
2504 &ath10k_pci_hif_ops);
2506 ath10k_err("failed to allocate core\n");
2510 ar_pci = ath10k_pci_priv(ar);
2511 ar_pci->pdev = pdev;
2512 ar_pci->dev = &pdev->dev;
2515 spin_lock_init(&ar_pci->ce_lock);
2517 ret = ath10k_pci_claim(ar);
2519 ath10k_err("failed to claim device: %d\n", ret);
2520 goto err_core_destroy;
2523 ret = ath10k_pci_wake(ar);
2525 ath10k_err("failed to wake up: %d\n", ret);
2529 chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
2530 if (chip_id == 0xffffffff) {
2531 ath10k_err("failed to get chip id\n");
2535 ret = ath10k_pci_alloc_ce(ar);
2537 ath10k_err("failed to allocate copy engine pipes: %d\n", ret);
2541 ath10k_pci_ce_deinit(ar);
2543 ret = ath10k_ce_disable_interrupts(ar);
2545 ath10k_err("failed to disable copy engine interrupts: %d\n",
2550 /* Workaround: There's no known way to mask all possible interrupts via
2551 * device CSR. The only way to make sure device doesn't assert
2552 * interrupts is to reset it. Interrupts are then disabled on host
2553 * after handlers are registered.
2555 ath10k_pci_warm_reset(ar);
2557 ret = ath10k_pci_init_irq(ar);
2559 ath10k_err("failed to init irqs: %d\n", ret);
2563 ath10k_info("pci irq %s interrupts %d irq_mode %d reset_mode %d\n",
2564 ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs,
2565 ath10k_pci_irq_mode, ath10k_pci_reset_mode);
2567 ret = ath10k_pci_request_irq(ar);
2569 ath10k_warn("failed to request irqs: %d\n", ret);
2570 goto err_deinit_irq;
2573 /* This shouldn't race as the device has been reset above. */
2574 ath10k_pci_irq_disable(ar);
2576 ret = ath10k_core_register(ar, chip_id);
2578 ath10k_err("failed to register driver core: %d\n", ret);
2585 ath10k_pci_free_irq(ar);
2588 ath10k_pci_deinit_irq(ar);
2591 ath10k_pci_free_ce(ar);
2594 ath10k_pci_sleep(ar);
2597 ath10k_pci_release(ar);
2600 ath10k_core_destroy(ar);
2605 static void ath10k_pci_remove(struct pci_dev *pdev)
2607 struct ath10k *ar = pci_get_drvdata(pdev);
2608 struct ath10k_pci *ar_pci;
2610 ath10k_dbg(ATH10K_DBG_PCI, "pci remove\n");
2615 ar_pci = ath10k_pci_priv(ar);
2620 ath10k_core_unregister(ar);
2621 ath10k_pci_free_irq(ar);
2622 ath10k_pci_deinit_irq(ar);
2623 ath10k_pci_ce_deinit(ar);
2624 ath10k_pci_free_ce(ar);
2625 ath10k_pci_sleep(ar);
2626 ath10k_pci_release(ar);
2627 ath10k_core_destroy(ar);
2630 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2632 static struct pci_driver ath10k_pci_driver = {
2633 .name = "ath10k_pci",
2634 .id_table = ath10k_pci_id_table,
2635 .probe = ath10k_pci_probe,
2636 .remove = ath10k_pci_remove,
2639 static int __init ath10k_pci_init(void)
2643 ret = pci_register_driver(&ath10k_pci_driver);
2645 ath10k_err("failed to register PCI driver: %d\n", ret);
2649 module_init(ath10k_pci_init);
2651 static void __exit ath10k_pci_exit(void)
2653 pci_unregister_driver(&ath10k_pci_driver);
2656 module_exit(ath10k_pci_exit);
2658 MODULE_AUTHOR("Qualcomm Atheros");
2659 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2660 MODULE_LICENSE("Dual BSD/GPL");
2661 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_3_FILE);
2662 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);