/******************************************************************************
 *
 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-agn-hw.h"
#include "iwl-trans-pcie-int.h"

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4
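/*
 * Every frame's byte count reported to the scheduler below is padded by
 * these two constants: 4 bytes for the frame's CRC and 4 bytes for the
 * aggregation delimiter the hardware accounts for per frame.
 */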
/**
 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
				       struct iwl_tx_queue *txq,
				       u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(struct iwl_tx_cmd *) txq->cmd[txq->q.write_ptr]->payload;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	sta_id = tx_cmd->sta_id;
	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += WEP_IV_LEN + WEP_ICV_LEN;
		break;
	}

	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
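	/*
	 * Each entry packs a 12-bit byte count with the 4-bit station id.
	 * The first TFD_QUEUE_SIZE_BC_DUP entries are mirrored past the end
	 * of the table, presumably so the hardware can read ahead across
	 * the circular-buffer wrap without wrapping its own reads.
	 */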
	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (hw_params(trans).shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		/* if we're trying to save power */
		if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Tx queue %d requesting wakeup,"
					" GP1 = 0x%x\n", txq_id, reg);
				iwl_set_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}

			iwl_write_direct32(trans, HBUS_TARG_WRPTR,
				     txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
		} else
			iwl_write32(trans, HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}
	txq->need_update = 0;
}
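/*
 * Each TFD holds up to IWL_NUM_OF_TBS transmit-buffer (TB) descriptors.
 * A TB packs a 36-bit DMA address and a 12-bit length into 48 bits:
 * 'lo' carries the low 32 address bits, and 'hi_n_len' carries the top
 * 4 address bits in its low nibble with the length in its upper 12 bits.
 * The helpers below encapsulate that layout.
 */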
static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
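	/*
	 * The high bits are shifted in two 16-bit steps: a single 32-bit
	 * shift would be undefined on configurations where dma_addr_t is
	 * only 32 bits wide, even though this branch is dead code there.
	 */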
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}
static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}
static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
		     struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		dma_unmap_single(trans->dev,
				dma_unmap_addr(meta, mapping),
				dma_unmap_len(meta, len),
				DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i),
				iwl_tfd_tb_get_len(tfd, i), dma_dir);
}
/**
 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 * @index - the index of the TFD to be freed
 * @dma_dir - the direction of the DMA mapping
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
	int index, enum dma_data_direction dma_dir)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index], dma_dir);

	/* free SKB */
	if (txq->skbs) {
		struct sk_buff *skb;

		skb = txq->skbs[index];

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_free_skb(priv(trans), skb);
			txq->skbs[index] = NULL;
		}
	}
}
int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			  IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
		return -EINVAL;

	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(trans, "Unaligned address = %llx\n",
			  (unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}
/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For Tx queue, there are low mark and high mark limits. If, after queuing
 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 ***************************************************/
int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;

	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
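/*
 * Example: with n_window = 64 and read_ptr == write_ptr (an empty ring),
 * s starts at 0, becomes 64 after the window adjustment, and the reserve
 * leaves 62 usable slots; the two held back keep a completely full ring
 * distinguishable from a completely empty one.
 */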
/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	if (WARN_ON(!is_power_of_2(count)))
		return -EINVAL;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}
static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					  struct iwl_tx_queue *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(struct iwl_tx_cmd *) txq->cmd[txq->q.read_ptr]->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans->shrd->cmd_queue)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}
static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
					u16 txq_id)
{
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_read_targ_mem(trans, tbl_dw_addr);
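	/*
	 * Each 32-bit word of the translation table serves two queues:
	 * odd queue ids map to the high halfword, even ids to the low one,
	 * so read-modify-write touches only our 16 bits.
	 */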
	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_write_targ_mem(trans, tbl_dw_addr, tbl_dw);

	return 0;
}
static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(trans,
		SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
				int txq_id, u32 index)
{
	IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d", txq_id, index & 0xff);
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			(index & 0xff) | (txq_id << 8));
	iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index);
}
void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
				   struct iwl_tx_queue *txq,
				   int tx_fifo_id, int scd_retry)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id = txq->q.id;
	int active =
		test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0;

	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
		       (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		       (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
		       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
		       SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	if (active)
		IWL_DEBUG_TX_QUEUES(trans, "Activate %s Queue %d on FIFO %d\n",
				    scd_retry ? "BA" : "AC/CMD", txq_id,
				    tx_fifo_id);
	else
		IWL_DEBUG_TX_QUEUES(trans, "Deactivate %s Queue %d\n",
				    scd_retry ? "BA" : "AC/CMD", txq_id);
}
static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
				    u8 ctx, u16 tid)
{
	const u8 *ac_to_fifo = trans_pcie->ac_to_fifo[ctx];
	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
		return ac_to_fifo[tid_to_ac[tid]];

	/* no support for TIDs 8-15 yet */
	return -EINVAL;
}
static inline bool is_agg_txqid_valid(struct iwl_trans *trans, int txq_id)
{
	if (txq_id < IWLAGN_FIRST_AMPDU_QUEUE)
		return false;
	return txq_id < (IWLAGN_FIRST_AMPDU_QUEUE +
		hw_params(trans).num_ampdu_queues);
}
void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
				 enum iwl_rxon_context_id ctx, int sta_id,
				 int tid, int frame_limit, u16 ssn)
{
	int tx_fifo, txq_id;
	u16 ra_tid;
	unsigned long flags;

	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	if (WARN_ON(sta_id == IWL_INVALID_STATION))
		return;
	if (WARN_ON(tid >= IWL_MAX_TID_COUNT))
		return;

	tx_fifo = get_fifo_from_tid(trans_pcie, ctx, tid);
	if (WARN_ON(tx_fifo < 0)) {
		IWL_ERR(trans, "txq_agg_setup, bad fifo: %d\n", tx_fifo);
		return;
	}

	txq_id = trans_pcie->agg_txq[sta_id][tid];
	if (WARN_ON_ONCE(!is_agg_txqid_valid(trans, txq_id))) {
		IWL_ERR(trans,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			hw_params(trans).num_ampdu_queues - 1);
		return;
	}

	ra_tid = BUILD_RAxTID(sta_id, tid);

	spin_lock_irqsave(&trans->shrd->lock, flags);

	/* Stop this Tx queue before configuring it */
	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, (1<<txq_id));

	/* enable aggregations for the queue */
	iwl_set_bits_prph(trans, SCD_AGGR_SEL, (1<<txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);
	iwl_trans_set_wr_ptrs(trans, txq_id, ssn);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
			sizeof(u32),
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwl_set_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
				      tx_fifo, 1);

	trans_pcie->txq[txq_id].sta_id = sta_id;
	trans_pcie->txq[txq_id].tid = tid;

	spin_unlock_irqrestore(&trans->shrd->lock, flags);
}
/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
 */
static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id,
					&trans_pcie->txq_ctx_active_msk))
			return txq_id;
	return -1;
}
int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
				int sta_id, int tid)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	txq_id = iwlagn_txq_ctx_activate_free(trans);
	if (txq_id == -1) {
		IWL_ERR(trans, "No free aggregation queue available\n");
		return -ENXIO;
	}

	trans_pcie->agg_txq[sta_id][tid] = txq_id;
	iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);

	return 0;
}
int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u8 txq_id = trans_pcie->agg_txq[sta_id][tid];

	if (WARN_ON_ONCE(!is_agg_txqid_valid(trans, txq_id))) {
		IWL_ERR(trans,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			hw_params(trans).num_ampdu_queues - 1);
		return -EINVAL;
	}

	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	iwl_clear_bits_prph(trans, SCD_AGGR_SEL, (1 << txq_id));

	trans_pcie->agg_txq[sta_id][tid] = 0;
	trans_pcie->txq[txq_id].q.read_ptr = 0;
	trans_pcie->txq[txq_id].q.write_ptr = 0;
	/* supposes that ssn_idx is valid (!= 0xFFF) */
	iwl_trans_set_wr_ptrs(trans, txq_id, 0);

	iwl_clear_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(trans_pcie, txq_id);
	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);

	return 0;
}
/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
 * @cmd: a pointer to the uCode command structure
 *
 * The function returns < 0 values to indicate that the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	u32 idx;
	u16 copy_size, cmd_size;
	bool is_ct_kill = false;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_WARN(trans, "fw recovery, no hcmd send\n");
		return -EIO;
	}

	if ((trans->shrd->ucode_owner == IWL_OWNERSHIP_TM) &&
	    !(cmd->flags & CMD_ON_DEMAND)) {
		IWL_DEBUG_HC(trans, "tm own the uCode, no regular hcmd send\n");
		return -EIO;
	}

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}
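	/*
	 * copy_size covers only the chunks (plus the header) that are
	 * memcpy'd into the pre-allocated command buffer; cmd_size also
	 * counts NOCOPY chunks, which are handed to the hardware in TFD
	 * entries of their own instead.
	 */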
	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	if (iwl_is_rfkill(trans->shrd) || iwl_is_ctkill(trans->shrd)) {
		IWL_WARN(trans, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(trans->shrd) ? "RF" : "CT");
		return -EIO;
	}

	spin_lock_irqsave(&trans->hcmd_lock, flags);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&trans->hcmd_lock, flags);

		IWL_ERR(trans, "No space in command queue\n");
		is_ct_kill = iwl_check_for_ct_kill(priv(trans));
		if (!is_ct_kill) {
			IWL_ERR(trans, "Restarting adapter due to queue full\n");
			iwlagn_fw_error(priv(trans), false);
		}
		return -ENOSPC;
	}
	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans->shrd->cmd_queue) |
					 INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */

	cmd_dest = out_cmd->payload;
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}

	IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
			"%d bytes at %d[%d]:%d\n",
			get_cmd_string(out_cmd->hdr.cmd),
			out_cmd->hdr.cmd,
			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
			q->write_ptr, idx, trans->shrd->cmd_queue);
	phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
				DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(trans, txq,
					phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(trans->dev,
					   (void *)cmd->data[i],
					   cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwlagn_unmap_tfd(trans, out_meta,
					 &txq->tfds[q->write_ptr],
					 DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}
	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(priv(trans), cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

 out:
	spin_unlock_irqrestore(&trans->hcmd_lock, flags);
	return idx;
}
/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
				   int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			  "index %d is out of range [0-%d] %d %d.\n", __func__,
			  txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}
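	/*
	 * A host command occupies exactly one queue entry, so a single
	 * completion should reclaim exactly one slot; freeing more than
	 * one here means the read index lost sync with the firmware.
	 */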
	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", idx,
					q->write_ptr, q->read_ptr);
			iwlagn_fw_error(priv(trans), false);
		}
	}
}
/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 * @handler_status: return value of the handler of the command
 *	(put in setup_rx_handlers)
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb,
			 int handler_status)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
	unsigned long flags;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans->shrd->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		  txq_id, trans->shrd->cmd_queue, sequence,
		  trans_pcie->txq[trans->shrd->cmd_queue].q.read_ptr,
		  trans_pcie->txq[trans->shrd->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}
	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->cmd[cmd_index];
	meta = &txq->meta[cmd_index];

	txq->time_stamp = jiffies;

	iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
			 DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		meta->source->handler_status = handler_status;
		rxb->page = NULL;
	}
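	/*
	 * Ownership of the Rx page has just moved to the command's issuer
	 * through reply_page; clearing rxb->page above keeps the Rx path
	 * from recycling it underneath the caller.
	 */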
	spin_lock_irqsave(&trans->hcmd_lock, flags);

	iwl_hcmd_queue_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 get_cmd_string(cmd->hdr.cmd));
		}
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(cmd->hdr.cmd));
		wake_up(&trans->shrd->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock_irqrestore(&trans->hcmd_lock, flags);
}
#define HOST_COMPLETE_TIMEOUT (2 * HZ)
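/* A synchronous command waits up to two seconds (2 * HZ jiffies) for the
 * firmware to signal completion before timing out. */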
static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	int ret;

	/* An asynchronous command can not expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
		return -EBUSY;

	ret = iwl_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_DEBUG_QUIET_RFKILL(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			  get_cmd_string(cmd->id), ret);
		return ret;
	}
	return 0;
}
static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cmd_idx;
	int ret;

	lockdep_assert_held(&trans->shrd->mutex);

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
			get_cmd_string(cmd->id));

	if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
		return -EBUSY;

	if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
		IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
			       get_cmd_string(cmd->id));
		return -ECANCELED;
	}
	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_ERR(trans, "Command %s failed: FW Error\n",
			       get_cmd_string(cmd->id));
		return -EIO;
	}
	set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
			get_cmd_string(cmd->id));
	cmd_idx = iwl_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_DEBUG_QUIET_RFKILL(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			  get_cmd_string(cmd->id), ret);
		return ret;
	}
	ret = wait_event_timeout(trans->shrd->wait_command_queue,
			!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status),
			HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
			struct iwl_tx_queue *txq =
				&trans_pcie->txq[trans->shrd->cmd_queue];
			struct iwl_queue *q = &txq->q;

			IWL_DEBUG_QUIET_RFKILL(trans,
				"Error sending %s: time out after %dms.\n",
				get_cmd_string(cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			IWL_DEBUG_QUIET_RFKILL(trans,
				"Current CMD queue read_ptr %d write_ptr %d\n",
				q->read_ptr, q->write_ptr);

			clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
			IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command "
				       "%s\n", get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			  get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		trans_pcie->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
							~CMD_WANT_SKB;
	}

	if (cmd->reply_page) {
		iwl_free_pages(trans->shrd, cmd->reply_page);
		cmd->reply_page = 0;
	}

	return ret;
}
int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	if (cmd->flags & CMD_ASYNC)
		return iwl_send_cmd_async(trans, cmd);

	return iwl_send_cmd_sync(trans, cmd);
}
/* Frees buffers until index _not_ inclusive */
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int last_to_free;
	int freed = 0;

	/* This function is not meant to release cmd queue */
	if (WARN_ON(txq_id == trans->shrd->cmd_queue))
		return 0;

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_queue_dec_wrap(index, q->n_bd);

	if ((index >= q->n_bd) ||
	   (iwl_queue_used(q, last_to_free) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			  "last_to_free %d is out of range [0-%d] %d %d.\n",
			  __func__, txq_id, last_to_free, q->n_bd,
			  q->write_ptr, q->read_ptr);
		return 0;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		return 0;
	for (;
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (WARN_ON_ONCE(txq->skbs[txq->q.read_ptr] == NULL))
			continue;

		__skb_queue_tail(skbs, txq->skbs[txq->q.read_ptr]);

		txq->skbs[txq->q.read_ptr] = NULL;

		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);

		iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr, DMA_TO_DEVICE);
		freed++;
	}
	return freed;
}