ath9k: clean up tx buffer handling
[firefly-linux-kernel-4.4.55.git] drivers/net/wireless/ath/ath9k/xmit.c
index fa12b9060b0bbc1f0d5ae375501abdb9c2d31a0b..fcbb4a856a0052b514ebae4300a40b1962298237 100644
@@ -15,6 +15,7 @@
  */
 
 #include "ath9k.h"
+#include "ar9003_mac.h"
 
 #define BITS_PER_BYTE           8
 #define OFDM_PLCP_BITS          22
@@ -59,15 +60,14 @@ static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
                                  struct ath_atx_tid *tid,
                                  struct list_head *bf_head);
 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
-                               struct ath_txq *txq,
-                               struct list_head *bf_q,
-                               int txok, int sendbar);
+                               struct ath_txq *txq, struct list_head *bf_q,
+                               struct ath_tx_status *ts, int txok, int sendbar);
 static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head);
 static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
 static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
-                             int txok);
-static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
+                             struct ath_tx_status *ts, int txok);
+static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
                             int nbad, int txok, bool update_rc);
 
 enum {
@@ -92,7 +92,6 @@ static int ath_max_4ms_framelen[3][16] = {
        }
 };
 
-
 /*********************/
 /* Aggregation logic */
 /*********************/
@@ -223,6 +222,9 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
 {
        struct ath_buf *bf;
        struct list_head bf_head;
+       struct ath_tx_status ts;
+
+       memset(&ts, 0, sizeof(ts));
        INIT_LIST_HEAD(&bf_head);
 
        for (;;) {
@@ -236,7 +238,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
                        ath_tx_update_baw(sc, tid, bf->bf_seqno);
 
                spin_unlock(&txq->axq_lock);
-               ath_tx_complete_buf(sc, bf, txq, &bf_head, 0, 0);
+               ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
                spin_lock(&txq->axq_lock);
        }
 
@@ -259,25 +261,46 @@ static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
        hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
 }
 
-static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
+static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
 {
-       struct ath_buf *tbf;
+       struct ath_buf *bf = NULL;
 
        spin_lock_bh(&sc->tx.txbuflock);
-       if (WARN_ON(list_empty(&sc->tx.txbuf))) {
+
+       if (unlikely(list_empty(&sc->tx.txbuf))) {
                spin_unlock_bh(&sc->tx.txbuflock);
                return NULL;
        }
-       tbf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
-       list_del(&tbf->list);
+
+       bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
+       list_del(&bf->list);
+
+       spin_unlock_bh(&sc->tx.txbuflock);
+
+       return bf;
+}
+
+static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
+{
+       spin_lock_bh(&sc->tx.txbuflock);
+       list_add_tail(&bf->list, &sc->tx.txbuf);
        spin_unlock_bh(&sc->tx.txbuflock);
+}
+
+static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
+{
+       struct ath_buf *tbf;
+
+       tbf = ath_tx_get_buffer(sc);
+       if (WARN_ON(!tbf))
+               return NULL;
 
        ATH_TXBUF_RESET(tbf);
 
        tbf->aphy = bf->aphy;
        tbf->bf_mpdu = bf->bf_mpdu;
        tbf->bf_buf_addr = bf->bf_buf_addr;
-       *(tbf->bf_desc) = *(bf->bf_desc);
+       memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
        tbf->bf_state = bf->bf_state;
        tbf->bf_dmacontext = bf->bf_dmacontext;
 
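The two helpers above replace the open-coded txbuflock handling that was previously duplicated in ath_clone_txbuf() and in the queue error paths. Below is a minimal user-space sketch of the same get/return pattern, not part of the patch: a pthread mutex stands in for txbuflock and all names are hypothetical.

#include <pthread.h>
#include <stddef.h>

/* Illustrative free list only; not the driver's ath_buf/list_head types. */
struct buf {
        struct buf *next;
};

static struct buf *free_list;
static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;

/* Analogue of ath_tx_get_buffer(): pop one buffer, or NULL if exhausted. */
static struct buf *buf_get(void)
{
        struct buf *b;

        pthread_mutex_lock(&free_lock);
        b = free_list;
        if (b)
                free_list = b->next;
        pthread_mutex_unlock(&free_lock);
        return b;
}

/* Analogue of ath_tx_return_buffer(): push a buffer back on the list. */
static void buf_return(struct buf *b)
{
        pthread_mutex_lock(&free_lock);
        b->next = free_list;
        free_list = b;
        pthread_mutex_unlock(&free_lock);
}

int main(void)
{
        struct buf pool[2];
        struct buf *b;

        pool[0].next = &pool[1];
        pool[1].next = NULL;
        free_list = pool;

        b = buf_get();
        if (b)
                buf_return(b);
        return 0;
}

Callers such as ath_clone_txbuf() in the hunk above are then reduced to the single NULL check after buf_get(), which is the point of the refactor.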
@@ -286,7 +309,7 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
 
 static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                                 struct ath_buf *bf, struct list_head *bf_q,
-                                int txok)
+                                struct ath_tx_status *ts, int txok)
 {
        struct ath_node *an = NULL;
        struct sk_buff *skb;
@@ -296,7 +319,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
        struct ieee80211_tx_info *tx_info;
        struct ath_atx_tid *tid = NULL;
        struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
-       struct ath_desc *ds = bf_last->bf_desc;
        struct list_head bf_head, bf_pending;
        u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
        u32 ba[WME_BA_BMP_SIZE >> 5];
@@ -325,10 +347,9 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
        memset(ba, 0, WME_BA_BMP_SIZE >> 3);
 
        if (isaggr && txok) {
-               if (ATH_DS_TX_BA(ds)) {
-                       seq_st = ATH_DS_BA_SEQ(ds);
-                       memcpy(ba, ATH_DS_BA_BITMAP(ds),
-                              WME_BA_BMP_SIZE >> 3);
+               if (ts->ts_flags & ATH9K_TX_BA) {
+                       seq_st = ts->ts_seqnum;
+                       memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
                } else {
                        /*
                         * AR5416 can become deaf/mute when BA
@@ -345,7 +366,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
        INIT_LIST_HEAD(&bf_pending);
        INIT_LIST_HEAD(&bf_head);
 
-       nbad = ath_tx_num_badfrms(sc, bf, txok);
+       nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
        while (bf) {
                txfail = txpending = 0;
                bf_next = bf->bf_next;
@@ -359,7 +380,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                        acked_cnt++;
                } else {
                        if (!(tid->state & AGGR_CLEANUP) &&
-                           ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
+                           !bf_last->bf_tx_aborted) {
                                if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
                                        ath_tx_set_retry(sc, txq, bf);
                                        txpending = 1;
@@ -378,7 +399,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                        }
                }
 
-               if (bf_next == NULL) {
+               if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
+                   bf_next == NULL) {
                        /*
                         * Make sure the last desc is reclaimed if it
                         * not a holding desc.
@@ -402,45 +424,53 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                        spin_unlock_bh(&txq->axq_lock);
 
                        if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
-                               ath_tx_rc_status(bf, ds, nbad, txok, true);
+                               ath_tx_rc_status(bf, ts, nbad, txok, true);
                                rc_update = false;
                        } else {
-                               ath_tx_rc_status(bf, ds, nbad, txok, false);
+                               ath_tx_rc_status(bf, ts, nbad, txok, false);
                        }
 
-                       ath_tx_complete_buf(sc, bf, txq, &bf_head, !txfail, sendbar);
+                       ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
+                               !txfail, sendbar);
                } else {
                        /* retry the un-acked ones */
-                       if (bf->bf_next == NULL && bf_last->bf_stale) {
-                               struct ath_buf *tbf;
-
-                               tbf = ath_clone_txbuf(sc, bf_last);
-                               /*
-                                * Update tx baw and complete the frame with
-                                * failed status if we run out of tx buf
-                                */
-                               if (!tbf) {
-                                       spin_lock_bh(&txq->axq_lock);
-                                       ath_tx_update_baw(sc, tid,
-                                                         bf->bf_seqno);
-                                       spin_unlock_bh(&txq->axq_lock);
-
-                                       bf->bf_state.bf_type |= BUF_XRETRY;
-                                       ath_tx_rc_status(bf, ds, nbad,
-                                                        0, false);
-                                       ath_tx_complete_buf(sc, bf, txq,
-                                                           &bf_head, 0, 0);
-                                       break;
+                       if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
+                               if (bf->bf_next == NULL && bf_last->bf_stale) {
+                                       struct ath_buf *tbf;
+
+                                       tbf = ath_clone_txbuf(sc, bf_last);
+                                       /*
+                                        * Update tx baw and complete the
+                                        * frame with failed status if we
+                                        * run out of tx buf.
+                                        */
+                                       if (!tbf) {
+                                               spin_lock_bh(&txq->axq_lock);
+                                               ath_tx_update_baw(sc, tid,
+                                                               bf->bf_seqno);
+                                               spin_unlock_bh(&txq->axq_lock);
+
+                                               bf->bf_state.bf_type |=
+                                                       BUF_XRETRY;
+                                               ath_tx_rc_status(bf, ts, nbad,
+                                                               0, false);
+                                               ath_tx_complete_buf(sc, bf, txq,
+                                                                   &bf_head,
+                                                                   ts, 0, 0);
+                                               break;
+                                       }
+
+                                       ath9k_hw_cleartxdesc(sc->sc_ah,
+                                                            tbf->bf_desc);
+                                       list_add_tail(&tbf->list, &bf_head);
+                               } else {
+                                       /*
+                                        * Clear descriptor status words for
+                                        * software retry
+                                        */
+                                       ath9k_hw_cleartxdesc(sc->sc_ah,
+                                                            bf->bf_desc);
                                }
-
-                               ath9k_hw_cleartxdesc(sc->sc_ah, tbf->bf_desc);
-                               list_add_tail(&tbf->list, &bf_head);
-                       } else {
-                               /*
-                                * Clear descriptor status words for
-                                * software retry
-                                */
-                               ath9k_hw_cleartxdesc(sc->sc_ah, bf->bf_desc);
                        }
 
                        /*
@@ -664,7 +694,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
                bpad = PADBYTES(al_delta) + (ndelim << 2);
 
                bf->bf_next = NULL;
-               bf->bf_desc->ds_link = 0;
+               ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);
 
                /* link buffers of this frame to the aggregate */
                ath_tx_addto_baw(sc, tid, bf);
@@ -672,7 +702,8 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
                list_move_tail(&bf->list, bf_q);
                if (bf_prev) {
                        bf_prev->bf_next = bf;
-                       bf_prev->bf_desc->ds_link = bf->bf_daddr;
+                       ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
+                                              bf->bf_daddr);
                }
                bf_prev = bf;
 
@@ -752,8 +783,11 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
        struct ath_node *an = (struct ath_node *)sta->drv_priv;
        struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
        struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
+       struct ath_tx_status ts;
        struct ath_buf *bf;
        struct list_head bf_head;
+
+       memset(&ts, 0, sizeof(ts));
        INIT_LIST_HEAD(&bf_head);
 
        if (txtid->state & AGGR_CLEANUP)
@@ -780,7 +814,7 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
                }
                list_move_tail(&bf->list, &bf_head);
                ath_tx_update_baw(sc, txtid, bf->bf_seqno);
-               ath_tx_complete_buf(sc, bf, txq, &bf_head, 0, 0);
+               ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
        }
        spin_unlock_bh(&txq->axq_lock);
 
@@ -849,7 +883,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_tx_queue_info qi;
-       int qnum;
+       int qnum, i;
 
        memset(&qi, 0, sizeof(qi));
        qi.tqi_subtype = subtype;
@@ -873,11 +907,16 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
         * The UAPSD queue is an exception, since we take a desc-
         * based intr on the EOSP frames.
         */
-       if (qtype == ATH9K_TX_QUEUE_UAPSD)
-               qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
-       else
-               qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
-                       TXQ_FLAG_TXDESCINT_ENABLE;
+       if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+               qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
+                               TXQ_FLAG_TXERRINT_ENABLE;
+       } else {
+               if (qtype == ATH9K_TX_QUEUE_UAPSD)
+                       qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
+               else
+                       qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
+                                       TXQ_FLAG_TXDESCINT_ENABLE;
+       }
        qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
        if (qnum == -1) {
                /*
@@ -904,6 +943,11 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
                txq->axq_depth = 0;
                txq->axq_tx_inprogress = false;
                sc->tx.txqsetup |= 1<<qnum;
+
+               txq->txq_headidx = txq->txq_tailidx = 0;
+               for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
+                       INIT_LIST_HEAD(&txq->txq_fifo[i]);
+               INIT_LIST_HEAD(&txq->txq_fifo_pending);
        }
        return &sc->tx.txq[qnum];
 }
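On EDMA-capable chips (ATH9K_HW_CAP_EDMA) each tx queue now carries a small ring of ATH_TXFIFO_DEPTH frame lists plus a pending list, indexed by txq_headidx and txq_tailidx as initialized above. The following stand-alone sketch, not part of the patch, models just that bounded-ring bookkeeping; the plain modulo wrap stands in for the driver's INCR() helper and all names are hypothetical.

#include <stdbool.h>
#include <stdio.h>

#define TXFIFO_DEPTH 8           /* stand-in for ATH_TXFIFO_DEPTH */

struct txq_model {
        unsigned int headidx;    /* next slot to fill (txq_headidx) */
        unsigned int tailidx;    /* next slot to complete (txq_tailidx) */
        unsigned int depth;      /* frames currently queued to hardware */
};

/* Enqueue: take a FIFO slot if one is free, otherwise defer
 * (the driver parks the frame list on txq_fifo_pending). */
static bool txq_push(struct txq_model *q)
{
        if (q->depth >= TXFIFO_DEPTH)
                return false;
        q->headidx = (q->headidx + 1) % TXFIFO_DEPTH;
        q->depth++;
        return true;
}

/* Completion: the tasklet consumes the tail slot and advances it. */
static void txq_pop(struct txq_model *q)
{
        if (!q->depth)
                return;
        q->tailidx = (q->tailidx + 1) % TXFIFO_DEPTH;
        q->depth--;
}

int main(void)
{
        struct txq_model q = { 0, 0, 0 };

        while (txq_push(&q))
                ;
        txq_pop(&q);
        printf("head=%u tail=%u depth=%u\n", q.headidx, q.tailidx, q.depth);
        return 0;
}

ath_tx_txqaddbuf() and ath_tx_edma_tasklet() later in this patch follow the same head/tail discipline on the real txq_fifo[] lists.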
@@ -1028,45 +1072,63 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
 {
        struct ath_buf *bf, *lastbf;
        struct list_head bf_head;
+       struct ath_tx_status ts;
 
+       memset(&ts, 0, sizeof(ts));
        INIT_LIST_HEAD(&bf_head);
 
        for (;;) {
                spin_lock_bh(&txq->axq_lock);
 
-               if (list_empty(&txq->axq_q)) {
-                       txq->axq_link = NULL;
-                       spin_unlock_bh(&txq->axq_lock);
-                       break;
-               }
-
-               bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
+               if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+                       if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
+                               txq->txq_headidx = txq->txq_tailidx = 0;
+                               spin_unlock_bh(&txq->axq_lock);
+                               break;
+                       } else {
+                               bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
+                                                     struct ath_buf, list);
+                       }
+               } else {
+                       if (list_empty(&txq->axq_q)) {
+                               txq->axq_link = NULL;
+                               spin_unlock_bh(&txq->axq_lock);
+                               break;
+                       }
+                       bf = list_first_entry(&txq->axq_q, struct ath_buf,
+                                             list);
 
-               if (bf->bf_stale) {
-                       list_del(&bf->list);
-                       spin_unlock_bh(&txq->axq_lock);
+                       if (bf->bf_stale) {
+                               list_del(&bf->list);
+                               spin_unlock_bh(&txq->axq_lock);
 
-                       spin_lock_bh(&sc->tx.txbuflock);
-                       list_add_tail(&bf->list, &sc->tx.txbuf);
-                       spin_unlock_bh(&sc->tx.txbuflock);
-                       continue;
+                               ath_tx_return_buffer(sc, bf);
+                               continue;
+                       }
                }
 
                lastbf = bf->bf_lastbf;
                if (!retry_tx)
-                       lastbf->bf_desc->ds_txstat.ts_flags =
-                               ATH9K_TX_SW_ABORTED;
+                       lastbf->bf_tx_aborted = true;
+
+               if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+                       list_cut_position(&bf_head,
+                                         &txq->txq_fifo[txq->txq_tailidx],
+                                         &lastbf->list);
+                       INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
+               } else {
+                       /* remove ath_buf's of the same mpdu from txq */
+                       list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
+               }
 
-               /* remove ath_buf's of the same mpdu from txq */
-               list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
                txq->axq_depth--;
 
                spin_unlock_bh(&txq->axq_lock);
 
                if (bf_isampdu(bf))
-                       ath_tx_complete_aggr(sc, txq, bf, &bf_head, 0);
+                       ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
                else
-                       ath_tx_complete_buf(sc, bf, txq, &bf_head, 0, 0);
+                       ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
        }
 
        spin_lock_bh(&txq->axq_lock);
@@ -1081,6 +1143,27 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
                        spin_unlock_bh(&txq->axq_lock);
                }
        }
+
+       if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+               spin_lock_bh(&txq->axq_lock);
+               while (!list_empty(&txq->txq_fifo_pending)) {
+                       bf = list_first_entry(&txq->txq_fifo_pending,
+                                             struct ath_buf, list);
+                       list_cut_position(&bf_head,
+                                         &txq->txq_fifo_pending,
+                                         &bf->bf_lastbf->list);
+                       spin_unlock_bh(&txq->axq_lock);
+
+                       if (bf_isampdu(bf))
+                               ath_tx_complete_aggr(sc, txq, bf, &bf_head,
+                                                    &ts, 0);
+                       else
+                               ath_tx_complete_buf(sc, bf, txq, &bf_head,
+                                                   &ts, 0, 0);
+                       spin_lock_bh(&txq->axq_lock);
+               }
+               spin_unlock_bh(&txq->axq_lock);
+       }
 }
 
 void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
@@ -1218,44 +1301,47 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
 
        bf = list_first_entry(head, struct ath_buf, list);
 
-       list_splice_tail_init(head, &txq->axq_q);
-       txq->axq_depth++;
-
        ath_print(common, ATH_DBG_QUEUE,
                  "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
 
-       if (txq->axq_link == NULL) {
+       if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+               if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
+                       list_splice_tail_init(head, &txq->txq_fifo_pending);
+                       return;
+               }
+               if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
+                       ath_print(common, ATH_DBG_XMIT,
+                                 "Initializing tx fifo %d which "
+                                 "is non-empty\n",
+                                 txq->txq_headidx);
+               INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
+               list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
+               INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
                ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
                ath_print(common, ATH_DBG_XMIT,
                          "TXDP[%u] = %llx (%p)\n",
                          txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
        } else {
-               *txq->axq_link = bf->bf_daddr;
-               ath_print(common, ATH_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n",
-                         txq->axq_qnum, txq->axq_link,
-                         ito64(bf->bf_daddr), bf->bf_desc);
-       }
-       txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
-       ath9k_hw_txstart(ah, txq->axq_qnum);
-}
-
-static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
-{
-       struct ath_buf *bf = NULL;
+               list_splice_tail_init(head, &txq->axq_q);
 
-       spin_lock_bh(&sc->tx.txbuflock);
-
-       if (unlikely(list_empty(&sc->tx.txbuf))) {
-               spin_unlock_bh(&sc->tx.txbuflock);
-               return NULL;
+               if (txq->axq_link == NULL) {
+                       ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
+                       ath_print(common, ATH_DBG_XMIT,
+                                       "TXDP[%u] = %llx (%p)\n",
+                                       txq->axq_qnum, ito64(bf->bf_daddr),
+                                       bf->bf_desc);
+               } else {
+                       *txq->axq_link = bf->bf_daddr;
+                       ath_print(common, ATH_DBG_XMIT,
+                                       "link[%u] (%p)=%llx (%p)\n",
+                                       txq->axq_qnum, txq->axq_link,
+                                       ito64(bf->bf_daddr), bf->bf_desc);
+               }
+               ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
+                                      &txq->axq_link);
+               ath9k_hw_txstart(ah, txq->axq_qnum);
        }
-
-       bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
-       list_del(&bf->list);
-
-       spin_unlock_bh(&sc->tx.txbuflock);
-
-       return bf;
+       txq->axq_depth++;
 }
 
 static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
@@ -1353,25 +1439,6 @@ static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
        return htype;
 }
 
-static bool is_pae(struct sk_buff *skb)
-{
-       struct ieee80211_hdr *hdr;
-       __le16 fc;
-
-       hdr = (struct ieee80211_hdr *)skb->data;
-       fc = hdr->frame_control;
-
-       if (ieee80211_is_data(fc)) {
-               if (ieee80211_is_nullfunc(fc) ||
-                   /* Port Access Entity (IEEE 802.1X) */
-                   (skb->protocol == cpu_to_be16(ETH_P_PAE))) {
-                       return true;
-               }
-       }
-
-       return false;
-}
-
 static int get_hw_crypto_keytype(struct sk_buff *skb)
 {
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -1421,8 +1488,7 @@ static void assign_aggr_tid_seqno(struct sk_buff *skb,
        INCR(tid->seq_next, IEEE80211_SEQ_MAX);
 }
 
-static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
-                         struct ath_txq *txq)
+static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
 {
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        int flags = 0;
@@ -1433,6 +1499,9 @@ static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
        if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
                flags |= ATH9K_TXDESC_NOACK;
 
+       if (use_ldpc)
+               flags |= ATH9K_TXDESC_LDPC;
+
        return flags;
 }
 
@@ -1498,26 +1567,6 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
        if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
                ctsrate |= rate->hw_value_short;
 
-       /*
-        * ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive.
-        * Check the first rate in the series to decide whether RTS/CTS
-        * or CTS-to-self has to be used.
-        */
-       if (rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
-               flags = ATH9K_TXDESC_CTSENA;
-       else if (rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
-               flags = ATH9K_TXDESC_RTSENA;
-
-       /* FIXME: Handle aggregation protection */
-       if (sc->config.ath_aggr_prot &&
-           (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
-               flags = ATH9K_TXDESC_RTSENA;
-       }
-
-       /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
-       if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
-               flags &= ~(ATH9K_TXDESC_RTSENA);
-
        for (i = 0; i < 4; i++) {
                bool is_40, is_sgi, is_sp;
                int phy;
@@ -1529,8 +1578,15 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
                series[i].Tries = rates[i].count;
                series[i].ChSel = common->tx_chainmask;
 
-               if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)
+               if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
+                   (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
+                       series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
+                       flags |= ATH9K_TXDESC_RTSENA;
+               } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
                        series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
+                       flags |= ATH9K_TXDESC_CTSENA;
+               }
+
                if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
                        series[i].RateFlags |= ATH9K_RATESERIES_2040;
                if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
@@ -1568,6 +1624,14 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
                        phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
        }
 
+       /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
+       if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
+               flags &= ~ATH9K_TXDESC_RTSENA;
+
+       /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
+       if (flags & ATH9K_TXDESC_RTSENA)
+               flags &= ~ATH9K_TXDESC_CTSENA;
+
        /* set dur_update_en for l-sig computation except for PS-Poll frames */
        ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
                                     bf->bf_lastbf->bf_desc,
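With this hunk the protection flags are resolved per rate series inside the loop and then reconciled after it: RTS is dropped again for aggregates above the AR5416 8K limit, and RTS wins over CTS-to-self since the two are mutually exclusive. A compact sketch of just that precedence logic follows; it is illustrative only, with made-up flag values standing in for the ATH9K_TXDESC_* bits.

#include <stdbool.h>
#include <stdio.h>

#define FLAG_RTSENA  0x1         /* stand-in for ATH9K_TXDESC_RTSENA */
#define FLAG_CTSENA  0x2         /* stand-in for ATH9K_TXDESC_CTSENA */

/* Resolve protection flags in the order the reworked ath_buf_set_rate()
 * applies them: per-series choice, aggregate size limit, RTS precedence. */
static unsigned int resolve_prot(bool want_rts, bool want_cts,
                                 bool is_aggr, unsigned int aggr_len,
                                 unsigned int rts_aggr_limit)
{
        unsigned int flags = 0;

        if (want_rts)
                flags |= FLAG_RTSENA;
        else if (want_cts)
                flags |= FLAG_CTSENA;

        /* RTS cannot cover aggregates larger than the hardware limit. */
        if (is_aggr && aggr_len > rts_aggr_limit)
                flags &= ~FLAG_RTSENA;

        /* RTS and CTS-to-self are mutually exclusive; RTS takes precedence. */
        if (flags & FLAG_RTSENA)
                flags &= ~FLAG_CTSENA;

        return flags;
}

int main(void)
{
        printf("0x%x\n", resolve_prot(true, true, true, 10000, 8192));
        return 0;
}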
@@ -1589,15 +1653,16 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
        int hdrlen;
        __le16 fc;
        int padpos, padsize;
+       bool use_ldpc = false;
 
        tx_info->pad[0] = 0;
        switch (txctl->frame_type) {
-       case ATH9K_NOT_INTERNAL:
+       case ATH9K_IFT_NOT_INTERNAL:
                break;
-       case ATH9K_INT_PAUSE:
+       case ATH9K_IFT_PAUSE:
                tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
                /* fall through */
-       case ATH9K_INT_UNPAUSE:
+       case ATH9K_IFT_UNPAUSE:
                tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
                break;
        }
@@ -1615,10 +1680,13 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
                bf->bf_frmlen -= padsize;
        }
 
-       if (conf_is_ht(&hw->conf) && !is_pae(skb))
+       if (conf_is_ht(&hw->conf)) {
                bf->bf_state.bf_type |= BUF_HT;
+               if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
+                       use_ldpc = true;
+       }
 
-       bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);
+       bf->bf_flags = setup_tx_flags(skb, use_ldpc);
 
        bf->bf_keytype = get_hw_crypto_keytype(skb);
        if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
@@ -1648,7 +1716,7 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
        /* tag if this is a nullfunc frame to enable PS when AP acks it */
        if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
                bf->bf_isnullfunc = true;
-               sc->sc_flags &= ~SC_OP_NULLFUNC_COMPLETED;
+               sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
        } else
                bf->bf_isnullfunc = false;
 
@@ -1677,8 +1745,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
        list_add_tail(&bf->list, &bf_head);
 
        ds = bf->bf_desc;
-       ds->ds_link = 0;
-       ds->ds_data = bf->bf_buf_addr;
+       ath9k_hw_set_desc_link(ah, ds, 0);
 
        ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
                               bf->bf_keyix, bf->bf_keytype, bf->bf_flags);
@@ -1687,7 +1754,9 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
                            skb->len,   /* segment length */
                            true,       /* first segment */
                            true,       /* last segment */
-                           ds);        /* first descriptor */
+                           ds,         /* first descriptor */
+                           bf->bf_buf_addr,
+                           txctl->txq->axq_qnum);
 
        spin_lock_bh(&txctl->txq->axq_lock);
 
@@ -1756,9 +1825,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
                }
                spin_unlock_bh(&txq->axq_lock);
 
-               spin_lock_bh(&sc->tx.txbuflock);
-               list_add_tail(&bf->list, &sc->tx.txbuf);
-               spin_unlock_bh(&sc->tx.txbuflock);
+               ath_tx_return_buffer(sc, bf);
 
                return r;
        }
@@ -1858,15 +1925,15 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
                skb_pull(skb, padsize);
        }
 
-       if (sc->sc_flags & SC_OP_WAIT_FOR_TX_ACK) {
-               sc->sc_flags &= ~SC_OP_WAIT_FOR_TX_ACK;
+       if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
+               sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
                ath_print(common, ATH_DBG_PS,
                          "Going back to sleep after having "
-                         "received TX status (0x%x)\n",
-                       sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
-                                       SC_OP_WAIT_FOR_CAB |
-                                       SC_OP_WAIT_FOR_PSPOLL_DATA |
-                                       SC_OP_WAIT_FOR_TX_ACK));
+                         "received TX status (0x%lx)\n",
+                       sc->ps_flags & (PS_WAIT_FOR_BEACON |
+                                       PS_WAIT_FOR_CAB |
+                                       PS_WAIT_FOR_PSPOLL_DATA |
+                                       PS_WAIT_FOR_TX_ACK));
        }
 
        if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
@@ -1876,9 +1943,8 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
 }
 
 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
-                               struct ath_txq *txq,
-                               struct list_head *bf_q,
-                               int txok, int sendbar)
+                               struct ath_txq *txq, struct list_head *bf_q,
+                               struct ath_tx_status *ts, int txok, int sendbar)
 {
        struct sk_buff *skb = bf->bf_mpdu;
        unsigned long flags;
@@ -1896,7 +1962,7 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
 
        dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
        ath_tx_complete(sc, skb, bf->aphy, tx_flags);
-       ath_debug_stat_tx(sc, txq, bf);
+       ath_debug_stat_tx(sc, txq, bf, ts);
 
        /*
         * Return the list of ath_buf of this mpdu to free queue
@@ -1907,23 +1973,21 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
 }
 
 static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
-                             int txok)
+                             struct ath_tx_status *ts, int txok)
 {
-       struct ath_buf *bf_last = bf->bf_lastbf;
-       struct ath_desc *ds = bf_last->bf_desc;
        u16 seq_st = 0;
        u32 ba[WME_BA_BMP_SIZE >> 5];
        int ba_index;
        int nbad = 0;
        int isaggr = 0;
 
-       if (ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
+       if (bf->bf_tx_aborted)
                return 0;
 
        isaggr = bf_isaggr(bf);
        if (isaggr) {
-               seq_st = ATH_DS_BA_SEQ(ds);
-               memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
+               seq_st = ts->ts_seqnum;
+               memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
        }
 
        while (bf) {
@@ -1937,7 +2001,7 @@ static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
        return nbad;
 }
 
-static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
+static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
                             int nbad, int txok, bool update_rc)
 {
        struct sk_buff *skb = bf->bf_mpdu;
@@ -1947,24 +2011,24 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
        u8 i, tx_rateindex;
 
        if (txok)
-               tx_info->status.ack_signal = ds->ds_txstat.ts_rssi;
+               tx_info->status.ack_signal = ts->ts_rssi;
 
-       tx_rateindex = ds->ds_txstat.ts_rateindex;
+       tx_rateindex = ts->ts_rateindex;
        WARN_ON(tx_rateindex >= hw->max_rates);
 
-       if (update_rc)
-               tx_info->pad[0] |= ATH_TX_INFO_UPDATE_RC;
-       if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
+       if (ts->ts_status & ATH9K_TXERR_FILT)
                tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+       if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc)
+               tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
 
-       if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
+       if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
            (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
                if (ieee80211_is_data(hdr->frame_control)) {
-                       if (ds->ds_txstat.ts_flags &
+                       if (ts->ts_flags &
                            (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
                                tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
-                       if ((ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY) ||
-                           (ds->ds_txstat.ts_status & ATH9K_TXERR_FIFO))
+                       if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
+                           (ts->ts_status & ATH9K_TXERR_FIFO))
                                tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
                        tx_info->status.ampdu_len = bf->bf_nframes;
                        tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
@@ -2002,6 +2066,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
        struct ath_buf *bf, *lastbf, *bf_held = NULL;
        struct list_head bf_head;
        struct ath_desc *ds;
+       struct ath_tx_status ts;
        int txok;
        int status;
 
@@ -2041,7 +2106,8 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
                lastbf = bf->bf_lastbf;
                ds = lastbf->bf_desc;
 
-               status = ath9k_hw_txprocdesc(ah, ds);
+               memset(&ts, 0, sizeof(ts));
+               status = ath9k_hw_txprocdesc(ah, ds, &ts);
                if (status == -EINPROGRESS) {
                        spin_unlock_bh(&txq->axq_lock);
                        break;
@@ -2052,12 +2118,11 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
                 * can disable RX.
                 */
                if (bf->bf_isnullfunc &&
-                   (ds->ds_txstat.ts_status & ATH9K_TX_ACKED)) {
-                       if ((sc->sc_flags & SC_OP_PS_ENABLED)) {
-                               sc->ps_enabled = true;
-                               ath9k_hw_setrxabort(sc->sc_ah, 1);
-                       } else
-                               sc->sc_flags |= SC_OP_NULLFUNC_COMPLETED;
+                   (ts.ts_status & ATH9K_TX_ACKED)) {
+                       if ((sc->ps_flags & PS_ENABLED))
+                               ath9k_enable_ps(sc);
+                       else
+                               sc->ps_flags |= PS_NULLFUNC_COMPLETED;
                }
 
                /*
@@ -2072,31 +2137,30 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
                                &txq->axq_q, lastbf->list.prev);
 
                txq->axq_depth--;
-               txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_MASK);
+               txok = !(ts.ts_status & ATH9K_TXERR_MASK);
                txq->axq_tx_inprogress = false;
+               if (bf_held)
+                       list_del(&bf_held->list);
                spin_unlock_bh(&txq->axq_lock);
 
-               if (bf_held) {
-                       spin_lock_bh(&sc->tx.txbuflock);
-                       list_move_tail(&bf_held->list, &sc->tx.txbuf);
-                       spin_unlock_bh(&sc->tx.txbuflock);
-               }
+               if (bf_held)
+                       ath_tx_return_buffer(sc, bf_held);
 
                if (!bf_isampdu(bf)) {
                        /*
                         * This frame is sent out as a single frame.
                         * Use hardware retry status for this frame.
                         */
-                       bf->bf_retries = ds->ds_txstat.ts_longretry;
-                       if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
+                       bf->bf_retries = ts.ts_longretry;
+                       if (ts.ts_status & ATH9K_TXERR_XRETRY)
                                bf->bf_state.bf_type |= BUF_XRETRY;
-                       ath_tx_rc_status(bf, ds, 0, txok, true);
+                       ath_tx_rc_status(bf, &ts, 0, txok, true);
                }
 
                if (bf_isampdu(bf))
-                       ath_tx_complete_aggr(sc, txq, bf, &bf_head, txok);
+                       ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
                else
-                       ath_tx_complete_buf(sc, bf, txq, &bf_head, txok, 0);
+                       ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
 
                ath_wake_mac80211_queue(sc, txq);
 
@@ -2158,10 +2222,119 @@ void ath_tx_tasklet(struct ath_softc *sc)
        }
 }
 
+void ath_tx_edma_tasklet(struct ath_softc *sc)
+{
+       struct ath_tx_status txs;
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_txq *txq;
+       struct ath_buf *bf, *lastbf;
+       struct list_head bf_head;
+       int status;
+       int txok;
+
+       for (;;) {
+               status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
+               if (status == -EINPROGRESS)
+                       break;
+               if (status == -EIO) {
+                       ath_print(common, ATH_DBG_XMIT,
+                                 "Error processing tx status\n");
+                       break;
+               }
+
+               /* Skip beacon completions */
+               if (txs.qid == sc->beacon.beaconq)
+                       continue;
+
+               txq = &sc->tx.txq[txs.qid];
+
+               spin_lock_bh(&txq->axq_lock);
+               if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
+                       spin_unlock_bh(&txq->axq_lock);
+                       return;
+               }
+
+               bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
+                                     struct ath_buf, list);
+               lastbf = bf->bf_lastbf;
+
+               INIT_LIST_HEAD(&bf_head);
+               list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
+                                 &lastbf->list);
+               INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
+               txq->axq_depth--;
+               txq->axq_tx_inprogress = false;
+               spin_unlock_bh(&txq->axq_lock);
+
+               txok = !(txs.ts_status & ATH9K_TXERR_MASK);
+
+               if (!bf_isampdu(bf)) {
+                       bf->bf_retries = txs.ts_longretry;
+                       if (txs.ts_status & ATH9K_TXERR_XRETRY)
+                               bf->bf_state.bf_type |= BUF_XRETRY;
+                       ath_tx_rc_status(bf, &txs, 0, txok, true);
+               }
+
+               if (bf_isampdu(bf))
+                       ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
+               else
+                       ath_tx_complete_buf(sc, bf, txq, &bf_head,
+                                           &txs, txok, 0);
+
+               spin_lock_bh(&txq->axq_lock);
+               if (!list_empty(&txq->txq_fifo_pending)) {
+                       INIT_LIST_HEAD(&bf_head);
+                       bf = list_first_entry(&txq->txq_fifo_pending,
+                               struct ath_buf, list);
+                       list_cut_position(&bf_head, &txq->txq_fifo_pending,
+                               &bf->bf_lastbf->list);
+                       ath_tx_txqaddbuf(sc, txq, &bf_head);
+               } else if (sc->sc_flags & SC_OP_TXAGGR)
+                       ath_txq_schedule(sc, txq);
+               spin_unlock_bh(&txq->axq_lock);
+       }
+}
+
 /*****************/
 /* Init, Cleanup */
 /*****************/
 
+static int ath_txstatus_setup(struct ath_softc *sc, int size)
+{
+       struct ath_descdma *dd = &sc->txsdma;
+       u8 txs_len = sc->sc_ah->caps.txs_len;
+
+       dd->dd_desc_len = size * txs_len;
+       dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
+                                        &dd->dd_desc_paddr, GFP_KERNEL);
+       if (!dd->dd_desc)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static int ath_tx_edma_init(struct ath_softc *sc)
+{
+       int err;
+
+       err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
+       if (!err)
+               ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
+                                         sc->txsdma.dd_desc_paddr,
+                                         ATH_TXSTATUS_RING_SIZE);
+
+       return err;
+}
+
+static void ath_tx_edma_cleanup(struct ath_softc *sc)
+{
+       struct ath_descdma *dd = &sc->txsdma;
+
+       dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
+                         dd->dd_desc_paddr);
+}
+
 int ath_tx_init(struct ath_softc *sc, int nbufs)
 {
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
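In the hunk above, ath_txstatus_setup() allocates one coherent DMA ring of ATH_TXSTATUS_RING_SIZE entries of caps.txs_len bytes each, and ath_tx_edma_tasklet() drains that ring entry by entry until ath9k_hw_txprocdesc() reports -EINPROGRESS. A rough user-space model of that consumer loop is sketched below; it is not driver code, a ready flag takes the place of the hardware-written status and all names are hypothetical.

#include <stdio.h>

#define STATUS_RING_SIZE 4       /* stand-in for ATH_TXSTATUS_RING_SIZE */

struct tx_status {
        int ready;               /* hardware would set this on completion */
        int qid;                 /* queue the completed frame came from */
};

static struct tx_status ring[STATUS_RING_SIZE];
static unsigned int ring_tail;

/* Analogue of the ath_tx_edma_tasklet() loop: stop at the first
 * not-yet-completed entry, dispatch the rest by queue id. */
static void drain_status_ring(void)
{
        for (;;) {
                struct tx_status *ts = &ring[ring_tail];

                if (!ts->ready)
                        break;   /* corresponds to -EINPROGRESS in the driver */

                printf("completion on queue %d\n", ts->qid);
                ts->ready = 0;
                ring_tail = (ring_tail + 1) % STATUS_RING_SIZE;
        }
}

int main(void)
{
        ring[0].ready = 1;
        ring[0].qid = 2;
        drain_status_ring();
        return 0;
}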
@@ -2170,7 +2343,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
        spin_lock_init(&sc->tx.txbuflock);
 
        error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
-                                 "tx", nbufs, 1);
+                                 "tx", nbufs, 1, 1);
        if (error != 0) {
                ath_print(common, ATH_DBG_FATAL,
                          "Failed to allocate tx descriptors: %d\n", error);
@@ -2178,7 +2351,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
        }
 
        error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
-                                 "beacon", ATH_BCBUF, 1);
+                                 "beacon", ATH_BCBUF, 1, 1);
        if (error != 0) {
                ath_print(common, ATH_DBG_FATAL,
                          "Failed to allocate beacon descriptors: %d\n", error);
@@ -2187,6 +2360,12 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
 
        INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
 
+       if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+               error = ath_tx_edma_init(sc);
+               if (error)
+                       goto err;
+       }
+
 err:
        if (error != 0)
                ath_tx_cleanup(sc);
@@ -2201,6 +2380,9 @@ void ath_tx_cleanup(struct ath_softc *sc)
 
        if (sc->tx.txdma.dd_desc_len != 0)
                ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
+
+       if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+               ath_tx_edma_cleanup(sc);
 }
 
 void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
@@ -2264,7 +2446,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
                if (ATH_TXQ_SETUP(sc, i)) {
                        txq = &sc->tx.txq[i];
 
-                       spin_lock(&txq->axq_lock);
+                       spin_lock_bh(&txq->axq_lock);
 
                        list_for_each_entry_safe(ac,
                                        ac_tmp, &txq->axq_acq, list) {
@@ -2285,7 +2467,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
                                }
                        }
 
-                       spin_unlock(&txq->axq_lock);
+                       spin_unlock_bh(&txq->axq_lock);
                }
        }
 }