Merge tag 'iwlwifi-next-for-kalle-2015-04-02' of https://git.kernel.org/pub/scm/linux...
[firefly-linux-kernel-4.4.55.git] / drivers/net/wireless/iwlwifi/mvm/tx.c
index 07304e1fd64aa70b41d4c9a18762b48924a6f250..ef32e177f662b3ba03772e02b1c9b9e512bcd64f 100644
@@ -664,6 +664,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
                info->status.rates[0].count = tx_resp->failure_frame + 1;
                iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
                                            info);
+               info->status.status_driver_data[1] =
+                       (void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate);
 
                /* Single frame failure in an AMPDU queue => send BAR */
                if (txq_id >= mvm->first_agg_queue &&
@@ -909,6 +911,8 @@ static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
        info->status.tx_time = tid_data->tx_time;
        info->status.status_driver_data[0] =
                (void *)(uintptr_t)tid_data->reduced_tpc;
+       info->status.status_driver_data[1] =
+               (void *)(uintptr_t)tid_data->rate_n_flags;
 }
 
 int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
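
Both hunks above stash a 32-bit rate value (tx_resp->initial_rate in the single-frame response path, tid_data->rate_n_flags in the BA path) into mac80211's status_driver_data[] slot by widening it through uintptr_t, so the value survives the trip through a void * without any allocation. A minimal sketch of the pattern, with hypothetical helper names (the consumer side is not part of this diff):

#include <linux/types.h>
#include <net/mac80211.h>

/* Producer side, as in the hunks above: pack the rate into a void * slot. */
static inline void example_stash_rate(struct ieee80211_tx_info *info,
                                      u32 rate_n_flags)
{
        info->status.status_driver_data[1] =
                (void *)(uintptr_t)rate_n_flags;
}

/* Consumer side (hypothetical): the reverse casts recover the u32 intact. */
static inline u32 example_fetch_rate(struct ieee80211_tx_info *info)
{
        return (u32)(uintptr_t)info->status.status_driver_data[1];
}

Casting through uintptr_t rather than directly to void * avoids compiler warnings about converting between integers and pointers of different sizes on 64-bit builds.
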
@@ -949,8 +953,10 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
        mvmsta = iwl_mvm_sta_from_mac80211(sta);
        tid_data = &mvmsta->tid_data[tid];
 
-       if (WARN_ONCE(tid_data->txq_id != scd_flow, "Q %d, tid %d, flow %d",
-                     tid_data->txq_id, tid, scd_flow)) {
+       if (tid_data->txq_id != scd_flow) {
+               IWL_ERR(mvm,
+                       "invalid BA notification: Q %d, tid %d, flow %d\n",
+                       tid_data->txq_id, tid, scd_flow);
                rcu_read_unlock();
                return 0;
        }
@@ -1043,6 +1049,14 @@ out:
        return 0;
 }
 
+/*
+ * Note that there are transports that buffer frames before they reach
+ * the firmware. This means that after flush_tx_path is called, the
+ * queue might not be empty. The race-free way to handle this is to:
+ * 1) set the station as draining
+ * 2) flush the Tx path
+ * 3) wait for the transport queues to be empty
+ */
 int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync)
 {
        int ret;
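
The comment added in the last hunk describes a three-step, race-free drain sequence. Below is a minimal sketch of that ordering, assuming the driver's existing helpers iwl_mvm_drain_sta(), iwl_mvm_flush_tx_path() and iwl_trans_wait_tx_queue_empty() with the signatures of this kernel generation; the wrapper name is hypothetical and error handling is deliberately simplified:

/* assumes the driver's "mvm.h" and "sta.h" headers are in scope */
static int example_drain_and_flush(struct iwl_mvm *mvm,
                                   struct iwl_mvm_sta *mvmsta)
{
        int ret;

        /* 1) mark the station as draining so no new frames are queued */
        ret = iwl_mvm_drain_sta(mvm, mvmsta, true);
        if (ret)
                return ret;

        /* 2) ask the firmware to flush the station's Tx queues */
        ret = iwl_mvm_flush_tx_path(mvm, mvmsta->tfd_queue_msk, true);
        if (ret)
                goto out;

        /* 3) wait until the transport-level queues are actually empty */
        ret = iwl_trans_wait_tx_queue_empty(mvm->trans, mvmsta->tfd_queue_msk);

out:
        /* draining is no longer needed once the queues are known empty */
        iwl_mvm_drain_sta(mvm, mvmsta, false);
        return ret;
}

Flushing before marking the station as draining would let frames still buffered below the firmware repopulate the queue right after the flush, which is exactly the race the comment warns about.
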