2 * Copyright (c) 2008-2009 Atheros Communications Inc.
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 #include <linux/nl80211.h>
20 #define ATH_PCI_VERSION "0.1"
22 static char *dev_info = "ath9k";
24 MODULE_AUTHOR("Atheros Communications");
25 MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
26 MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
27 MODULE_LICENSE("Dual BSD/GPL");
29 static int modparam_nohwcrypt;
30 module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
31 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
33 /* We use the hw_value as an index into our private channel structure */
/*
 * Initializers for ieee80211_channel table entries.  _idx becomes the
 * hw_value used to index the driver's private per-channel state.
 * NOTE(review): both macro bodies are only partially visible here (the
 * closing brace and some designated initializers, e.g. .hw_value and
 * CHAN2G's .band, are on elided lines) — confirm against the full file.
 */
35 #define CHAN2G(_freq, _idx) { \
36 .center_freq = (_freq), \
41 #define CHAN5G(_freq, _idx) { \
42 .band = IEEE80211_BAND_5GHZ, \
43 .center_freq = (_freq), \
48 /* Some 2 GHz radios are actually tunable on 2312-2732
49 * on 5 MHz steps, we support the channels which we know
50 * we have calibration data for all cards though to make
/*
 * 2.4 GHz channel table: IEEE channels 1-14, hw_value indices 0-13.
 * Index order must stay contiguous — the hw_value is used as a direct
 * array index into driver-private channel state (see comment above).
 */
52 static struct ieee80211_channel ath9k_2ghz_chantable[] = {
53 CHAN2G(2412, 0), /* Channel 1 */
54 CHAN2G(2417, 1), /* Channel 2 */
55 CHAN2G(2422, 2), /* Channel 3 */
56 CHAN2G(2427, 3), /* Channel 4 */
57 CHAN2G(2432, 4), /* Channel 5 */
58 CHAN2G(2437, 5), /* Channel 6 */
59 CHAN2G(2442, 6), /* Channel 7 */
60 CHAN2G(2447, 7), /* Channel 8 */
61 CHAN2G(2452, 8), /* Channel 9 */
62 CHAN2G(2457, 9), /* Channel 10 */
63 CHAN2G(2462, 10), /* Channel 11 */
64 CHAN2G(2467, 11), /* Channel 12 */
65 CHAN2G(2472, 12), /* Channel 13 */
66 CHAN2G(2484, 13), /* Channel 14 */
69 /* Some 5 GHz radios are actually tunable on XXXX-YYYY
70 * on 5 MHz steps, we support the channels which we know
71 * we have calibration data for all cards though to make
/*
 * 5 GHz channel table.  hw_value indices (14-37) continue directly
 * after the 2 GHz table so both bands share one private index space.
 */
73 static struct ieee80211_channel ath9k_5ghz_chantable[] = {
74 /* _We_ call this UNII 1 */
75 CHAN5G(5180, 14), /* Channel 36 */
76 CHAN5G(5200, 15), /* Channel 40 */
77 CHAN5G(5220, 16), /* Channel 44 */
78 CHAN5G(5240, 17), /* Channel 48 */
79 /* _We_ call this UNII 2 */
80 CHAN5G(5260, 18), /* Channel 52 */
81 CHAN5G(5280, 19), /* Channel 56 */
82 CHAN5G(5300, 20), /* Channel 60 */
83 CHAN5G(5320, 21), /* Channel 64 */
84 /* _We_ call this "Middle band" */
85 CHAN5G(5500, 22), /* Channel 100 */
86 CHAN5G(5520, 23), /* Channel 104 */
87 CHAN5G(5540, 24), /* Channel 108 */
88 CHAN5G(5560, 25), /* Channel 112 */
89 CHAN5G(5580, 26), /* Channel 116 */
90 CHAN5G(5600, 27), /* Channel 120 */
91 CHAN5G(5620, 28), /* Channel 124 */
92 CHAN5G(5640, 29), /* Channel 128 */
93 CHAN5G(5660, 30), /* Channel 132 */
94 CHAN5G(5680, 31), /* Channel 136 */
95 CHAN5G(5700, 32), /* Channel 140 */
96 /* _We_ call this UNII 3 */
97 CHAN5G(5745, 33), /* Channel 149 */
98 CHAN5G(5765, 34), /* Channel 153 */
99 CHAN5G(5785, 35), /* Channel 157 */
100 CHAN5G(5805, 36), /* Channel 161 */
101 CHAN5G(5825, 37), /* Channel 165 */
/*
 * Select the hardware rate table that matches the current band and
 * HT channel width (legacy, HT20, HT40+/-).
 * NOTE(review): the assignment target for each branch (presumably
 * sc->cur_rate_table =) and the break statements are on elided lines.
 */
104 static void ath_cache_conf_rate(struct ath_softc *sc,
105 struct ieee80211_conf *conf)
107 switch (conf->channel->band) {
108 case IEEE80211_BAND_2GHZ:
109 if (conf_is_ht20(conf))
111 sc->hw_rate_table[ATH9K_MODE_11NG_HT20];
112 else if (conf_is_ht40_minus(conf))
114 sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS];
115 else if (conf_is_ht40_plus(conf))
117 sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS];
/* Final else: non-HT 2 GHz falls back to the 11g table */
120 sc->hw_rate_table[ATH9K_MODE_11G];
122 case IEEE80211_BAND_5GHZ:
123 if (conf_is_ht20(conf))
125 sc->hw_rate_table[ATH9K_MODE_11NA_HT20];
126 else if (conf_is_ht40_minus(conf))
128 sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS];
129 else if (conf_is_ht40_plus(conf))
131 sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS];
/* Final else: non-HT 5 GHz falls back to the 11a table */
134 sc->hw_rate_table[ATH9K_MODE_11A];
/*
 * Push the configured tx power limit to the hardware if it changed,
 * then read the (possibly hardware-clamped) value back into curtxpow.
 */
142 static void ath_update_txpow(struct ath_softc *sc)
144 struct ath_hw *ah = sc->sc_ah;
147 if (sc->curtxpow != sc->config.txpowlimit) {
148 ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit);
149 /* read back in case value is clamped */
150 ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
151 sc->curtxpow = txpow;
/*
 * Translate the 802.11n "Minimum MPDU Start Spacing" field (0-7) into
 * the microsecond value the hardware expects.
 * NOTE(review): the case labels and return values are on elided lines.
 */
155 static u8 parse_mpdudensity(u8 mpdudensity)
158 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
159 * 0 for no restriction
168 switch (mpdudensity) {
174 /* Our lower layer calculations limit our precision to
/*
 * Populate the mac80211 supported-band rate array for @band from the
 * driver's legacy hardware rate table (11g for 2 GHz, 11a for 5 GHz),
 * capping at ATH_RATE_MAX entries.  Short-preamble rates get a second
 * hw_value with the preamble bit OR'd in.
 */
190 static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band)
192 struct ath_rate_table *rate_table = NULL;
193 struct ieee80211_supported_band *sband;
194 struct ieee80211_rate *rate;
198 case IEEE80211_BAND_2GHZ:
199 rate_table = sc->hw_rate_table[ATH9K_MODE_11G];
201 case IEEE80211_BAND_5GHZ:
202 rate_table = sc->hw_rate_table[ATH9K_MODE_11A];
/* Bail out if the table for this band was never initialized */
208 if (rate_table == NULL)
211 sband = &sc->sbands[band];
212 rate = sc->rates[band];
214 if (rate_table->rate_cnt > ATH_RATE_MAX)
215 maxrates = ATH_RATE_MAX;
217 maxrates = rate_table->rate_cnt;
219 for (i = 0; i < maxrates; i++) {
/* ratekbps is in kbit/s; /100 yields units of 100 kbit/s for mac80211 */
220 rate[i].bitrate = rate_table->info[i].ratekbps / 100;
221 rate[i].hw_value = rate_table->info[i].ratecode;
222 if (rate_table->info[i].short_preamble) {
223 rate[i].hw_value_short = rate_table->info[i].ratecode |
224 rate_table->info[i].short_preamble;
225 rate[i].flags = IEEE80211_RATE_SHORT_PREAMBLE;
229 DPRINTF(sc, ATH_DBG_CONFIG, "Rate: %2dMbps, ratecode: %2d\n",
230 rate[i].bitrate / 10, rate[i].hw_value);
235 * Set/change channels. If the channel is really being changed, it's done
236 * by reseting the chip. To accomplish this we must first cleanup any pending
237 * DMA, then restart stuff.
239 int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
240 struct ath9k_channel *hchan)
242 struct ath_hw *ah = sc->sc_ah;
243 bool fastcc = true, stopped;
244 struct ieee80211_channel *channel = hw->conf.channel;
/* Hardware not attached/ready yet: refuse the channel change */
247 if (sc->sc_flags & SC_OP_INVALID)
253 * This is only performed if the channel settings have
256 * To switch channels clear any pending DMA operations;
257 * wait long enough for the RX fifo to drain, reset the
258 * hardware at the new frequency, and then re-enable
259 * the relevant bits of the h/w.
261 ath9k_hw_set_interrupts(ah, 0);
262 ath_drain_all_txq(sc, false);
263 stopped = ath_stoprecv(sc);
265 /* XXX: do not flush receive queue here. We don't want
266 * to flush data frames already in queue because of
267 * changing channel. */
/* If RX couldn't be cleanly stopped, or a full reset was requested,
 * a fast channel change is unsafe (fastcc presumably cleared on an
 * elided line — confirm). */
269 if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
272 DPRINTF(sc, ATH_DBG_CONFIG,
273 "(%u MHz) -> (%u MHz), chanwidth: %d\n",
274 sc->sc_ah->curchan->channel,
275 channel->center_freq, sc->tx_chan_width);
277 spin_lock_bh(&sc->sc_resetlock);
279 r = ath9k_hw_reset(ah, hchan, fastcc);
281 DPRINTF(sc, ATH_DBG_FATAL,
282 "Unable to reset channel (%u Mhz) "
284 channel->center_freq, r);
/* Error path unlocks before returning; success path unlocks below */
285 spin_unlock_bh(&sc->sc_resetlock);
288 spin_unlock_bh(&sc->sc_resetlock);
290 sc->sc_flags &= ~SC_OP_FULL_RESET;
292 if (ath_startrecv(sc) != 0) {
293 DPRINTF(sc, ATH_DBG_FATAL,
294 "Unable to restart recv logic\n");
/* Re-sync rate table, tx power and interrupts for the new channel */
298 ath_cache_conf_rate(sc, &hw->conf);
299 ath_update_txpow(sc);
300 ath9k_hw_set_interrupts(ah, sc->imask);
301 ath9k_ps_restore(sc);
306 * This routine performs the periodic noise floor calibration function
307 * that is used to adjust and optimize the chip performance. This
308 * takes environmental changes (location, temperature) into account.
309 * When the task is complete, it reschedules itself depending on the
310 * appropriate interval that was calculated.
/* Timer callback: @data is the ath_softc pointer cast to unsigned long */
312 static void ath_ani_calibrate(unsigned long data)
314 struct ath_softc *sc = (struct ath_softc *)data;
315 struct ath_hw *ah = sc->sc_ah;
316 bool longcal = false;
317 bool shortcal = false;
318 bool aniflag = false;
319 unsigned int timestamp = jiffies_to_msecs(jiffies);
320 u32 cal_interval, short_cal_interval;
/* APs use a different short-calibration cadence than stations */
322 short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
323 ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
326 * don't calibrate when we're scanning.
327 * we are most likely not on our home channel.
329 if (sc->sc_flags & SC_OP_SCANNING)
332 /* Long calibration runs independently of short calibration. */
333 if ((timestamp - sc->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) {
335 DPRINTF(sc, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
336 sc->ani.longcal_timer = timestamp;
339 /* Short calibration applies only while caldone is false */
340 if (!sc->ani.caldone) {
341 if ((timestamp - sc->ani.shortcal_timer) >= short_cal_interval) {
343 DPRINTF(sc, ATH_DBG_ANI, "shortcal @%lu\n", jiffies);
344 sc->ani.shortcal_timer = timestamp;
345 sc->ani.resetcal_timer = timestamp;
/* Once calibrated, periodically re-validate the stored calibration */
348 if ((timestamp - sc->ani.resetcal_timer) >=
349 ATH_RESTART_CALINTERVAL) {
350 sc->ani.caldone = ath9k_hw_reset_calvalid(ah);
352 sc->ani.resetcal_timer = timestamp;
356 /* Verify whether we must check ANI */
357 if ((timestamp - sc->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
359 sc->ani.checkani_timer = timestamp;
362 /* Skip all processing if there's nothing to do. */
363 if (longcal || shortcal || aniflag) {
364 /* Call ANI routine if necessary */
366 ath9k_hw_ani_monitor(ah, &sc->nodestats, ah->curchan);
368 /* Perform calibration if necessary */
369 if (longcal || shortcal) {
370 bool iscaldone = false;
372 if (ath9k_hw_calibrate(ah, ah->curchan,
373 sc->rx_chainmask, longcal,
/* On successful long cal, refresh the cached noise floor */
376 sc->ani.noise_floor =
377 ath9k_hw_getchan_noise(ah,
380 DPRINTF(sc, ATH_DBG_ANI,
381 "calibrate chan %u/%x nf: %d\n",
382 ah->curchan->channel,
383 ah->curchan->channelFlags,
384 sc->ani.noise_floor);
386 DPRINTF(sc, ATH_DBG_ANY,
387 "calibrate chan %u/%x failed\n",
388 ah->curchan->channel,
389 ah->curchan->channelFlags);
391 sc->ani.caldone = iscaldone;
397 * Set timer interval based on previous results.
398 * The interval must be the shortest necessary to satisfy ANI,
399 * short calibration and long calibration.
401 cal_interval = ATH_LONG_CALINTERVAL;
402 if (sc->sc_ah->config.enable_ani)
403 cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
404 if (!sc->ani.caldone)
405 cal_interval = min(cal_interval, (u32)short_cal_interval);
/* Re-arm ourselves for the next calibration pass */
407 mod_timer(&sc->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
411 * Update tx/rx chainmask. For legacy association,
412 * hard code chainmask to 1x1, for 11n association, use
413 * the chainmask configuration, for bt coexistence, use
414 * the chainmask configuration even in legacy mode.
416 void ath_update_chainmask(struct ath_softc *sc, int is_ht)
/* NOTE(review): the if-condition using is_ht is partially elided; the
 * visible clause only shows the BT-coex capability check. */
419 (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BT_COEX)) {
420 sc->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
421 sc->rx_chainmask = sc->sc_ah->caps.rx_chainmask;
/* else: legacy association without BT coex — force 1x1 */
423 sc->tx_chainmask = 1;
424 sc->rx_chainmask = 1;
427 DPRINTF(sc, ATH_DBG_CONFIG, "tx chmask: %d, rx chmask: %d\n",
428 sc->tx_chainmask, sc->rx_chainmask);
/*
 * Per-station attach hook: when TX aggregation is enabled, initialize
 * the node's tx state and derive A-MPDU limits from the station's
 * advertised HT capabilities.
 */
431 static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
435 an = (struct ath_node *)sta->drv_priv;
437 if (sc->sc_flags & SC_OP_TXAGGR) {
438 ath_tx_node_init(sc, an);
/* Max A-MPDU length = 2^(13 + ampdu_factor) per 802.11n */
439 an->maxampdu = 1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR +
440 sta->ht_cap.ampdu_factor);
441 an->mpdudensity = parse_mpdudensity(sta->ht_cap.ampdu_density);
/* Per-station detach hook: tear down tx aggregation state if enabled. */
445 static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
447 struct ath_node *an = (struct ath_node *)sta->drv_priv;
449 if (sc->sc_flags & SC_OP_TXAGGR)
450 ath_tx_node_cleanup(sc, an);
/*
 * Bottom-half for ath_isr: services the interrupt causes cached in
 * sc->intrstatus (fatal reset, RX, TX) and re-enables interrupts.
 */
453 static void ath9k_tasklet(unsigned long data)
455 struct ath_softc *sc = (struct ath_softc *)data;
456 u32 status = sc->intrstatus;
/* Fatal HW error: full chip reset (no retry of the current frame) */
458 if (status & ATH9K_INT_FATAL) {
459 ath_reset(sc, false);
463 if (status & (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
/* rxflushlock serializes against ath_flushrecv() */
464 spin_lock_bh(&sc->rx.rxflushlock);
465 ath_rx_tasklet(sc, 0);
466 spin_unlock_bh(&sc->rx.rxflushlock);
469 if (status & ATH9K_INT_TX)
472 /* re-enable hardware interrupt */
473 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
/*
 * Hard IRQ handler.  Reads and acks the hardware ISR, caches the status
 * for ath9k_tasklet, handles the few causes that must be serviced in
 * hard-IRQ context (TXURN, MIB, TIM), then masks everything but SWBA
 * and schedules the tasklet.  Returns IRQ_NONE for shared-IRQ strays.
 */
476 irqreturn_t ath_isr(int irq, void *dev)
478 #define SCHED_INTR ( \
488 struct ath_softc *sc = dev;
489 struct ath_hw *ah = sc->sc_ah;
490 enum ath9k_int status;
494 * The hardware is not ready/present, don't
495 * touch anything. Note this can happen early
496 * on if the IRQ is shared.
498 if (sc->sc_flags & SC_OP_INVALID)
503 /* shared irq, not for us */
505 if (!ath9k_hw_intrpend(ah)) {
506 ath9k_ps_restore(sc);
511 * Figure out the reason(s) for the interrupt. Note
512 * that the hal returns a pseudo-ISR that may include
513 * bits we haven't explicitly enabled so we mask the
514 * value to insure we only process bits we requested.
516 ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */
517 status &= sc->imask; /* discard unasked-for bits */
520 * If there are no status bits set, then this interrupt was not
521 * for me (should have been caught above).
524 ath9k_ps_restore(sc);
528 /* Cache the status */
529 sc->intrstatus = status;
/* Defer bulk work (RX/TX/fatal) to the tasklet */
531 if (status & SCHED_INTR)
535 * If a FATAL or RXORN interrupt is received, we have to reset the
538 if (status & (ATH9K_INT_FATAL | ATH9K_INT_RXORN))
541 if (status & ATH9K_INT_SWBA)
542 tasklet_schedule(&sc->bcon_tasklet);
/* TX FIFO underrun: raise the tx trigger level immediately */
544 if (status & ATH9K_INT_TXURN)
545 ath9k_hw_updatetxtriglevel(ah, true);
547 if (status & ATH9K_INT_MIB) {
549 * Disable interrupts until we service the MIB
550 * interrupt; otherwise it will continue to
553 ath9k_hw_set_interrupts(ah, 0);
555 * Let the hal handle the event. We assume
556 * it will clear whatever condition caused
559 ath9k_hw_procmibevent(ah, &sc->nodestats);
560 ath9k_hw_set_interrupts(ah, sc->imask);
563 if (status & ATH9K_INT_TIM_TIMER) {
564 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
565 /* Clear RxAbort bit so that we can
567 ath9k_hw_setpower(ah, ATH9K_PM_AWAKE);
568 ath9k_hw_setrxabort(ah, 0);
570 sc->sc_flags |= SC_OP_WAIT_FOR_BEACON;
576 ath9k_ps_restore(sc);
577 ath_debug_stat_interrupt(sc, status);
580 /* turn off every interrupt except SWBA */
581 ath9k_hw_set_interrupts(ah, (sc->imask & ATH9K_INT_SWBA));
582 tasklet_schedule(&sc->intr_tq);
/*
 * Map a mac80211 (band, channel_type) pair onto the driver's internal
 * CHANNEL_* mode constant.  NO_HT is treated like HT20 in both bands.
 * NOTE(review): break statements and default cases are on elided lines.
 */
590 static u32 ath_get_extchanmode(struct ath_softc *sc,
591 struct ieee80211_channel *chan,
592 enum nl80211_channel_type channel_type)
596 switch (chan->band) {
597 case IEEE80211_BAND_2GHZ:
598 switch(channel_type) {
599 case NL80211_CHAN_NO_HT:
600 case NL80211_CHAN_HT20:
601 chanmode = CHANNEL_G_HT20;
603 case NL80211_CHAN_HT40PLUS:
604 chanmode = CHANNEL_G_HT40PLUS;
606 case NL80211_CHAN_HT40MINUS:
607 chanmode = CHANNEL_G_HT40MINUS;
611 case IEEE80211_BAND_5GHZ:
612 switch(channel_type) {
613 case NL80211_CHAN_NO_HT:
614 case NL80211_CHAN_HT20:
615 chanmode = CHANNEL_A_HT20;
617 case NL80211_CHAN_HT40PLUS:
618 chanmode = CHANNEL_A_HT40PLUS;
620 case NL80211_CHAN_HT40MINUS:
621 chanmode = CHANNEL_A_HT40MINUS;
/*
 * Program a TKIP key (with its TX/RX MIC halves) into the key cache.
 * Layout depends on key role and hardware splitmic capability:
 * group keys use one entry, combined TX+RX keys share one entry, and
 * split-MIC hardware uses two entries (RX at keyix + 32).
 */
632 static int ath_setkey_tkip(struct ath_softc *sc, u16 keyix, const u8 *key,
633 struct ath9k_keyval *hk, const u8 *addr,
/* MIC halves live at fixed offsets inside the 802.11 key material */
639 key_txmic = key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
640 key_rxmic = key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;
644 * Group key installation - only two key cache entries are used
645 * regardless of splitmic capability since group key is only
646 * used either for TX or RX.
649 memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
650 memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_mic));
652 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
653 memcpy(hk->kv_txmic, key_rxmic, sizeof(hk->kv_mic));
655 return ath9k_hw_set_keycache_entry(sc->sc_ah, keyix, hk, addr);
658 /* TX and RX keys share the same key cache entry. */
659 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
660 memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic));
661 return ath9k_hw_set_keycache_entry(sc->sc_ah, keyix, hk, addr);
664 /* Separate key cache entries for TX and RX */
666 /* TX key goes at first index, RX key at +32. */
667 memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
668 if (!ath9k_hw_set_keycache_entry(sc->sc_ah, keyix, hk, NULL)) {
669 /* TX MIC entry failed. No need to proceed further */
670 DPRINTF(sc, ATH_DBG_FATAL,
671 "Setting TX MIC Key Failed\n");
675 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
676 /* XXX delete tx key on failure? */
677 return ath9k_hw_set_keycache_entry(sc->sc_ah, keyix + 32, hk, addr);
/*
 * Find a key cache index usable for a TKIP key.  A TKIP key may need
 * up to four related slots (i, i+32, i+64, i+64+32 for key/MIC pairs),
 * so all of them must be free.  Skips the WEP default-key indices.
 * Returns the free index (on an elided line) or a failure value.
 */
680 static int ath_reserve_key_cache_slot_tkip(struct ath_softc *sc)
684 for (i = IEEE80211_WEP_NKID; i < sc->keymax / 2; i++) {
685 if (test_bit(i, sc->keymap) ||
686 test_bit(i + 64, sc->keymap))
687 continue; /* At least one part of TKIP key allocated */
/* Split-MIC hardware additionally requires the +32 pair to be free */
689 (test_bit(i + 32, sc->keymap) ||
690 test_bit(i + 64 + 32, sc->keymap)))
691 continue; /* At least one part of TKIP key allocated */
693 /* Found a free slot for a TKIP key */
/*
 * Reserve a key cache index for a non-TKIP key.  Preference order:
 * 1) slots whose TKIP partner slots are already taken (useless for
 *    TKIP anyway), 2) slots in a partially-used TKIP pair, 3) any free
 *    slot that would not collide with future TKIP group key placement.
 */
699 static int ath_reserve_key_cache_slot(struct ath_softc *sc)
703 /* First, try to find slots that would not be available for TKIP. */
705 for (i = IEEE80211_WEP_NKID; i < sc->keymax / 4; i++) {
706 if (!test_bit(i, sc->keymap) &&
707 (test_bit(i + 32, sc->keymap) ||
708 test_bit(i + 64, sc->keymap) ||
709 test_bit(i + 64 + 32, sc->keymap)))
711 if (!test_bit(i + 32, sc->keymap) &&
712 (test_bit(i, sc->keymap) ||
713 test_bit(i + 64, sc->keymap) ||
714 test_bit(i + 64 + 32, sc->keymap)))
716 if (!test_bit(i + 64, sc->keymap) &&
717 (test_bit(i , sc->keymap) ||
718 test_bit(i + 32, sc->keymap) ||
719 test_bit(i + 64 + 32, sc->keymap)))
721 if (!test_bit(i + 64 + 32, sc->keymap) &&
722 (test_bit(i, sc->keymap) ||
723 test_bit(i + 32, sc->keymap) ||
724 test_bit(i + 64, sc->keymap)))
/* Second pass: reuse the free half of a partially-used TKIP pair */
728 for (i = IEEE80211_WEP_NKID; i < sc->keymax / 2; i++) {
729 if (!test_bit(i, sc->keymap) &&
730 test_bit(i + 64, sc->keymap))
732 if (test_bit(i, sc->keymap) &&
733 !test_bit(i + 64, sc->keymap))
738 /* No partially used TKIP slots, pick any available slot */
739 for (i = IEEE80211_WEP_NKID; i < sc->keymax; i++) {
740 /* Do not allow slots that could be needed for TKIP group keys
741 * to be used. This limitation could be removed if we know that
742 * TKIP will not be used. */
743 if (i >= 64 && i < 64 + IEEE80211_WEP_NKID)
746 if (i >= 32 && i < 32 + IEEE80211_WEP_NKID)
748 if (i >= 64 + 32 && i < 64 + 32 + IEEE80211_WEP_NKID)
752 if (!test_bit(i, sc->keymap))
753 return i; /* Found a free slot for a key */
756 /* No free slot found */
/*
 * mac80211 set_key backend: build an ath9k_keyval from the mac80211
 * key_conf, pick a key cache slot (WEP default keys keep their keyidx;
 * pairwise keys get a reserved slot), program the hardware, and mark
 * every slot the key occupies in sc->keymap.
 * Returns the hardware key index on success, negative errno on failure.
 */
760 static int ath_key_config(struct ath_softc *sc,
761 struct ieee80211_vif *vif,
762 struct ieee80211_sta *sta,
763 struct ieee80211_key_conf *key)
765 struct ath9k_keyval hk;
766 const u8 *mac = NULL;
770 memset(&hk, 0, sizeof(hk));
774 hk.kv_type = ATH9K_CIPHER_WEP;
777 hk.kv_type = ATH9K_CIPHER_TKIP;
780 hk.kv_type = ATH9K_CIPHER_AES_CCM;
786 hk.kv_len = key->keylen;
787 memcpy(hk.kv_val, key->key, key->keylen);
789 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
790 /* For now, use the default keys for broadcast keys. This may
791 * need to change with virtual interfaces. */
793 } else if (key->keyidx) {
798 if (vif->type != NL80211_IFTYPE_AP) {
799 /* Only keyidx 0 should be used with unicast key, but
800 * allow this for client mode for now. */
/* TKIP needs a multi-slot reservation; other ciphers need one slot */
809 if (key->alg == ALG_TKIP)
810 idx = ath_reserve_key_cache_slot_tkip(sc);
812 idx = ath_reserve_key_cache_slot(sc);
814 return -ENOSPC; /* no free key cache entries */
817 if (key->alg == ALG_TKIP)
818 ret = ath_setkey_tkip(sc, idx, key->key, &hk, mac,
819 vif->type == NL80211_IFTYPE_AP);
821 ret = ath9k_hw_set_keycache_entry(sc->sc_ah, idx, &hk, mac);
/* Record every cache slot the key consumes so the reservation
 * helpers above see them as taken */
826 set_bit(idx, sc->keymap);
827 if (key->alg == ALG_TKIP) {
828 set_bit(idx + 64, sc->keymap);
830 set_bit(idx + 32, sc->keymap);
831 set_bit(idx + 64 + 32, sc->keymap);
/*
 * Remove a key from the hardware cache and clear all keymap bits it
 * occupied.  WEP default-key indices are never tracked in keymap, and
 * only TKIP keys own the extra +32/+64 companion slots.
 */
838 static void ath_key_delete(struct ath_softc *sc, struct ieee80211_key_conf *key)
840 ath9k_hw_keyreset(sc->sc_ah, key->hw_key_idx);
841 if (key->hw_key_idx < IEEE80211_WEP_NKID)
844 clear_bit(key->hw_key_idx, sc->keymap);
845 if (key->alg != ALG_TKIP)
848 clear_bit(key->hw_key_idx + 64, sc->keymap);
/* Split-MIC entries, presumably guarded by a splitmic check on an
 * elided line — confirm against the full file */
850 clear_bit(key->hw_key_idx + 32, sc->keymap);
851 clear_bit(key->hw_key_idx + 64 + 32, sc->keymap);
/*
 * Advertise the device's HT capabilities to mac80211: 40 MHz support,
 * SGI-40, max A-MPDU length and density, and an MCS RX mask sized to
 * the configured receive chainmask (1 or 2 spatial streams).
 */
855 static void setup_ht_cap(struct ath_softc *sc,
856 struct ieee80211_sta_ht_cap *ht_info)
858 #define ATH9K_HT_CAP_MAXRXAMPDU_65536 0x3 /* 2 ^ 16 */
859 #define ATH9K_HT_CAP_MPDUDENSITY_8 0x6 /* 8 usec */
861 ht_info->ht_supported = true;
862 ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
863 IEEE80211_HT_CAP_SM_PS |
864 IEEE80211_HT_CAP_SGI_40 |
865 IEEE80211_HT_CAP_DSSSCCK40;
867 ht_info->ampdu_factor = ATH9K_HT_CAP_MAXRXAMPDU_65536;
868 ht_info->ampdu_density = ATH9K_HT_CAP_MPDUDENSITY_8;
870 /* set up supported mcs set */
871 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
873 switch(sc->rx_chainmask) {
/* Single-chain: MCS 0-7 only */
875 ht_info->mcs.rx_mask[0] = 0xff;
/* Dual-chain: MCS 0-15 */
881 ht_info->mcs.rx_mask[0] = 0xff;
882 ht_info->mcs.rx_mask[1] = 0xff;
886 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
/*
 * Handle association state changes from mac80211.  On association:
 * store the AID, write it to hardware (STA mode), configure beacons,
 * reset RSSI statistics and start the ANI calibration timer.  On
 * disassociation only a debug message is visible here.
 */
889 static void ath9k_bss_assoc_info(struct ath_softc *sc,
890 struct ieee80211_vif *vif,
891 struct ieee80211_bss_conf *bss_conf)
893 struct ath_vif *avp = (void *)vif->drv_priv;
895 if (bss_conf->assoc) {
896 DPRINTF(sc, ATH_DBG_CONFIG, "Bss Info ASSOC %d, bssid: %pM\n",
897 bss_conf->aid, sc->curbssid);
899 /* New association, store aid */
900 if (avp->av_opmode == NL80211_IFTYPE_STATION) {
901 sc->curaid = bss_conf->aid;
902 ath9k_hw_write_associd(sc);
905 /* Configure the beacon */
906 ath_beacon_config(sc, vif);
908 /* Reset rssi stats */
909 sc->nodestats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
910 sc->nodestats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
911 sc->nodestats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
912 sc->nodestats.ns_avgtxrate = ATH_RATE_DUMMY_MARKER;
/* Kick off periodic calibration now that we have a home channel */
915 mod_timer(&sc->ani.timer,
916 jiffies + msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
918 DPRINTF(sc, ATH_DBG_CONFIG, "Bss Info DISASSOC\n");
923 /********************************/
925 /********************************/
/*
 * Delayed work that blinks the activity LED while associated.  The
 * on/off durations shrink with tx/rx activity counters so the LED
 * blinks faster under traffic; the work re-queues itself with the
 * duration matching the state it just toggled into.
 */
927 static void ath_led_blink_work(struct work_struct *work)
929 struct ath_softc *sc = container_of(work, struct ath_softc,
930 ath_led_blink_work.work);
/* Stop blinking once we are no longer associated */
932 if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
935 if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
936 (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
937 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 0);
939 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN,
940 (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
942 queue_delayed_work(sc->hw->workqueue, &sc->ath_led_blink_work,
943 (sc->sc_flags & SC_OP_LED_ON) ?
944 msecs_to_jiffies(sc->led_off_duration) :
945 msecs_to_jiffies(sc->led_on_duration));
/* Activity counters shorten the next on/off periods (floors 25/10 ms) */
947 sc->led_on_duration = sc->led_on_cnt ?
948 max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) :
949 ATH_LED_ON_DURATION_IDLE;
950 sc->led_off_duration = sc->led_off_cnt ?
951 max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) :
952 ATH_LED_OFF_DURATION_IDLE;
953 sc->led_on_cnt = sc->led_off_cnt = 0;
954 if (sc->sc_flags & SC_OP_LED_ON)
955 sc->sc_flags &= ~SC_OP_LED_ON;
957 sc->sc_flags |= SC_OP_LED_ON;
/*
 * led_classdev brightness callback.  The LED GPIO is active-low.
 * OFF clears associated/radio state; non-OFF either starts the assoc
 * blink work or turns the radio LED solid on.
 */
960 static void ath_led_brightness(struct led_classdev *led_cdev,
961 enum led_brightness brightness)
963 struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
964 struct ath_softc *sc = led->sc;
966 switch (brightness) {
/* LED_OFF case (label on elided line) */
968 if (led->led_type == ATH_LED_ASSOC ||
969 led->led_type == ATH_LED_RADIO) {
970 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN,
971 (led->led_type == ATH_LED_RADIO));
972 sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
973 if (led->led_type == ATH_LED_RADIO)
974 sc->sc_flags &= ~SC_OP_LED_ON;
/* Non-off brightness (label on elided line) */
980 if (led->led_type == ATH_LED_ASSOC) {
981 sc->sc_flags |= SC_OP_LED_ASSOCIATED;
982 queue_delayed_work(sc->hw->workqueue,
983 &sc->ath_led_blink_work, 0);
984 } else if (led->led_type == ATH_LED_RADIO) {
985 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 0);
986 sc->sc_flags |= SC_OP_LED_ON;
/*
 * Register one ath_led with the LED class subsystem, wiring its name,
 * default trigger and brightness callback.  Marks the LED registered
 * on success so ath_unregister_led() knows to tear it down.
 */
996 static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
1002 led->led_cdev.name = led->name;
1003 led->led_cdev.default_trigger = trigger;
1004 led->led_cdev.brightness_set = ath_led_brightness;
1006 ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
1008 DPRINTF(sc, ATH_DBG_FATAL,
1009 "Failed to register led:%s", led->name);
1011 led->registered = 1;
/* Unregister an LED from the class subsystem if it was registered. */
1015 static void ath_unregister_led(struct ath_led *led)
1017 if (led->registered) {
1018 led_classdev_unregister(&led->led_cdev);
1019 led->registered = 0;
/*
 * Tear down all LEDs: stop the blink work first (it re-queues itself),
 * unregister each LED, and leave the GPIO high (LED off, active-low).
 */
1023 static void ath_deinit_leds(struct ath_softc *sc)
1025 cancel_delayed_work_sync(&sc->ath_led_blink_work);
1026 ath_unregister_led(&sc->assoc_led);
1027 sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
1028 ath_unregister_led(&sc->tx_led);
1029 ath_unregister_led(&sc->rx_led);
1030 ath_unregister_led(&sc->radio_led);
1031 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
/*
 * Configure the LED GPIO pin and register the radio/assoc/tx/rx LEDs,
 * each bound to the corresponding mac80211 default trigger.  On any
 * registration failure everything is unwound via ath_deinit_leds().
 * NOTE(review): the per-step "if (ret) goto fail" checks appear to be
 * on elided lines.
 */
1034 static void ath_init_leds(struct ath_softc *sc)
1039 /* Configure gpio 1 for output */
1040 ath9k_hw_cfg_output(sc->sc_ah, ATH_LED_PIN,
1041 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
1042 /* LED off, active low */
1043 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
1045 INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
1047 trigger = ieee80211_get_radio_led_name(sc->hw);
1048 snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
1049 "ath9k-%s::radio", wiphy_name(sc->hw->wiphy));
1050 ret = ath_register_led(sc, &sc->radio_led, trigger);
1051 sc->radio_led.led_type = ATH_LED_RADIO;
1055 trigger = ieee80211_get_assoc_led_name(sc->hw);
1056 snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name),
1057 "ath9k-%s::assoc", wiphy_name(sc->hw->wiphy));
1058 ret = ath_register_led(sc, &sc->assoc_led, trigger);
1059 sc->assoc_led.led_type = ATH_LED_ASSOC;
1063 trigger = ieee80211_get_tx_led_name(sc->hw);
1064 snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
1065 "ath9k-%s::tx", wiphy_name(sc->hw->wiphy));
1066 ret = ath_register_led(sc, &sc->tx_led, trigger);
1067 sc->tx_led.led_type = ATH_LED_TX;
1071 trigger = ieee80211_get_rx_led_name(sc->hw);
1072 snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
1073 "ath9k-%s::rx", wiphy_name(sc->hw->wiphy));
1074 ret = ath_register_led(sc, &sc->rx_led, trigger);
1075 sc->rx_led.led_type = ATH_LED_RX;
/* Error path: unwind any LEDs that did register */
1082 ath_deinit_leds(sc);
/*
 * Bring the radio back up (rfkill unblock / resume): reset the chip on
 * the current channel, restore tx power, restart receive, re-enable
 * beacons and interrupts, drive the LED GPIO and wake the tx queues.
 */
1085 void ath_radio_enable(struct ath_softc *sc)
1087 struct ath_hw *ah = sc->sc_ah;
1088 struct ieee80211_channel *channel = sc->hw->conf.channel;
1091 ath9k_ps_wakeup(sc);
1092 spin_lock_bh(&sc->sc_resetlock);
1094 r = ath9k_hw_reset(ah, ah->curchan, false);
1097 DPRINTF(sc, ATH_DBG_FATAL,
/* Fix: the two string fragments must concatenate into ONE format
 * string.  The stray comma after the first fragment passed the second
 * fragment as the argument for the first %u and shifted the real
 * arguments out of place; the format also had three %u specifiers for
 * two arguments.  Now two specifiers match the two arguments, same as
 * the corrected form used upstream. */
1098 "Unable to reset channel (%u Mhz), "
1099 "reset status %u\n",
1100 channel->center_freq, r);
1102 spin_unlock_bh(&sc->sc_resetlock);
1104 ath_update_txpow(sc);
1105 if (ath_startrecv(sc) != 0) {
1106 DPRINTF(sc, ATH_DBG_FATAL,
1107 "Unable to restart recv logic\n");
1111 if (sc->sc_flags & SC_OP_BEACONS)
1112 ath_beacon_config(sc, NULL); /* restart beacons */
1114 /* Re-Enable interrupts */
1115 ath9k_hw_set_interrupts(ah, sc->imask);
/* Drive the (active-low) LED pin on */
1118 ath9k_hw_cfg_output(ah, ATH_LED_PIN,
1119 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
1120 ath9k_hw_set_gpio(ah, ATH_LED_PIN, 0);
1122 ieee80211_wake_queues(sc->hw);
1123 ath9k_ps_restore(sc);
/*
 * Shut the radio down (rfkill block): stop the queues, turn the LED
 * off, disable interrupts, drain tx/rx DMA, reset and then disable the
 * PHY, and drop the chip into full sleep.
 */
1126 void ath_radio_disable(struct ath_softc *sc)
1128 struct ath_hw *ah = sc->sc_ah;
1129 struct ieee80211_channel *channel = sc->hw->conf.channel;
1132 ath9k_ps_wakeup(sc);
1133 ieee80211_stop_queues(sc->hw);
/* LED off (active-low) and reclaim the pin as input */
1136 ath9k_hw_set_gpio(ah, ATH_LED_PIN, 1);
1137 ath9k_hw_cfg_gpio_input(ah, ATH_LED_PIN);
1139 /* Disable interrupts */
1140 ath9k_hw_set_interrupts(ah, 0);
1142 ath_drain_all_txq(sc, false); /* clear pending tx frames */
1143 ath_stoprecv(sc); /* turn off frame recv */
1144 ath_flushrecv(sc); /* flush recv queue */
1146 spin_lock_bh(&sc->sc_resetlock);
1147 r = ath9k_hw_reset(ah, ah->curchan, false);
1149 DPRINTF(sc, ATH_DBG_FATAL,
/* Fix: the concatenated format previously contained three %u
 * specifiers ("channel %u (%uMhz)" plus "status %u") while only two
 * arguments were supplied, which is undefined behavior for printf-style
 * formatting.  Two specifiers now match the two arguments. */
1150 "Unable to reset channel (%u Mhz), "
1151 "reset status %u\n",
1152 channel->center_freq, r);
1154 spin_unlock_bh(&sc->sc_resetlock);
1156 ath9k_hw_phy_disable(ah);
1157 ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
1158 ath9k_ps_restore(sc);
1161 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
1163 /*******************/
1165 /*******************/
/* Return true when the hardware rfkill GPIO reads the "blocked" level. */
1167 static bool ath_is_rfkill_set(struct ath_softc *sc)
1169 struct ath_hw *ah = sc->sc_ah;
1171 return ath9k_hw_gpio_get(ah, ah->rfkill_gpio) ==
1172 ah->rfkill_polarity;
1175 /* h/w rfkill poll function */
/*
 * Periodic poll of the hardware rfkill switch: on a state change,
 * enable/disable the radio (unless software-blocked), update the
 * SC_OP_RFKILL_HW_BLOCKED flag, push the new state to the rfkill core,
 * and re-queue itself.
 */
1176 static void ath_rfkill_poll(struct work_struct *work)
1178 struct ath_softc *sc = container_of(work, struct ath_softc,
1179 rf_kill.rfkill_poll.work)\u003b
1182 if (sc->sc_flags & SC_OP_INVALID)
1185 radio_on = !ath_is_rfkill_set(sc);
1188 * enable/disable radio only when there is a
1189 * state change in RF switch
1191 if (radio_on == !!(sc->sc_flags & SC_OP_RFKILL_HW_BLOCKED)) {
1192 enum rfkill_state state;
1194 if (sc->sc_flags & SC_OP_RFKILL_SW_BLOCKED) {
/* Software block wins: don't touch the radio, just report state */
1195 state = radio_on ? RFKILL_STATE_SOFT_BLOCKED
1196 : RFKILL_STATE_HARD_BLOCKED;
1197 } else if (radio_on) {
1198 ath_radio_enable(sc);
1199 state = RFKILL_STATE_UNBLOCKED;
1201 ath_radio_disable(sc);
1202 state = RFKILL_STATE_HARD_BLOCKED;
1205 if (state == RFKILL_STATE_HARD_BLOCKED)
1206 sc->sc_flags |= SC_OP_RFKILL_HW_BLOCKED;
1208 sc->sc_flags &= ~SC_OP_RFKILL_HW_BLOCKED;
1210 rfkill_force_state(sc->rf_kill.rfkill, state);
1213 queue_delayed_work(sc->hw->workqueue, &sc->rf_kill.rfkill_poll,
1214 msecs_to_jiffies(ATH_RFKILL_POLL_INTERVAL));
1217 /* s/w rfkill handler */
/*
 * rfkill toggle_radio callback: software-block or unblock the radio.
 * Unblocking is refused (with a log message) while the hardware switch
 * still blocks the radio.
 */
1218 static int ath_sw_toggle_radio(void *data, enum rfkill_state state)
1220 struct ath_softc *sc = data;
1223 case RFKILL_STATE_SOFT_BLOCKED:
/* Only actually power down if nothing else already blocked it */
1224 if (!(sc->sc_flags & (SC_OP_RFKILL_HW_BLOCKED |
1225 SC_OP_RFKILL_SW_BLOCKED)))
1226 ath_radio_disable(sc);
1227 sc->sc_flags |= SC_OP_RFKILL_SW_BLOCKED;
1229 case RFKILL_STATE_UNBLOCKED:
1230 if ((sc->sc_flags & SC_OP_RFKILL_SW_BLOCKED)) {
1231 sc->sc_flags &= ~SC_OP_RFKILL_SW_BLOCKED;
1232 if (sc->sc_flags & SC_OP_RFKILL_HW_BLOCKED) {
/* NOTE(review): message concatenates as "...the" + "radio..." with no
 * space between words */
1233 DPRINTF(sc, ATH_DBG_FATAL, "Can't turn on the"
1234 "radio as it is disabled by h/w\n");
1237 ath_radio_enable(sc);
1245 /* Init s/w rfkill */
/*
 * Allocate and fill in the rfkill device (name, private data, toggle
 * callback, initial unblocked state).  Registration happens later in
 * ath_start_rfkill_poll().
 */
1246 static int ath_init_sw_rfkill(struct ath_softc *sc)
1248 sc->rf_kill.rfkill = rfkill_allocate(wiphy_dev(sc->hw->wiphy),
1250 if (!sc->rf_kill.rfkill) {
1251 DPRINTF(sc, ATH_DBG_FATAL, "Failed to allocate rfkill\n");
1255 snprintf(sc->rf_kill.rfkill_name, sizeof(sc->rf_kill.rfkill_name),
1256 "ath9k-%s::rfkill", wiphy_name(sc->hw->wiphy));
1257 sc->rf_kill.rfkill->name = sc->rf_kill.rfkill_name;
1258 sc->rf_kill.rfkill->data = sc;
1259 sc->rf_kill.rfkill->toggle_radio = ath_sw_toggle_radio;
1260 sc->rf_kill.rfkill->state = RFKILL_STATE_UNBLOCKED;
1265 /* Deinitialize rfkill */
/*
 * Stop the hardware rfkill poll (if the chip has an RF-silent switch)
 * and unregister the rfkill device if it was registered.
 */
1266 static void ath_deinit_rfkill(struct ath_softc *sc)
1268 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1269 cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
1271 if (sc->sc_flags & SC_OP_RFKILL_REGISTERED) {
1272 rfkill_unregister(sc->rf_kill.rfkill);
1273 sc->sc_flags &= ~SC_OP_RFKILL_REGISTERED;
1274 sc->rf_kill.rfkill = NULL;
/*
 * Kick off hardware rfkill polling (if supported) and register the
 * rfkill device with the core on first use.  On registration failure
 * the rfkill structure is freed and the device torn down.
 */
1278 static int ath_start_rfkill_poll(struct ath_softc *sc)
1280 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1281 queue_delayed_work(sc->hw->workqueue,
1282 &sc->rf_kill.rfkill_poll, 0);
1284 if (!(sc->sc_flags & SC_OP_RFKILL_REGISTERED)) {
1285 if (rfkill_register(sc->rf_kill.rfkill)) {
1286 DPRINTF(sc, ATH_DBG_FATAL,
1287 "Unable to register rfkill\n");
1288 rfkill_free(sc->rf_kill.rfkill);
1290 /* Deinitialize the device */
1294 sc->sc_flags |= SC_OP_RFKILL_REGISTERED;
1300 #endif /* CONFIG_RFKILL */
/*
 * Final teardown after ath_detach(): release the IRQ, bus resources,
 * the secondary-wiphy array and the mac80211 hw itself.
 */
1302 void ath_cleanup(struct ath_softc *sc)
1305 free_irq(sc->irq, sc);
1306 ath_bus_cleanup(sc);
1307 kfree(sc->sec_wiphy);
1308 ieee80211_free_hw(sc->hw);
/*
 * Detach the driver from mac80211: deinit rfkill and LEDs, cancel
 * deferred work, unregister/free all secondary wiphys and the primary
 * hw, kill tasklets, wake the chip for teardown, free tx queues, and
 * detach the HAL.  ath_cleanup() finishes the job.
 */
1311 void ath_detach(struct ath_softc *sc)
1313 struct ieee80211_hw *hw = sc->hw;
1316 ath9k_ps_wakeup(sc);
1318 DPRINTF(sc, ATH_DBG_CONFIG, "Detach ATH hw\n");
1320 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
1321 ath_deinit_rfkill(sc);
1323 ath_deinit_leds(sc);
1324 cancel_work_sync(&sc->chan_work);
1325 cancel_delayed_work_sync(&sc->wiphy_work);
1327 for (i = 0; i < sc->num_sec_wiphy; i++) {
1328 struct ath_wiphy *aphy = sc->sec_wiphy[i];
/* Clear the slot before unregistering so callbacks can't see it */
1331 sc->sec_wiphy[i] = NULL;
1332 ieee80211_unregister_hw(aphy->hw);
1333 ieee80211_free_hw(aphy->hw);
1335 ieee80211_unregister_hw(hw);
1339 tasklet_kill(&sc->intr_tq);
1340 tasklet_kill(&sc->bcon_tasklet);
1342 if (!(sc->sc_flags & SC_OP_INVALID))
1343 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
1345 /* cleanup tx queues */
1346 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1347 if (ATH_TXQ_SETUP(sc, i))
1348 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1350 ath9k_hw_detach(sc->sc_ah);
1351 ath9k_exit_debug(sc);
1352 ath9k_ps_restore(sc);
/* One-time software/hardware initialization for a newly probed device:
 * locks/tasklets, HAL attach, key cache reset, regulatory init, rate
 * tables, beacon/CAB/data tx queues, ANI defaults, crypto capabilities,
 * chainmasks, beacon slots and the supported-band tables.
 * Returns 0 on success; on failure the already-created tx queues are
 * cleaned up and the HAL detached (visible at the bottom of the block).
 * NOTE(review): excerpt — several error-branch and declaration lines are
 * missing between the visible statements. */
1355 static int ath_init(u16 devid, struct ath_softc *sc)
1357 struct ath_hw *ah = NULL;
1362 /* XXX: hardware will not be ready until ath_open() being called */
1363 sc->sc_flags |= SC_OP_INVALID;
/* debugfs failure is non-fatal: log and continue. */
1365 if (ath9k_init_debug(sc) < 0)
1366 printk(KERN_ERR "Unable to create debugfs files\n");
1368 spin_lock_init(&sc->wiphy_lock);
1369 spin_lock_init(&sc->sc_resetlock);
1370 spin_lock_init(&sc->sc_serial_rw);
1371 mutex_init(&sc->mutex);
1372 tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
1373 tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
1377 * Cache line size is used to size and align various
1378 * structures used to communicate with the hardware.
1380 ath_read_cachesize(sc, &csz);
1381 /* XXX assert csz is non-zero */
1382 sc->cachelsz = csz << 2; /* convert to bytes */
1384 ah = ath9k_hw_attach(devid, sc, &status);
1386 DPRINTF(sc, ATH_DBG_FATAL,
1387 "Unable to attach hardware; HAL status %d\n", status);
1393 /* Get the hardware key cache size. */
1394 sc->keymax = ah->caps.keycache_size;
/* Clamp to the driver's own maximum to bound key-table bookkeeping. */
1395 if (sc->keymax > ATH_KEYMAX) {
1396 DPRINTF(sc, ATH_DBG_ANY,
1397 "Warning, using only %u entries in %u key cache\n",
1398 ATH_KEYMAX, sc->keymax);
1399 sc->keymax = ATH_KEYMAX;
1403 * Reset the key cache since some parts do not
1404 * reset the contents on initial power up.
1406 for (i = 0; i < sc->keymax; i++)
1407 ath9k_hw_keyreset(ah, (u16) i);
1409 if (ath_regd_init(&sc->sc_ah->regulatory))
1412 /* default to MONITOR mode */
1413 sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR;
1415 /* Setup rate tables */
1417 ath_rate_attach(sc);
1418 ath_setup_rates(sc, IEEE80211_BAND_2GHZ);
1419 ath_setup_rates(sc, IEEE80211_BAND_5GHZ);
1422 * Allocate hardware transmit queues: one queue for
1423 * beacon frames and one data queue for each QoS
1424 * priority. Note that the hal handles reseting
1425 * these queues at the needed time.
1427 sc->beacon.beaconq = ath_beaconq_setup(ah);
1428 if (sc->beacon.beaconq == -1) {
1429 DPRINTF(sc, ATH_DBG_FATAL,
1430 "Unable to setup a beacon xmit queue\n");
1434 sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
1435 if (sc->beacon.cabq == NULL) {
1436 DPRINTF(sc, ATH_DBG_FATAL,
1437 "Unable to setup CAB xmit queue\n");
1442 sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
1443 ath_cabq_update(sc);
/* -1 marks "no hardware queue assigned yet" for each WME access class. */
1445 for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
1446 sc->tx.hwq_map[i] = -1;
1448 /* Setup data queues */
1449 /* NB: ensure BK queue is the lowest priority h/w queue */
1450 if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
1451 DPRINTF(sc, ATH_DBG_FATAL,
1452 "Unable to setup xmit queue for BK traffic\n");
1457 if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
1458 DPRINTF(sc, ATH_DBG_FATAL,
1459 "Unable to setup xmit queue for BE traffic\n");
1463 if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
1464 DPRINTF(sc, ATH_DBG_FATAL,
1465 "Unable to setup xmit queue for VI traffic\n");
1469 if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
1470 DPRINTF(sc, ATH_DBG_FATAL,
1471 "Unable to setup xmit queue for VO traffic\n");
1476 /* Initializes the noise floor to a reasonable default value.
1477 * Later on this will be updated during ANI processing. */
1479 sc->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
1480 setup_timer(&sc->ani.timer, ath_ani_calibrate, (unsigned long)sc);
1482 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1483 ATH9K_CIPHER_TKIP, NULL)) {
1485 * Whether we should enable h/w TKIP MIC.
1486 * XXX: if we don't support WME TKIP MIC, then we wouldn't
1487 * report WMM capable, so it's always safe to turn on
1488 * TKIP MIC in this case.
1490 ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
1495 * Check whether the separate key cache entries
1496 * are required to handle both tx+rx MIC keys.
1497 * With split mic keys the number of stations is limited
1498 * to 27 otherwise 59.
1500 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1501 ATH9K_CIPHER_TKIP, NULL)
1502 && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1503 ATH9K_CIPHER_MIC, NULL)
1504 && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
1508 /* turn on mcast key search if possible */
1509 if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
1510 (void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
1513 sc->config.txpowlimit = ATH_TXPOWER_MAX;
1515 /* 11n Capabilities */
1516 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
1517 sc->sc_flags |= SC_OP_TXAGGR;
1518 sc->sc_flags |= SC_OP_RXAGGR;
1521 sc->tx_chainmask = ah->caps.tx_chainmask;
1522 sc->rx_chainmask = ah->caps.rx_chainmask;
1524 ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
1525 sc->rx.defant = ath9k_hw_getdefantenna(ah);
1527 if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
1528 memcpy(sc->bssidmask, ath_bcast_mac, ETH_ALEN);
1530 sc->beacon.slottime = ATH9K_SLOT_TIME_9; /* default to short slot time */
1532 /* initialize beacon slots */
1533 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
1534 sc->beacon.bslot[i] = NULL;
1535 sc->beacon.bslot_aphy[i] = NULL;
1538 /* setup channels and rates */
1540 sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
1541 sc->sbands[IEEE80211_BAND_2GHZ].bitrates =
1542 sc->rates[IEEE80211_BAND_2GHZ];
1543 sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
1544 sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
1545 ARRAY_SIZE(ath9k_2ghz_chantable);
/* 5 GHz band is populated only when the chip advertises 11a support. */
1547 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
1548 sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
1549 sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
1550 sc->rates[IEEE80211_BAND_5GHZ];
1551 sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
1552 sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
1553 ARRAY_SIZE(ath9k_5ghz_chantable);
1556 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BT_COEX)
1557 ath9k_hw_btcoex_enable(sc->sc_ah);
/* Error path: undo tx queue setup and detach the HAL. */
1561 /* cleanup tx queues */
1562 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1563 if (ATH_TXQ_SETUP(sc, i))
1564 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1567 ath9k_hw_detach(ah);
1568 ath9k_exit_debug(sc);
/* cfg80211 regulatory-change callback: resolve the wiphy back to our
 * softc and forward the request to the shared ath regulatory code. */
1573 static int ath9k_reg_notifier(struct wiphy *wiphy,
1574 struct regulatory_request *request)
1576 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
1577 struct ath_wiphy *aphy = hw->priv;
1578 struct ath_softc *sc = aphy->sc;
1579 struct ath_regulatory *reg = &sc->sc_ah->regulatory;
1581 return ath_reg_notifier_apply(wiphy, request, reg);
/* Advertise this device's capabilities to mac80211: hw flags, supported
 * interface modes, regulatory hooks, sizing/timing parameters, the rate
 * control algorithm and the supported band structures. */
1584 void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
1586 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
1587 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1588 IEEE80211_HW_SIGNAL_DBM |
1589 IEEE80211_HW_AMPDU_AGGREGATION |
1590 IEEE80211_HW_SUPPORTS_PS |
1591 IEEE80211_HW_PS_NULLFUNC_STACK |
1592 IEEE80211_HW_SPECTRUM_MGMT;
/* MFP (802.11w) is advertised on AR9160+ hardware, or always when
 * hardware crypto is disabled via the nohwcrypt module parameter. */
1594 if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
1595 hw->flags |= IEEE80211_HW_MFP_CAPABLE;
1597 hw->wiphy->interface_modes =
1598 BIT(NL80211_IFTYPE_AP) |
1599 BIT(NL80211_IFTYPE_STATION) |
1600 BIT(NL80211_IFTYPE_ADHOC) |
1601 BIT(NL80211_IFTYPE_MESH_POINT);
1603 hw->wiphy->reg_notifier = ath9k_reg_notifier;
1604 hw->wiphy->strict_regulatory = true;
1608 hw->channel_change_time = 5000;
1609 hw->max_listen_interval = 10;
1610 hw->max_rate_tries = ATH_11N_TXMAXTRY;
1611 hw->sta_data_size = sizeof(struct ath_node);
1612 hw->vif_data_size = sizeof(struct ath_vif);
1614 hw->rate_control_algorithm = "ath9k_rate_control";
1616 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
1617 &sc->sbands[IEEE80211_BAND_2GHZ];
/* 5 GHz band is exposed only when the chip supports 11a. */
1618 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
1619 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
1620 &sc->sbands[IEEE80211_BAND_5GHZ];
/* Top-level attach: run ath_init(), publish the MAC address and hw
 * capabilities to mac80211, set up HT caps, tx/rx engines and rfkill,
 * apply the (possibly custom world) regulatory domain, register the hw
 * with mac80211 and start LED control. Returns 0 on success; the error
 * path at the bottom tears down tx queues, the HAL and debugfs.
 * NOTE(review): excerpt — several error-branch lines are missing. */
1623 int ath_attach(u16 devid, struct ath_softc *sc)
1625 struct ieee80211_hw *hw = sc->hw;
1626 const struct ieee80211_regdomain *regd;
1628 struct ath_regulatory *reg;
1630 DPRINTF(sc, ATH_DBG_CONFIG, "Attach ATH hw\n");
1632 error = ath_init(devid, sc);
1636 reg = &sc->sc_ah->regulatory;
1638 /* get mac address from hardware and set in mac80211 */
1640 SET_IEEE80211_PERM_ADDR(hw, sc->sc_ah->macaddr);
1642 ath_set_hw_capab(sc, hw);
1644 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
1645 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
1646 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
1647 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
1650 /* initialize tx/rx engine */
1651 error = ath_tx_init(sc, ATH_TXBUF);
1655 error = ath_rx_init(sc, ATH_RXBUF);
1659 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
1660 /* Initialze h/w Rfkill */
1661 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1662 INIT_DELAYED_WORK(&sc->rf_kill.rfkill_poll, ath_rfkill_poll);
1664 /* Initialize s/w rfkill */
1665 error = ath_init_sw_rfkill(sc);
/* World regulatory domains get a custom regdomain applied before wiphy
 * registration so it is saved in the wiphy's orig_* fields. */
1670 if (ath_is_world_regd(reg)) {
1671 /* Anything applied here (prior to wiphy registration) gets
1672 * saved on the wiphy orig_* parameters */
1673 regd = ath_world_regdomain(reg);
1674 hw->wiphy->custom_regulatory = true;
1675 hw->wiphy->strict_regulatory = false;
1677 /* This gets applied in the case of the absense of CRDA,
1678 * it's our own custom world regulatory domain, similar to
1679 * cfg80211's but we enable passive scanning */
1680 regd = ath_default_world_regdomain();
1682 wiphy_apply_custom_regulatory(hw->wiphy, regd);
1683 ath_reg_apply_radar_flags(hw->wiphy);
1684 ath_reg_apply_world_flags(hw->wiphy,
1685 NL80211_REGDOM_SET_BY_DRIVER,
1688 INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
1689 INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
1690 sc->wiphy_scheduler_int = msecs_to_jiffies(500);
1692 error = ieee80211_register_hw(hw);
/* A concrete (non-world) regdomain is hinted to cfg80211 after
 * registration. */
1694 if (!ath_is_world_regd(reg)) {
1695 error = regulatory_hint(hw->wiphy, reg->alpha2);
1700 /* Initialize LED control */
/* Error path: release tx queues, detach HAL, remove debugfs. */
1707 /* cleanup tx queues */
1708 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1709 if (ATH_TXQ_SETUP(sc, i))
1710 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1712 ath9k_hw_detach(sc->sc_ah);
1713 ath9k_exit_debug(sc);
/* Full chip reset: mask interrupts, drain tx (optionally re-queueing
 * frames when retry_tx), reset the hardware on the current channel under
 * sc_resetlock, restart rx, refresh rate/txpower/beacon state and
 * re-enable interrupts. When retry_tx is set, all configured tx queues
 * are re-scheduled at the end. */
1718 int ath_reset(struct ath_softc *sc, bool retry_tx)
1720 struct ath_hw *ah = sc->sc_ah;
1721 struct ieee80211_hw *hw = sc->hw;
1724 ath9k_hw_set_interrupts(ah, 0);
1725 ath_drain_all_txq(sc, retry_tx);
1729 spin_lock_bh(&sc->sc_resetlock);
1730 r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
1732 DPRINTF(sc, ATH_DBG_FATAL,
1733 "Unable to reset hardware; reset status %u\n", r);
1734 spin_unlock_bh(&sc->sc_resetlock);
1736 if (ath_startrecv(sc) != 0)
1737 DPRINTF(sc, ATH_DBG_FATAL, "Unable to start recv logic\n");
1740 * We may be doing a reset in response to a request
1741 * that changes the channel so update any state that
1742 * might change as a result.
1744 ath_cache_conf_rate(sc, &hw->conf);
1746 ath_update_txpow(sc);
1748 if (sc->sc_flags & SC_OP_BEACONS)
1749 ath_beacon_config(sc, NULL); /* restart beacons */
1751 ath9k_hw_set_interrupts(ah, sc->imask);
/* retry_tx path: kick every configured queue so drained frames resume. */
1755 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1756 if (ATH_TXQ_SETUP(sc, i)) {
1757 spin_lock_bh(&sc->tx.txq[i].axq_lock);
1758 ath_txq_schedule(sc, &sc->tx.txq[i]);
1759 spin_unlock_bh(&sc->tx.txq[i].axq_lock);
1768 * This function will allocate both the DMA descriptor structure, and the
1769 * buffers it contains. These are used to contain the descriptors used
/* Allocate a coherent DMA descriptor ring plus the ath_buf array that
 * tracks it, linking every buffer onto 'head'. On chips without
 * 4KB-split-transfer support, extra space is reserved and descriptor
 * addresses that would straddle a 4KB boundary are skipped. */
1772 int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
1773 struct list_head *head, const char *name,
1774 int nbuf, int ndesc)
1776 #define DS2PHYS(_dd, _ds) \
1777 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
1778 #define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
1779 #define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
1781 struct ath_desc *ds;
1783 int i, bsize, error;
1785 DPRINTF(sc, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
1788 INIT_LIST_HEAD(head);
1789 /* ath_desc must be a multiple of DWORDs */
1790 if ((sizeof(struct ath_desc) % 4) != 0) {
1791 DPRINTF(sc, ATH_DBG_FATAL, "ath_desc not DWORD aligned\n");
1792 ASSERT((sizeof(struct ath_desc) % 4) == 0);
1797 dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
1800 * Need additional DMA memory because we can't use
1801 * descriptors that cross the 4K page boundary. Assume
1802 * one skipped descriptor per 4K page.
1804 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
1806 ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
/* Growing the length creates more 4KB pages, which may themselves need
 * skipped descriptors — iterate until no more are required. */
1809 while (ndesc_skipped) {
1810 dma_len = ndesc_skipped * sizeof(struct ath_desc);
1811 dd->dd_desc_len += dma_len;
1813 ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
1817 /* allocate descriptors */
1818 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
1819 &dd->dd_desc_paddr, GFP_KERNEL);
1820 if (dd->dd_desc == NULL) {
1825 DPRINTF(sc, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
1826 name, ds, (u32) dd->dd_desc_len,
1827 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
1829 /* allocate buffers */
1830 bsize = sizeof(struct ath_buf) * nbuf;
1831 bf = kzalloc(bsize, GFP_KERNEL);
/* Walk the ring: each ath_buf gets ndesc descriptors and the matching
 * physical address computed via DS2PHYS. */
1838 for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
1840 bf->bf_daddr = DS2PHYS(dd, ds);
1842 if (!(sc->sc_ah->caps.hw_caps &
1843 ATH9K_HW_CAP_4KB_SPLITTRANS)) {
1845 * Skip descriptor addresses which can cause 4KB
1846 * boundary crossing (addr + length) with a 32 dword
1849 while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
1850 ASSERT((caddr_t) bf->bf_desc <
1851 ((caddr_t) dd->dd_desc +
1856 bf->bf_daddr = DS2PHYS(dd, ds);
1859 list_add_tail(&bf->list, head);
/* Error path: free the coherent DMA region and wipe the descriptor
 * bookkeeping so a retry starts clean. */
1863 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
1866 memset(dd, 0, sizeof(*dd));
1868 #undef ATH_DESC_4KB_BOUND_CHECK
1869 #undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
/* Release a descriptor ring created by ath_descdma_setup(): free the
 * coherent DMA memory and the ath_buf array, reset the list head and
 * zero the bookkeeping struct so it can be reused. */
1873 void ath_descdma_cleanup(struct ath_softc *sc,
1874 struct ath_descdma *dd,
1875 struct list_head *head)
1877 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
1880 INIT_LIST_HEAD(head);
1881 kfree(dd->dd_bufptr);
1882 memset(dd, 0, sizeof(*dd));
/* Map a mac80211 queue number to the hardware queue assigned to the
 * corresponding WME access class (VO/VI/BE/BK), falling back to BE.
 * NOTE(review): excerpt — the switch/case labels between these
 * assignments are missing from this view. */
1885 int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
1891 qnum = sc->tx.hwq_map[ATH9K_WME_AC_VO];
1894 qnum = sc->tx.hwq_map[ATH9K_WME_AC_VI];
1897 qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE];
1900 qnum = sc->tx.hwq_map[ATH9K_WME_AC_BK];
1903 qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE];
/* Inverse of ath_get_hal_qnum(): map a WME access class back to the
 * mac80211 queue index.
 * NOTE(review): excerpt — the return statements for each case are
 * missing from this view. */
1910 int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
1915 case ATH9K_WME_AC_VO:
1918 case ATH9K_WME_AC_VI:
1921 case ATH9K_WME_AC_BE:
1924 case ATH9K_WME_AC_BK:
1935 /* XXX: Remove me once we don't depend on ath9k_channel for all
1936 * this redundant data */
/* Mirror mac80211's current channel configuration into the driver's
 * internal ath9k_channel: frequency, band flags (G vs A), and the HT
 * channel mode / tx channel width when HT or HT40 is configured. */
1937 void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
1938 struct ath9k_channel *ichan)
1940 struct ieee80211_channel *chan = hw->conf.channel;
1941 struct ieee80211_conf *conf = &hw->conf;
1943 ichan->channel = chan->center_freq;
1946 if (chan->band == IEEE80211_BAND_2GHZ) {
1947 ichan->chanmode = CHANNEL_G;
1948 ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM;
1950 ichan->chanmode = CHANNEL_A;
1951 ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
/* Default to 20 MHz; widened below only for HT40 configurations. */
1954 sc->tx_chan_width = ATH9K_HT_MACMODE_20;
1956 if (conf_is_ht(conf)) {
1957 if (conf_is_ht40(conf))
1958 sc->tx_chan_width = ATH9K_HT_MACMODE_2040;
1960 ichan->chanmode = ath_get_extchanmode(sc, chan,
1961 conf->channel_type);
1965 /**********************/
1966 /* mac80211 callbacks */
1967 /**********************/
/* mac80211 start callback. For a secondary wiphy while another wiphy is
 * already running, just mark it active (same channel) or paused
 * (different channel) and return. Otherwise: pick the initial channel,
 * wake the PCI SERDES, reset the chip, start rx, program the interrupt
 * mask, clear SC_OP_INVALID and wake the queues. */
1969 static int ath9k_start(struct ieee80211_hw *hw)
1971 struct ath_wiphy *aphy = hw->priv;
1972 struct ath_softc *sc = aphy->sc;
1973 struct ieee80211_channel *curchan = hw->conf.channel;
1974 struct ath9k_channel *init_channel;
1977 DPRINTF(sc, ATH_DBG_CONFIG, "Starting driver with "
1978 "initial channel: %d MHz\n", curchan->center_freq);
1980 mutex_lock(&sc->mutex);
/* Virtual-wiphy case: hardware already running under another wiphy. */
1982 if (ath9k_wiphy_started(sc)) {
1983 if (sc->chan_idx == curchan->hw_value) {
1985 * Already on the operational channel, the new wiphy
1986 * can be marked active.
1988 aphy->state = ATH_WIPHY_ACTIVE;
1989 ieee80211_wake_queues(hw);
1992 * Another wiphy is on another channel, start the new
1993 * wiphy in paused state.
1995 aphy->state = ATH_WIPHY_PAUSED;
1996 ieee80211_stop_queues(hw);
1998 mutex_unlock(&sc->mutex);
2001 aphy->state = ATH_WIPHY_ACTIVE;
2003 /* setup initial channel */
2005 pos = curchan->hw_value;
2008 init_channel = &sc->sc_ah->channels[pos];
2009 ath9k_update_ichannel(sc, hw, init_channel);
2011 /* Reset SERDES registers */
2012 ath9k_hw_configpcipowersave(sc->sc_ah, 0);
2015 * The basic interface to setting the hardware in a good
2016 * state is ``reset''. On return the hardware is known to
2017 * be powered up and with interrupts disabled. This must
2018 * be followed by initialization of the appropriate bits
2019 * and then setup of the interrupt mask.
2021 spin_lock_bh(&sc->sc_resetlock);
2022 r = ath9k_hw_reset(sc->sc_ah, init_channel, false);
2024 DPRINTF(sc, ATH_DBG_FATAL,
2025 "Unable to reset hardware; reset status %u "
2026 "(freq %u MHz)\n", r,
2027 curchan->center_freq);
2028 spin_unlock_bh(&sc->sc_resetlock);
2031 spin_unlock_bh(&sc->sc_resetlock);
2034 * This is needed only to setup initial state
2035 * but it's best done after a reset.
2037 ath_update_txpow(sc);
2040 * Setup the hardware after reset:
2041 * The receive engine is set going.
2042 * Frame transmit is handled entirely
2043 * in the frame output path; there's nothing to do
2044 * here except setup the interrupt mask.
2046 if (ath_startrecv(sc) != 0) {
2047 DPRINTF(sc, ATH_DBG_FATAL, "Unable to start recv logic\n");
2052 /* Setup our intr mask. */
2053 sc->imask = ATH9K_INT_RX | ATH9K_INT_TX
2054 | ATH9K_INT_RXEOL | ATH9K_INT_RXORN
2055 | ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
2057 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_GTT)
2058 sc->imask |= ATH9K_INT_GTT;
2060 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
2061 sc->imask |= ATH9K_INT_CST;
2063 ath_cache_conf_rate(sc, &hw->conf);
/* Device is now operational. */
2065 sc->sc_flags &= ~SC_OP_INVALID;
2067 /* Disable BMISS interrupt when we're not associated */
2068 sc->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
2069 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
2071 ieee80211_wake_queues(hw);
2073 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
2074 r = ath_start_rfkill_poll(sc);
2078 mutex_unlock(&sc->mutex);
/* mac80211 transmit callback. Rejects frames while the wiphy is not
 * active or scanning, assigns a sequence number when mac80211 asked for
 * one, pads the 802.11 header to a 4-byte boundary, selects a tx queue
 * and hands the frame to ath_tx_start(). Frames that fail are freed. */
2083 static int ath9k_tx(struct ieee80211_hw *hw,
2084 struct sk_buff *skb)
2086 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2087 struct ath_wiphy *aphy = hw->priv;
2088 struct ath_softc *sc = aphy->sc;
2089 struct ath_tx_control txctl;
2090 int hdrlen, padsize;
2092 if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) {
2093 printk(KERN_DEBUG "ath9k: %s: TX in unexpected wiphy state "
2094 "%d\n", wiphy_name(hw->wiphy), aphy->state);
2098 memset(&txctl, 0, sizeof(struct ath_tx_control));
2101 * As a temporary workaround, assign seq# here; this will likely need
2102 * to be cleaned up to work better with Beacon transmission and virtual
2105 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
2106 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
/* Sequence number advances per MSDU (first fragment only), in units of
 * 0x10 because the low 4 bits are the fragment number. */
2107 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
2108 sc->tx.seq_no += 0x10;
2109 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
2110 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
2113 /* Add the padding after the header if this is not already done */
2114 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
2116 padsize = hdrlen % 4;
2117 if (skb_headroom(skb) < padsize)
2119 skb_push(skb, padsize);
2120 memmove(skb->data, skb->data + padsize, hdrlen);
2123 /* Check if a tx queue is available */
2125 txctl.txq = ath_test_get_txq(sc, skb);
2129 DPRINTF(sc, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb);
2131 if (ath_tx_start(hw, skb, &txctl) != 0) {
2132 DPRINTF(sc, ATH_DBG_XMIT, "TX failed\n");
2138 dev_kfree_skb_any(skb);
/* mac80211 stop callback. Marks this wiphy inactive; if another wiphy
 * still uses the hardware, only the queues are stopped. Otherwise the
 * radio is fully shut down: interrupts masked, tx drained, PHY disabled,
 * rfkill poll cancelled, chip put to sleep, and SC_OP_INVALID set. */
2142 static void ath9k_stop(struct ieee80211_hw *hw)
2144 struct ath_wiphy *aphy = hw->priv;
2145 struct ath_softc *sc = aphy->sc;
2147 aphy->state = ATH_WIPHY_INACTIVE;
2149 if (sc->sc_flags & SC_OP_INVALID) {
2150 DPRINTF(sc, ATH_DBG_ANY, "Device not present\n");
2154 mutex_lock(&sc->mutex);
2156 ieee80211_stop_queues(hw);
2158 if (ath9k_wiphy_started(sc)) {
2159 mutex_unlock(&sc->mutex);
2160 return; /* another wiphy still in use */
2163 /* make sure h/w will not generate any interrupt
2164 * before setting the invalid flag. */
2165 ath9k_hw_set_interrupts(sc->sc_ah, 0);
2167 if (!(sc->sc_flags & SC_OP_INVALID)) {
2168 ath_drain_all_txq(sc, false);
2170 ath9k_hw_phy_disable(sc->sc_ah);
2172 sc->rx.rxlink = NULL;
2174 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
2175 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
2176 cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
2178 /* disable HAL and put h/w to sleep */
2179 ath9k_hw_disable(sc->sc_ah);
2180 ath9k_hw_configpcipowersave(sc->sc_ah, 1);
2182 sc->sc_flags |= SC_OP_INVALID;
2184 mutex_unlock(&sc->mutex);
2186 DPRINTF(sc, ATH_DBG_CONFIG, "Driver halt\n");
/* mac80211 add_interface callback. Validates the interface type (beacon
 * interfaces are limited by ATH_BCBUF), stores the opmode on the vif,
 * programs the BSSID mask, and — for the primary vif — sets the device
 * opmode, enables MIB/TSFOOR interrupts for station-like modes and
 * starts the ANI timer for AP mode.
 * NOTE(review): excerpt — error-return lines (e.g. on interface limits)
 * are missing between the visible statements. */
2189 static int ath9k_add_interface(struct ieee80211_hw *hw,
2190 struct ieee80211_if_init_conf *conf)
2192 struct ath_wiphy *aphy = hw->priv;
2193 struct ath_softc *sc = aphy->sc;
2194 struct ath_vif *avp = (void *)conf->vif->drv_priv;
2195 enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED;
2198 mutex_lock(&sc->mutex);
2200 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) &&
2206 switch (conf->type) {
2207 case NL80211_IFTYPE_STATION:
2208 ic_opmode = NL80211_IFTYPE_STATION;
2210 case NL80211_IFTYPE_ADHOC:
2211 case NL80211_IFTYPE_AP:
2212 case NL80211_IFTYPE_MESH_POINT:
/* Beaconing interfaces are bounded by the number of beacon buffers. */
2213 if (sc->nbcnvifs >= ATH_BCBUF) {
2217 ic_opmode = conf->type;
2220 DPRINTF(sc, ATH_DBG_FATAL,
2221 "Interface type %d not yet supported\n", conf->type);
2226 DPRINTF(sc, ATH_DBG_CONFIG, "Attach a VIF of type: %d\n", ic_opmode);
2228 /* Set the VIF opmode */
2229 avp->av_opmode = ic_opmode;
2234 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
2235 ath9k_set_bssid_mask(hw);
2238 goto out; /* skip global settings for secondary vif */
2240 if (ic_opmode == NL80211_IFTYPE_AP) {
2241 ath9k_hw_set_tsfadjust(sc->sc_ah, 1);
2242 sc->sc_flags |= SC_OP_TSF_RESET;
2245 /* Set the device opmode */
2246 sc->sc_ah->opmode = ic_opmode;
2249 * Enable MIB interrupts when there are hardware phy counters.
2250 * Note we only do this (at the moment) for station mode.
2252 if ((conf->type == NL80211_IFTYPE_STATION) ||
2253 (conf->type == NL80211_IFTYPE_ADHOC) ||
2254 (conf->type == NL80211_IFTYPE_MESH_POINT)) {
2255 if (ath9k_hw_phycounters(sc->sc_ah))
2256 sc->imask |= ATH9K_INT_MIB;
2257 sc->imask |= ATH9K_INT_TSFOOR;
2260 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
2262 if (conf->type == NL80211_IFTYPE_AP) {
2263 /* TODO: is this a suitable place to start ANI for AP mode? */
2265 mod_timer(&sc->ani.timer,
2266 jiffies + msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
2270 mutex_unlock(&sc->mutex);
/* mac80211 remove_interface callback. Stops the ANI timer, reclaims
 * beacon resources for beaconing opmodes, and clears any beacon slot the
 * vif still holds. */
2274 static void ath9k_remove_interface(struct ieee80211_hw *hw,
2275 struct ieee80211_if_init_conf *conf)
2277 struct ath_wiphy *aphy = hw->priv;
2278 struct ath_softc *sc = aphy->sc;
2279 struct ath_vif *avp = (void *)conf->vif->drv_priv;
2282 DPRINTF(sc, ATH_DBG_CONFIG, "Detach Interface\n");
2284 mutex_lock(&sc->mutex);
2287 del_timer_sync(&sc->ani.timer);
2289 /* Reclaim beacon resources */
2290 if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
2291 (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
2292 (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) {
2293 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
2294 ath_beacon_return(sc, avp);
2297 sc->sc_flags &= ~SC_OP_BEACONS;
/* Defensive sweep: a vif should have released its slot already; log if
 * one is still found and clear it. */
2299 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
2300 if (sc->beacon.bslot[i] == conf->vif) {
2301 printk(KERN_DEBUG "%s: vif had allocated beacon "
2302 "slot\n", __func__);
2303 sc->beacon.bslot[i] = NULL;
2304 sc->beacon.bslot_aphy[i] = NULL;
2310 mutex_unlock(&sc->mutex);
/* mac80211 config callback. Handles power-save transitions (TIM timer
 * interrupt and rx abort on chips without autosleep), channel changes
 * (pausing other virtual wiphys, updating the internal channel and
 * chainmask), tx power limit updates and beacon-interval-driven TSF
 * reset flagging. */
2313 static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2315 struct ath_wiphy *aphy = hw->priv;
2316 struct ath_softc *sc = aphy->sc;
2317 struct ieee80211_conf *conf = &hw->conf;
2318 struct ath_hw *ah = sc->sc_ah;
2320 mutex_lock(&sc->mutex);
2322 if (changed & IEEE80211_CONF_CHANGE_PS) {
2323 if (conf->flags & IEEE80211_CONF_PS) {
/* Without hardware autosleep, the driver must arm the TIM timer
 * interrupt and abort rx before entering network sleep. */
2324 if (!(ah->caps.hw_caps &
2325 ATH9K_HW_CAP_AUTOSLEEP)) {
2326 if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
2327 sc->imask |= ATH9K_INT_TIM_TIMER;
2328 ath9k_hw_set_interrupts(sc->sc_ah,
2331 ath9k_hw_setrxabort(sc->sc_ah, 1);
2333 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP);
2335 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
2336 if (!(ah->caps.hw_caps &
2337 ATH9K_HW_CAP_AUTOSLEEP)) {
2338 ath9k_hw_setrxabort(sc->sc_ah, 0);
2339 sc->sc_flags &= ~SC_OP_WAIT_FOR_BEACON;
2340 if (sc->imask & ATH9K_INT_TIM_TIMER) {
2341 sc->imask &= ~ATH9K_INT_TIM_TIMER;
2342 ath9k_hw_set_interrupts(sc->sc_ah,
2349 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
2350 struct ieee80211_channel *curchan = hw->conf.channel;
2351 int pos = curchan->hw_value;
2353 aphy->chan_idx = pos;
2354 aphy->chan_is_ht = conf_is_ht(conf);
2356 if (aphy->state == ATH_WIPHY_SCAN ||
2357 aphy->state == ATH_WIPHY_ACTIVE)
2358 ath9k_wiphy_pause_all_forced(sc, aphy);
2361 * Do not change operational channel based on a paused
2364 goto skip_chan_change;
2367 DPRINTF(sc, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
2368 curchan->center_freq);
2370 /* XXX: remove me eventualy */
2371 ath9k_update_ichannel(sc, hw, &sc->sc_ah->channels[pos]);
2373 ath_update_chainmask(sc, conf_is_ht(conf));
2375 if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) {
2376 DPRINTF(sc, ATH_DBG_FATAL, "Unable to set channel\n");
2377 mutex_unlock(&sc->mutex);
/* mac80211 power level is in dBm; driver stores it in half-dBm units. */
2383 if (changed & IEEE80211_CONF_CHANGE_POWER)
2384 sc->config.txpowlimit = 2 * conf->power_level;
2387 * The HW TSF has to be reset when the beacon interval changes.
2388 * We set the flag here, and ath_beacon_config_ap() would take this
2389 * into account when it gets called through the subsequent
2390 * config_interface() call - with IFCC_BEACON in the changed field.
2393 if (changed & IEEE80211_CONF_CHANGE_BEACON_INTERVAL)
2394 sc->sc_flags |= SC_OP_TSF_RESET;
2396 mutex_unlock(&sc->mutex);
/* mac80211 config_interface callback. Adjusts the hw opmode for AP vifs
 * on multi-interface setups, programs BSSID changes (curbssid, assoc id,
 * beacon reconfiguration), (re)allocates and configures the beacon for
 * beaconing vifs, refreshes WEP key MACs for non-station opmodes and
 * narrows the chainmask for legacy IBSS. */
2401 static int ath9k_config_interface(struct ieee80211_hw *hw,
2402 struct ieee80211_vif *vif,
2403 struct ieee80211_if_conf *conf)
2405 struct ath_wiphy *aphy = hw->priv;
2406 struct ath_softc *sc = aphy->sc;
2407 struct ath_hw *ah = sc->sc_ah;
2408 struct ath_vif *avp = (void *)vif->drv_priv;
2412 mutex_lock(&sc->mutex);
2414 /* TODO: Need to decide which hw opmode to use for multi-interface
2416 if (vif->type == NL80211_IFTYPE_AP &&
2417 ah->opmode != NL80211_IFTYPE_AP) {
2418 ah->opmode = NL80211_IFTYPE_STATION;
2419 ath9k_hw_setopmode(ah);
2420 memcpy(sc->curbssid, sc->sc_ah->macaddr, ETH_ALEN);
2422 ath9k_hw_write_associd(sc);
2423 /* Request full reset to get hw opmode changed properly */
2424 sc->sc_flags |= SC_OP_FULL_RESET;
2427 if ((conf->changed & IEEE80211_IFCC_BSSID) &&
2428 !is_zero_ether_addr(conf->bssid)) {
2429 switch (vif->type) {
2430 case NL80211_IFTYPE_STATION:
2431 case NL80211_IFTYPE_ADHOC:
2432 case NL80211_IFTYPE_MESH_POINT:
2434 memcpy(sc->curbssid, conf->bssid, ETH_ALEN);
2435 memcpy(avp->bssid, conf->bssid, ETH_ALEN);
2437 ath9k_hw_write_associd(sc);
2439 /* Set aggregation protection mode parameters */
2440 sc->config.ath_aggr_prot = 0;
2442 DPRINTF(sc, ATH_DBG_CONFIG,
2443 "RX filter 0x%x bssid %pM aid 0x%x\n",
2444 rfilt, sc->curbssid, sc->curaid);
2446 /* need to reconfigure the beacon */
2447 sc->sc_flags &= ~SC_OP_BEACONS ;
2455 if ((vif->type == NL80211_IFTYPE_ADHOC) ||
2456 (vif->type == NL80211_IFTYPE_AP) ||
2457 (vif->type == NL80211_IFTYPE_MESH_POINT)) {
2458 if ((conf->changed & IEEE80211_IFCC_BEACON) ||
2459 (conf->changed & IEEE80211_IFCC_BEACON_ENABLED &&
2460 conf->enable_beacon)) {
2462 * Allocate and setup the beacon frame.
2464 * Stop any previous beacon DMA. This may be
2465 * necessary, for example, when an ibss merge
2466 * causes reconfiguration; we may be called
2467 * with beacon transmission active.
2469 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
2471 error = ath_beacon_alloc(aphy, vif);
2473 mutex_unlock(&sc->mutex);
2477 ath_beacon_config(sc, vif);
2481 /* Check for WLAN_CAPABILITY_PRIVACY ? */
/* Refresh the MAC bound to each valid WEP key slot for AP/IBSS/mesh. */
2482 if ((avp->av_opmode != NL80211_IFTYPE_STATION)) {
2483 for (i = 0; i < IEEE80211_WEP_NKID; i++)
2484 if (ath9k_hw_keyisvalid(sc->sc_ah, (u16)i))
2485 ath9k_hw_keysetmac(sc->sc_ah,
2490 /* Only legacy IBSS for now */
2491 if (vif->type == NL80211_IFTYPE_ADHOC)
2492 ath_update_chainmask(sc, 0);
2494 mutex_unlock(&sc->mutex);
/* RX filter flags this driver honors in ath9k_configure_filter().
 * NOTE(review): excerpt — some continuation lines of this macro are
 * missing from this view. */
2499 #define SUPPORTED_FILTERS \
2500 (FIF_PROMISC_IN_BSS | \
2504 FIF_BCN_PRBRESP_PROMISC | \
2507 /* FIXME: sc->sc_full_reset ? */
/* mac80211 configure_filter callback: clamp the requested flags to what
 * the driver supports, cache them, recompute the hardware rx filter and
 * program it into the chip. */
2508 static void ath9k_configure_filter(struct ieee80211_hw *hw,
2509 unsigned int changed_flags,
2510 unsigned int *total_flags,
2512 struct dev_mc_list *mclist)
2514 struct ath_wiphy *aphy = hw->priv;
2515 struct ath_softc *sc = aphy->sc;
2518 changed_flags &= SUPPORTED_FILTERS;
2519 *total_flags &= SUPPORTED_FILTERS;
2521 sc->rx.rxfilter = *total_flags;
2522 rfilt = ath_calcrxfilter(sc);
2523 ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
2525 DPRINTF(sc, ATH_DBG_CONFIG, "Set HW RX filter: 0x%x\n", sc->rx.rxfilter);
/* mac80211 sta_notify callback: attach/detach per-station driver state
 * when a station is added to or removed from the BSS. */
2528 static void ath9k_sta_notify(struct ieee80211_hw *hw,
2529 struct ieee80211_vif *vif,
2530 enum sta_notify_cmd cmd,
2531 struct ieee80211_sta *sta)
2533 struct ath_wiphy *aphy = hw->priv;
2534 struct ath_softc *sc = aphy->sc;
2537 case STA_NOTIFY_ADD:
2538 ath_node_attach(sc, sta);
2540 case STA_NOTIFY_REMOVE:
2541 ath_node_detach(sc, sta);
/* mac80211 conf_tx callback: translate the WME queue parameters (AIFS,
 * CWmin/CWmax, TXOP) into an ath9k_tx_queue_info and apply them to the
 * hardware queue mapped to this access class. */
2548 static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
2549 const struct ieee80211_tx_queue_params *params)
2551 struct ath_wiphy *aphy = hw->priv;
2552 struct ath_softc *sc = aphy->sc;
2553 struct ath9k_tx_queue_info qi;
2556 if (queue >= WME_NUM_AC)
2559 mutex_lock(&sc->mutex);
2561 memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
2563 qi.tqi_aifs = params->aifs;
2564 qi.tqi_cwmin = params->cw_min;
2565 qi.tqi_cwmax = params->cw_max;
2566 qi.tqi_burstTime = params->txop;
2567 qnum = ath_get_hal_qnum(queue, sc);
2569 DPRINTF(sc, ATH_DBG_CONFIG,
2570 "Configure tx [queue/halq] [%d/%d], "
2571 "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
2572 queue, qnum, params->aifs, params->cw_min,
2573 params->cw_max, params->txop);
2575 ret = ath_txq_update(sc, qnum, &qi);
2577 DPRINTF(sc, ATH_DBG_FATAL, "TXQ Update failed\n");
2579 mutex_unlock(&sc->mutex);
/* mac80211 set_key callback: install or delete a hardware crypto key.
 * Disabled entirely by the nohwcrypt module parameter. On successful
 * install, IV generation (and Michael MIC for TKIP) is left to the
 * stack, and software crypto is requested for CCMP management frames
 * when the hardware cannot handle them. Runs with the chip awake. */
2584 static int ath9k_set_key(struct ieee80211_hw *hw,
2585 enum set_key_cmd cmd,
2586 struct ieee80211_vif *vif,
2587 struct ieee80211_sta *sta,
2588 struct ieee80211_key_conf *key)
2590 struct ath_wiphy *aphy = hw->priv;
2591 struct ath_softc *sc = aphy->sc;
2594 if (modparam_nohwcrypt)
2597 mutex_lock(&sc->mutex);
2598 ath9k_ps_wakeup(sc);
2599 DPRINTF(sc, ATH_DBG_CONFIG, "Set HW Key\n");
2603 ret = ath_key_config(sc, vif, sta, key);
2605 key->hw_key_idx = ret;
2606 /* push IV and Michael MIC generation to stack */
2607 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
2608 if (key->alg == ALG_TKIP)
2609 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
2610 if (sc->sc_ah->sw_mgmt_crypto && key->alg == ALG_CCMP)
2611 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
2616 ath_key_delete(sc, key);
2622 ath9k_ps_restore(sc);
2623 mutex_unlock(&sc->mutex);
/* mac80211 bss_info_changed callback: track short-preamble and CTS
 * protection flags in sc_flags, and process association state changes.
 * CTS protection is only enabled outside the 5 GHz band. */
2628 static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
2629 struct ieee80211_vif *vif,
2630 struct ieee80211_bss_conf *bss_conf,
2633 struct ath_wiphy *aphy = hw->priv;
2634 struct ath_softc *sc = aphy->sc;
2636 mutex_lock(&sc->mutex);
2638 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
2639 DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n",
2640 bss_conf->use_short_preamble);
2641 if (bss_conf->use_short_preamble)
2642 sc->sc_flags |= SC_OP_PREAMBLE_SHORT;
2644 sc->sc_flags &= ~SC_OP_PREAMBLE_SHORT;
2647 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
2648 DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n",
2649 bss_conf->use_cts_prot);
2650 if (bss_conf->use_cts_prot &&
2651 hw->conf.channel->band != IEEE80211_BAND_5GHZ)
2652 sc->sc_flags |= SC_OP_PROTECT_ENABLE;
2654 sc->sc_flags &= ~SC_OP_PROTECT_ENABLE;
2657 if (changed & BSS_CHANGED_ASSOC) {
2658 DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
2660 ath9k_bss_assoc_info(sc, vif, bss_conf);
2663 mutex_unlock(&sc->mutex);
/*
 * mac80211 .get_tsf callback: read the 64-bit hardware TSF timer.
 * Serialized against other hardware access by sc->mutex.
 */
2666 static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
2669 struct ath_wiphy *aphy = hw->priv;
2670 struct ath_softc *sc = aphy->sc;
2672 mutex_lock(&sc->mutex);
2673 tsf = ath9k_hw_gettsf64(sc->sc_ah);
2674 mutex_unlock(&sc->mutex);
/*
 * mac80211 .set_tsf callback: write the 64-bit hardware TSF timer.
 * Serialized against other hardware access by sc->mutex.
 */
2679 static void ath9k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
2681 struct ath_wiphy *aphy = hw->priv;
2682 struct ath_softc *sc = aphy->sc;
2684 mutex_lock(&sc->mutex);
2685 ath9k_hw_settsf64(sc->sc_ah, tsf);
2686 mutex_unlock(&sc->mutex);
/*
 * mac80211 .reset_tsf callback: zero the hardware TSF timer.
 * Serialized against other hardware access by sc->mutex.
 */
2689 static void ath9k_reset_tsf(struct ieee80211_hw *hw)
2691 struct ath_wiphy *aphy = hw->priv;
2692 struct ath_softc *sc = aphy->sc;
2694 mutex_lock(&sc->mutex);
2695 ath9k_hw_reset_tsf(sc->sc_ah);
2696 mutex_unlock(&sc->mutex);
/*
 * mac80211 .ampdu_action callback: dispatch A-MPDU aggregation state
 * changes to the driver's TX aggregation helpers.
 *
 * RX_START is only honored when the SC_OP_RXAGGR capability flag is set.
 * TX_START/TX_STOP call into ath_tx_aggr_start()/ath_tx_aggr_stop() and,
 * on the visible paths, acknowledge the transition to mac80211 via the
 * *_tx_ba_cb_irqsafe() callbacks; TX_OPERATIONAL resumes a paused
 * aggregation session.  Unknown actions are logged as fatal.
 */
2699 static int ath9k_ampdu_action(struct ieee80211_hw *hw,
2700 enum ieee80211_ampdu_mlme_action action,
2701 struct ieee80211_sta *sta,
2704 struct ath_wiphy *aphy = hw->priv;
2705 struct ath_softc *sc = aphy->sc;
2709 case IEEE80211_AMPDU_RX_START:
/* Only allow RX aggregation if the hardware/driver supports it. */
2710 if (!(sc->sc_flags & SC_OP_RXAGGR))
2713 case IEEE80211_AMPDU_RX_STOP:
2715 case IEEE80211_AMPDU_TX_START:
2716 ret = ath_tx_aggr_start(sc, sta, tid, ssn);
2718 DPRINTF(sc, ATH_DBG_FATAL,
2719 "Unable to start TX aggregation\n");
/* Tell mac80211 the BA session setup may proceed. */
2721 ieee80211_start_tx_ba_cb_irqsafe(hw, sta->addr, tid);
2723 case IEEE80211_AMPDU_TX_STOP:
2724 ret = ath_tx_aggr_stop(sc, sta, tid);
2726 DPRINTF(sc, ATH_DBG_FATAL,
2727 "Unable to stop TX aggregation\n");
2729 ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid);
2731 case IEEE80211_AMPDU_TX_OPERATIONAL:
2732 ath_tx_aggr_resume(sc, sta, tid);
2735 DPRINTF(sc, ATH_DBG_FATAL, "Unknown AMPDU action\n");
/*
 * mac80211 .sw_scan_start callback: enter software-scan mode.
 * Refuses to let two virtual wiphys scan at once (logged at KERN_DEBUG),
 * then marks this wiphy as scanning, force-pauses all sibling wiphys,
 * and raises SC_OP_SCANNING under sc->mutex.
 */
2741 static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
2743 struct ath_wiphy *aphy = hw->priv;
2744 struct ath_softc *sc = aphy->sc;
2746 if (ath9k_wiphy_scanning(sc)) {
2747 printk(KERN_DEBUG "ath9k: Two wiphys trying to scan at the "
2750 * Do not allow the concurrent scanning state for now. This
2751 * could be improved with scanning control moved into ath9k.
2756 aphy->state = ATH_WIPHY_SCAN;
2757 ath9k_wiphy_pause_all_forced(sc, aphy);
2759 mutex_lock(&sc->mutex);
2760 sc->sc_flags |= SC_OP_SCANNING;
2761 mutex_unlock(&sc->mutex);
/*
 * mac80211 .sw_scan_complete callback: leave software-scan mode.
 * Restores this wiphy to the active state and clears SC_OP_SCANNING,
 * both under sc->mutex.
 */
2764 static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
2766 struct ath_wiphy *aphy = hw->priv;
2767 struct ath_softc *sc = aphy->sc;
2769 mutex_lock(&sc->mutex);
2770 aphy->state = ATH_WIPHY_ACTIVE;
2771 sc->sc_flags &= ~SC_OP_SCANNING;
2772 mutex_unlock(&sc->mutex);
/*
 * mac80211 driver operations table: wires the ath9k callback
 * implementations above into the ieee80211_ops vtable registered
 * with the mac80211 stack.
 */
2775 struct ieee80211_ops ath9k_ops = {
2777 .start = ath9k_start,
2779 .add_interface = ath9k_add_interface,
2780 .remove_interface = ath9k_remove_interface,
2781 .config = ath9k_config,
2782 .config_interface = ath9k_config_interface,
2783 .configure_filter = ath9k_configure_filter,
2784 .sta_notify = ath9k_sta_notify,
2785 .conf_tx = ath9k_conf_tx,
2786 .bss_info_changed = ath9k_bss_info_changed,
2787 .set_key = ath9k_set_key,
2788 .get_tsf = ath9k_get_tsf,
2789 .set_tsf = ath9k_set_tsf,
2790 .reset_tsf = ath9k_reset_tsf,
2791 .ampdu_action = ath9k_ampdu_action,
2792 .sw_scan_start = ath9k_sw_scan_start,
2793 .sw_scan_complete = ath9k_sw_scan_complete,
/* Lookup table mapping MAC/baseband silicon revision codes to their
 * human-readable chip names; consumed by ath_mac_bb_name() below. */
2799 } ath_mac_bb_names[] = {
2800 { AR_SREV_VERSION_5416_PCI, "5416" },
2801 { AR_SREV_VERSION_5416_PCIE, "5418" },
2802 { AR_SREV_VERSION_9100, "9100" },
2803 { AR_SREV_VERSION_9160, "9160" },
2804 { AR_SREV_VERSION_9280, "9280" },
2805 { AR_SREV_VERSION_9285, "9285" }
/* Lookup table mapping RF (radio) major revision codes to their
 * human-readable part names; consumed by ath_rf_name() below. */
2811 } ath_rf_names[] = {
2813 { AR_RAD5133_SREV_MAJOR, "5133" },
2814 { AR_RAD5122_SREV_MAJOR, "5122" },
2815 { AR_RAD2133_SREV_MAJOR, "2133" },
2816 { AR_RAD2122_SREV_MAJOR, "2122" }
/*
 * Linear search of ath_mac_bb_names[] for the given MAC/BB revision;
 * returns the matching chip-name string.
 */
2820 * Return the MAC/BB name. "????" is returned if the MAC/BB is unknown.
2823 ath_mac_bb_name(u32 mac_bb_version)
2827 for (i=0; i<ARRAY_SIZE(ath_mac_bb_names); i++) {
2828 if (ath_mac_bb_names[i].version == mac_bb_version) {
2829 return ath_mac_bb_names[i].name;
/*
 * Linear search of ath_rf_names[] for the given RF major revision;
 * returns the matching part-name string.
 */
2837 * Return the RF name. "????" is returned if the RF is unknown.
2840 ath_rf_name(u16 rf_version)
2844 for (i=0; i<ARRAY_SIZE(ath_rf_names); i++) {
2845 if (ath_rf_names[i].version == rf_version) {
2846 return ath_rf_names[i].name;
/*
 * Module entry point: registers the rate-control algorithm, creates the
 * debugfs root, then probes the PCI and AHB buses.  Uses the standard
 * goto-based unwind: each failure releases everything acquired before it
 * (err_remove_root -> debugfs, err_rate_unregister -> rate control).
 */
2853 static int __init ath9k_init(void)
2857 /* Register rate control algorithm */
2858 error = ath_rate_control_register();
2861 "ath9k: Unable to register rate control "
2867 error = ath9k_debug_create_root();
2870 "ath9k: Unable to create debugfs root: %d\n",
2872 goto err_rate_unregister;
2875 error = ath_pci_init();
2878 "ath9k: No PCI devices found, driver not installed.\n");
2880 goto err_remove_root;
2883 error = ath_ahb_init();
/* Error unwind: release resources in reverse order of acquisition. */
2895 ath9k_debug_remove_root();
2896 err_rate_unregister:
2897 ath_rate_control_unregister();
2901 module_init(ath9k_init);
/*
 * Module exit point: tears down in reverse order of ath9k_init() —
 * debugfs root, then the rate-control algorithm — and logs the unload.
 */
2903 static void __exit ath9k_exit(void)
2907 ath9k_debug_remove_root();
2908 ath_rate_control_unregister();
2909 printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
2911 module_exit(ath9k_exit);