1 /*
2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #include <linux/nl80211.h>
18 #include <linux/delay.h>
19 #include "ath9k.h"
20 #include "btcoex.h"
21
ath9k_parse_mpdudensity(u8 mpdudensity)22 u8 ath9k_parse_mpdudensity(u8 mpdudensity)
23 {
24 /*
25 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
26 * 0 for no restriction
27 * 1 for 1/4 us
28 * 2 for 1/2 us
29 * 3 for 1 us
30 * 4 for 2 us
31 * 5 for 4 us
32 * 6 for 8 us
33 * 7 for 16 us
34 */
35 switch (mpdudensity) {
36 case 0:
37 return 0;
38 case 1:
39 case 2:
40 case 3:
41 /* Our lower layer calculations limit our precision to
42 1 microsecond */
43 return 1;
44 case 4:
45 return 2;
46 case 5:
47 return 4;
48 case 6:
49 return 8;
50 case 7:
51 return 16;
52 default:
53 return 0;
54 }
55 }
56
ath9k_has_pending_frames(struct ath_softc * sc,struct ath_txq * txq,bool sw_pending)57 static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq,
58 bool sw_pending)
59 {
60 bool pending = false;
61
62 spin_lock_bh(&txq->axq_lock);
63
64 if (txq->axq_depth) {
65 pending = true;
66 goto out;
67 }
68
69 if (!sw_pending)
70 goto out;
71
72 if (txq->mac80211_qnum >= 0) {
73 struct list_head *list;
74
75 list = &sc->cur_chan->acq[txq->mac80211_qnum];
76 if (!list_empty(list))
77 pending = true;
78 }
79 out:
80 spin_unlock_bh(&txq->axq_lock);
81 return pending;
82 }
83
ath9k_setpower(struct ath_softc * sc,enum ath9k_power_mode mode)84 static bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
85 {
86 unsigned long flags;
87 bool ret;
88
89 spin_lock_irqsave(&sc->sc_pm_lock, flags);
90 ret = ath9k_hw_setpower(sc->sc_ah, mode);
91 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
92
93 return ret;
94 }
95
ath_ps_full_sleep(unsigned long data)96 void ath_ps_full_sleep(unsigned long data)
97 {
98 struct ath_softc *sc = (struct ath_softc *) data;
99 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
100 bool reset;
101
102 spin_lock(&common->cc_lock);
103 ath_hw_cycle_counters_update(common);
104 spin_unlock(&common->cc_lock);
105
106 ath9k_hw_setrxabort(sc->sc_ah, 1);
107 ath9k_hw_stopdmarecv(sc->sc_ah, &reset);
108
109 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
110 }
111
ath9k_ps_wakeup(struct ath_softc * sc)112 void ath9k_ps_wakeup(struct ath_softc *sc)
113 {
114 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
115 unsigned long flags;
116 enum ath9k_power_mode power_mode;
117
118 spin_lock_irqsave(&sc->sc_pm_lock, flags);
119 if (++sc->ps_usecount != 1)
120 goto unlock;
121
122 del_timer_sync(&sc->sleep_timer);
123 power_mode = sc->sc_ah->power_mode;
124 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
125
126 /*
127 * While the hardware is asleep, the cycle counters contain no
128 * useful data. Better clear them now so that they don't mess up
129 * survey data results.
130 */
131 if (power_mode != ATH9K_PM_AWAKE) {
132 spin_lock(&common->cc_lock);
133 ath_hw_cycle_counters_update(common);
134 memset(&common->cc_survey, 0, sizeof(common->cc_survey));
135 memset(&common->cc_ani, 0, sizeof(common->cc_ani));
136 spin_unlock(&common->cc_lock);
137 }
138
139 unlock:
140 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
141 }
142
ath9k_ps_restore(struct ath_softc * sc)143 void ath9k_ps_restore(struct ath_softc *sc)
144 {
145 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
146 enum ath9k_power_mode mode;
147 unsigned long flags;
148
149 spin_lock_irqsave(&sc->sc_pm_lock, flags);
150 if (--sc->ps_usecount != 0)
151 goto unlock;
152
153 if (sc->ps_idle) {
154 mod_timer(&sc->sleep_timer, jiffies + HZ / 10);
155 goto unlock;
156 }
157
158 if (sc->ps_enabled &&
159 !(sc->ps_flags & (PS_WAIT_FOR_BEACON |
160 PS_WAIT_FOR_CAB |
161 PS_WAIT_FOR_PSPOLL_DATA |
162 PS_WAIT_FOR_TX_ACK |
163 PS_WAIT_FOR_ANI))) {
164 mode = ATH9K_PM_NETWORK_SLEEP;
165 if (ath9k_hw_btcoex_is_enabled(sc->sc_ah))
166 ath9k_btcoex_stop_gen_timer(sc);
167 } else {
168 goto unlock;
169 }
170
171 spin_lock(&common->cc_lock);
172 ath_hw_cycle_counters_update(common);
173 spin_unlock(&common->cc_lock);
174
175 ath9k_hw_setpower(sc->sc_ah, mode);
176
177 unlock:
178 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
179 }
180
__ath_cancel_work(struct ath_softc * sc)181 static void __ath_cancel_work(struct ath_softc *sc)
182 {
183 cancel_work_sync(&sc->paprd_work);
184 cancel_delayed_work_sync(&sc->tx_complete_work);
185 cancel_delayed_work_sync(&sc->hw_pll_work);
186
187 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
188 if (ath9k_hw_mci_is_enabled(sc->sc_ah))
189 cancel_work_sync(&sc->mci_work);
190 #endif
191 }
192
ath_cancel_work(struct ath_softc * sc)193 void ath_cancel_work(struct ath_softc *sc)
194 {
195 __ath_cancel_work(sc);
196 cancel_work_sync(&sc->hw_reset_work);
197 }
198
ath_restart_work(struct ath_softc * sc)199 void ath_restart_work(struct ath_softc *sc)
200 {
201 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
202
203 if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9330(sc->sc_ah))
204 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work,
205 msecs_to_jiffies(ATH_PLL_WORK_INTERVAL));
206
207 ath_start_ani(sc);
208 }
209
ath_prepare_reset(struct ath_softc * sc)210 static bool ath_prepare_reset(struct ath_softc *sc)
211 {
212 struct ath_hw *ah = sc->sc_ah;
213 bool ret = true;
214
215 ieee80211_stop_queues(sc->hw);
216 ath_stop_ani(sc);
217 ath9k_hw_disable_interrupts(ah);
218
219 if (AR_SREV_9300_20_OR_LATER(ah)) {
220 ret &= ath_stoprecv(sc);
221 ret &= ath_drain_all_txq(sc);
222 } else {
223 ret &= ath_drain_all_txq(sc);
224 ret &= ath_stoprecv(sc);
225 }
226
227 return ret;
228 }
229
ath_complete_reset(struct ath_softc * sc,bool start)230 static bool ath_complete_reset(struct ath_softc *sc, bool start)
231 {
232 struct ath_hw *ah = sc->sc_ah;
233 struct ath_common *common = ath9k_hw_common(ah);
234 unsigned long flags;
235
236 ath9k_calculate_summary_state(sc, sc->cur_chan);
237 ath_startrecv(sc);
238 ath9k_cmn_update_txpow(ah, sc->cur_chan->cur_txpower,
239 sc->cur_chan->txpower,
240 &sc->cur_chan->cur_txpower);
241 clear_bit(ATH_OP_HW_RESET, &common->op_flags);
242
243 if (!sc->cur_chan->offchannel && start) {
244 /* restore per chanctx TSF timer */
245 if (sc->cur_chan->tsf_val) {
246 u32 offset;
247
248 offset = ath9k_hw_get_tsf_offset(&sc->cur_chan->tsf_ts,
249 NULL);
250 ath9k_hw_settsf64(ah, sc->cur_chan->tsf_val + offset);
251 }
252
253
254 if (!test_bit(ATH_OP_BEACONS, &common->op_flags))
255 goto work;
256
257 if (ah->opmode == NL80211_IFTYPE_STATION &&
258 test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) {
259 spin_lock_irqsave(&sc->sc_pm_lock, flags);
260 sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
261 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
262 } else {
263 ath9k_set_beacon(sc);
264 }
265 work:
266 ath_restart_work(sc);
267 ath_txq_schedule_all(sc);
268 }
269
270 sc->gtt_cnt = 0;
271
272 ath9k_hw_set_interrupts(ah);
273 ath9k_hw_enable_interrupts(ah);
274 ieee80211_wake_queues(sc->hw);
275 ath9k_p2p_ps_timer(sc);
276
277 return true;
278 }
279
ath_reset_internal(struct ath_softc * sc,struct ath9k_channel * hchan)280 static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
281 {
282 struct ath_hw *ah = sc->sc_ah;
283 struct ath_common *common = ath9k_hw_common(ah);
284 struct ath9k_hw_cal_data *caldata = NULL;
285 bool fastcc = true;
286 int r;
287
288 __ath_cancel_work(sc);
289
290 disable_irq(sc->irq);
291 tasklet_disable(&sc->intr_tq);
292 tasklet_disable(&sc->bcon_tasklet);
293 spin_lock_bh(&sc->sc_pcu_lock);
294
295 if (!sc->cur_chan->offchannel) {
296 fastcc = false;
297 caldata = &sc->cur_chan->caldata;
298 }
299
300 if (!hchan) {
301 fastcc = false;
302 hchan = ah->curchan;
303 }
304
305 if (!ath_prepare_reset(sc))
306 fastcc = false;
307
308 if (ath9k_is_chanctx_enabled())
309 fastcc = false;
310
311 spin_lock_bh(&sc->chan_lock);
312 sc->cur_chandef = sc->cur_chan->chandef;
313 spin_unlock_bh(&sc->chan_lock);
314
315 ath_dbg(common, CONFIG, "Reset to %u MHz, HT40: %d fastcc: %d\n",
316 hchan->channel, IS_CHAN_HT40(hchan), fastcc);
317
318 r = ath9k_hw_reset(ah, hchan, caldata, fastcc);
319 if (r) {
320 ath_err(common,
321 "Unable to reset channel, reset status %d\n", r);
322
323 ath9k_hw_enable_interrupts(ah);
324 ath9k_queue_reset(sc, RESET_TYPE_BB_HANG);
325
326 goto out;
327 }
328
329 if (ath9k_hw_mci_is_enabled(sc->sc_ah) &&
330 sc->cur_chan->offchannel)
331 ath9k_mci_set_txpower(sc, true, false);
332
333 if (!ath_complete_reset(sc, true))
334 r = -EIO;
335
336 out:
337 enable_irq(sc->irq);
338 spin_unlock_bh(&sc->sc_pcu_lock);
339 tasklet_enable(&sc->bcon_tasklet);
340 tasklet_enable(&sc->intr_tq);
341
342 return r;
343 }
344
ath_node_attach(struct ath_softc * sc,struct ieee80211_sta * sta,struct ieee80211_vif * vif)345 static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
346 struct ieee80211_vif *vif)
347 {
348 struct ath_node *an;
349 an = (struct ath_node *)sta->drv_priv;
350
351 an->sc = sc;
352 an->sta = sta;
353 an->vif = vif;
354 memset(&an->key_idx, 0, sizeof(an->key_idx));
355
356 ath_tx_node_init(sc, an);
357
358 ath_dynack_node_init(sc->sc_ah, an);
359 }
360
ath_node_detach(struct ath_softc * sc,struct ieee80211_sta * sta)361 static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
362 {
363 struct ath_node *an = (struct ath_node *)sta->drv_priv;
364 ath_tx_node_cleanup(sc, an);
365
366 ath_dynack_node_deinit(sc->sc_ah, an);
367 }
368
ath9k_tasklet(unsigned long data)369 void ath9k_tasklet(unsigned long data)
370 {
371 struct ath_softc *sc = (struct ath_softc *)data;
372 struct ath_hw *ah = sc->sc_ah;
373 struct ath_common *common = ath9k_hw_common(ah);
374 enum ath_reset_type type;
375 unsigned long flags;
376 u32 status = sc->intrstatus;
377 u32 rxmask;
378
379 ath9k_ps_wakeup(sc);
380 spin_lock(&sc->sc_pcu_lock);
381
382 if (status & ATH9K_INT_FATAL) {
383 type = RESET_TYPE_FATAL_INT;
384 ath9k_queue_reset(sc, type);
385
386 /*
387 * Increment the ref. counter here so that
388 * interrupts are enabled in the reset routine.
389 */
390 atomic_inc(&ah->intr_ref_cnt);
391 ath_dbg(common, RESET, "FATAL: Skipping interrupts\n");
392 goto out;
393 }
394
395 if ((ah->config.hw_hang_checks & HW_BB_WATCHDOG) &&
396 (status & ATH9K_INT_BB_WATCHDOG)) {
397 spin_lock(&common->cc_lock);
398 ath_hw_cycle_counters_update(common);
399 ar9003_hw_bb_watchdog_dbg_info(ah);
400 spin_unlock(&common->cc_lock);
401
402 if (ar9003_hw_bb_watchdog_check(ah)) {
403 type = RESET_TYPE_BB_WATCHDOG;
404 ath9k_queue_reset(sc, type);
405
406 /*
407 * Increment the ref. counter here so that
408 * interrupts are enabled in the reset routine.
409 */
410 atomic_inc(&ah->intr_ref_cnt);
411 ath_dbg(common, RESET,
412 "BB_WATCHDOG: Skipping interrupts\n");
413 goto out;
414 }
415 }
416
417 if (status & ATH9K_INT_GTT) {
418 sc->gtt_cnt++;
419
420 if ((sc->gtt_cnt >= MAX_GTT_CNT) && !ath9k_hw_check_alive(ah)) {
421 type = RESET_TYPE_TX_GTT;
422 ath9k_queue_reset(sc, type);
423 atomic_inc(&ah->intr_ref_cnt);
424 ath_dbg(common, RESET,
425 "GTT: Skipping interrupts\n");
426 goto out;
427 }
428 }
429
430 spin_lock_irqsave(&sc->sc_pm_lock, flags);
431 if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) {
432 /*
433 * TSF sync does not look correct; remain awake to sync with
434 * the next Beacon.
435 */
436 ath_dbg(common, PS, "TSFOOR - Sync with next Beacon\n");
437 sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC;
438 }
439 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
440
441 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
442 rxmask = (ATH9K_INT_RXHP | ATH9K_INT_RXLP | ATH9K_INT_RXEOL |
443 ATH9K_INT_RXORN);
444 else
445 rxmask = (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
446
447 if (status & rxmask) {
448 /* Check for high priority Rx first */
449 if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
450 (status & ATH9K_INT_RXHP))
451 ath_rx_tasklet(sc, 0, true);
452
453 ath_rx_tasklet(sc, 0, false);
454 }
455
456 if (status & ATH9K_INT_TX) {
457 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
458 /*
459 * For EDMA chips, TX completion is enabled for the
460 * beacon queue, so if a beacon has been transmitted
461 * successfully after a GTT interrupt, the GTT counter
462 * gets reset to zero here.
463 */
464 sc->gtt_cnt = 0;
465
466 ath_tx_edma_tasklet(sc);
467 } else {
468 ath_tx_tasklet(sc);
469 }
470
471 wake_up(&sc->tx_wait);
472 }
473
474 if (status & ATH9K_INT_GENTIMER)
475 ath_gen_timer_isr(sc->sc_ah);
476
477 ath9k_btcoex_handle_interrupt(sc, status);
478
479 /* re-enable hardware interrupt */
480 ath9k_hw_enable_interrupts(ah);
481 out:
482 spin_unlock(&sc->sc_pcu_lock);
483 ath9k_ps_restore(sc);
484 }
485
ath_isr(int irq,void * dev)486 irqreturn_t ath_isr(int irq, void *dev)
487 {
488 #define SCHED_INTR ( \
489 ATH9K_INT_FATAL | \
490 ATH9K_INT_BB_WATCHDOG | \
491 ATH9K_INT_RXORN | \
492 ATH9K_INT_RXEOL | \
493 ATH9K_INT_RX | \
494 ATH9K_INT_RXLP | \
495 ATH9K_INT_RXHP | \
496 ATH9K_INT_TX | \
497 ATH9K_INT_BMISS | \
498 ATH9K_INT_CST | \
499 ATH9K_INT_GTT | \
500 ATH9K_INT_TSFOOR | \
501 ATH9K_INT_GENTIMER | \
502 ATH9K_INT_MCI)
503
504 struct ath_softc *sc = dev;
505 struct ath_hw *ah = sc->sc_ah;
506 struct ath_common *common = ath9k_hw_common(ah);
507 enum ath9k_int status;
508 u32 sync_cause = 0;
509 bool sched = false;
510
511 /*
512 * The hardware is not ready/present, don't
513 * touch anything. Note this can happen early
514 * on if the IRQ is shared.
515 */
516 if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags))
517 return IRQ_NONE;
518
519 /* shared irq, not for us */
520 if (!ath9k_hw_intrpend(ah))
521 return IRQ_NONE;
522
523 /*
524 * Figure out the reason(s) for the interrupt. Note
525 * that the hal returns a pseudo-ISR that may include
526 * bits we haven't explicitly enabled so we mask the
527 * value to insure we only process bits we requested.
528 */
529 ath9k_hw_getisr(ah, &status, &sync_cause); /* NB: clears ISR too */
530 ath9k_debug_sync_cause(sc, sync_cause);
531 status &= ah->imask; /* discard unasked-for bits */
532
533 if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
534 return IRQ_HANDLED;
535
536 /*
537 * If there are no status bits set, then this interrupt was not
538 * for me (should have been caught above).
539 */
540 if (!status)
541 return IRQ_NONE;
542
543 /* Cache the status */
544 sc->intrstatus = status;
545
546 if (status & SCHED_INTR)
547 sched = true;
548
549 /*
550 * If a FATAL interrupt is received, we have to reset the chip
551 * immediately.
552 */
553 if (status & ATH9K_INT_FATAL)
554 goto chip_reset;
555
556 if ((ah->config.hw_hang_checks & HW_BB_WATCHDOG) &&
557 (status & ATH9K_INT_BB_WATCHDOG))
558 goto chip_reset;
559
560 if (status & ATH9K_INT_SWBA)
561 tasklet_schedule(&sc->bcon_tasklet);
562
563 if (status & ATH9K_INT_TXURN)
564 ath9k_hw_updatetxtriglevel(ah, true);
565
566 if (status & ATH9K_INT_RXEOL) {
567 ah->imask &= ~(ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
568 ath9k_hw_set_interrupts(ah);
569 }
570
571 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
572 if (status & ATH9K_INT_TIM_TIMER) {
573 if (ATH_DBG_WARN_ON_ONCE(sc->ps_idle))
574 goto chip_reset;
575 /* Clear RxAbort bit so that we can
576 * receive frames */
577 ath9k_setpower(sc, ATH9K_PM_AWAKE);
578 spin_lock(&sc->sc_pm_lock);
579 ath9k_hw_setrxabort(sc->sc_ah, 0);
580 sc->ps_flags |= PS_WAIT_FOR_BEACON;
581 spin_unlock(&sc->sc_pm_lock);
582 }
583
584 chip_reset:
585
586 ath_debug_stat_interrupt(sc, status);
587
588 if (sched) {
589 /* turn off every interrupt */
590 ath9k_hw_disable_interrupts(ah);
591 tasklet_schedule(&sc->intr_tq);
592 }
593
594 return IRQ_HANDLED;
595
596 #undef SCHED_INTR
597 }
598
599 /*
600 * This function is called when a HW reset cannot be deferred
601 * and has to be immediate.
602 */
ath_reset(struct ath_softc * sc,struct ath9k_channel * hchan)603 int ath_reset(struct ath_softc *sc, struct ath9k_channel *hchan)
604 {
605 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
606 int r;
607
608 ath9k_hw_kill_interrupts(sc->sc_ah);
609 set_bit(ATH_OP_HW_RESET, &common->op_flags);
610
611 ath9k_ps_wakeup(sc);
612 r = ath_reset_internal(sc, hchan);
613 ath9k_ps_restore(sc);
614
615 return r;
616 }
617
618 /*
619 * When a HW reset can be deferred, it is added to the
620 * hw_reset_work workqueue, but we set ATH_OP_HW_RESET before
621 * queueing.
622 */
ath9k_queue_reset(struct ath_softc * sc,enum ath_reset_type type)623 void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type)
624 {
625 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
626 #ifdef CONFIG_ATH9K_DEBUGFS
627 RESET_STAT_INC(sc, type);
628 #endif
629 ath9k_hw_kill_interrupts(sc->sc_ah);
630 set_bit(ATH_OP_HW_RESET, &common->op_flags);
631 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
632 }
633
ath_reset_work(struct work_struct * work)634 void ath_reset_work(struct work_struct *work)
635 {
636 struct ath_softc *sc = container_of(work, struct ath_softc, hw_reset_work);
637
638 ath9k_ps_wakeup(sc);
639 ath_reset_internal(sc, NULL);
640 ath9k_ps_restore(sc);
641 }
642
643 /**********************/
644 /* mac80211 callbacks */
645 /**********************/
646
ath9k_start(struct ieee80211_hw * hw)647 static int ath9k_start(struct ieee80211_hw *hw)
648 {
649 struct ath_softc *sc = hw->priv;
650 struct ath_hw *ah = sc->sc_ah;
651 struct ath_common *common = ath9k_hw_common(ah);
652 struct ieee80211_channel *curchan = sc->cur_chan->chandef.chan;
653 struct ath_chanctx *ctx = sc->cur_chan;
654 struct ath9k_channel *init_channel;
655 int r;
656
657 ath_dbg(common, CONFIG,
658 "Starting driver with initial channel: %d MHz\n",
659 curchan->center_freq);
660
661 ath9k_ps_wakeup(sc);
662 mutex_lock(&sc->mutex);
663
664 init_channel = ath9k_cmn_get_channel(hw, ah, &ctx->chandef);
665 sc->cur_chandef = hw->conf.chandef;
666
667 /* Reset SERDES registers */
668 ath9k_hw_configpcipowersave(ah, false);
669
670 /*
671 * The basic interface to setting the hardware in a good
672 * state is ``reset''. On return the hardware is known to
673 * be powered up and with interrupts disabled. This must
674 * be followed by initialization of the appropriate bits
675 * and then setup of the interrupt mask.
676 */
677 spin_lock_bh(&sc->sc_pcu_lock);
678
679 atomic_set(&ah->intr_ref_cnt, -1);
680
681 r = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
682 if (r) {
683 ath_err(common,
684 "Unable to reset hardware; reset status %d (freq %u MHz)\n",
685 r, curchan->center_freq);
686 ah->reset_power_on = false;
687 }
688
689 /* Setup our intr mask. */
690 ah->imask = ATH9K_INT_TX | ATH9K_INT_RXEOL |
691 ATH9K_INT_RXORN | ATH9K_INT_FATAL |
692 ATH9K_INT_GLOBAL;
693
694 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
695 ah->imask |= ATH9K_INT_RXHP |
696 ATH9K_INT_RXLP;
697 else
698 ah->imask |= ATH9K_INT_RX;
699
700 if (ah->config.hw_hang_checks & HW_BB_WATCHDOG)
701 ah->imask |= ATH9K_INT_BB_WATCHDOG;
702
703 /*
704 * Enable GTT interrupts only for AR9003/AR9004 chips
705 * for now.
706 */
707 if (AR_SREV_9300_20_OR_LATER(ah))
708 ah->imask |= ATH9K_INT_GTT;
709
710 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
711 ah->imask |= ATH9K_INT_CST;
712
713 ath_mci_enable(sc);
714
715 clear_bit(ATH_OP_INVALID, &common->op_flags);
716 sc->sc_ah->is_monitoring = false;
717
718 if (!ath_complete_reset(sc, false))
719 ah->reset_power_on = false;
720
721 if (ah->led_pin >= 0) {
722 ath9k_hw_cfg_output(ah, ah->led_pin,
723 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
724 ath9k_hw_set_gpio(ah, ah->led_pin,
725 (ah->config.led_active_high) ? 1 : 0);
726 }
727
728 /*
729 * Reset key cache to sane defaults (all entries cleared) instead of
730 * semi-random values after suspend/resume.
731 */
732 ath9k_cmn_init_crypto(sc->sc_ah);
733
734 ath9k_hw_reset_tsf(ah);
735
736 spin_unlock_bh(&sc->sc_pcu_lock);
737
738 mutex_unlock(&sc->mutex);
739
740 ath9k_ps_restore(sc);
741
742 return 0;
743 }
744
ath9k_tx(struct ieee80211_hw * hw,struct ieee80211_tx_control * control,struct sk_buff * skb)745 static void ath9k_tx(struct ieee80211_hw *hw,
746 struct ieee80211_tx_control *control,
747 struct sk_buff *skb)
748 {
749 struct ath_softc *sc = hw->priv;
750 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
751 struct ath_tx_control txctl;
752 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
753 unsigned long flags;
754
755 if (sc->ps_enabled) {
756 /*
757 * mac80211 does not set PM field for normal data frames, so we
758 * need to update that based on the current PS mode.
759 */
760 if (ieee80211_is_data(hdr->frame_control) &&
761 !ieee80211_is_nullfunc(hdr->frame_control) &&
762 !ieee80211_has_pm(hdr->frame_control)) {
763 ath_dbg(common, PS,
764 "Add PM=1 for a TX frame while in PS mode\n");
765 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
766 }
767 }
768
769 if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_NETWORK_SLEEP)) {
770 /*
771 * We are using PS-Poll and mac80211 can request TX while in
772 * power save mode. Need to wake up hardware for the TX to be
773 * completed and if needed, also for RX of buffered frames.
774 */
775 ath9k_ps_wakeup(sc);
776 spin_lock_irqsave(&sc->sc_pm_lock, flags);
777 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
778 ath9k_hw_setrxabort(sc->sc_ah, 0);
779 if (ieee80211_is_pspoll(hdr->frame_control)) {
780 ath_dbg(common, PS,
781 "Sending PS-Poll to pick a buffered frame\n");
782 sc->ps_flags |= PS_WAIT_FOR_PSPOLL_DATA;
783 } else {
784 ath_dbg(common, PS, "Wake up to complete TX\n");
785 sc->ps_flags |= PS_WAIT_FOR_TX_ACK;
786 }
787 /*
788 * The actual restore operation will happen only after
789 * the ps_flags bit is cleared. We are just dropping
790 * the ps_usecount here.
791 */
792 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
793 ath9k_ps_restore(sc);
794 }
795
796 /*
797 * Cannot tx while the hardware is in full sleep, it first needs a full
798 * chip reset to recover from that
799 */
800 if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_FULL_SLEEP)) {
801 ath_err(common, "TX while HW is in FULL_SLEEP mode\n");
802 goto exit;
803 }
804
805 memset(&txctl, 0, sizeof(struct ath_tx_control));
806 txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];
807 txctl.sta = control->sta;
808
809 ath_dbg(common, XMIT, "transmitting packet, skb: %p\n", skb);
810
811 if (ath_tx_start(hw, skb, &txctl) != 0) {
812 ath_dbg(common, XMIT, "TX failed\n");
813 TX_STAT_INC(txctl.txq->axq_qnum, txfailed);
814 goto exit;
815 }
816
817 return;
818 exit:
819 ieee80211_free_txskb(hw, skb);
820 }
821
ath9k_stop(struct ieee80211_hw * hw)822 static void ath9k_stop(struct ieee80211_hw *hw)
823 {
824 struct ath_softc *sc = hw->priv;
825 struct ath_hw *ah = sc->sc_ah;
826 struct ath_common *common = ath9k_hw_common(ah);
827 bool prev_idle;
828
829 ath9k_deinit_channel_context(sc);
830
831 mutex_lock(&sc->mutex);
832
833 ath_cancel_work(sc);
834
835 if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
836 ath_dbg(common, ANY, "Device not present\n");
837 mutex_unlock(&sc->mutex);
838 return;
839 }
840
841 /* Ensure HW is awake when we try to shut it down. */
842 ath9k_ps_wakeup(sc);
843
844 spin_lock_bh(&sc->sc_pcu_lock);
845
846 /* prevent tasklets to enable interrupts once we disable them */
847 ah->imask &= ~ATH9K_INT_GLOBAL;
848
849 /* make sure h/w will not generate any interrupt
850 * before setting the invalid flag. */
851 ath9k_hw_disable_interrupts(ah);
852
853 spin_unlock_bh(&sc->sc_pcu_lock);
854
855 /* we can now sync irq and kill any running tasklets, since we already
856 * disabled interrupts and not holding a spin lock */
857 synchronize_irq(sc->irq);
858 tasklet_kill(&sc->intr_tq);
859 tasklet_kill(&sc->bcon_tasklet);
860
861 prev_idle = sc->ps_idle;
862 sc->ps_idle = true;
863
864 spin_lock_bh(&sc->sc_pcu_lock);
865
866 if (ah->led_pin >= 0) {
867 ath9k_hw_set_gpio(ah, ah->led_pin,
868 (ah->config.led_active_high) ? 0 : 1);
869 ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
870 }
871
872 ath_prepare_reset(sc);
873
874 if (sc->rx.frag) {
875 dev_kfree_skb_any(sc->rx.frag);
876 sc->rx.frag = NULL;
877 }
878
879 if (!ah->curchan)
880 ah->curchan = ath9k_cmn_get_channel(hw, ah,
881 &sc->cur_chan->chandef);
882
883 ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
884
885 set_bit(ATH_OP_INVALID, &common->op_flags);
886
887 ath9k_hw_phy_disable(ah);
888
889 ath9k_hw_configpcipowersave(ah, true);
890
891 spin_unlock_bh(&sc->sc_pcu_lock);
892
893 ath9k_ps_restore(sc);
894
895 sc->ps_idle = prev_idle;
896
897 mutex_unlock(&sc->mutex);
898
899 ath_dbg(common, CONFIG, "Driver halt\n");
900 }
901
ath9k_uses_beacons(int type)902 static bool ath9k_uses_beacons(int type)
903 {
904 switch (type) {
905 case NL80211_IFTYPE_AP:
906 case NL80211_IFTYPE_ADHOC:
907 case NL80211_IFTYPE_MESH_POINT:
908 return true;
909 default:
910 return false;
911 }
912 }
913
ath9k_vif_iter(struct ath9k_vif_iter_data * iter_data,u8 * mac,struct ieee80211_vif * vif)914 static void ath9k_vif_iter(struct ath9k_vif_iter_data *iter_data,
915 u8 *mac, struct ieee80211_vif *vif)
916 {
917 struct ath_vif *avp = (struct ath_vif *)vif->drv_priv;
918 int i;
919
920 if (iter_data->has_hw_macaddr) {
921 for (i = 0; i < ETH_ALEN; i++)
922 iter_data->mask[i] &=
923 ~(iter_data->hw_macaddr[i] ^ mac[i]);
924 } else {
925 memcpy(iter_data->hw_macaddr, mac, ETH_ALEN);
926 iter_data->has_hw_macaddr = true;
927 }
928
929 if (!vif->bss_conf.use_short_slot)
930 iter_data->slottime = ATH9K_SLOT_TIME_20;
931
932 switch (vif->type) {
933 case NL80211_IFTYPE_AP:
934 iter_data->naps++;
935 break;
936 case NL80211_IFTYPE_STATION:
937 iter_data->nstations++;
938 if (avp->assoc && !iter_data->primary_sta)
939 iter_data->primary_sta = vif;
940 break;
941 case NL80211_IFTYPE_OCB:
942 iter_data->nocbs++;
943 break;
944 case NL80211_IFTYPE_ADHOC:
945 iter_data->nadhocs++;
946 if (vif->bss_conf.enable_beacon)
947 iter_data->beacons = true;
948 break;
949 case NL80211_IFTYPE_MESH_POINT:
950 iter_data->nmeshes++;
951 if (vif->bss_conf.enable_beacon)
952 iter_data->beacons = true;
953 break;
954 case NL80211_IFTYPE_WDS:
955 iter_data->nwds++;
956 break;
957 default:
958 break;
959 }
960 }
961
ath9k_update_bssid_mask(struct ath_softc * sc,struct ath_chanctx * ctx,struct ath9k_vif_iter_data * iter_data)962 static void ath9k_update_bssid_mask(struct ath_softc *sc,
963 struct ath_chanctx *ctx,
964 struct ath9k_vif_iter_data *iter_data)
965 {
966 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
967 struct ath_vif *avp;
968 int i;
969
970 if (!ath9k_is_chanctx_enabled())
971 return;
972
973 list_for_each_entry(avp, &ctx->vifs, list) {
974 if (ctx->nvifs_assigned != 1)
975 continue;
976
977 if (!avp->vif->p2p || !iter_data->has_hw_macaddr)
978 continue;
979
980 ether_addr_copy(common->curbssid, avp->bssid);
981
982 /* perm_addr will be used as the p2p device address. */
983 for (i = 0; i < ETH_ALEN; i++)
984 iter_data->mask[i] &=
985 ~(iter_data->hw_macaddr[i] ^
986 sc->hw->wiphy->perm_addr[i]);
987 }
988 }
989
990 /* Called with sc->mutex held. */
ath9k_calculate_iter_data(struct ath_softc * sc,struct ath_chanctx * ctx,struct ath9k_vif_iter_data * iter_data)991 void ath9k_calculate_iter_data(struct ath_softc *sc,
992 struct ath_chanctx *ctx,
993 struct ath9k_vif_iter_data *iter_data)
994 {
995 struct ath_vif *avp;
996
997 /*
998 * The hardware will use primary station addr together with the
999 * BSSID mask when matching addresses.
1000 */
1001 memset(iter_data, 0, sizeof(*iter_data));
1002 eth_broadcast_addr(iter_data->mask);
1003 iter_data->slottime = ATH9K_SLOT_TIME_9;
1004
1005 list_for_each_entry(avp, &ctx->vifs, list)
1006 ath9k_vif_iter(iter_data, avp->vif->addr, avp->vif);
1007
1008 ath9k_update_bssid_mask(sc, ctx, iter_data);
1009 }
1010
ath9k_set_assoc_state(struct ath_softc * sc,struct ieee80211_vif * vif,bool changed)1011 static void ath9k_set_assoc_state(struct ath_softc *sc,
1012 struct ieee80211_vif *vif, bool changed)
1013 {
1014 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1015 struct ath_vif *avp = (struct ath_vif *)vif->drv_priv;
1016 unsigned long flags;
1017
1018 set_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
1019
1020 ether_addr_copy(common->curbssid, avp->bssid);
1021 common->curaid = avp->aid;
1022 ath9k_hw_write_associd(sc->sc_ah);
1023
1024 if (changed) {
1025 common->last_rssi = ATH_RSSI_DUMMY_MARKER;
1026 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
1027
1028 spin_lock_irqsave(&sc->sc_pm_lock, flags);
1029 sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
1030 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
1031 }
1032
1033 if (ath9k_hw_mci_is_enabled(sc->sc_ah))
1034 ath9k_mci_update_wlan_channels(sc, false);
1035
1036 ath_dbg(common, CONFIG,
1037 "Primary Station interface: %pM, BSSID: %pM\n",
1038 vif->addr, common->curbssid);
1039 }
1040
1041 #ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
ath9k_set_offchannel_state(struct ath_softc * sc)1042 static void ath9k_set_offchannel_state(struct ath_softc *sc)
1043 {
1044 struct ath_hw *ah = sc->sc_ah;
1045 struct ath_common *common = ath9k_hw_common(ah);
1046 struct ieee80211_vif *vif = NULL;
1047
1048 ath9k_ps_wakeup(sc);
1049
1050 if (sc->offchannel.state < ATH_OFFCHANNEL_ROC_START)
1051 vif = sc->offchannel.scan_vif;
1052 else
1053 vif = sc->offchannel.roc_vif;
1054
1055 if (WARN_ON(!vif))
1056 goto exit;
1057
1058 eth_zero_addr(common->curbssid);
1059 eth_broadcast_addr(common->bssidmask);
1060 memcpy(common->macaddr, vif->addr, ETH_ALEN);
1061 common->curaid = 0;
1062 ah->opmode = vif->type;
1063 ah->imask &= ~ATH9K_INT_SWBA;
1064 ah->imask &= ~ATH9K_INT_TSFOOR;
1065 ah->slottime = ATH9K_SLOT_TIME_9;
1066
1067 ath_hw_setbssidmask(common);
1068 ath9k_hw_setopmode(ah);
1069 ath9k_hw_write_associd(sc->sc_ah);
1070 ath9k_hw_set_interrupts(ah);
1071 ath9k_hw_init_global_settings(ah);
1072
1073 exit:
1074 ath9k_ps_restore(sc);
1075 }
1076 #endif
1077
1078 /* Called with sc->mutex held. */
ath9k_calculate_summary_state(struct ath_softc * sc,struct ath_chanctx * ctx)1079 void ath9k_calculate_summary_state(struct ath_softc *sc,
1080 struct ath_chanctx *ctx)
1081 {
1082 struct ath_hw *ah = sc->sc_ah;
1083 struct ath_common *common = ath9k_hw_common(ah);
1084 struct ath9k_vif_iter_data iter_data;
1085 struct ath_beacon_config *cur_conf;
1086
1087 ath_chanctx_check_active(sc, ctx);
1088
1089 if (ctx != sc->cur_chan)
1090 return;
1091
1092 #ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
1093 if (ctx == &sc->offchannel.chan)
1094 return ath9k_set_offchannel_state(sc);
1095 #endif
1096
1097 ath9k_ps_wakeup(sc);
1098 ath9k_calculate_iter_data(sc, ctx, &iter_data);
1099
1100 if (iter_data.has_hw_macaddr)
1101 memcpy(common->macaddr, iter_data.hw_macaddr, ETH_ALEN);
1102
1103 memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
1104 ath_hw_setbssidmask(common);
1105
1106 if (iter_data.naps > 0) {
1107 cur_conf = &ctx->beacon;
1108 ath9k_hw_set_tsfadjust(ah, true);
1109 ah->opmode = NL80211_IFTYPE_AP;
1110 if (cur_conf->enable_beacon)
1111 iter_data.beacons = true;
1112 } else {
1113 ath9k_hw_set_tsfadjust(ah, false);
1114
1115 if (iter_data.nmeshes)
1116 ah->opmode = NL80211_IFTYPE_MESH_POINT;
1117 else if (iter_data.nocbs)
1118 ah->opmode = NL80211_IFTYPE_OCB;
1119 else if (iter_data.nwds)
1120 ah->opmode = NL80211_IFTYPE_AP;
1121 else if (iter_data.nadhocs)
1122 ah->opmode = NL80211_IFTYPE_ADHOC;
1123 else
1124 ah->opmode = NL80211_IFTYPE_STATION;
1125 }
1126
1127 ath9k_hw_setopmode(ah);
1128
1129 ctx->switch_after_beacon = false;
1130 if ((iter_data.nstations + iter_data.nadhocs + iter_data.nmeshes) > 0)
1131 ah->imask |= ATH9K_INT_TSFOOR;
1132 else {
1133 ah->imask &= ~ATH9K_INT_TSFOOR;
1134 if (iter_data.naps == 1 && iter_data.beacons)
1135 ctx->switch_after_beacon = true;
1136 }
1137
1138 ah->imask &= ~ATH9K_INT_SWBA;
1139 if (ah->opmode == NL80211_IFTYPE_STATION) {
1140 bool changed = (iter_data.primary_sta != ctx->primary_sta);
1141
1142 if (iter_data.primary_sta) {
1143 iter_data.beacons = true;
1144 ath9k_set_assoc_state(sc, iter_data.primary_sta,
1145 changed);
1146 ctx->primary_sta = iter_data.primary_sta;
1147 } else {
1148 ctx->primary_sta = NULL;
1149 eth_zero_addr(common->curbssid);
1150 common->curaid = 0;
1151 ath9k_hw_write_associd(sc->sc_ah);
1152 if (ath9k_hw_mci_is_enabled(sc->sc_ah))
1153 ath9k_mci_update_wlan_channels(sc, true);
1154 }
1155 } else if (iter_data.beacons) {
1156 ah->imask |= ATH9K_INT_SWBA;
1157 }
1158 ath9k_hw_set_interrupts(ah);
1159
1160 if (iter_data.beacons)
1161 set_bit(ATH_OP_BEACONS, &common->op_flags);
1162 else
1163 clear_bit(ATH_OP_BEACONS, &common->op_flags);
1164
1165 if (ah->slottime != iter_data.slottime) {
1166 ah->slottime = iter_data.slottime;
1167 ath9k_hw_init_global_settings(ah);
1168 }
1169
1170 if (iter_data.primary_sta)
1171 set_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
1172 else
1173 clear_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
1174
1175 ath_dbg(common, CONFIG,
1176 "macaddr: %pM, bssid: %pM, bssidmask: %pM\n",
1177 common->macaddr, common->curbssid, common->bssidmask);
1178
1179 ath9k_ps_restore(sc);
1180 }
1181
ath9k_tpc_vif_iter(void * data,u8 * mac,struct ieee80211_vif * vif)1182 static void ath9k_tpc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
1183 {
1184 int *power = (int *)data;
1185
1186 if (*power < vif->bss_conf.txpower)
1187 *power = vif->bss_conf.txpower;
1188 }
1189
1190 /* Called with sc->mutex held. */
ath9k_set_txpower(struct ath_softc * sc,struct ieee80211_vif * vif)1191 void ath9k_set_txpower(struct ath_softc *sc, struct ieee80211_vif *vif)
1192 {
1193 int power;
1194 struct ath_hw *ah = sc->sc_ah;
1195 struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
1196
1197 ath9k_ps_wakeup(sc);
1198 if (ah->tpc_enabled) {
1199 power = (vif) ? vif->bss_conf.txpower : -1;
1200 ieee80211_iterate_active_interfaces_atomic(
1201 sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
1202 ath9k_tpc_vif_iter, &power);
1203 if (power == -1)
1204 power = sc->hw->conf.power_level;
1205 } else {
1206 power = sc->hw->conf.power_level;
1207 }
1208 sc->cur_chan->txpower = 2 * power;
1209 ath9k_hw_set_txpowerlimit(ah, sc->cur_chan->txpower, false);
1210 sc->cur_chan->cur_txpower = reg->max_power_level;
1211 ath9k_ps_restore(sc);
1212 }
1213
ath9k_assign_hw_queues(struct ieee80211_hw * hw,struct ieee80211_vif * vif)1214 static void ath9k_assign_hw_queues(struct ieee80211_hw *hw,
1215 struct ieee80211_vif *vif)
1216 {
1217 int i;
1218
1219 if (!ath9k_is_chanctx_enabled())
1220 return;
1221
1222 for (i = 0; i < IEEE80211_NUM_ACS; i++)
1223 vif->hw_queue[i] = i;
1224
1225 if (vif->type == NL80211_IFTYPE_AP ||
1226 vif->type == NL80211_IFTYPE_MESH_POINT)
1227 vif->cab_queue = hw->queues - 2;
1228 else
1229 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
1230 }
1231
ath9k_add_interface(struct ieee80211_hw * hw,struct ieee80211_vif * vif)1232 static int ath9k_add_interface(struct ieee80211_hw *hw,
1233 struct ieee80211_vif *vif)
1234 {
1235 struct ath_softc *sc = hw->priv;
1236 struct ath_hw *ah = sc->sc_ah;
1237 struct ath_common *common = ath9k_hw_common(ah);
1238 struct ath_vif *avp = (void *)vif->drv_priv;
1239 struct ath_node *an = &avp->mcast_node;
1240
1241 mutex_lock(&sc->mutex);
1242
1243 if (config_enabled(CONFIG_ATH9K_TX99)) {
1244 if (sc->cur_chan->nvifs >= 1) {
1245 mutex_unlock(&sc->mutex);
1246 return -EOPNOTSUPP;
1247 }
1248 sc->tx99_vif = vif;
1249 }
1250
1251 ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
1252 sc->cur_chan->nvifs++;
1253
1254 if (ath9k_uses_beacons(vif->type))
1255 ath9k_beacon_assign_slot(sc, vif);
1256
1257 avp->vif = vif;
1258 if (!ath9k_is_chanctx_enabled()) {
1259 avp->chanctx = sc->cur_chan;
1260 list_add_tail(&avp->list, &avp->chanctx->vifs);
1261 }
1262
1263 ath9k_calculate_summary_state(sc, avp->chanctx);
1264
1265 ath9k_assign_hw_queues(hw, vif);
1266
1267 ath9k_set_txpower(sc, vif);
1268
1269 an->sc = sc;
1270 an->sta = NULL;
1271 an->vif = vif;
1272 an->no_ps_filter = true;
1273 ath_tx_node_init(sc, an);
1274
1275 mutex_unlock(&sc->mutex);
1276 return 0;
1277 }
1278
ath9k_change_interface(struct ieee80211_hw * hw,struct ieee80211_vif * vif,enum nl80211_iftype new_type,bool p2p)1279 static int ath9k_change_interface(struct ieee80211_hw *hw,
1280 struct ieee80211_vif *vif,
1281 enum nl80211_iftype new_type,
1282 bool p2p)
1283 {
1284 struct ath_softc *sc = hw->priv;
1285 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1286 struct ath_vif *avp = (void *)vif->drv_priv;
1287
1288 mutex_lock(&sc->mutex);
1289
1290 if (config_enabled(CONFIG_ATH9K_TX99)) {
1291 mutex_unlock(&sc->mutex);
1292 return -EOPNOTSUPP;
1293 }
1294
1295 ath_dbg(common, CONFIG, "Change Interface\n");
1296
1297 if (ath9k_uses_beacons(vif->type))
1298 ath9k_beacon_remove_slot(sc, vif);
1299
1300 vif->type = new_type;
1301 vif->p2p = p2p;
1302
1303 if (ath9k_uses_beacons(vif->type))
1304 ath9k_beacon_assign_slot(sc, vif);
1305
1306 ath9k_assign_hw_queues(hw, vif);
1307 ath9k_calculate_summary_state(sc, avp->chanctx);
1308
1309 ath9k_set_txpower(sc, vif);
1310
1311 mutex_unlock(&sc->mutex);
1312 return 0;
1313 }
1314
ath9k_remove_interface(struct ieee80211_hw * hw,struct ieee80211_vif * vif)1315 static void ath9k_remove_interface(struct ieee80211_hw *hw,
1316 struct ieee80211_vif *vif)
1317 {
1318 struct ath_softc *sc = hw->priv;
1319 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1320 struct ath_vif *avp = (void *)vif->drv_priv;
1321
1322 ath_dbg(common, CONFIG, "Detach Interface\n");
1323
1324 mutex_lock(&sc->mutex);
1325
1326 ath9k_p2p_remove_vif(sc, vif);
1327
1328 sc->cur_chan->nvifs--;
1329 sc->tx99_vif = NULL;
1330 if (!ath9k_is_chanctx_enabled())
1331 list_del(&avp->list);
1332
1333 if (ath9k_uses_beacons(vif->type))
1334 ath9k_beacon_remove_slot(sc, vif);
1335
1336 ath_tx_node_cleanup(sc, &avp->mcast_node);
1337
1338 ath9k_calculate_summary_state(sc, avp->chanctx);
1339
1340 ath9k_set_txpower(sc, NULL);
1341
1342 mutex_unlock(&sc->mutex);
1343 }
1344
ath9k_enable_ps(struct ath_softc * sc)1345 static void ath9k_enable_ps(struct ath_softc *sc)
1346 {
1347 struct ath_hw *ah = sc->sc_ah;
1348 struct ath_common *common = ath9k_hw_common(ah);
1349
1350 if (config_enabled(CONFIG_ATH9K_TX99))
1351 return;
1352
1353 sc->ps_enabled = true;
1354 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
1355 if ((ah->imask & ATH9K_INT_TIM_TIMER) == 0) {
1356 ah->imask |= ATH9K_INT_TIM_TIMER;
1357 ath9k_hw_set_interrupts(ah);
1358 }
1359 ath9k_hw_setrxabort(ah, 1);
1360 }
1361 ath_dbg(common, PS, "PowerSave enabled\n");
1362 }
1363
ath9k_disable_ps(struct ath_softc * sc)1364 static void ath9k_disable_ps(struct ath_softc *sc)
1365 {
1366 struct ath_hw *ah = sc->sc_ah;
1367 struct ath_common *common = ath9k_hw_common(ah);
1368
1369 if (config_enabled(CONFIG_ATH9K_TX99))
1370 return;
1371
1372 sc->ps_enabled = false;
1373 ath9k_hw_setpower(ah, ATH9K_PM_AWAKE);
1374 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
1375 ath9k_hw_setrxabort(ah, 0);
1376 sc->ps_flags &= ~(PS_WAIT_FOR_BEACON |
1377 PS_WAIT_FOR_CAB |
1378 PS_WAIT_FOR_PSPOLL_DATA |
1379 PS_WAIT_FOR_TX_ACK);
1380 if (ah->imask & ATH9K_INT_TIM_TIMER) {
1381 ah->imask &= ~ATH9K_INT_TIM_TIMER;
1382 ath9k_hw_set_interrupts(ah);
1383 }
1384 }
1385 ath_dbg(common, PS, "PowerSave disabled\n");
1386 }
1387
ath9k_config(struct ieee80211_hw * hw,u32 changed)1388 static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1389 {
1390 struct ath_softc *sc = hw->priv;
1391 struct ath_hw *ah = sc->sc_ah;
1392 struct ath_common *common = ath9k_hw_common(ah);
1393 struct ieee80211_conf *conf = &hw->conf;
1394 struct ath_chanctx *ctx = sc->cur_chan;
1395
1396 ath9k_ps_wakeup(sc);
1397 mutex_lock(&sc->mutex);
1398
1399 if (changed & IEEE80211_CONF_CHANGE_IDLE) {
1400 sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
1401 if (sc->ps_idle) {
1402 ath_cancel_work(sc);
1403 ath9k_stop_btcoex(sc);
1404 } else {
1405 ath9k_start_btcoex(sc);
1406 /*
1407 * The chip needs a reset to properly wake up from
1408 * full sleep
1409 */
1410 ath_chanctx_set_channel(sc, ctx, &ctx->chandef);
1411 }
1412 }
1413
1414 /*
1415 * We just prepare to enable PS. We have to wait until our AP has
1416 * ACK'd our null data frame to disable RX otherwise we'll ignore
1417 * those ACKs and end up retransmitting the same null data frames.
1418 * IEEE80211_CONF_CHANGE_PS is only passed by mac80211 for STA mode.
1419 */
1420 if (changed & IEEE80211_CONF_CHANGE_PS) {
1421 unsigned long flags;
1422 spin_lock_irqsave(&sc->sc_pm_lock, flags);
1423 if (conf->flags & IEEE80211_CONF_PS)
1424 ath9k_enable_ps(sc);
1425 else
1426 ath9k_disable_ps(sc);
1427 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
1428 }
1429
1430 if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
1431 if (conf->flags & IEEE80211_CONF_MONITOR) {
1432 ath_dbg(common, CONFIG, "Monitor mode is enabled\n");
1433 sc->sc_ah->is_monitoring = true;
1434 } else {
1435 ath_dbg(common, CONFIG, "Monitor mode is disabled\n");
1436 sc->sc_ah->is_monitoring = false;
1437 }
1438 }
1439
1440 if (!ath9k_is_chanctx_enabled() && (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
1441 ctx->offchannel = !!(conf->flags & IEEE80211_CONF_OFFCHANNEL);
1442 ath_chanctx_set_channel(sc, ctx, &hw->conf.chandef);
1443 }
1444
1445 mutex_unlock(&sc->mutex);
1446 ath9k_ps_restore(sc);
1447
1448 return 0;
1449 }
1450
1451 #define SUPPORTED_FILTERS \
1452 (FIF_ALLMULTI | \
1453 FIF_CONTROL | \
1454 FIF_PSPOLL | \
1455 FIF_OTHER_BSS | \
1456 FIF_BCN_PRBRESP_PROMISC | \
1457 FIF_PROBE_REQ | \
1458 FIF_FCSFAIL)
1459
1460 /* FIXME: sc->sc_full_reset ? */
ath9k_configure_filter(struct ieee80211_hw * hw,unsigned int changed_flags,unsigned int * total_flags,u64 multicast)1461 static void ath9k_configure_filter(struct ieee80211_hw *hw,
1462 unsigned int changed_flags,
1463 unsigned int *total_flags,
1464 u64 multicast)
1465 {
1466 struct ath_softc *sc = hw->priv;
1467 struct ath_chanctx *ctx;
1468 u32 rfilt;
1469
1470 changed_flags &= SUPPORTED_FILTERS;
1471 *total_flags &= SUPPORTED_FILTERS;
1472
1473 spin_lock_bh(&sc->chan_lock);
1474 ath_for_each_chanctx(sc, ctx)
1475 ctx->rxfilter = *total_flags;
1476 #ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
1477 sc->offchannel.chan.rxfilter = *total_flags;
1478 #endif
1479 spin_unlock_bh(&sc->chan_lock);
1480
1481 ath9k_ps_wakeup(sc);
1482 rfilt = ath_calcrxfilter(sc);
1483 ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
1484 ath9k_ps_restore(sc);
1485
1486 ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG, "Set HW RX filter: 0x%x\n",
1487 rfilt);
1488 }
1489
ath9k_sta_add(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_sta * sta)1490 static int ath9k_sta_add(struct ieee80211_hw *hw,
1491 struct ieee80211_vif *vif,
1492 struct ieee80211_sta *sta)
1493 {
1494 struct ath_softc *sc = hw->priv;
1495 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1496 struct ath_node *an = (struct ath_node *) sta->drv_priv;
1497 struct ieee80211_key_conf ps_key = { };
1498 int key;
1499
1500 ath_node_attach(sc, sta, vif);
1501
1502 if (vif->type != NL80211_IFTYPE_AP &&
1503 vif->type != NL80211_IFTYPE_AP_VLAN)
1504 return 0;
1505
1506 key = ath_key_config(common, vif, sta, &ps_key);
1507 if (key > 0) {
1508 an->ps_key = key;
1509 an->key_idx[0] = key;
1510 }
1511
1512 return 0;
1513 }
1514
ath9k_del_ps_key(struct ath_softc * sc,struct ieee80211_vif * vif,struct ieee80211_sta * sta)1515 static void ath9k_del_ps_key(struct ath_softc *sc,
1516 struct ieee80211_vif *vif,
1517 struct ieee80211_sta *sta)
1518 {
1519 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1520 struct ath_node *an = (struct ath_node *) sta->drv_priv;
1521 struct ieee80211_key_conf ps_key = { .hw_key_idx = an->ps_key };
1522
1523 if (!an->ps_key)
1524 return;
1525
1526 ath_key_delete(common, &ps_key);
1527 an->ps_key = 0;
1528 an->key_idx[0] = 0;
1529 }
1530
ath9k_sta_remove(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_sta * sta)1531 static int ath9k_sta_remove(struct ieee80211_hw *hw,
1532 struct ieee80211_vif *vif,
1533 struct ieee80211_sta *sta)
1534 {
1535 struct ath_softc *sc = hw->priv;
1536
1537 ath9k_del_ps_key(sc, vif, sta);
1538 ath_node_detach(sc, sta);
1539
1540 return 0;
1541 }
1542
ath9k_sta_state(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_sta * sta,enum ieee80211_sta_state old_state,enum ieee80211_sta_state new_state)1543 static int ath9k_sta_state(struct ieee80211_hw *hw,
1544 struct ieee80211_vif *vif,
1545 struct ieee80211_sta *sta,
1546 enum ieee80211_sta_state old_state,
1547 enum ieee80211_sta_state new_state)
1548 {
1549 struct ath_softc *sc = hw->priv;
1550 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1551 int ret = 0;
1552
1553 if (old_state == IEEE80211_STA_AUTH &&
1554 new_state == IEEE80211_STA_ASSOC) {
1555 ret = ath9k_sta_add(hw, vif, sta);
1556 ath_dbg(common, CONFIG,
1557 "Add station: %pM\n", sta->addr);
1558 } else if (old_state == IEEE80211_STA_ASSOC &&
1559 new_state == IEEE80211_STA_AUTH) {
1560 ret = ath9k_sta_remove(hw, vif, sta);
1561 ath_dbg(common, CONFIG,
1562 "Remove station: %pM\n", sta->addr);
1563 }
1564
1565 if (ath9k_is_chanctx_enabled()) {
1566 if (vif->type == NL80211_IFTYPE_STATION) {
1567 if (old_state == IEEE80211_STA_ASSOC &&
1568 new_state == IEEE80211_STA_AUTHORIZED)
1569 ath_chanctx_event(sc, vif,
1570 ATH_CHANCTX_EVENT_AUTHORIZED);
1571 }
1572 }
1573
1574 return ret;
1575 }
1576
ath9k_sta_set_tx_filter(struct ath_hw * ah,struct ath_node * an,bool set)1577 static void ath9k_sta_set_tx_filter(struct ath_hw *ah,
1578 struct ath_node *an,
1579 bool set)
1580 {
1581 int i;
1582
1583 for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
1584 if (!an->key_idx[i])
1585 continue;
1586 ath9k_hw_set_tx_filter(ah, an->key_idx[i], set);
1587 }
1588 }
1589
ath9k_sta_notify(struct ieee80211_hw * hw,struct ieee80211_vif * vif,enum sta_notify_cmd cmd,struct ieee80211_sta * sta)1590 static void ath9k_sta_notify(struct ieee80211_hw *hw,
1591 struct ieee80211_vif *vif,
1592 enum sta_notify_cmd cmd,
1593 struct ieee80211_sta *sta)
1594 {
1595 struct ath_softc *sc = hw->priv;
1596 struct ath_node *an = (struct ath_node *) sta->drv_priv;
1597
1598 switch (cmd) {
1599 case STA_NOTIFY_SLEEP:
1600 an->sleeping = true;
1601 ath_tx_aggr_sleep(sta, sc, an);
1602 ath9k_sta_set_tx_filter(sc->sc_ah, an, true);
1603 break;
1604 case STA_NOTIFY_AWAKE:
1605 ath9k_sta_set_tx_filter(sc->sc_ah, an, false);
1606 an->sleeping = false;
1607 ath_tx_aggr_wakeup(sc, an);
1608 break;
1609 }
1610 }
1611
ath9k_conf_tx(struct ieee80211_hw * hw,struct ieee80211_vif * vif,u16 queue,const struct ieee80211_tx_queue_params * params)1612 static int ath9k_conf_tx(struct ieee80211_hw *hw,
1613 struct ieee80211_vif *vif, u16 queue,
1614 const struct ieee80211_tx_queue_params *params)
1615 {
1616 struct ath_softc *sc = hw->priv;
1617 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1618 struct ath_txq *txq;
1619 struct ath9k_tx_queue_info qi;
1620 int ret = 0;
1621
1622 if (queue >= IEEE80211_NUM_ACS)
1623 return 0;
1624
1625 txq = sc->tx.txq_map[queue];
1626
1627 ath9k_ps_wakeup(sc);
1628 mutex_lock(&sc->mutex);
1629
1630 memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
1631
1632 qi.tqi_aifs = params->aifs;
1633 qi.tqi_cwmin = params->cw_min;
1634 qi.tqi_cwmax = params->cw_max;
1635 qi.tqi_burstTime = params->txop * 32;
1636
1637 ath_dbg(common, CONFIG,
1638 "Configure tx [queue/halq] [%d/%d], aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
1639 queue, txq->axq_qnum, params->aifs, params->cw_min,
1640 params->cw_max, params->txop);
1641
1642 ath_update_max_aggr_framelen(sc, queue, qi.tqi_burstTime);
1643 ret = ath_txq_update(sc, txq->axq_qnum, &qi);
1644 if (ret)
1645 ath_err(common, "TXQ Update failed\n");
1646
1647 mutex_unlock(&sc->mutex);
1648 ath9k_ps_restore(sc);
1649
1650 return ret;
1651 }
1652
ath9k_set_key(struct ieee80211_hw * hw,enum set_key_cmd cmd,struct ieee80211_vif * vif,struct ieee80211_sta * sta,struct ieee80211_key_conf * key)1653 static int ath9k_set_key(struct ieee80211_hw *hw,
1654 enum set_key_cmd cmd,
1655 struct ieee80211_vif *vif,
1656 struct ieee80211_sta *sta,
1657 struct ieee80211_key_conf *key)
1658 {
1659 struct ath_softc *sc = hw->priv;
1660 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1661 struct ath_node *an = NULL;
1662 int ret = 0, i;
1663
1664 if (ath9k_modparam_nohwcrypt)
1665 return -ENOSPC;
1666
1667 if ((vif->type == NL80211_IFTYPE_ADHOC ||
1668 vif->type == NL80211_IFTYPE_MESH_POINT) &&
1669 (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
1670 key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
1671 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
1672 /*
1673 * For now, disable hw crypto for the RSN IBSS group keys. This
1674 * could be optimized in the future to use a modified key cache
1675 * design to support per-STA RX GTK, but until that gets
1676 * implemented, use of software crypto for group addressed
1677 * frames is a acceptable to allow RSN IBSS to be used.
1678 */
1679 return -EOPNOTSUPP;
1680 }
1681
1682 mutex_lock(&sc->mutex);
1683 ath9k_ps_wakeup(sc);
1684 ath_dbg(common, CONFIG, "Set HW Key %d\n", cmd);
1685 if (sta)
1686 an = (struct ath_node *)sta->drv_priv;
1687
1688 switch (cmd) {
1689 case SET_KEY:
1690 if (sta)
1691 ath9k_del_ps_key(sc, vif, sta);
1692
1693 key->hw_key_idx = 0;
1694 ret = ath_key_config(common, vif, sta, key);
1695 if (ret >= 0) {
1696 key->hw_key_idx = ret;
1697 /* push IV and Michael MIC generation to stack */
1698 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1699 if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
1700 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1701 if (sc->sc_ah->sw_mgmt_crypto_tx &&
1702 key->cipher == WLAN_CIPHER_SUITE_CCMP)
1703 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
1704 ret = 0;
1705 }
1706 if (an && key->hw_key_idx) {
1707 for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
1708 if (an->key_idx[i])
1709 continue;
1710 an->key_idx[i] = key->hw_key_idx;
1711 break;
1712 }
1713 WARN_ON(i == ARRAY_SIZE(an->key_idx));
1714 }
1715 break;
1716 case DISABLE_KEY:
1717 ath_key_delete(common, key);
1718 if (an) {
1719 for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
1720 if (an->key_idx[i] != key->hw_key_idx)
1721 continue;
1722 an->key_idx[i] = 0;
1723 break;
1724 }
1725 }
1726 key->hw_key_idx = 0;
1727 break;
1728 default:
1729 ret = -EINVAL;
1730 }
1731
1732 ath9k_ps_restore(sc);
1733 mutex_unlock(&sc->mutex);
1734
1735 return ret;
1736 }
1737
ath9k_bss_info_changed(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_bss_conf * bss_conf,u32 changed)1738 static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1739 struct ieee80211_vif *vif,
1740 struct ieee80211_bss_conf *bss_conf,
1741 u32 changed)
1742 {
1743 #define CHECK_ANI \
1744 (BSS_CHANGED_ASSOC | \
1745 BSS_CHANGED_IBSS | \
1746 BSS_CHANGED_BEACON_ENABLED)
1747
1748 struct ath_softc *sc = hw->priv;
1749 struct ath_hw *ah = sc->sc_ah;
1750 struct ath_common *common = ath9k_hw_common(ah);
1751 struct ath_vif *avp = (void *)vif->drv_priv;
1752 int slottime;
1753
1754 ath9k_ps_wakeup(sc);
1755 mutex_lock(&sc->mutex);
1756
1757 if (changed & BSS_CHANGED_ASSOC) {
1758 ath_dbg(common, CONFIG, "BSSID %pM Changed ASSOC %d\n",
1759 bss_conf->bssid, bss_conf->assoc);
1760
1761 memcpy(avp->bssid, bss_conf->bssid, ETH_ALEN);
1762 avp->aid = bss_conf->aid;
1763 avp->assoc = bss_conf->assoc;
1764
1765 ath9k_calculate_summary_state(sc, avp->chanctx);
1766 }
1767
1768 if ((changed & BSS_CHANGED_IBSS) ||
1769 (changed & BSS_CHANGED_OCB)) {
1770 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
1771 common->curaid = bss_conf->aid;
1772 ath9k_hw_write_associd(sc->sc_ah);
1773 }
1774
1775 if ((changed & BSS_CHANGED_BEACON_ENABLED) ||
1776 (changed & BSS_CHANGED_BEACON_INT) ||
1777 (changed & BSS_CHANGED_BEACON_INFO)) {
1778 ath9k_beacon_config(sc, vif, changed);
1779 if (changed & BSS_CHANGED_BEACON_ENABLED)
1780 ath9k_calculate_summary_state(sc, avp->chanctx);
1781 }
1782
1783 if ((avp->chanctx == sc->cur_chan) &&
1784 (changed & BSS_CHANGED_ERP_SLOT)) {
1785 if (bss_conf->use_short_slot)
1786 slottime = 9;
1787 else
1788 slottime = 20;
1789 if (vif->type == NL80211_IFTYPE_AP) {
1790 /*
1791 * Defer update, so that connected stations can adjust
1792 * their settings at the same time.
1793 * See beacon.c for more details
1794 */
1795 sc->beacon.slottime = slottime;
1796 sc->beacon.updateslot = UPDATE;
1797 } else {
1798 ah->slottime = slottime;
1799 ath9k_hw_init_global_settings(ah);
1800 }
1801 }
1802
1803 if (changed & BSS_CHANGED_P2P_PS)
1804 ath9k_p2p_bss_info_changed(sc, vif);
1805
1806 if (changed & CHECK_ANI)
1807 ath_check_ani(sc);
1808
1809 if (changed & BSS_CHANGED_TXPOWER) {
1810 ath_dbg(common, CONFIG, "vif %pM power %d dbm power_type %d\n",
1811 vif->addr, bss_conf->txpower, bss_conf->txpower_type);
1812 ath9k_set_txpower(sc, vif);
1813 }
1814
1815 mutex_unlock(&sc->mutex);
1816 ath9k_ps_restore(sc);
1817
1818 #undef CHECK_ANI
1819 }
1820
ath9k_get_tsf(struct ieee80211_hw * hw,struct ieee80211_vif * vif)1821 static u64 ath9k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1822 {
1823 struct ath_softc *sc = hw->priv;
1824 u64 tsf;
1825
1826 mutex_lock(&sc->mutex);
1827 ath9k_ps_wakeup(sc);
1828 tsf = ath9k_hw_gettsf64(sc->sc_ah);
1829 ath9k_ps_restore(sc);
1830 mutex_unlock(&sc->mutex);
1831
1832 return tsf;
1833 }
1834
ath9k_set_tsf(struct ieee80211_hw * hw,struct ieee80211_vif * vif,u64 tsf)1835 static void ath9k_set_tsf(struct ieee80211_hw *hw,
1836 struct ieee80211_vif *vif,
1837 u64 tsf)
1838 {
1839 struct ath_softc *sc = hw->priv;
1840
1841 mutex_lock(&sc->mutex);
1842 ath9k_ps_wakeup(sc);
1843 ath9k_hw_settsf64(sc->sc_ah, tsf);
1844 ath9k_ps_restore(sc);
1845 mutex_unlock(&sc->mutex);
1846 }
1847
ath9k_reset_tsf(struct ieee80211_hw * hw,struct ieee80211_vif * vif)1848 static void ath9k_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1849 {
1850 struct ath_softc *sc = hw->priv;
1851
1852 mutex_lock(&sc->mutex);
1853
1854 ath9k_ps_wakeup(sc);
1855 ath9k_hw_reset_tsf(sc->sc_ah);
1856 ath9k_ps_restore(sc);
1857
1858 mutex_unlock(&sc->mutex);
1859 }
1860
ath9k_ampdu_action(struct ieee80211_hw * hw,struct ieee80211_vif * vif,enum ieee80211_ampdu_mlme_action action,struct ieee80211_sta * sta,u16 tid,u16 * ssn,u8 buf_size,bool amsdu)1861 static int ath9k_ampdu_action(struct ieee80211_hw *hw,
1862 struct ieee80211_vif *vif,
1863 enum ieee80211_ampdu_mlme_action action,
1864 struct ieee80211_sta *sta,
1865 u16 tid, u16 *ssn, u8 buf_size, bool amsdu)
1866 {
1867 struct ath_softc *sc = hw->priv;
1868 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1869 bool flush = false;
1870 int ret = 0;
1871
1872 mutex_lock(&sc->mutex);
1873
1874 switch (action) {
1875 case IEEE80211_AMPDU_RX_START:
1876 break;
1877 case IEEE80211_AMPDU_RX_STOP:
1878 break;
1879 case IEEE80211_AMPDU_TX_START:
1880 if (ath9k_is_chanctx_enabled()) {
1881 if (test_bit(ATH_OP_SCANNING, &common->op_flags)) {
1882 ret = -EBUSY;
1883 break;
1884 }
1885 }
1886 ath9k_ps_wakeup(sc);
1887 ret = ath_tx_aggr_start(sc, sta, tid, ssn);
1888 if (!ret)
1889 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1890 ath9k_ps_restore(sc);
1891 break;
1892 case IEEE80211_AMPDU_TX_STOP_FLUSH:
1893 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
1894 flush = true;
1895 case IEEE80211_AMPDU_TX_STOP_CONT:
1896 ath9k_ps_wakeup(sc);
1897 ath_tx_aggr_stop(sc, sta, tid);
1898 if (!flush)
1899 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1900 ath9k_ps_restore(sc);
1901 break;
1902 case IEEE80211_AMPDU_TX_OPERATIONAL:
1903 ath9k_ps_wakeup(sc);
1904 ath_tx_aggr_resume(sc, sta, tid);
1905 ath9k_ps_restore(sc);
1906 break;
1907 default:
1908 ath_err(ath9k_hw_common(sc->sc_ah), "Unknown AMPDU action\n");
1909 }
1910
1911 mutex_unlock(&sc->mutex);
1912
1913 return ret;
1914 }
1915
static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
                            struct survey_info *survey)
{
        struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ieee80211_supported_band *sband;
        struct ieee80211_channel *chan;
        int pos;

        if (config_enabled(CONFIG_ATH9K_TX99))
                return -EOPNOTSUPP;

        spin_lock_bh(&common->cc_lock);
        if (idx == 0)
                ath_update_survey_stats(sc);

        sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
        if (sband && idx >= sband->n_channels) {
                idx -= sband->n_channels;
                sband = NULL;
        }

        if (!sband)
                sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];

        if (!sband || idx >= sband->n_channels) {
                spin_unlock_bh(&common->cc_lock);
                return -ENOENT;
        }

        chan = &sband->channels[idx];
        pos = chan->hw_value;
        memcpy(survey, &sc->survey[pos], sizeof(*survey));
        survey->channel = chan;
        spin_unlock_bh(&common->cc_lock);

        return 0;
}

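/*
 * Enable dynamic ACK timeout estimation (dynack) and update the RX
 * filter accordingly. Compiles to a no-op without CONFIG_ATH9K_DYNACK.
 */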
static void ath9k_enable_dynack(struct ath_softc *sc)
{
#ifdef CONFIG_ATH9K_DYNACK
        u32 rfilt;
        struct ath_hw *ah = sc->sc_ah;

        ath_dynack_reset(ah);

        ah->dynack.enabled = true;
        rfilt = ath_calcrxfilter(sc);
        ath9k_hw_setrxfilter(ah, rfilt);
#endif
}

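/*
 * Apply a coverage class (used to scale ACK/slot timeouts for long
 * distance links). A negative value from mac80211 re-enables dynack
 * instead of programming a fixed coverage class.
 */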
static void ath9k_set_coverage_class(struct ieee80211_hw *hw,
                                     s16 coverage_class)
{
        struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;

        if (config_enabled(CONFIG_ATH9K_TX99))
                return;

        mutex_lock(&sc->mutex);

        if (coverage_class >= 0) {
                ah->coverage_class = coverage_class;
                if (ah->dynack.enabled) {
                        u32 rfilt;

                        ah->dynack.enabled = false;
                        rfilt = ath_calcrxfilter(sc);
                        ath9k_hw_setrxfilter(ah, rfilt);
                }
                ath9k_ps_wakeup(sc);
                ath9k_hw_init_global_settings(ah);
                ath9k_ps_restore(sc);
        } else if (!ah->dynack.enabled) {
                ath9k_enable_dynack(sc);
        }

        mutex_unlock(&sc->mutex);
}

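/*
 * Check every configured TX queue for pending frames, optionally
 * including frames still sitting in the driver's software queues.
 */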
static bool ath9k_has_tx_pending(struct ath_softc *sc,
                                 bool sw_pending)
{
        int i, npend = 0;

        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                if (!ATH_TXQ_SETUP(sc, i))
                        continue;

                npend = ath9k_has_pending_frames(sc, &sc->tx.txq[i],
                                                 sw_pending);
                if (npend)
                        break;
        }

        return !!npend;
}

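/*
 * mac80211 flush callback. With channel contexts active (MCC), the
 * flush is performed without holding sc->mutex and with an extended
 * timeout so the channel scheduler can keep switching contexts.
 */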
static void ath9k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                        u32 queues, bool drop)
{
        struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);

        if (ath9k_is_chanctx_enabled()) {
                if (!test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags))
                        goto flush;

                /*
                 * If MCC is active, extend the flush timeout
                 * and wait for the HW/SW queues to become
                 * empty. This needs to be done outside the
                 * sc->mutex lock to allow the channel scheduler
                 * to switch channel contexts.
                 *
                 * The vif queues have been stopped in mac80211,
                 * so there won't be any incoming frames.
                 */
                __ath9k_flush(hw, queues, drop, true, true);
                return;
        }
flush:
        mutex_lock(&sc->mutex);
        __ath9k_flush(hw, queues, drop, true, false);
        mutex_unlock(&sc->mutex);
}

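/*
 * Wait for the TX queues to drain within the flush timeout. If they do
 * not and a drop was requested, forcibly drain them and fall back to a
 * chip reset when draining fails.
 */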
void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop,
                   bool sw_pending, bool timeout_override)
{
        struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        int timeout;
        bool drain_txq;

        cancel_delayed_work_sync(&sc->tx_complete_work);

        if (ah->ah_flags & AH_UNPLUGGED) {
                ath_dbg(common, ANY, "Device has been unplugged!\n");
                return;
        }

        if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
                ath_dbg(common, ANY, "Device not present\n");
                return;
        }

        spin_lock_bh(&sc->chan_lock);
        if (timeout_override)
                timeout = HZ / 5;
        else
                timeout = sc->cur_chan->flush_timeout;
        spin_unlock_bh(&sc->chan_lock);

        ath_dbg(common, CHAN_CTX,
                "Flush timeout: %d\n", jiffies_to_msecs(timeout));

        if (wait_event_timeout(sc->tx_wait, !ath9k_has_tx_pending(sc, sw_pending),
                               timeout) > 0)
                drop = false;

        if (drop) {
                ath9k_ps_wakeup(sc);
                spin_lock_bh(&sc->sc_pcu_lock);
                drain_txq = ath_drain_all_txq(sc);
                spin_unlock_bh(&sc->sc_pcu_lock);

                if (!drain_txq)
                        ath_reset(sc, NULL);

                ath9k_ps_restore(sc);
        }

        ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
}

static bool ath9k_tx_frames_pending(struct ieee80211_hw *hw)
{
        struct ath_softc *sc = hw->priv;

        return ath9k_has_tx_pending(sc, true);
}

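/*
 * Report whether the last beacon on the first beacon slot was
 * transmitted successfully, completing the TX status of the beacon
 * descriptor on non-EDMA hardware if necessary.
 */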
static int ath9k_tx_last_beacon(struct ieee80211_hw *hw)
{
        struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ieee80211_vif *vif;
        struct ath_vif *avp;
        struct ath_buf *bf;
        struct ath_tx_status ts;
        bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
        int status;

        vif = sc->beacon.bslot[0];
        if (!vif)
                return 0;

        if (!vif->bss_conf.enable_beacon)
                return 0;

        avp = (void *)vif->drv_priv;

        if (!sc->beacon.tx_processed && !edma) {
                tasklet_disable(&sc->bcon_tasklet);

                bf = avp->av_bcbuf;
                if (!bf || !bf->bf_mpdu)
                        goto skip;

                status = ath9k_hw_txprocdesc(ah, bf->bf_desc, &ts);
                if (status == -EINPROGRESS)
                        goto skip;

                sc->beacon.tx_processed = true;
                sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);

skip:
                tasklet_enable(&sc->bcon_tasklet);
        }

        return sc->beacon.tx_last;
}

static int ath9k_get_stats(struct ieee80211_hw *hw,
                           struct ieee80211_low_level_stats *stats)
{
        struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath9k_mib_stats *mib_stats = &ah->ah_mibStats;

        stats->dot11ACKFailureCount = mib_stats->ackrcv_bad;
        stats->dot11RTSFailureCount = mib_stats->rts_bad;
        stats->dot11FCSErrorCount = mib_stats->fcs_bad;
        stats->dot11RTSSuccessCount = mib_stats->rts_good;
        return 0;
}

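/*
 * Map a user-supplied antenna bitmask onto the chains that are
 * actually present in the hardware capability mask.
 */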
static u32 fill_chainmask(u32 cap, u32 new)
{
        u32 filled = 0;
        int i;

        for (i = 0; cap && new; i++, cap >>= 1) {
                if (!(cap & BIT(0)))
                        continue;

                if (new & BIT(0))
                        filled |= BIT(i);

                new >>= 1;
        }

        return filled;
}

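/*
 * Pre-AR9300 hardware only supports a limited set of RX chain
 * combinations; reject antenna masks the chip cannot handle.
 */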
static bool validate_antenna_mask(struct ath_hw *ah, u32 val)
{
        if (AR_SREV_9300_20_OR_LATER(ah))
                return true;

        switch (val & 0x7) {
        case 0x1:
        case 0x3:
        case 0x7:
                return true;
        case 0x2:
                return (ah->caps.rx_chainmask == 1);
        default:
                return false;
        }
}

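/*
 * Configure the TX/RX antenna (chain) masks. On multi-chain hardware
 * the RX mask is widened to cover the TX chains, and AR9100 keeps all
 * RX chains enabled to avoid calibration issues.
 */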
static int ath9k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
{
        struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;

        if (ah->caps.rx_chainmask != 1)
                rx_ant |= tx_ant;

        if (!validate_antenna_mask(ah, rx_ant) || !tx_ant)
                return -EINVAL;

        sc->ant_rx = rx_ant;
        sc->ant_tx = tx_ant;

        if (ah->caps.rx_chainmask == 1)
                return 0;

        /* AR9100 runs into calibration issues if not all rx chains are enabled */
        if (AR_SREV_9100(ah))
                ah->rxchainmask = 0x7;
        else
                ah->rxchainmask = fill_chainmask(ah->caps.rx_chainmask, rx_ant);

        ah->txchainmask = fill_chainmask(ah->caps.tx_chainmask, tx_ant);
        ath9k_cmn_reload_chainmask(ah);

        return 0;
}

static int ath9k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
        struct ath_softc *sc = hw->priv;

        *tx_ant = sc->ant_tx;
        *rx_ant = sc->ant_rx;
        return 0;
}

static void ath9k_sw_scan_start(struct ieee80211_hw *hw,
                                struct ieee80211_vif *vif,
                                const u8 *mac_addr)
{
        struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        set_bit(ATH_OP_SCANNING, &common->op_flags);
}

static void ath9k_sw_scan_complete(struct ieee80211_hw *hw,
                                   struct ieee80211_vif *vif)
{
        struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        clear_bit(ATH_OP_SCANNING, &common->op_flags);
}

#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT

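/*
 * Abort any pending offchannel operation (remain-on-channel or
 * hardware scan).
 */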
static void ath9k_cancel_pending_offchannel(struct ath_softc *sc)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);

        if (sc->offchannel.roc_vif) {
                ath_dbg(common, CHAN_CTX,
                        "%s: Aborting RoC\n", __func__);

                del_timer_sync(&sc->offchannel.timer);
                if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START)
                        ath_roc_complete(sc, ATH_ROC_COMPLETE_ABORT);
        }

        if (test_bit(ATH_OP_SCANNING, &common->op_flags)) {
                ath_dbg(common, CHAN_CTX,
                        "%s: Aborting HW scan\n", __func__);

                del_timer_sync(&sc->offchannel.timer);
                ath_scan_complete(sc, true);
        }
}

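/*
 * Start an offchannel hardware scan. Only one scan request is accepted
 * at a time; the offchannel state machine is kicked if it is idle.
 */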
static int ath9k_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                         struct ieee80211_scan_request *hw_req)
{
        struct cfg80211_scan_request *req = &hw_req->req;
        struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int ret = 0;

        mutex_lock(&sc->mutex);

        if (WARN_ON(sc->offchannel.scan_req)) {
                ret = -EBUSY;
                goto out;
        }

        ath9k_ps_wakeup(sc);
        set_bit(ATH_OP_SCANNING, &common->op_flags);
        sc->offchannel.scan_vif = vif;
        sc->offchannel.scan_req = req;
        sc->offchannel.scan_idx = 0;

        ath_dbg(common, CHAN_CTX, "HW scan request received on vif: %pM\n",
                vif->addr);

        if (sc->offchannel.state == ATH_OFFCHANNEL_IDLE) {
                ath_dbg(common, CHAN_CTX, "Starting HW scan\n");
                ath_offchannel_next(sc);
        }

out:
        mutex_unlock(&sc->mutex);

        return ret;
}

static void ath9k_cancel_hw_scan(struct ieee80211_hw *hw,
                                 struct ieee80211_vif *vif)
{
        struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);

        ath_dbg(common, CHAN_CTX, "Cancel HW scan on vif: %pM\n", vif->addr);

        mutex_lock(&sc->mutex);
        del_timer_sync(&sc->offchannel.timer);
        ath_scan_complete(sc, true);
        mutex_unlock(&sc->mutex);
}

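/*
 * Start a remain-on-channel (RoC) period on the requested channel,
 * again driven by the offchannel state machine.
 */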
static int ath9k_remain_on_channel(struct ieee80211_hw *hw,
                                   struct ieee80211_vif *vif,
                                   struct ieee80211_channel *chan, int duration,
                                   enum ieee80211_roc_type type)
{
        struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int ret = 0;

        mutex_lock(&sc->mutex);

        if (WARN_ON(sc->offchannel.roc_vif)) {
                ret = -EBUSY;
                goto out;
        }

        ath9k_ps_wakeup(sc);
        sc->offchannel.roc_vif = vif;
        sc->offchannel.roc_chan = chan;
        sc->offchannel.roc_duration = duration;

        ath_dbg(common, CHAN_CTX,
                "RoC request on vif: %pM, type: %d duration: %d\n",
                vif->addr, type, duration);

        if (sc->offchannel.state == ATH_OFFCHANNEL_IDLE) {
                ath_dbg(common, CHAN_CTX, "Starting RoC period\n");
                ath_offchannel_next(sc);
        }

out:
        mutex_unlock(&sc->mutex);

        return ret;
}

static int ath9k_cancel_remain_on_channel(struct ieee80211_hw *hw)
{
        struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);

        mutex_lock(&sc->mutex);

        ath_dbg(common, CHAN_CTX, "Cancel RoC\n");
        del_timer_sync(&sc->offchannel.timer);

        if (sc->offchannel.roc_vif) {
                if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START)
                        ath_roc_complete(sc, ATH_ROC_COMPLETE_CANCEL);
        }

        mutex_unlock(&sc->mutex);

        return 0;
}

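/*
 * Allocate a free driver channel context for mac80211 and assign it a
 * block of hardware queues.
 */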
static int ath9k_add_chanctx(struct ieee80211_hw *hw,
                             struct ieee80211_chanctx_conf *conf)
{
        struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_chanctx *ctx, **ptr;
        int pos;

        mutex_lock(&sc->mutex);

        ath_for_each_chanctx(sc, ctx) {
                if (ctx->assigned)
                        continue;

                ptr = (void *) conf->drv_priv;
                *ptr = ctx;
                ctx->assigned = true;
                pos = ctx - &sc->chanctx[0];
                ctx->hw_queue_base = pos * IEEE80211_NUM_ACS;

                ath_dbg(common, CHAN_CTX,
                        "Add channel context: %d MHz\n",
                        conf->def.chan->center_freq);

                ath_chanctx_set_channel(sc, ctx, &conf->def);

                mutex_unlock(&sc->mutex);
                return 0;
        }

        mutex_unlock(&sc->mutex);
        return -ENOSPC;
}

static void ath9k_remove_chanctx(struct ieee80211_hw *hw,
                                 struct ieee80211_chanctx_conf *conf)
{
        struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_chanctx *ctx = ath_chanctx_get(conf);

        mutex_lock(&sc->mutex);

        ath_dbg(common, CHAN_CTX,
                "Remove channel context: %d MHz\n",
                conf->def.chan->center_freq);

        ctx->assigned = false;
        ctx->hw_queue_base = 0;
        ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_UNASSIGN);

        mutex_unlock(&sc->mutex);
}

static void ath9k_change_chanctx(struct ieee80211_hw *hw,
                                 struct ieee80211_chanctx_conf *conf,
                                 u32 changed)
{
        struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_chanctx *ctx = ath_chanctx_get(conf);

        mutex_lock(&sc->mutex);
        ath_dbg(common, CHAN_CTX,
                "Change channel context: %d MHz\n",
                conf->def.chan->center_freq);
        ath_chanctx_set_channel(sc, ctx, &conf->def);
        mutex_unlock(&sc->mutex);
}

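/*
 * Bind an interface to a channel context and point its AC queues at
 * the context's hardware queue block.
 */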
static int ath9k_assign_vif_chanctx(struct ieee80211_hw *hw,
                                    struct ieee80211_vif *vif,
                                    struct ieee80211_chanctx_conf *conf)
{
        struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_vif *avp = (void *)vif->drv_priv;
        struct ath_chanctx *ctx = ath_chanctx_get(conf);
        int i;

        ath9k_cancel_pending_offchannel(sc);

        mutex_lock(&sc->mutex);

        ath_dbg(common, CHAN_CTX,
                "Assign VIF (addr: %pM, type: %d, p2p: %d) to channel context: %d MHz\n",
                vif->addr, vif->type, vif->p2p,
                conf->def.chan->center_freq);

        avp->chanctx = ctx;
        ctx->nvifs_assigned++;
        list_add_tail(&avp->list, &ctx->vifs);
        ath9k_calculate_summary_state(sc, ctx);
        for (i = 0; i < IEEE80211_NUM_ACS; i++)
                vif->hw_queue[i] = ctx->hw_queue_base + i;

        mutex_unlock(&sc->mutex);

        return 0;
}

static void ath9k_unassign_vif_chanctx(struct ieee80211_hw *hw,
                                       struct ieee80211_vif *vif,
                                       struct ieee80211_chanctx_conf *conf)
{
        struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_vif *avp = (void *)vif->drv_priv;
        struct ath_chanctx *ctx = ath_chanctx_get(conf);
        int ac;

        ath9k_cancel_pending_offchannel(sc);

        mutex_lock(&sc->mutex);

        ath_dbg(common, CHAN_CTX,
                "Remove VIF (addr: %pM, type: %d, p2p: %d) from channel context: %d MHz\n",
                vif->addr, vif->type, vif->p2p,
                conf->def.chan->center_freq);

        avp->chanctx = NULL;
        ctx->nvifs_assigned--;
        list_del(&avp->list);
        ath9k_calculate_summary_state(sc, ctx);
        for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
                vif->hw_queue[ac] = IEEE80211_INVAL_HW_QUEUE;

        mutex_unlock(&sc->mutex);
}

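/*
 * Called by mac80211 before management frame exchanges. If the
 * interface's context is not the active channel, give a present GO
 * interface a chance to announce NoA and then force a switch to the
 * interface's channel context.
 */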
static void ath9k_mgd_prepare_tx(struct ieee80211_hw *hw,
                                 struct ieee80211_vif *vif)
{
        struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_vif *avp = (struct ath_vif *) vif->drv_priv;
        struct ath_beacon_config *cur_conf;
        struct ath_chanctx *go_ctx;
        unsigned long timeout;
        bool changed = false;
        u32 beacon_int;

        if (!test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags))
                return;

        if (!avp->chanctx)
                return;

        mutex_lock(&sc->mutex);

        spin_lock_bh(&sc->chan_lock);
        if (sc->next_chan || (sc->cur_chan != avp->chanctx))
                changed = true;
        spin_unlock_bh(&sc->chan_lock);

        if (!changed)
                goto out;

        ath9k_cancel_pending_offchannel(sc);

        go_ctx = ath_is_go_chanctx_present(sc);

        if (go_ctx) {
                /*
                 * Wait till the GO interface gets a chance
                 * to send out an NoA.
                 */
                spin_lock_bh(&sc->chan_lock);
                sc->sched.mgd_prepare_tx = true;
                cur_conf = &go_ctx->beacon;
                beacon_int = TU_TO_USEC(cur_conf->beacon_interval);
                spin_unlock_bh(&sc->chan_lock);

                timeout = usecs_to_jiffies(beacon_int * 2);
                init_completion(&sc->go_beacon);

                mutex_unlock(&sc->mutex);

                if (wait_for_completion_timeout(&sc->go_beacon,
                                                timeout) == 0) {
                        ath_dbg(common, CHAN_CTX,
                                "Failed to send new NoA\n");

                        spin_lock_bh(&sc->chan_lock);
                        sc->sched.mgd_prepare_tx = false;
                        spin_unlock_bh(&sc->chan_lock);
                }

                mutex_lock(&sc->mutex);
        }

        ath_dbg(common, CHAN_CTX,
                "%s: Set chanctx state to FORCE_ACTIVE for vif: %pM\n",
                __func__, vif->addr);

        spin_lock_bh(&sc->chan_lock);
        sc->next_chan = avp->chanctx;
        sc->sched.state = ATH_CHANCTX_STATE_FORCE_ACTIVE;
        spin_unlock_bh(&sc->chan_lock);

        ath_chanctx_set_next(sc, true);
out:
        mutex_unlock(&sc->mutex);
}

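/*
 * Install the channel context related mac80211 callbacks; this is a
 * no-op unless channel context support was enabled at module load.
 */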
void ath9k_fill_chanctx_ops(void)
{
        if (!ath9k_is_chanctx_enabled())
                return;

        ath9k_ops.hw_scan = ath9k_hw_scan;
        ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
        ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
        ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
        ath9k_ops.add_chanctx = ath9k_add_chanctx;
        ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
        ath9k_ops.change_chanctx = ath9k_change_chanctx;
        ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
        ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
        ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
}

#endif

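/*
 * Report the current TX power for the interface's channel context.
 * The stored value is in half-dBm steps, hence the division by two.
 */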
static int ath9k_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                             int *dbm)
{
        struct ath_softc *sc = hw->priv;
        struct ath_vif *avp = (void *)vif->drv_priv;

        mutex_lock(&sc->mutex);
        if (avp->chanctx)
                *dbm = avp->chanctx->cur_txpower;
        else
                *dbm = sc->cur_chan->cur_txpower;
        mutex_unlock(&sc->mutex);

        *dbm /= 2;

        return 0;
}

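/*
 * The mac80211 driver operations for ath9k. Channel context handlers
 * are filled in at runtime by ath9k_fill_chanctx_ops().
 */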
struct ieee80211_ops ath9k_ops = {
        .tx = ath9k_tx,
        .start = ath9k_start,
        .stop = ath9k_stop,
        .add_interface = ath9k_add_interface,
        .change_interface = ath9k_change_interface,
        .remove_interface = ath9k_remove_interface,
        .config = ath9k_config,
        .configure_filter = ath9k_configure_filter,
        .sta_state = ath9k_sta_state,
        .sta_notify = ath9k_sta_notify,
        .conf_tx = ath9k_conf_tx,
        .bss_info_changed = ath9k_bss_info_changed,
        .set_key = ath9k_set_key,
        .get_tsf = ath9k_get_tsf,
        .set_tsf = ath9k_set_tsf,
        .reset_tsf = ath9k_reset_tsf,
        .ampdu_action = ath9k_ampdu_action,
        .get_survey = ath9k_get_survey,
        .rfkill_poll = ath9k_rfkill_poll_state,
        .set_coverage_class = ath9k_set_coverage_class,
        .flush = ath9k_flush,
        .tx_frames_pending = ath9k_tx_frames_pending,
        .tx_last_beacon = ath9k_tx_last_beacon,
        .release_buffered_frames = ath9k_release_buffered_frames,
        .get_stats = ath9k_get_stats,
        .set_antenna = ath9k_set_antenna,
        .get_antenna = ath9k_get_antenna,

#ifdef CONFIG_ATH9K_WOW
        .suspend = ath9k_suspend,
        .resume = ath9k_resume,
        .set_wakeup = ath9k_set_wakeup,
#endif

#ifdef CONFIG_ATH9K_DEBUGFS
        .get_et_sset_count = ath9k_get_et_sset_count,
        .get_et_stats = ath9k_get_et_stats,
        .get_et_strings = ath9k_get_et_strings,
#endif

#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_STATION_STATISTICS)
        .sta_add_debugfs = ath9k_sta_add_debugfs,
#endif
        .sw_scan_start = ath9k_sw_scan_start,
        .sw_scan_complete = ath9k_sw_scan_complete,
        .get_txpower = ath9k_get_txpower,
};