1
2 /*
3 * This file is part of wlcore
4 *
5 * Copyright (C) 2008-2010 Nokia Corporation
6 * Copyright (C) 2011-2013 Texas Instruments Inc.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24 #include <linux/module.h>
25 #include <linux/firmware.h>
26 #include <linux/etherdevice.h>
27 #include <linux/vmalloc.h>
28 #include <linux/interrupt.h>
29 #include <linux/irq.h>
30
31 #include "wlcore.h"
32 #include "debug.h"
33 #include "wl12xx_80211.h"
34 #include "io.h"
35 #include "tx.h"
36 #include "ps.h"
37 #include "init.h"
38 #include "debugfs.h"
39 #include "testmode.h"
40 #include "vendor_cmd.h"
41 #include "scan.h"
42 #include "hw_ops.h"
43 #include "sysfs.h"
44
#define WL1271_BOOT_RETRIES 3

/*
 * Optional module parameters overriding the platform defaults.
 * -1 / NULL means "not set on the command line"; see wlcore_adjust_conf().
 * (The module_param() registrations are presumably further down in this
 * file — not visible in this chunk.)
 */
static char *fwlog_param;
static int fwlog_mem_blocks = -1;
static int bug_on_recovery = -1;
static int no_recovery     = -1;

/* Forward declarations for helpers used before their definitions. */
static void __wl1271_op_remove_interface(struct wl1271 *wl,
					 struct ieee80211_vif *vif,
					 bool reset_tx_queues);
static void wlcore_op_stop_locked(struct wl1271 *wl);
static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
57
/*
 * Tell the firmware that the STA role is fully connected (authorized).
 *
 * Sends the peer-state command for the station link exactly once per
 * association; repeat calls are no-ops until WLVIF_FLAG_STA_STATE_SENT
 * is cleared again.  Returns 0 on success or a negative error code.
 */
static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret;

	/* only meaningful for station interfaces */
	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
		return -EINVAL;

	/* nothing to report before we are associated */
	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		return 0;

	/* the state was already sent to the FW - don't send it twice */
	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
		return 0;

	ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
	if (ret < 0)
		return ret;

	wl1271_info("Association completed.");
	return 0;
}
78
/*
 * Regulatory notifier callback: cache the DFS region reported by
 * cfg80211 and push the updated regulatory domain to the firmware.
 */
static void wl1271_reg_notify(struct wiphy *wiphy,
			      struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct wl1271 *wl = hw->priv;

	/* copy the current dfs region */
	if (request)
		wl->dfs_region = request->dfs_region;

	wlcore_regdomain_config(wl);
}
91
/*
 * Enable/disable RX streaming in the firmware and mirror the result in
 * the vif's RX_STREAMING_STARTED flag.  Caller must hold wl->mutex.
 * Returns the ACX command status (0 on success, negative on error).
 */
static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				   bool enable)
{
	int ret = 0;

	/* we should hold wl->mutex */
	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
	if (ret < 0)
		goto out;

	/* only update the host-side flag once the FW accepted the command */
	if (enable)
		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
	else
		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
out:
	return ret;
}
109
/*
 * this function is being called when the rx_streaming interval
 * has been changed or rx_streaming should be disabled
 */
int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret = 0;
	int period = wl->conf.rx_streaming.interval;

	/* don't reconfigure if rx_streaming is disabled */
	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
		goto out;

	/* reconfigure/disable according to new streaming_period */
	if (period &&
	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
	    (wl->conf.rx_streaming.always ||
	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
		ret = wl1271_set_rx_streaming(wl, wlvif, true);
	else {
		ret = wl1271_set_rx_streaming(wl, wlvif, false);
		/* don't cancel_work_sync since we might deadlock */
		del_timer_sync(&wlvif->rx_streaming_timer);
	}
out:
	return ret;
}
137
/*
 * Work item: turn RX streaming on for a vif.
 *
 * Re-checks all preconditions under wl->mutex (not already started,
 * still associated, always-on policy or soft-gemini active, non-zero
 * interval) before waking the chip and sending the enable command.
 * On success, (re)arms the inactivity timer that later disables
 * streaming again.
 */
static void wl1271_rx_streaming_enable_work(struct work_struct *work)
{
	int ret;
	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
						rx_streaming_enable_work);
	struct wl1271 *wl = wlvif->wl;

	mutex_lock(&wl->mutex);

	/* bail out if streaming is already on or no longer wanted */
	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
	    (!wl->conf.rx_streaming.always &&
	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
		goto out;

	if (!wl->conf.rx_streaming.interval)
		goto out;

	/* the chip must be awake before we talk to the FW */
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl1271_set_rx_streaming(wl, wlvif, true);
	if (ret < 0)
		goto out_sleep;

	/* stop it after some time of inactivity */
	mod_timer(&wlvif->rx_streaming_timer,
		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
173
/*
 * Work item: turn RX streaming off for a vif (queued by the inactivity
 * timer).  No-op if streaming is not currently started.
 */
static void wl1271_rx_streaming_disable_work(struct work_struct *work)
{
	int ret;
	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
						rx_streaming_disable_work);
	struct wl1271 *wl = wlvif->wl;

	mutex_lock(&wl->mutex);

	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
		goto out;

	/* the chip must be awake before we talk to the FW */
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl1271_set_rx_streaming(wl, wlvif, false);
	if (ret)
		goto out_sleep;

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
199
wl1271_rx_streaming_timer(unsigned long data)200 static void wl1271_rx_streaming_timer(unsigned long data)
201 {
202 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
203 struct wl1271 *wl = wlvif->wl;
204 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
205 }
206
207 /* wl->mutex must be taken */
wl12xx_rearm_tx_watchdog_locked(struct wl1271 * wl)208 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
209 {
210 /* if the watchdog is not armed, don't do anything */
211 if (wl->tx_allocated_blocks == 0)
212 return;
213
214 cancel_delayed_work(&wl->tx_watchdog_work);
215 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
216 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
217 }
218
/*
 * Work item: forward a mac80211 station rate-control update to the
 * hardware-specific handler.  Skipped when the core is not fully ON.
 */
static void wlcore_rc_update_work(struct work_struct *work)
{
	int ret;
	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
						rc_update_work);
	struct wl1271 *wl = wlvif->wl;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* the chip must be awake for the HW op below */
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wlcore_hw_sta_rc_update(wl, wlvif);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
241
/*
 * Tx watchdog: fires when no Tx completion was reported by the FW for
 * tx_watchdog_timeout ms while blocks are still allocated.  Benign
 * causes (ROC in progress, active scan, AP buffering for PS stations)
 * only re-arm the watchdog; otherwise Tx is considered stuck and a
 * full FW recovery is queued.
 */
static void wl12xx_tx_watchdog_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;

	dwork = container_of(work, struct delayed_work, work);
	wl = container_of(dwork, struct wl1271, tx_watchdog_work);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* Tx went out in the meantime - everything is ok */
	if (unlikely(wl->tx_allocated_blocks == 0))
		goto out;

	/*
	 * if a ROC is in progress, we might not have any Tx for a long
	 * time (e.g. pending Tx on the non-ROC channels)
	 */
	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
			     wl->conf.tx.tx_watchdog_timeout);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	/*
	 * if a scan is in progress, we might not have any Tx for a long
	 * time
	 */
	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
			     wl->conf.tx.tx_watchdog_timeout);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	/*
	 * AP might cache a frame for a long time for a sleeping station,
	 * so rearm the timer if there's an AP interface with stations. If
	 * Tx is genuinely stuck we will most hopefully discover it when all
	 * stations are removed due to inactivity.
	 */
	if (wl->active_sta_count) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
			     " %d stations",
			      wl->conf.tx.tx_watchdog_timeout,
			      wl->active_sta_count);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
		     wl->conf.tx.tx_watchdog_timeout);
	wl12xx_queue_recovery_work(wl);

out:
	mutex_unlock(&wl->mutex);
}
303
wlcore_adjust_conf(struct wl1271 * wl)304 static void wlcore_adjust_conf(struct wl1271 *wl)
305 {
306 /* Adjust settings according to optional module parameters */
307
308 /* Firmware Logger params */
309 if (fwlog_mem_blocks != -1) {
310 if (fwlog_mem_blocks >= CONF_FWLOG_MIN_MEM_BLOCKS &&
311 fwlog_mem_blocks <= CONF_FWLOG_MAX_MEM_BLOCKS) {
312 wl->conf.fwlog.mem_blocks = fwlog_mem_blocks;
313 } else {
314 wl1271_error(
315 "Illegal fwlog_mem_blocks=%d using default %d",
316 fwlog_mem_blocks, wl->conf.fwlog.mem_blocks);
317 }
318 }
319
320 if (fwlog_param) {
321 if (!strcmp(fwlog_param, "continuous")) {
322 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
323 } else if (!strcmp(fwlog_param, "ondemand")) {
324 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
325 } else if (!strcmp(fwlog_param, "dbgpins")) {
326 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
327 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
328 } else if (!strcmp(fwlog_param, "disable")) {
329 wl->conf.fwlog.mem_blocks = 0;
330 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
331 } else {
332 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
333 }
334 }
335
336 if (bug_on_recovery != -1)
337 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
338
339 if (no_recovery != -1)
340 wl->conf.recovery.no_recovery = (u8) no_recovery;
341 }
342
/*
 * Regulate the host-side (high-level) PS state of a single AP link,
 * based on the FW's per-link PS bit and the number of packets the FW
 * still holds for it.
 */
static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
					struct wl12xx_vif *wlvif,
					u8 hlid, u8 tx_pkts)
{
	bool fw_ps;

	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);

	/*
	 * Wake up from high level PS if the STA is asleep with too little
	 * packets in FW or if the STA is awake.
	 */
	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
		wl12xx_ps_link_end(wl, wlvif, hlid);

	/*
	 * Start high-level PS if the STA is asleep with enough blocks in FW.
	 * Make an exception if this is the only connected link. In this
	 * case FW-memory congestion is less of a problem.
	 * Note that a single connected STA means 2*ap_count + 1 active links,
	 * since we must account for the global and broadcast AP links
	 * for each AP. The "fw_ps" check assures us the other link is a STA
	 * connected to the AP. Otherwise the FW would not set the PSM bit.
	 */
	else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
		 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
		wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
371
/*
 * Refresh the cached FW link-PS bitmap and re-evaluate high-level PS
 * for every station link of the given AP vif.
 */
static void wl12xx_irq_update_links_status(struct wl1271 *wl,
					   struct wl12xx_vif *wlvif,
					   struct wl_fw_status *status)
{
	unsigned long cur_fw_ps_map;
	u8 hlid;

	cur_fw_ps_map = status->link_ps_bitmap;
	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
		wl1271_debug(DEBUG_PSM,
			     "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
			     wl->ap_fw_ps_map, cur_fw_ps_map,
			     wl->ap_fw_ps_map ^ cur_fw_ps_map);

		wl->ap_fw_ps_map = cur_fw_ps_map;
	}

	/* per-link PS regulation, using the FW's allocated-packets count */
	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
					    wl->links[hlid].allocated_pkts);
}
393
/*
 * Read and process the firmware status area.
 *
 * Pulls the raw status block from the chip, converts it to the host
 * layout, and updates all host-side accounting derived from it:
 * per-queue and per-link freed-packet counters, the total Tx block
 * pool, the Tx watchdog, AP link PS state and the host/chipset time
 * offset.  Returns 0 on success or a negative I/O error.
 */
static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
{
	struct wl12xx_vif *wlvif;
	struct timespec ts;
	u32 old_tx_blk_count = wl->tx_blocks_available;
	int avail, freed_blocks;
	int i;
	int ret;
	struct wl1271_link *lnk;

	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
				   wl->raw_fw_status,
				   wl->fw_status_len, false);
	if (ret < 0)
		return ret;

	/* convert the chip-specific layout into struct wl_fw_status */
	wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);

	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
		     "drv_rx_counter = %d, tx_results_counter = %d)",
		     status->intr,
		     status->fw_rx_counter,
		     status->drv_rx_counter,
		     status->tx_results_counter);

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		/* prevent wrap-around in freed-packets counter */
		wl->tx_allocated_pkts[i] -=
				(status->counters.tx_released_pkts[i] -
				wl->tx_pkts_freed[i]) & 0xff;

		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
	}


	for_each_set_bit(i, wl->links_map, wl->num_links) {
		u8 diff;
		lnk = &wl->links[i];

		/* prevent wrap-around in freed-packets counter */
		diff = (status->counters.tx_lnk_free_pkts[i] -
		       lnk->prev_freed_pkts) & 0xff;

		if (diff == 0)
			continue;

		lnk->allocated_pkts -= diff;
		lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];

		/* accumulate the prev_freed_pkts counter */
		lnk->total_freed_pkts += diff;
	}

	/* prevent wrap-around in total blocks counter */
	if (likely(wl->tx_blocks_freed <= status->total_released_blks))
		freed_blocks = status->total_released_blks -
			       wl->tx_blocks_freed;
	else
		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
			       status->total_released_blks;

	wl->tx_blocks_freed = status->total_released_blks;

	wl->tx_allocated_blocks -= freed_blocks;

	/*
	 * If the FW freed some blocks:
	 * If we still have allocated blocks - re-arm the timer, Tx is
	 * not stuck. Otherwise, cancel the timer (no Tx currently).
	 */
	if (freed_blocks) {
		if (wl->tx_allocated_blocks)
			wl12xx_rearm_tx_watchdog_locked(wl);
		else
			cancel_delayed_work(&wl->tx_watchdog_work);
	}

	avail = status->tx_total - wl->tx_allocated_blocks;

	/*
	 * The FW might change the total number of TX memblocks before
	 * we get a notification about blocks being released. Thus, the
	 * available blocks calculation might yield a temporary result
	 * which is lower than the actual available blocks. Keeping in
	 * mind that only blocks that were allocated can be moved from
	 * TX to RX, tx_blocks_available should never decrease here.
	 */
	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
				      avail);

	/* if more blocks are available now, tx work can be scheduled */
	if (wl->tx_blocks_available > old_tx_blk_count)
		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);

	/* for AP update num of allocated TX blocks per link and ps status */
	wl12xx_for_each_wlvif_ap(wl, wlvif) {
		wl12xx_irq_update_links_status(wl, wlvif, status);
	}

	/* update the host-chipset time offset */
	getnstimeofday(&ts);
	wl->time_offset = (timespec_to_ns(&ts) >> 10) -
		(s64)(status->fw_localtime);

	wl->fw_fast_lnk_map = status->link_fast_bitmap;

	return 0;
}
502
wl1271_flush_deferred_work(struct wl1271 * wl)503 static void wl1271_flush_deferred_work(struct wl1271 *wl)
504 {
505 struct sk_buff *skb;
506
507 /* Pass all received frames to the network stack */
508 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
509 ieee80211_rx_ni(wl->hw, skb);
510
511 /* Return sent skbs to the network stack */
512 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
513 ieee80211_tx_status_ni(wl->hw, skb);
514 }
515
wl1271_netstack_work(struct work_struct * work)516 static void wl1271_netstack_work(struct work_struct *work)
517 {
518 struct wl1271 *wl =
519 container_of(work, struct wl1271, netstack_work);
520
521 do {
522 wl1271_flush_deferred_work(wl);
523 } while (skb_queue_len(&wl->deferred_rx_queue));
524 }
525
/* upper bound on status-read iterations per invocation */
#define WL1271_IRQ_MAX_LOOPS 256

/*
 * Main interrupt processing loop; called with wl->mutex held from the
 * threaded IRQ handler.
 *
 * Repeatedly reads the FW status and dispatches data/event interrupts
 * until none are pending, the loop budget runs out, or an error
 * occurs.  Watchdog interrupts abort with -EIO so the caller can
 * trigger recovery.  Returns 0 on success or a negative error code.
 */
static int wlcore_irq_locked(struct wl1271 *wl)
{
	int ret = 0;
	u32 intr;
	int loopcount = WL1271_IRQ_MAX_LOOPS;
	bool done = false;
	unsigned int defer_count;
	unsigned long flags;

	/*
	 * In case edge triggered interrupt must be used, we cannot iterate
	 * more than once without introducing race conditions with the hardirq.
	 */
	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
		loopcount = 1;

	wl1271_debug(DEBUG_IRQ, "IRQ work");

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	while (!done && loopcount--) {
		/*
		 * In order to avoid a race with the hardirq, clear the flag
		 * before acknowledging the chip. Since the mutex is held,
		 * wl1271_ps_elp_wakeup cannot be called concurrently.
		 */
		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
		smp_mb__after_atomic();

		ret = wlcore_fw_status(wl, wl->fw_status);
		if (ret < 0)
			goto out;

		wlcore_hw_tx_immediate_compl(wl);

		intr = wl->fw_status->intr;
		intr &= WLCORE_ALL_INTR_MASK;
		if (!intr) {
			done = true;
			continue;
		}

		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
			wl1271_error("HW watchdog interrupt received! starting recovery.");
			wl->watchdog_recovery = true;
			ret = -EIO;

			/* restarting the chip. ignore any other interrupt. */
			goto out;
		}

		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
			wl1271_error("SW watchdog interrupt received! "
				     "starting recovery.");
			wl->watchdog_recovery = true;
			ret = -EIO;

			/* restarting the chip. ignore any other interrupt. */
			goto out;
		}

		if (likely(intr & WL1271_ACX_INTR_DATA)) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");

			ret = wlcore_rx(wl, wl->fw_status);
			if (ret < 0)
				goto out;

			/* Check if any tx blocks were freed */
			spin_lock_irqsave(&wl->wl_lock, flags);
			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
			    wl1271_tx_total_queue_count(wl) > 0) {
				spin_unlock_irqrestore(&wl->wl_lock, flags);
				/*
				 * In order to avoid starvation of the TX path,
				 * call the work function directly.
				 */
				ret = wlcore_tx_work_locked(wl);
				if (ret < 0)
					goto out;
			} else {
				spin_unlock_irqrestore(&wl->wl_lock, flags);
			}

			/* check for tx results */
			ret = wlcore_hw_tx_delayed_compl(wl);
			if (ret < 0)
				goto out;

			/* Make sure the deferred queues don't get too long */
			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
				      skb_queue_len(&wl->deferred_rx_queue);
			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
				wl1271_flush_deferred_work(wl);
		}

		if (intr & WL1271_ACX_INTR_EVENT_A) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
			ret = wl1271_event_handle(wl, 0);
			if (ret < 0)
				goto out;
		}

		if (intr & WL1271_ACX_INTR_EVENT_B) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
			ret = wl1271_event_handle(wl, 1);
			if (ret < 0)
				goto out;
		}

		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
			wl1271_debug(DEBUG_IRQ,
				     "WL1271_ACX_INTR_INIT_COMPLETE");

		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
	}

	wl1271_ps_elp_sleep(wl);

out:
	return ret;
}
656
/*
 * Threaded IRQ handler.
 *
 * First completes a pending ELP wakeup if someone is waiting on this
 * interrupt.  While suspended, only marks the work as pending and
 * disables the IRQ, leaving processing to the resume path.  Otherwise
 * handles the interrupt inline under wl->mutex; a failure queues a
 * full FW recovery.
 */
static irqreturn_t wlcore_irq(int irq, void *cookie)
{
	int ret;
	unsigned long flags;
	struct wl1271 *wl = cookie;

	/* complete the ELP completion */
	spin_lock_irqsave(&wl->wl_lock, flags);
	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
	if (wl->elp_compl) {
		complete(wl->elp_compl);
		wl->elp_compl = NULL;
	}

	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
		/* don't enqueue a work right now. mark it as pending */
		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
		disable_irq_nosync(wl->irq);
		pm_wakeup_event(wl->dev, 0);
		spin_unlock_irqrestore(&wl->wl_lock, flags);
		return IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	/* TX might be handled here, avoid redundant work */
	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
	cancel_work_sync(&wl->tx_work);

	mutex_lock(&wl->mutex);

	ret = wlcore_irq_locked(wl);
	if (ret)
		wl12xx_queue_recovery_work(wl);

	spin_lock_irqsave(&wl->wl_lock, flags);
	/* In case TX was not handled here, queue TX work */
	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
	    wl1271_tx_total_queue_count(wl) > 0)
		ieee80211_queue_work(wl->hw, &wl->tx_work);
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	mutex_unlock(&wl->mutex);

	return IRQ_HANDLED;
}
704
/* Accumulator for wl12xx_vif_count_iter() / wl12xx_get_vif_count(). */
struct vif_counter_data {
	u8 counter;			/* number of active vifs seen */

	struct ieee80211_vif *cur_vif;	/* vif we are looking for */
	bool cur_vif_running;		/* true if cur_vif is among them */
};
711
/*
 * Active-interface iterator callback: count each vif and note whether
 * the vif we are interested in is among the active ones.
 */
static void wl12xx_vif_count_iter(void *data, u8 *mac,
				  struct ieee80211_vif *vif)
{
	struct vif_counter_data *counter = data;

	++counter->counter;
	if (vif == counter->cur_vif)
		counter->cur_vif_running = true;
}
721
722 /* caller must not hold wl->mutex, as it might deadlock */
wl12xx_get_vif_count(struct ieee80211_hw * hw,struct ieee80211_vif * cur_vif,struct vif_counter_data * data)723 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
724 struct ieee80211_vif *cur_vif,
725 struct vif_counter_data *data)
726 {
727 memset(data, 0, sizeof(*data));
728 data->cur_vif = cur_vif;
729
730 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
731 wl12xx_vif_count_iter, data);
732 }
733
/*
 * Request the firmware image appropriate for the current mode (PLT,
 * multi-role or single-role) and cache a copy in wl->fw.
 *
 * Does nothing when the right firmware type is already loaded.  The
 * image size must be a multiple of 4 bytes.  Returns 0 on success or
 * a negative error code.
 */
static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
{
	const struct firmware *fw;
	const char *fw_name;
	enum wl12xx_fw_type fw_type;
	int ret;

	if (plt) {
		fw_type = WL12XX_FW_TYPE_PLT;
		fw_name = wl->plt_fw_name;
	} else {
		/*
		 * we can't call wl12xx_get_vif_count() here because
		 * wl->mutex is taken, so use the cached last_vif_count value
		 */
		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
			fw_type = WL12XX_FW_TYPE_MULTI;
			fw_name = wl->mr_fw_name;
		} else {
			fw_type = WL12XX_FW_TYPE_NORMAL;
			fw_name = wl->sr_fw_name;
		}
	}

	/* the requested firmware type is already loaded - nothing to do */
	if (wl->fw_type == fw_type)
		return 0;

	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);

	ret = request_firmware(&fw, fw_name, wl->dev);

	if (ret < 0) {
		wl1271_error("could not get firmware %s: %d", fw_name, ret);
		return ret;
	}

	if (fw->size % 4) {
		wl1271_error("firmware size is not multiple of 32 bits: %zu",
			     fw->size);
		ret = -EILSEQ;
		goto out;
	}

	/* drop any previously cached image before copying the new one */
	vfree(wl->fw);
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	wl->fw_len = fw->size;
	wl->fw = vmalloc(wl->fw_len);

	if (!wl->fw) {
		wl1271_error("could not allocate memory for the firmware");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(wl->fw, fw->data, wl->fw_len);
	ret = 0;
	wl->fw_type = fw_type;
out:
	release_firmware(fw);

	return ret;
}
796
/*
 * Queue a firmware recovery, unless one is already in progress.
 *
 * Moves the core to RESTARTING (making any further invocation a
 * no-op), wakes the chip, masks further interrupts and schedules the
 * actual recovery work.
 */
void wl12xx_queue_recovery_work(struct wl1271 *wl)
{
	/* Avoid a recursive recovery */
	if (wl->state == WLCORE_STATE_ON) {
		/* unexpected unless the recovery was intentionally forced */
		WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
				  &wl->flags));

		wl->state = WLCORE_STATE_RESTARTING;
		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
		wl1271_ps_elp_wakeup(wl);
		wlcore_disable_interrupts_nosync(wl);
		ieee80211_queue_work(wl->hw, &wl->recovery_work);
	}
}
811
/*
 * Append a chunk of firmware log data to the host fwlog buffer
 * (consumed by the sysfs fwlog entry).  The copy is clamped to the
 * space remaining in the single-page buffer; the number of bytes
 * actually copied is returned.
 */
size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
{
	size_t room = PAGE_SIZE - wl->fwlog_size;
	size_t len = (maxlen < room) ? maxlen : room;

	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
	wl->fwlog_size += len;

	return len;
}
825
/*
 * Dump the firmware log after a FW panic into the host fwlog buffer.
 *
 * Walks the firmware's linked list of log memory blocks, copying each
 * block into the host buffer, then wakes any reader blocked on
 * fwlog_waitq.  The current memory partition is saved and restored
 * around the traversal.  No-op if fwlog is disabled or unimplemented.
 */
static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
{
	struct wlcore_partition_set part, old_part;
	u32 addr;
	u32 offset;
	u32 end_of_log;
	u8 *block;
	int ret;

	if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
	    (wl->conf.fwlog.mem_blocks == 0))
		return;

	wl1271_info("Reading FW panic log");

	block = kmalloc(wl->fw_mem_block_size, GFP_KERNEL);
	if (!block)
		return;

	/*
	 * Make sure the chip is awake and the logger isn't active.
	 * Do not send a stop fwlog command if the fw is hanged or if
	 * dbgpins are used (due to some fw bug).
	 */
	if (wl1271_ps_elp_wakeup(wl))
		goto out;
	if (!wl->watchdog_recovery &&
	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
		wl12xx_cmd_stop_fwlog(wl);

	/* Read the first memory block address */
	ret = wlcore_fw_status(wl, wl->fw_status);
	if (ret < 0)
		goto out;

	addr = wl->fw_status->log_start_addr;
	if (!addr)
		goto out;

	/* choose the traversal end condition depending on the log mode */
	if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
		offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
		end_of_log = wl->fwlog_end;
	} else {
		offset = sizeof(addr);
		end_of_log = addr;
	}

	old_part = wl->curr_part;
	memset(&part, 0, sizeof(part));

	/* Traverse the memory blocks linked list */
	do {
		part.mem.start = wlcore_hw_convert_hwaddr(wl, addr);
		part.mem.size  = PAGE_SIZE;

		ret = wlcore_set_partition(wl, &part);
		if (ret < 0) {
			wl1271_error("%s: set_partition start=0x%X size=%d",
				__func__, part.mem.start, part.mem.size);
			goto out;
		}

		memset(block, 0, wl->fw_mem_block_size);
		ret = wlcore_read_hwaddr(wl, addr, block,
					wl->fw_mem_block_size, false);

		if (ret < 0)
			goto out;

		/*
		 * Memory blocks are linked to one another. The first 4 bytes
		 * of each memory block hold the hardware address of the next
		 * one. The last memory block points to the first one in
		 * on demand mode and is equal to 0x2000000 in continuous mode.
		 */
		addr = le32_to_cpup((__le32 *)block);

		if (!wl12xx_copy_fwlog(wl, block + offset,
				       wl->fw_mem_block_size - offset))
			break;
	} while (addr && (addr != end_of_log));

	wake_up_interruptible(&wl->fwlog_waitq);

out:
	kfree(block);
	wlcore_set_partition(wl, &old_part);
}
914
/*
 * Preserve the link's accumulated Tx freed-packets count in the
 * station's driver-private data, so the Tx sequence numbering can
 * continue correctly across a disconnect or FW recovery.
 */
static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				   u8 hlid, struct ieee80211_sta *sta)
{
	struct wl1271_station *wl_sta;
	u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;

	wl_sta = (void *)sta->drv_priv;
	wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;

	/*
	 * increment the initial seq number on recovery to account for
	 * transmitted packets that we haven't yet got in the FW status
	 */
	if (wlvif->encryption_type == KEY_GEM)
		sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;

	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
		wl_sta->total_freed_pkts += sqn_recovery_padding;
}
934
/*
 * Like wlcore_save_freed_pkts(), but looks the station up by its MAC
 * address under RCU.  Silently does nothing if the station is gone.
 */
static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
					struct wl12xx_vif *wlvif,
					u8 hlid, const u8 *addr)
{
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

	if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
		    is_zero_ether_addr(addr)))
		return;

	/* sta is RCU-protected; only dereference inside this section */
	rcu_read_lock();
	sta = ieee80211_find_sta(vif, addr);
	if (sta)
		wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
	rcu_read_unlock();
}
952
wlcore_print_recovery(struct wl1271 * wl)953 static void wlcore_print_recovery(struct wl1271 *wl)
954 {
955 u32 pc = 0;
956 u32 hint_sts = 0;
957 int ret;
958
959 wl1271_info("Hardware recovery in progress. FW ver: %s",
960 wl->chip.fw_ver_str);
961
962 /* change partitions momentarily so we can read the FW pc */
963 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
964 if (ret < 0)
965 return;
966
967 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
968 if (ret < 0)
969 return;
970
971 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
972 if (ret < 0)
973 return;
974
975 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
976 pc, hint_sts, ++wl->recovery_count);
977
978 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
979 }
980
981
/*
 * Work item: perform a full firmware recovery.
 *
 * For unintended recoveries, first dumps the FW panic log and debug
 * state.  Then saves per-station Tx counters, removes all interfaces,
 * stops the core and asks mac80211 to restart the hardware.  Honors
 * the bug_on_recovery and no_recovery configuration overrides.
 */
static void wl1271_recovery_work(struct work_struct *work)
{
	struct wl1271 *wl =
		container_of(work, struct wl1271, recovery_work);
	struct wl12xx_vif *wlvif;
	struct ieee80211_vif *vif;

	mutex_lock(&wl->mutex);

	if (wl->state == WLCORE_STATE_OFF || wl->plt)
		goto out_unlock;

	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
			wl12xx_read_fwlog_panic(wl);
		wlcore_print_recovery(wl);
	}

	/* optional debug aid: halt the kernel on unintended recoveries */
	BUG_ON(wl->conf.recovery.bug_on_recovery &&
	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));

	if (wl->conf.recovery.no_recovery) {
		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
		goto out_unlock;
	}

	/* Prevent spurious TX during FW restart */
	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);

	/* reboot the chipset */
	while (!list_empty(&wl->wlvif_list)) {
		wlvif = list_first_entry(&wl->wlvif_list,
				       struct wl12xx_vif, list);
		vif = wl12xx_wlvif_to_vif(wlvif);

		/* keep STA Tx sequence numbers across the restart */
		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
			wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
						    vif->bss_conf.bssid);
		}

		__wl1271_op_remove_interface(wl, vif, false);
	}

	wlcore_op_stop_locked(wl);

	ieee80211_restart_hw(wl->hw);

	/*
	 * Its safe to enable TX now - the queues are stopped after a request
	 * to restart the HW.
	 */
	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);

out_unlock:
	wl->watchdog_recovery = false;
	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
	mutex_unlock(&wl->mutex);
}
1041
/*
 * Kick the ELP control register so the firmware exits its low-power
 * (ELP) state.  Returns the raw bus write status.
 */
static int wlcore_fw_wakeup(struct wl1271 *wl)
{
	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
}
1046
wl1271_setup(struct wl1271 * wl)1047 static int wl1271_setup(struct wl1271 *wl)
1048 {
1049 wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1050 if (!wl->raw_fw_status)
1051 goto err;
1052
1053 wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1054 if (!wl->fw_status)
1055 goto err;
1056
1057 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1058 if (!wl->tx_res_if)
1059 goto err;
1060
1061 return 0;
1062 err:
1063 kfree(wl->fw_status);
1064 kfree(wl->raw_fw_status);
1065 return -ENOMEM;
1066 }
1067
wl12xx_set_power_on(struct wl1271 * wl)1068 static int wl12xx_set_power_on(struct wl1271 *wl)
1069 {
1070 int ret;
1071
1072 msleep(WL1271_PRE_POWER_ON_SLEEP);
1073 ret = wl1271_power_on(wl);
1074 if (ret < 0)
1075 goto out;
1076 msleep(WL1271_POWER_ON_SLEEP);
1077 wl1271_io_reset(wl);
1078 wl1271_io_init(wl);
1079
1080 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1081 if (ret < 0)
1082 goto fail;
1083
1084 /* ELP module wake up */
1085 ret = wlcore_fw_wakeup(wl);
1086 if (ret < 0)
1087 goto fail;
1088
1089 out:
1090 return ret;
1091
1092 fail:
1093 wl1271_power_off(wl);
1094 return ret;
1095 }
1096
/*
 * Power up the chip, configure the bus block size, allocate runtime
 * buffers and fetch the firmware image appropriate for @plt.
 *
 * Returns 0 on success or a negative error code from the first step
 * that failed.
 */
static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
{
	int ret;

	ret = wl12xx_set_power_on(wl);
	if (ret < 0)
		return ret;

	/*
	 * For wl127x based devices we could use the default block
	 * size (512 bytes), but due to a bug in the sdio driver, we
	 * need to set it explicitly after the chip is powered on. To
	 * simplify the code and since the performance impact is
	 * negligible, we use the same block size for all different
	 * chip types.
	 *
	 * Check if the bus supports blocksize alignment and, if it
	 * doesn't, make sure we don't have the quirk.
	 */
	if (!wl1271_set_block_size(wl))
		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;

	/* TODO: make sure the lower driver has set things up correctly */

	ret = wl1271_setup(wl);
	if (ret < 0)
		return ret;

	return wl12xx_fetch_firmware(wl, plt);
}
1132
wl1271_plt_start(struct wl1271 * wl,const enum plt_mode plt_mode)1133 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1134 {
1135 int retries = WL1271_BOOT_RETRIES;
1136 struct wiphy *wiphy = wl->hw->wiphy;
1137
1138 static const char* const PLT_MODE[] = {
1139 "PLT_OFF",
1140 "PLT_ON",
1141 "PLT_FEM_DETECT",
1142 "PLT_CHIP_AWAKE"
1143 };
1144
1145 int ret;
1146
1147 mutex_lock(&wl->mutex);
1148
1149 wl1271_notice("power up");
1150
1151 if (wl->state != WLCORE_STATE_OFF) {
1152 wl1271_error("cannot go into PLT state because not "
1153 "in off state: %d", wl->state);
1154 ret = -EBUSY;
1155 goto out;
1156 }
1157
1158 /* Indicate to lower levels that we are now in PLT mode */
1159 wl->plt = true;
1160 wl->plt_mode = plt_mode;
1161
1162 while (retries) {
1163 retries--;
1164 ret = wl12xx_chip_wakeup(wl, true);
1165 if (ret < 0)
1166 goto power_off;
1167
1168 if (plt_mode != PLT_CHIP_AWAKE) {
1169 ret = wl->ops->plt_init(wl);
1170 if (ret < 0)
1171 goto power_off;
1172 }
1173
1174 wl->state = WLCORE_STATE_ON;
1175 wl1271_notice("firmware booted in PLT mode %s (%s)",
1176 PLT_MODE[plt_mode],
1177 wl->chip.fw_ver_str);
1178
1179 /* update hw/fw version info in wiphy struct */
1180 wiphy->hw_version = wl->chip.id;
1181 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1182 sizeof(wiphy->fw_version));
1183
1184 goto out;
1185
1186 power_off:
1187 wl1271_power_off(wl);
1188 }
1189
1190 wl->plt = false;
1191 wl->plt_mode = PLT_OFF;
1192
1193 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1194 WL1271_BOOT_RETRIES);
1195 out:
1196 mutex_unlock(&wl->mutex);
1197
1198 return ret;
1199 }
1200
/*
 * Power down a chip previously brought up in PLT mode by
 * wl1271_plt_start().  Returns 0 on success or -EBUSY when the device
 * is not in PLT mode.
 */
int wl1271_plt_stop(struct wl1271 *wl)
{
	int ret = 0;

	wl1271_notice("power down");

	/*
	 * Interrupts must be disabled before setting the state to OFF.
	 * Otherwise, the interrupt handler might be called and exit without
	 * reading the interrupt status.
	 */
	wlcore_disable_interrupts(wl);
	mutex_lock(&wl->mutex);
	if (!wl->plt) {
		mutex_unlock(&wl->mutex);

		/*
		 * This will not necessarily enable interrupts as interrupts
		 * may have been disabled when op_stop was called. It will,
		 * however, balance the above call to disable_interrupts().
		 */
		wlcore_enable_interrupts(wl);

		wl1271_error("cannot power down because not in PLT "
			     "state: %d", wl->state);
		ret = -EBUSY;
		goto out;
	}

	/* drop the mutex before synchronously cancelling work items */
	mutex_unlock(&wl->mutex);

	wl1271_flush_deferred_work(wl);
	cancel_work_sync(&wl->netstack_work);
	cancel_work_sync(&wl->recovery_work);
	cancel_delayed_work_sync(&wl->elp_work);
	cancel_delayed_work_sync(&wl->tx_watchdog_work);

	/* now actually power off and reset the software state */
	mutex_lock(&wl->mutex);
	wl1271_power_off(wl);
	wl->flags = 0;
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	wl->state = WLCORE_STATE_OFF;
	wl->plt = false;
	wl->plt_mode = PLT_OFF;
	wl->rx_counter = 0;
	mutex_unlock(&wl->mutex);

out:
	return ret;
}
1251
/*
 * mac80211 tx callback: map the frame to a link (hlid) and AC queue,
 * enqueue it on the driver's per-link queue, and schedule the TX work.
 * Runs under wl->wl_lock (irqsave); frames are never handed to the
 * hardware directly from here.
 */
static void wl1271_op_tx(struct ieee80211_hw *hw,
			 struct ieee80211_tx_control *control,
			 struct sk_buff *skb)
{
	struct wl1271 *wl = hw->priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	struct wl12xx_vif *wlvif = NULL;
	unsigned long flags;
	int q, mapping;
	u8 hlid;

	/* no vif means no link to map the frame onto - drop it */
	if (!vif) {
		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
		ieee80211_free_txskb(hw, skb);
		return;
	}

	wlvif = wl12xx_vif_to_data(vif);
	mapping = skb_get_queue_mapping(skb);
	q = wl1271_tx_get_queue(mapping);

	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);

	spin_lock_irqsave(&wl->wl_lock, flags);

	/*
	 * drop the packet if the link is invalid or the queue is stopped
	 * for any reason but watermark. Watermark is a "soft"-stop so we
	 * allow these packets through.
	 */
	if (hlid == WL12XX_INVALID_LINK_ID ||
	    (!test_bit(hlid, wlvif->links_map)) ||
	    (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
	     !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
		ieee80211_free_txskb(hw, skb);
		goto out;
	}

	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
		     hlid, q, skb->len);
	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);

	/* counters feed the high-watermark check below */
	wl->tx_queue_count[q]++;
	wlvif->tx_queue_count[q]++;

	/*
	 * The workqueue is slow to process the tx_queue and we need stop
	 * the queue here, otherwise the queue will get too long.
	 */
	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
		wlcore_stop_queue_locked(wl, wlvif, q,
					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
	}

	/*
	 * The chip specific setup must run before the first TX packet -
	 * before that, the tx_work will not be initialized!
	 */

	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
		ieee80211_queue_work(wl->hw, &wl->tx_work);

out:
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
1324
/*
 * Queue the pre-allocated dummy packet the firmware requests when it is
 * low on RX memory blocks.  At most one dummy packet is ever pending.
 *
 * NOTE(review): this calls wlcore_tx_work_locked(), which by naming
 * convention presumably requires wl->mutex held - confirm at call sites.
 */
int wl1271_tx_dummy_packet(struct wl1271 *wl)
{
	unsigned long flags;
	int q;

	/* no need to queue a new dummy packet if one is already pending */
	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
		return 0;

	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));

	spin_lock_irqsave(&wl->wl_lock, flags);
	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
	wl->tx_queue_count[q]++;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	/* The FW is low on RX memory blocks, so send the dummy packet asap */
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
		return wlcore_tx_work_locked(wl);

	/*
	 * If the FW TX is busy, TX work will be scheduled by the threaded
	 * interrupt handler function
	 */
	return 0;
}
1351
1352 /*
1353 * The size of the dummy packet should be at least 1400 bytes. However, in
1354 * order to minimize the number of bus transactions, aligning it to 512 bytes
1355 * boundaries could be beneficial, performance wise
1356 */
1357 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1358
wl12xx_alloc_dummy_packet(struct wl1271 * wl)1359 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1360 {
1361 struct sk_buff *skb;
1362 struct ieee80211_hdr_3addr *hdr;
1363 unsigned int dummy_packet_size;
1364
1365 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1366 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1367
1368 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1369 if (!skb) {
1370 wl1271_warning("Failed to allocate a dummy packet skb");
1371 return NULL;
1372 }
1373
1374 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1375
1376 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1377 memset(hdr, 0, sizeof(*hdr));
1378 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1379 IEEE80211_STYPE_NULLFUNC |
1380 IEEE80211_FCTL_TODS);
1381
1382 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1383
1384 /* Dummy packets require the TID to be management */
1385 skb->priority = WL1271_TID_MGMT;
1386
1387 /* Initialize all fields that might be used */
1388 skb_set_queue_mapping(skb, 0);
1389 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1390
1391 return skb;
1392 }
1393
1394
1395 #ifdef CONFIG_PM
/*
 * Check that a WoWLAN pattern can be expressed within the firmware's RX
 * filter limits.  Returns 0 when it fits, -EINVAL for a missing mask or
 * too many fields, -E2BIG when the flattened fields exceed the size cap.
 */
static int
wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
{
	int num_fields = 0, in_field = 0, fields_size = 0;
	int i, pattern_len = 0;

	if (!p->mask) {
		wl1271_warning("No mask in WoWLAN pattern");
		return -EINVAL;
	}

	/*
	 * The pattern is broken up into segments of bytes at different offsets
	 * that need to be checked by the FW filter. Each segment is called
	 * a field in the FW API. We verify that the total number of fields
	 * required for this pattern won't exceed FW limits (8)
	 * as well as the total fields buffer won't exceed the FW limit.
	 * Note that if there's a pattern which crosses Ethernet/IP header
	 * boundary a new field is required.
	 */
	for (i = 0; i < p->pattern_len; i++) {
		/* a set mask bit means byte i takes part in matching */
		if (test_bit(i, (unsigned long *)p->mask)) {
			if (!in_field) {
				/* start of a new contiguous segment */
				in_field = 1;
				pattern_len = 1;
			} else {
				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
					/* segment crosses the eth/IP boundary:
					 * close it and start a fresh field */
					num_fields++;
					fields_size += pattern_len +
						RX_FILTER_FIELD_OVERHEAD;
					pattern_len = 1;
				} else
					pattern_len++;
			}
		} else {
			if (in_field) {
				/* gap in the mask terminates the segment */
				in_field = 0;
				fields_size += pattern_len +
					RX_FILTER_FIELD_OVERHEAD;
				num_fields++;
			}
		}
	}

	/* account for a segment still open at the end of the pattern */
	if (in_field) {
		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
		num_fields++;
	}

	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
		wl1271_warning("RX Filter too complex. Too many segments");
		return -EINVAL;
	}

	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
		wl1271_warning("RX filter pattern is too big");
		return -E2BIG;
	}

	return 0;
}
1457
wl1271_rx_filter_alloc(void)1458 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1459 {
1460 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1461 }
1462
wl1271_rx_filter_free(struct wl12xx_rx_filter * filter)1463 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1464 {
1465 int i;
1466
1467 if (filter == NULL)
1468 return;
1469
1470 for (i = 0; i < filter->num_fields; i++)
1471 kfree(filter->fields[i].pattern);
1472
1473 kfree(filter);
1474 }
1475
/*
 * Append one match field (offset/flags/pattern) to an RX filter.
 *
 * Returns 0 on success, -EINVAL when the filter already holds
 * WL1271_RX_FILTER_MAX_FIELDS fields, or -ENOMEM when the pattern copy
 * cannot be allocated.  On failure the filter is left unchanged.
 */
int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
				 u16 offset, u8 flags,
				 const u8 *pattern, u8 len)
{
	struct wl12xx_rx_filter_field *field;

	if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
		wl1271_warning("Max fields per RX filter. can't alloc another");
		return -EINVAL;
	}

	field = &filter->fields[filter->num_fields];

	/* kmemdup() replaces the open-coded kzalloc() + memcpy() pair */
	field->pattern = kmemdup(pattern, len, GFP_KERNEL);
	if (!field->pattern) {
		wl1271_warning("Failed to allocate RX filter pattern");
		return -ENOMEM;
	}

	filter->num_fields++;

	field->offset = cpu_to_le16(offset);
	field->flags = flags;
	field->len = len;

	return 0;
}
1504
wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter * filter)1505 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1506 {
1507 int i, fields_size = 0;
1508
1509 for (i = 0; i < filter->num_fields; i++)
1510 fields_size += filter->fields[i].len +
1511 sizeof(struct wl12xx_rx_filter_field) -
1512 sizeof(u8 *);
1513
1514 return fields_size;
1515 }
1516
/*
 * Serialize the filter's fields into buf for the firmware command.
 *
 * Each field is laid out as the wl12xx_rx_filter_field header minus the
 * trailing pattern pointer (sizeof(u8 *)), immediately followed by the
 * pattern bytes - hence the memcpy to &field->pattern, which overwrites
 * the pointer slot with the pattern data itself.
 *
 * buf must hold at least wl1271_rx_filter_get_fields_size(filter) bytes.
 */
void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
				     u8 *buf)
{
	int i;
	struct wl12xx_rx_filter_field *field;

	for (i = 0; i < filter->num_fields; i++) {
		field = (struct wl12xx_rx_filter_field *)buf;

		field->offset = filter->fields[i].offset;
		field->flags = filter->fields[i].flags;
		field->len = filter->fields[i].len;

		/* pattern bytes start at the pointer slot, not after it */
		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
		buf += sizeof(struct wl12xx_rx_filter_field) -
		       sizeof(u8 *) + field->len;
	}
}
1535
1536 /*
1537 * Allocates an RX filter returned through f
1538 * which needs to be freed using rx_filter_free()
1539 */
/*
 * Allocates an RX filter returned through f
 * which needs to be freed using rx_filter_free()
 *
 * Splits the masked bytes of the WoWLAN pattern into contiguous
 * segments (breaking at the Ethernet/IP header boundary, as the FW
 * needs separate fields there) and adds one filter field per segment.
 * Returns 0 on success or a negative error; on failure *f is NULL.
 */
static int
wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
					   struct wl12xx_rx_filter **f)
{
	int i, j, ret = 0;
	struct wl12xx_rx_filter *filter;
	u16 offset;
	u8 flags, len;

	filter = wl1271_rx_filter_alloc();
	if (!filter) {
		wl1271_warning("Failed to alloc rx filter");
		ret = -ENOMEM;
		goto err;
	}

	i = 0;
	while (i < p->pattern_len) {
		/* skip unmasked bytes - they don't participate in matching */
		if (!test_bit(i, (unsigned long *)p->mask)) {
			i++;
			continue;
		}

		/* advance j to the end of this contiguous masked segment */
		for (j = i; j < p->pattern_len; j++) {
			if (!test_bit(j, (unsigned long *)p->mask))
				break;

			/* force a field break at the eth/IP header boundary */
			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
				break;
		}

		/* offsets are relative to the header the segment lives in */
		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
			offset = i;
			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
		} else {
			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
		}

		len = j - i;

		ret = wl1271_rx_filter_alloc_field(filter,
						   offset,
						   flags,
						   &p->pattern[i], len);
		if (ret)
			goto err;

		i = j;
	}

	filter->action = FILTER_SIGNAL;

	*f = filter;
	return 0;

err:
	wl1271_rx_filter_free(filter);
	*f = NULL;

	return ret;
}
1603
/*
 * Program the firmware RX filters from the WoWLAN configuration.
 *
 * With no usable patterns (wow NULL, "any" trigger, or zero patterns)
 * all filters are cleared and the default action reset to FILTER_SIGNAL.
 * Otherwise every pattern is validated first, existing filters are
 * cleared, each pattern is installed as a filter, and unmatched frames
 * default to FILTER_DROP.  Returns 0 or a negative error.
 */
static int wl1271_configure_wowlan(struct wl1271 *wl,
				   struct cfg80211_wowlan *wow)
{
	int i, ret;

	if (!wow || wow->any || !wow->n_patterns) {
		/* no pattern filtering: restore pass-through behavior */
		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
							  FILTER_SIGNAL);
		if (ret)
			goto out;

		ret = wl1271_rx_filter_clear_all(wl);
		if (ret)
			goto out;

		return 0;
	}

	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
		return -EINVAL;

	/* Validate all incoming patterns before clearing current FW state */
	for (i = 0; i < wow->n_patterns; i++) {
		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
		if (ret) {
			wl1271_warning("Bad wowlan pattern %d", i);
			return ret;
		}
	}

	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
	if (ret)
		goto out;

	ret = wl1271_rx_filter_clear_all(wl);
	if (ret)
		goto out;

	/* Translate WoWLAN patterns into filters */
	for (i = 0; i < wow->n_patterns; i++) {
		struct cfg80211_pkt_pattern *p;
		struct wl12xx_rx_filter *filter = NULL;

		p = &wow->patterns[i];

		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
		if (ret) {
			wl1271_warning("Failed to create an RX filter from "
				       "wowlan pattern %d", i);
			goto out;
		}

		ret = wl1271_rx_filter_enable(wl, i, 1, filter);

		/* the FW keeps its own copy; the local filter can go */
		wl1271_rx_filter_free(filter);
		if (ret)
			goto out;
	}

	/* frames matching no filter are dropped while suspended */
	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);

out:
	return ret;
}
1668
wl1271_configure_suspend_sta(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct cfg80211_wowlan * wow)1669 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1670 struct wl12xx_vif *wlvif,
1671 struct cfg80211_wowlan *wow)
1672 {
1673 int ret = 0;
1674
1675 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1676 goto out;
1677
1678 ret = wl1271_configure_wowlan(wl, wow);
1679 if (ret < 0)
1680 goto out;
1681
1682 if ((wl->conf.conn.suspend_wake_up_event ==
1683 wl->conf.conn.wake_up_event) &&
1684 (wl->conf.conn.suspend_listen_interval ==
1685 wl->conf.conn.listen_interval))
1686 goto out;
1687
1688 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1689 wl->conf.conn.suspend_wake_up_event,
1690 wl->conf.conn.suspend_listen_interval);
1691
1692 if (ret < 0)
1693 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1694 out:
1695 return ret;
1696
1697 }
1698
wl1271_configure_suspend_ap(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct cfg80211_wowlan * wow)1699 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1700 struct wl12xx_vif *wlvif,
1701 struct cfg80211_wowlan *wow)
1702 {
1703 int ret = 0;
1704
1705 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1706 goto out;
1707
1708 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1709 if (ret < 0)
1710 goto out;
1711
1712 ret = wl1271_configure_wowlan(wl, wow);
1713 if (ret < 0)
1714 goto out;
1715
1716 out:
1717 return ret;
1718
1719 }
1720
wl1271_configure_suspend(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct cfg80211_wowlan * wow)1721 static int wl1271_configure_suspend(struct wl1271 *wl,
1722 struct wl12xx_vif *wlvif,
1723 struct cfg80211_wowlan *wow)
1724 {
1725 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1726 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1727 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1728 return wl1271_configure_suspend_ap(wl, wlvif, wow);
1729 return 0;
1730 }
1731
/*
 * Undo the suspend-time configuration for a vif on resume: clear the
 * WoWLAN filters, then restore either the runtime wake-up conditions
 * (STA) or disable beacon filtering (AP).  Errors are logged but not
 * propagated.
 */
static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret = 0;
	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;

	/* only STA and AP roles were configured for suspend */
	if ((!is_ap) && (!is_sta))
		return;

	/* skip vifs that were inactive at suspend time */
	if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
	    (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
		return;

	wl1271_configure_wowlan(wl, NULL);

	if (is_sta) {
		/* nothing to restore if suspend values equal runtime values */
		if ((wl->conf.conn.suspend_wake_up_event ==
		     wl->conf.conn.wake_up_event) &&
		    (wl->conf.conn.suspend_listen_interval ==
		     wl->conf.conn.listen_interval))
			return;

		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
				    wl->conf.conn.wake_up_event,
				    wl->conf.conn.listen_interval);

		if (ret < 0)
			wl1271_error("resume: wake up conditions failed: %d",
				     ret);

	} else if (is_ap) {
		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
	}
}
1766
/*
 * mac80211 suspend callback: configure every vif for wowlan suspend,
 * quiesce firmware notifications, then flush all pending work so no
 * driver activity remains while the host sleeps.
 *
 * Returns 0 on success, -EBUSY while a recovery is pending, or a
 * negative error from the suspend configuration.
 */
static int wl1271_op_suspend(struct ieee80211_hw *hw,
			     struct cfg80211_wowlan *wow)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
	WARN_ON(!wow);

	/* we want to perform the recovery before suspending */
	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
		wl1271_warning("postponing suspend to perform recovery");
		return -EBUSY;
	}

	wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0) {
		mutex_unlock(&wl->mutex);
		return ret;
	}

	wl->wow_enabled = true;
	wl12xx_for_each_wlvif(wl, wlvif) {
		/* p2p-mgmt vifs carry no connection state to configure */
		if (wlcore_is_p2p_mgmt(wlvif))
			continue;

		ret = wl1271_configure_suspend(wl, wlvif, wow);
		if (ret < 0) {
			mutex_unlock(&wl->mutex);
			wl1271_warning("couldn't prepare device to suspend");
			return ret;
		}
	}

	/* disable fast link flow control notifications from FW */
	ret = wlcore_hw_interrupt_notify(wl, false);
	if (ret < 0)
		goto out_sleep;

	/* if filtering is enabled, configure the FW to drop all RX BA frames */
	ret = wlcore_hw_rx_ba_filter(wl,
				     !!wl->conf.conn.suspend_rx_ba_activity);
	if (ret < 0)
		goto out_sleep;

out_sleep:
	wl1271_ps_elp_sleep(wl);
	mutex_unlock(&wl->mutex);

	if (ret < 0) {
		wl1271_warning("couldn't prepare device to suspend");
		return ret;
	}

	/* flush any remaining work */
	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");

	/*
	 * disable and re-enable interrupts in order to flush
	 * the threaded_irq
	 */
	wlcore_disable_interrupts(wl);

	/*
	 * set suspended flag to avoid triggering a new threaded_irq
	 * work. no need for spinlock as interrupts are disabled.
	 */
	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);

	wlcore_enable_interrupts(wl);
	flush_work(&wl->tx_work);
	flush_delayed_work(&wl->elp_work);

	/*
	 * Cancel the watchdog even if above tx_flush failed. We will detect
	 * it on resume anyway.
	 */
	cancel_delayed_work(&wl->tx_watchdog_work);

	return 0;
}
1853
/*
 * mac80211 resume callback: re-enable irq work, run any irq work that
 * was postponed while suspended, defer to the recovery worker if a
 * recovery is pending, and otherwise restore each vif's runtime
 * configuration.  Always returns 0.
 */
static int wl1271_op_resume(struct ieee80211_hw *hw)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	unsigned long flags;
	bool run_irq_work = false, pending_recovery;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
		     wl->wow_enabled);
	WARN_ON(!wl->wow_enabled);

	/*
	 * re-enable irq_work enqueuing, and call irq_work directly if
	 * there is a pending work.
	 */
	spin_lock_irqsave(&wl->wl_lock, flags);
	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
		run_irq_work = true;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	mutex_lock(&wl->mutex);

	/* test the recovery flag before calling any SDIO functions */
	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
				    &wl->flags);

	if (run_irq_work) {
		wl1271_debug(DEBUG_MAC80211,
			     "run postponed irq_work directly");

		/* don't talk to the HW if recovery is pending */
		if (!pending_recovery) {
			ret = wlcore_irq_locked(wl);
			if (ret)
				wl12xx_queue_recovery_work(wl);
		}

		wlcore_enable_interrupts(wl);
	}

	if (pending_recovery) {
		/* let the recovery worker handle the chip; skip HW access */
		wl1271_warning("queuing forgotten recovery on resume");
		ieee80211_queue_work(wl->hw, &wl->recovery_work);
		goto out_sleep;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl12xx_for_each_wlvif(wl, wlvif) {
		/* p2p-mgmt vifs were skipped on suspend as well */
		if (wlcore_is_p2p_mgmt(wlvif))
			continue;

		wl1271_configure_resume(wl, wlvif);
	}

	/* re-enable fast link flow control notifications from FW */
	ret = wlcore_hw_interrupt_notify(wl, true);
	if (ret < 0)
		goto out_sleep;

	/* if filtering is enabled, configure the FW to drop all RX BA frames */
	ret = wlcore_hw_rx_ba_filter(wl, false);
	if (ret < 0)
		goto out_sleep;

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	wl->wow_enabled = false;

	/*
	 * Set a flag to re-init the watchdog on the first Tx after resume.
	 * That way we avoid possible conditions where Tx-complete interrupts
	 * fail to arrive and we perform a spurious recovery.
	 */
	set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
	mutex_unlock(&wl->mutex);

	return 0;
}
1938 #endif
1939
wl1271_op_start(struct ieee80211_hw * hw)1940 static int wl1271_op_start(struct ieee80211_hw *hw)
1941 {
1942 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1943
1944 /*
1945 * We have to delay the booting of the hardware because
1946 * we need to know the local MAC address before downloading and
1947 * initializing the firmware. The MAC address cannot be changed
1948 * after boot, and without the proper MAC address, the firmware
1949 * will not function properly.
1950 *
1951 * The MAC address is first known when the corresponding interface
1952 * is added. That is where we will initialize the hardware.
1953 */
1954
1955 return 0;
1956 }
1957
/*
 * Full chip shutdown and software-state reset.  Called with wl->mutex
 * held; the mutex is temporarily dropped while work items are cancelled
 * synchronously, then re-taken for the teardown proper.
 */
static void wlcore_op_stop_locked(struct wl1271 *wl)
{
	int i;

	if (wl->state == WLCORE_STATE_OFF) {
		/* already off: only balance a recovery's interrupt disable */
		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
					&wl->flags))
			wlcore_enable_interrupts(wl);

		return;
	}

	/*
	 * this must be before the cancel_work calls below, so that the work
	 * functions don't perform further work.
	 */
	wl->state = WLCORE_STATE_OFF;

	/*
	 * Use the nosync variant to disable interrupts, so the mutex could be
	 * held while doing so without deadlocking.
	 */
	wlcore_disable_interrupts_nosync(wl);

	mutex_unlock(&wl->mutex);

	wlcore_synchronize_interrupts(wl);
	/* the recovery worker itself calls here; don't cancel ourselves */
	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
		cancel_work_sync(&wl->recovery_work);
	wl1271_flush_deferred_work(wl);
	cancel_delayed_work_sync(&wl->scan_complete_work);
	cancel_work_sync(&wl->netstack_work);
	cancel_work_sync(&wl->tx_work);
	cancel_delayed_work_sync(&wl->elp_work);
	cancel_delayed_work_sync(&wl->tx_watchdog_work);

	/* let's notify MAC80211 about the remaining pending TX frames */
	mutex_lock(&wl->mutex);
	wl12xx_tx_reset(wl);

	wl1271_power_off(wl);
	/*
	 * In case a recovery was scheduled, interrupts were disabled to avoid
	 * an interrupt storm. Now that the power is down, it is safe to
	 * re-enable interrupts to balance the disable depth
	 */
	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
		wlcore_enable_interrupts(wl);

	/* reset all volatile software state to boot defaults */
	wl->band = IEEE80211_BAND_2GHZ;

	wl->rx_counter = 0;
	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
	wl->channel_type = NL80211_CHAN_NO_HT;
	wl->tx_blocks_available = 0;
	wl->tx_allocated_blocks = 0;
	wl->tx_results_count = 0;
	wl->tx_packets_count = 0;
	wl->time_offset = 0;
	wl->ap_fw_ps_map = 0;
	wl->ap_ps_map = 0;
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	memset(wl->roles_map, 0, sizeof(wl->roles_map));
	memset(wl->links_map, 0, sizeof(wl->links_map));
	memset(wl->roc_map, 0, sizeof(wl->roc_map));
	memset(wl->session_ids, 0, sizeof(wl->session_ids));
	memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
	wl->active_sta_count = 0;
	wl->active_link_count = 0;

	/* The system link is always allocated */
	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);

	/*
	 * this is performed after the cancel_work calls and the associated
	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
	 * get executed before all these vars have been reset.
	 */
	wl->flags = 0;

	wl->tx_blocks_freed = 0;

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		wl->tx_pkts_freed[i] = 0;
		wl->tx_allocated_pkts[i] = 0;
	}

	wl1271_debugfs_reset(wl);

	/* release the per-boot buffers allocated by wl1271_setup() */
	kfree(wl->raw_fw_status);
	wl->raw_fw_status = NULL;
	kfree(wl->fw_status);
	wl->fw_status = NULL;
	kfree(wl->tx_res_if);
	wl->tx_res_if = NULL;
	kfree(wl->target_mem_map);
	wl->target_mem_map = NULL;

	/*
	 * FW channels must be re-calibrated after recovery,
	 * save current Reg-Domain channel configuration and clear it.
	 */
	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
	       sizeof(wl->reg_ch_conf_pending));
	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
}
2066
/*
 * mac80211 stop callback: take the mutex and run the full shutdown in
 * wlcore_op_stop_locked().
 */
static void wlcore_op_stop(struct ieee80211_hw *hw)
{
	struct wl1271 *wl = hw->priv;

	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");

	mutex_lock(&wl->mutex);

	wlcore_op_stop_locked(wl);

	mutex_unlock(&wl->mutex);
}
2079
/*
 * Delayed work fired when a channel switch did not complete in time:
 * report failure to mac80211 and tell the firmware to stop the switch.
 */
static void wlcore_channel_switch_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;
	int ret;

	dwork = container_of(work, struct delayed_work, work);
	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
	wl = wlvif->wl;

	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* check the channel switch is still ongoing */
	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
		goto out;

	/* report the failed switch (success == false) to mac80211 */
	vif = wl12xx_wlvif_to_vif(wlvif);
	ieee80211_chswitch_done(vif, false);

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl12xx_cmd_stop_channel_switch(wl, wlvif);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
2116
/*
 * Delayed work that reports a lost connection for a still-associated
 * STA vif to mac80211 via ieee80211_connection_loss().
 */
static void wlcore_connection_loss_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;

	dwork = container_of(work, struct delayed_work, work);
	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
	wl = wlvif->wl;

	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* Call mac80211 connection loss */
	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		goto out;

	vif = wl12xx_wlvif_to_vif(wlvif);
	ieee80211_connection_loss(vif);
out:
	mutex_unlock(&wl->mutex);
}
2144
/*
 * Delayed work that cancels the remain-on-channel started for a pending
 * authentication, once the pending-auth ROC timeout has really elapsed.
 */
static void wlcore_pending_auth_complete_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;
	struct wl12xx_vif *wlvif;
	unsigned long time_spare;
	int ret;

	dwork = container_of(work, struct delayed_work, work);
	wlvif = container_of(dwork, struct wl12xx_vif,
			     pending_auth_complete_work);
	wl = wlvif->wl;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/*
	 * Make sure a second really passed since the last auth reply. Maybe
	 * a second auth reply arrived while we were stuck on the mutex.
	 * Check for a little less than the timeout to protect from scheduler
	 * irregularities.
	 */
	time_spare = jiffies +
			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* cancel the ROC if active */
	wlcore_update_inconn_sta(wl, wlvif, NULL, false);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
2185
/*
 * Reserve a free rate-policy slot and return its index through @idx.
 * Returns -EBUSY when all WL12XX_MAX_RATE_POLICIES slots are taken.
 */
static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
{
	u8 slot;

	slot = find_first_zero_bit(wl->rate_policies_map,
				   WL12XX_MAX_RATE_POLICIES);
	if (slot >= WL12XX_MAX_RATE_POLICIES)
		return -EBUSY;

	__set_bit(slot, wl->rate_policies_map);
	*idx = slot;
	return 0;
}
2197
/*
 * Release a rate-policy slot previously taken by
 * wl12xx_allocate_rate_policy() and invalidate *idx.
 */
static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
{
	u8 slot = *idx;

	if (WARN_ON(slot >= WL12XX_MAX_RATE_POLICIES))
		return;

	__clear_bit(slot, wl->rate_policies_map);
	*idx = WL12XX_MAX_RATE_POLICIES;
}
2206
/*
 * Reserve a free keep-alive (KLV) template slot and return its index
 * through @idx. Returns -EBUSY when all slots are taken.
 */
static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
{
	u8 slot;

	slot = find_first_zero_bit(wl->klv_templates_map,
				   WLCORE_MAX_KLV_TEMPLATES);
	if (slot >= WLCORE_MAX_KLV_TEMPLATES)
		return -EBUSY;

	__set_bit(slot, wl->klv_templates_map);
	*idx = slot;
	return 0;
}
2218
/*
 * Release a keep-alive (KLV) template slot previously taken by
 * wlcore_allocate_klv_template() and invalidate *idx.
 */
static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
{
	u8 slot = *idx;

	if (WARN_ON(slot >= WLCORE_MAX_KLV_TEMPLATES))
		return;

	__clear_bit(slot, wl->klv_templates_map);
	*idx = WLCORE_MAX_KLV_TEMPLATES;
}
2227
wl12xx_get_role_type(struct wl1271 * wl,struct wl12xx_vif * wlvif)2228 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2229 {
2230 switch (wlvif->bss_type) {
2231 case BSS_TYPE_AP_BSS:
2232 if (wlvif->p2p)
2233 return WL1271_ROLE_P2P_GO;
2234 else
2235 return WL1271_ROLE_AP;
2236
2237 case BSS_TYPE_STA_BSS:
2238 if (wlvif->p2p)
2239 return WL1271_ROLE_P2P_CL;
2240 else
2241 return WL1271_ROLE_STA;
2242
2243 case BSS_TYPE_IBSS:
2244 return WL1271_ROLE_IBSS;
2245
2246 default:
2247 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2248 }
2249 return WL12XX_INVALID_ROLE_TYPE;
2250 }
2251
/*
 * Reset the per-vif driver state and derive bss_type, default rate sets and
 * per-band data from the mac80211 interface type, then set up the vif's
 * work items and rx-streaming timer.
 *
 * Returns 0 on success, -EOPNOTSUPP for an unsupported interface type.
 */
static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int i;

	/* clear everything but the persistent data */
	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));

	switch (ieee80211_vif_type_p2p(vif)) {
	case NL80211_IFTYPE_P2P_CLIENT:
		wlvif->p2p = 1;
		/* fall-through */
	case NL80211_IFTYPE_STATION:
	case NL80211_IFTYPE_P2P_DEVICE:
		wlvif->bss_type = BSS_TYPE_STA_BSS;
		break;
	case NL80211_IFTYPE_ADHOC:
		wlvif->bss_type = BSS_TYPE_IBSS;
		break;
	case NL80211_IFTYPE_P2P_GO:
		wlvif->p2p = 1;
		/* fall-through */
	case NL80211_IFTYPE_AP:
		wlvif->bss_type = BSS_TYPE_AP_BSS;
		break;
	default:
		wlvif->bss_type = MAX_BSS_TYPE;
		return -EOPNOTSUPP;
	}

	/* no firmware role or device link allocated yet */
	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
	    wlvif->bss_type == BSS_TYPE_IBSS) {
		/* init sta/ibss data */
		/* NOTE(review): -EBUSY returns of the allocators below are
		 * ignored, leaving the index fields untouched on failure —
		 * confirm this is intentional */
		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
	} else {
		/* init ap data */
		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
			wl12xx_allocate_rate_policy(wl,
						&wlvif->ap.ucast_rate_idx[i]);
		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
		/*
		 * TODO: check if basic_rate shouldn't be
		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
		 * instead (the same thing for STA above).
		 */
		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
		/* TODO: this seems to be used only for STA, check it */
		wlvif->rate_set = CONF_TX_ENABLED_RATES;
	}

	wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
	wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;

	/*
	 * mac80211 configures some values globally, while we treat them
	 * per-interface. thus, on init, we have to copy them from wl
	 */
	wlvif->band = wl->band;
	wlvif->channel = wl->channel;
	wlvif->power_level = wl->power_level;
	wlvif->channel_type = wl->channel_type;

	INIT_WORK(&wlvif->rx_streaming_enable_work,
		  wl1271_rx_streaming_enable_work);
	INIT_WORK(&wlvif->rx_streaming_disable_work,
		  wl1271_rx_streaming_disable_work);
	INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
			  wlcore_channel_switch_work);
	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
			  wlcore_connection_loss_work);
	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
			  wlcore_pending_auth_complete_work);
	INIT_LIST_HEAD(&wlvif->list);

	setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
		    (unsigned long) wlvif);
	return 0;
}
2347
wl12xx_init_fw(struct wl1271 * wl)2348 static int wl12xx_init_fw(struct wl1271 *wl)
2349 {
2350 int retries = WL1271_BOOT_RETRIES;
2351 bool booted = false;
2352 struct wiphy *wiphy = wl->hw->wiphy;
2353 int ret;
2354
2355 while (retries) {
2356 retries--;
2357 ret = wl12xx_chip_wakeup(wl, false);
2358 if (ret < 0)
2359 goto power_off;
2360
2361 ret = wl->ops->boot(wl);
2362 if (ret < 0)
2363 goto power_off;
2364
2365 ret = wl1271_hw_init(wl);
2366 if (ret < 0)
2367 goto irq_disable;
2368
2369 booted = true;
2370 break;
2371
2372 irq_disable:
2373 mutex_unlock(&wl->mutex);
2374 /* Unlocking the mutex in the middle of handling is
2375 inherently unsafe. In this case we deem it safe to do,
2376 because we need to let any possibly pending IRQ out of
2377 the system (and while we are WLCORE_STATE_OFF the IRQ
2378 work function will not do anything.) Also, any other
2379 possible concurrent operations will fail due to the
2380 current state, hence the wl1271 struct should be safe. */
2381 wlcore_disable_interrupts(wl);
2382 wl1271_flush_deferred_work(wl);
2383 cancel_work_sync(&wl->netstack_work);
2384 mutex_lock(&wl->mutex);
2385 power_off:
2386 wl1271_power_off(wl);
2387 }
2388
2389 if (!booted) {
2390 wl1271_error("firmware boot failed despite %d retries",
2391 WL1271_BOOT_RETRIES);
2392 goto out;
2393 }
2394
2395 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2396
2397 /* update hw/fw version info in wiphy struct */
2398 wiphy->hw_version = wl->chip.id;
2399 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2400 sizeof(wiphy->fw_version));
2401
2402 /*
2403 * Now we know if 11a is supported (info from the NVS), so disable
2404 * 11a channels if not supported
2405 */
2406 if (!wl->enable_11a)
2407 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2408
2409 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2410 wl->enable_11a ? "" : "not ");
2411
2412 wl->state = WLCORE_STATE_ON;
2413 out:
2414 return ret;
2415 }
2416
wl12xx_dev_role_started(struct wl12xx_vif * wlvif)2417 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2418 {
2419 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2420 }
2421
2422 /*
2423 * Check whether a fw switch (i.e. moving from one loaded
2424 * fw to another) is needed. This function is also responsible
2425 * for updating wl->last_vif_count, so it must be called before
2426 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2427 * will be used).
2428 */
static bool wl12xx_need_fw_change(struct wl1271 *wl,
				  struct vif_counter_data vif_counter_data,
				  bool add)
{
	enum wl12xx_fw_type fw = wl->fw_type;
	u8 count = vif_counter_data.counter;

	/* never switch while an interface type change is in flight */
	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
		return false;

	/* a brand-new vif contributes to the count */
	if (add && !vif_counter_data.cur_vif_running)
		count++;

	wl->last_vif_count = count;

	/* no need for fw change if the device is OFF */
	if (wl->state == WLCORE_STATE_OFF)
		return false;

	/* no need for fw change if a single fw is used */
	if (!wl->mr_fw_name)
		return false;

	/* multi-vif needs the multi-role fw; single-vif the single-role fw */
	if (count > 1)
		return fw == WL12XX_FW_TYPE_NORMAL;

	return fw == WL12XX_FW_TYPE_MULTI;
}
2460
2461 /*
2462 * Enter "forced psm". Make sure the sta is in psm against the ap,
2463 * to make the fw switch a bit more disconnection-persistent.
2464 */
static void wl12xx_force_active_psm(struct wl1271 *wl)
{
	struct wl12xx_vif *wlvif;

	/* put every station vif into power-save mode against its AP */
	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
	}
}
2473
/* state shared with wlcore_hw_queue_iter() while scanning active vifs */
struct wlcore_hw_queue_iter_data {
	/* bitmap of hw-queue blocks already claimed by other interfaces */
	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
	/* current vif */
	struct ieee80211_vif *vif;
	/* is the current vif among those iterated */
	bool cur_running;
};
2481
/*
 * Iterator callback: record which hw-queue blocks are in use by other
 * active interfaces, and note whether the vif we are allocating for is
 * itself already running.
 */
static void wlcore_hw_queue_iter(void *data, u8 *mac,
				 struct ieee80211_vif *vif)
{
	struct wlcore_hw_queue_iter_data *iter_data = data;

	/* p2p-device vifs have no data queues; an invalid mapping on any
	 * other vif is unexpected */
	if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
	    WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
		return;

	if (vif == iter_data->vif || iter_data->cur_running) {
		iter_data->cur_running = true;
		return;
	}

	__set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
}
2498
/*
 * Assign a block of NUM_TX_QUEUES mac80211 hw queues to this vif.
 * P2P-device vifs get none. On resume/recovery the queue base already
 * registered in mac80211 is reused. Returns -EBUSY when all queue blocks
 * are taken.
 */
static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
					 struct wl12xx_vif *wlvif)
{
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
	struct wlcore_hw_queue_iter_data iter_data = {};
	int i, q_base;

	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
		return 0;
	}

	iter_data.vif = vif;

	/* mark all bits taken by active interfaces */
	ieee80211_iterate_active_interfaces_atomic(wl->hw,
					IEEE80211_IFACE_ITER_RESUME_ALL,
					wlcore_hw_queue_iter, &iter_data);

	/* the current vif is already running in mac80211 (resume/recovery) */
	if (iter_data.cur_running) {
		wlvif->hw_queue_base = vif->hw_queue[0];
		wl1271_debug(DEBUG_MAC80211,
			     "using pre-allocated hw queue base %d",
			     wlvif->hw_queue_base);

		/* interface type might have changed type */
		goto adjust_cab_queue;
	}

	q_base = find_first_zero_bit(iter_data.hw_queue_map,
				     WLCORE_NUM_MAC_ADDRESSES);
	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
		return -EBUSY;

	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
		     wlvif->hw_queue_base);

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
		/* register hw queues in mac80211 */
		vif->hw_queue[i] = wlvif->hw_queue_base + i;
	}

adjust_cab_queue:
	/* the last places are reserved for cab queues per interface */
	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
				 wlvif->hw_queue_base / NUM_TX_QUEUES;
	else
		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;

	return 0;
}
2554
/*
 * mac80211 add_interface callback: initialize the per-vif data, allocate
 * hw queues, boot the firmware if the device is still off, possibly
 * trigger an intended fw switch (single-role <-> multi-role), and enable
 * the firmware role for this vif.
 *
 * Returns 0 on success or a negative error; -EBUSY in PLT mode, during
 * recovery, or when the vif is already initialized.
 */
static int wl1271_op_add_interface(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct vif_counter_data vif_count;
	int ret = 0;
	u8 role_type;

	if (wl->plt) {
		wl1271_error("Adding Interface not allowed while in PLT mode");
		return -EBUSY;
	}

	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
			     IEEE80211_VIF_SUPPORTS_UAPSD |
			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;

	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
		     ieee80211_vif_type_p2p(vif), vif->addr);

	wl12xx_get_vif_count(hw, vif, &vif_count);

	mutex_lock(&wl->mutex);
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out_unlock;

	/*
	 * in some very corner case HW recovery scenarios its possible to
	 * get here before __wl1271_op_remove_interface is complete, so
	 * opt out if that is the case.
	 */
	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
		ret = -EBUSY;
		goto out;
	}


	ret = wl12xx_init_vif_data(wl, vif);
	if (ret < 0)
		goto out;

	wlvif->wl = wl;
	role_type = wl12xx_get_role_type(wl, wlvif);
	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
		ret = -EINVAL;
		goto out;
	}

	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
	if (ret < 0)
		goto out;

	/* an intended fw switch restarts the whole device via recovery */
	if (wl12xx_need_fw_change(wl, vif_count, true)) {
		wl12xx_force_active_psm(wl);
		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
		mutex_unlock(&wl->mutex);
		wl1271_recovery_work(&wl->recovery_work);
		return 0;
	}

	/*
	 * TODO: after the nvs issue will be solved, move this block
	 * to start(), and make sure here the driver is ON.
	 */
	if (wl->state == WLCORE_STATE_OFF) {
		/*
		 * we still need this in order to configure the fw
		 * while uploading the nvs
		 */
		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);

		ret = wl12xx_init_fw(wl);
		if (ret < 0)
			goto out;
	}

	if (!wlcore_is_p2p_mgmt(wlvif)) {
		ret = wl12xx_cmd_role_enable(wl, vif->addr,
					     role_type, &wlvif->role_id);
		if (ret < 0)
			goto out;

		ret = wl1271_init_vif_specific(wl, vif);
		if (ret < 0)
			goto out;

	} else {
		/* p2p-mgmt vifs use a device role instead of a full role */
		ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
					     &wlvif->dev_role_id);
		if (ret < 0)
			goto out;

		/* needed mainly for configuring rate policies */
		ret = wl1271_sta_hw_init(wl, wlvif);
		if (ret < 0)
			goto out;
	}

	list_add(&wlvif->list, &wl->wlvif_list);
	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);

	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		wl->ap_count++;
	else
		wl->sta_count++;
out:
	wl1271_ps_elp_sleep(wl);
out_unlock:
	mutex_unlock(&wl->mutex);

	return ret;
}
2670
/*
 * Tear down a vif: stop any scan/ROC bound to it, disable its firmware
 * roles (unless a recovery is in progress), free its links, rate policies
 * and templates, update the sta/ap counters, adjust sleep-auth when the
 * last AP goes away, and cancel its pending work items.
 *
 * Called with wl->mutex held; the mutex is dropped while cancelling the
 * vif's work items and re-taken before returning.
 */
static void __wl1271_op_remove_interface(struct wl1271 *wl,
					 struct ieee80211_vif *vif,
					 bool reset_tx_queues)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int i, ret;
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");

	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		return;

	/* because of hardware recovery, we may get here twice */
	if (wl->state == WLCORE_STATE_OFF)
		return;

	wl1271_info("down");

	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
	    wl->scan_wlvif == wlvif) {
		/*
		 * Rearm the tx watchdog just before idling scan. This
		 * prevents just-finished scans from triggering the watchdog
		 */
		wl12xx_rearm_tx_watchdog_locked(wl);

		wl->scan.state = WL1271_SCAN_STATE_IDLE;
		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
		wl->scan_wlvif = NULL;
		wl->scan.req = NULL;
		ieee80211_scan_completed(wl->hw, true);
	}

	if (wl->sched_vif == wlvif)
		wl->sched_vif = NULL;

	if (wl->roc_vif == vif) {
		wl->roc_vif = NULL;
		ieee80211_remain_on_channel_expired(wl->hw);
	}

	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
		/* disable active roles */
		ret = wl1271_ps_elp_wakeup(wl);
		if (ret < 0)
			goto deinit;

		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
		    wlvif->bss_type == BSS_TYPE_IBSS) {
			if (wl12xx_dev_role_started(wlvif))
				wl12xx_stop_dev(wl, wlvif);
		}

		if (!wlcore_is_p2p_mgmt(wlvif)) {
			ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
			if (ret < 0)
				goto deinit;
		} else {
			ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
			if (ret < 0)
				goto deinit;
		}

		wl1271_ps_elp_sleep(wl);
	}
deinit:
	wl12xx_tx_reset_wlvif(wl, wlvif);

	/* clear all hlids (except system_hlid) */
	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
	    wlvif->bss_type == BSS_TYPE_IBSS) {
		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
	} else {
		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
			wl12xx_free_rate_policy(wl,
						&wlvif->ap.ucast_rate_idx[i]);
		wl1271_free_ap_keys(wl, wlvif);
	}

	dev_kfree_skb(wlvif->probereq);
	wlvif->probereq = NULL;
	if (wl->last_wlvif == wlvif)
		wl->last_wlvif = NULL;
	list_del(&wlvif->list);
	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;

	if (is_ap)
		wl->ap_count--;
	else
		wl->sta_count--;

	/*
	 * Last AP, have more stations. Configure sleep auth according to STA.
	 * Don't do this on unintended recovery.
	 */
	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
		goto unlock;

	if (wl->ap_count == 0 && is_ap) {
		/* mask ap events */
		wl->event_mask &= ~wl->ap_event_mask;
		wl1271_event_unmask(wl);
	}

	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
		/* Configure for power according to debugfs */
		if (sta_auth != WL1271_PSM_ILLEGAL)
			wl1271_acx_sleep_auth(wl, sta_auth);
		/* Configure for ELP power saving */
		else
			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
	}

unlock:
	mutex_unlock(&wl->mutex);

	/* cancel the vif's work items with the mutex released */
	del_timer_sync(&wlvif->rx_streaming_timer);
	cancel_work_sync(&wlvif->rx_streaming_enable_work);
	cancel_work_sync(&wlvif->rx_streaming_disable_work);
	cancel_work_sync(&wlvif->rc_update_work);
	cancel_delayed_work_sync(&wlvif->connection_loss_work);
	cancel_delayed_work_sync(&wlvif->channel_switch_work);
	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);

	mutex_lock(&wl->mutex);
}
2812
/*
 * mac80211 remove_interface callback: tear the vif down if it is still
 * tracked, then trigger an intended fw switch (multi-role -> single-role)
 * via recovery when appropriate.
 */
static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl12xx_vif *iter;
	struct vif_counter_data vif_count;

	wl12xx_get_vif_count(hw, vif, &vif_count);
	mutex_lock(&wl->mutex);

	if (wl->state == WLCORE_STATE_OFF ||
	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		goto out;

	/*
	 * wl->vif can be null here if someone shuts down the interface
	 * just when hardware recovery has been started.
	 */
	wl12xx_for_each_wlvif(wl, iter) {
		if (iter != wlvif)
			continue;

		__wl1271_op_remove_interface(wl, vif, true);
		break;
	}
	/* the vif must have been found in the list above */
	WARN_ON(iter != wlvif);
	if (wl12xx_need_fw_change(wl, vif_count, false)) {
		wl12xx_force_active_psm(wl);
		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
		wl12xx_queue_recovery_work(wl);
	}
out:
	mutex_unlock(&wl->mutex);
}
2848
/*
 * mac80211 change_interface callback: implemented as remove + add with a
 * flag set so the fw-change logic skips the transient vif-count change.
 */
static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      enum nl80211_iftype new_type, bool p2p)
{
	struct wl1271 *wl = hw->priv;
	int ret;

	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
	wl1271_op_remove_interface(hw, vif);

	vif->type = new_type;
	vif->p2p = p2p;
	ret = wl1271_op_add_interface(hw, vif);

	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
	return ret;
}
2866
wlcore_join(struct wl1271 * wl,struct wl12xx_vif * wlvif)2867 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2868 {
2869 int ret;
2870 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2871
2872 /*
2873 * One of the side effects of the JOIN command is that is clears
2874 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2875 * to a WPA/WPA2 access point will therefore kill the data-path.
2876 * Currently the only valid scenario for JOIN during association
2877 * is on roaming, in which case we will also be given new keys.
2878 * Keep the below message for now, unless it starts bothering
2879 * users who really like to roam a lot :)
2880 */
2881 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2882 wl1271_info("JOIN while associated.");
2883
2884 /* clear encryption type */
2885 wlvif->encryption_type = KEY_NONE;
2886
2887 if (is_ibss)
2888 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2889 else {
2890 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2891 /*
2892 * TODO: this is an ugly workaround for wl12xx fw
2893 * bug - we are not able to tx/rx after the first
2894 * start_sta, so make dummy start+stop calls,
2895 * and then call start_sta again.
2896 * this should be fixed in the fw.
2897 */
2898 wl12xx_cmd_role_start_sta(wl, wlvif);
2899 wl12xx_cmd_role_stop_sta(wl, wlvif);
2900 }
2901
2902 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2903 }
2904
2905 return ret;
2906 }
2907
wl1271_ssid_set(struct wl12xx_vif * wlvif,struct sk_buff * skb,int offset)2908 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2909 int offset)
2910 {
2911 u8 ssid_len;
2912 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2913 skb->len - offset);
2914
2915 if (!ptr) {
2916 wl1271_error("No SSID in IEs!");
2917 return -ENOENT;
2918 }
2919
2920 ssid_len = ptr[1];
2921 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2922 wl1271_error("SSID is too long!");
2923 return -EINVAL;
2924 }
2925
2926 wlvif->ssid_len = ssid_len;
2927 memcpy(wlvif->ssid, ptr+2, ssid_len);
2928 return 0;
2929 }
2930
wlcore_set_ssid(struct wl1271 * wl,struct wl12xx_vif * wlvif)2931 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2932 {
2933 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2934 struct sk_buff *skb;
2935 int ieoffset;
2936
2937 /* we currently only support setting the ssid from the ap probe req */
2938 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2939 return -EINVAL;
2940
2941 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2942 if (!skb)
2943 return -EINVAL;
2944
2945 ieoffset = offsetof(struct ieee80211_mgmt,
2946 u.probe_req.variable);
2947 wl1271_ssid_set(wlvif, skb, ieoffset);
2948 dev_kfree_skb(skb);
2949
2950 return 0;
2951 }
2952
/*
 * Apply post-association configuration for a station vif: ps-poll and
 * probe-request templates, connection monitoring, keep-alive, AID, power
 * mode, and (optionally) the AP-advertised rate set.
 *
 * Returns 0 on success or a negative error.
 */
static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			    struct ieee80211_bss_conf *bss_conf,
			    u32 sta_rate_set)
{
	int ieoffset;
	int ret;

	wlvif->aid = bss_conf->aid;
	wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
	wlvif->beacon_int = bss_conf->beacon_int;
	wlvif->wmm_enabled = bss_conf->qos;

	set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);

	/*
	 * with wl1271, we don't need to update the
	 * beacon_int and dtim_period, because the firmware
	 * updates it by itself when the first beacon is
	 * received after a join.
	 */
	ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
	if (ret < 0)
		return ret;

	/*
	 * Get a template for hardware connection maintenance
	 */
	dev_kfree_skb(wlvif->probereq);
	wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
							wlvif,
							NULL);
	/*
	 * Building the template can fail; bail out instead of passing a
	 * NULL skb to wl1271_ssid_set(), which dereferences it.
	 */
	if (!wlvif->probereq)
		return -ENOMEM;
	ieoffset = offsetof(struct ieee80211_mgmt,
			    u.probe_req.variable);
	wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);

	/* enable the connection monitoring feature */
	ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
	if (ret < 0)
		return ret;

	/*
	 * The join command disable the keep-alive mode, shut down its process,
	 * and also clear the template config, so we need to reset it all after
	 * the join. The acx_aid starts the keep-alive process, and the order
	 * of the commands below is relevant.
	 */
	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
	if (ret < 0)
		return ret;

	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
	if (ret < 0)
		return ret;

	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
	if (ret < 0)
		return ret;

	ret = wl1271_acx_keep_alive_config(wl, wlvif,
					   wlvif->sta.klv_template_id,
					   ACX_KEEP_ALIVE_TPL_VALID);
	if (ret < 0)
		return ret;

	/*
	 * The default fw psm configuration is AUTO, while mac80211 default
	 * setting is off (ACTIVE), so sync the fw with the correct value.
	 */
	ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
	if (ret < 0)
		return ret;

	if (sta_rate_set) {
		wlvif->rate_set =
			wl1271_tx_enabled_rates_get(wl,
						    sta_rate_set,
						    wlvif->band);
		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
		if (ret < 0)
			return ret;
	}

	return ret;
}
3037
wlcore_unset_assoc(struct wl1271 * wl,struct wl12xx_vif * wlvif)3038 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3039 {
3040 int ret;
3041 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
3042
3043 /* make sure we are connected (sta) joined */
3044 if (sta &&
3045 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
3046 return false;
3047
3048 /* make sure we are joined (ibss) */
3049 if (!sta &&
3050 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3051 return false;
3052
3053 if (sta) {
3054 /* use defaults when not associated */
3055 wlvif->aid = 0;
3056
3057 /* free probe-request template */
3058 dev_kfree_skb(wlvif->probereq);
3059 wlvif->probereq = NULL;
3060
3061 /* disable connection monitor features */
3062 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3063 if (ret < 0)
3064 return ret;
3065
3066 /* Disable the keep-alive feature */
3067 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3068 if (ret < 0)
3069 return ret;
3070
3071 /* disable beacon filtering */
3072 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3073 if (ret < 0)
3074 return ret;
3075 }
3076
3077 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3078 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3079
3080 wl12xx_cmd_stop_channel_switch(wl, wlvif);
3081 ieee80211_chswitch_done(vif, false);
3082 cancel_delayed_work(&wlvif->channel_switch_work);
3083 }
3084
3085 /* invalidate keep-alive template */
3086 wl1271_acx_keep_alive_config(wl, wlvif,
3087 wlvif->sta.klv_template_id,
3088 ACX_KEEP_ALIVE_TPL_INVALID);
3089
3090 return 0;
3091 }
3092
wl1271_set_band_rate(struct wl1271 * wl,struct wl12xx_vif * wlvif)3093 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3094 {
3095 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3096 wlvif->rate_set = wlvif->basic_rate_set;
3097 }
3098
/*
 * Track the vif's idle state via WLVIF_FLAG_ACTIVE; when leaving idle,
 * stop a scheduled scan bound to this vif first.
 */
static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				   bool idle)
{
	bool was_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);

	if (idle == was_idle)
		return;

	if (idle) {
		clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
		return;
	}

	/* The current firmware only supports sched_scan in idle */
	if (wl->sched_vif == wlvif)
		wl->ops->sched_scan_stop(wl, wlvif);

	set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
}
3117
/*
 * Apply a mac80211 config change to one vif. Currently only the tx power
 * level is handled; p2p-mgmt vifs are skipped.
 */
static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			     struct ieee80211_conf *conf, u32 changed)
{
	int ret;

	if (wlcore_is_p2p_mgmt(wlvif))
		return 0;

	if (conf->power_level == wlvif->power_level)
		return 0;

	ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
	if (ret < 0)
		return ret;

	wlvif->power_level = conf->power_level;
	return 0;
}
3136
/*
 * mac80211 config callback: cache the global power level, then wake the
 * chip and apply the change to each vif via wl12xx_config_vif().
 * Returns 0 or a negative error.
 */
static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	struct ieee80211_conf *conf = &hw->conf;
	int ret = 0;

	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
		     " changed 0x%x",
		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
		     conf->power_level,
		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
		     changed);

	mutex_lock(&wl->mutex);

	/* remember the global power level even when the device is off */
	if (changed & IEEE80211_CONF_CHANGE_POWER)
		wl->power_level = conf->power_level;

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* configure each interface */
	wl12xx_for_each_wlvif(wl, wlvif) {
		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
		if (ret < 0)
			goto out_sleep;
	}

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
3178
/* multicast filter snapshot passed from prepare_multicast to
 * configure_filter via the opaque u64 cookie */
struct wl1271_filter_params {
	/* false when the list overflowed and hw filtering must be disabled */
	bool enabled;
	/* number of valid entries in mc_list */
	int mc_list_length;
	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
};
3184
wl1271_op_prepare_multicast(struct ieee80211_hw * hw,struct netdev_hw_addr_list * mc_list)3185 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3186 struct netdev_hw_addr_list *mc_list)
3187 {
3188 struct wl1271_filter_params *fp;
3189 struct netdev_hw_addr *ha;
3190
3191 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3192 if (!fp) {
3193 wl1271_error("Out of memory setting filters.");
3194 return 0;
3195 }
3196
3197 /* update multicast filtering parameters */
3198 fp->mc_list_length = 0;
3199 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3200 fp->enabled = false;
3201 } else {
3202 fp->enabled = true;
3203 netdev_hw_addr_list_for_each(ha, mc_list) {
3204 memcpy(fp->mc_list[fp->mc_list_length],
3205 ha->addr, ETH_ALEN);
3206 fp->mc_list_length++;
3207 }
3208 }
3209
3210 return (u64)(unsigned long)fp;
3211 }
3212
/* RX filter flags the driver honors; anything else is masked off */
#define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
				  FIF_FCSFAIL | \
				  FIF_BCN_PRBRESP_PROMISC | \
				  FIF_CONTROL | \
				  FIF_OTHER_BSS)
3218
/*
 * mac80211 .configure_filter callback.
 *
 * Only the multicast group-address table is actually programmed into
 * the firmware here; the remaining filters are derived by the firmware
 * from the active roles / ROC state.  @multicast carries the pointer
 * returned by wl1271_op_prepare_multicast(), which is always freed
 * before returning.
 *
 * Fix: ret was previously left uninitialized when a non-AP vif took
 * neither programming branch (no FIF_ALLMULTI and fp == NULL), making
 * the following "if (ret < 0)" test read an indeterminate value.
 */
static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
				       unsigned int changed,
				       unsigned int *total, u64 multicast)
{
	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;

	int ret = 0;

	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
		     " total %x", changed, *total);

	mutex_lock(&wl->mutex);

	*total &= WL1271_SUPPORTED_FILTERS;
	changed &= WL1271_SUPPORTED_FILTERS;

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl12xx_for_each_wlvif(wl, wlvif) {
		if (wlcore_is_p2p_mgmt(wlvif))
			continue;

		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
			if (*total & FIF_ALLMULTI)
				ret = wl1271_acx_group_address_tbl(wl, wlvif,
								   false,
								   NULL, 0);
			else if (fp)
				ret = wl1271_acx_group_address_tbl(wl, wlvif,
							fp->enabled,
							fp->mc_list,
							fp->mc_list_length);
			else
				continue;	/* nothing to reprogram */

			if (ret < 0)
				goto out_sleep;
		}
	}

	/*
	 * the fw doesn't provide an api to configure the filters. instead,
	 * the filters configuration is based on the active roles / ROC
	 * state.
	 */

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
	kfree(fp);
}
3276
/*
 * Remember an AP key configured before the AP role has started.  The
 * recorded keys are replayed to the firmware by wl1271_ap_init_hwenc()
 * once the role is up.  Replacing an already-recorded key id is not
 * supported and returns -EINVAL; a full table returns -EBUSY.
 */
static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				u8 id, u8 key_type, u8 key_size,
				const u8 *key, u8 hlid, u32 tx_seq_32,
				u16 tx_seq_16)
{
	struct wl1271_ap_key *rec;
	int slot;

	wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);

	if (key_size > MAX_KEY_SIZE)
		return -EINVAL;

	/*
	 * Look for a free slot, while making sure this key id has not
	 * been recorded already.
	 */
	for (slot = 0; slot < MAX_NUM_KEYS; slot++) {
		if (!wlvif->ap.recorded_keys[slot])
			break;

		if (wlvif->ap.recorded_keys[slot]->id == id) {
			wl1271_warning("trying to record key replacement");
			return -EINVAL;
		}
	}

	if (slot == MAX_NUM_KEYS)
		return -EBUSY;

	rec = kzalloc(sizeof(*rec), GFP_KERNEL);
	if (!rec)
		return -ENOMEM;

	rec->id = id;
	rec->key_type = key_type;
	rec->key_size = key_size;
	memcpy(rec->key, key, key_size);
	rec->hlid = hlid;
	rec->tx_seq_32 = tx_seq_32;
	rec->tx_seq_16 = tx_seq_16;

	wlvif->ap.recorded_keys[slot] = rec;
	return 0;
}
3322
wl1271_free_ap_keys(struct wl1271 * wl,struct wl12xx_vif * wlvif)3323 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3324 {
3325 int i;
3326
3327 for (i = 0; i < MAX_NUM_KEYS; i++) {
3328 kfree(wlvif->ap.recorded_keys[i]);
3329 wlvif->ap.recorded_keys[i] = NULL;
3330 }
3331 }
3332
/*
 * Replay all keys recorded by wl1271_record_ap_key() into the firmware
 * once the AP role has started.  The recorded keys are always freed on
 * exit, whether or not programming succeeded.
 */
static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int i, ret = 0;
	struct wl1271_ap_key *key;
	bool wep_key_added = false;

	for (i = 0; i < MAX_NUM_KEYS; i++) {
		u8 hlid;
		/* recorded keys are packed - the first NULL ends the list */
		if (wlvif->ap.recorded_keys[i] == NULL)
			break;

		key = wlvif->ap.recorded_keys[i];
		hlid = key->hlid;
		/* keys recorded without a link go to the broadcast link */
		if (hlid == WL12XX_INVALID_LINK_ID)
			hlid = wlvif->ap.bcast_hlid;

		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
					    key->id, key->key_type,
					    key->key_size, key->key,
					    hlid, key->tx_seq_32,
					    key->tx_seq_16);
		if (ret < 0)
			goto out;

		if (key->key_type == KEY_WEP)
			wep_key_added = true;
	}

	/* WEP additionally needs the default key index pushed to the fw */
	if (wep_key_added) {
		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
						     wlvif->ap.bcast_hlid);
		if (ret < 0)
			goto out;
	}

out:
	wl1271_free_ap_keys(wl, wlvif);
	return ret;
}
3372
/*
 * Program (or remove) a single key in the firmware, dispatching between
 * the AP and station paths.
 *
 * AP path: before the AP role has started, keys are only recorded (see
 * wl1271_record_ap_key()) and replayed later by wl1271_ap_init_hwenc();
 * removals in that window are silently accepted.  Keys with no station
 * target the broadcast link.
 *
 * STA path: unicast key removal and removal after the station link is
 * gone are silently ignored (see the inline comments for why).
 */
static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			  u16 action, u8 id, u8 key_type,
			  u8 key_size, const u8 *key, u32 tx_seq_32,
			  u16 tx_seq_16, struct ieee80211_sta *sta)
{
	int ret;
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	if (is_ap) {
		struct wl1271_station *wl_sta;
		u8 hlid;

		/* per-station link when a station is given, else broadcast */
		if (sta) {
			wl_sta = (struct wl1271_station *)sta->drv_priv;
			hlid = wl_sta->hlid;
		} else {
			hlid = wlvif->ap.bcast_hlid;
		}

		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
			/*
			 * We do not support removing keys after AP shutdown.
			 * Pretend we do to make mac80211 happy.
			 */
			if (action != KEY_ADD_OR_REPLACE)
				return 0;

			ret = wl1271_record_ap_key(wl, wlvif, id,
					     key_type, key_size,
					     key, hlid, tx_seq_32,
					     tx_seq_16);
		} else {
			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
					     id, key_type, key_size,
					     key, hlid, tx_seq_32,
					     tx_seq_16);
		}

		if (ret < 0)
			return ret;
	} else {
		const u8 *addr;
		static const u8 bcast_addr[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
		};

		addr = sta ? sta->addr : bcast_addr;

		if (is_zero_ether_addr(addr)) {
			/* We dont support TX only encryption */
			return -EOPNOTSUPP;
		}

		/* The wl1271 does not allow to remove unicast keys - they
		   will be cleared automatically on next CMD_JOIN. Ignore the
		   request silently, as we dont want the mac80211 to emit
		   an error message. */
		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
			return 0;

		/* don't remove key if hlid was already deleted */
		if (action == KEY_REMOVE &&
		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
			return 0;

		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
					     id, key_type, key_size,
					     key, addr, tx_seq_32,
					     tx_seq_16);
		if (ret < 0)
			return ret;

	}

	return 0;
}
3449
/*
 * mac80211 .set_key callback.
 *
 * GEM and TKIP keys change the firmware's spare-block accounting, so
 * for those ciphers the TX queues are stopped and flushed before
 * touching the key, and re-woken on every exit path.  The actual key
 * programming is delegated to the chip-family handler through
 * wlcore_hw_set_key().
 */
static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			     struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta,
			     struct ieee80211_key_conf *key_conf)
{
	struct wl1271 *wl = hw->priv;
	int ret;
	bool might_change_spare =
		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;

	if (might_change_spare) {
		/*
		 * stop the queues and flush to ensure the next packets are
		 * in sync with FW spare block accounting
		 */
		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
		wl1271_tx_flush(wl);
	}

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out_wake_queues;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out_wake_queues;

	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);

	wl1271_ps_elp_sleep(wl);

out_wake_queues:
	/* queues were only stopped for the spare-block sensitive ciphers */
	if (might_change_spare)
		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);

	mutex_unlock(&wl->mutex);

	return ret;
}
3493
/*
 * Common set_key implementation used by the chip-family hw_ops.
 * Translates the mac80211 cipher/command into the firmware key type and
 * action, seeds the TX sequence counters from the link's freed-packet
 * counter, and reconfigures the ARP response template when the unicast
 * (or WEP) encryption type of a station interface changes.
 */
int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   struct ieee80211_key_conf *key_conf)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;
	u32 tx_seq_32 = 0;
	u16 tx_seq_16 = 0;
	u8 key_type;
	u8 hlid;

	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");

	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
		     key_conf->cipher, key_conf->keyidx,
		     key_conf->keylen, key_conf->flags);
	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);

	/*
	 * Resolve the firmware link (hlid) this key applies to.  Note the
	 * outer if/else is unbraced: the first "else" pairs with the inner
	 * "if (sta)", the final one with the bss_type check.
	 */
	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		if (sta) {
			struct wl1271_station *wl_sta = (void *)sta->drv_priv;
			hlid = wl_sta->hlid;
		} else {
			hlid = wlvif->ap.bcast_hlid;
		}
	else
		hlid = wlvif->sta.hlid;

	/* seed PN/IV counters from what the link has already transmitted */
	if (hlid != WL12XX_INVALID_LINK_ID) {
		u64 tx_seq = wl->links[hlid].total_freed_pkts;
		tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
		tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
	}

	switch (key_conf->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		key_type = KEY_WEP;

		key_conf->hw_key_idx = key_conf->keyidx;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		key_type = KEY_TKIP;
		key_conf->hw_key_idx = key_conf->keyidx;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_type = KEY_AES;
		/* firmware generates the CCMP IV; mac80211 leaves room for it */
		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
		break;
	case WL1271_CIPHER_SUITE_GEM:
		key_type = KEY_GEM;
		break;
	default:
		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);

		return -EOPNOTSUPP;
	}

	switch (cmd) {
	case SET_KEY:
		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
				 key_conf->keyidx, key_type,
				 key_conf->keylen, key_conf->key,
				 tx_seq_32, tx_seq_16, sta);
		if (ret < 0) {
			wl1271_error("Could not add or replace key");
			return ret;
		}

		/*
		 * reconfiguring arp response if the unicast (or common)
		 * encryption key type was changed
		 */
		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
		    (sta || key_type == KEY_WEP) &&
		    wlvif->encryption_type != key_type) {
			wlvif->encryption_type = key_type;
			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
			if (ret < 0) {
				wl1271_warning("build arp rsp failed: %d", ret);
				return ret;
			}
		}
		break;

	case DISABLE_KEY:
		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
				     key_conf->keyidx, key_type,
				     key_conf->keylen, key_conf->key,
				     0, 0, sta);
		if (ret < 0) {
			wl1271_error("Could not remove key");
			return ret;
		}
		break;

	default:
		wl1271_error("Unsupported key cmd 0x%x", cmd);
		return -EOPNOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(wlcore_set_key);
3599 EXPORT_SYMBOL_GPL(wlcore_set_key);
3600
/*
 * mac80211 .set_default_unicast_key callback.  Stores the new default
 * key index in the vif and, when WEP is in use, pushes the selection to
 * the firmware.  Unsetting the default key (key_idx == -1) is not
 * supported and is ignored.
 */
static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
					  struct ieee80211_vif *vif,
					  int key_idx)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
		     key_idx);

	/* we don't handle unsetting of default key */
	if (key_idx == -1)
		return;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out_unlock;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out_unlock;

	/* cached index is used by wl1271_ap_init_hwenc() as well */
	wlvif->default_key = key_idx;

	/* the default WEP key needs to be configured at least once */
	if (wlvif->encryption_type == KEY_WEP) {
		ret = wl12xx_cmd_set_default_wep_key(wl,
				key_idx,
				wlvif->sta.hlid);
		if (ret < 0)
			goto out_sleep;
	}

out_sleep:
	wl1271_ps_elp_sleep(wl);

out_unlock:
	mutex_unlock(&wl->mutex);
}
3644
/*
 * (Re)program the regulatory channel configuration into the firmware.
 * Only relevant for hardware with the REGDOMAIN_CONF quirk.  On failure
 * a full recovery is queued; note that this error path returns without
 * calling wl1271_ps_elp_sleep() - presumably acceptable because the
 * recovery restarts the chip (TODO confirm).
 */
void wlcore_regdomain_config(struct wl1271 *wl)
{
	int ret;

	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
		return;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wlcore_cmd_regdomain_config_locked(wl);
	if (ret < 0) {
		wl12xx_queue_recovery_work(wl);
		goto out;
	}

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
3671
/*
 * mac80211 .hw_scan callback.  Forwards the first requested SSID (if
 * any) and the request to wlcore_scan().  Scanning is refused with
 * -EBUSY while any role is in ROC (remain-on-channel).
 */
static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif,
			     struct ieee80211_scan_request *hw_req)
{
	struct cfg80211_scan_request *req = &hw_req->req;
	struct wl1271 *wl = hw->priv;
	int ret;
	u8 *ssid = NULL;
	size_t len = 0;

	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");

	/* only the first SSID of the request is passed down */
	if (req->n_ssids) {
		ssid = req->ssids[0].ssid;
		len = req->ssids[0].ssid_len;
	}

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		/*
		 * We cannot return -EBUSY here because cfg80211 will expect
		 * a call to ieee80211_scan_completed if we do - in this case
		 * there won't be any call.
		 */
		ret = -EAGAIN;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* fail if there is any role in ROC */
	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
		/* don't allow scanning right now */
		ret = -EBUSY;
		goto out_sleep;
	}

	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);

	return ret;
}
3720
/*
 * mac80211 .cancel_hw_scan callback.  Stops an in-progress firmware
 * scan, resets the driver scan state and reports the scan as aborted
 * (ieee80211_scan_completed with aborted == true).  The completion work
 * is flushed only after dropping wl->mutex - NOTE(review): presumably
 * because scan_complete_work takes the same mutex; confirm before
 * reordering.
 */
static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* nothing to cancel */
	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* a DONE scan needs no firmware stop, only the state reset below */
	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
		ret = wl->ops->scan_stop(wl, wlvif);
		if (ret < 0)
			goto out_sleep;
	}

	/*
	 * Rearm the tx watchdog just before idling scan. This
	 * prevents just-finished scans from triggering the watchdog
	 */
	wl12xx_rearm_tx_watchdog_locked(wl);

	wl->scan.state = WL1271_SCAN_STATE_IDLE;
	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
	wl->scan_wlvif = NULL;
	wl->scan.req = NULL;
	ieee80211_scan_completed(wl->hw, true);

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);

	cancel_delayed_work_sync(&wl->scan_complete_work);
}
3767
/*
 * mac80211 .sched_scan_start callback.  Delegates to the chip-family
 * handler and, on success, records which vif owns the scheduled scan
 * in wl->sched_vif.
 */
static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      struct cfg80211_sched_scan_request *req,
				      struct ieee80211_scan_ies *ies)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
	if (ret < 0)
		goto out_sleep;

	/* remember the owner so the scan can be stopped on disconnect */
	wl->sched_vif = wlvif;

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
	return ret;
}
3802
wl1271_op_sched_scan_stop(struct ieee80211_hw * hw,struct ieee80211_vif * vif)3803 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3804 struct ieee80211_vif *vif)
3805 {
3806 struct wl1271 *wl = hw->priv;
3807 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3808 int ret;
3809
3810 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3811
3812 mutex_lock(&wl->mutex);
3813
3814 if (unlikely(wl->state != WLCORE_STATE_ON))
3815 goto out;
3816
3817 ret = wl1271_ps_elp_wakeup(wl);
3818 if (ret < 0)
3819 goto out;
3820
3821 wl->ops->sched_scan_stop(wl, wlvif);
3822
3823 wl1271_ps_elp_sleep(wl);
3824 out:
3825 mutex_unlock(&wl->mutex);
3826
3827 return 0;
3828 }
3829
/*
 * mac80211 .set_frag_threshold callback: program the fragmentation
 * threshold into the firmware.  An ACX failure is logged but still
 * returned to mac80211.
 */
static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
{
	struct wl1271 *wl = hw->priv;
	int err = 0;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		err = -EAGAIN;
		goto out;
	}

	err = wl1271_ps_elp_wakeup(wl);
	if (err < 0)
		goto out;

	err = wl1271_acx_frag_threshold(wl, value);
	if (err < 0)
		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", err);

	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return err;
}
3857
/*
 * mac80211 .set_rts_threshold callback: program the RTS threshold on
 * every active interface.  Per-vif failures are logged but do not stop
 * the loop; the last result is returned.
 */
static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	int ret = 0;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl12xx_for_each_wlvif(wl, wlvif) {
		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
		if (ret < 0)
			wl1271_warning("set rts threshold failed: %d", ret);
	}
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
3887
/*
 * Strip the first information element with the given id from an skb,
 * searching from @ieoffset onward.  No-op if the IE is not present.
 */
static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
{
	u8 *ie;
	int ie_len;
	const u8 *rest, *tail;

	ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
				    skb->len - ieoffset);
	if (!ie)
		return;

	ie_len = ie[1] + 2;		/* 2-byte header + payload length */
	rest = ie + ie_len;		/* first byte after the IE */
	tail = skb->data + skb->len;

	/* close the gap, then shrink the skb accordingly */
	memmove(ie, rest, tail - rest);
	skb_trim(skb, skb->len - ie_len);
}
3901
/*
 * Strip the first vendor-specific IE matching @oui/@oui_type from an
 * skb, searching from @ieoffset onward.  No-op if not present.
 */
static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
				    unsigned int oui, u8 oui_type,
				    int ieoffset)
{
	u8 *ie;
	int ie_len;
	const u8 *rest, *tail;

	ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
					   skb->data + ieoffset,
					   skb->len - ieoffset);
	if (!ie)
		return;

	ie_len = ie[1] + 2;		/* 2-byte header + payload length */
	rest = ie + ie_len;		/* first byte after the IE */
	tail = skb->data + skb->len;

	/* close the gap, then shrink the skb accordingly */
	memmove(ie, rest, tail - rest);
	skb_trim(skb, skb->len - ie_len);
}
3918
/*
 * Upload the probe-response template supplied by usermode (via
 * mac80211) for an AP interface.  Marks the vif so that the
 * beacon-derived template is not used afterwards.  Returns -EOPNOTSUPP
 * when mac80211 has no probe response for this vif.
 */
static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
					 struct ieee80211_vif *vif)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct sk_buff *skb;
	int ret;

	skb = ieee80211_proberesp_get(wl->hw, vif);
	if (!skb)
		return -EOPNOTSUPP;

	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
				      CMD_TEMPL_AP_PROBE_RESPONSE,
				      skb->data,
				      skb->len, 0,
				      rates);
	/* template data has been copied to the fw; skb no longer needed */
	dev_kfree_skb(skb);

	if (ret < 0)
		return ret;

	wl1271_debug(DEBUG_AP, "probe response updated");
	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);

	return ret;
}
3946
/*
 * Upload a probe-response template built from beacon data, injecting
 * the real SSID from bss_conf when the vif's SSID is not set (hidden
 * SSID case).  The template is assembled in a stack buffer as:
 * [head up to SSID IE][SSID IE from bss_conf][rest after original SSID].
 */
static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
						struct ieee80211_vif *vif,
						u8 *probe_rsp_data,
						size_t probe_rsp_len,
						u32 rates)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
	int ssid_ie_offset, ie_offset, templ_len;
	const u8 *ptr;

	/* no need to change probe response if the SSID is set correctly */
	if (wlvif->ssid_len > 0)
		return wl1271_cmd_template_set(wl, wlvif->role_id,
					       CMD_TEMPL_AP_PROBE_RESPONSE,
					       probe_rsp_data,
					       probe_rsp_len, 0,
					       rates);

	/* the injected SSID must still fit into the template buffer */
	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
		wl1271_error("probe_rsp template too big");
		return -EINVAL;
	}

	/* start searching from IE offset */
	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);

	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
			       probe_rsp_len - ie_offset);
	if (!ptr) {
		wl1271_error("No SSID in beacon!");
		return -EINVAL;
	}

	ssid_ie_offset = ptr - probe_rsp_data;
	/* advance past the original (empty/hidden) SSID IE */
	ptr += (ptr[1] + 2);

	/* everything up to the SSID IE is copied verbatim */
	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);

	/* insert SSID from bss_conf */
	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
	       bss_conf->ssid, bss_conf->ssid_len);
	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;

	/* append the IEs that followed the original SSID IE */
	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
	templ_len += probe_rsp_len - (ptr - probe_rsp_data);

	return wl1271_cmd_template_set(wl, wlvif->role_id,
				       CMD_TEMPL_AP_PROBE_RESPONSE,
				       probe_rsp_templ,
				       templ_len, 0,
				       rates);
}
4004
/*
 * Apply ERP-related bss_conf changes: slot time, preamble and CTS
 * protection.  NOTE(review): the preamble ACX call's return value is
 * not checked, unlike the other two - confirm whether that is
 * intentional before changing it.
 */
static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret = 0;

	if (changed & BSS_CHANGED_ERP_SLOT) {
		if (bss_conf->use_short_slot)
			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
		else
			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
		if (ret < 0) {
			wl1271_warning("Set slot time failed %d", ret);
			goto out;
		}
	}

	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
		if (bss_conf->use_short_preamble)
			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
		else
			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
	}

	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
		if (bss_conf->use_cts_prot)
			ret = wl1271_acx_cts_protect(wl, wlvif,
						     CTSPROTECT_ENABLE);
		else
			ret = wl1271_acx_cts_protect(wl, wlvif,
						     CTSPROTECT_DISABLE);
		if (ret < 0) {
			wl1271_warning("Set ctsprotect failed %d", ret);
			goto out;
		}
	}

out:
	return ret;
}
4047
/*
 * Build and upload the beacon template from mac80211's beacon, and -
 * unless usermode has already provided an explicit probe response (see
 * WLVIF_FLAG_AP_PROBE_RESP_SET) - derive a probe-response template from
 * the same data by stripping the TIM and P2P IEs and rewriting the
 * frame control field.  Also records whether the beacon advertises WMM.
 * The beacon skb is freed on every exit path.
 */
static int wlcore_set_beacon_template(struct wl1271 *wl,
				      struct ieee80211_vif *vif,
				      bool is_ap)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct ieee80211_hdr *hdr;
	u32 min_rate;
	int ret;
	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
	u16 tmpl_id;

	if (!beacon) {
		ret = -EINVAL;
		goto out;
	}

	wl1271_debug(DEBUG_MASTER, "beacon updated");

	/* keep the vif's cached SSID in sync with the beacon contents */
	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
	if (ret < 0) {
		dev_kfree_skb(beacon);
		goto out;
	}
	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
	/* AP and STA/IBSS roles use different firmware template slots */
	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
			  CMD_TEMPL_BEACON;
	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
				      beacon->data,
				      beacon->len, 0,
				      min_rate);
	if (ret < 0) {
		dev_kfree_skb(beacon);
		goto out;
	}

	/* WMM is considered enabled iff the beacon carries the WMM IE */
	wlvif->wmm_enabled =
		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
					WLAN_OUI_TYPE_MICROSOFT_WMM,
					beacon->data + ieoffset,
					beacon->len - ieoffset);

	/*
	 * In case we already have a probe-resp beacon set explicitly
	 * by usermode, don't use the beacon data.
	 */
	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
		goto end_bcn;

	/* remove TIM ie from probe response */
	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);

	/*
	 * remove p2p ie from probe response.
	 * the fw reponds to probe requests that don't include
	 * the p2p ie. probe requests with p2p ie will be passed,
	 * and will be responded by the supplicant (the spec
	 * forbids including the p2p ie when responding to probe
	 * requests that didn't include it).
	 */
	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
				WLAN_OUI_TYPE_WFA_P2P, ieoffset);

	/* turn the beacon frame into a probe response frame */
	hdr = (struct ieee80211_hdr *) beacon->data;
	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					 IEEE80211_STYPE_PROBE_RESP);
	if (is_ap)
		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
					beacon->data,
					beacon->len,
					min_rate);
	else
		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
					CMD_TEMPL_PROBE_RESPONSE,
					beacon->data,
					beacon->len, 0,
					min_rate);
end_bcn:
	dev_kfree_skb(beacon);
	if (ret < 0)
		goto out;

out:
	return ret;
}
4133
/*
 * Handle beacon-related bss_conf changes: beacon interval, an explicit
 * usermode probe response (AP only), and the beacon template itself.
 * After a beacon change, a DFS master restart is issued if beaconing
 * had been disabled.  NOTE(review): the probe-response template upload
 * result is not checked here - confirm whether that is intentional.
 */
static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
					  struct ieee80211_vif *vif,
					  struct ieee80211_bss_conf *bss_conf,
					  u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
	int ret = 0;

	if (changed & BSS_CHANGED_BEACON_INT) {
		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
			bss_conf->beacon_int);

		wlvif->beacon_int = bss_conf->beacon_int;
	}

	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);

		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
	}

	if (changed & BSS_CHANGED_BEACON) {
		ret = wlcore_set_beacon_template(wl, vif, is_ap);
		if (ret < 0)
			goto out;

		/* beaconing was off (e.g. CAC) - restart the DFS master */
		if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
				       &wlvif->flags)) {
			ret = wlcore_hw_dfs_master_restart(wl, wlvif);
			if (ret < 0)
				goto out;
		}
	}
out:
	if (ret != 0)
		wl1271_error("beacon info change failed: %d", ret);
	return ret;
}
4173
/* AP mode changes */
/*
 * Handle bss_info changes for an AP interface: basic rates (which also
 * forces templates to be rebuilt), beacon info, starting/stopping the
 * AP role when beaconing is toggled, ERP parameters and HT operation
 * mode.  Errors abort the remaining steps but are not propagated (the
 * mac80211 callback returns void).
 */
static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret = 0;

	if (changed & BSS_CHANGED_BASIC_RATES) {
		u32 rates = bss_conf->basic_rates;

		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
								 wlvif->band);
		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
							wlvif->basic_rate_set);

		ret = wl1271_init_ap_rates(wl, wlvif);
		if (ret < 0) {
			wl1271_error("AP rate policy change failed %d", ret);
			goto out;
		}

		/* templates depend on the rate set; rebuild them all */
		ret = wl1271_ap_init_templates(wl, vif);
		if (ret < 0)
			goto out;

		ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
		if (ret < 0)
			goto out;

		ret = wlcore_set_beacon_template(wl, vif, true);
		if (ret < 0)
			goto out;
	}

	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
	if (ret < 0)
		goto out;

	if (changed & BSS_CHANGED_BEACON_ENABLED) {
		if (bss_conf->enable_beacon) {
			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
				if (ret < 0)
					goto out;

				/* replay keys recorded before role start */
				ret = wl1271_ap_init_hwenc(wl, wlvif);
				if (ret < 0)
					goto out;

				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
				wl1271_debug(DEBUG_AP, "started AP");
			}
		} else {
			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
				/*
				 * AP might be in ROC in case we have just
				 * sent auth reply. handle it.
				 */
				if (test_bit(wlvif->role_id, wl->roc_map))
					wl12xx_croc(wl, wlvif->role_id);

				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
				if (ret < 0)
					goto out;

				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
					  &wlvif->flags);
				wl1271_debug(DEBUG_AP, "stopped AP");
			}
		}
	}

	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
	if (ret < 0)
		goto out;

	/* Handle HT information change */
	if ((changed & BSS_CHANGED_HT) &&
	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
		ret = wl1271_acx_set_ht_information(wl, wlvif,
					bss_conf->ht_operation_mode);
		if (ret < 0) {
			wl1271_warning("Set ht information failed %d", ret);
			goto out;
		}
	}

out:
	return;
}
4267
/*
 * Prepare a station vif for a newly learned BSSID: cache the beacon
 * interval and rate sets, stop any scheduled scan owned by this vif
 * (sched_scan is only supported while disconnected), push the rate
 * policies and null-data templates, and mark the vif in use.
 */
static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			    struct ieee80211_bss_conf *bss_conf,
			    u32 sta_rate_set)
{
	u32 rates;
	int ret;

	wl1271_debug(DEBUG_MAC80211,
	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
	     bss_conf->bssid, bss_conf->aid,
	     bss_conf->beacon_int,
	     bss_conf->basic_rates, sta_rate_set);

	wlvif->beacon_int = bss_conf->beacon_int;
	rates = bss_conf->basic_rates;
	wlvif->basic_rate_set =
		wl1271_tx_enabled_rates_get(wl, rates,
					    wlvif->band);
	wlvif->basic_rate =
		wl1271_tx_min_rate_get(wl,
				       wlvif->basic_rate_set);

	/* zero sta_rate_set means "no update" - keep the current set */
	if (sta_rate_set)
		wlvif->rate_set =
			wl1271_tx_enabled_rates_get(wl,
						sta_rate_set,
						wlvif->band);

	/* we only support sched_scan while not connected */
	if (wl->sched_vif == wlvif)
		wl->ops->sched_scan_stop(wl, wlvif);

	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
	if (ret < 0)
		return ret;

	ret = wl12xx_cmd_build_null_data(wl, wlvif);
	if (ret < 0)
		return ret;

	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
	if (ret < 0)
		return ret;

	wlcore_set_ssid(wl, wlvif);

	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);

	return 0;
}
4318
wlcore_clear_bssid(struct wl1271 * wl,struct wl12xx_vif * wlvif)4319 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4320 {
4321 int ret;
4322
4323 /* revert back to minimum rates for the current band */
4324 wl1271_set_band_rate(wl, wlvif);
4325 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4326
4327 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4328 if (ret < 0)
4329 return ret;
4330
4331 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4332 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4333 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4334 if (ret < 0)
4335 return ret;
4336 }
4337
4338 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4339 return 0;
4340 }
4341 /* STA/IBSS mode changes */
/*
 * Handle mac80211 BSS configuration changes for STA and IBSS roles.
 *
 * Processes the @changed bitmap in a fixed order: IBSS beacon/join state,
 * CQM thresholds, peer rate/HT discovery, BSSID set/clear, rate policies,
 * beacon filtering, ERP settings, (re)join, association state, power-save
 * mode, HT peer capabilities and finally ARP filtering.  Errors abort the
 * remaining steps; the function itself returns void (mac80211 callback).
 * Called with wl->mutex held and the FW awake.
 */
static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
					struct ieee80211_vif *vif,
					struct ieee80211_bss_conf *bss_conf,
					u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool do_join = false;
	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
	bool ibss_joined = false;
	u32 sta_rate_set = 0;
	int ret;
	struct ieee80211_sta *sta;
	bool sta_exists = false;
	struct ieee80211_sta_ht_cap sta_ht_cap;

	if (is_ibss) {
		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
						     changed);
		if (ret < 0)
			goto out;
	}

	/* track IBSS join/leave; leaving also stops the STA role in FW */
	if (changed & BSS_CHANGED_IBSS) {
		if (bss_conf->ibss_joined) {
			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
			ibss_joined = true;
		} else {
			wlcore_unset_assoc(wl, wlvif);
			wl12xx_cmd_role_stop_sta(wl, wlvif);
		}
	}

	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
		do_join = true;

	/* Need to update the SSID (for filtering etc) */
	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
		do_join = true;

	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
			     bss_conf->enable_beacon ? "enabled" : "disabled");

		do_join = true;
	}

	if (changed & BSS_CHANGED_IDLE && !is_ibss)
		wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);

	/* connection-quality monitoring: program RSSI trigger in FW */
	if (changed & BSS_CHANGED_CQM) {
		bool enable = false;
		if (bss_conf->cqm_rssi_thold)
			enable = true;
		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
						  bss_conf->cqm_rssi_thold,
						  bss_conf->cqm_rssi_hyst);
		if (ret < 0)
			goto out;
		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
	}

	/* snapshot the peer's rates and HT caps under RCU for use below */
	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
		       BSS_CHANGED_ASSOC)) {
		rcu_read_lock();
		sta = ieee80211_find_sta(vif, bss_conf->bssid);
		if (sta) {
			u8 *rx_mask = sta->ht_cap.mcs.rx_mask;

			/* save the supp_rates of the ap */
			sta_rate_set = sta->supp_rates[wlvif->band];
			if (sta->ht_cap.ht_supported)
				sta_rate_set |=
					(rx_mask[0] << HW_HT_RATES_OFFSET) |
					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
			sta_ht_cap = sta->ht_cap;
			sta_exists = true;
		}

		rcu_read_unlock();
	}

	/* an all-zero BSSID means "clear" rather than "set" */
	if (changed & BSS_CHANGED_BSSID) {
		if (!is_zero_ether_addr(bss_conf->bssid)) {
			ret = wlcore_set_bssid(wl, wlvif, bss_conf,
					       sta_rate_set);
			if (ret < 0)
				goto out;

			/* Need to update the BSSID (for filtering etc) */
			do_join = true;
		} else {
			ret = wlcore_clear_bssid(wl, wlvif);
			if (ret < 0)
				goto out;
		}
	}

	if (changed & BSS_CHANGED_IBSS) {
		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
			     bss_conf->ibss_joined);

		if (bss_conf->ibss_joined) {
			u32 rates = bss_conf->basic_rates;
			wlvif->basic_rate_set =
				wl1271_tx_enabled_rates_get(wl, rates,
							    wlvif->band);
			wlvif->basic_rate =
				wl1271_tx_min_rate_get(wl,
						       wlvif->basic_rate_set);

			/* by default, use 11b + OFDM rates */
			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
			if (ret < 0)
				goto out;
		}
	}

	if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
		/* enable beacon filtering */
		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
		if (ret < 0)
			goto out;
	}

	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
	if (ret < 0)
		goto out;

	if (do_join) {
		ret = wlcore_join(wl, wlvif);
		if (ret < 0) {
			wl1271_warning("cmd join failed %d", ret);
			goto out;
		}
	}

	if (changed & BSS_CHANGED_ASSOC) {
		if (bss_conf->assoc) {
			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
					       sta_rate_set);
			if (ret < 0)
				goto out;

			/* re-authorize if we were authorized before a reassoc */
			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
				wl12xx_set_authorized(wl, wlvif);
		} else {
			wlcore_unset_assoc(wl, wlvif);
		}
	}

	/* enter/leave FW power save following mac80211's PS state */
	if (changed & BSS_CHANGED_PS) {
		if ((bss_conf->ps) &&
		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
			int ps_mode;
			char *ps_mode_str;

			if (wl->conf.conn.forced_ps) {
				ps_mode = STATION_POWER_SAVE_MODE;
				ps_mode_str = "forced";
			} else {
				ps_mode = STATION_AUTO_PS_MODE;
				ps_mode_str = "auto";
			}

			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);

			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
			if (ret < 0)
				wl1271_warning("enter %s ps failed %d",
					       ps_mode_str, ret);
		} else if (!bss_conf->ps &&
			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
			wl1271_debug(DEBUG_PSM, "auto ps disabled");

			ret = wl1271_ps_set_mode(wl, wlvif,
						 STATION_ACTIVE_MODE);
			if (ret < 0)
				wl1271_warning("exit auto ps failed %d", ret);
		}
	}

	/* Handle new association with HT. Do this after join. */
	if (sta_exists) {
		bool enabled =
			bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;

		ret = wlcore_hw_set_peer_cap(wl,
					     &sta_ht_cap,
					     enabled,
					     wlvif->rate_set,
					     wlvif->sta.hlid);
		if (ret < 0) {
			wl1271_warning("Set ht cap failed %d", ret);
			goto out;

		}

		if (enabled) {
			ret = wl1271_acx_set_ht_information(wl, wlvif,
						bss_conf->ht_operation_mode);
			if (ret < 0) {
				wl1271_warning("Set ht information failed %d",
					       ret);
				goto out;
			}
		}
	}

	/* Handle arp filtering. Done after join. */
	if ((changed & BSS_CHANGED_ARP_FILTER) ||
	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
		__be32 addr = bss_conf->arp_addr_list[0];
		wlvif->sta.qos = bss_conf->qos;
		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);

		/* ARP auto-reply is only supported for a single IPv4 address */
		if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
			wlvif->ip_addr = addr;
			/*
			 * The template should have been configured only upon
			 * association. however, it seems that the correct ip
			 * isn't being set (when sending), so we have to
			 * reconfigure the template upon every ip change.
			 */
			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
			if (ret < 0) {
				wl1271_warning("build arp rsp failed: %d", ret);
				goto out;
			}

			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
				(ACX_ARP_FILTER_ARP_FILTERING |
				 ACX_ARP_FILTER_AUTO_ARP),
				addr);
		} else {
			wlvif->ip_addr = 0;
			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
		}

		if (ret < 0)
			goto out;
	}

out:
	return;
}
4589
/*
 * mac80211 bss_info_changed callback: top-level dispatcher that wakes the
 * FW, applies a TX-power change if requested, and delegates the rest of
 * the @changed bitmap to the AP- or STA-specific handler.
 */
static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
		     wlvif->role_id, (int)changed);

	/*
	 * make sure to cancel pending disconnections if our association
	 * state changed
	 */
	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
		cancel_delayed_work_sync(&wlvif->connection_loss_work);

	/* flush queued frames before beaconing is disabled on an AP */
	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
	    !bss_conf->enable_beacon)
		wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	if ((changed & BSS_CHANGED_TXPOWER) &&
	    bss_conf->txpower != wlvif->power_level) {

		ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
		if (ret < 0)
			goto out;

		wlvif->power_level = bss_conf->txpower;
	}

	if (is_ap)
		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
	else
		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);

	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
}
4646
wlcore_op_add_chanctx(struct ieee80211_hw * hw,struct ieee80211_chanctx_conf * ctx)4647 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4648 struct ieee80211_chanctx_conf *ctx)
4649 {
4650 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4651 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4652 cfg80211_get_chandef_type(&ctx->def));
4653 return 0;
4654 }
4655
wlcore_op_remove_chanctx(struct ieee80211_hw * hw,struct ieee80211_chanctx_conf * ctx)4656 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4657 struct ieee80211_chanctx_conf *ctx)
4658 {
4659 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4660 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4661 cfg80211_get_chandef_type(&ctx->def));
4662 }
4663
/*
 * mac80211 change_chanctx callback: walk all vifs bound to @ctx and start
 * radar detection (CAC) on AP roles when the context newly enables radar
 * on a usable DFS channel.
 */
static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
				     struct ieee80211_chanctx_conf *ctx,
				     u32 changed)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	int ret;
	int channel = ieee80211_frequency_to_channel(
		ctx->def.chan->center_freq);

	wl1271_debug(DEBUG_MAC80211,
		     "mac80211 change chanctx %d (type %d) changed 0x%x",
		     channel, cfg80211_get_chandef_type(&ctx->def), changed);

	mutex_lock(&wl->mutex);

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl12xx_for_each_wlvif(wl, wlvif) {
		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

		/* only act on vifs that are actually assigned to this ctx */
		rcu_read_lock();
		if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
			rcu_read_unlock();
			continue;
		}
		rcu_read_unlock();

		/* start radar if needed */
		if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
		    ctx->radar_enabled && !wlvif->radar_enabled &&
		    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
			wl1271_debug(DEBUG_MAC80211, "Start radar detection");
			wlcore_hw_set_cac(wl, wlvif, true);
			wlvif->radar_enabled = true;
		}
	}

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
4709
/*
 * mac80211 assign_vif_chanctx callback: adopt the context's band/channel
 * into the vif, refresh the band's default rates, and kick off radar
 * detection (CAC) when required on a usable DFS channel.
 */
static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_chanctx_conf *ctx)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int channel = ieee80211_frequency_to_channel(
		ctx->def.chan->center_freq);
	/*
	 * NOTE(review): ret only gates the wakeup path below; the function
	 * always reports success to mac80211 (return 0) — confirm intended.
	 */
	int ret = -EINVAL;

	wl1271_debug(DEBUG_MAC80211,
		     "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
		     wlvif->role_id, channel,
		     cfg80211_get_chandef_type(&ctx->def),
		     ctx->radar_enabled, ctx->def.chan->dfs_state);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wlvif->band = ctx->def.chan->band;
	wlvif->channel = channel;
	wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);

	/* update default rates according to the band */
	wl1271_set_band_rate(wl, wlvif);

	if (ctx->radar_enabled &&
	    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
		wlcore_hw_set_cac(wl, wlvif, true);
		wlvif->radar_enabled = true;
	}

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);

	return 0;
}
4758
/*
 * mac80211 unassign_vif_chanctx callback: flush pending TX and stop any
 * running radar detection (CAC) on the vif before it leaves the context.
 */
static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
					   struct ieee80211_vif *vif,
					   struct ieee80211_chanctx_conf *ctx)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211,
		     "mac80211 unassign chanctx (role %d) %d (type %d)",
		     wlvif->role_id,
		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
		     cfg80211_get_chandef_type(&ctx->def));

	/* flush outside the mutex; wl1271_tx_flush takes its own locks */
	wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	if (wlvif->radar_enabled) {
		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
		wlcore_hw_set_cac(wl, wlvif, false);
		wlvif->radar_enabled = false;
	}

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
4797
/*
 * Move a single (AP) vif onto @new_ctx: stop CAC on the old channel,
 * adopt the new band/channel/type, then restart CAC if the new context
 * has radar enabled.  Caller holds wl->mutex with the FW awake.
 * Always returns 0 (non-AP roles are rejected with a WARN).
 */
static int __wlcore_switch_vif_chan(struct wl1271 *wl,
				    struct wl12xx_vif *wlvif,
				    struct ieee80211_chanctx_conf *new_ctx)
{
	int channel = ieee80211_frequency_to_channel(
		new_ctx->def.chan->center_freq);

	wl1271_debug(DEBUG_MAC80211,
		     "switch vif (role %d) %d -> %d chan_type: %d",
		     wlvif->role_id, wlvif->channel, channel,
		     cfg80211_get_chandef_type(&new_ctx->def));

	if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
		return 0;

	/* beaconing is expected to be disabled during the switch */
	WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));

	if (wlvif->radar_enabled) {
		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
		wlcore_hw_set_cac(wl, wlvif, false);
		wlvif->radar_enabled = false;
	}

	wlvif->band = new_ctx->def.chan->band;
	wlvif->channel = channel;
	wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);

	/* start radar if needed */
	if (new_ctx->radar_enabled) {
		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
		wlcore_hw_set_cac(wl, wlvif, true);
		wlvif->radar_enabled = true;
	}

	return 0;
}
4834
/*
 * mac80211 switch_vif_chanctx callback: apply __wlcore_switch_vif_chan()
 * to each vif in @vifs.
 *
 * NOTE(review): per-vif errors abort the loop but the function still
 * returns 0 to mac80211 — confirm this is intended.
 */
static int
wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
			     struct ieee80211_vif_chanctx_switch *vifs,
			     int n_vifs,
			     enum ieee80211_chanctx_switch_mode mode)
{
	struct wl1271 *wl = hw->priv;
	int i, ret;

	wl1271_debug(DEBUG_MAC80211,
		     "mac80211 switch chanctx n_vifs %d mode %d",
		     n_vifs, mode);

	mutex_lock(&wl->mutex);

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	for (i = 0; i < n_vifs; i++) {
		struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);

		ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
		if (ret)
			goto out_sleep;
	}
out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);

	return 0;
}
4868
/*
 * mac80211 conf_tx callback: program the EDCA parameters (cw_min/cw_max,
 * AIFS, TXOP) and the TID configuration (including the U-APSD vs legacy
 * PS scheme) for one hardware queue.  P2P management vifs are skipped.
 */
static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif, u16 queue,
			     const struct ieee80211_tx_queue_params *params)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	u8 ps_scheme;
	int ret = 0;

	if (wlcore_is_p2p_mgmt(wlvif))
		return 0;

	mutex_lock(&wl->mutex);

	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);

	if (params->uapsd)
		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
	else
		ps_scheme = CONF_PS_SCHEME_LEGACY;

	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/*
	 * the txop is confed in units of 32us by the mac80211,
	 * we need us
	 */
	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
				params->cw_min, params->cw_max,
				params->aifs, params->txop << 5);
	if (ret < 0)
		goto out_sleep;

	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
				 CONF_CHANNEL_TYPE_EDCF,
				 wl1271_tx_get_queue(queue),
				 ps_scheme, CONF_ACK_POLICY_LEGACY,
				 0, 0);

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
4921
wl1271_op_get_tsf(struct ieee80211_hw * hw,struct ieee80211_vif * vif)4922 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4923 struct ieee80211_vif *vif)
4924 {
4925
4926 struct wl1271 *wl = hw->priv;
4927 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4928 u64 mactime = ULLONG_MAX;
4929 int ret;
4930
4931 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4932
4933 mutex_lock(&wl->mutex);
4934
4935 if (unlikely(wl->state != WLCORE_STATE_ON))
4936 goto out;
4937
4938 ret = wl1271_ps_elp_wakeup(wl);
4939 if (ret < 0)
4940 goto out;
4941
4942 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4943 if (ret < 0)
4944 goto out_sleep;
4945
4946 out_sleep:
4947 wl1271_ps_elp_sleep(wl);
4948
4949 out:
4950 mutex_unlock(&wl->mutex);
4951 return mactime;
4952 }
4953
wl1271_op_get_survey(struct ieee80211_hw * hw,int idx,struct survey_info * survey)4954 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4955 struct survey_info *survey)
4956 {
4957 struct ieee80211_conf *conf = &hw->conf;
4958
4959 if (idx != 0)
4960 return -ENOENT;
4961
4962 survey->channel = conf->chandef.chan;
4963 survey->filled = 0;
4964 return 0;
4965 }
4966
wl1271_allocate_sta(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct ieee80211_sta * sta)4967 static int wl1271_allocate_sta(struct wl1271 *wl,
4968 struct wl12xx_vif *wlvif,
4969 struct ieee80211_sta *sta)
4970 {
4971 struct wl1271_station *wl_sta;
4972 int ret;
4973
4974
4975 if (wl->active_sta_count >= wl->max_ap_stations) {
4976 wl1271_warning("could not allocate HLID - too much stations");
4977 return -EBUSY;
4978 }
4979
4980 wl_sta = (struct wl1271_station *)sta->drv_priv;
4981 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4982 if (ret < 0) {
4983 wl1271_warning("could not allocate HLID - too many links");
4984 return -EBUSY;
4985 }
4986
4987 /* use the previous security seq, if this is a recovery/resume */
4988 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4989
4990 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4991 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4992 wl->active_sta_count++;
4993 return 0;
4994 }
4995
/*
 * Release an AP-mode station's HLID and all of its per-link driver state.
 * Safe to call for an hlid that was never allocated (no-op).
 */
void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
{
	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
		return;

	clear_bit(hlid, wlvif->ap.sta_hlid_map);
	__clear_bit(hlid, &wl->ap_ps_map);
	__clear_bit(hlid, &wl->ap_fw_ps_map);

	/* stash the last used PN in the sta's private data for recovery/suspend */
	wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);

	wl12xx_free_link(wl, wlvif, &hlid);

	/*
	 * rearm the tx watchdog once no stations remain - give the FW a
	 * chance to return STA-buffered packets before complaining.
	 */
	if (--wl->active_sta_count == 0)
		wl12xx_rearm_tx_watchdog_locked(wl);
}
5021
wl12xx_sta_add(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct ieee80211_sta * sta)5022 static int wl12xx_sta_add(struct wl1271 *wl,
5023 struct wl12xx_vif *wlvif,
5024 struct ieee80211_sta *sta)
5025 {
5026 struct wl1271_station *wl_sta;
5027 int ret = 0;
5028 u8 hlid;
5029
5030 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
5031
5032 ret = wl1271_allocate_sta(wl, wlvif, sta);
5033 if (ret < 0)
5034 return ret;
5035
5036 wl_sta = (struct wl1271_station *)sta->drv_priv;
5037 hlid = wl_sta->hlid;
5038
5039 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
5040 if (ret < 0)
5041 wl1271_free_sta(wl, wlvif, hlid);
5042
5043 return ret;
5044 }
5045
wl12xx_sta_remove(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct ieee80211_sta * sta)5046 static int wl12xx_sta_remove(struct wl1271 *wl,
5047 struct wl12xx_vif *wlvif,
5048 struct ieee80211_sta *sta)
5049 {
5050 struct wl1271_station *wl_sta;
5051 int ret = 0, id;
5052
5053 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5054
5055 wl_sta = (struct wl1271_station *)sta->drv_priv;
5056 id = wl_sta->hlid;
5057 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5058 return -EINVAL;
5059
5060 ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5061 if (ret < 0)
5062 return ret;
5063
5064 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5065 return ret;
5066 }
5067
wlcore_roc_if_possible(struct wl1271 * wl,struct wl12xx_vif * wlvif)5068 static void wlcore_roc_if_possible(struct wl1271 *wl,
5069 struct wl12xx_vif *wlvif)
5070 {
5071 if (find_first_bit(wl->roc_map,
5072 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5073 return;
5074
5075 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5076 return;
5077
5078 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5079 }
5080
5081 /*
5082 * when wl_sta is NULL, we treat this call as if coming from a
5083 * pending auth reply.
5084 * wl->mutex must be taken and the FW must be awake when the call
5085 * takes place.
5086 */
/*
 * Track stations that are mid-connection on an AP role and keep a ROC
 * alive while any exist.
 *
 * @in_conn true: mark @wl_sta as connecting (or record a pending auth
 * reply when @wl_sta is NULL) and start a ROC if this is the first one.
 * @in_conn false: the inverse; the ROC is cancelled once neither an
 * in-connection station nor a pending auth reply remains.
 *
 * wl->mutex must be held and the FW awake when this is called.
 */
void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			      struct wl1271_station *wl_sta, bool in_conn)
{
	if (in_conn) {
		/* a station must not be marked as connecting twice */
		if (WARN_ON(wl_sta && wl_sta->in_connection))
			return;

		/* first in-connection entity on this role -> try to ROC */
		if (!wlvif->ap_pending_auth_reply &&
		    !wlvif->inconn_count)
			wlcore_roc_if_possible(wl, wlvif);

		if (wl_sta) {
			wl_sta->in_connection = true;
			wlvif->inconn_count++;
		} else {
			wlvif->ap_pending_auth_reply = true;
		}
	} else {
		/* leaving a station that never entered is a silent no-op */
		if (wl_sta && !wl_sta->in_connection)
			return;

		if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
			return;

		if (WARN_ON(wl_sta && !wlvif->inconn_count))
			return;

		if (wl_sta) {
			wl_sta->in_connection = false;
			wlvif->inconn_count--;
		} else {
			wlvif->ap_pending_auth_reply = false;
		}

		/* last one out cancels the ROC held for this role */
		if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
		    test_bit(wlvif->role_id, wl->roc_map))
			wl12xx_croc(wl, wlvif->role_id);
	}
}
5126
/*
 * Apply a mac80211 station state transition (old_state -> new_state) to
 * the firmware, for both AP roles (add/remove/authorize peers) and STA
 * roles (authorization, seq-number save/restore across suspend, ROC
 * management during connection setup).  Called with wl->mutex held and
 * the FW awake.
 */
static int wl12xx_update_sta_state(struct wl1271 *wl,
				   struct wl12xx_vif *wlvif,
				   struct ieee80211_sta *sta,
				   enum ieee80211_sta_state old_state,
				   enum ieee80211_sta_state new_state)
{
	struct wl1271_station *wl_sta;
	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
	int ret;

	wl_sta = (struct wl1271_station *)sta->drv_priv;

	/* Add station (AP mode) */
	if (is_ap &&
	    old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE) {
		ret = wl12xx_sta_add(wl, wlvif, sta);
		if (ret)
			return ret;

		/* hold a ROC while the new peer is connecting */
		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
	}

	/* Remove station (AP mode) */
	if (is_ap &&
	    old_state == IEEE80211_STA_NONE &&
	    new_state == IEEE80211_STA_NOTEXIST) {
		/* must not fail */
		wl12xx_sta_remove(wl, wlvif, sta);

		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
	}

	/* Authorize station (AP mode) */
	if (is_ap &&
	    new_state == IEEE80211_STA_AUTHORIZED) {
		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
		if (ret < 0)
			return ret;

		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
						     wl_sta->hlid);
		if (ret)
			return ret;

		/* peer is fully connected now; release the connection ROC */
		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
	}

	/* Authorize station */
	if (is_sta &&
	    new_state == IEEE80211_STA_AUTHORIZED) {
		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
		ret = wl12xx_set_authorized(wl, wlvif);
		if (ret)
			return ret;
	}

	/* de-authorize on the way back down from AUTHORIZED */
	if (is_sta &&
	    old_state == IEEE80211_STA_AUTHORIZED &&
	    new_state == IEEE80211_STA_ASSOC) {
		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
	}

	/* save seq number on disassoc (suspend) */
	if (is_sta &&
	    old_state == IEEE80211_STA_ASSOC &&
	    new_state == IEEE80211_STA_AUTH) {
		wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
		wlvif->total_freed_pkts = 0;
	}

	/* restore seq number on assoc (resume) */
	if (is_sta &&
	    old_state == IEEE80211_STA_AUTH &&
	    new_state == IEEE80211_STA_ASSOC) {
		wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
	}

	/* clear ROCs on failure or authorization */
	if (is_sta &&
	    (new_state == IEEE80211_STA_AUTHORIZED ||
	     new_state == IEEE80211_STA_NOTEXIST)) {
		if (test_bit(wlvif->role_id, wl->roc_map))
			wl12xx_croc(wl, wlvif->role_id);
	}

	/* new STA connection attempt: take a ROC if none is active */
	if (is_sta &&
	    old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE) {
		if (find_first_bit(wl->roc_map,
				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
			wl12xx_roc(wl, wlvif, wlvif->role_id,
				   wlvif->band, wlvif->channel);
		}
	}
	return 0;
}
5227
/*
 * mac80211 sta_state callback: wake the FW and delegate the transition to
 * wl12xx_update_sta_state().  Errors are suppressed (return 0) for
 * downward transitions so mac80211 can always tear a station down.
 */
static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta,
			       enum ieee80211_sta_state old_state,
			       enum ieee80211_sta_state new_state)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
		     sta->aid, old_state, new_state);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EBUSY;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
	/* never fail a downward (teardown) transition */
	if (new_state < old_state)
		return 0;
	return ret;
}
5261
/*
 * mac80211 ampdu_action callback: manage RX block-ack sessions in the
 * firmware.  RX_START/RX_STOP program the FW BA receiver session and
 * maintain the per-link ba_bitmap and the global session count; all TX
 * AMPDU actions are rejected because the FW handles the TX BA initiator
 * on its own.
 */
static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  enum ieee80211_ampdu_mlme_action action,
				  struct ieee80211_sta *sta, u16 tid, u16 *ssn,
				  u8 buf_size, bool amsdu)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;
	u8 hlid, *ba_bitmap;

	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
		     tid);

	/* sanity check - the fields in FW are only 8bits wide */
	if (WARN_ON(tid > 0xFF))
		return -ENOTSUPP;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out;
	}

	/* resolve the link (HLID) this BA session belongs to */
	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
		hlid = wlvif->sta.hlid;
	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
		struct wl1271_station *wl_sta;

		wl_sta = (struct wl1271_station *)sta->drv_priv;
		hlid = wl_sta->hlid;
	} else {
		ret = -EINVAL;
		goto out;
	}

	ba_bitmap = &wl->links[hlid].ba_bitmap;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
		     tid, action);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		if (!wlvif->ba_support || !wlvif->ba_allowed) {
			ret = -ENOTSUPP;
			break;
		}

		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
			ret = -EBUSY;
			wl1271_error("exceeded max RX BA sessions");
			break;
		}

		if (*ba_bitmap & BIT(tid)) {
			ret = -EINVAL;
			wl1271_error("cannot enable RX BA session on active "
				     "tid: %d", tid);
			break;
		}

		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
							 hlid);
		if (!ret) {
			*ba_bitmap |= BIT(tid);
			wl->ba_rx_session_count++;
		}
		break;

	case IEEE80211_AMPDU_RX_STOP:
		if (!(*ba_bitmap & BIT(tid))) {
			/*
			 * this happens on reconfig - so only output a debug
			 * message for now, and don't fail the function.
			 */
			wl1271_debug(DEBUG_MAC80211,
				     "no active RX BA session on tid: %d",
				     tid);
			ret = 0;
			break;
		}

		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
							 hlid);
		if (!ret) {
			*ba_bitmap &= ~BIT(tid);
			wl->ba_rx_session_count--;
		}
		break;

	/*
	 * The TX BA initiator session is managed by the FW independently,
	 * so all TX AMPDU commands are rejected here on purpose.
	 */
	case IEEE80211_AMPDU_TX_START:
	case IEEE80211_AMPDU_TX_STOP_CONT:
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		ret = -EINVAL;
		break;

	default:
		wl1271_error("Incorrect ampdu action id=%x\n", action);
		ret = -EINVAL;
	}

	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
5381
/*
 * mac80211 set_bitrate_mask callback: cache the per-band legacy rate
 * masks on the vif and, for an unassociated STA role, immediately push
 * the resulting rate policies to the firmware (associated roles pick the
 * new masks up on the next (re)association).
 */
static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   const struct cfg80211_bitrate_mask *mask)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl1271 *wl = hw->priv;
	int i, ret = 0;

	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
		mask->control[NL80211_BAND_2GHZ].legacy,
		mask->control[NL80211_BAND_5GHZ].legacy);

	mutex_lock(&wl->mutex);

	/* convert each band's legacy mask to the driver's rate format */
	for (i = 0; i < WLCORE_NUM_BANDS; i++)
		wlvif->bitrate_masks[i] =
			wl1271_tx_enabled_rates_get(wl,
						    mask->control[i].legacy,
						    i);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {

		ret = wl1271_ps_elp_wakeup(wl);
		if (ret < 0)
			goto out;

		wl1271_set_band_rate(wl, wlvif);
		wlvif->basic_rate =
			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
		ret = wl1271_acx_sta_rate_policies(wl, wlvif);

		wl1271_ps_elp_sleep(wl);
	}
out:
	mutex_unlock(&wl->mutex);

	return ret;
}
5424
wl12xx_op_channel_switch(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_channel_switch * ch_switch)5425 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5426 struct ieee80211_vif *vif,
5427 struct ieee80211_channel_switch *ch_switch)
5428 {
5429 struct wl1271 *wl = hw->priv;
5430 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5431 int ret;
5432
5433 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5434
5435 wl1271_tx_flush(wl);
5436
5437 mutex_lock(&wl->mutex);
5438
5439 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5440 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5441 ieee80211_chswitch_done(vif, false);
5442 goto out;
5443 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5444 goto out;
5445 }
5446
5447 ret = wl1271_ps_elp_wakeup(wl);
5448 if (ret < 0)
5449 goto out;
5450
5451 /* TODO: change mac80211 to pass vif as param */
5452
5453 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5454 unsigned long delay_usec;
5455
5456 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5457 if (ret)
5458 goto out_sleep;
5459
5460 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5461
5462 /* indicate failure 5 seconds after channel switch time */
5463 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5464 ch_switch->count;
5465 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5466 usecs_to_jiffies(delay_usec) +
5467 msecs_to_jiffies(5000));
5468 }
5469
5470 out_sleep:
5471 wl1271_ps_elp_sleep(wl);
5472
5473 out:
5474 mutex_unlock(&wl->mutex);
5475 }
5476
/*
 * Fetch the current beacon for @wlvif and return a pointer to the first
 * information element with id @eid inside it, or NULL when no beacon is
 * available or the IE is absent.
 *
 * NOTE(review): the skb returned by ieee80211_beacon_get() is never
 * freed here, and the returned IE pointer aliases the skb's data so the
 * caller cannot free it either -- this looks like an skb leak on every
 * call; confirm against ieee80211_beacon_get() ownership rules.
 */
static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
					struct wl12xx_vif *wlvif,
					u8 eid)
{
	/* IEs start right after the fixed beacon fields */
	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
	struct sk_buff *beacon =
		ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));

	if (!beacon)
		return NULL;

	return cfg80211_find_ie(eid,
				beacon->data + ieoffset,
				beacon->len - ieoffset);
}
5492
/*
 * Read the CSA counter from the Channel Switch Announcement IE of the
 * beacon currently set for @wlvif.
 *
 * Returns 0 and stores the counter in @csa_count on success, -EINVAL
 * when no beacon is available or it carries no CSA element.
 *
 * The beacon is fetched directly rather than through
 * wlcore_get_beacon_ie(): that helper returns a pointer into the skb
 * allocated by ieee80211_beacon_get() with no way to free it, leaking
 * the skb on every call. Here the skb is released once the counter has
 * been copied out.
 */
static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				u8 *csa_count)
{
	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
	const struct ieee80211_channel_sw_ie *ie_csa;
	struct sk_buff *beacon;
	const u8 *ie;
	int ret = -EINVAL;

	beacon = ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
	if (!beacon)
		return -EINVAL;

	ie = cfg80211_find_ie(WLAN_EID_CHANNEL_SWITCH,
			      beacon->data + ieoffset,
			      beacon->len - ieoffset);
	if (ie) {
		/* skip the 2-byte IE header (id, len) to the CSA payload */
		ie_csa = (const struct ieee80211_channel_sw_ie *)&ie[2];
		*csa_count = ie_csa->count;
		ret = 0;
	}

	dev_kfree_skb(beacon);
	return ret;
}
5508
/*
 * mac80211 channel_switch_beacon callback (AP-side CSA): extract the
 * CSA counter from the current beacon, forward the switch request to
 * the firmware and mark the vif as mid-switch.
 */
static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
					    struct ieee80211_vif *vif,
					    struct cfg80211_chan_def *chandef)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct ieee80211_channel_switch ch_switch = {
		.block_tx = true,
		.chandef = *chandef,
	};
	int ret;

	wl1271_debug(DEBUG_MAC80211,
		     "mac80211 channel switch beacon (role %d)",
		     wlvif->role_id);

	/* the FW needs the CSA counter the beacon currently advertises */
	ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
	if (ret < 0) {
		wl1271_error("error getting beacon (for CSA counter)");
		return;
	}

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EBUSY;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
	if (ret)
		goto out_sleep;

	set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
5553
/*
 * mac80211 flush callback: drain all pending TX frames. The vif,
 * queue mask and drop flag are ignored -- the flush is global.
 */
static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			    u32 queues, bool drop)
{
	wl1271_tx_flush(hw->priv);
}
5561
wlcore_op_remain_on_channel(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_channel * chan,int duration,enum ieee80211_roc_type type)5562 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5563 struct ieee80211_vif *vif,
5564 struct ieee80211_channel *chan,
5565 int duration,
5566 enum ieee80211_roc_type type)
5567 {
5568 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5569 struct wl1271 *wl = hw->priv;
5570 int channel, ret = 0;
5571
5572 channel = ieee80211_frequency_to_channel(chan->center_freq);
5573
5574 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5575 channel, wlvif->role_id);
5576
5577 mutex_lock(&wl->mutex);
5578
5579 if (unlikely(wl->state != WLCORE_STATE_ON))
5580 goto out;
5581
5582 /* return EBUSY if we can't ROC right now */
5583 if (WARN_ON(wl->roc_vif ||
5584 find_first_bit(wl->roc_map,
5585 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
5586 ret = -EBUSY;
5587 goto out;
5588 }
5589
5590 ret = wl1271_ps_elp_wakeup(wl);
5591 if (ret < 0)
5592 goto out;
5593
5594 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5595 if (ret < 0)
5596 goto out_sleep;
5597
5598 wl->roc_vif = vif;
5599 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5600 msecs_to_jiffies(duration));
5601 out_sleep:
5602 wl1271_ps_elp_sleep(wl);
5603 out:
5604 mutex_unlock(&wl->mutex);
5605 return ret;
5606 }
5607
__wlcore_roc_completed(struct wl1271 * wl)5608 static int __wlcore_roc_completed(struct wl1271 *wl)
5609 {
5610 struct wl12xx_vif *wlvif;
5611 int ret;
5612
5613 /* already completed */
5614 if (unlikely(!wl->roc_vif))
5615 return 0;
5616
5617 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5618
5619 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5620 return -EBUSY;
5621
5622 ret = wl12xx_stop_dev(wl, wlvif);
5623 if (ret < 0)
5624 return ret;
5625
5626 wl->roc_vif = NULL;
5627
5628 return 0;
5629 }
5630
/*
 * Locked wrapper around __wlcore_roc_completed(): take the mutex, wake
 * the chip, complete the ROC and let the chip sleep again. Returns
 * -EBUSY when the core is not ON, the wakeup error if waking fails, or
 * the result of __wlcore_roc_completed().
 */
static int wlcore_roc_completed(struct wl1271 *wl)
{
	int ret;

	wl1271_debug(DEBUG_MAC80211, "roc complete");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EBUSY;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = __wlcore_roc_completed(wl);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);

	return ret;
}
5656
wlcore_roc_complete_work(struct work_struct * work)5657 static void wlcore_roc_complete_work(struct work_struct *work)
5658 {
5659 struct delayed_work *dwork;
5660 struct wl1271 *wl;
5661 int ret;
5662
5663 dwork = container_of(work, struct delayed_work, work);
5664 wl = container_of(dwork, struct wl1271, roc_complete_work);
5665
5666 ret = wlcore_roc_completed(wl);
5667 if (!ret)
5668 ieee80211_remain_on_channel_expired(wl->hw);
5669 }
5670
/*
 * mac80211 cancel_remain_on_channel callback: flush TX, cancel the
 * pending completion work and complete the ROC synchronously.
 */
static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
{
	struct wl1271 *wl = hw->priv;

	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");

	/* TODO: per-vif */
	wl1271_tx_flush(wl);

	/*
	 * we can't just flush_work here, because it might deadlock
	 * (as we might get called from the same workqueue)
	 */
	cancel_delayed_work_sync(&wl->roc_complete_work);
	wlcore_roc_completed(wl);

	return 0;
}
5689
/*
 * mac80211 sta_rc_update callback. Only bandwidth changes are handled;
 * since this callback is atomic, the actual update is deferred to
 * wlvif->rc_update_work.
 */
static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    struct ieee80211_sta *sta,
				    u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);

	wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");

	if (changed & IEEE80211_RC_BW_CHANGED) {
		/* this callback is atomic, so schedule a new work */
		wlvif->rc_update_bw = sta->bandwidth;
		ieee80211_queue_work(hw, &wlvif->rc_update_work);
	}
}
5706
wlcore_op_sta_statistics(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_sta * sta,struct station_info * sinfo)5707 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5708 struct ieee80211_vif *vif,
5709 struct ieee80211_sta *sta,
5710 struct station_info *sinfo)
5711 {
5712 struct wl1271 *wl = hw->priv;
5713 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5714 s8 rssi_dbm;
5715 int ret;
5716
5717 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5718
5719 mutex_lock(&wl->mutex);
5720
5721 if (unlikely(wl->state != WLCORE_STATE_ON))
5722 goto out;
5723
5724 ret = wl1271_ps_elp_wakeup(wl);
5725 if (ret < 0)
5726 goto out_sleep;
5727
5728 ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5729 if (ret < 0)
5730 goto out_sleep;
5731
5732 sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
5733 sinfo->signal = rssi_dbm;
5734
5735 out_sleep:
5736 wl1271_ps_elp_sleep(wl);
5737
5738 out:
5739 mutex_unlock(&wl->mutex);
5740 }
5741
wl1271_tx_frames_pending(struct ieee80211_hw * hw)5742 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5743 {
5744 struct wl1271 *wl = hw->priv;
5745 bool ret = false;
5746
5747 mutex_lock(&wl->mutex);
5748
5749 if (unlikely(wl->state != WLCORE_STATE_ON))
5750 goto out;
5751
5752 /* packets are considered pending if in the TX queue or the FW */
5753 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5754 out:
5755 mutex_unlock(&wl->mutex);
5756
5757 return ret;
5758 }
5759
/* can't be const, mac80211 writes to this */
/*
 * Legacy 2.4 GHz rate table; .bitrate is in units of 100 kbps (10 ==
 * 1 Mbps). CCK rates above 1 Mbps advertise short-preamble support.
 */
static struct ieee80211_rate wl1271_rates[] = {
	{ .bitrate = 10,
	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
	{ .bitrate = 20,
	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60,
	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
	{ .bitrate = 90,
	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
	{ .bitrate = 120,
	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
	{ .bitrate = 180,
	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
	{ .bitrate = 240,
	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
	{ .bitrate = 360,
	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
	{ .bitrate = 480,
	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
	{ .bitrate = 540,
	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
};
5802
/* can't be const, mac80211 writes to this */
/* 2.4 GHz channels 1-14 (2412-2484 MHz) */
static struct ieee80211_channel wl1271_channels[] = {
	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
};
5820
/* can't be const, mac80211 writes to this */
/* 2.4 GHz band descriptor (channels + legacy rates above) */
static struct ieee80211_supported_band wl1271_band_2ghz = {
	.channels = wl1271_channels,
	.n_channels = ARRAY_SIZE(wl1271_channels),
	.bitrates = wl1271_rates,
	.n_bitrates = ARRAY_SIZE(wl1271_rates),
};
5828
/* 5 GHz data rates for WL1273 */
/* OFDM-only; .bitrate is in units of 100 kbps (60 == 6 Mbps) */
static struct ieee80211_rate wl1271_rates_5ghz[] = {
	{ .bitrate = 60,
	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
	{ .bitrate = 90,
	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
	{ .bitrate = 120,
	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
	{ .bitrate = 180,
	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
	{ .bitrate = 240,
	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
	{ .bitrate = 360,
	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
	{ .bitrate = 480,
	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
	{ .bitrate = 540,
	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
};
5856
/* 5 GHz band channels for WL1273 */
static struct ieee80211_channel wl1271_channels_5ghz[] = {
	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
};
5891
/* 5 GHz band descriptor (channels + OFDM rates above) */
static struct ieee80211_supported_band wl1271_band_5ghz = {
	.channels = wl1271_channels_5ghz,
	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
	.bitrates = wl1271_rates_5ghz,
	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
};
5898
/* mac80211 callback table shared by all wlcore-based chip drivers */
static const struct ieee80211_ops wl1271_ops = {
	.start = wl1271_op_start,
	.stop = wlcore_op_stop,
	.add_interface = wl1271_op_add_interface,
	.remove_interface = wl1271_op_remove_interface,
	.change_interface = wl12xx_op_change_interface,
#ifdef CONFIG_PM
	.suspend = wl1271_op_suspend,
	.resume = wl1271_op_resume,
#endif
	.config = wl1271_op_config,
	.prepare_multicast = wl1271_op_prepare_multicast,
	.configure_filter = wl1271_op_configure_filter,
	.tx = wl1271_op_tx,
	.set_key = wlcore_op_set_key,
	.hw_scan = wl1271_op_hw_scan,
	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
	.sched_scan_start = wl1271_op_sched_scan_start,
	.sched_scan_stop = wl1271_op_sched_scan_stop,
	.bss_info_changed = wl1271_op_bss_info_changed,
	.set_frag_threshold = wl1271_op_set_frag_threshold,
	.set_rts_threshold = wl1271_op_set_rts_threshold,
	.conf_tx = wl1271_op_conf_tx,
	.get_tsf = wl1271_op_get_tsf,
	.get_survey = wl1271_op_get_survey,
	.sta_state = wl12xx_op_sta_state,
	.ampdu_action = wl1271_op_ampdu_action,
	.tx_frames_pending = wl1271_tx_frames_pending,
	.set_bitrate_mask = wl12xx_set_bitrate_mask,
	.set_default_unicast_key = wl1271_op_set_default_key_idx,
	.channel_switch = wl12xx_op_channel_switch,
	.channel_switch_beacon = wlcore_op_channel_switch_beacon,
	.flush = wlcore_op_flush,
	.remain_on_channel = wlcore_op_remain_on_channel,
	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
	.add_chanctx = wlcore_op_add_chanctx,
	.remove_chanctx = wlcore_op_remove_chanctx,
	.change_chanctx = wlcore_op_change_chanctx,
	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
	.switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
	.sta_rc_update = wlcore_op_sta_rc_update,
	.sta_statistics = wlcore_op_sta_statistics,
	/* expands to .testmode_cmd when CONFIG_NL80211_TESTMODE is set */
	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
};
5944
5945
/*
 * Map a HW rate value reported by the firmware to the mac80211 rate
 * index for @band. Logs an error and returns 0 (the lowest rate) for
 * out-of-range or unsupported HW rates.
 *
 * Uses WLCORE_NUM_BANDS instead of a magic '2' for the band bound,
 * matching the rest of this file (e.g. wl12xx_set_bitrate_mask).
 */
u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
{
	u8 idx;

	/* the per-band lookup table only covers 2.4 and 5 GHz */
	BUG_ON(band >= WLCORE_NUM_BANDS);

	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
		wl1271_error("Illegal RX rate from HW: %d", rate);
		return 0;
	}

	idx = wl->band_rate_to_idx[band][rate];
	if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
		wl1271_error("Unsupported RX rate from HW: %d", rate);
		return 0;
	}

	return idx;
}
5965
/*
 * Build wl->num_mac_addr consecutive MAC addresses from the (oui, nic)
 * base and publish them to the wiphy. If the chip provides fewer than
 * WLCORE_NUM_MAC_ADDRESSES addresses, the last slot reuses the first
 * address with the locally-administered bit set.
 */
static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
{
	int i;

	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
		     oui, nic);

	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
		wl1271_warning("NIC part of the MAC address wraps around!");

	for (i = 0; i < wl->num_mac_addr; i++) {
		u8 *addr = wl->addresses[i].addr;

		/* OUI in the top three bytes, incrementing NIC below */
		addr[0] = (u8)(oui >> 16);
		addr[1] = (u8)(oui >> 8);
		addr[2] = (u8)oui;
		addr[3] = (u8)(nic >> 16);
		addr[4] = (u8)(nic >> 8);
		addr[5] = (u8)nic;
		nic++;
	}

	/* we may be one address short at the most */
	WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);

	/*
	 * turn on the LAA bit in the first address and use it as
	 * the last address.
	 */
	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;

		memcpy(&wl->addresses[idx], &wl->addresses[0],
		       sizeof(wl->addresses[0]));
		/* LAA bit */
		wl->addresses[idx].addr[0] |= BIT(1);
	}

	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
	wl->hw->wiphy->addresses = wl->addresses;
}
6004
wl12xx_get_hw_info(struct wl1271 * wl)6005 static int wl12xx_get_hw_info(struct wl1271 *wl)
6006 {
6007 int ret;
6008
6009 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
6010 if (ret < 0)
6011 goto out;
6012
6013 wl->fuse_oui_addr = 0;
6014 wl->fuse_nic_addr = 0;
6015
6016 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
6017 if (ret < 0)
6018 goto out;
6019
6020 if (wl->ops->get_mac)
6021 ret = wl->ops->get_mac(wl);
6022
6023 out:
6024 return ret;
6025 }
6026
/*
 * Derive the device MAC addresses (NVS first, fuse as fallback) and
 * register the hw with mac80211. Idempotent: returns 0 immediately if
 * already registered.
 */
static int wl1271_register_hw(struct wl1271 *wl)
{
	int ret;
	u32 oui_addr = 0, nic_addr = 0;

	if (wl->mac80211_registered)
		return 0;

	if (wl->nvs_len >= 12) {
		/* NOTE: The wl->nvs->nvs element must be first, in
		 * order to simplify the casting, we assume it is at
		 * the beginning of the wl->nvs structure.
		 */
		u8 *nvs_ptr = (u8 *)wl->nvs;

		/*
		 * NOTE(review): the MAC address is scattered over NVS
		 * bytes 3-6 and 10-11 in this order -- presumably the
		 * NVS TLV layout; verify against the NVS format spec.
		 */
		oui_addr =
			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
		nic_addr =
			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
	}

	/* if the MAC address is zeroed in the NVS derive from fuse */
	if (oui_addr == 0 && nic_addr == 0) {
		oui_addr = wl->fuse_oui_addr;
		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
		nic_addr = wl->fuse_nic_addr + 1;
	}

	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);

	ret = ieee80211_register_hw(wl->hw);
	if (ret < 0) {
		wl1271_error("unable to register mac80211 hw: %d", ret);
		goto out;
	}

	wl->mac80211_registered = true;

	wl1271_debugfs_init(wl);

	wl1271_notice("loaded");

out:
	return ret;
}
6072
/*
 * Undo wl1271_register_hw(): stop PLT (production-line test) mode if it
 * is active, then unregister from mac80211.
 */
static void wl1271_unregister_hw(struct wl1271 *wl)
{
	if (wl->plt)
		wl1271_plt_stop(wl);

	ieee80211_unregister_hw(wl->hw);
	wl->mac80211_registered = false;

}
6082
/*
 * Populate the ieee80211_hw / wiphy structures with the capabilities
 * wlcore advertises to mac80211: cipher suites, interface modes, scan
 * limits, bands, queue layout and offloads. Called before
 * ieee80211_register_hw(). Always returns 0.
 */
static int wl1271_init_ieee80211(struct wl1271 *wl)
{
	int i;
	static const u32 cipher_suites[] = {
		WLAN_CIPHER_SUITE_WEP40,
		WLAN_CIPHER_SUITE_WEP104,
		WLAN_CIPHER_SUITE_TKIP,
		WLAN_CIPHER_SUITE_CCMP,
		WL1271_CIPHER_SUITE_GEM,
	};

	/* The tx descriptor buffer */
	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);

	/* some chips need extra room for the TKIP header in TX skbs */
	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;

	/* unit us */
	/* FIXME: find a proper value */
	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;

	ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
	ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
	ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
	ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(wl->hw, AP_LINK_PS);
	ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
	ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
	ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
	ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
	ieee80211_hw_set(wl->hw, SIGNAL_DBM);
	ieee80211_hw_set(wl->hw, SUPPORTS_PS);

	wl->hw->wiphy->cipher_suites = cipher_suites;
	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);

	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
					 BIT(NL80211_IFTYPE_AP) |
					 BIT(NL80211_IFTYPE_P2P_DEVICE) |
					 BIT(NL80211_IFTYPE_P2P_CLIENT) |
					 BIT(NL80211_IFTYPE_P2P_GO);
	wl->hw->wiphy->max_scan_ssids = 1;
	wl->hw->wiphy->max_sched_scan_ssids = 16;
	wl->hw->wiphy->max_match_sets = 16;
	/*
	 * Maximum length of elements in scanning probe request templates
	 * should be the maximum length possible for a template, without
	 * the IEEE80211 header of the template
	 */
	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
			sizeof(struct ieee80211_header);

	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
		sizeof(struct ieee80211_header);

	wl->hw->wiphy->max_remain_on_channel_duration = 30000;

	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
				WIPHY_FLAG_SUPPORTS_SCHED_SCAN |
				WIPHY_FLAG_HAS_CHANNEL_SWITCH;

	/* make sure all our channels fit in the scanned_ch bitmask */
	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
		     ARRAY_SIZE(wl1271_channels_5ghz) >
		     WL1271_MAX_CHANNELS);
	/*
	* clear channel flags from the previous usage
	* and restore max_power & max_antenna_gain values.
	*/
	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
		wl1271_band_2ghz.channels[i].flags = 0;
		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
	}

	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
		wl1271_band_5ghz.channels[i].flags = 0;
		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
	}

	/*
	 * We keep local copies of the band structs because we need to
	 * modify them on a per-device basis.
	 */
	memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
	       sizeof(wl1271_band_2ghz));
	memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
	       &wl->ht_cap[IEEE80211_BAND_2GHZ],
	       sizeof(*wl->ht_cap));
	memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
	       sizeof(wl1271_band_5ghz));
	memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
	       &wl->ht_cap[IEEE80211_BAND_5GHZ],
	       sizeof(*wl->ht_cap));

	wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
		&wl->bands[IEEE80211_BAND_2GHZ];
	wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
		&wl->bands[IEEE80211_BAND_5GHZ];

	/*
	 * allow 4 queues per mac address we support +
	 * 1 cab queue per mac + one global offchannel Tx queue
	 */
	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;

	/* the last queue is the offchannel queue */
	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
	wl->hw->max_rates = 1;

	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;

	/* the FW answers probe-requests in AP-mode */
	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
	wl->hw->wiphy->probe_resp_offload =
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;

	/* allowed interface combinations */
	wl->hw->wiphy->iface_combinations = wl->iface_combinations;
	wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;

	/* register vendor commands */
	wlcore_set_vendor_commands(wl->hw->wiphy);

	SET_IEEE80211_DEV(wl->hw, wl->dev);

	wl->hw->sta_data_size = sizeof(struct wl1271_station);
	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);

	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;

	return 0;
}
6222
/*
 * Allocate and minimally initialize the shared wlcore device state:
 * the ieee80211_hw, the chip-specific private area (@priv_size), work
 * items, the freezable workqueue, the TX aggregation buffer
 * (@aggr_buf_size), the dummy packet, the FW log page and the event
 * mailbox (@mbox_size).
 *
 * Returns the ieee80211_hw on success or an ERR_PTR on failure; all
 * partially acquired resources are released via the goto chain below.
 * The caller completes setup and eventually calls wlcore_free_hw().
 */
struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
				     u32 mbox_size)
{
	struct ieee80211_hw *hw;
	struct wl1271 *wl;
	int i, j, ret;
	unsigned int order;

	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
	if (!hw) {
		wl1271_error("could not alloc ieee80211_hw");
		ret = -ENOMEM;
		goto err_hw_alloc;
	}

	wl = hw->priv;
	memset(wl, 0, sizeof(*wl));

	/* chip-specific private data lives in a separate allocation */
	wl->priv = kzalloc(priv_size, GFP_KERNEL);
	if (!wl->priv) {
		wl1271_error("could not alloc wl priv");
		ret = -ENOMEM;
		goto err_priv_alloc;
	}

	INIT_LIST_HEAD(&wl->wlvif_list);

	wl->hw = hw;

	/*
	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
	 * we don't allocate any additional resource here, so that's fine.
	 */
	for (i = 0; i < NUM_TX_QUEUES; i++)
		for (j = 0; j < WLCORE_MAX_LINKS; j++)
			skb_queue_head_init(&wl->links[j].tx_queue[i]);

	skb_queue_head_init(&wl->deferred_rx_queue);
	skb_queue_head_init(&wl->deferred_tx_queue);

	INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
	INIT_WORK(&wl->tx_work, wl1271_tx_work);
	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);

	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
	if (!wl->freezable_wq) {
		ret = -ENOMEM;
		goto err_hw;
	}

	wl->channel = 0;
	wl->rx_counter = 0;
	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
	wl->band = IEEE80211_BAND_2GHZ;
	wl->channel_type = NL80211_CHAN_NO_HT;
	wl->flags = 0;
	wl->sg_enabled = true;
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	wl->recovery_count = 0;
	wl->hw_pg_ver = -1;
	wl->ap_ps_map = 0;
	wl->ap_fw_ps_map = 0;
	wl->quirks = 0;
	wl->system_hlid = WL12XX_SYSTEM_HLID;
	wl->active_sta_count = 0;
	wl->active_link_count = 0;
	wl->fwlog_size = 0;
	init_waitqueue_head(&wl->fwlog_waitq);

	/* The system link is always allocated */
	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);

	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
	/*
	 * NOTE(review): wl->num_tx_desc is still 0 here (wl was memset
	 * above and the chip driver sets it later), so this loop is a
	 * no-op; tx_frames[] is already NULL from the memset anyway.
	 */
	for (i = 0; i < wl->num_tx_desc; i++)
		wl->tx_frames[i] = NULL;

	spin_lock_init(&wl->wl_lock);

	wl->state = WLCORE_STATE_OFF;
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	mutex_init(&wl->mutex);
	mutex_init(&wl->flush_mutex);
	init_completion(&wl->nvs_loading_complete);

	order = get_order(aggr_buf_size);
	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
	if (!wl->aggr_buf) {
		ret = -ENOMEM;
		goto err_wq;
	}
	wl->aggr_buf_size = aggr_buf_size;

	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
	if (!wl->dummy_packet) {
		ret = -ENOMEM;
		goto err_aggr;
	}

	/* Allocate one page for the FW log */
	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
	if (!wl->fwlog) {
		ret = -ENOMEM;
		goto err_dummy_packet;
	}

	/* event mailbox; GFP_DMA since the bus layer may DMA into it */
	wl->mbox_size = mbox_size;
	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
	if (!wl->mbox) {
		ret = -ENOMEM;
		goto err_fwlog;
	}

	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
	if (!wl->buffer_32) {
		ret = -ENOMEM;
		goto err_mbox;
	}

	return hw;

err_mbox:
	kfree(wl->mbox);

err_fwlog:
	free_page((unsigned long)wl->fwlog);

err_dummy_packet:
	dev_kfree_skb(wl->dummy_packet);

err_aggr:
	free_pages((unsigned long)wl->aggr_buf, order);

err_wq:
	destroy_workqueue(wl->freezable_wq);

err_hw:
	/*
	 * NOTE(review): debugfs was never initialized on this path
	 * (wl1271_debugfs_init runs in wl1271_register_hw) -- presumably
	 * wl1271_debugfs_exit() tolerates that; confirm.
	 */
	wl1271_debugfs_exit(wl);
	kfree(wl->priv);

err_priv_alloc:
	ieee80211_free_hw(hw);

err_hw_alloc:

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
6374
/*
 * Free every resource owned by a wl1271 instance, undoing
 * wlcore_alloc_hw() in roughly reverse allocation order, plus any
 * firmware image / NVS / status buffers attached afterwards.
 * Always returns 0.
 */
int wlcore_free_hw(struct wl1271 *wl)
{
	/* Unblock any fwlog readers */
	mutex_lock(&wl->mutex);
	/* negative size signals waiters that the log is going away --
	 * NOTE(review): sentinel consumed by the fwlog read path (not
	 * visible here); confirm against debugfs fwlog reader */
	wl->fwlog_size = -1;
	wake_up_interruptible_all(&wl->fwlog_waitq);
	mutex_unlock(&wl->mutex);

	wlcore_sysfs_free(wl);

	/* buffers allocated at the end of wlcore_alloc_hw() */
	kfree(wl->buffer_32);
	kfree(wl->mbox);
	free_page((unsigned long)wl->fwlog);
	dev_kfree_skb(wl->dummy_packet);
	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));

	wl1271_debugfs_exit(wl);

	/* firmware image is vmalloc'ed, hence vfree; clear the pointers
	 * so stale references are caught */
	vfree(wl->fw);
	wl->fw = NULL;
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	kfree(wl->nvs);
	wl->nvs = NULL;

	kfree(wl->raw_fw_status);
	kfree(wl->fw_status);
	kfree(wl->tx_res_if);
	destroy_workqueue(wl->freezable_wq);

	/* per-chip private data, then the mac80211 hw itself (last) */
	kfree(wl->priv);
	ieee80211_free_hw(wl->hw);

	return 0;
}
6409 EXPORT_SYMBOL_GPL(wlcore_free_hw);
6410
#ifdef CONFIG_PM
/*
 * Wake-on-WLAN capabilities; assigned to wl->hw->wiphy->wowlan from
 * wlcore_nvs_cb() when the platform keeps power in suspend.
 */
static const struct wiphy_wowlan_support wlcore_wowlan_support = {
	.flags = WIPHY_WOWLAN_ANY,
	.n_patterns = WL1271_MAX_RX_FILTERS,
	.pattern_min_len = 1,
	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
};
#endif
6419
/*
 * Hard-irq stub installed only for edge-triggered interrupt lines (see
 * wlcore_nvs_cb()): does no work in hard-irq context, just wakes the
 * threaded handler (wlcore_irq).
 */
static irqreturn_t wlcore_hardirq(int irq, void *cookie)
{
	return IRQ_WAKE_THREAD;
}
6424
wlcore_nvs_cb(const struct firmware * fw,void * context)6425 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6426 {
6427 struct wl1271 *wl = context;
6428 struct platform_device *pdev = wl->pdev;
6429 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6430 struct resource *res;
6431
6432 int ret;
6433 irq_handler_t hardirq_fn = NULL;
6434
6435 if (fw) {
6436 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6437 if (!wl->nvs) {
6438 wl1271_error("Could not allocate nvs data");
6439 goto out;
6440 }
6441 wl->nvs_len = fw->size;
6442 } else {
6443 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6444 WL12XX_NVS_NAME);
6445 wl->nvs = NULL;
6446 wl->nvs_len = 0;
6447 }
6448
6449 ret = wl->ops->setup(wl);
6450 if (ret < 0)
6451 goto out_free_nvs;
6452
6453 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6454
6455 /* adjust some runtime configuration parameters */
6456 wlcore_adjust_conf(wl);
6457
6458 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6459 if (!res) {
6460 wl1271_error("Could not get IRQ resource");
6461 goto out_free_nvs;
6462 }
6463
6464 wl->irq = res->start;
6465 wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6466 wl->if_ops = pdev_data->if_ops;
6467
6468 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6469 hardirq_fn = wlcore_hardirq;
6470 else
6471 wl->irq_flags |= IRQF_ONESHOT;
6472
6473 ret = wl12xx_set_power_on(wl);
6474 if (ret < 0)
6475 goto out_free_nvs;
6476
6477 ret = wl12xx_get_hw_info(wl);
6478 if (ret < 0) {
6479 wl1271_error("couldn't get hw info");
6480 wl1271_power_off(wl);
6481 goto out_free_nvs;
6482 }
6483
6484 ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6485 wl->irq_flags, pdev->name, wl);
6486 if (ret < 0) {
6487 wl1271_error("interrupt configuration failed");
6488 wl1271_power_off(wl);
6489 goto out_free_nvs;
6490 }
6491
6492 #ifdef CONFIG_PM
6493 ret = enable_irq_wake(wl->irq);
6494 if (!ret) {
6495 wl->irq_wake_enabled = true;
6496 device_init_wakeup(wl->dev, 1);
6497 if (pdev_data->pwr_in_suspend)
6498 wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6499 }
6500 #endif
6501 disable_irq(wl->irq);
6502 wl1271_power_off(wl);
6503
6504 ret = wl->ops->identify_chip(wl);
6505 if (ret < 0)
6506 goto out_irq;
6507
6508 ret = wl1271_init_ieee80211(wl);
6509 if (ret)
6510 goto out_irq;
6511
6512 ret = wl1271_register_hw(wl);
6513 if (ret)
6514 goto out_irq;
6515
6516 ret = wlcore_sysfs_init(wl);
6517 if (ret)
6518 goto out_unreg;
6519
6520 wl->initialized = true;
6521 goto out;
6522
6523 out_unreg:
6524 wl1271_unregister_hw(wl);
6525
6526 out_irq:
6527 free_irq(wl->irq, wl);
6528
6529 out_free_nvs:
6530 kfree(wl->nvs);
6531
6532 out:
6533 release_firmware(fw);
6534 complete_all(&wl->nvs_loading_complete);
6535 }
6536
wlcore_probe(struct wl1271 * wl,struct platform_device * pdev)6537 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6538 {
6539 int ret;
6540
6541 if (!wl->ops || !wl->ptable)
6542 return -EINVAL;
6543
6544 wl->dev = &pdev->dev;
6545 wl->pdev = pdev;
6546 platform_set_drvdata(pdev, wl);
6547
6548 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6549 WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6550 wl, wlcore_nvs_cb);
6551 if (ret < 0) {
6552 wl1271_error("request_firmware_nowait failed: %d", ret);
6553 complete_all(&wl->nvs_loading_complete);
6554 }
6555
6556 return ret;
6557 }
6558 EXPORT_SYMBOL_GPL(wlcore_probe);
6559
wlcore_remove(struct platform_device * pdev)6560 int wlcore_remove(struct platform_device *pdev)
6561 {
6562 struct wl1271 *wl = platform_get_drvdata(pdev);
6563
6564 wait_for_completion(&wl->nvs_loading_complete);
6565 if (!wl->initialized)
6566 return 0;
6567
6568 if (wl->irq_wake_enabled) {
6569 device_init_wakeup(wl->dev, 0);
6570 disable_irq_wake(wl->irq);
6571 }
6572 wl1271_unregister_hw(wl);
6573 free_irq(wl->irq, wl);
6574 wlcore_free_hw(wl);
6575
6576 return 0;
6577 }
6578 EXPORT_SYMBOL_GPL(wlcore_remove);
6579
/* Runtime-adjustable debug bitmask; exported so chip drivers share it. */
u32 wl12xx_debug_level = DEBUG_NONE;
EXPORT_SYMBOL_GPL(wl12xx_debug_level);
module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug_level, "wl12xx debugging level");

/* Firmware logger mode selection (string parsed elsewhere in wlcore). */
module_param_named(fwlog, fwlog_param, charp, 0);
MODULE_PARM_DESC(fwlog,
		 "FW logger options: continuous, ondemand, dbgpins or disable");

/* -1 (the default for the three params below) means "use driver default". */
module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");

module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");

module_param(no_recovery, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
MODULE_FIRMWARE(WL12XX_NVS_NAME);
6602