This source file includes the following definitions:
- iwl_mvm_add_sta_cmd_size
- iwl_mvm_find_free_sta_id
- iwl_mvm_sta_send_to_fw
- iwl_mvm_rx_agg_session_expired
- iwl_mvm_invalidate_sta_queue
- iwl_mvm_disable_txq
- iwl_mvm_get_queue_agg_tids
- iwl_mvm_remove_sta_queue_marking
- iwl_mvm_free_inactive_queue
- iwl_mvm_get_shared_queue
- iwl_mvm_redirect_queue
- iwl_mvm_find_free_queue
- iwl_mvm_tvqm_enable_txq
- iwl_mvm_sta_alloc_queue_tvqm
- iwl_mvm_update_txq_mapping
- iwl_mvm_enable_txq
- iwl_mvm_change_queue_tid
- iwl_mvm_unshare_queue
- iwl_mvm_remove_inactive_tids
- iwl_mvm_inactivity_check
- iwl_mvm_sta_alloc_queue
- iwl_mvm_tid_to_ac_queue
- iwl_mvm_add_new_dqa_stream_wk
- iwl_mvm_reserve_sta_stream
- iwl_mvm_realloc_queues_after_restart
- iwl_mvm_add_int_sta_common
- iwl_mvm_add_sta
- iwl_mvm_drain_sta
- iwl_mvm_rm_sta_common
- iwl_mvm_disable_sta_queues
- iwl_mvm_wait_sta_queues_empty
- iwl_mvm_rm_sta
- iwl_mvm_rm_sta_id
- iwl_mvm_allocate_int_sta
- iwl_mvm_dealloc_int_sta
- iwl_mvm_enable_aux_snif_queue
- iwl_mvm_enable_aux_snif_queue_tvqm
- iwl_mvm_add_int_sta_with_queue
- iwl_mvm_add_aux_sta
- iwl_mvm_add_snif_sta
- iwl_mvm_rm_snif_sta
- iwl_mvm_dealloc_snif_sta
- iwl_mvm_del_aux_sta
- iwl_mvm_send_add_bcast_sta
- iwl_mvm_free_bcast_sta_queues
- iwl_mvm_send_rm_bcast_sta
- iwl_mvm_alloc_bcast_sta
- iwl_mvm_add_p2p_bcast_sta
- iwl_mvm_dealloc_bcast_sta
- iwl_mvm_rm_p2p_bcast_sta
- iwl_mvm_add_mcast_sta
- __iwl_mvm_remove_sta_key
- iwl_mvm_rm_mcast_sta
- iwl_mvm_sync_rxq_del_ba
- iwl_mvm_free_reorder
- iwl_mvm_init_reorder_buffer
- iwl_mvm_sta_rx_agg
- iwl_mvm_sta_tx_agg
- iwl_mvm_sta_tx_agg_start
- iwl_mvm_sta_tx_agg_oper
- iwl_mvm_unreserve_agg_queue
- iwl_mvm_sta_tx_agg_stop
- iwl_mvm_sta_tx_agg_flush
- iwl_mvm_set_fw_key_idx
- iwl_mvm_get_key_sta
- iwl_mvm_send_sta_key
- iwl_mvm_send_sta_igtk
- iwl_mvm_get_mac_addr
- __iwl_mvm_set_sta_key
- iwl_mvm_set_sta_key
- iwl_mvm_remove_sta_key
- iwl_mvm_update_tkip_key
- iwl_mvm_sta_modify_ps_wake
- iwl_mvm_sta_modify_sleep_tx_count
- iwl_mvm_rx_eosp_notif
- iwl_mvm_sta_modify_disable_tx
- iwl_mvm_sta_modify_disable_tx_ap
- iwl_mvm_int_sta_modify_disable_tx
- iwl_mvm_modify_all_sta_disable_tx
- iwl_mvm_csa_client_absent
- iwl_mvm_tid_queued
64 #include <net/mac80211.h>
65
66 #include "mvm.h"
67 #include "sta.h"
68 #include "rs.h"
69
70
71
72
73
74
75 static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
76 {
77 if (iwl_mvm_has_new_rx_api(mvm) ||
78 fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
79 return sizeof(struct iwl_mvm_add_sta_cmd);
80 else
81 return sizeof(struct iwl_mvm_add_sta_cmd_v7);
82 }
83
84 static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
85 enum nl80211_iftype iftype)
86 {
87 int sta_id;
88 u32 reserved_ids = 0;
89
90 BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
91 WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
92
93 lockdep_assert_held(&mvm->mutex);
94
95 /* the AP station of a station interface is assumed to use sta_id 0, so keep it reserved for any other interface type */
96 if (iftype != NL80211_IFTYPE_STATION)
97 reserved_ids = BIT(0);
98
99 /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
100 for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
101 if (BIT(sta_id) & reserved_ids)
102 continue;
103
104 if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
105 lockdep_is_held(&mvm->mutex)))
106 return sta_id;
107 }
108 return IWL_MVM_INVALID_STA;
109 }
110
111 /* send station add/update command to firmware */
112 int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
113 bool update, unsigned int flags)
114 {
115 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
116 struct iwl_mvm_add_sta_cmd add_sta_cmd = {
117 .sta_id = mvm_sta->sta_id,
118 .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
119 .add_modify = update ? 1 : 0,
120 .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
121 STA_FLG_MIMO_EN_MSK |
122 STA_FLG_RTS_MIMO_PROT),
123 .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
124 };
125 int ret;
126 u32 status;
127 u32 agg_size = 0, mpdu_dens = 0;
128
129 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
130 add_sta_cmd.station_type = mvm_sta->sta_type;
131
132 if (!update || (flags & STA_MODIFY_QUEUES)) {
133 memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
134
135 if (!iwl_mvm_has_new_tx_api(mvm)) {
136 add_sta_cmd.tfd_queue_msk =
137 cpu_to_le32(mvm_sta->tfd_queue_msk);
138
139 if (flags & STA_MODIFY_QUEUES)
140 add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
141 } else {
142 WARN_ON(flags & STA_MODIFY_QUEUES);
143 }
144 }
145
146 switch (sta->bandwidth) {
147 case IEEE80211_STA_RX_BW_160:
148 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
149 /* fall through */
150 case IEEE80211_STA_RX_BW_80:
151 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
152 /* fall through */
153 case IEEE80211_STA_RX_BW_40:
154 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
155 /* fall through */
156 case IEEE80211_STA_RX_BW_20:
157 if (sta->ht_cap.ht_supported)
158 add_sta_cmd.station_flags |=
159 cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
160 break;
161 }
162
163 switch (sta->rx_nss) {
164 case 1:
165 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
166 break;
167 case 2:
168 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
169 break;
170 case 3 ... 8:
171 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
172 break;
173 }
174
175 switch (sta->smps_mode) {
176 case IEEE80211_SMPS_AUTOMATIC:
177 case IEEE80211_SMPS_NUM_MODES:
178 WARN_ON(1);
179 break;
180 case IEEE80211_SMPS_STATIC:
181 /* override NSS */
182 add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
183 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
184 break;
185 case IEEE80211_SMPS_DYNAMIC:
186 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
187 break;
188 case IEEE80211_SMPS_OFF:
189 /* nothing */
190 break;
191 }
192
193 if (sta->ht_cap.ht_supported) {
194 add_sta_cmd.station_flags_msk |=
195 cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
196 STA_FLG_AGG_MPDU_DENS_MSK);
197
198 mpdu_dens = sta->ht_cap.ampdu_density;
199 }
200
201 if (sta->vht_cap.vht_supported) {
202 agg_size = sta->vht_cap.cap &
203 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
204 agg_size >>=
205 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
206 } else if (sta->ht_cap.ht_supported) {
207 agg_size = sta->ht_cap.ampdu_factor;
208 }
209
210 add_sta_cmd.station_flags |=
211 cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
212 add_sta_cmd.station_flags |=
213 cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
214 if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
215 add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);
216
217 if (sta->wme) {
218 add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;
219
220 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
221 add_sta_cmd.uapsd_acs |= BIT(AC_BK);
222 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
223 add_sta_cmd.uapsd_acs |= BIT(AC_BE);
224 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
225 add_sta_cmd.uapsd_acs |= BIT(AC_VI);
226 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
227 add_sta_cmd.uapsd_acs |= BIT(AC_VO);
228 add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
229 add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
230 }
231
232 status = ADD_STA_SUCCESS;
233 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
234 iwl_mvm_add_sta_cmd_size(mvm),
235 &add_sta_cmd, &status);
236 if (ret)
237 return ret;
238
239 switch (status & IWL_ADD_STA_STATUS_MASK) {
240 case ADD_STA_SUCCESS:
241 IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
242 break;
243 default:
244 ret = -EIO;
245 IWL_ERR(mvm, "ADD_STA failed\n");
246 break;
247 }
248
249 return ret;
250 }
251
252 static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
253 {
254 struct iwl_mvm_baid_data *data =
255 from_timer(data, t, session_timer);
256 struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
257 struct iwl_mvm_baid_data *ba_data;
258 struct ieee80211_sta *sta;
259 struct iwl_mvm_sta *mvm_sta;
260 unsigned long timeout;
261
262 rcu_read_lock();
263
264 ba_data = rcu_dereference(*rcu_ptr);
265
266 if (WARN_ON(!ba_data))
267 goto unlock;
268
269 if (!ba_data->timeout)
270 goto unlock;
271
272 timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
273 if (time_is_after_jiffies(timeout)) {
274 mod_timer(&ba_data->session_timer, timeout);
275 goto unlock;
276 }
277
278 /* Timer expired */
279 sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
280
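281 /*
282  * sta should be valid unless the following happens:
283  * the firmware asserts and restarts, and the reconfiguration
284  * fails before the station pointer is set again; in that case
285  * the timer may still fire after the pointer was cleared, so
286  * just bail out quietly below.
287  */
288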
289 if (!sta)
290 goto unlock;
291
292 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
293 ieee80211_rx_ba_timer_expired(mvm_sta->vif,
294 sta->addr, ba_data->tid);
295 unlock:
296 rcu_read_unlock();
297 }
298
299 /* Disable aggregations for a bitmap of TIDs for a given station */
300 static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
301 unsigned long disable_agg_tids,
302 bool remove_queue)
303 {
304 struct iwl_mvm_add_sta_cmd cmd = {};
305 struct ieee80211_sta *sta;
306 struct iwl_mvm_sta *mvmsta;
307 u32 status;
308 u8 sta_id;
309
310 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
311 return -EINVAL;
312
313 sta_id = mvm->queue_info[queue].ra_sta_id;
314
315 rcu_read_lock();
316
317 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
318
319 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
320 rcu_read_unlock();
321 return -EINVAL;
322 }
323
324 mvmsta = iwl_mvm_sta_from_mac80211(sta);
325
326 mvmsta->tid_disable_agg |= disable_agg_tids;
327
328 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
329 cmd.sta_id = mvmsta->sta_id;
330 cmd.add_modify = STA_MODE_MODIFY;
331 cmd.modify_mask = STA_MODIFY_QUEUES;
332 if (disable_agg_tids)
333 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
334 if (remove_queue)
335 cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
336 cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
337 cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
338
339 rcu_read_unlock();
340
341 /* Notify FW of queue removal from the STA queues */
342 status = ADD_STA_SUCCESS;
343 return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
344 iwl_mvm_add_sta_cmd_size(mvm),
345 &cmd, &status);
346 }
347
348 static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
349 int queue, u8 tid, u8 flags)
350 {
351 struct iwl_scd_txq_cfg_cmd cmd = {
352 .scd_queue = queue,
353 .action = SCD_CFG_DISABLE_QUEUE,
354 };
355 int ret;
356
357 if (iwl_mvm_has_new_tx_api(mvm)) {
358 iwl_trans_txq_free(mvm->trans, queue);
359
360 return 0;
361 }
362
363 if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
364 return 0;
365
366 mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
367
368 cmd.action = mvm->queue_info[queue].tid_bitmap ?
369 SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
370 if (cmd.action == SCD_CFG_DISABLE_QUEUE)
371 mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
372
373 IWL_DEBUG_TX_QUEUES(mvm,
374 "Disabling TXQ #%d tids=0x%x\n",
375 queue,
376 mvm->queue_info[queue].tid_bitmap);
377
378 /* If the queue is still enabled - nothing left to do in this func */
379 if (cmd.action == SCD_CFG_ENABLE_QUEUE)
380 return 0;
381
382 cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
383 cmd.tid = mvm->queue_info[queue].txq_tid;
384
385 /* Make sure queue info is correct even though we overwrite it */
386 WARN(mvm->queue_info[queue].tid_bitmap,
387 "TXQ #%d info out-of-sync - tids=0x%x\n",
388 queue, mvm->queue_info[queue].tid_bitmap);
389
390 /* If we are here - the queue is freed and we can zero out these vals */
391 mvm->queue_info[queue].tid_bitmap = 0;
392
393 if (sta) {
394 struct iwl_mvm_txq *mvmtxq =
395 iwl_mvm_txq_from_tid(sta, tid);
396
397 mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
398 }
399
400 /* Regardless of the queue status, the queue is no longer reserved */
401 mvm->queue_info[queue].reserved = false;
402
403 iwl_trans_txq_disable(mvm->trans, queue, false);
404 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
405 sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
406
407 if (ret)
408 IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
409 queue, ret);
410 return ret;
411 }
412
413 static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
414 {
415 struct ieee80211_sta *sta;
416 struct iwl_mvm_sta *mvmsta;
417 unsigned long tid_bitmap;
418 unsigned long agg_tids = 0;
419 u8 sta_id;
420 int tid;
421
422 lockdep_assert_held(&mvm->mutex);
423
424 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
425 return -EINVAL;
426
427 sta_id = mvm->queue_info[queue].ra_sta_id;
428 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
429
430 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
431 lockdep_is_held(&mvm->mutex));
432
433 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
434 return -EINVAL;
435
436 mvmsta = iwl_mvm_sta_from_mac80211(sta);
437
438 spin_lock_bh(&mvmsta->lock);
439 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
440 if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
441 agg_tids |= BIT(tid);
442 }
443 spin_unlock_bh(&mvmsta->lock);
444
445 return agg_tids;
446 }
447
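448 /*
449  * Remove a queue from a station's resources.
450  * Note that this only marks as free. It DOESN'T delete a BA agreement, and
451  * doesn't disable the queue
452  */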
453 static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
454 {
455 struct ieee80211_sta *sta;
456 struct iwl_mvm_sta *mvmsta;
457 unsigned long tid_bitmap;
458 unsigned long disable_agg_tids = 0;
459 u8 sta_id;
460 int tid;
461
462 lockdep_assert_held(&mvm->mutex);
463
464 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
465 return -EINVAL;
466
467 sta_id = mvm->queue_info[queue].ra_sta_id;
468 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
469
470 rcu_read_lock();
471
472 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
473
474 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
475 rcu_read_unlock();
476 return 0;
477 }
478
479 mvmsta = iwl_mvm_sta_from_mac80211(sta);
480
481 spin_lock_bh(&mvmsta->lock);
482
483 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
484 struct iwl_mvm_txq *mvmtxq =
485 iwl_mvm_txq_from_tid(sta, tid);
486
487 if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
488 disable_agg_tids |= BIT(tid);
489 mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
490
491 mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
492 }
493
494 mvmsta->tfd_queue_msk &= ~BIT(queue);
495 spin_unlock_bh(&mvmsta->lock);
496
497 rcu_read_unlock();
498
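499 /*
500  * The TX path may have been using this TXQ_ID from the tid_data,
501  * so make sure it's no longer running so that we can safely reuse
502  * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
503  * above, but nothing guarantees we've stopped using them. Thus,
504  * without this, we could get to iwl_mvm_disable_txq() and remove
505  * the queue while still sending frames to it.
506  */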
507 synchronize_net();
508
509 return disable_agg_tids;
510 }
511
512 static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
513 struct ieee80211_sta *old_sta,
514 u8 new_sta_id)
515 {
516 struct iwl_mvm_sta *mvmsta;
517 u8 sta_id, tid;
518 unsigned long disable_agg_tids = 0;
519 bool same_sta;
520 int ret;
521
522 lockdep_assert_held(&mvm->mutex);
523
524 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
525 return -EINVAL;
526
527 sta_id = mvm->queue_info[queue].ra_sta_id;
528 tid = mvm->queue_info[queue].txq_tid;
529
530 same_sta = sta_id == new_sta_id;
531
532 mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
533 if (WARN_ON(!mvmsta))
534 return -EINVAL;
535
536 disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
537
538 if (disable_agg_tids)
539 iwl_mvm_invalidate_sta_queue(mvm, queue,
540 disable_agg_tids, false);
541
542 ret = iwl_mvm_disable_txq(mvm, old_sta, queue, tid, 0);
543 if (ret) {
544 IWL_ERR(mvm,
545 "Failed to free inactive queue %d (ret=%d)\n",
546 queue, ret);
547
548 return ret;
549 }
550
551
552 if (!same_sta)
553 iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
554
555 return 0;
556 }
557
558 static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
559 unsigned long tfd_queue_mask, u8 ac)
560 {
561 int queue = 0;
562 u8 ac_to_queue[IEEE80211_NUM_ACS];
563 int i;
564
565
566
567
568
569 lockdep_assert_held(&mvm->mutex);
570
571 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
572 return -EINVAL;
573
574 memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));
575
576 /* See what ACs the existing queues have */
577 for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
578 /* Only DATA queues can be shared */
579 if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
580 i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
581 continue;
582
583 ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
584 }
585
596 if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
597 queue = ac_to_queue[IEEE80211_AC_BE];
598 /* Priority 2: Same AC queue */
599 else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
600 queue = ac_to_queue[ac];
601 /* Priority 3a: If new AC is VO and VI exists - use VI */
602 else if (ac == IEEE80211_AC_VO &&
603 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
604 queue = ac_to_queue[IEEE80211_AC_VI];
605 /* Priority 3b: No BE so only AC less than the new one is BK */
606 else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
607 queue = ac_to_queue[IEEE80211_AC_BK];
608 /* Priority 4a: No BE nor BK - use VI if exists */
609 else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
610 queue = ac_to_queue[IEEE80211_AC_VI];
611 /* Priority 4b: No BE, BK nor VI - use VO if exists */
612 else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
613 queue = ac_to_queue[IEEE80211_AC_VO];
614
615 /* Make sure queue found (or not) is legal */
616 if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
617 !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
618 (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
619 IWL_ERR(mvm, "No DATA queues available to share\n");
620 return -ENOSPC;
621 }
622
623 return queue;
624 }
625
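626 /*
627  * If a given queue has a higher AC than the TID stream that is being compared
628  * to, the queue needs to be redirected to the lower AC. This function does that
629  * in such a case, otherwise - if no redirection required - it does nothing,
630  * unless the %force param is true.
631  */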
632 static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
633 int ac, int ssn, unsigned int wdg_timeout,
634 bool force, struct iwl_mvm_txq *txq)
635 {
636 struct iwl_scd_txq_cfg_cmd cmd = {
637 .scd_queue = queue,
638 .action = SCD_CFG_DISABLE_QUEUE,
639 };
640 bool shared_queue;
641 int ret;
642
643 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
644 return -EINVAL;
645
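646 /*
647  * If the AC is lower than current one - FIFO needs to be redirected to
648  * the lowest one of the streams in the queue. Check if this is needed
649  * here.
650  * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
651  * value 3 and VO with value 0, so to check if ac X is lower than ac Y
652  * we need to check if the ac X is higher than ac Y.
653  */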
654 if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
655 IWL_DEBUG_TX_QUEUES(mvm,
656 "No redirection needed on TXQ #%d\n",
657 queue);
658 return 0;
659 }
660
661 cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
662 cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
663 cmd.tid = mvm->queue_info[queue].txq_tid;
664 shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
665
666 IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
667 queue, iwl_mvm_ac_to_tx_fifo[ac]);
668
669 /* Stop the queue and wait for it to empty */
670 txq->stopped = true;
671
672 ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
673 if (ret) {
674 IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
675 queue);
676 ret = -EIO;
677 goto out;
678 }
679
680 /* Before redirecting the queue we need to de-activate it */
681 iwl_trans_txq_disable(mvm->trans, queue, false);
682 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
683 if (ret)
684 IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
685 ret);
686
687 /* Make sure the SCD wrptr is correctly set before reconfiguring */
688 iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
689
690 /* Update the TID "owner" of the queue */
691 mvm->queue_info[queue].txq_tid = tid;
692
693
694
695 /* Redirect to lower AC */
696 iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
697 cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);
698
699 /* Update AC marking of the queue */
700 mvm->queue_info[queue].mac80211_ac = ac;
701
702 /*
703  * Mark queue as shared in transport if shared
704  * Note this has to be done after queue enablement because enablement
705  * can also set this value, and there is no indication there to shared
706  * queues
707  */
708 if (shared_queue)
709 iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
710
711 out:
712 /* Continue using the queue */
713 txq->stopped = false;
714
715 return ret;
716 }
717
718 static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
719 u8 minq, u8 maxq)
720 {
721 int i;
722
723 lockdep_assert_held(&mvm->mutex);
724
725 if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues,
726 "max queue %d >= num_of_queues (%d)", maxq,
727 mvm->trans->trans_cfg->base_params->num_of_queues))
728 maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1;
729
730 /* This should not be hit with new TX path */
731 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
732 return -ENOSPC;
733
734 /* Start by looking for a free queue */
735 for (i = minq; i <= maxq; i++)
736 if (mvm->queue_info[i].tid_bitmap == 0 &&
737 mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
738 return i;
739
740 return -ENOSPC;
741 }
742
743 static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
744 u8 sta_id, u8 tid, unsigned int timeout)
745 {
746 int queue, size = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
747 mvm->trans->cfg->min_256_ba_txq_size);
748
749 if (tid == IWL_MAX_TID_COUNT) {
750 tid = IWL_MGMT_TID;
751 size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
752 mvm->trans->cfg->min_txq_size);
753 }
754 queue = iwl_trans_txq_alloc(mvm->trans,
755 cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
756 sta_id, tid, SCD_QUEUE_CFG, size, timeout);
757
758 if (queue < 0) {
759 IWL_DEBUG_TX_QUEUES(mvm,
760 "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
761 sta_id, tid, queue);
762 return queue;
763 }
764
765 IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
766 queue, sta_id, tid);
767
768 IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d\n", queue);
769
770 return queue;
771 }
772
773 static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
774 struct ieee80211_sta *sta, u8 ac,
775 int tid)
776 {
777 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
778 struct iwl_mvm_txq *mvmtxq =
779 iwl_mvm_txq_from_tid(sta, tid);
780 unsigned int wdg_timeout =
781 iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
782 int queue = -1;
783
784 lockdep_assert_held(&mvm->mutex);
785
786 IWL_DEBUG_TX_QUEUES(mvm,
787 "Allocating queue for sta %d on tid %d\n",
788 mvmsta->sta_id, tid);
789 queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout);
790 if (queue < 0)
791 return queue;
792
793 mvmtxq->txq_id = queue;
794 mvm->tvqm_info[queue].txq_tid = tid;
795 mvm->tvqm_info[queue].sta_id = mvmsta->sta_id;
796
797 IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);
798
799 spin_lock_bh(&mvmsta->lock);
800 mvmsta->tid_data[tid].txq_id = queue;
801 spin_unlock_bh(&mvmsta->lock);
802
803 return 0;
804 }
805
806 static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
807 struct ieee80211_sta *sta,
808 int queue, u8 sta_id, u8 tid)
809 {
810 bool enable_queue = true;
811
812 /* Make sure this TID isn't already enabled */
813 if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
814 IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
815 queue, tid);
816 return false;
817 }
818
819 /* Update mappings and refcounts */
820 if (mvm->queue_info[queue].tid_bitmap)
821 enable_queue = false;
822
823 mvm->queue_info[queue].tid_bitmap |= BIT(tid);
824 mvm->queue_info[queue].ra_sta_id = sta_id;
825
826 if (enable_queue) {
827 if (tid != IWL_MAX_TID_COUNT)
828 mvm->queue_info[queue].mac80211_ac =
829 tid_to_mac80211_ac[tid];
830 else
831 mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
832
833 mvm->queue_info[queue].txq_tid = tid;
834 }
835
836 if (sta) {
837 struct iwl_mvm_txq *mvmtxq =
838 iwl_mvm_txq_from_tid(sta, tid);
839
840 mvmtxq->txq_id = queue;
841 }
842
843 IWL_DEBUG_TX_QUEUES(mvm,
844 "Enabling TXQ #%d tids=0x%x\n",
845 queue, mvm->queue_info[queue].tid_bitmap);
846
847 return enable_queue;
848 }
849
850 static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
851 int queue, u16 ssn,
852 const struct iwl_trans_txq_scd_cfg *cfg,
853 unsigned int wdg_timeout)
854 {
855 struct iwl_scd_txq_cfg_cmd cmd = {
856 .scd_queue = queue,
857 .action = SCD_CFG_ENABLE_QUEUE,
858 .window = cfg->frame_limit,
859 .sta_id = cfg->sta_id,
860 .ssn = cpu_to_le16(ssn),
861 .tx_fifo = cfg->fifo,
862 .aggregate = cfg->aggregate,
863 .tid = cfg->tid,
864 };
865 bool inc_ssn;
866
867 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
868 return false;
869
870 /* Send the enabling command if we need to */
871 if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
872 return false;
873
874 inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
875 NULL, wdg_timeout);
876 if (inc_ssn)
877 le16_add_cpu(&cmd.ssn, 1);
878
879 WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
880 "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
881
882 return inc_ssn;
883 }
884
885 static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
886 {
887 struct iwl_scd_txq_cfg_cmd cmd = {
888 .scd_queue = queue,
889 .action = SCD_CFG_UPDATE_QUEUE_TID,
890 };
891 int tid;
892 unsigned long tid_bitmap;
893 int ret;
894
895 lockdep_assert_held(&mvm->mutex);
896
897 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
898 return;
899
900 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
901
902 if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
903 return;
904
905 /* Find any TID for queue */
906 tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
907 cmd.tid = tid;
908 cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
909
910 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
911 if (ret) {
912 IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
913 queue, ret);
914 return;
915 }
916
917 mvm->queue_info[queue].txq_tid = tid;
918 IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
919 queue, tid);
920 }
921
922 static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
923 {
924 struct ieee80211_sta *sta;
925 struct iwl_mvm_sta *mvmsta;
926 u8 sta_id;
927 int tid = -1;
928 unsigned long tid_bitmap;
929 unsigned int wdg_timeout;
930 int ssn;
931 int ret;
932
933
934 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
935 return;
936
937 lockdep_assert_held(&mvm->mutex);
938
939 sta_id = mvm->queue_info[queue].ra_sta_id;
940 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
941
942 /* Find TID for queue, and make sure it is the only one on the queue */
943 tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
944 if (tid_bitmap != BIT(tid)) {
945 IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
946 queue, tid_bitmap);
947 return;
948 }
949
950 IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
951 tid);
952
953 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
954 lockdep_is_held(&mvm->mutex));
955
956 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
957 return;
958
959 mvmsta = iwl_mvm_sta_from_mac80211(sta);
960 wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
961
962 ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
963
964 ret = iwl_mvm_redirect_queue(mvm, queue, tid,
965 tid_to_mac80211_ac[tid], ssn,
966 wdg_timeout, true,
967 iwl_mvm_txq_from_tid(sta, tid));
968 if (ret) {
969 IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
970 return;
971 }
972
973 /* If aggs should be turned back on - do it */
974 if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
975 struct iwl_mvm_add_sta_cmd cmd = {0};
976
977 mvmsta->tid_disable_agg &= ~BIT(tid);
978
979 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
980 cmd.sta_id = mvmsta->sta_id;
981 cmd.add_modify = STA_MODE_MODIFY;
982 cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
983 cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
984 cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
985
986 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
987 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
988 if (!ret) {
989 IWL_DEBUG_TX_QUEUES(mvm,
990 "TXQ #%d is now aggregated again\n",
991 queue);
992
993 /* Mark the queue as non-shared again in the transport */
994 iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
995 }
996 }
997
998 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
999 }
1000
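1001 /*
1002  * Remove inactive TIDs of a given queue.
1003  * If all queue TIDs are inactive - mark the queue as inactive
1004  * If only some the queue TIDs are inactive - unmap them from the queue
1005  *
1006  * Returns %true if all TIDs were removed and the queue could be reused.
1007  */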
1008 static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
1009 struct iwl_mvm_sta *mvmsta, int queue,
1010 unsigned long tid_bitmap,
1011 unsigned long *unshare_queues,
1012 unsigned long *changetid_queues)
1013 {
1014 int tid;
1015
1016 lockdep_assert_held(&mvmsta->lock);
1017 lockdep_assert_held(&mvm->mutex);
1018
1019 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1020 return false;
1021
1022 /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
1023 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1024 /* If some TFDs are still queued - don't mark TID as inactive */
1025 if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
1026 tid_bitmap &= ~BIT(tid);
1027
1028 /* Don't mark as inactive any TID that has an active BA */
1029 if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
1030 tid_bitmap &= ~BIT(tid);
1031 }
1032
1033 /* If all TIDs in the queue are inactive - return it can be reused */
1034 if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
1035 IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
1036 return true;
1037 }
1038
1039 /*
1040  * If we are here, this is a shared queue and not all TIDs timed-out.
1041  * Remove the ones that did.
1042  */
1043 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1044 u16 tid_bitmap;
1045
1046 mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
1047 mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
1048
1049 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
1050
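1051 /*
1052  * We need to take into account a situation in which a TXQ was
1053  * allocated to TID x, and then turned shared by adding TIDs y
1054  * and z. If TID x becomes inactive and is removed from the TXQ,
1055  * ownership must be given to one of the remaining TIDs.
1056  * This is mainly because if TID x continues - a new queue can't
1057  * be allocated for it as long as it is an owner of another TXQ.
1058  */
1059
1060
1061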
1062 if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
1063 set_bit(queue, changetid_queues);
1064
1065 IWL_DEBUG_TX_QUEUES(mvm,
1066 "Removing inactive TID %d from shared Q:%d\n",
1067 tid, queue);
1068 }
1069
1070 IWL_DEBUG_TX_QUEUES(mvm,
1071 "TXQ #%d left with tid bitmap 0x%x\n", queue,
1072 mvm->queue_info[queue].tid_bitmap);
1073
1074
1075
1076
1077
1078 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
1079
1080 /* If the queue is marked as shared - "unshare" it */
1081 if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
1082 mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
1083 IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
1084 queue);
1085 set_bit(queue, unshare_queues);
1086 }
1087
1088 return false;
1089 }
1090
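1091 /*
1092  * Check for inactive queues, so we don't leave TFD queues effectively
1093  * empty.
1094  * This function is also invoked as a sort of clean-up task, in which
1095  * case @alloc_for_sta is IWL_MVM_INVALID_STA.
1096  *
1097  * Returns the queue number, or -ENOSPC.
1098  */
1099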
1100 static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
1101 {
1102 unsigned long now = jiffies;
1103 unsigned long unshare_queues = 0;
1104 unsigned long changetid_queues = 0;
1105 int i, ret, free_queue = -ENOSPC;
1106 struct ieee80211_sta *queue_owner = NULL;
1107
1108 lockdep_assert_held(&mvm->mutex);
1109
1110 if (iwl_mvm_has_new_tx_api(mvm))
1111 return -ENOSPC;
1112
1113 rcu_read_lock();
1114
1115 /* we skip the CMD queue below by starting at 1 */
1116 BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);
1117
1118 for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
1119 struct ieee80211_sta *sta;
1120 struct iwl_mvm_sta *mvmsta;
1121 u8 sta_id;
1122 int tid;
1123 unsigned long inactive_tid_bitmap = 0;
1124 unsigned long queue_tid_bitmap;
1125
1126 queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
1127 if (!queue_tid_bitmap)
1128 continue;
1129
1130
1131 if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
1132 mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
1133 continue;
1134
1135
1136 for_each_set_bit(tid, &queue_tid_bitmap,
1137 IWL_MAX_TID_COUNT + 1) {
1138 if (time_after(mvm->queue_info[i].last_frame_time[tid] +
1139 IWL_MVM_DQA_QUEUE_TIMEOUT, now))
1140 continue;
1141
1142 inactive_tid_bitmap |= BIT(tid);
1143 }
1144
1145 /* If all TIDs are active - finish check on this queue */
1146 if (!inactive_tid_bitmap)
1147 continue;
1148
1149 /*
1150  * If we are here - the queue hadn't been served recently and is
1151  * in use
1152  */
1153
1154 sta_id = mvm->queue_info[i].ra_sta_id;
1155 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1156
1157 /*
1158  * If the STA doesn't exist anymore, it isn't an error. It could
1159  * be that it was removed since getting the queues, and in this
1160  * case it should've inactivated its queues anyway.
1161  */
1162 if (IS_ERR_OR_NULL(sta))
1163 continue;
1164
1165 mvmsta = iwl_mvm_sta_from_mac80211(sta);
1166
1167 spin_lock_bh(&mvmsta->lock);
1168 ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
1169 inactive_tid_bitmap,
1170 &unshare_queues,
1171 &changetid_queues);
1172 if (ret && free_queue < 0) {
1173 queue_owner = sta;
1174 free_queue = i;
1175 }
1176
1177 spin_unlock_bh(&mvmsta->lock);
1178 }
1179
1180
1181 /* Reconfigure queues requiring reconfiguration */
1182 for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
1183 iwl_mvm_unshare_queue(mvm, i);
1184 for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
1185 iwl_mvm_change_queue_tid(mvm, i);
1186
1187 if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
1188 ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
1189 alloc_for_sta);
1190 if (ret) {
1191 rcu_read_unlock();
1192 return ret;
1193 }
1194 }
1195
1196 rcu_read_unlock();
1197
1198 return free_queue;
1199 }
1200
1201 static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
1202 struct ieee80211_sta *sta, u8 ac, int tid)
1203 {
1204 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1205 struct iwl_trans_txq_scd_cfg cfg = {
1206 .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
1207 .sta_id = mvmsta->sta_id,
1208 .tid = tid,
1209 .frame_limit = IWL_FRAME_LIMIT,
1210 };
1211 unsigned int wdg_timeout =
1212 iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
1213 int queue = -1;
1214 unsigned long disable_agg_tids = 0;
1215 enum iwl_mvm_agg_state queue_state;
1216 bool shared_queue = false, inc_ssn;
1217 int ssn;
1218 unsigned long tfd_queue_mask;
1219 int ret;
1220
1221 lockdep_assert_held(&mvm->mutex);
1222
1223 if (iwl_mvm_has_new_tx_api(mvm))
1224 return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
1225
1226 spin_lock_bh(&mvmsta->lock);
1227 tfd_queue_mask = mvmsta->tfd_queue_msk;
1228 ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
1229 spin_unlock_bh(&mvmsta->lock);
1230
1231 if (tid == IWL_MAX_TID_COUNT) {
1232 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1233 IWL_MVM_DQA_MIN_MGMT_QUEUE,
1234 IWL_MVM_DQA_MAX_MGMT_QUEUE);
1235 if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
1236 IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
1237 queue);
1238
1239 /* If no such queue is found, we'll use a DATA queue instead */
1240 }
1241
1242 if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
1243 (mvm->queue_info[mvmsta->reserved_queue].status ==
1244 IWL_MVM_QUEUE_RESERVED)) {
1245 queue = mvmsta->reserved_queue;
1246 mvm->queue_info[queue].reserved = true;
1247 IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
1248 }
1249
1250 if (queue < 0)
1251 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1252 IWL_MVM_DQA_MIN_DATA_QUEUE,
1253 IWL_MVM_DQA_MAX_DATA_QUEUE);
1254 if (queue < 0) {
1255 /* try harder - perhaps kill an inactive queue */
1256 queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
1257 }
1258
1259 /* No free queue - we'll have to share */
1260 if (queue <= 0) {
1261 queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
1262 if (queue > 0) {
1263 shared_queue = true;
1264 mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
1265 }
1266 }
1267
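1268 /*
1269  * Mark TXQ as ready, even though it hasn't been fully configured yet,
1270  * to make sure no one else takes it.
1271  * This will allow avoiding re-acquiring the lock at the end of the
1272  * configuration. On error we'll mark it back as free.
1273  */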
1274 if (queue > 0 && !shared_queue)
1275 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
1276
1277 /* This shouldn't happen - out of queues */
1278 if (WARN_ON(queue <= 0)) {
1279 IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
1280 tid, cfg.sta_id);
1281 return queue;
1282 }
1283
1284 /*
1285  * Actual en/disablement of aggregations is through the ADD_STA HCMD,
1286  * but for configuring the SCD to send A-MPDUs we need to mark the queue
1287  * as aggregated.
1288  * Mark all DATA queues as allowing to be aggregated at some point
1289  */
1290 cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
1291 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
1292
1293 IWL_DEBUG_TX_QUEUES(mvm,
1294 "Allocating %squeue #%d to sta %d on tid %d\n",
1295 shared_queue ? "shared " : "", queue,
1296 mvmsta->sta_id, tid);
1297
1298 if (shared_queue) {
1299 /* Disable any open aggs on this queue */
1300 disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);
1301
1302 if (disable_agg_tids) {
1303 IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
1304 queue);
1305 iwl_mvm_invalidate_sta_queue(mvm, queue,
1306 disable_agg_tids, false);
1307 }
1308 }
1309
1310 inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);
1311
1312 /*
1313  * Mark queue as shared in transport if shared
1314  * Note this has to be done after queue enablement because enablement
1315  * can also set this value, and there is no indication there to shared
1316  * queues
1317  */
1318 if (shared_queue)
1319 iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
1320
1321 spin_lock_bh(&mvmsta->lock);
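1322 /*
1323  * This looks racy, but it is not. We have only one packet for
1324  * this ra/tid in our Tx path since we stop the Qdisc when we
1325  * need to allocate a new TFD queue.
1326  */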
1327 if (inc_ssn) {
1328 mvmsta->tid_data[tid].seq_number += 0x10;
1329 ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
1330 }
1331 mvmsta->tid_data[tid].txq_id = queue;
1332 mvmsta->tfd_queue_msk |= BIT(queue);
1333 queue_state = mvmsta->tid_data[tid].state;
1334
1335 if (mvmsta->reserved_queue == queue)
1336 mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
1337 spin_unlock_bh(&mvmsta->lock);
1338
1339 if (!shared_queue) {
1340 ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
1341 if (ret)
1342 goto out_err;
1343
1344 /* If we need to re-enable aggregations... */
1345 if (queue_state == IWL_AGG_ON) {
1346 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
1347 if (ret)
1348 goto out_err;
1349 }
1350 } else {
1351 /* Redirect queue, if needed */
1352 ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
1353 wdg_timeout, false,
1354 iwl_mvm_txq_from_tid(sta, tid));
1355 if (ret)
1356 goto out_err;
1357 }
1358
1359 return 0;
1360
1361 out_err:
1362 iwl_mvm_disable_txq(mvm, sta, queue, tid, 0);
1363
1364 return ret;
1365 }
1366
1367 static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
1368 {
1369 if (tid == IWL_MAX_TID_COUNT)
1370 return IEEE80211_AC_VO;
1371
1372 return tid_to_mac80211_ac[tid];
1373 }
1374
1375 void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
1376 {
1377 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
1378 add_stream_wk);
1379
1380 mutex_lock(&mvm->mutex);
1381
1382 iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
1383
1384 while (!list_empty(&mvm->add_stream_txqs)) {
1385 struct iwl_mvm_txq *mvmtxq;
1386 struct ieee80211_txq *txq;
1387 u8 tid;
1388
1389 mvmtxq = list_first_entry(&mvm->add_stream_txqs,
1390 struct iwl_mvm_txq, list);
1391
1392 txq = container_of((void *)mvmtxq, struct ieee80211_txq,
1393 drv_priv);
1394 tid = txq->tid;
1395 if (tid == IEEE80211_NUM_TIDS)
1396 tid = IWL_MAX_TID_COUNT;
1397
1398 iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid);
1399 list_del_init(&mvmtxq->list);
1400 local_bh_disable();
1401 iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
1402 local_bh_enable();
1403 }
1404
1405 mutex_unlock(&mvm->mutex);
1406 }
1407
1408 static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
1409 struct ieee80211_sta *sta,
1410 enum nl80211_iftype vif_type)
1411 {
1412 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1413 int queue;
1414
1415 /* queue reserving is disabled on new TX path */
1416 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1417 return 0;
1418
1419 /* run the general cleanup/unsharing of queues */
1420 iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
1421
1422 /* Make sure we have free resources for this STA */
1423 if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
1424 !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
1425 (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
1426 IWL_MVM_QUEUE_FREE))
1427 queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
1428 else
1429 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1430 IWL_MVM_DQA_MIN_DATA_QUEUE,
1431 IWL_MVM_DQA_MAX_DATA_QUEUE);
1432 if (queue < 0) {
1433 /* try again - this time kick out a queue if needed */
1434 queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
1435 if (queue < 0) {
1436 IWL_ERR(mvm, "No available queues for new station\n");
1437 return -ENOSPC;
1438 }
1439 }
1440 mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
1441
1442 mvmsta->reserved_queue = queue;
1443
1444 IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
1445 queue, mvmsta->sta_id);
1446
1447 return 0;
1448 }
1449
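1450 /*
1451  * In DQA mode, after a HW restart the queues should be allocated as before,
1452  * in order to avoid race conditions when there are shared queues. This
1453  * function does the re-mapping and queue allocation.
1454  *
1455  * Note that re-enabling aggregations isn't done in this function.
1456  */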
1457 static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
1458 struct ieee80211_sta *sta)
1459 {
1460 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1461 unsigned int wdg =
1462 iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
1463 int i;
1464 struct iwl_trans_txq_scd_cfg cfg = {
1465 .sta_id = mvm_sta->sta_id,
1466 .frame_limit = IWL_FRAME_LIMIT,
1467 };
1468
1469 /* Make sure reserved queue is still marked as such (or allocated) */
1470 if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
1471 mvm->queue_info[mvm_sta->reserved_queue].status =
1472 IWL_MVM_QUEUE_RESERVED;
1473
1474 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1475 struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
1476 int txq_id = tid_data->txq_id;
1477 int ac;
1478
1479 if (txq_id == IWL_MVM_INVALID_QUEUE)
1480 continue;
1481
1482 ac = tid_to_mac80211_ac[i];
1483
1484 if (iwl_mvm_has_new_tx_api(mvm)) {
1485 IWL_DEBUG_TX_QUEUES(mvm,
1486 "Re-mapping sta %d tid %d\n",
1487 mvm_sta->sta_id, i);
1488 txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id,
1489 i, wdg);
1490
1491 /*
1492  * on failures, just set it to IWL_MVM_INVALID_QUEUE
1493  * to try again later; we have no other way to fail here
1494  */
1495 if (txq_id < 0)
1496 txq_id = IWL_MVM_INVALID_QUEUE;
1497 tid_data->txq_id = txq_id;
1498
1499 /*
1500  * Since we don't set the seq number after reset, and HW
1501  * sets it now, FW reset will cause the seq num to start
1502  * at 0 again, so driver will need to update it in the
1503  * TX command.
1504  */
1505 tid_data->seq_number = 0;
1506 } else {
1507 u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
1508
1509 cfg.tid = i;
1510 cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
1511 cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
1512 txq_id ==
1513 IWL_MVM_DQA_BSS_CLIENT_QUEUE);
1514
1515 IWL_DEBUG_TX_QUEUES(mvm,
1516 "Re-mapping sta %d tid %d to queue %d\n",
1517 mvm_sta->sta_id, i, txq_id);
1518
1519 iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
1520 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
1521 }
1522 }
1523 }
1524
1525 static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
1526 struct iwl_mvm_int_sta *sta,
1527 const u8 *addr,
1528 u16 mac_id, u16 color)
1529 {
1530 struct iwl_mvm_add_sta_cmd cmd;
1531 int ret;
1532 u32 status = ADD_STA_SUCCESS;
1533
1534 lockdep_assert_held(&mvm->mutex);
1535
1536 memset(&cmd, 0, sizeof(cmd));
1537 cmd.sta_id = sta->sta_id;
1538 cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
1539 color));
1540 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
1541 cmd.station_type = sta->type;
1542
1543 if (!iwl_mvm_has_new_tx_api(mvm))
1544 cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
1545 cmd.tid_disable_tx = cpu_to_le16(0xffff);
1546
1547 if (addr)
1548 memcpy(cmd.addr, addr, ETH_ALEN);
1549
1550 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1551 iwl_mvm_add_sta_cmd_size(mvm),
1552 &cmd, &status);
1553 if (ret)
1554 return ret;
1555
1556 switch (status & IWL_ADD_STA_STATUS_MASK) {
1557 case ADD_STA_SUCCESS:
1558 IWL_DEBUG_INFO(mvm, "Internal station added.\n");
1559 return 0;
1560 default:
1561 ret = -EIO;
1562 IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
1563 status);
1564 break;
1565 }
1566 return ret;
1567 }
1568
1569 int iwl_mvm_add_sta(struct iwl_mvm *mvm,
1570 struct ieee80211_vif *vif,
1571 struct ieee80211_sta *sta)
1572 {
1573 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1574 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1575 struct iwl_mvm_rxq_dup_data *dup_data;
1576 int i, ret, sta_id;
1577 bool sta_update = false;
1578 unsigned int sta_flags = 0;
1579
1580 lockdep_assert_held(&mvm->mutex);
1581
1582 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1583 sta_id = iwl_mvm_find_free_sta_id(mvm,
1584 ieee80211_vif_type_p2p(vif));
1585 else
1586 sta_id = mvm_sta->sta_id;
1587
1588 if (sta_id == IWL_MVM_INVALID_STA)
1589 return -ENOSPC;
1590
1591 spin_lock_init(&mvm_sta->lock);
1592
1593 /* if this is a HW restart, re-alloc the existing queues */
1594 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1595 struct iwl_mvm_int_sta tmp_sta = {
1596 .sta_id = sta_id,
1597 .type = mvm_sta->sta_type,
1598 };
1599
1600 /*
1601  * First add an empty station since allocating
1602  * a queue requires a valid station
1603  */
1604 ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
1605 mvmvif->id, mvmvif->color);
1606 if (ret)
1607 goto err;
1608
1609 iwl_mvm_realloc_queues_after_restart(mvm, sta);
1610 sta_update = true;
1611 sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
1612 goto update_fw;
1613 }
1614
1615 mvm_sta->sta_id = sta_id;
1616 mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
1617 mvmvif->color);
1618 mvm_sta->vif = vif;
1619 if (!mvm->trans->trans_cfg->gen2)
1620 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
1621 else
1622 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
1623 mvm_sta->tx_protection = 0;
1624 mvm_sta->tt_tx_protection = false;
1625 mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;
1626
1627
1628 mvm_sta->tid_disable_agg = 0xffff;
1629 mvm_sta->tfd_queue_msk = 0;
1630
1631 /* for HW restart - reset everything but the sequence number */
1632 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1633 u16 seq = mvm_sta->tid_data[i].seq_number;
1634 memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
1635 mvm_sta->tid_data[i].seq_number = seq;
1636
1637 /*
1638  * Mark all queues for this STA as unallocated and defer TX
1639  * frames until the queue is allocated
1640  */
1641 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
1642 }
1643
1644 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1645 struct iwl_mvm_txq *mvmtxq =
1646 iwl_mvm_txq_from_mac80211(sta->txq[i]);
1647
1648 mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
1649 INIT_LIST_HEAD(&mvmtxq->list);
1650 atomic_set(&mvmtxq->tx_request, 0);
1651 }
1652
1653 mvm_sta->agg_tids = 0;
1654
1655 if (iwl_mvm_has_new_rx_api(mvm) &&
1656 !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1657 int q;
1658
1659 dup_data = kcalloc(mvm->trans->num_rx_queues,
1660 sizeof(*dup_data), GFP_KERNEL);
1661 if (!dup_data)
1662 return -ENOMEM;
1663
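1664 /*
1665  * Initialize all the last_seq values to 0xffff which can never
1666  * compare equal to the frame's seq_ctrl in the check in
1667  * iwl_mvm_is_dup() since the lower 4 bits are the fragment
1668  * number and fragmented packets don't reach that function.
1669  * This thus allows receiving a packet with seqno 0 and the
1670  * retry bit set as the very first packet on a new TID.
1671  */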
1672 for (q = 0; q < mvm->trans->num_rx_queues; q++)
1673 memset(dup_data[q].last_seq, 0xff,
1674 sizeof(dup_data[q].last_seq));
1675 mvm_sta->dup_data = dup_data;
1676 }
1677
1678 if (!iwl_mvm_has_new_tx_api(mvm)) {
1679 ret = iwl_mvm_reserve_sta_stream(mvm, sta,
1680 ieee80211_vif_type_p2p(vif));
1681 if (ret)
1682 goto err;
1683 }
1684
1685
1686 /* with TLC offload the firmware handles rate scaling; otherwise
1687  * initialize the driver's rate-scaling state for this station
1688  */
1689 if (iwl_mvm_has_tlc_offload(mvm))
1690 iwl_mvm_rs_add_sta(mvm, mvm_sta);
1691 else
1692 spin_lock_init(&mvm_sta->lq_sta.rs_drv.pers.lock);
1693
1694 iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);
1695
1696 update_fw:
1697 ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
1698 if (ret)
1699 goto err;
1700
1701 if (vif->type == NL80211_IFTYPE_STATION) {
1702 if (!sta->tdls) {
1703 WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
1704 mvmvif->ap_sta_id = sta_id;
1705 } else {
1706 WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
1707 }
1708 }
1709
1710 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
1711
1712 return 0;
1713
1714 err:
1715 return ret;
1716 }
1717
1718 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
1719 bool drain)
1720 {
1721 struct iwl_mvm_add_sta_cmd cmd = {};
1722 int ret;
1723 u32 status;
1724
1725 lockdep_assert_held(&mvm->mutex);
1726
1727 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1728 cmd.sta_id = mvmsta->sta_id;
1729 cmd.add_modify = STA_MODE_MODIFY;
1730 cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
1731 cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
1732
1733 status = ADD_STA_SUCCESS;
1734 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1735 iwl_mvm_add_sta_cmd_size(mvm),
1736 &cmd, &status);
1737 if (ret)
1738 return ret;
1739
1740 switch (status & IWL_ADD_STA_STATUS_MASK) {
1741 case ADD_STA_SUCCESS:
1742 IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
1743 mvmsta->sta_id);
1744 break;
1745 default:
1746 ret = -EIO;
1747 IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
1748 mvmsta->sta_id);
1749 break;
1750 }
1751
1752 return ret;
1753 }
1754
1755
1756
1757
1758
1759
1760 static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
1761 {
1762 struct ieee80211_sta *sta;
1763 struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
1764 .sta_id = sta_id,
1765 };
1766 int ret;
1767
1768 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1769 lockdep_is_held(&mvm->mutex));
1770
1771 /* Note: internal stations are marked as error values */
1772 if (!sta) {
1773 IWL_ERR(mvm, "Invalid station id\n");
1774 return -EINVAL;
1775 }
1776
1777 ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
1778 sizeof(rm_sta_cmd), &rm_sta_cmd);
1779 if (ret) {
1780 IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
1781 return ret;
1782 }
1783
1784 return 0;
1785 }
1786
1787 static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
1788 struct ieee80211_vif *vif,
1789 struct ieee80211_sta *sta)
1790 {
1791 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1792 int i;
1793
1794 lockdep_assert_held(&mvm->mutex);
1795
1796 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1797 if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
1798 continue;
1799
1800 iwl_mvm_disable_txq(mvm, sta, mvm_sta->tid_data[i].txq_id, i,
1801 0);
1802 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
1803 }
1804
1805 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1806 struct iwl_mvm_txq *mvmtxq =
1807 iwl_mvm_txq_from_mac80211(sta->txq[i]);
1808
1809 mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
1810 }
1811 }
1812
1813 int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
1814 struct iwl_mvm_sta *mvm_sta)
1815 {
1816 int i;
1817
1818 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1819 u16 txq_id;
1820 int ret;
1821
1822 spin_lock_bh(&mvm_sta->lock);
1823 txq_id = mvm_sta->tid_data[i].txq_id;
1824 spin_unlock_bh(&mvm_sta->lock);
1825
1826 if (txq_id == IWL_MVM_INVALID_QUEUE)
1827 continue;
1828
1829 ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
1830 if (ret)
1831 return ret;
1832 }
1833
1834 return 0;
1835 }
1836
1837 int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
1838 struct ieee80211_vif *vif,
1839 struct ieee80211_sta *sta)
1840 {
1841 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1842 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1843 u8 sta_id = mvm_sta->sta_id;
1844 int ret;
1845
1846 lockdep_assert_held(&mvm->mutex);
1847
1848 if (iwl_mvm_has_new_rx_api(mvm))
1849 kfree(mvm_sta->dup_data);
1850
1851 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
1852 if (ret)
1853 return ret;
1854
1855 /* flush its queues here since we are freeing mvm_sta */
1856 ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
1857 if (ret)
1858 return ret;
1859 if (iwl_mvm_has_new_tx_api(mvm)) {
1860 ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
1861 } else {
1862 u32 q_mask = mvm_sta->tfd_queue_msk;
1863
1864 ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
1865 q_mask);
1866 }
1867 if (ret)
1868 return ret;
1869
1870 ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
1871
1872 iwl_mvm_disable_sta_queues(mvm, vif, sta);
1873
1874 /* If there is a TXQ still marked as reserved - free it */
1875 if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
1876 u8 reserved_txq = mvm_sta->reserved_queue;
1877 enum iwl_mvm_queue_status *status;
1878
1879 /*
1880  * If no traffic has gone through the reserved TXQ - it
1881  * is still marked as IWL_MVM_QUEUE_RESERVED, and
1882  * should be manually marked as free again
1883  */
1884 status = &mvm->queue_info[reserved_txq].status;
1885 if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
1886 (*status != IWL_MVM_QUEUE_FREE),
1887 "sta_id %d reserved txq %d status %d",
1888 sta_id, reserved_txq, *status))
1889 return -EINVAL;
1890
1891 *status = IWL_MVM_QUEUE_FREE;
1892 }
1893
1894 if (vif->type == NL80211_IFTYPE_STATION &&
1895 mvmvif->ap_sta_id == sta_id) {
1896 /* if associated - we can't remove the AP STA now */
1897 if (vif->bss_conf.assoc)
1898 return ret;
1899
1900 /* unassoc - go ahead - remove the AP STA now */
1901 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
1902 }
1903
1904 /*
1905  * This shouldn't happen - the TDLS channel switch should be
1906  * canceled before the STA is removed.
1907  */
1908 if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
1909 mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
1910 cancel_delayed_work(&mvm->tdls_cs.dwork);
1911 }
1912
1913 /*
1914  * Make sure that the tx response code sees the station as -EBUSY and
1915  * calls the drain worker.
1916  */
1917 spin_lock_bh(&mvm_sta->lock);
1918 spin_unlock_bh(&mvm_sta->lock);
1919
1920 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
1921 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
1922
1923 return ret;
1924 }
1925
1926 int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
1927 struct ieee80211_vif *vif,
1928 u8 sta_id)
1929 {
1930 int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
1931
1932 lockdep_assert_held(&mvm->mutex);
1933
1934 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
1935 return ret;
1936 }
1937
1938 int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
1939 struct iwl_mvm_int_sta *sta,
1940 u32 qmask, enum nl80211_iftype iftype,
1941 enum iwl_sta_type type)
1942 {
1943 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
1944 sta->sta_id == IWL_MVM_INVALID_STA) {
1945 sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
1946 if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
1947 return -ENOSPC;
1948 }
1949
1950 sta->tfd_queue_msk = qmask;
1951 sta->type = type;
1952
1953 /* put a non-NULL value so iterating over the stations won't stop */
1954 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
1955 return 0;
1956 }
1957
1958 void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
1959 {
1960 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
1961 memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
1962 sta->sta_id = IWL_MVM_INVALID_STA;
1963 }
1964
1965 static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
1966 u8 sta_id, u8 fifo)
1967 {
1968 unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
1969 mvm->trans->trans_cfg->base_params->wd_timeout :
1970 IWL_WATCHDOG_DISABLED;
1971 struct iwl_trans_txq_scd_cfg cfg = {
1972 .fifo = fifo,
1973 .sta_id = sta_id,
1974 .tid = IWL_MAX_TID_COUNT,
1975 .aggregate = false,
1976 .frame_limit = IWL_FRAME_LIMIT,
1977 };
1978
1979 WARN_ON(iwl_mvm_has_new_tx_api(mvm));
1980
1981 iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
1982 }
1983
1984 static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
1985 {
1986 unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
1987 mvm->trans->trans_cfg->base_params->wd_timeout :
1988 IWL_WATCHDOG_DISABLED;
1989
1990 WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
1991
1992 return iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT,
1993 wdg_timeout);
1994 }
1995
1996 static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
1997 int maccolor,
1998 struct iwl_mvm_int_sta *sta,
1999 u16 *queue, int fifo)
2000 {
2001 int ret;
2002
2003 /* Map queue to fifo - needs to happen before adding station */
2004 if (!iwl_mvm_has_new_tx_api(mvm))
2005 iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo);
2006
2007 ret = iwl_mvm_add_int_sta_common(mvm, sta, NULL, macidx, maccolor);
2008 if (ret) {
2009 if (!iwl_mvm_has_new_tx_api(mvm))
2010 iwl_mvm_disable_txq(mvm, NULL, *queue,
2011 IWL_MAX_TID_COUNT, 0);
2012 return ret;
2013 }
2014
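2015 /*
2016  * For 22000 firmware and on we cannot add queue to a station unknown
2017  * to firmware so enable queue here - after the station was added
2018  */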
2019 if (iwl_mvm_has_new_tx_api(mvm)) {
2020 int txq;
2021
2022 txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id);
2023 if (txq < 0) {
2024 iwl_mvm_rm_sta_common(mvm, sta->sta_id);
2025 return txq;
2026 }
2027
2028 *queue = txq;
2029 }
2030
2031 return 0;
2032 }
2033
2034 int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
2035 {
2036 int ret;
2037
2038 lockdep_assert_held(&mvm->mutex);
2039
2040 /* Allocate aux station and assign to it the aux queue */
2041 ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
2042 NL80211_IFTYPE_UNSPECIFIED,
2043 IWL_STA_AUX_ACTIVITY);
2044 if (ret)
2045 return ret;
2046
2047 ret = iwl_mvm_add_int_sta_with_queue(mvm, MAC_INDEX_AUX, 0,
2048 &mvm->aux_sta, &mvm->aux_queue,
2049 IWL_MVM_TX_FIFO_MCAST);
2050 if (ret) {
2051 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
2052 return ret;
2053 }
2054
2055 return 0;
2056 }
2057
2058 int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2059 {
2060 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2061
2062 lockdep_assert_held(&mvm->mutex);
2063
2064 return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
2065 &mvm->snif_sta, &mvm->snif_queue,
2066 IWL_MVM_TX_FIFO_BE);
2067 }
2068
2069 int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2070 {
2071 int ret;
2072
2073 lockdep_assert_held(&mvm->mutex);
2074
2075 iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
2076 ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
2077 if (ret)
2078 IWL_WARN(mvm, "Failed sending remove station\n");
2079
2080 return ret;
2081 }
2082
2083 void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
2084 {
2085 iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
2086 }
2087
2088 void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
2089 {
2090 lockdep_assert_held(&mvm->mutex);
2091
2092 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
2093 }
2094
2095 /*
2096  * Send the add station command for the vif's broadcast station.
2097  * Assumes that the station was already allocated.
2098  *
2099  * @mvm: the mvm component
2100  * @vif: the interface to which the broadcast station is added
2101  */
2102
2103 int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2104 {
2105 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2106 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
2107 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
2108 const u8 *baddr = _baddr;
2109 int queue;
2110 int ret;
2111 unsigned int wdg_timeout =
2112 iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2113 struct iwl_trans_txq_scd_cfg cfg = {
2114 .fifo = IWL_MVM_TX_FIFO_VO,
2115 .sta_id = mvmvif->bcast_sta.sta_id,
2116 .tid = IWL_MAX_TID_COUNT,
2117 .aggregate = false,
2118 .frame_limit = IWL_FRAME_LIMIT,
2119 };
2120
2121 lockdep_assert_held(&mvm->mutex);
2122
2123 if (!iwl_mvm_has_new_tx_api(mvm)) {
2124 if (vif->type == NL80211_IFTYPE_AP ||
2125 vif->type == NL80211_IFTYPE_ADHOC) {
2126 queue = mvm->probe_queue;
2127 } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2128 queue = mvm->p2p_dev_queue;
2129 } else {
2130 WARN(1, "Missing required TXQ for adding bcast STA\n");
2131 return -EINVAL;
2132 }
2133
2134 bsta->tfd_queue_msk |= BIT(queue);
2135
2136 iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
2137 }
2138
2139 if (vif->type == NL80211_IFTYPE_ADHOC)
2140 baddr = vif->bss_conf.bssid;
2141
2142 if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
2143 return -ENOSPC;
2144
2145 ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
2146 mvmvif->id, mvmvif->color);
2147 if (ret)
2148 return ret;
2149
2150 /*
2151  * For 22000 firmware and on we cannot add queue to a station unknown
2152  * to firmware so enable queue here - after the station was added
2153  */
2154 if (iwl_mvm_has_new_tx_api(mvm)) {
2155 queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
2156 IWL_MAX_TID_COUNT,
2157 wdg_timeout);
2158 if (queue < 0) {
2159 iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
2160 return queue;
2161 }
2162
2163 if (vif->type == NL80211_IFTYPE_AP ||
2164 vif->type == NL80211_IFTYPE_ADHOC)
2165 mvm->probe_queue = queue;
2166 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
2167 mvm->p2p_dev_queue = queue;
2168 }
2169
2170 return 0;
2171 }
2172
2173 static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
2174 struct ieee80211_vif *vif)
2175 {
2176 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2177 int queue;
2178
2179 lockdep_assert_held(&mvm->mutex);
2180
2181 iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);
2182
2183 switch (vif->type) {
2184 case NL80211_IFTYPE_AP:
2185 case NL80211_IFTYPE_ADHOC:
2186 queue = mvm->probe_queue;
2187 break;
2188 case NL80211_IFTYPE_P2P_DEVICE:
2189 queue = mvm->p2p_dev_queue;
2190 break;
2191 default:
2192 WARN(1, "Can't free bcast queue on vif type %d\n",
2193 vif->type);
2194 return;
2195 }
2196
2197 iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT, 0);
2198 if (iwl_mvm_has_new_tx_api(mvm))
2199 return;
2200
2201 WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
2202 mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
2203 }
2204
2205 /* Send the FW a request to remove the station from its internal data
2206  * structures, but DO NOT remove the entry from the local data structures. */
2207 int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2208 {
2209 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2210 int ret;
2211
2212 lockdep_assert_held(&mvm->mutex);
2213
2214 iwl_mvm_free_bcast_sta_queues(mvm, vif);
2215
2216 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
2217 if (ret)
2218 IWL_WARN(mvm, "Failed sending remove station\n");
2219 return ret;
2220 }
2221
2222 int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2223 {
2224 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2225
2226 lockdep_assert_held(&mvm->mutex);
2227
2228 return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
2229 ieee80211_vif_type_p2p(vif),
2230 IWL_STA_GENERAL_PURPOSE);
2231 }
2232
2233 /*
2234 * Allocate a new station entry for the broadcast station to the given
2235 * vif, and send it to the FW.
2236 * Note that each P2P mac should have its own broadcast station.
2237 * @mvm: the mvm component
2238 * @vif: the interface to which the broadcast station is added
2239 */
2240 int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2241 {
2242 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2243 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
2244 int ret;
2245
2246 lockdep_assert_held(&mvm->mutex);
2247
2248 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
2249 if (ret)
2250 return ret;
2251
2252 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2253
2254 if (ret)
2255 iwl_mvm_dealloc_int_sta(mvm, bsta);
2256
2257 return ret;
2258 }
2259
2260 void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2261 {
2262 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2263
2264 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
2265 }
2266
2267 /*
2268 * Send the FW a request to remove the station from its internal data
2269 * structures, and in addition remove it from the local data structure.
2270 */
2271 int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2272 {
2273 int ret;
2274
2275 lockdep_assert_held(&mvm->mutex);
2276
2277 ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
2278
2279 iwl_mvm_dealloc_bcast_sta(mvm, vif);
2280
2281 return ret;
2282 }
2283
2284 /*
2285 * Allocate a new station entry for the multicast station to the given
2286 * vif, and send it to the FW.
2287 * Note that each AP/GO mac should have its own multicast station.
2288 *
2289 * @mvm: the mvm component
2290 * @vif: the interface to which the multicast station is added
2291 */
2292 int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2293 {
2294 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2295 struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
2296 static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
2297 const u8 *maddr = _maddr;
2298 struct iwl_trans_txq_scd_cfg cfg = {
2299 .fifo = vif->type == NL80211_IFTYPE_AP ?
2300 IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE,
2301 .sta_id = msta->sta_id,
2302 .tid = 0,
2303 .aggregate = false,
2304 .frame_limit = IWL_FRAME_LIMIT,
2305 };
2306 unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2307 int ret;
2308
2309 lockdep_assert_held(&mvm->mutex);
2310
2311 if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
2312 vif->type != NL80211_IFTYPE_ADHOC))
2313 return -ENOTSUPP;
2314
2315 /*
2316 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
2317 * invalid, so make sure we use the queue we want.
2318 * Note that this is done here as we want to avoid making DQA
2319 * changes in mac80211 layer.
2320 */
2321 if (vif->type == NL80211_IFTYPE_ADHOC)
2322 mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
2323
2324 /*
2325 * While in previous FWs we had to exclude cab queue from TFD queue
2326 * mask, now it is needed as any other queue.
2327 */
2328 if (!iwl_mvm_has_new_tx_api(mvm) &&
2329 fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
2330 iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
2331 timeout);
2332 msta->tfd_queue_msk |= BIT(mvmvif->cab_queue);
2333 }
2334 ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
2335 mvmvif->id, mvmvif->color);
2336 if (ret)
2337 goto err;
2338
2339 /*
2340 * Enable cab queue after the ADD_STA command is sent.
2341 * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
2342 * command with unknown station id, and for FW that doesn't support
2343 * station API since the cab queue is not included in the
2344 * tfd_queue_mask.
2345 */
2346 if (iwl_mvm_has_new_tx_api(mvm)) {
2347 int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
2348 0,
2349 timeout);
2350 if (queue < 0) {
2351 ret = queue;
2352 goto err;
2353 }
2354 mvmvif->cab_queue = queue;
2355 } else if (!fw_has_api(&mvm->fw->ucode_capa,
2356 IWL_UCODE_TLV_API_STA_TYPE))
2357 iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
2358 timeout);
2359
2360 return 0;
2361 err:
2362 iwl_mvm_dealloc_int_sta(mvm, msta);
2363 return ret;
2364 }
2365
2366 static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
2367 struct ieee80211_key_conf *keyconf,
2368 bool mcast)
2369 {
2370 union {
2371 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
2372 struct iwl_mvm_add_sta_key_cmd cmd;
2373 } u = {};
2374 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
2375 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
2376 __le16 key_flags;
2377 int ret, size;
2378 u32 status;
2379
2380 /* This is a valid situation for GTK removal */
2381 if (sta_id == IWL_MVM_INVALID_STA)
2382 return 0;
2383
2384 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
2385 STA_KEY_FLG_KEYID_MSK);
2386 key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
2387 key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
2388
2389 if (mcast)
2390 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
2391
2392 /*
2393 * The fields assigned here are in the same location at the start
2394 * of the command, so we can do this union trick.
2395 */
2396 u.cmd.common.key_flags = key_flags;
2397 u.cmd.common.key_offset = keyconf->hw_key_idx;
2398 u.cmd.common.sta_id = sta_id;
2399
2400 size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
2401
2402 status = ADD_STA_SUCCESS;
2403 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
2404 &status);
2405
2406 switch (status) {
2407 case ADD_STA_SUCCESS:
2408 IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
2409 break;
2410 default:
2411 ret = -EIO;
2412 IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
2413 break;
2414 }
2415
2416 return ret;
2417 }
2418
2419 /*
2420 * Send the FW a request to remove the station from its internal data
2421 * structures, and in addition remove it from the local data structure.
2422 */
2423 int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2424 {
2425 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2426 int ret;
2427
2428 lockdep_assert_held(&mvm->mutex);
2429
2430 iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
2431
2432 iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);
2433
2434 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
2435 if (ret)
2436 IWL_WARN(mvm, "Failed sending remove station\n");
2437
2438 return ret;
2439 }
2440
2441 #define IWL_MAX_RX_BA_SESSIONS 16
2442
2443 static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
2444 {
2445 struct iwl_mvm_rss_sync_notif notif = {
2446 .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
2447 .metadata.sync = 1,
2448 .delba.baid = baid,
2449 };
2450 iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
2451 }
2452
2453 static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
2454 struct iwl_mvm_baid_data *data)
2455 {
2456 int i;
2457
2458 iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
2459
2460 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2461 int j;
2462 struct iwl_mvm_reorder_buffer *reorder_buf =
2463 &data->reorder_buf[i];
2464 struct iwl_mvm_reorder_buf_entry *entries =
2465 &data->entries[i * data->entries_per_queue];
2466
2467 spin_lock_bh(&reorder_buf->lock);
2468 if (likely(!reorder_buf->num_stored)) {
2469 spin_unlock_bh(&reorder_buf->lock);
2470 continue;
2471 }
2472
2473 /*
2474 * This shouldn't happen in regular DQA mode - the queue should
2475 * be empty at this point; warn and then drop any frames that
2476 * were left stored in the reorder buffer.
2477 */
2478 WARN_ON(1);
2479
2480 for (j = 0; j < reorder_buf->buf_size; j++)
2481 __skb_queue_purge(&entries[j].e.frames);
2482
2483 /*
2484 * Prevent timer re-arm. This handles a very far fetched case where
2485 * we timed out on the notification - prior RX frames may still be
2486 * pending in the RX queue and get processed between now and the
2487 * actual deletion, which would re-arm the timer although we are
2488 * deleting the reorder buffer.
2489 */
2490 reorder_buf->removed = true;
2491 spin_unlock_bh(&reorder_buf->lock);
2492 del_timer_sync(&reorder_buf->reorder_timer);
2493 }
2494 }
2495
2496 static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
2497 struct iwl_mvm_baid_data *data,
2498 u16 ssn, u16 buf_size)
2499 {
2500 int i;
2501
2502 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2503 struct iwl_mvm_reorder_buffer *reorder_buf =
2504 &data->reorder_buf[i];
2505 struct iwl_mvm_reorder_buf_entry *entries =
2506 &data->entries[i * data->entries_per_queue];
2507 int j;
2508
2509 reorder_buf->num_stored = 0;
2510 reorder_buf->head_sn = ssn;
2511 reorder_buf->buf_size = buf_size;
2512
2513 timer_setup(&reorder_buf->reorder_timer,
2514 iwl_mvm_reorder_timer_expired, 0);
2515 spin_lock_init(&reorder_buf->lock);
2516 reorder_buf->mvm = mvm;
2517 reorder_buf->queue = i;
2518 reorder_buf->valid = false;
2519 for (j = 0; j < reorder_buf->buf_size; j++)
2520 __skb_queue_head_init(&entries[j].e.frames);
2521 }
2522 }
2523
2524 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2525 int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
2526 {
2527 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2528 struct iwl_mvm_add_sta_cmd cmd = {};
2529 struct iwl_mvm_baid_data *baid_data = NULL;
2530 int ret;
2531 u32 status;
2532
2533 lockdep_assert_held(&mvm->mutex);
2534
2535 if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
2536 IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
2537 return -ENOSPC;
2538 }
2539
2540 if (iwl_mvm_has_new_rx_api(mvm) && start) {
2541 u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
2542
2543 /* sparse doesn't like the __align() so don't check */
2544 #ifndef __CHECKER__
2545 /*
2546 * The division below will be OK if either the cache line size
2547 * can be divided by the entry size (ALIGN will round up) or if
2548 * the entry size can be divided by the cache line size, in
2549 * which case the ALIGN() will do nothing.
2550 */
2551 BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
2552 sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
2553 #endif
2554
2555 /*
2556 * Upward align the reorder buffer size to fill an entire cache
2557 * line for each queue, to avoid sharing cache lines between
2558 * different queues.
2559 */
2560 reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
2561
2562 /*
2563 * Allocate here so if allocation fails we can bail out early
2564 * before starting the BA session in the firmware
2565 */
2566 baid_data = kzalloc(sizeof(*baid_data) +
2567 mvm->trans->num_rx_queues *
2568 reorder_buf_size,
2569 GFP_KERNEL);
2570 if (!baid_data)
2571 return -ENOMEM;
2572
2573 /*
2574 * This division is why we need the above BUILD_BUG_ON(),
2575 * if that doesn't hold then this will not be right.
2576 */
2577 baid_data->entries_per_queue =
2578 reorder_buf_size / sizeof(baid_data->entries[0]);
2579 }
2580
2581 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2582 cmd.sta_id = mvm_sta->sta_id;
2583 cmd.add_modify = STA_MODE_MODIFY;
2584 if (start) {
2585 cmd.add_immediate_ba_tid = (u8) tid;
2586 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
2587 cmd.rx_ba_window = cpu_to_le16(buf_size);
2588 } else {
2589 cmd.remove_immediate_ba_tid = (u8) tid;
2590 }
2591 cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
2592 STA_MODIFY_REMOVE_BA_TID;
2593
2594 status = ADD_STA_SUCCESS;
2595 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2596 iwl_mvm_add_sta_cmd_size(mvm),
2597 &cmd, &status);
2598 if (ret)
2599 goto out_free;
2600
2601 switch (status & IWL_ADD_STA_STATUS_MASK) {
2602 case ADD_STA_SUCCESS:
2603 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2604 start ? "start" : "stopp");
2605 break;
2606 case ADD_STA_IMMEDIATE_BA_FAILURE:
2607 IWL_WARN(mvm, "RX BA Session refused by fw\n");
2608 ret = -ENOSPC;
2609 break;
2610 default:
2611 ret = -EIO;
2612 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
2613 start ? "start" : "stopp", status);
2614 break;
2615 }
2616
2617 if (ret)
2618 goto out_free;
2619
2620 if (start) {
2621 u8 baid;
2622
2623 mvm->rx_ba_sessions++;
2624
2625 if (!iwl_mvm_has_new_rx_api(mvm))
2626 return 0;
2627
2628 if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
2629 ret = -EINVAL;
2630 goto out_free;
2631 }
2632 baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
2633 IWL_ADD_STA_BAID_SHIFT);
2634 baid_data->baid = baid;
2635 baid_data->timeout = timeout;
2636 baid_data->last_rx = jiffies;
2637 baid_data->rcu_ptr = &mvm->baid_map[baid];
2638 timer_setup(&baid_data->session_timer,
2639 iwl_mvm_rx_agg_session_expired, 0);
2640 baid_data->mvm = mvm;
2641 baid_data->tid = tid;
2642 baid_data->sta_id = mvm_sta->sta_id;
2643
2644 mvm_sta->tid_to_baid[tid] = baid;
2645 if (timeout)
2646 mod_timer(&baid_data->session_timer,
2647 TU_TO_EXP_TIME(timeout * 2));
2648
2649 iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
2650
2651 /*
2652 * protect the BA data with RCU to cover a case where our internal
2653 * RX sync mechanism will timeout (not that it's supposed to happen)
2654 * and we will free the session data while RX is being processed
2655 */
2656 IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
2657 mvm_sta->sta_id, tid, baid);
2658 WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
2659 rcu_assign_pointer(mvm->baid_map[baid], baid_data);
2660 } else {
2661 u8 baid = mvm_sta->tid_to_baid[tid];
2662
2663 if (mvm->rx_ba_sessions > 0)
2664 /* check that restart flow didn't zero the counter */
2665 mvm->rx_ba_sessions--;
2666 if (!iwl_mvm_has_new_rx_api(mvm))
2667 return 0;
2668
2669 if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
2670 return -EINVAL;
2671
2672 baid_data = rcu_access_pointer(mvm->baid_map[baid]);
2673 if (WARN_ON(!baid_data))
2674 return -EINVAL;
2675
2676 /* synchronize all rx queues so we can safely delete */
2677 iwl_mvm_free_reorder(mvm, baid_data);
2678 del_timer_sync(&baid_data->session_timer);
2679 RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
2680 kfree_rcu(baid_data, rcu_head);
2681 IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
2682 }
2683 return 0;
2684
2685 out_free:
2686 kfree(baid_data);
2687 return ret;
2688 }
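
The sizing arithmetic above is easy to get wrong; a minimal standalone sketch (hypothetical entry size and BA window, SMP_CACHE_BYTES assumed to be 64, not driver code) of how ALIGN() keeps each RX queue's slice on whole cache lines while the division stays exact:

#include <stddef.h>
#include <stdio.h>

#define SMP_CACHE_BYTES	64	/* assumed cache line size */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
	size_t entry_size = 32;	/* stand-in for sizeof(entries[0]) */
	size_t buf_size = 61;	/* BA window size from the ADDBA request */
	size_t reorder_buf_size = buf_size * entry_size;	/* 1952 */
	size_t entries_per_queue;

	/* Round up so each RX queue's slice fills whole cache lines. */
	reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES); /* 1984 */

	/* Exact division: entry_size divides SMP_CACHE_BYTES, which is
	 * one of the two cases the BUILD_BUG_ON above allows. */
	entries_per_queue = reorder_buf_size / entry_size;	/* 62 */

	printf("%zu bytes per queue, %zu entries per queue\n",
	       reorder_buf_size, entries_per_queue);
	return 0;
}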
2689
2690 int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2691 int tid, u8 queue, bool start)
2692 {
2693 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2694 struct iwl_mvm_add_sta_cmd cmd = {};
2695 int ret;
2696 u32 status;
2697
2698 lockdep_assert_held(&mvm->mutex);
2699
2700 if (start) {
2701 mvm_sta->tfd_queue_msk |= BIT(queue);
2702 mvm_sta->tid_disable_agg &= ~BIT(tid);
2703 } else {
2704 /* In DQA-mode the queue isn't removed on agg termination */
2705 mvm_sta->tid_disable_agg |= BIT(tid);
2706 }
2707
2708 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2709 cmd.sta_id = mvm_sta->sta_id;
2710 cmd.add_modify = STA_MODE_MODIFY;
2711 if (!iwl_mvm_has_new_tx_api(mvm))
2712 cmd.modify_mask = STA_MODIFY_QUEUES;
2713 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
2714 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
2715 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
2716
2717 status = ADD_STA_SUCCESS;
2718 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2719 iwl_mvm_add_sta_cmd_size(mvm),
2720 &cmd, &status);
2721 if (ret)
2722 return ret;
2723
2724 switch (status & IWL_ADD_STA_STATUS_MASK) {
2725 case ADD_STA_SUCCESS:
2726 break;
2727 default:
2728 ret = -EIO;
2729 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
2730 start ? "start" : "stopp", status);
2731 break;
2732 }
2733
2734 return ret;
2735 }
2736
2737 const u8 tid_to_mac80211_ac[] = {
2738 IEEE80211_AC_BE,
2739 IEEE80211_AC_BK,
2740 IEEE80211_AC_BK,
2741 IEEE80211_AC_BE,
2742 IEEE80211_AC_VI,
2743 IEEE80211_AC_VI,
2744 IEEE80211_AC_VO,
2745 IEEE80211_AC_VO,
2746 IEEE80211_AC_VO,
2747 };
2748
2749 static const u8 tid_to_ucode_ac[] = {
2750 AC_BE,
2751 AC_BK,
2752 AC_BK,
2753 AC_BE,
2754 AC_VI,
2755 AC_VI,
2756 AC_VO,
2757 AC_VO,
2758 };
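
Both tables are indexed by TID; the mac80211 table carries a ninth entry so the management TID (IWL_MAX_TID_COUNT) also maps to voice. A hypothetical helper (not part of the driver) showing the intended lookup:

/* Hypothetical helper: map the TID bits of a QoS control field to a
 * mac80211 AC via the table above. */
static inline u8 example_qos_tid_to_ac(u8 qos_ctl)
{
	u8 tid = qos_ctl & IEEE80211_QOS_CTL_TID_MASK;	/* bits 0..3 */

	if (tid >= ARRAY_SIZE(tid_to_mac80211_ac))
		return IEEE80211_AC_BE;	/* TIDs 9..15 never reach here */
	return tid_to_mac80211_ac[tid];	/* e.g. TID 5 -> IEEE80211_AC_VI */
}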
2759
2760 int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2761 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2762 {
2763 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2764 struct iwl_mvm_tid_data *tid_data;
2765 u16 normalized_ssn;
2766 u16 txq_id;
2767 int ret;
2768
2769 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
2770 return -EINVAL;
2771
2772 if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
2773 mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
2774 IWL_ERR(mvm,
2775 "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
2776 mvmsta->tid_data[tid].state);
2777 return -ENXIO;
2778 }
2779
2780 lockdep_assert_held(&mvm->mutex);
2781
2782 if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
2783 iwl_mvm_has_new_tx_api(mvm)) {
2784 u8 ac = tid_to_mac80211_ac[tid];
2785
2786 ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
2787 if (ret)
2788 return ret;
2789 }
2790
2791 spin_lock_bh(&mvmsta->lock);
2792
2793 /*
2794 * Note the possible cases:
2795 * 1. An enabled TXQ - TXQ needs to become agg'ed
2796 * 2. The TXQ hasn't yet been enabled, so find a free one and mark
2797 * it as reserved
2798 */
2799 txq_id = mvmsta->tid_data[tid].txq_id;
2800 if (txq_id == IWL_MVM_INVALID_QUEUE) {
2801 ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
2802 IWL_MVM_DQA_MIN_DATA_QUEUE,
2803 IWL_MVM_DQA_MAX_DATA_QUEUE);
2804 if (ret < 0) {
2805 IWL_ERR(mvm, "Failed to allocate agg queue\n");
2806 goto out;
2807 }
2808
2809 txq_id = ret;
2810
2811 /* TXQ hasn't yet been enabled, so mark it only as reserved */
2812 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
2813 } else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
2814 ret = -ENXIO;
2815 IWL_ERR(mvm, "tid_id %d out of range (0, %d)!\n",
2816 tid, IWL_MAX_HW_QUEUES - 1);
2817 goto out;
2818
2819 } else if (unlikely(mvm->queue_info[txq_id].status ==
2820 IWL_MVM_QUEUE_SHARED)) {
2821 ret = -ENXIO;
2822 IWL_DEBUG_TX_QUEUES(mvm,
2823 "Can't start tid %d agg on shared queue!\n",
2824 tid);
2825 goto out;
2826 }
2827
2828 IWL_DEBUG_TX_QUEUES(mvm,
2829 "AGG for tid %d will be on queue #%d\n",
2830 tid, txq_id);
2831
2832 tid_data = &mvmsta->tid_data[tid];
2833 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
2834 tid_data->txq_id = txq_id;
2835 *ssn = tid_data->ssn;
2836
2837 IWL_DEBUG_TX_QUEUES(mvm,
2838 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
2839 mvmsta->sta_id, tid, txq_id, tid_data->ssn,
2840 tid_data->next_reclaimed);
2841
2842 /*
2843 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
2844 * to align the wrap around of ssn so we compare relevant values.
2845 */
2846 normalized_ssn = tid_data->ssn;
2847 if (mvm->trans->trans_cfg->gen2)
2848 normalized_ssn &= 0xff;
2849
2850 if (normalized_ssn == tid_data->next_reclaimed) {
2851 tid_data->state = IWL_AGG_STARTING;
2852 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2853 } else {
2854 tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
2855 }
2856
2857 ret = 0;
2858
2859 out:
2860 spin_unlock_bh(&mvmsta->lock);
2861
2862 return ret;
2863 }
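
Whether the session may start immediately depends on the HW queue having been drained up to the ADDBA starting sequence number; a toy model of that gate (values and the gen2 flag are made up for illustration):

#include <stdbool.h>
#include <stdio.h>

/* Toy model: on gen2 HW next_reclaimed is an 8-bit index, so only the
 * low byte of the SSN is compared against it. */
static bool agg_can_start_now(unsigned int ssn, unsigned int next_reclaimed,
			      bool gen2)
{
	if (gen2)
		ssn &= 0xff;
	return ssn == next_reclaimed;
}

int main(void)
{
	/* 0x115 truncates to 0x15, matching the 8-bit reclaim index. */
	printf("gen2: %d\n", agg_can_start_now(0x115, 0x15, true));  /* 1 */
	printf("gen1: %d\n", agg_can_start_now(0x115, 0x15, false)); /* 0 */
	return 0;
}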
2864
2865 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2866 struct ieee80211_sta *sta, u16 tid, u16 buf_size,
2867 bool amsdu)
2868 {
2869 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2870 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2871 unsigned int wdg_timeout =
2872 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
2873 int queue, ret;
2874 bool alloc_queue = true;
2875 enum iwl_mvm_queue_status queue_status;
2876 u16 ssn;
2877
2878 struct iwl_trans_txq_scd_cfg cfg = {
2879 .sta_id = mvmsta->sta_id,
2880 .tid = tid,
2881 .frame_limit = buf_size,
2882 .aggregate = true,
2883 };
2884
2885 /*
2886 * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
2887 * manually, so this driver path must not be reached in that case
2888 */
2889 if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
2890 return -EINVAL;
2891
2892 BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
2893 != IWL_MAX_TID_COUNT);
2894
2895 spin_lock_bh(&mvmsta->lock);
2896 ssn = tid_data->ssn;
2897 queue = tid_data->txq_id;
2898 tid_data->state = IWL_AGG_ON;
2899 mvmsta->agg_tids |= BIT(tid);
2900 tid_data->ssn = 0xffff;
2901 tid_data->amsdu_in_ampdu_allowed = amsdu;
2902 spin_unlock_bh(&mvmsta->lock);
2903
2904 if (iwl_mvm_has_new_tx_api(mvm)) {
2905 /*
2906 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
2907 * would have failed, so if we are here there is such a queue.
2908 *
2909 * The queue was allocated with IWL_FRAME_LIMIT as its frame
2910 * limit and that limit cannot be reconfigured on this HW, so
2911 * an aggregation window smaller than IWL_FRAME_LIMIT cannot
2912 * be honoured; reject the request in that case.
2913 */
2914
2915
2916 if (buf_size < IWL_FRAME_LIMIT)
2917 return -ENOTSUPP;
2918
2919 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
2920 if (ret)
2921 return -EIO;
2922 goto out;
2923 }
2924
2925 cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
2926
2927 queue_status = mvm->queue_info[queue].status;
2928
2929 /* Maybe there is no need to even alloc a queue... */
2930 if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
2931 alloc_queue = false;
2932
2933 /*
2934 * Only reconfig the SCD for the queue if the window size has actually
2935 * changed from current (become smaller)
2936 */
2937 if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
2938 /*
2939 * If reconfiguring an existing queue, it first must be
2940 * drained
2941 */
2942 ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
2943 BIT(queue));
2944 if (ret) {
2945 IWL_ERR(mvm,
2946 "Error draining queue before reconfig\n");
2947 return ret;
2948 }
2949
2950 ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
2951 mvmsta->sta_id, tid,
2952 buf_size, ssn);
2953 if (ret) {
2954 IWL_ERR(mvm,
2955 "Error reconfiguring TXQ #%d\n", queue);
2956 return ret;
2957 }
2958 }
2959
2960 if (alloc_queue)
2961 iwl_mvm_enable_txq(mvm, sta, queue, ssn,
2962 &cfg, wdg_timeout);
2963
2964 /* Send ADD_STA command to enable aggs only if the queue isn't shared */
2965 if (queue_status != IWL_MVM_QUEUE_SHARED) {
2966 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
2967 if (ret)
2968 return -EIO;
2969 }
2970
2971 /* No need to mark as reserved */
2972 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
2973
2974 out:
2975 /*
2976 * Even though in theory the peer could have different
2977 * aggregation reorder buffer sizes for different sessions,
2978 * our ucode doesn't allow for that and has a global limit
2979 * for each station. Therefore, use the minimum of all the
2980 * aggregation sessions and our default value.
2981 */
2982 mvmsta->max_agg_bufsize =
2983 min(mvmsta->max_agg_bufsize, buf_size);
2984 mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
2985
2986 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
2987 sta->addr, tid);
2988
2989 return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq);
2990 }
2991
2992 static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
2993 struct iwl_mvm_sta *mvmsta,
2994 struct iwl_mvm_tid_data *tid_data)
2995 {
2996 u16 txq_id = tid_data->txq_id;
2997
2998 lockdep_assert_held(&mvm->mutex);
2999
3000 if (iwl_mvm_has_new_tx_api(mvm))
3001 return;
3002
3003 /*
3004 * The TXQ is marked as reserved only if no traffic came through yet.
3005 * This means no traffic has been sent on this TID (agg'd or not), so
3006 * we no longer have use for the queue. Since it hasn't even been
3007 * allocated through iwl_mvm_enable_txq, we can just mark it back as
3008 * free.
3009 */
3010 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
3011 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
3012 tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
3013 }
3014 }
3015
3016 int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3017 struct ieee80211_sta *sta, u16 tid)
3018 {
3019 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3020 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3021 u16 txq_id;
3022 int err;
3023
3024 /*
3025 * If mac80211 is cleaning its state, then say that we finished since
3026 * our state has been cleared anyway.
3027 */
3028 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
3029 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3030 return 0;
3031 }
3032
3033 spin_lock_bh(&mvmsta->lock);
3034
3035 txq_id = tid_data->txq_id;
3036
3037 IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
3038 mvmsta->sta_id, tid, txq_id, tid_data->state);
3039
3040 mvmsta->agg_tids &= ~BIT(tid);
3041
3042 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
3043
3044 switch (tid_data->state) {
3045 case IWL_AGG_ON:
3046 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
3047
3048 IWL_DEBUG_TX_QUEUES(mvm,
3049 "ssn = %d, next_recl = %d\n",
3050 tid_data->ssn, tid_data->next_reclaimed);
3051
3052 tid_data->ssn = 0xffff;
3053 tid_data->state = IWL_AGG_OFF;
3054 spin_unlock_bh(&mvmsta->lock);
3055
3056 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3057
3058 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
3059 return 0;
3060 case IWL_AGG_STARTING:
3061 case IWL_EMPTYING_HW_QUEUE_ADDBA:
3062 /*
3063 * The agg session has been stopped before it was set up. This
3064 * can happen when the AddBA timer times out for example.
3065 */
3066 /* No barriers since we are under mutex */
3067
3068 lockdep_assert_held(&mvm->mutex);
3069
3070 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3071 tid_data->state = IWL_AGG_OFF;
3072 err = 0;
3073 break;
3074 default:
3075 IWL_ERR(mvm,
3076 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
3077 mvmsta->sta_id, tid, tid_data->state);
3078 IWL_ERR(mvm,
3079 "\ttid_data->txq_id = %d\n", tid_data->txq_id);
3080 err = -EINVAL;
3081 }
3082
3083 spin_unlock_bh(&mvmsta->lock);
3084
3085 return err;
3086 }
3087
3088 int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3089 struct ieee80211_sta *sta, u16 tid)
3090 {
3091 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3092 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3093 u16 txq_id;
3094 enum iwl_mvm_agg_state old_state;
3095
3096 /*
3097 * First set the agg state to OFF to avoid calling
3098 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
3099 */
3100 spin_lock_bh(&mvmsta->lock);
3101 txq_id = tid_data->txq_id;
3102 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
3103 mvmsta->sta_id, tid, txq_id, tid_data->state);
3104 old_state = tid_data->state;
3105 tid_data->state = IWL_AGG_OFF;
3106 mvmsta->agg_tids &= ~BIT(tid);
3107 spin_unlock_bh(&mvmsta->lock);
3108
3109 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
3110
3111 if (old_state >= IWL_AGG_ON) {
3112 iwl_mvm_drain_sta(mvm, mvmsta, true);
3113
3114 if (iwl_mvm_has_new_tx_api(mvm)) {
3115 if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
3116 BIT(tid), 0))
3117 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
3118 iwl_trans_wait_txq_empty(mvm->trans, txq_id);
3119 } else {
3120 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
3121 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
3122 iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
3123 }
3124
3125 iwl_mvm_drain_sta(mvm, mvmsta, false);
3126
3127 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
3128 }
3129
3130 return 0;
3131 }
3132
3133 static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
3134 {
3135 int i, max = -1, max_offs = -1;
3136
3137 lockdep_assert_held(&mvm->mutex);
3138
3139 /* Pick the unused key offset with the highest 'deleted'
3140 * counter. Every time a key is deleted, all the counters
3141 * are incremented and the one that was just deleted is
3142 * reset to zero. Thus, the highest counter is the one
3143 * that was deleted longest ago. Pick that one.
3144 */
3145 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3146 if (test_bit(i, mvm->fw_key_table))
3147 continue;
3148 if (mvm->fw_key_deleted[i] > max) {
3149 max = mvm->fw_key_deleted[i];
3150 max_offs = i;
3151 }
3152 }
3153
3154 if (max_offs < 0)
3155 return STA_KEY_IDX_INVALID;
3156
3157 return max_offs;
3158 }
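
The loop above implements a simple aging policy: every deletion bumps all fw_key_deleted counters and zeroes the freed slot, so the largest counter marks the free offset that has been unused longest. A toy model with made-up state:

#include <stdio.h>

#define SLOTS 8

int main(void)
{
	unsigned char in_use[SLOTS]  = { 1, 0, 1, 0, 0, 1, 1, 0 };
	unsigned char deleted[SLOTS] = { 0, 3, 0, 7, 1, 0, 0, 2 };
	int i, max = -1, max_offs = -1;

	for (i = 0; i < SLOTS; i++) {
		if (in_use[i])		/* mirrors test_bit(i, fw_key_table) */
			continue;
		if (deleted[i] > max) {
			max = deleted[i];
			max_offs = i;
		}
	}
	/* Slot 3 wins: it is free and was deleted longest ago (age 7). */
	printf("chosen offset: %d\n", max_offs);
	return 0;
}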
3159
3160 static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
3161 struct ieee80211_vif *vif,
3162 struct ieee80211_sta *sta)
3163 {
3164 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3165
3166 if (sta)
3167 return iwl_mvm_sta_from_mac80211(sta);
3168
3169 /*
3170 * The device expects GTKs for station interfaces to be
3171 * installed as GTKs for the AP station. If we have no
3172 * station ID, then use AP's station ID.
3173 */
3174 if (vif->type == NL80211_IFTYPE_STATION &&
3175 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
3176 u8 sta_id = mvmvif->ap_sta_id;
3177
3178 sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
3179 lockdep_is_held(&mvm->mutex));
3180
3181 /*
3182 * It is possible that the 'sta' parameter is NULL,
3183 * for example when a GTK is removed - the sta_id will then
3184 * be the AP ID, and no station was passed by mac80211.
3185 */
3186 if (IS_ERR_OR_NULL(sta))
3187 return NULL;
3188
3189 return iwl_mvm_sta_from_mac80211(sta);
3190 }
3191
3192 return NULL;
3193 }
3194
3195 static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
3196 u32 sta_id,
3197 struct ieee80211_key_conf *key, bool mcast,
3198 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
3199 u8 key_offset, bool mfp)
3200 {
3201 union {
3202 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3203 struct iwl_mvm_add_sta_key_cmd cmd;
3204 } u = {};
3205 __le16 key_flags;
3206 int ret;
3207 u32 status;
3208 u16 keyidx;
3209 u64 pn = 0;
3210 int i, size;
3211 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3212 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
3213
3214 if (sta_id == IWL_MVM_INVALID_STA)
3215 return -EINVAL;
3216
3217 keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
3218 STA_KEY_FLG_KEYID_MSK;
3219 key_flags = cpu_to_le16(keyidx);
3220 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
3221
3222 switch (key->cipher) {
3223 case WLAN_CIPHER_SUITE_TKIP:
3224 key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
3225 if (new_api) {
3226 memcpy((void *)&u.cmd.tx_mic_key,
3227 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
3228 IWL_MIC_KEY_SIZE);
3229
3230 memcpy((void *)&u.cmd.rx_mic_key,
3231 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
3232 IWL_MIC_KEY_SIZE);
3233 pn = atomic64_read(&key->tx_pn);
3234
3235 } else {
3236 u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
3237 for (i = 0; i < 5; i++)
3238 u.cmd_v1.tkip_rx_ttak[i] =
3239 cpu_to_le16(tkip_p1k[i]);
3240 }
3241 memcpy(u.cmd.common.key, key->key, key->keylen);
3242 break;
3243 case WLAN_CIPHER_SUITE_CCMP:
3244 key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
3245 memcpy(u.cmd.common.key, key->key, key->keylen);
3246 if (new_api)
3247 pn = atomic64_read(&key->tx_pn);
3248 break;
3249 case WLAN_CIPHER_SUITE_WEP104:
3250 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
3251 /* fall through */
3252 case WLAN_CIPHER_SUITE_WEP40:
3253 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
3254 memcpy(u.cmd.common.key + 3, key->key, key->keylen);
3255 break;
3256 case WLAN_CIPHER_SUITE_GCMP_256:
3257 key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
3258 /* fall through */
3259 case WLAN_CIPHER_SUITE_GCMP:
3260 key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
3261 memcpy(u.cmd.common.key, key->key, key->keylen);
3262 if (new_api)
3263 pn = atomic64_read(&key->tx_pn);
3264 break;
3265 default:
3266 key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
3267 memcpy(u.cmd.common.key, key->key, key->keylen);
3268 }
3269
3270 if (mcast)
3271 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
3272 if (mfp)
3273 key_flags |= cpu_to_le16(STA_KEY_MFP);
3274
3275 u.cmd.common.key_offset = key_offset;
3276 u.cmd.common.key_flags = key_flags;
3277 u.cmd.common.sta_id = sta_id;
3278
3279 if (new_api) {
3280 u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
3281 size = sizeof(u.cmd);
3282 } else {
3283 size = sizeof(u.cmd_v1);
3284 }
3285
3286 status = ADD_STA_SUCCESS;
3287 if (cmd_flags & CMD_ASYNC)
3288 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
3289 &u.cmd);
3290 else
3291 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
3292 &u.cmd, &status);
3293
3294 switch (status) {
3295 case ADD_STA_SUCCESS:
3296 IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
3297 break;
3298 default:
3299 ret = -EIO;
3300 IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
3301 break;
3302 }
3303
3304 return ret;
3305 }
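
The TKIP memcpy() offsets above follow the nl80211 key layout: a 16-byte temporal key, then the 8-byte TX MIC key at offset 16 and the 8-byte RX MIC key at offset 24. A standalone sketch with dummy key material:

#include <stdio.h>
#include <string.h>

#define TKIP_TX_MIC_OFF	16	/* NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY */
#define TKIP_RX_MIC_OFF	24	/* NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY */
#define MIC_KEY_SIZE	8

int main(void)
{
	unsigned char key[32];	/* 16B temporal key + two 8B MIC keys */
	unsigned char tx_mic[MIC_KEY_SIZE], rx_mic[MIC_KEY_SIZE];
	int i;

	for (i = 0; i < 32; i++)	/* dummy key material */
		key[i] = (unsigned char)i;

	memcpy(tx_mic, key + TKIP_TX_MIC_OFF, MIC_KEY_SIZE);
	memcpy(rx_mic, key + TKIP_RX_MIC_OFF, MIC_KEY_SIZE);

	/* prints tx_mic[0]=0x10 rx_mic[0]=0x18 */
	printf("tx_mic[0]=0x%02x rx_mic[0]=0x%02x\n", tx_mic[0], rx_mic[0]);
	return 0;
}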
3306
3307 static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
3308 struct ieee80211_key_conf *keyconf,
3309 u8 sta_id, bool remove_key)
3310 {
3311 struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
3312
3313 /* verify the key details match the required command's expectations */
3314 if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
3315 (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
3316 (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
3317 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
3318 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
3319 return -EINVAL;
3320
3321 if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
3322 keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
3323 return -EINVAL;
3324
3325 igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
3326 igtk_cmd.sta_id = cpu_to_le32(sta_id);
3327
3328 if (remove_key) {
3329 /* This is a valid situation for IGTK */
3330 if (sta_id == IWL_MVM_INVALID_STA)
3331 return 0;
3332
3333 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
3334 } else {
3335 struct ieee80211_key_seq seq;
3336 const u8 *pn;
3337
3338 switch (keyconf->cipher) {
3339 case WLAN_CIPHER_SUITE_AES_CMAC:
3340 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
3341 break;
3342 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
3343 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
3344 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
3345 break;
3346 default:
3347 return -EINVAL;
3348 }
3349
3350 memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
3351 if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3352 igtk_cmd.ctrl_flags |=
3353 cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
3354 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3355 pn = seq.aes_cmac.pn;
3356 igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
3357 ((u64) pn[4] << 8) |
3358 ((u64) pn[3] << 16) |
3359 ((u64) pn[2] << 24) |
3360 ((u64) pn[1] << 32) |
3361 ((u64) pn[0] << 40));
3362 }
3363
3364 IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
3365 remove_key ? "removing" : "installing",
3366 igtk_cmd.sta_id);
3367
3368 if (!iwl_mvm_has_new_rx_api(mvm)) {
3369 struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
3370 .ctrl_flags = igtk_cmd.ctrl_flags,
3371 .key_id = igtk_cmd.key_id,
3372 .sta_id = igtk_cmd.sta_id,
3373 .receive_seq_cnt = igtk_cmd.receive_seq_cnt
3374 };
3375
3376 memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
3377 ARRAY_SIZE(igtk_cmd_v1.igtk));
3378 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3379 sizeof(igtk_cmd_v1), &igtk_cmd_v1);
3380 }
3381 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3382 sizeof(igtk_cmd), &igtk_cmd);
3383 }
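
The shift cascade above reverses the 6-byte IGTK packet number, which mac80211 hands over most-significant byte first; a standalone sketch with an example PN:

#include <stdint.h>
#include <stdio.h>

/* Toy illustration of the IPN assembly above: pn[0] is the
 * most-significant byte of the 6-byte BIP packet number. */
int main(void)
{
	const uint8_t pn[6] = { 0x00, 0x00, 0x00, 0x00, 0x01, 0x02 };
	uint64_t seq = ((uint64_t)pn[5] << 0)  |
		       ((uint64_t)pn[4] << 8)  |
		       ((uint64_t)pn[3] << 16) |
		       ((uint64_t)pn[2] << 24) |
		       ((uint64_t)pn[1] << 32) |
		       ((uint64_t)pn[0] << 40);

	printf("receive_seq_cnt = 0x%llx\n",	/* prints 0x102 */
	       (unsigned long long)seq);
	return 0;
}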
3384
3385
3386 static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
3387 struct ieee80211_vif *vif,
3388 struct ieee80211_sta *sta)
3389 {
3390 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3391
3392 if (sta)
3393 return sta->addr;
3394
3395 if (vif->type == NL80211_IFTYPE_STATION &&
3396 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
3397 u8 sta_id = mvmvif->ap_sta_id;
3398 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
3399 lockdep_is_held(&mvm->mutex));
3400 return sta->addr;
3401 }
3402
3403
3404 return NULL;
3405 }
3406
3407 static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3408 struct ieee80211_vif *vif,
3409 struct ieee80211_sta *sta,
3410 struct ieee80211_key_conf *keyconf,
3411 u8 key_offset,
3412 bool mcast)
3413 {
3414 int ret;
3415 const u8 *addr;
3416 struct ieee80211_key_seq seq;
3417 u16 p1k[5];
3418 u32 sta_id;
3419 bool mfp = false;
3420
3421 if (sta) {
3422 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3423
3424 sta_id = mvm_sta->sta_id;
3425 mfp = sta->mfp;
3426 } else if (vif->type == NL80211_IFTYPE_AP &&
3427 !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
3428 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3429
3430 sta_id = mvmvif->mcast_sta.sta_id;
3431 } else {
3432 IWL_ERR(mvm, "Failed to find station id\n");
3433 return -EINVAL;
3434 }
3435
3436 switch (keyconf->cipher) {
3437 case WLAN_CIPHER_SUITE_TKIP:
3438 addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
3439
3440 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3441 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
3442 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3443 seq.tkip.iv32, p1k, 0, key_offset,
3444 mfp);
3445 break;
3446 case WLAN_CIPHER_SUITE_CCMP:
3447 case WLAN_CIPHER_SUITE_WEP40:
3448 case WLAN_CIPHER_SUITE_WEP104:
3449 case WLAN_CIPHER_SUITE_GCMP:
3450 case WLAN_CIPHER_SUITE_GCMP_256:
3451 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3452 0, NULL, 0, key_offset, mfp);
3453 break;
3454 default:
3455 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3456 0, NULL, 0, key_offset, mfp);
3457 }
3458
3459 return ret;
3460 }
3461
3462 int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3463 struct ieee80211_vif *vif,
3464 struct ieee80211_sta *sta,
3465 struct ieee80211_key_conf *keyconf,
3466 u8 key_offset)
3467 {
3468 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3469 struct iwl_mvm_sta *mvm_sta;
3470 u8 sta_id = IWL_MVM_INVALID_STA;
3471 int ret;
3472 static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
3473
3474 lockdep_assert_held(&mvm->mutex);
3475
3476 if (vif->type != NL80211_IFTYPE_AP ||
3477 keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
3478 /* Get the station id from the mvm local station table */
3479 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3480 if (!mvm_sta) {
3481 IWL_ERR(mvm, "Failed to find station\n");
3482 return -EINVAL;
3483 }
3484 sta_id = mvm_sta->sta_id;
3485
3486 /*
3487 * It is possible that the 'sta' parameter is NULL, and thus
3488 * there is a need to retrieve the sta from the local station
3489 * table.
3490 */
3491 if (!sta) {
3492 sta = rcu_dereference_protected(
3493 mvm->fw_id_to_mac_id[sta_id],
3494 lockdep_is_held(&mvm->mutex));
3495 if (IS_ERR_OR_NULL(sta)) {
3496 IWL_ERR(mvm, "Invalid station id\n");
3497 return -EINVAL;
3498 }
3499 }
3500
3501 if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
3502 return -EINVAL;
3503 } else {
3504 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3505
3506 sta_id = mvmvif->mcast_sta.sta_id;
3507 }
3508
3509 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3510 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3511 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
3512 ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
3513 goto end;
3514 }
3515
3516 /* If the key_offset is not pre-assigned, we need to find a
3517 * new offset to use. In normal cases, the offset is not
3518 * pre-assigned, but during HW_RESTART we want to reuse the
3519 * same indices, so we pass them when this function is called.
3520 *
3521 * In D3 entry, we need to hardcode the indices (because the
3522 * firmware hardcodes the PTK offset to 0). In this case, we
3523 * need to make sure we don't overwrite the hw_key_idx in the
3524 * keyconf structure, because otherwise we cannot configure
3525 * the original ones back when resuming.
3526 */
3527 if (key_offset == STA_KEY_IDX_INVALID) {
3528 key_offset = iwl_mvm_set_fw_key_idx(mvm);
3529 if (key_offset == STA_KEY_IDX_INVALID)
3530 return -ENOSPC;
3531 keyconf->hw_key_idx = key_offset;
3532 }
3533
3534 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
3535 if (ret)
3536 goto end;
3537
3538 /*
3539 * For WEP, the same key is used for multicast and unicast. Upload it
3540 * again, using the same key offset, and now pointing the other one
3541 * to the same key slot (offset).
3542 * If this fails, remove the original as well.
3543 */
3544 if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3545 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
3546 sta) {
3547 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
3548 key_offset, !mcast);
3549 if (ret) {
3550 __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3551 goto end;
3552 }
3553 }
3554
3555 __set_bit(key_offset, mvm->fw_key_table);
3556
3557 end:
3558 IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
3559 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
3560 sta ? sta->addr : zero_addr, ret);
3561 return ret;
3562 }
3563
3564 int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
3565 struct ieee80211_vif *vif,
3566 struct ieee80211_sta *sta,
3567 struct ieee80211_key_conf *keyconf)
3568 {
3569 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3570 struct iwl_mvm_sta *mvm_sta;
3571 u8 sta_id = IWL_MVM_INVALID_STA;
3572 int ret, i;
3573
3574 lockdep_assert_held(&mvm->mutex);
3575
3576 /* Get the station from the mvm local station table */
3577 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3578 if (mvm_sta)
3579 sta_id = mvm_sta->sta_id;
3580 else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
3581 sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;
3582
3583
3584 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
3585 keyconf->keyidx, sta_id);
3586
3587 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3588 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3589 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3590 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
3591
3592 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
3593 IWL_ERR(mvm, "offset %d not used in fw key table.\n",
3594 keyconf->hw_key_idx);
3595 return -ENOENT;
3596 }
3597
3598 /* track which key was deleted last */
3599 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3600 if (mvm->fw_key_deleted[i] < U8_MAX)
3601 mvm->fw_key_deleted[i]++;
3602 }
3603 mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
3604
3605 if (sta && !mvm_sta) {
3606 IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
3607 return 0;
3608 }
3609
3610 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3611 if (ret)
3612 return ret;
3613
3614 /* delete WEP key twice to get rid of (now useless) offset */
3615 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3616 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
3617 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
3618
3619 return ret;
3620 }
3621
3622 void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
3623 struct ieee80211_vif *vif,
3624 struct ieee80211_key_conf *keyconf,
3625 struct ieee80211_sta *sta, u32 iv32,
3626 u16 *phase1key)
3627 {
3628 struct iwl_mvm_sta *mvm_sta;
3629 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3630 bool mfp = sta ? sta->mfp : false;
3631
3632 rcu_read_lock();
3633
3634 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3635 if (WARN_ON_ONCE(!mvm_sta))
3636 goto unlock;
3637 iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
3638 iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
3639 mfp);
3640
3641 unlock:
3642 rcu_read_unlock();
3643 }
3644
3645 void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
3646 struct ieee80211_sta *sta)
3647 {
3648 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3649 struct iwl_mvm_add_sta_cmd cmd = {
3650 .add_modify = STA_MODE_MODIFY,
3651 .sta_id = mvmsta->sta_id,
3652 .station_flags_msk = cpu_to_le32(STA_FLG_PS),
3653 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3654 };
3655 int ret;
3656
3657 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3658 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3659 if (ret)
3660 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3661 }
3662
3663 void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
3664 struct ieee80211_sta *sta,
3665 enum ieee80211_frame_release_type reason,
3666 u16 cnt, u16 tids, bool more_data,
3667 bool single_sta_queue)
3668 {
3669 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3670 struct iwl_mvm_add_sta_cmd cmd = {
3671 .add_modify = STA_MODE_MODIFY,
3672 .sta_id = mvmsta->sta_id,
3673 .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
3674 .sleep_tx_count = cpu_to_le16(cnt),
3675 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3676 };
3677 int tid, ret;
3678 unsigned long _tids = tids;
3679
3680 /* convert TIDs to ACs - we don't support TSPEC so that's OK
3681 * Note that this field is reserved and unused by firmware not
3682 * supporting GO uAPSD, so it's safe to always do this.
3683 */
3684 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
3685 cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
3686
3687 /* If we're releasing frames from aggregation or dqa queues then check
3688 * if all the queues that we're releasing frames from, combined, have:
3689 * - more frames than the service period, in which case more_data
3690 * needs to be set
3691 * - fewer than 'cnt' frames, in which case we need to adjust the
3692 * firmware command (but do that unconditionally)
3693 */
3694 if (single_sta_queue) {
3695 int remaining = cnt;
3696 int sleep_tx_count;
3697
3698 spin_lock_bh(&mvmsta->lock);
3699 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
3700 struct iwl_mvm_tid_data *tid_data;
3701 u16 n_queued;
3702
3703 tid_data = &mvmsta->tid_data[tid];
3704
3705 n_queued = iwl_mvm_tid_queued(mvm, tid_data);
3706 if (n_queued > remaining) {
3707 more_data = true;
3708 remaining = 0;
3709 break;
3710 }
3711 remaining -= n_queued;
3712 }
3713 sleep_tx_count = cnt - remaining;
3714 if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
3715 mvmsta->sleep_tx_count = sleep_tx_count;
3716 spin_unlock_bh(&mvmsta->lock);
3717
3718 cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
3719 if (WARN_ON(cnt - remaining == 0)) {
3720 ieee80211_sta_eosp(sta);
3721 return;
3722 }
3723 }
3724
3725 /* Note: this is ignored by firmware not supporting GO uAPSD */
3726 if (more_data)
3727 cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;
3728
3729 if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
3730 mvmsta->next_status_eosp = true;
3731 cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
3732 } else {
3733 cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
3734 }
3735
3736 /* block the Tx queues until the FW updated the sleep Tx count */
3737 iwl_trans_block_txq_ptrs(mvm->trans, true);
3738
3739 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
3740 CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
3741 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3742 if (ret)
3743 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3744 }
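
In the single-queue case the count reported to the firmware is capped at what is actually queued across the released TIDs, and MOREDATA is raised when a TID holds more than the remaining budget; a toy model of that accounting:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int queued[] = { 3, 9 };	/* frames per released TID */
	int cnt = 8, remaining = cnt;		/* service-period budget */
	bool more_data = false;
	unsigned int i;

	for (i = 0; i < sizeof(queued) / sizeof(queued[0]); i++) {
		if (queued[i] > (unsigned int)remaining) {
			more_data = true;	/* can't drain this TID */
			remaining = 0;
			break;
		}
		remaining -= queued[i];
	}
	/* 3 fit, then 9 > 5 remaining: release 8 frames, set MOREDATA. */
	printf("sleep_tx_count=%d more_data=%d\n", cnt - remaining, more_data);
	return 0;
}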
3745
3746 void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
3747 struct iwl_rx_cmd_buffer *rxb)
3748 {
3749 struct iwl_rx_packet *pkt = rxb_addr(rxb);
3750 struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
3751 struct ieee80211_sta *sta;
3752 u32 sta_id = le32_to_cpu(notif->sta_id);
3753
3754 if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
3755 return;
3756
3757 rcu_read_lock();
3758 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
3759 if (!IS_ERR_OR_NULL(sta))
3760 ieee80211_sta_eosp(sta);
3761 rcu_read_unlock();
3762 }
3763
3764 void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
3765 struct iwl_mvm_sta *mvmsta, bool disable)
3766 {
3767 struct iwl_mvm_add_sta_cmd cmd = {
3768 .add_modify = STA_MODE_MODIFY,
3769 .sta_id = mvmsta->sta_id,
3770 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
3771 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
3772 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3773 };
3774 int ret;
3775
3776 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3777 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3778 if (ret)
3779 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3780 }
3781
3782 void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
3783 struct ieee80211_sta *sta,
3784 bool disable)
3785 {
3786 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3787
3788 spin_lock_bh(&mvm_sta->lock);
3789
3790 if (mvm_sta->disable_tx == disable) {
3791 spin_unlock_bh(&mvm_sta->lock);
3792 return;
3793 }
3794
3795 mvm_sta->disable_tx = disable;
3796
3797 /* Tell mac80211 to start/stop queuing tx for this station */
3798 ieee80211_sta_block_awake(mvm->hw, sta, disable);
3799
3800 iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
3801
3802 spin_unlock_bh(&mvm_sta->lock);
3803 }
3804
3805 static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
3806 struct iwl_mvm_vif *mvmvif,
3807 struct iwl_mvm_int_sta *sta,
3808 bool disable)
3809 {
3810 u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
3811 struct iwl_mvm_add_sta_cmd cmd = {
3812 .add_modify = STA_MODE_MODIFY,
3813 .sta_id = sta->sta_id,
3814 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
3815 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
3816 .mac_id_n_color = cpu_to_le32(id),
3817 };
3818 int ret;
3819
3820 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
3821 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3822 if (ret)
3823 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3824 }
3825
3826 void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
3827 struct iwl_mvm_vif *mvmvif,
3828 bool disable)
3829 {
3830 struct ieee80211_sta *sta;
3831 struct iwl_mvm_sta *mvm_sta;
3832 int i;
3833
3834 lockdep_assert_held(&mvm->mutex);
3835
3836 /* Block/unblock all the stations of the given mvmvif */
3837 for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
3838 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
3839 lockdep_is_held(&mvm->mutex));
3840 if (IS_ERR_OR_NULL(sta))
3841 continue;
3842
3843 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3844 if (mvm_sta->mac_id_n_color !=
3845 FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
3846 continue;
3847
3848 iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
3849 }
3850
3851 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
3852 return;
3853
3854 /* Need to block/unblock also multicast station */
3855 if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
3856 iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
3857 &mvmvif->mcast_sta, disable);
3858
3859 /*
3860 * Only unblock the broadcast station (FW blocks it for immediate
3861 * quiet, not the driver)
3862 */
3863 if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
3864 iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
3865 &mvmvif->bcast_sta, disable);
3866 }
3867
3868 void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
3869 {
3870 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3871 struct iwl_mvm_sta *mvmsta;
3872
3873 rcu_read_lock();
3874
3875 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
3876
3877 if (!WARN_ON(!mvmsta))
3878 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
3879
3880 rcu_read_unlock();
3881 }
3882
3883 u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
3884 {
3885 u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
3886
3887 /*
3888 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
3889 * to align the wrap around of ssn so we compare relevant values.
3890 */
3891 if (mvm->trans->trans_cfg->gen2)
3892 sn &= 0xff;
3893
3894 return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
3895 }