Lines matching references to: ar (carl9170 TX path)
48 static inline unsigned int __carl9170_get_queue(struct ar9170 *ar, in __carl9170_get_queue() argument
64 static inline unsigned int carl9170_get_queue(struct ar9170 *ar, in carl9170_get_queue() argument
67 return __carl9170_get_queue(ar, skb_get_queue_mapping(skb)); in carl9170_get_queue()
70 static bool is_mem_full(struct ar9170 *ar) in is_mem_full() argument
72 return (DIV_ROUND_UP(IEEE80211_MAX_FRAME_LEN, ar->fw.mem_block_size) > in is_mem_full()
73 atomic_read(&ar->mem_free_blocks)); in is_mem_full()
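
For orientation: this check is a worst-case reservation test; the pool counts as full as soon as fewer free firmware blocks remain than one maximum-sized frame could need. A minimal user-space sketch of the same test, assuming a hypothetical 256-byte block size (the real value, ar->fw.mem_block_size, is firmware-provided):

#include <stdatomic.h>
#include <stdbool.h>

#define IEEE80211_MAX_FRAME_LEN	2352	/* as in linux/ieee80211.h */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static atomic_int mem_free_blocks;		/* models ar->mem_free_blocks */
static const int mem_block_size = 256;		/* assumed; the driver reads it from the firmware */

/* "Full" means: not even one worst-case frame would fit any more. */
static bool is_mem_full(void)
{
	int worst_case = DIV_ROUND_UP(IEEE80211_MAX_FRAME_LEN, mem_block_size);

	return worst_case > atomic_load(&mem_free_blocks);
}

With 256-byte blocks the threshold is DIV_ROUND_UP(2352, 256) = 10, so the pool is reported full once fewer than 10 blocks remain free.
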
76 static void carl9170_tx_accounting(struct ar9170 *ar, struct sk_buff *skb) in carl9170_tx_accounting() argument
81 atomic_inc(&ar->tx_total_queued); in carl9170_tx_accounting()
84 spin_lock_bh(&ar->tx_stats_lock); in carl9170_tx_accounting()
92 ar->tx_stats[queue].len++; in carl9170_tx_accounting()
93 ar->tx_stats[queue].count++; in carl9170_tx_accounting()
95 mem_full = is_mem_full(ar); in carl9170_tx_accounting()
96 for (i = 0; i < ar->hw->queues; i++) { in carl9170_tx_accounting()
97 if (mem_full || ar->tx_stats[i].len >= ar->tx_stats[i].limit) { in carl9170_tx_accounting()
98 ieee80211_stop_queue(ar->hw, i); in carl9170_tx_accounting()
99 ar->queue_stop_timeout[i] = jiffies; in carl9170_tx_accounting()
103 spin_unlock_bh(&ar->tx_stats_lock); in carl9170_tx_accounting()
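
Taken together, the matches in carl9170_tx_accounting() show the stop side of the flow control: the global and per-queue counters are bumped under tx_stats_lock, and every mac80211 queue is stopped as soon as the device pool is exhausted, or any single queue once it reaches its limit, with jiffies recorded so a stuck queue can later be detected. A condensed sketch reassembled from those matches (local declarations added; not the verbatim driver code):

static void tx_accounting_sketch(struct ar9170 *ar, struct sk_buff *skb)
{
	unsigned int queue = skb_get_queue_mapping(skb);
	bool mem_full;
	int i;

	atomic_inc(&ar->tx_total_queued);

	spin_lock_bh(&ar->tx_stats_lock);
	ar->tx_stats[queue].len++;
	ar->tx_stats[queue].count++;

	/* Stop every queue if the device pool is exhausted, or any queue
	 * that has hit its own limit, and remember when it was stopped. */
	mem_full = is_mem_full(ar);
	for (i = 0; i < ar->hw->queues; i++) {
		if (mem_full || ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
			ieee80211_stop_queue(ar->hw, i);
			ar->queue_stop_timeout[i] = jiffies;
		}
	}
	spin_unlock_bh(&ar->tx_stats_lock);
}
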
107 static struct ieee80211_sta *__carl9170_get_tx_sta(struct ar9170 *ar, in __carl9170_get_tx_sta() argument
121 vif = rcu_dereference(ar->vif_priv[vif_id].vif); in __carl9170_get_tx_sta()
138 static void carl9170_tx_ps_unblock(struct ar9170 *ar, struct sk_buff *skb) in carl9170_tx_ps_unblock() argument
144 sta = __carl9170_get_tx_sta(ar, skb); in carl9170_tx_ps_unblock()
150 ieee80211_sta_block_awake(ar->hw, sta, false); in carl9170_tx_ps_unblock()
156 static void carl9170_tx_accounting_free(struct ar9170 *ar, struct sk_buff *skb) in carl9170_tx_accounting_free() argument
162 spin_lock_bh(&ar->tx_stats_lock); in carl9170_tx_accounting_free()
164 ar->tx_stats[queue].len--; in carl9170_tx_accounting_free()
166 if (!is_mem_full(ar)) { in carl9170_tx_accounting_free()
168 for (i = 0; i < ar->hw->queues; i++) { in carl9170_tx_accounting_free()
169 if (ar->tx_stats[i].len >= CARL9170_NUM_TX_LIMIT_SOFT) in carl9170_tx_accounting_free()
172 if (ieee80211_queue_stopped(ar->hw, i)) { in carl9170_tx_accounting_free()
175 tmp = jiffies - ar->queue_stop_timeout[i]; in carl9170_tx_accounting_free()
176 if (tmp > ar->max_queue_stop_timeout[i]) in carl9170_tx_accounting_free()
177 ar->max_queue_stop_timeout[i] = tmp; in carl9170_tx_accounting_free()
180 ieee80211_wake_queue(ar->hw, i); in carl9170_tx_accounting_free()
184 spin_unlock_bh(&ar->tx_stats_lock); in carl9170_tx_accounting_free()
186 if (atomic_dec_and_test(&ar->tx_total_queued)) in carl9170_tx_accounting_free()
187 complete(&ar->tx_flush); in carl9170_tx_accounting_free()
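
carl9170_tx_accounting_free() is the mirror image, with hysteresis: a queue is only woken again once its backlog has dropped below the softer CARL9170_NUM_TX_LIMIT_SOFT threshold and the device pool has room, and the longest stop period per queue is recorded for debugging. Condensed from the matches above:

static void tx_accounting_free_sketch(struct ar9170 *ar, struct sk_buff *skb)
{
	unsigned int queue = skb_get_queue_mapping(skb);
	int i;

	spin_lock_bh(&ar->tx_stats_lock);
	ar->tx_stats[queue].len--;

	if (!is_mem_full(ar)) {
		for (i = 0; i < ar->hw->queues; i++) {
			if (ar->tx_stats[i].len >= CARL9170_NUM_TX_LIMIT_SOFT)
				continue;	/* still backlogged: keep it stopped */

			if (ieee80211_queue_stopped(ar->hw, i)) {
				unsigned long stopped_for;

				stopped_for = jiffies - ar->queue_stop_timeout[i];
				if (stopped_for > ar->max_queue_stop_timeout[i])
					ar->max_queue_stop_timeout[i] = stopped_for;

				ieee80211_wake_queue(ar->hw, i);
			}
		}
	}
	spin_unlock_bh(&ar->tx_stats_lock);

	/* The flush path waits for the last queued frame to be released. */
	if (atomic_dec_and_test(&ar->tx_total_queued))
		complete(&ar->tx_flush);
}
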
190 static int carl9170_alloc_dev_space(struct ar9170 *ar, struct sk_buff *skb) in carl9170_alloc_dev_space() argument
196 atomic_inc(&ar->mem_allocs); in carl9170_alloc_dev_space()
198 chunks = DIV_ROUND_UP(skb->len, ar->fw.mem_block_size); in carl9170_alloc_dev_space()
199 if (unlikely(atomic_sub_return(chunks, &ar->mem_free_blocks) < 0)) { in carl9170_alloc_dev_space()
200 atomic_add(chunks, &ar->mem_free_blocks); in carl9170_alloc_dev_space()
204 spin_lock_bh(&ar->mem_lock); in carl9170_alloc_dev_space()
205 cookie = bitmap_find_free_region(ar->mem_bitmap, ar->fw.mem_blocks, 0); in carl9170_alloc_dev_space()
206 spin_unlock_bh(&ar->mem_lock); in carl9170_alloc_dev_space()
209 atomic_add(chunks, &ar->mem_free_blocks); in carl9170_alloc_dev_space()
226 static void carl9170_release_dev_space(struct ar9170 *ar, struct sk_buff *skb) in carl9170_release_dev_space() argument
250 WARN_ON_ONCE(cookie > ar->fw.mem_blocks))) in carl9170_release_dev_space()
253 atomic_add(DIV_ROUND_UP(skb->len, ar->fw.mem_block_size), in carl9170_release_dev_space()
254 &ar->mem_free_blocks); in carl9170_release_dev_space()
256 spin_lock_bh(&ar->mem_lock); in carl9170_release_dev_space()
257 bitmap_release_region(ar->mem_bitmap, cookie - 1, 0); in carl9170_release_dev_space()
258 spin_unlock_bh(&ar->mem_lock); in carl9170_release_dev_space()
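
The two functions above implement the device-memory bookkeeping as reserve-then-roll-back: the block count is subtracted optimistically with atomic_sub_return(), added back if the pool went negative or the allocation bitmap has no free region, and on completion the blocks are returned while the bitmap slot (the cookie the firmware reports back) is released with bitmap_release_region(). A condensed sketch of the allocation half; the error codes are assumptions, since return values are not visible in the matches:

static int alloc_dev_space_sketch(struct ar9170 *ar, struct sk_buff *skb)
{
	unsigned int chunks;
	int cookie;

	atomic_inc(&ar->mem_allocs);

	/* Optimistically reserve enough firmware blocks for this frame. */
	chunks = DIV_ROUND_UP(skb->len, ar->fw.mem_block_size);
	if (unlikely(atomic_sub_return(chunks, &ar->mem_free_blocks) < 0)) {
		atomic_add(chunks, &ar->mem_free_blocks);	/* roll back */
		return -ENOSPC;					/* assumed error code */
	}

	/* Grab a slot in the allocation bitmap; its index becomes the
	 * cookie that later identifies the frame in the tx status. */
	spin_lock_bh(&ar->mem_lock);
	cookie = bitmap_find_free_region(ar->mem_bitmap, ar->fw.mem_blocks, 0);
	spin_unlock_bh(&ar->mem_lock);

	if (unlikely(cookie < 0)) {
		atomic_add(chunks, &ar->mem_free_blocks);	/* roll back */
		return -ENOSPC;					/* assumed error code */
	}

	return 0;	/* the stored cookie is offset by one, see the "cookie - 1" release above */
}
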
264 struct ar9170 *ar; in carl9170_tx_release() local
274 ar = arinfo->ar; in carl9170_tx_release()
275 if (WARN_ON_ONCE(!ar)) in carl9170_tx_release()
285 if (atomic_read(&ar->tx_total_queued)) in carl9170_tx_release()
286 ar->tx_schedule = true; in carl9170_tx_release()
289 if (!atomic_read(&ar->tx_ampdu_upload)) in carl9170_tx_release()
290 ar->tx_ampdu_schedule = true; in carl9170_tx_release()
316 ieee80211_free_txskb(ar->hw, skb); in carl9170_tx_release()
327 ieee80211_tx_status_irqsafe(ar->hw, skb); in carl9170_tx_release()
346 static void carl9170_tx_shift_bm(struct ar9170 *ar, in carl9170_tx_shift_bm() argument
378 static void carl9170_tx_status_process_ampdu(struct ar9170 *ar, in carl9170_tx_status_process_ampdu() argument
393 sta = __carl9170_get_tx_sta(ar, skb); in carl9170_tx_status_process_ampdu()
406 carl9170_tx_shift_bm(ar, tid_info, get_seq_h(hdr)); in carl9170_tx_status_process_ampdu()
437 static void carl9170_tx_bar_status(struct ar9170 *ar, struct sk_buff *skb, in carl9170_tx_bar_status() argument
458 list_for_each_entry_rcu(entry, &ar->bar_list[queue], list) { in carl9170_tx_bar_status()
460 spin_lock_bh(&ar->bar_list_lock[queue]); in carl9170_tx_bar_status()
462 spin_unlock_bh(&ar->bar_list_lock[queue]); in carl9170_tx_bar_status()
476 void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb, in carl9170_tx_status() argument
481 carl9170_tx_accounting_free(ar, skb); in carl9170_tx_status()
485 carl9170_tx_bar_status(ar, skb, txinfo); in carl9170_tx_status()
490 ar->tx_ack_failures++; in carl9170_tx_status()
493 carl9170_tx_status_process_ampdu(ar, skb, txinfo); in carl9170_tx_status()
495 carl9170_tx_ps_unblock(ar, skb); in carl9170_tx_status()
500 void carl9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb) in carl9170_tx_callback() argument
504 atomic_dec(&ar->tx_total_pending); in carl9170_tx_callback()
507 atomic_dec(&ar->tx_ampdu_upload); in carl9170_tx_callback()
510 tasklet_hi_schedule(&ar->usb_tasklet); in carl9170_tx_callback()
513 static struct sk_buff *carl9170_get_queued_skb(struct ar9170 *ar, u8 cookie, in carl9170_get_queued_skb() argument
528 carl9170_release_dev_space(ar, skb); in carl9170_get_queued_skb()
536 static void carl9170_tx_fill_rateinfo(struct ar9170 *ar, unsigned int rix, in carl9170_tx_fill_rateinfo() argument
558 static void carl9170_check_queue_stop_timeout(struct ar9170 *ar) in carl9170_check_queue_stop_timeout() argument
566 for (i = 0; i < ar->hw->queues; i++) { in carl9170_check_queue_stop_timeout()
567 spin_lock_bh(&ar->tx_status[i].lock); in carl9170_check_queue_stop_timeout()
569 skb = skb_peek(&ar->tx_status[i]); in carl9170_check_queue_stop_timeout()
582 spin_unlock_bh(&ar->tx_status[i].lock); in carl9170_check_queue_stop_timeout()
599 carl9170_restart(ar, CARL9170_RR_STUCK_TX); in carl9170_check_queue_stop_timeout()
603 static void carl9170_tx_ampdu_timeout(struct ar9170 *ar) in carl9170_tx_ampdu_timeout() argument
612 list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) { in carl9170_tx_ampdu_timeout()
641 struct ar9170 *ar = container_of(work, struct ar9170, in carl9170_tx_janitor() local
643 if (!IS_STARTED(ar)) in carl9170_tx_janitor()
646 ar->tx_janitor_last_run = jiffies; in carl9170_tx_janitor()
648 carl9170_check_queue_stop_timeout(ar); in carl9170_tx_janitor()
649 carl9170_tx_ampdu_timeout(ar); in carl9170_tx_janitor()
651 if (!atomic_read(&ar->tx_total_queued)) in carl9170_tx_janitor()
654 ieee80211_queue_delayed_work(ar->hw, &ar->tx_janitor, in carl9170_tx_janitor()
658 static void __carl9170_tx_process_status(struct ar9170 *ar, in __carl9170_tx_process_status() argument
668 skb = carl9170_get_queued_skb(ar, cookie, &ar->tx_status[q]); in __carl9170_tx_process_status()
685 carl9170_tx_fill_rateinfo(ar, r, t, txinfo); in __carl9170_tx_process_status()
686 carl9170_tx_status(ar, skb, success); in __carl9170_tx_process_status()
689 void carl9170_tx_process_status(struct ar9170 *ar, in carl9170_tx_process_status() argument
701 __carl9170_tx_process_status(ar, cmd->_tx_status[i].cookie, in carl9170_tx_process_status()
706 static void carl9170_tx_rate_tpc_chains(struct ar9170 *ar, in carl9170_tx_rate_tpc_chains() argument
724 txpower = ar->power_2G_ht40; in carl9170_tx_rate_tpc_chains()
726 txpower = ar->power_5G_ht40; in carl9170_tx_rate_tpc_chains()
729 txpower = ar->power_2G_ht20; in carl9170_tx_rate_tpc_chains()
731 txpower = ar->power_5G_ht20; in carl9170_tx_rate_tpc_chains()
739 txpower = ar->power_2G_cck; in carl9170_tx_rate_tpc_chains()
741 txpower = ar->power_2G_ofdm; in carl9170_tx_rate_tpc_chains()
743 txpower = ar->power_5G_leg; in carl9170_tx_rate_tpc_chains()
752 if (ar->eeprom.tx_mask == 1) { in carl9170_tx_rate_tpc_chains()
762 *tpc = min_t(unsigned int, *tpc, ar->hw->conf.power_level * 2); in carl9170_tx_rate_tpc_chains()
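
carl9170_tx_rate_tpc_chains() selects a calibration table by band and modulation (2 GHz CCK/OFDM, 5 GHz legacy, HT20, HT40), falls back to a single chain when the EEPROM tx_mask advertises only one, and finally clamps the power against the mac80211 configuration. A small worked example of that clamp (line 762 above); the half-dBm interpretation of the factor of two is an assumption:

/* power_level is in dBm; the "* 2" in the match suggests the chip's TPC
 * field counts in 0.5 dBm steps (assumption, not shown in the listing). */
static unsigned int clamp_tpc_sketch(unsigned int table_tpc_half_dbm,
				     unsigned int conf_power_level_dbm)
{
	unsigned int limit = conf_power_level_dbm * 2;

	/* e.g. table says 48 (24 dBm), user limit 17 dBm -> result 34 */
	return table_tpc_half_dbm < limit ? table_tpc_half_dbm : limit;
}
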
765 static __le32 carl9170_tx_physet(struct ar9170 *ar, in carl9170_tx_physet() argument
816 carl9170_tx_rate_tpc_chains(ar, info, txrate, in carl9170_tx_physet()
825 static bool carl9170_tx_rts_check(struct ar9170 *ar, in carl9170_tx_rts_check() argument
829 switch (ar->erp_mode) { in carl9170_tx_rts_check()
849 static bool carl9170_tx_cts_check(struct ar9170 *ar, in carl9170_tx_cts_check() argument
852 switch (ar->erp_mode) { in carl9170_tx_cts_check()
868 static void carl9170_tx_get_rates(struct ar9170 *ar, in carl9170_tx_get_rates() argument
885 static void carl9170_tx_apply_rateset(struct ar9170 *ar, in carl9170_tx_apply_rateset() argument
917 phy_set = carl9170_tx_physet(ar, info, txrate); in carl9170_tx_apply_rateset()
927 if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack)) in carl9170_tx_apply_rateset()
929 else if (carl9170_tx_cts_check(ar, txrate)) in carl9170_tx_apply_rateset()
943 if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack)) in carl9170_tx_apply_rateset()
946 else if (carl9170_tx_cts_check(ar, txrate)) in carl9170_tx_apply_rateset()
955 static int carl9170_tx_prepare(struct ar9170 *ar, in carl9170_tx_prepare() argument
979 hw_queue = ar9170_qmap[carl9170_get_queue(ar, skb)]; in carl9170_tx_prepare()
1070 arinfo->ar = ar; in carl9170_tx_prepare()
1079 static void carl9170_set_immba(struct ar9170 *ar, struct sk_buff *skb) in carl9170_set_immba() argument
1087 static void carl9170_set_ampdu_params(struct ar9170 *ar, struct sk_buff *skb) in carl9170_set_ampdu_params() argument
1108 if (tmp != ar->current_density) { in carl9170_set_ampdu_params()
1109 ar->current_density = tmp; in carl9170_set_ampdu_params()
1117 if (tmp != ar->current_factor) { in carl9170_set_ampdu_params()
1118 ar->current_factor = tmp; in carl9170_set_ampdu_params()
1124 static void carl9170_tx_ampdu(struct ar9170 *ar) in carl9170_tx_ampdu() argument
1133 atomic_inc(&ar->tx_ampdu_scheduler); in carl9170_tx_ampdu()
1134 ar->tx_ampdu_schedule = false; in carl9170_tx_ampdu()
1136 if (atomic_read(&ar->tx_ampdu_upload)) in carl9170_tx_ampdu()
1139 if (!ar->tx_ampdu_list_len) in carl9170_tx_ampdu()
1145 tid_info = rcu_dereference(ar->tx_ampdu_iter); in carl9170_tx_ampdu()
1152 list_for_each_entry_continue_rcu(tid_info, &ar->tx_ampdu_list, list) { in carl9170_tx_ampdu()
1187 carl9170_tx_get_rates(ar, tid_info->vif, in carl9170_tx_ampdu()
1192 carl9170_tx_apply_rateset(ar, tx_info_first, skb); in carl9170_tx_ampdu()
1194 atomic_inc(&ar->tx_ampdu_upload); in carl9170_tx_ampdu()
1222 carl9170_set_ampdu_params(ar, skb_peek(&agg)); in carl9170_tx_ampdu()
1225 carl9170_set_immba(ar, skb_peek_tail(&agg)); in carl9170_tx_ampdu()
1227 spin_lock_bh(&ar->tx_pending[queue].lock); in carl9170_tx_ampdu()
1228 skb_queue_splice_tail_init(&agg, &ar->tx_pending[queue]); in carl9170_tx_ampdu()
1229 spin_unlock_bh(&ar->tx_pending[queue].lock); in carl9170_tx_ampdu()
1230 ar->tx_schedule = true; in carl9170_tx_ampdu()
1235 rcu_assign_pointer(ar->tx_ampdu_iter, tid_info); in carl9170_tx_ampdu()
1239 static struct sk_buff *carl9170_tx_pick_skb(struct ar9170 *ar, in carl9170_tx_pick_skb() argument
1253 if (carl9170_alloc_dev_space(ar, skb)) in carl9170_tx_pick_skb()
1270 void carl9170_tx_drop(struct ar9170 *ar, struct sk_buff *skb) in carl9170_tx_drop() argument
1275 ar->tx_dropped++; in carl9170_tx_drop()
1279 ar9170_qmap[carl9170_get_queue(ar, skb)]); in carl9170_tx_drop()
1280 __carl9170_tx_process_status(ar, super->s.cookie, q); in carl9170_tx_drop()
1283 static bool carl9170_tx_ps_drop(struct ar9170 *ar, struct sk_buff *skb) in carl9170_tx_ps_drop() argument
1290 sta = __carl9170_get_tx_sta(ar, skb); in carl9170_tx_ps_drop()
1303 atomic_dec(&ar->tx_ampdu_upload); in carl9170_tx_ps_drop()
1306 carl9170_release_dev_space(ar, skb); in carl9170_tx_ps_drop()
1307 carl9170_tx_status(ar, skb, false); in carl9170_tx_ps_drop()
1316 static void carl9170_bar_check(struct ar9170 *ar, struct sk_buff *skb) in carl9170_bar_check() argument
1329 spin_lock_bh(&ar->bar_list_lock[queue]); in carl9170_bar_check()
1330 list_add_tail_rcu(&entry->list, &ar->bar_list[queue]); in carl9170_bar_check()
1331 spin_unlock_bh(&ar->bar_list_lock[queue]); in carl9170_bar_check()
1336 static void carl9170_tx(struct ar9170 *ar) in carl9170_tx() argument
1342 ar->tx_schedule = false; in carl9170_tx()
1344 if (unlikely(!IS_STARTED(ar))) in carl9170_tx()
1347 carl9170_usb_handle_tx_err(ar); in carl9170_tx()
1349 for (i = 0; i < ar->hw->queues; i++) { in carl9170_tx()
1350 while (!skb_queue_empty(&ar->tx_pending[i])) { in carl9170_tx()
1351 skb = carl9170_tx_pick_skb(ar, &ar->tx_pending[i]); in carl9170_tx()
1355 if (unlikely(carl9170_tx_ps_drop(ar, skb))) in carl9170_tx()
1358 carl9170_bar_check(ar, skb); in carl9170_tx()
1360 atomic_inc(&ar->tx_total_pending); in carl9170_tx()
1362 q = __carl9170_get_queue(ar, i); in carl9170_tx()
1367 skb_queue_tail(&ar->tx_status[q], skb); in carl9170_tx()
1379 carl9170_usb_tx(ar, skb); in carl9170_tx()
1387 ieee80211_queue_delayed_work(ar->hw, &ar->tx_janitor, in carl9170_tx()
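
The matches from carl9170_tx() outline the transmit pump: a frame is only taken off a tx_pending queue once device memory has been reserved for it (carl9170_tx_pick_skb() calls carl9170_alloc_dev_space()), frames for dozing stations are dropped, BARs are registered, and each frame is parked on the matching tx_status queue before it is handed to the USB layer. A condensed sketch of that loop; the control flow between the matched lines is inferred:

static void tx_loop_sketch(struct ar9170 *ar)
{
	struct sk_buff *skb;
	unsigned int i, q;

	ar->tx_schedule = false;

	if (unlikely(!IS_STARTED(ar)))
		return;

	carl9170_usb_handle_tx_err(ar);

	for (i = 0; i < ar->hw->queues; i++) {
		while (!skb_queue_empty(&ar->tx_pending[i])) {
			/* NULL once no device memory can be reserved. */
			skb = carl9170_tx_pick_skb(ar, &ar->tx_pending[i]);
			if (!skb)
				break;

			/* Frames for dozing stations are dropped here. */
			if (unlikely(carl9170_tx_ps_drop(ar, skb)))
				continue;

			carl9170_bar_check(ar, skb);

			atomic_inc(&ar->tx_total_pending);

			/* Park the frame until the firmware reports back. */
			q = __carl9170_get_queue(ar, i);
			skb_queue_tail(&ar->tx_status[q], skb);

			carl9170_usb_tx(ar, skb);
		}
	}

	/* The real function finally re-arms the tx_janitor delayed work;
	 * the delay argument is not visible in the matches above. */
}
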
1391 static bool carl9170_tx_ampdu_queue(struct ar9170 *ar, in carl9170_tx_ampdu_queue() argument
1462 carl9170_tx_status(ar, skb, false); in carl9170_tx_ampdu_queue()
1463 ar->tx_dropped++; in carl9170_tx_ampdu_queue()
1471 struct ar9170 *ar = hw->priv; in carl9170_op_tx() local
1477 if (unlikely(!IS_STARTED(ar))) in carl9170_op_tx()
1483 if (unlikely(carl9170_tx_prepare(ar, sta, skb))) in carl9170_op_tx()
1486 carl9170_tx_accounting(ar, skb); in carl9170_op_tx()
1505 run = carl9170_tx_ampdu_queue(ar, sta, skb, info); in carl9170_op_tx()
1507 carl9170_tx_ampdu(ar); in carl9170_op_tx()
1512 carl9170_tx_get_rates(ar, vif, sta, skb); in carl9170_op_tx()
1513 carl9170_tx_apply_rateset(ar, info, skb); in carl9170_op_tx()
1514 skb_queue_tail(&ar->tx_pending[queue], skb); in carl9170_op_tx()
1517 carl9170_tx(ar); in carl9170_op_tx()
1521 ar->tx_dropped++; in carl9170_op_tx()
1522 ieee80211_free_txskb(ar->hw, skb); in carl9170_op_tx()
1525 void carl9170_tx_scheduler(struct ar9170 *ar) in carl9170_tx_scheduler() argument
1528 if (ar->tx_ampdu_schedule) in carl9170_tx_scheduler()
1529 carl9170_tx_ampdu(ar); in carl9170_tx_scheduler()
1531 if (ar->tx_schedule) in carl9170_tx_scheduler()
1532 carl9170_tx(ar); in carl9170_tx_scheduler()
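
The tx_schedule and tx_ampdu_schedule flags set throughout the file are consumed in a single place; reassembled from the matches above, the scheduler amounts to:

void carl9170_tx_scheduler(struct ar9170 *ar)
{
	/* Aggregates first, so freshly built A-MPDUs land on tx_pending
	 * before the pending queues are drained below. */
	if (ar->tx_ampdu_schedule)
		carl9170_tx_ampdu(ar);

	if (ar->tx_schedule)
		carl9170_tx(ar);
}
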
1536 static struct carl9170_vif_info *carl9170_pick_beaconing_vif(struct ar9170 *ar) in carl9170_pick_beaconing_vif() argument
1548 cvif = rcu_dereference(ar->beacon_iter); in carl9170_pick_beaconing_vif()
1549 if (ar->vifs > 0 && cvif) { in carl9170_pick_beaconing_vif()
1551 list_for_each_entry_continue_rcu(cvif, &ar->vif_list, in carl9170_pick_beaconing_vif()
1556 } while (ar->beacon_enabled && i--); in carl9170_pick_beaconing_vif()
1560 RCU_INIT_POINTER(ar->beacon_iter, cvif); in carl9170_pick_beaconing_vif()
1564 static bool carl9170_tx_beacon_physet(struct ar9170 *ar, struct sk_buff *skb, in carl9170_tx_beacon_physet() argument
1575 carl9170_tx_rate_tpc_chains(ar, txinfo, rate, plcp, &power, &chains); in carl9170_tx_beacon_physet()
1608 int carl9170_update_beacon(struct ar9170 *ar, const bool submit) in carl9170_update_beacon() argument
1618 cvif = carl9170_pick_beaconing_vif(ar); in carl9170_update_beacon()
1622 skb = ieee80211_beacon_get_tim(ar->hw, carl9170_get_vif(cvif), in carl9170_update_beacon()
1630 spin_lock_bh(&ar->beacon_lock); in carl9170_update_beacon()
1636 addr = ar->fw.beacon_addr + off; in carl9170_update_beacon()
1639 if ((off + len) > ar->fw.beacon_max_len) { in carl9170_update_beacon()
1641 wiphy_err(ar->hw->wiphy, "beacon does not " in carl9170_update_beacon()
1650 wiphy_err(ar->hw->wiphy, "no support for beacons " in carl9170_update_beacon()
1659 ht_rate = carl9170_tx_beacon_physet(ar, skb, &ht1, &plcp); in carl9170_update_beacon()
1661 carl9170_async_regwrite_begin(ar); in carl9170_update_beacon()
1688 spin_unlock_bh(&ar->beacon_lock); in carl9170_update_beacon()
1693 err = carl9170_bcn_ctrl(ar, cvif->id, in carl9170_update_beacon()
1705 spin_unlock_bh(&ar->beacon_lock); in carl9170_update_beacon()