Lines matching refs: mvm — each hit shows the source-file line number, the matching code, and the enclosing function; the trailing "argument" tag marks lines where mvm is defined as a function parameter.

76 void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,  in iwl_mvm_set_tx_cmd()  argument
124 tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) << in iwl_mvm_set_tx_cmd()
143 if (ieee80211_is_data(fc) && len > mvm->rts_threshold && in iwl_mvm_set_tx_cmd()
147 if ((mvm->fw->ucode_capa.capa[0] & in iwl_mvm_set_tx_cmd()
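
The hits above are from the Tx-command setup in iwl_mvm_set_tx_cmd(): a BT-coexistence priority is folded into the command's tx_flags, and unicast data frames longer than mvm->rts_threshold request RTS/CTS protection; the capa[0] test gates an optional ucode feature. A condensed sketch of the flag assembly follows; the flag names (TX_CMD_FLG_BT_PRIO_POS, TX_CMD_FLG_PROT_REQUIRE) are not in the hits and are reconstructed from the driver's fw-api headers, so treat them as assumptions.

	/* Condensed from iwl_mvm_set_tx_cmd(); most of the function elided.
	 * ac is the access category computed earlier in the function. */
	struct ieee80211_hdr *hdr = (void *)skb->data;
	__le16 fc = hdr->frame_control;
	u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
	int len = skb->len;

	/* Let BT-coex raise the priority of this frame. */
	tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
			TX_CMD_FLG_BT_PRIO_POS;

	/* Long unicast data frames get RTS/CTS protection. */
	if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
	    !is_multicast_ether_addr(ieee80211_get_DA(hdr)))
		tx_flags |= TX_CMD_FLG_PROT_REQUIRE;

	tx_cmd->tx_flags = cpu_to_le32(tx_flags);
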
163 void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, in iwl_mvm_set_tx_cmd_rate() argument
210 &mvm->nvm_data->bands[info->band], sta); in iwl_mvm_set_tx_cmd_rate()
222 mvm->mgmt_last_antenna_idx = in iwl_mvm_set_tx_cmd_rate()
223 iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm), in iwl_mvm_set_tx_cmd_rate()
224 mvm->mgmt_last_antenna_idx); in iwl_mvm_set_tx_cmd_rate()
227 !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) in iwl_mvm_set_tx_cmd_rate()
228 rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS; in iwl_mvm_set_tx_cmd_rate()
231 BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; in iwl_mvm_set_tx_cmd_rate()
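
These hits cover rate and antenna selection in iwl_mvm_set_tx_cmd_rate(): fall back to the band's lowest rate when rate control gave nothing usable, round-robin the management-frame antenna, and avoid the antenna shared with Bluetooth when BT-coex says it is busy. A condensed sketch, assuming the surrounding rate_idx variable and the 2.4 GHz half of the band test, which the search filter does not show:

	/* Condensed from iwl_mvm_set_tx_cmd_rate(); data-frame path elided. */
	u32 rate_flags;

	/* Pick the lowest mandatory rate for this band if needed. */
	if (rate_idx == -1 || info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
		rate_idx = rate_lowest_index(
				&mvm->nvm_data->bands[info->band], sta);

	/* Round-robin the Tx antenna used for management frames. */
	mvm->mgmt_last_antenna_idx =
		iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
				     mvm->mgmt_last_antenna_idx);

	/* On 2.4 GHz (band test assumed, not in the hits), steer away from
	 * the antenna shared with Bluetooth when it is unavailable. */
	if (info->band == IEEE80211_BAND_2GHZ &&
	    !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
		rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
	else
		rate_flags =
			BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
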
244 void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, in iwl_mvm_set_tx_cmd_crypto() argument
283 iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, in iwl_mvm_set_tx_params() argument
291 dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans); in iwl_mvm_set_tx_params()
301 iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb); in iwl_mvm_set_tx_params()
303 iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id); in iwl_mvm_set_tx_params()
305 iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control); in iwl_mvm_set_tx_params()
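
iwl_mvm_set_tx_params() ties the three helpers together: allocate a device command from the transport, then fill it in crypto -> flags -> rate order. A condensed sketch; the dev_cmd->hdr.cmd / payload layout and the driver_data[1] stash are reconstructed from the driver as I know it, not shown in the hits:

	/* Condensed from iwl_mvm_set_tx_params(). */
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_device_cmd *dev_cmd;
	struct iwl_tx_cmd *tx_cmd;

	dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);
	if (unlikely(!dev_cmd))
		return NULL;

	memset(dev_cmd, 0, sizeof(*dev_cmd));
	dev_cmd->hdr.cmd = TX_CMD;
	tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;

	if (info->control.hw_key)
		iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb);

	iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);

	iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);

	/* Stash the command so the Tx-response path can free it later. */
	info->driver_data[1] = dev_cmd;

	return dev_cmd;
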
315 int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) in iwl_mvm_tx_skb_non_sta() argument
339 IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue; in iwl_mvm_tx_skb_non_sta()
353 sta_id = mvm->aux_sta.sta_id; in iwl_mvm_tx_skb_non_sta()
356 IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info->hw_queue); in iwl_mvm_tx_skb_non_sta()
358 dev_cmd = iwl_mvm_set_tx_params(mvm, skb, NULL, sta_id); in iwl_mvm_tx_skb_non_sta()
368 if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info->hw_queue)) { in iwl_mvm_tx_skb_non_sta()
369 iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); in iwl_mvm_tx_skb_non_sta()
380 atomic_inc(&mvm->pending_frames[sta_id]); in iwl_mvm_tx_skb_non_sta()
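
iwl_mvm_tx_skb_non_sta() handles frames with no station: it falls back to the auxiliary station and queue, builds the command, hands the frame to the transport, and counts it in pending_frames until the Tx response reclaims it. A condensed sketch; the vif checks that pick a different station or queue are elided:

	/* Condensed from iwl_mvm_tx_skb_non_sta(). */
	sta_id = mvm->aux_sta.sta_id;

	IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info->hw_queue);

	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, NULL, sta_id);
	if (!dev_cmd)
		return -1;

	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info->hw_queue)) {
		/* Transport refused the frame: free the command we built. */
		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
		return -1;
	}

	/* Count the frame until the Tx response releases it. */
	atomic_inc(&mvm->pending_frames[sta_id]);

	return 0;
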
388 int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, in iwl_mvm_tx_skb() argument
411 dev_cmd = iwl_mvm_set_tx_params(mvm, skb, sta, mvmsta->sta_id); in iwl_mvm_tx_skb()
460 IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id, in iwl_mvm_tx_skb()
463 if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id)) in iwl_mvm_tx_skb()
471 if (txq_id < mvm->first_agg_queue) in iwl_mvm_tx_skb()
472 atomic_inc(&mvm->pending_frames[mvmsta->sta_id]); in iwl_mvm_tx_skb()
477 iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); in iwl_mvm_tx_skb()
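
The per-station path in iwl_mvm_tx_skb() is the same shape, but sequence numbering and queue selection happen under the station lock, and pending_frames is only bumped for non-aggregation queues (aggregation queues are drained per TID instead). A condensed sketch; TID/sequence/txq_id selection under mvmsta->lock is elided:

	/* Condensed from iwl_mvm_tx_skb(). */
	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, sta, mvmsta->sta_id);
	if (!dev_cmd)
		goto drop;

	spin_lock(&mvmsta->lock);

	IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
		     tid, txq_id, seq_number);

	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
		goto drop_unlock_sta;

	spin_unlock(&mvmsta->lock);

	/* Aggregation queues are drained per TID, not via pending_frames. */
	if (txq_id < mvm->first_agg_queue)
		atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);

	return 0;

drop_unlock_sta:
	iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
	spin_unlock(&mvmsta->lock);
drop:
	return -1;
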
483 static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm, in iwl_mvm_check_ratid_empty() argument
508 IWL_DEBUG_TX_QUEUES(mvm, in iwl_mvm_check_ratid_empty()
516 IWL_DEBUG_TX_QUEUES(mvm, in iwl_mvm_check_ratid_empty()
519 iwl_mvm_disable_txq(mvm, tid_data->txq_id, CMD_ASYNC); in iwl_mvm_check_ratid_empty()
526 mvm->queue_to_mac80211[tid_data->txq_id] = in iwl_mvm_check_ratid_empty()
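
iwl_mvm_check_ratid_empty() runs when reclaim catches up with tid_data->next_reclaimed; a pending addBA flow can then continue, and a pending delBA flow can finally tear the queue down. A condensed sketch; the debug strings and IWL_INVALID_MAC80211_QUEUE are reconstructed from memory of this driver and should be treated as assumptions:

	/* Condensed from iwl_mvm_check_ratid_empty(). */
	switch (tid_data->state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can continue addBA flow ssn = next_recl = %d\n",
				    tid_data->next_reclaimed);
		/* Queue drained: the blocked addBA flow may resume. */
		break;

	case IWL_EMPTYING_HW_QUEUE_DELBA:
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can continue DELBA flow ssn = next_recl = %d\n",
				    tid_data->next_reclaimed);
		/* Tear down the now-empty aggregation queue... */
		iwl_mvm_disable_txq(mvm, tid_data->txq_id, CMD_ASYNC);
		/* ...and drop mac80211's mapping to it. */
		mvm->queue_to_mac80211[tid_data->txq_id] =
			IWL_INVALID_MAC80211_QUEUE;
		break;

	default:
		break;
	}
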
625 static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, in iwl_mvm_rx_tx_cmd_single() argument
646 iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs); in iwl_mvm_rx_tx_cmd_single()
654 iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); in iwl_mvm_rx_tx_cmd_single()
680 if (txq_id >= mvm->first_agg_queue && in iwl_mvm_rx_tx_cmd_single()
701 ieee80211_tx_status(mvm->hw, skb); in iwl_mvm_rx_tx_cmd_single()
704 if (txq_id >= mvm->first_agg_queue) { in iwl_mvm_rx_tx_cmd_single()
725 IWL_DEBUG_TX_REPLY(mvm, in iwl_mvm_rx_tx_cmd_single()
729 IWL_DEBUG_TX_REPLY(mvm, in iwl_mvm_rx_tx_cmd_single()
737 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); in iwl_mvm_rx_tx_cmd_single()
754 IWL_DEBUG_TX_REPLY(mvm, "Next reclaimed packet:%d\n", in iwl_mvm_rx_tx_cmd_single()
756 iwl_mvm_check_ratid_empty(mvm, sta, tid); in iwl_mvm_rx_tx_cmd_single()
772 if (txq_id >= mvm->first_agg_queue) in iwl_mvm_rx_tx_cmd_single()
779 if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) in iwl_mvm_rx_tx_cmd_single()
795 ieee80211_sta_block_awake(mvm->hw, sta, false); in iwl_mvm_rx_tx_cmd_single()
807 set_bit(sta_id, mvm->sta_drained); in iwl_mvm_rx_tx_cmd_single()
808 schedule_work(&mvm->sta_drained_wk); in iwl_mvm_rx_tx_cmd_single()
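
iwl_mvm_rx_tx_cmd_single() is the reclaim side of the non-aggregated path: pull back every frame up to the new SSN, free each frame's device command, report status to mac80211, then (under RCU) advance next_reclaimed and undo the pending_frames accounting. The hits also show this path unblocking a power-saving station via ieee80211_sta_block_awake(). A condensed sketch; status translation is elided, and the real code only flags sta_drained for a station that is being removed:

	/* Condensed from iwl_mvm_rx_tx_cmd_single(). */
	__skb_queue_head_init(&skbs);

	/* Pull every frame up to the new SSN back from the transport. */
	iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs);

	while (!skb_queue_empty(&skbs)) {
		struct sk_buff *skb = __skb_dequeue(&skbs);
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		skb_freed++;
		iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
		/* ... fill info->status from the firmware Tx response ... */
		ieee80211_tx_status(mvm->hw, skb);
	}

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta)) {
		/* ... advance next_reclaimed for the TID ... */
		iwl_mvm_check_ratid_empty(mvm, sta, tid);

		/* Undo the accounting taken at Tx time; a drained station
		 * (being removed, simplification here) is handed to the
		 * drain worker. */
		if (atomic_sub_and_test(skb_freed,
					&mvm->pending_frames[sta_id])) {
			set_bit(sta_id, mvm->sta_drained);
			schedule_work(&mvm->sta_drained_wk);
		}
	}
	rcu_read_unlock();
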
838 static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm, in iwl_mvm_rx_tx_cmd_agg_dbg() argument
848 IWL_DEBUG_TX_REPLY(mvm, in iwl_mvm_rx_tx_cmd_agg_dbg()
858 static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm, in iwl_mvm_rx_tx_cmd_agg_dbg() argument
863 static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, in iwl_mvm_rx_tx_cmd_agg() argument
872 if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < mvm->first_agg_queue)) in iwl_mvm_rx_tx_cmd_agg()
878 iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt); in iwl_mvm_rx_tx_cmd_agg()
882 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); in iwl_mvm_rx_tx_cmd_agg()
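
iwl_mvm_rx_tx_cmd_agg_dbg() appears twice because the first definition (file line 838) is compiled under a debug config and the second (file line 858) is the empty stub used otherwise. The aggregation handler itself sanity-checks that the response came from an aggregation queue, dumps per-frame status, and stashes rate information in the station's per-TID data for the coming BA notification. A condensed sketch; IWL_MVM_TX_RES_GET_RA and the per-TID bookkeeping are reconstructed from the driver's fw-api as assumptions:

	static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
					  struct iwl_rx_packet *pkt)
	{
		struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
		u16 sequence = le16_to_cpu(pkt->hdr.sequence);
		int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
		struct ieee80211_sta *sta;

		/* Aggregation responses must come from an agg queue. */
		if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < mvm->first_agg_queue))
			return;

		iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt);

		rcu_read_lock();
		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
		if (!IS_ERR_OR_NULL(sta)) {
			/* ... save the response's rate/failure counts in the
			 * station's per-TID data for iwl_mvm_rx_ba_notif() ... */
		}
		rcu_read_unlock();
	}
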
896 int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, in iwl_mvm_rx_tx_cmd() argument
903 iwl_mvm_rx_tx_cmd_single(mvm, pkt); in iwl_mvm_rx_tx_cmd()
905 iwl_mvm_rx_tx_cmd_agg(mvm, pkt); in iwl_mvm_rx_tx_cmd()
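
iwl_mvm_rx_tx_cmd() is the dispatcher between the two handlers. Only the two calls appear in the hits; the frame_count split and the trailing parameter are reconstructed from the versions of this file I know, so treat them as assumptions:

	int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
			      struct iwl_device_cmd *cmd)
	{
		struct iwl_rx_packet *pkt = rxb_addr(rxb);
		struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;

		/* A single-frame response is a plain Tx status; anything
		 * larger came from an A-MPDU and takes the agg path. */
		if (tx_resp->frame_count == 1)
			iwl_mvm_rx_tx_cmd_single(mvm, pkt);
		else
			iwl_mvm_rx_tx_cmd_agg(mvm, pkt);

		return 0;
	}
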
927 int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, in iwl_mvm_rx_ba_notif() argument
954 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); in iwl_mvm_rx_ba_notif()
966 IWL_ERR(mvm, in iwl_mvm_rx_ba_notif()
982 iwl_trans_reclaim(mvm->trans, scd_flow, ba_resp_scd_ssn, in iwl_mvm_rx_ba_notif()
985 IWL_DEBUG_TX_REPLY(mvm, in iwl_mvm_rx_ba_notif()
989 IWL_DEBUG_TX_REPLY(mvm, in iwl_mvm_rx_ba_notif()
998 iwl_mvm_check_ratid_empty(mvm, sta, tid); in iwl_mvm_rx_ba_notif()
1011 iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); in iwl_mvm_rx_ba_notif()
1046 IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n"); in iwl_mvm_rx_ba_notif()
1047 iwl_mvm_rs_tx_status(mvm, sta, tid, &ba_info); in iwl_mvm_rx_ba_notif()
1055 ieee80211_tx_status(mvm->hw, skb); in iwl_mvm_rx_ba_notif()
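
iwl_mvm_rx_ba_notif() mirrors the single-frame reclaim for block-ack sessions: look the station up under RCU, reclaim everything the BA window has confirmed, free the device commands, and re-check the TID state. When nothing was reclaimed, rate scaling is fed directly from the notification instead. A condensed sketch; notification parsing, locking, and the per-frame BA-info fixups are elided:

	/* Condensed from iwl_mvm_rx_ba_notif(). */
	__skb_queue_head_init(&reclaimed_skbs);

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (IS_ERR_OR_NULL(sta)) {
		rcu_read_unlock();
		return 0;
	}

	/* Release everything the BA window has moved past. */
	iwl_trans_reclaim(mvm->trans, scd_flow, ba_resp_scd_ssn,
			  &reclaimed_skbs);

	skb_queue_walk(&reclaimed_skbs, skb) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
		/* ... mark ACKed frames; the first carries the BA info ... */
	}

	iwl_mvm_check_ratid_empty(mvm, sta, tid);

	/* Nothing reclaimed: update rate scaling from the notification. */
	if (!freed) {
		IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n");
		iwl_mvm_rs_tx_status(mvm, sta, tid, &ba_info);
	}
	rcu_read_unlock();

	/* Report status outside the RCU section. */
	while ((skb = __skb_dequeue(&reclaimed_skbs)))
		ieee80211_tx_status(mvm->hw, skb);
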
1069 int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync) in iwl_mvm_flush_tx_path() argument
1079 ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags, in iwl_mvm_flush_tx_path()
1082 IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret); in iwl_mvm_flush_tx_path()
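
iwl_mvm_flush_tx_path() asks the firmware to flush the queues in tfd_msk, synchronously or not. Only the send and the error print appear in the hits; the command struct and flag names below are reconstructed from memory of the driver's fw-api and should be treated as assumptions:

	int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync)
	{
		int ret;
		struct iwl_tx_path_flush_cmd flush_cmd = {
			.queues_ctl = cpu_to_le32(tfd_msk),
			.flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
		};
		/* A synchronous flush waits for the firmware's response. */
		u32 flags = sync ? 0 : CMD_ASYNC;

		ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
					   sizeof(flush_cmd), &flush_cmd);
		if (ret)
			IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
		return ret;
	}
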