root/drivers/net/wireless/ath/ath10k/htt_tx.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes following definitions.
  1. ath10k_htt_tx_txq_calc_size
  2. __ath10k_htt_tx_txq_recalc
  3. __ath10k_htt_tx_txq_sync
  4. ath10k_htt_tx_txq_recalc
  5. ath10k_htt_tx_txq_sync
  6. ath10k_htt_tx_txq_update
  7. ath10k_htt_tx_dec_pending
  8. ath10k_htt_tx_inc_pending
  9. ath10k_htt_tx_mgmt_inc_pending
  10. ath10k_htt_tx_mgmt_dec_pending
  11. ath10k_htt_tx_alloc_msdu_id
  12. ath10k_htt_tx_free_msdu_id
  13. ath10k_htt_tx_free_cont_txbuf_32
  14. ath10k_htt_tx_alloc_cont_txbuf_32
  15. ath10k_htt_tx_free_cont_txbuf_64
  16. ath10k_htt_tx_alloc_cont_txbuf_64
  17. ath10k_htt_tx_free_cont_frag_desc_32
  18. ath10k_htt_tx_alloc_cont_frag_desc_32
  19. ath10k_htt_tx_free_cont_frag_desc_64
  20. ath10k_htt_tx_alloc_cont_frag_desc_64
  21. ath10k_htt_tx_free_txq
  22. ath10k_htt_tx_alloc_txq
  23. ath10k_htt_tx_free_txdone_fifo
  24. ath10k_htt_tx_alloc_txdone_fifo
  25. ath10k_htt_tx_alloc_buf
  26. ath10k_htt_tx_start
  27. ath10k_htt_tx_clean_up_pending
  28. ath10k_htt_tx_destroy
  29. ath10k_htt_tx_stop
  30. ath10k_htt_tx_free
  31. ath10k_htt_htc_tx_complete
  32. ath10k_htt_hif_tx_complete
  33. ath10k_htt_h2t_ver_req_msg
  34. ath10k_htt_h2t_stats_req
  35. ath10k_htt_send_frag_desc_bank_cfg_32
  36. ath10k_htt_send_frag_desc_bank_cfg_64
  37. ath10k_htt_fill_rx_desc_offset_32
  38. ath10k_htt_fill_rx_desc_offset_64
  39. ath10k_htt_send_rx_ring_cfg_32
  40. ath10k_htt_send_rx_ring_cfg_64
  41. ath10k_htt_send_rx_ring_cfg_hl
  42. ath10k_htt_h2t_aggr_cfg_msg_32
  43. ath10k_htt_h2t_aggr_cfg_msg_v2
  44. ath10k_htt_tx_fetch_resp
  45. ath10k_htt_tx_get_vdev_id
  46. ath10k_htt_tx_get_tid
  47. ath10k_htt_mgmt_tx
  48. ath10k_htt_tx_32
  49. ath10k_htt_tx_64
  50. ath10k_htt_set_tx_ops

   1 // SPDX-License-Identifier: ISC
   2 /*
   3  * Copyright (c) 2005-2011 Atheros Communications Inc.
   4  * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
   5  */
   6 
   7 #include <linux/etherdevice.h>
   8 #include "htt.h"
   9 #include "mac.h"
  10 #include "hif.h"
  11 #include "txrx.h"
  12 #include "debug.h"
  13 
  14 static u8 ath10k_htt_tx_txq_calc_size(size_t count)
  15 {
  16         int exp;
  17         int factor;
  18 
  19         exp = 0;
  20         factor = count >> 7;
  21 
  22         while (factor >= 64 && exp < 4) {
  23                 factor >>= 3;
  24                 exp++;
  25         }
  26 
  27         if (exp == 4)
  28                 return 0xff;
  29 
  30         if (count > 0)
  31                 factor = max(1, factor);
  32 
  33         return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) |
  34                SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR);
  35 }
  36 
  37 static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
  38                                        struct ieee80211_txq *txq)
  39 {
  40         struct ath10k *ar = hw->priv;
  41         struct ath10k_sta *arsta;
  42         struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
  43         unsigned long frame_cnt;
  44         unsigned long byte_cnt;
  45         int idx;
  46         u32 bit;
  47         u16 peer_id;
  48         u8 tid;
  49         u8 count;
  50 
  51         lockdep_assert_held(&ar->htt.tx_lock);
  52 
  53         if (!ar->htt.tx_q_state.enabled)
  54                 return;
  55 
  56         if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
  57                 return;
  58 
  59         if (txq->sta) {
  60                 arsta = (void *)txq->sta->drv_priv;
  61                 peer_id = arsta->peer_id;
  62         } else {
  63                 peer_id = arvif->peer_id;
  64         }
  65 
  66         tid = txq->tid;
  67         bit = BIT(peer_id % 32);
  68         idx = peer_id / 32;
  69 
  70         ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
  71         count = ath10k_htt_tx_txq_calc_size(byte_cnt);
  72 
  73         if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
  74             unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
  75                 ath10k_warn(ar, "refusing to update txq for peer_id %hu tid %hhu due to out of bounds\n",
  76                             peer_id, tid);
  77                 return;
  78         }
  79 
  80         ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count;
  81         ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
  82         ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;
  83 
  84         ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %hu tid %hhu count %hhu\n",
  85                    peer_id, tid, count);
  86 }
  87 
  88 static void __ath10k_htt_tx_txq_sync(struct ath10k *ar)
  89 {
  90         u32 seq;
  91         size_t size;
  92 
  93         lockdep_assert_held(&ar->htt.tx_lock);
  94 
  95         if (!ar->htt.tx_q_state.enabled)
  96                 return;
  97 
  98         if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
  99                 return;
 100 
 101         seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq);
 102         seq++;
 103         ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq);
 104 
 105         ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n",
 106                    seq);
 107 
 108         size = sizeof(*ar->htt.tx_q_state.vaddr);
 109         dma_sync_single_for_device(ar->dev,
 110                                    ar->htt.tx_q_state.paddr,
 111                                    size,
 112                                    DMA_TO_DEVICE);
 113 }
 114 
 115 void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
 116                               struct ieee80211_txq *txq)
 117 {
 118         struct ath10k *ar = hw->priv;
 119 
 120         spin_lock_bh(&ar->htt.tx_lock);
 121         __ath10k_htt_tx_txq_recalc(hw, txq);
 122         spin_unlock_bh(&ar->htt.tx_lock);
 123 }
 124 
 125 void ath10k_htt_tx_txq_sync(struct ath10k *ar)
 126 {
 127         spin_lock_bh(&ar->htt.tx_lock);
 128         __ath10k_htt_tx_txq_sync(ar);
 129         spin_unlock_bh(&ar->htt.tx_lock);
 130 }
 131 
 132 void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
 133                               struct ieee80211_txq *txq)
 134 {
 135         struct ath10k *ar = hw->priv;
 136 
 137         spin_lock_bh(&ar->htt.tx_lock);
 138         __ath10k_htt_tx_txq_recalc(hw, txq);
 139         __ath10k_htt_tx_txq_sync(ar);
 140         spin_unlock_bh(&ar->htt.tx_lock);
 141 }
 142 
 143 void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
 144 {
 145         lockdep_assert_held(&htt->tx_lock);
 146 
 147         htt->num_pending_tx--;
 148         if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
 149                 ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
 150 }
 151 
 152 int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
 153 {
 154         lockdep_assert_held(&htt->tx_lock);
 155 
 156         if (htt->num_pending_tx >= htt->max_num_pending_tx)
 157                 return -EBUSY;
 158 
 159         htt->num_pending_tx++;
 160         if (htt->num_pending_tx == htt->max_num_pending_tx)
 161                 ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
 162 
 163         return 0;
 164 }
 165 
 166 int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
 167                                    bool is_presp)
 168 {
 169         struct ath10k *ar = htt->ar;
 170 
 171         lockdep_assert_held(&htt->tx_lock);
 172 
 173         if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres)
 174                 return 0;
 175 
 176         if (is_presp &&
 177             ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
 178                 return -EBUSY;
 179 
 180         htt->num_pending_mgmt_tx++;
 181 
 182         return 0;
 183 }
 184 
 185 void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt)
 186 {
 187         lockdep_assert_held(&htt->tx_lock);
 188 
 189         if (!htt->ar->hw_params.max_probe_resp_desc_thres)
 190                 return;
 191 
 192         htt->num_pending_mgmt_tx--;
 193 }
 194 
 195 int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
 196 {
 197         struct ath10k *ar = htt->ar;
 198         int ret;
 199 
 200         spin_lock_bh(&htt->tx_lock);
 201         ret = idr_alloc(&htt->pending_tx, skb, 0,
 202                         htt->max_num_pending_tx, GFP_ATOMIC);
 203         spin_unlock_bh(&htt->tx_lock);
 204 
 205         ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);
 206 
 207         return ret;
 208 }
 209 
 210 void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
 211 {
 212         struct ath10k *ar = htt->ar;
 213 
 214         lockdep_assert_held(&htt->tx_lock);
 215 
 216         ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
 217 
 218         idr_remove(&htt->pending_tx, msdu_id);
 219 }
 220 
 221 static void ath10k_htt_tx_free_cont_txbuf_32(struct ath10k_htt *htt)
 222 {
 223         struct ath10k *ar = htt->ar;
 224         size_t size;
 225 
 226         if (!htt->txbuf.vaddr_txbuff_32)
 227                 return;
 228 
 229         size = htt->txbuf.size;
 230         dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_32,
 231                           htt->txbuf.paddr);
 232         htt->txbuf.vaddr_txbuff_32 = NULL;
 233 }
 234 
 235 static int ath10k_htt_tx_alloc_cont_txbuf_32(struct ath10k_htt *htt)
 236 {
 237         struct ath10k *ar = htt->ar;
 238         size_t size;
 239 
 240         size = htt->max_num_pending_tx *
 241                         sizeof(struct ath10k_htt_txbuf_32);
 242 
 243         htt->txbuf.vaddr_txbuff_32 = dma_alloc_coherent(ar->dev, size,
 244                                                         &htt->txbuf.paddr,
 245                                                         GFP_KERNEL);
 246         if (!htt->txbuf.vaddr_txbuff_32)
 247                 return -ENOMEM;
 248 
 249         htt->txbuf.size = size;
 250 
 251         return 0;
 252 }
 253 
 254 static void ath10k_htt_tx_free_cont_txbuf_64(struct ath10k_htt *htt)
 255 {
 256         struct ath10k *ar = htt->ar;
 257         size_t size;
 258 
 259         if (!htt->txbuf.vaddr_txbuff_64)
 260                 return;
 261 
 262         size = htt->txbuf.size;
 263         dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_64,
 264                           htt->txbuf.paddr);
 265         htt->txbuf.vaddr_txbuff_64 = NULL;
 266 }
 267 
 268 static int ath10k_htt_tx_alloc_cont_txbuf_64(struct ath10k_htt *htt)
 269 {
 270         struct ath10k *ar = htt->ar;
 271         size_t size;
 272 
 273         size = htt->max_num_pending_tx *
 274                         sizeof(struct ath10k_htt_txbuf_64);
 275 
 276         htt->txbuf.vaddr_txbuff_64 = dma_alloc_coherent(ar->dev, size,
 277                                                         &htt->txbuf.paddr,
 278                                                         GFP_KERNEL);
 279         if (!htt->txbuf.vaddr_txbuff_64)
 280                 return -ENOMEM;
 281 
 282         htt->txbuf.size = size;
 283 
 284         return 0;
 285 }
 286 
 287 static void ath10k_htt_tx_free_cont_frag_desc_32(struct ath10k_htt *htt)
 288 {
 289         size_t size;
 290 
 291         if (!htt->frag_desc.vaddr_desc_32)
 292                 return;
 293 
 294         size = htt->max_num_pending_tx *
 295                         sizeof(struct htt_msdu_ext_desc);
 296 
 297         dma_free_coherent(htt->ar->dev,
 298                           size,
 299                           htt->frag_desc.vaddr_desc_32,
 300                           htt->frag_desc.paddr);
 301 
 302         htt->frag_desc.vaddr_desc_32 = NULL;
 303 }
 304 
 305 static int ath10k_htt_tx_alloc_cont_frag_desc_32(struct ath10k_htt *htt)
 306 {
 307         struct ath10k *ar = htt->ar;
 308         size_t size;
 309 
 310         if (!ar->hw_params.continuous_frag_desc)
 311                 return 0;
 312 
 313         size = htt->max_num_pending_tx *
 314                         sizeof(struct htt_msdu_ext_desc);
 315         htt->frag_desc.vaddr_desc_32 = dma_alloc_coherent(ar->dev, size,
 316                                                           &htt->frag_desc.paddr,
 317                                                           GFP_KERNEL);
 318         if (!htt->frag_desc.vaddr_desc_32) {
 319                 ath10k_err(ar, "failed to alloc fragment desc memory\n");
 320                 return -ENOMEM;
 321         }
 322         htt->frag_desc.size = size;
 323 
 324         return 0;
 325 }
 326 
 327 static void ath10k_htt_tx_free_cont_frag_desc_64(struct ath10k_htt *htt)
 328 {
 329         size_t size;
 330 
 331         if (!htt->frag_desc.vaddr_desc_64)
 332                 return;
 333 
 334         size = htt->max_num_pending_tx *
 335                         sizeof(struct htt_msdu_ext_desc_64);
 336 
 337         dma_free_coherent(htt->ar->dev,
 338                           size,
 339                           htt->frag_desc.vaddr_desc_64,
 340                           htt->frag_desc.paddr);
 341 
 342         htt->frag_desc.vaddr_desc_64 = NULL;
 343 }
 344 
 345 static int ath10k_htt_tx_alloc_cont_frag_desc_64(struct ath10k_htt *htt)
 346 {
 347         struct ath10k *ar = htt->ar;
 348         size_t size;
 349 
 350         if (!ar->hw_params.continuous_frag_desc)
 351                 return 0;
 352 
 353         size = htt->max_num_pending_tx *
 354                         sizeof(struct htt_msdu_ext_desc_64);
 355 
 356         htt->frag_desc.vaddr_desc_64 = dma_alloc_coherent(ar->dev, size,
 357                                                           &htt->frag_desc.paddr,
 358                                                           GFP_KERNEL);
 359         if (!htt->frag_desc.vaddr_desc_64) {
 360                 ath10k_err(ar, "failed to alloc fragment desc memory\n");
 361                 return -ENOMEM;
 362         }
 363         htt->frag_desc.size = size;
 364 
 365         return 0;
 366 }
 367 
 368 static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt)
 369 {
 370         struct ath10k *ar = htt->ar;
 371         size_t size;
 372 
 373         if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
 374                       ar->running_fw->fw_file.fw_features))
 375                 return;
 376 
 377         size = sizeof(*htt->tx_q_state.vaddr);
 378 
 379         dma_unmap_single(ar->dev, htt->tx_q_state.paddr, size, DMA_TO_DEVICE);
 380         kfree(htt->tx_q_state.vaddr);
 381 }
 382 
 383 static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt)
 384 {
 385         struct ath10k *ar = htt->ar;
 386         size_t size;
 387         int ret;
 388 
 389         if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
 390                       ar->running_fw->fw_file.fw_features))
 391                 return 0;
 392 
 393         htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS;
 394         htt->tx_q_state.num_tids = HTT_TX_Q_STATE_NUM_TIDS;
 395         htt->tx_q_state.type = HTT_Q_DEPTH_TYPE_BYTES;
 396 
 397         size = sizeof(*htt->tx_q_state.vaddr);
 398         htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL);
 399         if (!htt->tx_q_state.vaddr)
 400                 return -ENOMEM;
 401 
 402         htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr,
 403                                                size, DMA_TO_DEVICE);
 404         ret = dma_mapping_error(ar->dev, htt->tx_q_state.paddr);
 405         if (ret) {
 406                 ath10k_warn(ar, "failed to dma map tx_q_state: %d\n", ret);
 407                 kfree(htt->tx_q_state.vaddr);
 408                 return -EIO;
 409         }
 410 
 411         return 0;
 412 }
 413 
 414 static void ath10k_htt_tx_free_txdone_fifo(struct ath10k_htt *htt)
 415 {
 416         WARN_ON(!kfifo_is_empty(&htt->txdone_fifo));
 417         kfifo_free(&htt->txdone_fifo);
 418 }
 419 
 420 static int ath10k_htt_tx_alloc_txdone_fifo(struct ath10k_htt *htt)
 421 {
 422         int ret;
 423         size_t size;
 424 
 425         size = roundup_pow_of_two(htt->max_num_pending_tx);
 426         ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL);
 427         return ret;
 428 }
 429 
 430 static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt)
 431 {
 432         struct ath10k *ar = htt->ar;
 433         int ret;
 434 
 435         ret = ath10k_htt_alloc_txbuff(htt);
 436         if (ret) {
 437                 ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret);
 438                 return ret;
 439         }
 440 
 441         ret = ath10k_htt_alloc_frag_desc(htt);
 442         if (ret) {
 443                 ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
 444                 goto free_txbuf;
 445         }
 446 
 447         ret = ath10k_htt_tx_alloc_txq(htt);
 448         if (ret) {
 449                 ath10k_err(ar, "failed to alloc txq: %d\n", ret);
 450                 goto free_frag_desc;
 451         }
 452 
 453         ret = ath10k_htt_tx_alloc_txdone_fifo(htt);
 454         if (ret) {
 455                 ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret);
 456                 goto free_txq;
 457         }
 458 
 459         return 0;
 460 
 461 free_txq:
 462         ath10k_htt_tx_free_txq(htt);
 463 
 464 free_frag_desc:
 465         ath10k_htt_free_frag_desc(htt);
 466 
 467 free_txbuf:
 468         ath10k_htt_free_txbuff(htt);
 469 
 470         return ret;
 471 }
 472 
 473 int ath10k_htt_tx_start(struct ath10k_htt *htt)
 474 {
 475         struct ath10k *ar = htt->ar;
 476         int ret;
 477 
 478         ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
 479                    htt->max_num_pending_tx);
 480 
 481         spin_lock_init(&htt->tx_lock);
 482         idr_init(&htt->pending_tx);
 483 
 484         if (htt->tx_mem_allocated)
 485                 return 0;
 486 
 487         if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
 488                 return 0;
 489 
 490         ret = ath10k_htt_tx_alloc_buf(htt);
 491         if (ret)
 492                 goto free_idr_pending_tx;
 493 
 494         htt->tx_mem_allocated = true;
 495 
 496         return 0;
 497 
 498 free_idr_pending_tx:
 499         idr_destroy(&htt->pending_tx);
 500 
 501         return ret;
 502 }
 503 
 504 static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
 505 {
 506         struct ath10k *ar = ctx;
 507         struct ath10k_htt *htt = &ar->htt;
 508         struct htt_tx_done tx_done = {0};
 509 
 510         ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);
 511 
 512         tx_done.msdu_id = msdu_id;
 513         tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
 514 
 515         ath10k_txrx_tx_unref(htt, &tx_done);
 516 
 517         return 0;
 518 }
 519 
 520 void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
 521 {
 522         if (!htt->tx_mem_allocated)
 523                 return;
 524 
 525         ath10k_htt_free_txbuff(htt);
 526         ath10k_htt_tx_free_txq(htt);
 527         ath10k_htt_free_frag_desc(htt);
 528         ath10k_htt_tx_free_txdone_fifo(htt);
 529         htt->tx_mem_allocated = false;
 530 }
 531 
 532 void ath10k_htt_tx_stop(struct ath10k_htt *htt)
 533 {
 534         idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
 535         idr_destroy(&htt->pending_tx);
 536 }
 537 
 538 void ath10k_htt_tx_free(struct ath10k_htt *htt)
 539 {
 540         ath10k_htt_tx_stop(htt);
 541         ath10k_htt_tx_destroy(htt);
 542 }
 543 
 544 void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
 545 {
 546         dev_kfree_skb_any(skb);
 547 }
 548 
 549 void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
 550 {
 551         dev_kfree_skb_any(skb);
 552 }
 553 EXPORT_SYMBOL(ath10k_htt_hif_tx_complete);
 554 
 555 int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
 556 {
 557         struct ath10k *ar = htt->ar;
 558         struct sk_buff *skb;
 559         struct htt_cmd *cmd;
 560         int len = 0;
 561         int ret;
 562 
 563         len += sizeof(cmd->hdr);
 564         len += sizeof(cmd->ver_req);
 565 
 566         skb = ath10k_htc_alloc_skb(ar, len);
 567         if (!skb)
 568                 return -ENOMEM;
 569 
 570         skb_put(skb, len);
 571         cmd = (struct htt_cmd *)skb->data;
 572         cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;
 573 
 574         ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
 575         if (ret) {
 576                 dev_kfree_skb_any(skb);
 577                 return ret;
 578         }
 579 
 580         return 0;
 581 }
 582 
 583 int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u32 mask, u32 reset_mask,
 584                              u64 cookie)
 585 {
 586         struct ath10k *ar = htt->ar;
 587         struct htt_stats_req *req;
 588         struct sk_buff *skb;
 589         struct htt_cmd *cmd;
 590         int len = 0, ret;
 591 
 592         len += sizeof(cmd->hdr);
 593         len += sizeof(cmd->stats_req);
 594 
 595         skb = ath10k_htc_alloc_skb(ar, len);
 596         if (!skb)
 597                 return -ENOMEM;
 598 
 599         skb_put(skb, len);
 600         cmd = (struct htt_cmd *)skb->data;
 601         cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;
 602 
 603         req = &cmd->stats_req;
 604 
 605         memset(req, 0, sizeof(*req));
 606 
 607         /* currently we support only max 24 bit masks so no need to worry
 608          * about endian support
 609          */
 610         memcpy(req->upload_types, &mask, 3);
 611         memcpy(req->reset_types, &reset_mask, 3);
 612         req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
 613         req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
 614         req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);
 615 
 616         ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
 617         if (ret) {
 618                 ath10k_warn(ar, "failed to send htt type stats request: %d",
 619                             ret);
 620                 dev_kfree_skb_any(skb);
 621                 return ret;
 622         }
 623 
 624         return 0;
 625 }
 626 
 627 static int ath10k_htt_send_frag_desc_bank_cfg_32(struct ath10k_htt *htt)
 628 {
 629         struct ath10k *ar = htt->ar;
 630         struct sk_buff *skb;
 631         struct htt_cmd *cmd;
 632         struct htt_frag_desc_bank_cfg32 *cfg;
 633         int ret, size;
 634         u8 info;
 635 
 636         if (!ar->hw_params.continuous_frag_desc)
 637                 return 0;
 638 
 639         if (!htt->frag_desc.paddr) {
 640                 ath10k_warn(ar, "invalid frag desc memory\n");
 641                 return -EINVAL;
 642         }
 643 
 644         size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg32);
 645         skb = ath10k_htc_alloc_skb(ar, size);
 646         if (!skb)
 647                 return -ENOMEM;
 648 
 649         skb_put(skb, size);
 650         cmd = (struct htt_cmd *)skb->data;
 651         cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
 652 
 653         info = 0;
 654         info |= SM(htt->tx_q_state.type,
 655                    HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);
 656 
 657         if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
 658                      ar->running_fw->fw_file.fw_features))
 659                 info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;
 660 
 661         cfg = &cmd->frag_desc_bank_cfg32;
 662         cfg->info = info;
 663         cfg->num_banks = 1;
 664         cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
 665         cfg->bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr);
 666         cfg->bank_id[0].bank_min_id = 0;
 667         cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
 668                                                     1);
 669 
 670         cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
 671         cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
 672         cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
 673         cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
 674         cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;
 675 
 676         ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");
 677 
 678         ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
 679         if (ret) {
 680                 ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
 681                             ret);
 682                 dev_kfree_skb_any(skb);
 683                 return ret;
 684         }
 685 
 686         return 0;
 687 }
 688 
 689 static int ath10k_htt_send_frag_desc_bank_cfg_64(struct ath10k_htt *htt)
 690 {
 691         struct ath10k *ar = htt->ar;
 692         struct sk_buff *skb;
 693         struct htt_cmd *cmd;
 694         struct htt_frag_desc_bank_cfg64 *cfg;
 695         int ret, size;
 696         u8 info;
 697 
 698         if (!ar->hw_params.continuous_frag_desc)
 699                 return 0;
 700 
 701         if (!htt->frag_desc.paddr) {
 702                 ath10k_warn(ar, "invalid frag desc memory\n");
 703                 return -EINVAL;
 704         }
 705 
 706         size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg64);
 707         skb = ath10k_htc_alloc_skb(ar, size);
 708         if (!skb)
 709                 return -ENOMEM;
 710 
 711         skb_put(skb, size);
 712         cmd = (struct htt_cmd *)skb->data;
 713         cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
 714 
 715         info = 0;
 716         info |= SM(htt->tx_q_state.type,
 717                    HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);
 718 
 719         if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
 720                      ar->running_fw->fw_file.fw_features))
 721                 info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;
 722 
 723         cfg = &cmd->frag_desc_bank_cfg64;
 724         cfg->info = info;
 725         cfg->num_banks = 1;
 726         cfg->desc_size = sizeof(struct htt_msdu_ext_desc_64);
 727         cfg->bank_base_addrs[0] =  __cpu_to_le64(htt->frag_desc.paddr);
 728         cfg->bank_id[0].bank_min_id = 0;
 729         cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
 730                                                     1);
 731 
 732         cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
 733         cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
 734         cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
 735         cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
 736         cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;
 737 
 738         ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");
 739 
 740         ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
 741         if (ret) {
 742                 ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
 743                             ret);
 744                 dev_kfree_skb_any(skb);
 745                 return ret;
 746         }
 747 
 748         return 0;
 749 }
 750 
 751 static void ath10k_htt_fill_rx_desc_offset_32(void *rx_ring)
 752 {
 753         struct htt_rx_ring_setup_ring32 *ring =
 754                         (struct htt_rx_ring_setup_ring32 *)rx_ring;
 755 
 756 #define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
 757         ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
 758         ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
 759         ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
 760         ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
 761         ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
 762         ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
 763         ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
 764         ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
 765         ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
 766         ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
 767 #undef desc_offset
 768 }
 769 
 770 static void ath10k_htt_fill_rx_desc_offset_64(void *rx_ring)
 771 {
 772         struct htt_rx_ring_setup_ring64 *ring =
 773                         (struct htt_rx_ring_setup_ring64 *)rx_ring;
 774 
 775 #define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
 776         ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
 777         ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
 778         ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
 779         ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
 780         ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
 781         ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
 782         ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
 783         ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
 784         ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
 785         ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
 786 #undef desc_offset
 787 }
 788 
 789 static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
 790 {
 791         struct ath10k *ar = htt->ar;
 792         struct sk_buff *skb;
 793         struct htt_cmd *cmd;
 794         struct htt_rx_ring_setup_ring32 *ring;
 795         const int num_rx_ring = 1;
 796         u16 flags;
 797         u32 fw_idx;
 798         int len;
 799         int ret;
 800 
 801         /*
 802          * the HW expects the buffer to be an integral number of 4-byte
 803          * "words"
 804          */
 805         BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
 806         BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);
 807 
 808         len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
 809             + (sizeof(*ring) * num_rx_ring);
 810         skb = ath10k_htc_alloc_skb(ar, len);
 811         if (!skb)
 812                 return -ENOMEM;
 813 
 814         skb_put(skb, len);
 815 
 816         cmd = (struct htt_cmd *)skb->data;
 817         ring = &cmd->rx_setup_32.rings[0];
 818 
 819         cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
 820         cmd->rx_setup_32.hdr.num_rings = 1;
 821 
 822         /* FIXME: do we need all of this? */
 823         flags = 0;
 824         flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
 825         flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
 826         flags |= HTT_RX_RING_FLAGS_PPDU_START;
 827         flags |= HTT_RX_RING_FLAGS_PPDU_END;
 828         flags |= HTT_RX_RING_FLAGS_MPDU_START;
 829         flags |= HTT_RX_RING_FLAGS_MPDU_END;
 830         flags |= HTT_RX_RING_FLAGS_MSDU_START;
 831         flags |= HTT_RX_RING_FLAGS_MSDU_END;
 832         flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
 833         flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
 834         flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
 835         flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
 836         flags |= HTT_RX_RING_FLAGS_CTRL_RX;
 837         flags |= HTT_RX_RING_FLAGS_MGMT_RX;
 838         flags |= HTT_RX_RING_FLAGS_NULL_RX;
 839         flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;
 840 
 841         fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
 842 
 843         ring->fw_idx_shadow_reg_paddr =
 844                 __cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
 845         ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
 846         ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
 847         ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
 848         ring->flags = __cpu_to_le16(flags);
 849         ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
 850 
 851         ath10k_htt_fill_rx_desc_offset_32(ring);
 852         ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
 853         if (ret) {
 854                 dev_kfree_skb_any(skb);
 855                 return ret;
 856         }
 857 
 858         return 0;
 859 }
 860 
 861 static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
 862 {
 863         struct ath10k *ar = htt->ar;
 864         struct sk_buff *skb;
 865         struct htt_cmd *cmd;
 866         struct htt_rx_ring_setup_ring64 *ring;
 867         const int num_rx_ring = 1;
 868         u16 flags;
 869         u32 fw_idx;
 870         int len;
 871         int ret;
 872 
 873         /* HW expects the buffer to be an integral number of 4-byte
 874          * "words"
 875          */
 876         BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
 877         BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);
 878 
 879         len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_64.hdr)
 880             + (sizeof(*ring) * num_rx_ring);
 881         skb = ath10k_htc_alloc_skb(ar, len);
 882         if (!skb)
 883                 return -ENOMEM;
 884 
 885         skb_put(skb, len);
 886 
 887         cmd = (struct htt_cmd *)skb->data;
 888         ring = &cmd->rx_setup_64.rings[0];
 889 
 890         cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
 891         cmd->rx_setup_64.hdr.num_rings = 1;
 892 
 893         flags = 0;
 894         flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
 895         flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
 896         flags |= HTT_RX_RING_FLAGS_PPDU_START;
 897         flags |= HTT_RX_RING_FLAGS_PPDU_END;
 898         flags |= HTT_RX_RING_FLAGS_MPDU_START;
 899         flags |= HTT_RX_RING_FLAGS_MPDU_END;
 900         flags |= HTT_RX_RING_FLAGS_MSDU_START;
 901         flags |= HTT_RX_RING_FLAGS_MSDU_END;
 902         flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
 903         flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
 904         flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
 905         flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
 906         flags |= HTT_RX_RING_FLAGS_CTRL_RX;
 907         flags |= HTT_RX_RING_FLAGS_MGMT_RX;
 908         flags |= HTT_RX_RING_FLAGS_NULL_RX;
 909         flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;
 910 
 911         fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
 912 
 913         ring->fw_idx_shadow_reg_paddr = __cpu_to_le64(htt->rx_ring.alloc_idx.paddr);
 914         ring->rx_ring_base_paddr = __cpu_to_le64(htt->rx_ring.base_paddr);
 915         ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
 916         ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
 917         ring->flags = __cpu_to_le16(flags);
 918         ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
 919 
 920         ath10k_htt_fill_rx_desc_offset_64(ring);
 921         ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
 922         if (ret) {
 923                 dev_kfree_skb_any(skb);
 924                 return ret;
 925         }
 926 
 927         return 0;
 928 }
 929 
 930 static int ath10k_htt_send_rx_ring_cfg_hl(struct ath10k_htt *htt)
 931 {
 932         struct ath10k *ar = htt->ar;
 933         struct sk_buff *skb;
 934         struct htt_cmd *cmd;
 935         struct htt_rx_ring_setup_ring32 *ring;
 936         const int num_rx_ring = 1;
 937         u16 flags;
 938         int len;
 939         int ret;
 940 
 941         /*
 942          * the HW expects the buffer to be an integral number of 4-byte
 943          * "words"
 944          */
 945         BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
 946         BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);
 947 
 948         len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
 949             + (sizeof(*ring) * num_rx_ring);
 950         skb = ath10k_htc_alloc_skb(ar, len);
 951         if (!skb)
 952                 return -ENOMEM;
 953 
 954         skb_put(skb, len);
 955 
 956         cmd = (struct htt_cmd *)skb->data;
 957         ring = &cmd->rx_setup_32.rings[0];
 958 
 959         cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
 960         cmd->rx_setup_32.hdr.num_rings = 1;
 961 
 962         flags = 0;
 963         flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
 964         flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
 965         flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
 966 
 967         memset(ring, 0, sizeof(*ring));
 968         ring->rx_ring_len = __cpu_to_le16(HTT_RX_RING_SIZE_MIN);
 969         ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
 970         ring->flags = __cpu_to_le16(flags);
 971 
 972         ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
 973         if (ret) {
 974                 dev_kfree_skb_any(skb);
 975                 return ret;
 976         }
 977 
 978         return 0;
 979 }
 980 
 981 static int ath10k_htt_h2t_aggr_cfg_msg_32(struct ath10k_htt *htt,
 982                                           u8 max_subfrms_ampdu,
 983                                           u8 max_subfrms_amsdu)
 984 {
 985         struct ath10k *ar = htt->ar;
 986         struct htt_aggr_conf *aggr_conf;
 987         struct sk_buff *skb;
 988         struct htt_cmd *cmd;
 989         int len;
 990         int ret;
 991 
 992         /* Firmware defaults are: amsdu = 3 and ampdu = 64 */
 993 
 994         if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
 995                 return -EINVAL;
 996 
 997         if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
 998                 return -EINVAL;
 999 
1000         len = sizeof(cmd->hdr);
1001         len += sizeof(cmd->aggr_conf);
1002 
1003         skb = ath10k_htc_alloc_skb(ar, len);
1004         if (!skb)
1005                 return -ENOMEM;
1006 
1007         skb_put(skb, len);
1008         cmd = (struct htt_cmd *)skb->data;
1009         cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;
1010 
1011         aggr_conf = &cmd->aggr_conf;
1012         aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
1013         aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;
1014 
1015         ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
1016                    aggr_conf->max_num_amsdu_subframes,
1017                    aggr_conf->max_num_ampdu_subframes);
1018 
1019         ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
1020         if (ret) {
1021                 dev_kfree_skb_any(skb);
1022                 return ret;
1023         }
1024 
1025         return 0;
1026 }
1027 
1028 static int ath10k_htt_h2t_aggr_cfg_msg_v2(struct ath10k_htt *htt,
1029                                           u8 max_subfrms_ampdu,
1030                                           u8 max_subfrms_amsdu)
1031 {
1032         struct ath10k *ar = htt->ar;
1033         struct htt_aggr_conf_v2 *aggr_conf;
1034         struct sk_buff *skb;
1035         struct htt_cmd *cmd;
1036         int len;
1037         int ret;
1038 
1039         /* Firmware defaults are: amsdu = 3 and ampdu = 64 */
1040 
1041         if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
1042                 return -EINVAL;
1043 
1044         if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
1045                 return -EINVAL;
1046 
1047         len = sizeof(cmd->hdr);
1048         len += sizeof(cmd->aggr_conf_v2);
1049 
1050         skb = ath10k_htc_alloc_skb(ar, len);
1051         if (!skb)
1052                 return -ENOMEM;
1053 
1054         skb_put(skb, len);
1055         cmd = (struct htt_cmd *)skb->data;
1056         cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;
1057 
1058         aggr_conf = &cmd->aggr_conf_v2;
1059         aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
1060         aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;
1061 
1062         ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
1063                    aggr_conf->max_num_amsdu_subframes,
1064                    aggr_conf->max_num_ampdu_subframes);
1065 
1066         ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
1067         if (ret) {
1068                 dev_kfree_skb_any(skb);
1069                 return ret;
1070         }
1071 
1072         return 0;
1073 }
1074 
1075 int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
1076                              __le32 token,
1077                              __le16 fetch_seq_num,
1078                              struct htt_tx_fetch_record *records,
1079                              size_t num_records)
1080 {
1081         struct sk_buff *skb;
1082         struct htt_cmd *cmd;
1083         const u16 resp_id = 0;
1084         int len = 0;
1085         int ret;
1086 
1087         /* Response IDs are echo-ed back only for host driver convienence
1088          * purposes. They aren't used for anything in the driver yet so use 0.
1089          */
1090 
1091         len += sizeof(cmd->hdr);
1092         len += sizeof(cmd->tx_fetch_resp);
1093         len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records;
1094 
1095         skb = ath10k_htc_alloc_skb(ar, len);
1096         if (!skb)
1097                 return -ENOMEM;
1098 
1099         skb_put(skb, len);
1100         cmd = (struct htt_cmd *)skb->data;
1101         cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP;
1102         cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id);
1103         cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num;
1104         cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records);
1105         cmd->tx_fetch_resp.token = token;
1106 
1107         memcpy(cmd->tx_fetch_resp.records, records,
1108                sizeof(records[0]) * num_records);
1109 
1110         ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb);
1111         if (ret) {
1112                 ath10k_warn(ar, "failed to submit htc command: %d\n", ret);
1113                 goto err_free_skb;
1114         }
1115 
1116         return 0;
1117 
1118 err_free_skb:
1119         dev_kfree_skb_any(skb);
1120 
1121         return ret;
1122 }
1123 
1124 static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
1125 {
1126         struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1127         struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
1128         struct ath10k_vif *arvif;
1129 
1130         if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
1131                 return ar->scan.vdev_id;
1132         } else if (cb->vif) {
1133                 arvif = (void *)cb->vif->drv_priv;
1134                 return arvif->vdev_id;
1135         } else if (ar->monitor_started) {
1136                 return ar->monitor_vdev_id;
1137         } else {
1138                 return 0;
1139         }
1140 }
1141 
1142 static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
1143 {
1144         struct ieee80211_hdr *hdr = (void *)skb->data;
1145         struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
1146 
1147         if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
1148                 return HTT_DATA_TX_EXT_TID_MGMT;
1149         else if (cb->flags & ATH10K_SKB_F_QOS)
1150                 return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
1151         else
1152                 return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
1153 }
1154 
1155 int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
1156 {
1157         struct ath10k *ar = htt->ar;
1158         struct device *dev = ar->dev;
1159         struct sk_buff *txdesc = NULL;
1160         struct htt_cmd *cmd;
1161         struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
1162         u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
1163         int len = 0;
1164         int msdu_id = -1;
1165         int res;
1166         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
1167 
1168         len += sizeof(cmd->hdr);
1169         len += sizeof(cmd->mgmt_tx);
1170 
1171         res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
1172         if (res < 0)
1173                 goto err;
1174 
1175         msdu_id = res;
1176 
1177         if ((ieee80211_is_action(hdr->frame_control) ||
1178              ieee80211_is_deauth(hdr->frame_control) ||
1179              ieee80211_is_disassoc(hdr->frame_control)) &&
1180              ieee80211_has_protected(hdr->frame_control)) {
1181                 skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
1182         }
1183 
1184         txdesc = ath10k_htc_alloc_skb(ar, len);
1185         if (!txdesc) {
1186                 res = -ENOMEM;
1187                 goto err_free_msdu_id;
1188         }
1189 
1190         skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
1191                                        DMA_TO_DEVICE);
1192         res = dma_mapping_error(dev, skb_cb->paddr);
1193         if (res) {
1194                 res = -EIO;
1195                 goto err_free_txdesc;
1196         }
1197 
1198         skb_put(txdesc, len);
1199         cmd = (struct htt_cmd *)txdesc->data;
1200         memset(cmd, 0, len);
1201 
1202         cmd->hdr.msg_type         = HTT_H2T_MSG_TYPE_MGMT_TX;
1203         cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
1204         cmd->mgmt_tx.len        = __cpu_to_le32(msdu->len);
1205         cmd->mgmt_tx.desc_id    = __cpu_to_le32(msdu_id);
1206         cmd->mgmt_tx.vdev_id    = __cpu_to_le32(vdev_id);
1207         memcpy(cmd->mgmt_tx.hdr, msdu->data,
1208                min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));
1209 
1210         res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
1211         if (res)
1212                 goto err_unmap_msdu;
1213 
1214         return 0;
1215 
1216 err_unmap_msdu:
1217         if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
1218                 dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
1219 err_free_txdesc:
1220         dev_kfree_skb_any(txdesc);
1221 err_free_msdu_id:
1222         spin_lock_bh(&htt->tx_lock);
1223         ath10k_htt_tx_free_msdu_id(htt, msdu_id);
1224         spin_unlock_bh(&htt->tx_lock);
1225 err:
1226         return res;
1227 }
1228 
1229 #define HTT_TX_HL_NEEDED_HEADROOM \
1230         (unsigned int)(sizeof(struct htt_cmd_hdr) + \
1231         sizeof(struct htt_data_tx_desc) + \
1232         sizeof(struct ath10k_htc_hdr))
1233 
1234 static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
1235                             struct sk_buff *msdu)
1236 {
1237         struct ath10k *ar = htt->ar;
1238         int res, data_len;
1239         struct htt_cmd_hdr *cmd_hdr;
1240         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
1241         struct htt_data_tx_desc *tx_desc;
1242         struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
1243         struct sk_buff *tmp_skb;
1244         bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
1245         u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
1246         u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
1247         u8 flags0 = 0;
1248         u16 flags1 = 0;
1249         u16 msdu_id = 0;
1250 
1251         if ((ieee80211_is_action(hdr->frame_control) ||
1252              ieee80211_is_deauth(hdr->frame_control) ||
1253              ieee80211_is_disassoc(hdr->frame_control)) &&
1254              ieee80211_has_protected(hdr->frame_control)) {
1255                 skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
1256         }
1257 
1258         data_len = msdu->len;
1259 
1260         switch (txmode) {
1261         case ATH10K_HW_TXRX_RAW:
1262         case ATH10K_HW_TXRX_NATIVE_WIFI:
1263                 flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
1264                 /* fall through */
1265         case ATH10K_HW_TXRX_ETHERNET:
1266                 flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
1267                 break;
1268         case ATH10K_HW_TXRX_MGMT:
1269                 flags0 |= SM(ATH10K_HW_TXRX_MGMT,
1270                              HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
1271                 flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
1272                 break;
1273         }
1274 
1275         if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
1276                 flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
1277 
1278         flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
1279         flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
1280         if (msdu->ip_summed == CHECKSUM_PARTIAL &&
1281             !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
1282                 flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
1283                 flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
1284         }
1285 
1286         /* Prepend the HTT header and TX desc struct to the data message
1287          * and realloc the skb if it does not have enough headroom.
1288          */
1289         if (skb_headroom(msdu) < HTT_TX_HL_NEEDED_HEADROOM) {
1290                 tmp_skb = msdu;
1291 
1292                 ath10k_dbg(htt->ar, ATH10K_DBG_HTT,
1293                            "Not enough headroom in skb. Current headroom: %u, needed: %u. Reallocating...\n",
1294                            skb_headroom(msdu), HTT_TX_HL_NEEDED_HEADROOM);
1295                 msdu = skb_realloc_headroom(msdu, HTT_TX_HL_NEEDED_HEADROOM);
1296                 kfree_skb(tmp_skb);
1297                 if (!msdu) {
1298                         ath10k_warn(htt->ar, "htt hl tx: Unable to realloc skb!\n");
1299                         res = -ENOMEM;
1300                         goto out;
1301                 }
1302         }
1303 
1304         if (ar->bus_param.hl_msdu_ids) {
1305                 flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
1306                 res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
1307                 if (res < 0) {
1308                         ath10k_err(ar, "msdu_id allocation failed %d\n", res);
1309                         goto out;
1310                 }
1311                 msdu_id = res;
1312         }
1313 
1314         /* As msdu is freed by mac80211 (in ieee80211_tx_status()) and by
1315          * ath10k (in ath10k_htt_htc_tx_complete()) we have to increase
1316          * reference by one to avoid a use-after-free case and a double
1317          * free.
1318          */
1319         skb_get(msdu);
1320 
1321         skb_push(msdu, sizeof(*cmd_hdr));
1322         skb_push(msdu, sizeof(*tx_desc));
1323         cmd_hdr = (struct htt_cmd_hdr *)msdu->data;
1324         tx_desc = (struct htt_data_tx_desc *)(msdu->data + sizeof(*cmd_hdr));
1325 
1326         cmd_hdr->msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
1327         tx_desc->flags0 = flags0;
1328         tx_desc->flags1 = __cpu_to_le16(flags1);
1329         tx_desc->len = __cpu_to_le16(data_len);
1330         tx_desc->id = __cpu_to_le16(msdu_id);
1331         tx_desc->frags_paddr = 0; /* always zero */
1332         /* Initialize peer_id to INVALID_PEER because this is NOT
1333          * Reinjection path
1334          */
1335         tx_desc->peerid = __cpu_to_le32(HTT_INVALID_PEERID);
1336 
1337         res = ath10k_htc_send(&htt->ar->htc, htt->eid, msdu);
1338 
1339 out:
1340         return res;
1341 }
1342 
1343 static int ath10k_htt_tx_32(struct ath10k_htt *htt,
1344                             enum ath10k_hw_txrx_mode txmode,
1345                             struct sk_buff *msdu)
1346 {
1347         struct ath10k *ar = htt->ar;
1348         struct device *dev = ar->dev;
1349         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
1350         struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
1351         struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
1352         struct ath10k_hif_sg_item sg_items[2];
1353         struct ath10k_htt_txbuf_32 *txbuf;
1354         struct htt_data_tx_desc_frag *frags;
1355         bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
1356         u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
1357         u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
1358         int prefetch_len;
1359         int res;
1360         u8 flags0 = 0;
1361         u16 msdu_id, flags1 = 0;
1362         u16 freq = 0;
1363         u32 frags_paddr = 0;
1364         u32 txbuf_paddr;
1365         struct htt_msdu_ext_desc *ext_desc = NULL;
1366         struct htt_msdu_ext_desc *ext_desc_t = NULL;
1367 
1368         res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
1369         if (res < 0)
1370                 goto err;
1371 
1372         msdu_id = res;
1373 
1374         prefetch_len = min(htt->prefetch_len, msdu->len);
1375         prefetch_len = roundup(prefetch_len, 4);
1376 
1377         txbuf = htt->txbuf.vaddr_txbuff_32 + msdu_id;
1378         txbuf_paddr = htt->txbuf.paddr +
1379                       (sizeof(struct ath10k_htt_txbuf_32) * msdu_id);
1380 
1381         if ((ieee80211_is_action(hdr->frame_control) ||
1382              ieee80211_is_deauth(hdr->frame_control) ||
1383              ieee80211_is_disassoc(hdr->frame_control)) &&
1384              ieee80211_has_protected(hdr->frame_control)) {
1385                 skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
1386         } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
1387                    txmode == ATH10K_HW_TXRX_RAW &&
1388                    ieee80211_has_protected(hdr->frame_control)) {
1389                 skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
1390         }
1391 
1392         skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
1393                                        DMA_TO_DEVICE);
1394         res = dma_mapping_error(dev, skb_cb->paddr);
1395         if (res) {
1396                 res = -EIO;
1397                 goto err_free_msdu_id;
1398         }
1399 
1400         if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
1401                 freq = ar->scan.roc_freq;
1402 
1403         switch (txmode) {
1404         case ATH10K_HW_TXRX_RAW:
1405         case ATH10K_HW_TXRX_NATIVE_WIFI:
1406                 flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
1407                 /* fall through */
1408         case ATH10K_HW_TXRX_ETHERNET:
1409                 if (ar->hw_params.continuous_frag_desc) {
1410                         ext_desc_t = htt->frag_desc.vaddr_desc_32;
1411                         memset(&ext_desc_t[msdu_id], 0,
1412                                sizeof(struct htt_msdu_ext_desc));
1413                         frags = (struct htt_data_tx_desc_frag *)
1414                                 &ext_desc_t[msdu_id].frags;
1415                         ext_desc = &ext_desc_t[msdu_id];
1416                         frags[0].tword_addr.paddr_lo =
1417                                 __cpu_to_le32(skb_cb->paddr);
1418                         frags[0].tword_addr.paddr_hi = 0;
1419                         frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
1420 
1421                         frags_paddr =  htt->frag_desc.paddr +
1422                                 (sizeof(struct htt_msdu_ext_desc) * msdu_id);
1423                 } else {
1424                         frags = txbuf->frags;
1425                         frags[0].dword_addr.paddr =
1426                                 __cpu_to_le32(skb_cb->paddr);
1427                         frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
1428                         frags[1].dword_addr.paddr = 0;
1429                         frags[1].dword_addr.len = 0;
1430 
1431                         frags_paddr = txbuf_paddr;
1432                 }
1433                 flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
1434                 break;
1435         case ATH10K_HW_TXRX_MGMT:
1436                 flags0 |= SM(ATH10K_HW_TXRX_MGMT,
1437                              HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
1438                 flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
1439 
1440                 frags_paddr = skb_cb->paddr;
1441                 break;
1442         }
1443 
1444         /* Normally all commands go through HTC which manages tx credits for
1445          * each endpoint and notifies when tx is completed.
1446          *
1447          * HTT endpoint is creditless so there's no need to care about HTC
1448          * flags. In that case it is trivial to fill the HTC header here.
1449          *
1450          * MSDU transmission is considered completed upon HTT event. This
1451          * implies no relevant resources can be freed until after the event is
1452          * received. That's why HTC tx completion handler itself is ignored by
1453          * setting NULL to transfer_context for all sg items.
1454          *
1455          * There is simply no point in pushing HTT TX_FRM through HTC tx path
1456          * as it's a waste of resources. By bypassing HTC it is possible to
1457          * avoid extra memory allocations, compress data structures and thus
1458          * improve performance.
1459          */
1460 
1461         txbuf->htc_hdr.eid = htt->eid;
1462         txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
1463                                            sizeof(txbuf->cmd_tx) +
1464                                            prefetch_len);
1465         txbuf->htc_hdr.flags = 0;
1466 
1467         if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
1468                 flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
1469 
1470         flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
1471         flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
1472         if (msdu->ip_summed == CHECKSUM_PARTIAL &&
1473             !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
1474                 flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
1475                 flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
1476                 if (ar->hw_params.continuous_frag_desc)
1477                         ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
1478         }
1479 
1480         /* Prevent firmware from sending up tx inspection requests. There's
1481          * nothing ath10k can do with frames requested for inspection so force
1482          * it to simply rely a regular tx completion with discard status.
1483          */
1484         flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
1485 
1486         txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
1487         txbuf->cmd_tx.flags0 = flags0;
1488         txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
1489         txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
1490         txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
1491         txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
1492         if (ath10k_mac_tx_frm_has_freq(ar)) {
1493                 txbuf->cmd_tx.offchan_tx.peerid =
1494                                 __cpu_to_le16(HTT_INVALID_PEERID);
1495                 txbuf->cmd_tx.offchan_tx.freq =
1496                                 __cpu_to_le16(freq);
1497         } else {
1498                 txbuf->cmd_tx.peerid =
1499                                 __cpu_to_le32(HTT_INVALID_PEERID);
1500         }
1501 
1502         trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
1503         ath10k_dbg(ar, ATH10K_DBG_HTT,
1504                    "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n",
1505                    flags0, flags1, msdu->len, msdu_id, &frags_paddr,
1506                    &skb_cb->paddr, vdev_id, tid, freq);
1507         ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
1508                         msdu->data, msdu->len);
1509         trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
1510         trace_ath10k_tx_payload(ar, msdu->data, msdu->len);
1511 
1512         sg_items[0].transfer_id = 0;
1513         sg_items[0].transfer_context = NULL;
1514         sg_items[0].vaddr = &txbuf->htc_hdr;
1515         sg_items[0].paddr = txbuf_paddr +
1516                             sizeof(txbuf->frags);
1517         sg_items[0].len = sizeof(txbuf->htc_hdr) +
1518                           sizeof(txbuf->cmd_hdr) +
1519                           sizeof(txbuf->cmd_tx);
1520 
1521         sg_items[1].transfer_id = 0;
1522         sg_items[1].transfer_context = NULL;
1523         sg_items[1].vaddr = msdu->data;
1524         sg_items[1].paddr = skb_cb->paddr;
1525         sg_items[1].len = prefetch_len;
1526 
1527         res = ath10k_hif_tx_sg(htt->ar,
1528                                htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
1529                                sg_items, ARRAY_SIZE(sg_items));
1530         if (res)
1531                 goto err_unmap_msdu;
1532 
1533         return 0;
1534 
1535 err_unmap_msdu:
1536         dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
1537 err_free_msdu_id:
1538         ath10k_htt_tx_free_msdu_id(htt, msdu_id);
1539 err:
1540         return res;
1541 }
1542 
1543 static int ath10k_htt_tx_64(struct ath10k_htt *htt,
1544                             enum ath10k_hw_txrx_mode txmode,
1545                             struct sk_buff *msdu)
1546 {
1547         struct ath10k *ar = htt->ar;
1548         struct device *dev = ar->dev;
1549         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
1550         struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
1551         struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
1552         struct ath10k_hif_sg_item sg_items[2];
1553         struct ath10k_htt_txbuf_64 *txbuf;
1554         struct htt_data_tx_desc_frag *frags;
1555         bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
1556         u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
1557         u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
1558         int prefetch_len;
1559         int res;
1560         u8 flags0 = 0;
1561         u16 msdu_id, flags1 = 0;
1562         u16 freq = 0;
1563         dma_addr_t frags_paddr = 0;
1564         dma_addr_t txbuf_paddr;
1565         struct htt_msdu_ext_desc_64 *ext_desc = NULL;
1566         struct htt_msdu_ext_desc_64 *ext_desc_t = NULL;
1567 
1568         res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
1569         if (res < 0)
1570                 goto err;
1571 
1572         msdu_id = res;
1573 
1574         prefetch_len = min(htt->prefetch_len, msdu->len);
1575         prefetch_len = roundup(prefetch_len, 4);
1576 
1577         txbuf = htt->txbuf.vaddr_txbuff_64 + msdu_id;
1578         txbuf_paddr = htt->txbuf.paddr +
1579                       (sizeof(struct ath10k_htt_txbuf_64) * msdu_id);
1580 
1581         if ((ieee80211_is_action(hdr->frame_control) ||
1582              ieee80211_is_deauth(hdr->frame_control) ||
1583              ieee80211_is_disassoc(hdr->frame_control)) &&
1584              ieee80211_has_protected(hdr->frame_control)) {
1585                 skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
1586         } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
1587                    txmode == ATH10K_HW_TXRX_RAW &&
1588                    ieee80211_has_protected(hdr->frame_control)) {
1589                 skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
1590         }
1591 
1592         skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
1593                                        DMA_TO_DEVICE);
1594         res = dma_mapping_error(dev, skb_cb->paddr);
1595         if (res) {
1596                 res = -EIO;
1597                 goto err_free_msdu_id;
1598         }
1599 
1600         if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
1601                 freq = ar->scan.roc_freq;
1602 
1603         switch (txmode) {
1604         case ATH10K_HW_TXRX_RAW:
1605         case ATH10K_HW_TXRX_NATIVE_WIFI:
1606                 flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
1607                 /* fall through */
1608         case ATH10K_HW_TXRX_ETHERNET:
1609                 if (ar->hw_params.continuous_frag_desc) {
1610                         ext_desc_t = htt->frag_desc.vaddr_desc_64;
1611                         memset(&ext_desc_t[msdu_id], 0,
1612                                sizeof(struct htt_msdu_ext_desc_64));
1613                         frags = (struct htt_data_tx_desc_frag *)
1614                                 &ext_desc_t[msdu_id].frags;
1615                         ext_desc = &ext_desc_t[msdu_id];
1616                         frags[0].tword_addr.paddr_lo =
1617                                 __cpu_to_le32(skb_cb->paddr);
1618                         frags[0].tword_addr.paddr_hi =
1619                                 __cpu_to_le16(upper_32_bits(skb_cb->paddr));
1620                         frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
1621 
1622                         frags_paddr =  htt->frag_desc.paddr +
1623                            (sizeof(struct htt_msdu_ext_desc_64) * msdu_id);
1624                 } else {
1625                         frags = txbuf->frags;
1626                         frags[0].tword_addr.paddr_lo =
1627                                                 __cpu_to_le32(skb_cb->paddr);
1628                         frags[0].tword_addr.paddr_hi =
1629                                 __cpu_to_le16(upper_32_bits(skb_cb->paddr));
1630                         frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
1631                         frags[1].tword_addr.paddr_lo = 0;
1632                         frags[1].tword_addr.paddr_hi = 0;
1633                         frags[1].tword_addr.len_16 = 0;
1634                 }
1635                 flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
1636                 break;
1637         case ATH10K_HW_TXRX_MGMT:
1638                 flags0 |= SM(ATH10K_HW_TXRX_MGMT,
1639                              HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
1640                 flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
1641 
1642                 frags_paddr = skb_cb->paddr;
1643                 break;
1644         }
1645 
1646         /* Normally all commands go through HTC which manages tx credits for
1647          * each endpoint and notifies when tx is completed.
1648          *
1649          * HTT endpoint is creditless so there's no need to care about HTC
1650          * flags. In that case it is trivial to fill the HTC header here.
1651          *
1652          * MSDU transmission is considered completed upon HTT event. This
1653          * implies no relevant resources can be freed until after the event is
1654          * received. That's why HTC tx completion handler itself is ignored by
1655          * setting NULL to transfer_context for all sg items.
1656          *
1657          * There is simply no point in pushing HTT TX_FRM through HTC tx path
1658          * as it's a waste of resources. By bypassing HTC it is possible to
1659          * avoid extra memory allocations, compress data structures and thus
1660          * improve performance.
1661          */
1662 
1663         txbuf->htc_hdr.eid = htt->eid;
1664         txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
1665                                            sizeof(txbuf->cmd_tx) +
1666                                            prefetch_len);
1667         txbuf->htc_hdr.flags = 0;
1668 
1669         if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
1670                 flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
1671 
1672         flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
1673         flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
1674         if (msdu->ip_summed == CHECKSUM_PARTIAL &&
1675             !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
1676                 flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
1677                 flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
1678                 if (ar->hw_params.continuous_frag_desc) {
1679                         memset(ext_desc->tso_flag, 0, sizeof(ext_desc->tso_flag));
1680                         ext_desc->tso_flag[3] |=
1681                                 __cpu_to_le32(HTT_MSDU_CHECKSUM_ENABLE_64);
1682                 }
1683         }
1684 
1685         /* Prevent firmware from sending up tx inspection requests. There's
1686          * nothing ath10k can do with frames requested for inspection so force
1687          * it to simply rely a regular tx completion with discard status.
1688          */
1689         flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
1690 
1691         txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
1692         txbuf->cmd_tx.flags0 = flags0;
1693         txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
1694         txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
1695         txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
1696 
1697         /* fill fragment descriptor */
1698         txbuf->cmd_tx.frags_paddr = __cpu_to_le64(frags_paddr);
1699         if (ath10k_mac_tx_frm_has_freq(ar)) {
1700                 txbuf->cmd_tx.offchan_tx.peerid =
1701                                 __cpu_to_le16(HTT_INVALID_PEERID);
1702                 txbuf->cmd_tx.offchan_tx.freq =
1703                                 __cpu_to_le16(freq);
1704         } else {
1705                 txbuf->cmd_tx.peerid =
1706                                 __cpu_to_le32(HTT_INVALID_PEERID);
1707         }
1708 
1709         trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
1710         ath10k_dbg(ar, ATH10K_DBG_HTT,
1711                    "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n",
1712                    flags0, flags1, msdu->len, msdu_id, &frags_paddr,
1713                    &skb_cb->paddr, vdev_id, tid, freq);
1714         ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
1715                         msdu->data, msdu->len);
1716         trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
1717         trace_ath10k_tx_payload(ar, msdu->data, msdu->len);
1718 
1719         sg_items[0].transfer_id = 0;
1720         sg_items[0].transfer_context = NULL;
1721         sg_items[0].vaddr = &txbuf->htc_hdr;
1722         sg_items[0].paddr = txbuf_paddr +
1723                             sizeof(txbuf->frags);
1724         sg_items[0].len = sizeof(txbuf->htc_hdr) +
1725                           sizeof(txbuf->cmd_hdr) +
1726                           sizeof(txbuf->cmd_tx);
1727 
1728         sg_items[1].transfer_id = 0;
1729         sg_items[1].transfer_context = NULL;
1730         sg_items[1].vaddr = msdu->data;
1731         sg_items[1].paddr = skb_cb->paddr;
1732         sg_items[1].len = prefetch_len;
1733 
1734         res = ath10k_hif_tx_sg(htt->ar,
1735                                htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
1736                                sg_items, ARRAY_SIZE(sg_items));
1737         if (res)
1738                 goto err_unmap_msdu;
1739 
1740         return 0;
1741 
1742 err_unmap_msdu:
1743         dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
1744 err_free_msdu_id:
1745         ath10k_htt_tx_free_msdu_id(htt, msdu_id);
1746 err:
1747         return res;
1748 }
1749 
1750 static const struct ath10k_htt_tx_ops htt_tx_ops_32 = {
1751         .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_32,
1752         .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
1753         .htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_32,
1754         .htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_32,
1755         .htt_tx = ath10k_htt_tx_32,
1756         .htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_32,
1757         .htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_32,
1758         .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32,
1759 };
1760 
1761 static const struct ath10k_htt_tx_ops htt_tx_ops_64 = {
1762         .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_64,
1763         .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_64,
1764         .htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_64,
1765         .htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_64,
1766         .htt_tx = ath10k_htt_tx_64,
1767         .htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_64,
1768         .htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_64,
1769         .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_v2,
1770 };
1771 
1772 static const struct ath10k_htt_tx_ops htt_tx_ops_hl = {
1773         .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_hl,
1774         .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
1775         .htt_tx = ath10k_htt_tx_hl,
1776         .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32,
1777 };
1778 
1779 void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)
1780 {
1781         struct ath10k *ar = htt->ar;
1782 
1783         if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
1784                 htt->tx_ops = &htt_tx_ops_hl;
1785         else if (ar->hw_params.target_64bit)
1786                 htt->tx_ops = &htt_tx_ops_64;
1787         else
1788                 htt->tx_ops = &htt_tx_ops_32;
1789 }

/* [<][>][^][v][top][bottom][index][help] */