/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"

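/* Release one pending tx slot and wake the mac80211 queues as soon as the
 * first slot below the limit frees up. The __ variant requires htt->tx_lock
 * to be held; ath10k_htt_tx_dec_pending() below takes the lock itself.
 */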
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
        htt->num_pending_tx--;
        if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
                ieee80211_wake_queues(htt->ar->hw);
}

static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
        spin_lock_bh(&htt->tx_lock);
        __ath10k_htt_tx_dec_pending(htt);
        spin_unlock_bh(&htt->tx_lock);
}

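/* Reserve one pending tx slot. Returns -EBUSY once the firmware's tx
 * descriptor budget (max_num_pending_tx) is exhausted and stops the mac80211
 * queues when the last slot is taken.
 */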
static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
        int ret = 0;

        spin_lock_bh(&htt->tx_lock);

        if (htt->num_pending_tx >= htt->max_num_pending_tx) {
                ret = -EBUSY;
                goto exit;
        }

        htt->num_pending_tx++;
        if (htt->num_pending_tx == htt->max_num_pending_tx)
                ieee80211_stop_queues(htt->ar->hw);

exit:
        spin_unlock_bh(&htt->tx_lock);
        return ret;
}

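/* Map the msdu onto a free 16-bit msdu_id via the pending_tx IDR. The id is
 * echoed back by the firmware in tx completion events and is used to look
 * the skb up again in ath10k_txrx_tx_unref().
 */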
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
        struct ath10k *ar = htt->ar;
        int ret;

        lockdep_assert_held(&htt->tx_lock);

        ret = idr_alloc(&htt->pending_tx, skb, 0, 0x10000, GFP_ATOMIC);

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

        return ret;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
        struct ath10k *ar = htt->ar;

        lockdep_assert_held(&htt->tx_lock);

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);

        idr_remove(&htt->pending_tx, msdu_id);
}

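/* Set up the per-device HTT tx state: the tx lock, the pending_tx IDR and a
 * DMA pool of 4-byte aligned ath10k_htt_txbuf descriptors, which hold the
 * HTC/HTT headers and the fragment list for each outgoing frame.
 */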
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;

        ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
                   htt->max_num_pending_tx);

        spin_lock_init(&htt->tx_lock);
        idr_init(&htt->pending_tx);

        htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
                                       sizeof(struct ath10k_htt_txbuf), 4, 0);
        if (!htt->tx_pool) {
                idr_destroy(&htt->pending_tx);
                return -ENOMEM;
        }

        return 0;
}

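/* idr_for_each() callback used during teardown: complete every outstanding
 * msdu with a faked, discarded tx completion so all skbs get unmapped and
 * freed.
 */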
static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
        struct ath10k *ar = ctx;
        struct ath10k_htt *htt = &ar->htt;
        struct htt_tx_done tx_done = {0};

        ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %d\n", msdu_id);

        tx_done.discard = 1;
        tx_done.msdu_id = msdu_id;

        spin_lock_bh(&htt->tx_lock);
        ath10k_txrx_tx_unref(htt, &tx_done);
        spin_unlock_bh(&htt->tx_lock);

        return 0;
}

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
        idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
        idr_destroy(&htt->pending_tx);
        dma_pool_destroy(htt->tx_pool);
}

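/* HTT tx completions are delivered via HTT T2H events rather than through
 * HTC, so the HTC-level send completion only needs to free the command skb.
 */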
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
        dev_kfree_skb_any(skb);
}

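/* Ask the firmware for its HTT version. The reply arrives asynchronously as
 * an HTT T2H VERSION_CONF event which fills in htt->target_version_major and
 * htt->target_version_minor.
 */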
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len = 0;
        int ret;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->ver_req);

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

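/* Request (and reset) firmware statistics. The mask selects which stat
 * groups to upload and reset; the 64-bit cookie is echoed back by the
 * firmware so replies can be matched to requests.
 */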
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
        struct ath10k *ar = htt->ar;
        struct htt_stats_req *req;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len = 0, ret;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->stats_req);

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

        req = &cmd->stats_req;

        memset(req, 0, sizeof(*req));

        /* currently we support only max 8-bit masks so there is no need to
         * worry about endianness */
        req->upload_types[0] = mask;
        req->reset_types[0] = mask;
        req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
        req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
        req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                ath10k_warn(ar, "failed to send htt type stats request: %d\n",
                            ret);
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

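/* Configure the firmware rx ring for low-latency (LL) operation: pass the
 * DMA addresses of the ring and the fw index shadow register, the ring
 * geometry, and the offsets of the individual struct htt_rx_desc sections so
 * the firmware knows where to place each piece of rx metadata.
 */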
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        struct htt_rx_ring_setup_ring *ring;
        const int num_rx_ring = 1;
        u16 flags;
        u32 fw_idx;
        int len;
        int ret;

        /*
         * the HW expects the buffer to be an integral number of 4-byte
         * "words"
         */
        BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
        BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

        len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
            + (sizeof(*ring) * num_rx_ring);
        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);

        cmd = (struct htt_cmd *)skb->data;
        ring = &cmd->rx_setup.rings[0];

        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
        cmd->rx_setup.hdr.num_rings = 1;

        /* FIXME: do we need all of this? */
        flags = 0;
        flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
        flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
        flags |= HTT_RX_RING_FLAGS_PPDU_START;
        flags |= HTT_RX_RING_FLAGS_PPDU_END;
        flags |= HTT_RX_RING_FLAGS_MPDU_START;
        flags |= HTT_RX_RING_FLAGS_MPDU_END;
        flags |= HTT_RX_RING_FLAGS_MSDU_START;
        flags |= HTT_RX_RING_FLAGS_MSDU_END;
        flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
        flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
        flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
        flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
        flags |= HTT_RX_RING_FLAGS_CTRL_RX;
        flags |= HTT_RX_RING_FLAGS_MGMT_RX;
        flags |= HTT_RX_RING_FLAGS_NULL_RX;
        flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

        fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

        ring->fw_idx_shadow_reg_paddr =
                __cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
        ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
        ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
        ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
        ring->flags = __cpu_to_le16(flags);
        ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

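        /* the rx_desc field offsets below are given to the firmware in units
         * of 4-byte words */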
#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

        ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
        ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
        ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
        ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
        ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
        ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
        ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
        ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
        ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
        ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

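/* Tell the firmware how many subframes it may aggregate per A-MPDU and per
 * A-MSDU. Zero and out-of-range values are rejected here rather than being
 * passed on to the firmware.
 */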
int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
                                u8 max_subfrms_ampdu,
                                u8 max_subfrms_amsdu)
{
        struct ath10k *ar = htt->ar;
        struct htt_aggr_conf *aggr_conf;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len;
        int ret;

        /* Firmware defaults are: amsdu = 3 and ampdu = 64 */

        if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
                return -EINVAL;

        if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
                return -EINVAL;

        len = sizeof(cmd->hdr);
        len += sizeof(cmd->aggr_conf);

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

        aggr_conf = &cmd->aggr_conf;
        aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
        aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d\n",
                   aggr_conf->max_num_amsdu_subframes,
                   aggr_conf->max_num_ampdu_subframes);

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

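/* Transmit a management frame using the dedicated HTT_H2T_MSG_TYPE_MGMT_TX
 * command (the pre-3.0 HTT firmware path). The frame itself is DMA-mapped
 * and referenced by physical address; only the first bytes of the 802.11
 * header are copied into the command for the firmware to inspect.
 */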
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
        struct ath10k *ar = htt->ar;
        struct device *dev = ar->dev;
        struct sk_buff *txdesc = NULL;
        struct htt_cmd *cmd;
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
        u8 vdev_id = skb_cb->vdev_id;
        int len = 0;
        int msdu_id = -1;
        int res;

        res = ath10k_htt_tx_inc_pending(htt);
        if (res)
                goto err;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->mgmt_tx);

        spin_lock_bh(&htt->tx_lock);
        res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
        if (res < 0) {
                spin_unlock_bh(&htt->tx_lock);
                goto err_tx_dec;
        }
        msdu_id = res;
        spin_unlock_bh(&htt->tx_lock);

        txdesc = ath10k_htc_alloc_skb(ar, len);
        if (!txdesc) {
                res = -ENOMEM;
                goto err_free_msdu_id;
        }

        skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
                                       DMA_TO_DEVICE);
        res = dma_mapping_error(dev, skb_cb->paddr);
        if (res) {
                res = -EIO;
                goto err_free_txdesc;
        }

        skb_put(txdesc, len);
        cmd = (struct htt_cmd *)txdesc->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
        cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
        cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
        cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
        cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
        memcpy(cmd->mgmt_tx.hdr, msdu->data,
               min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

        skb_cb->htt.txbuf = NULL;

        res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
        if (res)
                goto err_unmap_msdu;

        return 0;

err_unmap_msdu:
        dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
        dev_kfree_skb_any(txdesc);
err_free_msdu_id:
        spin_lock_bh(&htt->tx_lock);
        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
        spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
        ath10k_htt_tx_dec_pending(htt);
err:
        return res;
}

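/* Transmit a data frame using HTT_H2T_MSG_TYPE_TX_FRM. The HTC/HTT headers
 * and the fragment list live in a DMA-pool txbuf, so the frame can be handed
 * to the hardware as a two-element scatter-gather list without any copying.
 */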
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
        struct ath10k *ar = htt->ar;
        struct device *dev = ar->dev;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
        struct ath10k_hif_sg_item sg_items[2];
        struct htt_data_tx_desc_frag *frags;
        u8 vdev_id = skb_cb->vdev_id;
        u8 tid = skb_cb->htt.tid;
        int prefetch_len;
        int res;
        u8 flags0 = 0;
        u16 msdu_id, flags1 = 0;
        dma_addr_t paddr;
        u32 frags_paddr;
        bool use_frags;

        res = ath10k_htt_tx_inc_pending(htt);
        if (res)
                goto err;

        spin_lock_bh(&htt->tx_lock);
        res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
        if (res < 0) {
                spin_unlock_bh(&htt->tx_lock);
                goto err_tx_dec;
        }
        msdu_id = res;
        spin_unlock_bh(&htt->tx_lock);

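        /* Only the first prefetch_len bytes of the frame travel with the tx
         * command itself (second sg item below); the firmware fetches the
         * rest via the fragment list. Round up to keep the transfer
         * word-aligned.
         */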
        prefetch_len = min(htt->prefetch_len, msdu->len);
        prefetch_len = roundup(prefetch_len, 4);

        /* Since HTT 3.0 there is no separate mgmt tx command. However, in
         * case of mgmt tx using TX_FRM there is no tx fragment list. Instead
         * of a fragment list the host driver passes the frame pointer
         * directly. */
        use_frags = htt->target_version_major < 3 ||
                    !ieee80211_is_mgmt(hdr->frame_control);

        skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
                                           &paddr);
        if (!skb_cb->htt.txbuf) {
                res = -ENOMEM;
                goto err_free_msdu_id;
        }
        skb_cb->htt.txbuf_paddr = paddr;

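        /* Robust management frames are encrypted by the firmware, which
         * also appends the CCMP MIC, so reserve room for it in the mapped
         * buffer up front.
         */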
        if ((ieee80211_is_action(hdr->frame_control) ||
             ieee80211_is_deauth(hdr->frame_control) ||
             ieee80211_is_disassoc(hdr->frame_control)) &&
             ieee80211_has_protected(hdr->frame_control))
                skb_put(msdu, IEEE80211_CCMP_MIC_LEN);

        skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
                                       DMA_TO_DEVICE);
        res = dma_mapping_error(dev, skb_cb->paddr);
        if (res) {
                res = -EIO;
                goto err_free_txbuf;
        }

        if (likely(use_frags)) {
                frags = skb_cb->htt.txbuf->frags;

                frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
                frags[0].len = __cpu_to_le32(msdu->len);
                frags[1].paddr = 0;
                frags[1].len = 0;

                flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
                             HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

                frags_paddr = skb_cb->htt.txbuf_paddr;
        } else {
                flags0 |= SM(ATH10K_HW_TXRX_MGMT,
                             HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

                frags_paddr = skb_cb->paddr;
        }

        /* Normally all commands go through HTC which manages tx credits for
         * each endpoint and notifies when tx is completed.
         *
         * The HTT endpoint is creditless so there's no need to care about
         * HTC flags. In that case it is trivial to fill the HTC header here.
         *
         * MSDU transmission is considered completed upon HTT event. This
         * implies no relevant resources can be freed until after the event
         * is received. That's why the HTC tx completion handler itself is
         * ignored by setting transfer_context to NULL for all sg items.
         *
         * There is simply no point in pushing HTT TX_FRM through HTC tx path
         * as it's a waste of resources. By bypassing HTC it is possible to
         * avoid extra memory allocations, compress data structures and thus
         * improve performance. */

        skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
        skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
                        sizeof(skb_cb->htt.txbuf->cmd_hdr) +
                        sizeof(skb_cb->htt.txbuf->cmd_tx) +
                        prefetch_len);
        skb_cb->htt.txbuf->htc_hdr.flags = 0;

        if (!ieee80211_has_protected(hdr->frame_control))
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

        flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

        flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
        flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
        if (msdu->ip_summed == CHECKSUM_PARTIAL) {
                flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
                flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
        }

        /* Prevent firmware from sending up tx inspection requests. There's
         * nothing ath10k can do with frames requested for inspection so
         * force it to simply rely on a regular tx completion with discard
         * status.
         */
        flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

        skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
        skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
        skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
        skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
        skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
        skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
        skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID);
        skb_cb->htt.txbuf->cmd_tx.freq = __cpu_to_le16(skb_cb->htt.freq);

        trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
        ath10k_dbg(ar, ATH10K_DBG_HTT,
                   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
                   flags0, flags1, msdu->len, msdu_id, frags_paddr,
                   (u32)skb_cb->paddr, vdev_id, tid, skb_cb->htt.freq);
        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
                        msdu->data, msdu->len);
        trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
        trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

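        /* The frags array sits at the start of the txbuf, so the DMA address
         * of the HTC/HTT headers is the txbuf address offset by its size.
         * The second sg item carries only the prefetched head of the frame.
         */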
        sg_items[0].transfer_id = 0;
        sg_items[0].transfer_context = NULL;
        sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
        sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
                            sizeof(skb_cb->htt.txbuf->frags);
        sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
                          sizeof(skb_cb->htt.txbuf->cmd_hdr) +
                          sizeof(skb_cb->htt.txbuf->cmd_tx);

        sg_items[1].transfer_id = 0;
        sg_items[1].transfer_context = NULL;
        sg_items[1].vaddr = msdu->data;
        sg_items[1].paddr = skb_cb->paddr;
        sg_items[1].len = prefetch_len;

        res = ath10k_hif_tx_sg(htt->ar,
                               htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
                               sg_items, ARRAY_SIZE(sg_items));
        if (res)
                goto err_unmap_msdu;

        return 0;

err_unmap_msdu:
        dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txbuf:
        dma_pool_free(htt->tx_pool,
                      skb_cb->htt.txbuf,
                      skb_cb->htt.txbuf_paddr);
err_free_msdu_id:
        spin_lock_bh(&htt->tx_lock);
        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
        spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
        ath10k_htt_tx_dec_pending(htt);
err:
        return res;
}