/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "hif.h"
#include "debug.h"

/********/
/* Send */
/********/

static inline void ath10k_htc_send_complete_check(struct ath10k_htc_ep *ep,
						  int force)
{
	/*
	 * Check whether HIF has any prior sends that have finished
	 * but have not yet had their post-processing done.
	 */
	ath10k_hif_send_complete_check(ep->htc->ar, ep->ul_pipe_id, force);
}

static void ath10k_htc_control_tx_complete(struct ath10k *ar,
					   struct sk_buff *skb)
{
	kfree_skb(skb);
}

static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
{
	struct sk_buff *skb;
	struct ath10k_skb_cb *skb_cb;

	skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE);
	if (!skb)
		return NULL;

	skb_reserve(skb, 20); /* FIXME: why 20 bytes? */
	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	skb_cb = ATH10K_SKB_CB(skb);
	memset(skb_cb, 0, sizeof(*skb_cb));

	ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
	return skb;
}

static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
					     struct sk_buff *skb)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);

	dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
}

static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
					    struct sk_buff *skb)
{
	struct ath10k *ar = ep->htc->ar;

	ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
		   ep->eid, skb);

	ath10k_htc_restore_tx_skb(ep->htc, skb);

	if (!ep->ep_ops.ep_tx_complete) {
		ath10k_warn(ar, "no tx handler for eid %d\n", ep->eid);
		dev_kfree_skb_any(skb);
		return;
	}

	ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
}

/* assumes tx_lock is held */
static bool ath10k_htc_ep_need_credit_update(struct ath10k_htc_ep *ep)
{
	struct ath10k *ar = ep->htc->ar;

	if (!ep->tx_credit_flow_enabled)
		return false;
	if (ep->tx_credits >= ep->tx_credits_per_max_message)
		return false;

	ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n",
		   ep->eid);
	return true;
}

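/*
 * Fill in the HTC header in front of the payload: endpoint id, payload
 * length and a per-endpoint sequence number.  The NEED_CREDIT_UPDATE flag
 * is set under tx_lock so the credit state checked by
 * ath10k_htc_ep_need_credit_update() cannot change underneath us.
 */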
static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
				      struct sk_buff *skb)
{
	struct ath10k_htc_hdr *hdr;

	hdr = (struct ath10k_htc_hdr *)skb->data;

	hdr->eid = ep->eid;
	hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
	hdr->flags = 0;

	spin_lock_bh(&ep->htc->tx_lock);
	hdr->seq_no = ep->seq_no++;

	if (ath10k_htc_ep_need_credit_update(ep))
		hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;

	spin_unlock_bh(&ep->htc->tx_lock);
}

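/*
 * Transmit path: prepend the HTC header, reserve TX credits for endpoints
 * with credit flow control enabled (one credit per target_credit_size
 * worth of data), DMA-map the frame and hand it to HIF as a single
 * scatter-gather item.  On failure the credits and the skb headroom are
 * restored so the caller can retry or free the frame.
 */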
int ath10k_htc_send(struct ath10k_htc *htc,
		    enum ath10k_htc_ep_id eid,
		    struct sk_buff *skb)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_ep *ep = &htc->endpoint[eid];
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
	struct ath10k_hif_sg_item sg_item;
	struct device *dev = htc->ar->dev;
	int credits = 0;
	int ret;

	if (htc->ar->state == ATH10K_STATE_WEDGED)
		return -ECOMM;

	if (eid >= ATH10K_HTC_EP_COUNT) {
		ath10k_warn(ar, "Invalid endpoint id: %d\n", eid);
		return -ENOENT;
	}

	skb_push(skb, sizeof(struct ath10k_htc_hdr));

	if (ep->tx_credit_flow_enabled) {
		credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
		spin_lock_bh(&htc->tx_lock);
		if (ep->tx_credits < credits) {
			spin_unlock_bh(&htc->tx_lock);
			ret = -EAGAIN;
			goto err_pull;
		}
		ep->tx_credits -= credits;
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "htc ep %d consumed %d credits (total %d)\n",
			   eid, credits, ep->tx_credits);
		spin_unlock_bh(&htc->tx_lock);
	}

	ath10k_htc_prepare_tx_skb(ep, skb);

	skb_cb->eid = eid;
	skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	ret = dma_mapping_error(dev, skb_cb->paddr);
	if (ret) {
		ret = -EIO;
		goto err_credits;
	}

	sg_item.transfer_id = ep->eid;
	sg_item.transfer_context = skb;
	sg_item.vaddr = skb->data;
	sg_item.paddr = skb_cb->paddr;
	sg_item.len = skb->len;

	ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
	if (ret)
		goto err_unmap;

	return 0;

err_unmap:
	dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
err_credits:
	if (ep->tx_credit_flow_enabled) {
		spin_lock_bh(&htc->tx_lock);
		ep->tx_credits += credits;
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "htc ep %d reverted %d credits back (total %d)\n",
			   eid, credits, ep->tx_credits);
		spin_unlock_bh(&htc->tx_lock);

		if (ep->ep_ops.ep_tx_credits)
			ep->ep_ops.ep_tx_credits(htc->ar);
	}
err_pull:
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	return ret;
}

static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
					    struct sk_buff *skb)
{
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_skb_cb *skb_cb;
	struct ath10k_htc_ep *ep;

	if (WARN_ON_ONCE(!skb))
		return 0;

	skb_cb = ATH10K_SKB_CB(skb);
	ep = &htc->endpoint[skb_cb->eid];

	ath10k_htc_notify_tx_completion(ep, skb);
	/* the skb now belongs to the completion handler */

	return 0;
}

/***********/
/* Receive */
/***********/

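/*
 * A credit report is a list of (eid, credits) pairs carried in an HTC
 * trailer record.  Credits are returned to the corresponding endpoints
 * under tx_lock; the lock is dropped around the ep_tx_credits() callback
 * so the callback is free to take tx_lock (e.g. to transmit) itself.
 */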
static void
ath10k_htc_process_credit_report(struct ath10k_htc *htc,
				 const struct ath10k_htc_credit_report *report,
				 int len,
				 enum ath10k_htc_ep_id eid)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_ep *ep;
	int i, n_reports;

	if (len % sizeof(*report))
		ath10k_warn(ar, "Uneven credit report len %d\n", len);

	n_reports = len / sizeof(*report);

	spin_lock_bh(&htc->tx_lock);
	for (i = 0; i < n_reports; i++, report++) {
		if (report->eid >= ATH10K_HTC_EP_COUNT)
			break;

		ep = &htc->endpoint[report->eid];
		ep->tx_credits += report->credits;

		ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
			   report->eid, report->credits, ep->tx_credits);

		if (ep->ep_ops.ep_tx_credits) {
			spin_unlock_bh(&htc->tx_lock);
			ep->ep_ops.ep_tx_credits(htc->ar);
			spin_lock_bh(&htc->tx_lock);
		}
	}
	spin_unlock_bh(&htc->tx_lock);
}

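/*
 * An HTC trailer is a sequence of records, each starting with a record
 * header (id + length) followed by the record payload.  Walk the records
 * one by one, dispatch the ones we understand (currently only credit
 * reports) and stop with -EINVAL on a malformed record.
 */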
static int ath10k_htc_process_trailer(struct ath10k_htc *htc,
				      u8 *buffer,
				      int length,
				      enum ath10k_htc_ep_id src_eid)
{
	struct ath10k *ar = htc->ar;
	int status = 0;
	struct ath10k_htc_record *record;
	u8 *orig_buffer;
	int orig_length;
	size_t len;

	orig_buffer = buffer;
	orig_length = length;

	while (length > 0) {
		record = (struct ath10k_htc_record *)buffer;

		if (length < sizeof(record->hdr)) {
			status = -EINVAL;
			break;
		}

		if (record->hdr.len > length) {
			/* no room left in buffer for record */
			ath10k_warn(ar, "Invalid record length: %d\n",
				    record->hdr.len);
			status = -EINVAL;
			break;
		}

		switch (record->hdr.id) {
		case ATH10K_HTC_RECORD_CREDITS:
			len = sizeof(struct ath10k_htc_credit_report);
			if (record->hdr.len < len) {
				ath10k_warn(ar, "Credit report too short\n");
				status = -EINVAL;
				break;
			}
			ath10k_htc_process_credit_report(htc,
							 record->credit_report,
							 record->hdr.len,
							 src_eid);
			break;
		default:
			ath10k_warn(ar, "Unhandled record: id:%d length:%d\n",
				    record->hdr.id, record->hdr.len);
			break;
		}

		if (status)
			break;

		/* multiple records may be present in a trailer */
		buffer += sizeof(record->hdr) + record->hdr.len;
		length -= sizeof(record->hdr) + record->hdr.len;
	}

	if (status)
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc rx bad trailer", "",
				orig_buffer, orig_length);

	return status;
}

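/*
 * Main RX dispatch: validate the HTC header, strip and process a trailer
 * if the TRAILER_PRESENT flag is set, handle endpoint 0 control messages
 * inline and hand everything else to the endpoint's ep_rx_complete()
 * callback, which then owns the skb.
 */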
static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
					    struct sk_buff *skb)
{
	int status = 0;
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_htc_hdr *hdr;
	struct ath10k_htc_ep *ep;
	u16 payload_len;
	u32 trailer_len = 0;
	size_t min_len;
	u8 eid;
	bool trailer_present;

	hdr = (struct ath10k_htc_hdr *)skb->data;
	skb_pull(skb, sizeof(*hdr));

	eid = hdr->eid;

	if (eid >= ATH10K_HTC_EP_COUNT) {
		ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "",
				hdr, sizeof(*hdr));
		status = -EINVAL;
		goto out;
	}

	ep = &htc->endpoint[eid];

	/*
	 * If the endpoint that received this message from the target has
	 * a to-target HIF pipe whose send completions are polled rather
	 * than interrupt-driven, this is a good point to ask HIF to check
	 * whether it has any completed sends to handle.
	 */
	if (ep->ul_is_polled)
		ath10k_htc_send_complete_check(ep, 1);

	payload_len = __le16_to_cpu(hdr->len);

	if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
		ath10k_warn(ar, "HTC rx frame too long, len: %zu\n",
			    payload_len + sizeof(*hdr));
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "",
				hdr, sizeof(*hdr));
		status = -EINVAL;
		goto out;
	}

	if (skb->len < payload_len) {
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "HTC Rx: insufficient length, got %d, expected %d\n",
			   skb->len, payload_len);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len",
				"", hdr, sizeof(*hdr));
		status = -EINVAL;
		goto out;
	}

	/* get flags to check for trailer */
	trailer_present = hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
	if (trailer_present) {
		u8 *trailer;

		trailer_len = hdr->trailer_len;
		min_len = sizeof(struct ath10k_ath10k_htc_record_hdr);

		if ((trailer_len < min_len) ||
		    (trailer_len > payload_len)) {
			ath10k_warn(ar, "Invalid trailer length: %d\n",
				    trailer_len);
			status = -EPROTO;
			goto out;
		}

		trailer = (u8 *)hdr;
		trailer += sizeof(*hdr);
		trailer += payload_len;
		trailer -= trailer_len;
		status = ath10k_htc_process_trailer(htc, trailer,
						    trailer_len, hdr->eid);
		if (status)
			goto out;

		skb_trim(skb, skb->len - trailer_len);
	}

	if (((int)payload_len - (int)trailer_len) <= 0)
		/* zero length packet with trailer data, just drop it */
		goto out;

	if (eid == ATH10K_HTC_EP_0) {
		struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;

		switch (__le16_to_cpu(msg->hdr.message_id)) {
		default:
			/* handle HTC control message */
			if (completion_done(&htc->ctl_resp)) {
				/*
				 * this is a fatal error, the target should
				 * not be sending unsolicited messages on
				 * ep 0
				 */
				ath10k_warn(ar, "HTC rx ctrl still processing\n");
				status = -EINVAL;
				complete(&htc->ctl_resp);
				goto out;
			}

			htc->control_resp_len =
				min_t(int, skb->len,
				      ATH10K_HTC_MAX_CTRL_MSG_LEN);

			memcpy(htc->control_resp_buffer, skb->data,
			       htc->control_resp_len);

			complete(&htc->ctl_resp);
			break;
		case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
			htc->htc_ops.target_send_suspend_complete(ar);
		}
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
		   eid, skb);
	ep->ep_ops.ep_rx_complete(ar, skb);

	/* skb is now owned by the rx completion handler */
	skb = NULL;
out:
	kfree_skb(skb);

	return status;
}

static void ath10k_htc_control_rx_complete(struct ath10k *ar,
					   struct sk_buff *skb)
{
	/* This is unexpected. FW is not supposed to send regular rx on this
	 * endpoint.
	 */
	ath10k_warn(ar, "unexpected htc rx\n");
	kfree_skb(skb);
}

/***************/
/* Init/Deinit */
/***************/

static const char *htc_service_name(enum ath10k_htc_svc_id id)
{
	switch (id) {
	case ATH10K_HTC_SVC_ID_RESERVED:
		return "Reserved";
	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
		return "Control";
	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
		return "WMI";
	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
		return "DATA BE";
	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
		return "DATA BK";
	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
		return "DATA VI";
	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
		return "DATA VO";
	case ATH10K_HTC_SVC_ID_NMI_CONTROL:
		return "NMI Control";
	case ATH10K_HTC_SVC_ID_NMI_DATA:
		return "NMI Data";
	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
		return "HTT Data";
	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
		return "RAW";
	}

	return "Unknown";
}

static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
{
	struct ath10k_htc_ep *ep;
	int i;

	for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
		ep = &htc->endpoint[i];
		ep->service_id = ATH10K_HTC_SVC_ID_UNUSED;
		ep->max_ep_message_len = 0;
		ep->max_tx_queue_depth = 0;
		ep->eid = i;
		ep->htc = htc;
		ep->tx_credit_flow_enabled = true;
	}
}

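/*
 * service_tx_alloc[] maps a service id to the number of target TX credits
 * reserved for it.  The entry filled in here is looked up later by
 * ath10k_htc_get_credit_allocation() when a service connects.
 */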
static void ath10k_htc_setup_target_buffer_assignments(struct ath10k_htc *htc)
{
	struct ath10k_htc_svc_tx_credits *entry;

	entry = &htc->service_tx_alloc[0];

	/*
	 * For PCIe, allocate all credits/HTC buffers to WMI. No buffers
	 * are used/required for data; data always remains on the host.
	 */
	entry++;
	entry->service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
	entry->credit_allocation = htc->total_transmit_credits;
}

static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
					   u16 service_id)
{
	u8 allocation = 0;
	int i;

	for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) {
		if (htc->service_tx_alloc[i].service_id == service_id)
			allocation =
			    htc->service_tx_alloc[i].credit_allocation;
	}

	return allocation;
}

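/*
 * Wait for the target's HTC READY message on endpoint 0.  The message
 * advertises the total number of TX credits and the credit size; both
 * must be non-zero.  Once parsed, the pseudo control service
 * (ATH10K_HTC_SVC_ID_RSVD_CTRL) is connected on endpoint 0.
 */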
int ath10k_htc_wait_target(struct ath10k_htc *htc)
{
	struct ath10k *ar = htc->ar;
	int i, status = 0;
	struct ath10k_htc_svc_conn_req conn_req;
	struct ath10k_htc_svc_conn_resp conn_resp;
	struct ath10k_htc_msg *msg;
	u16 message_id;
	u16 credit_count;
	u16 credit_size;

	status = wait_for_completion_timeout(&htc->ctl_resp,
					     ATH10K_HTC_WAIT_TIMEOUT_HZ);
	if (status == 0) {
		/* Workaround: In some cases the PCI HIF doesn't
		 * receive interrupt for the control response message
		 * even if the buffer was completed. It is suspected
		 * iomap writes unmasking PCI CE irqs aren't propagated
		 * properly in KVM PCI-passthrough sometimes.
		 */
		ath10k_warn(ar, "failed to receive control response completion, polling..\n");

		for (i = 0; i < CE_COUNT; i++)
			ath10k_hif_send_complete_check(htc->ar, i, 1);

		status = wait_for_completion_timeout(&htc->ctl_resp,
						     ATH10K_HTC_WAIT_TIMEOUT_HZ);

		if (status == 0)
			status = -ETIMEDOUT;
	}

	if (status < 0) {
		ath10k_err(ar, "ctl_resp never came in (%d)\n", status);
		return status;
	}

	if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
		ath10k_err(ar, "Invalid HTC ready msg len:%d\n",
			   htc->control_resp_len);
		return -ECOMM;
	}

	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
	message_id   = __le16_to_cpu(msg->hdr.message_id);
	credit_count = __le16_to_cpu(msg->ready.credit_count);
	credit_size  = __le16_to_cpu(msg->ready.credit_size);

	if (message_id != ATH10K_HTC_MSG_READY_ID) {
		ath10k_err(ar, "Invalid HTC ready msg: 0x%x\n", message_id);
		return -ECOMM;
	}

	htc->total_transmit_credits = credit_count;
	htc->target_credit_size = credit_size;

	ath10k_dbg(ar, ATH10K_DBG_HTC,
		   "Target ready! transmit resources: %d size:%d\n",
		   htc->total_transmit_credits,
		   htc->target_credit_size);

	if ((htc->total_transmit_credits == 0) ||
	    (htc->target_credit_size == 0)) {
		ath10k_err(ar, "Invalid credit size received\n");
		return -ECOMM;
	}

	ath10k_htc_setup_target_buffer_assignments(htc);

	/* setup our pseudo HTC control endpoint connection */
	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));
	conn_req.ep_ops.ep_tx_complete = ath10k_htc_control_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath10k_htc_control_rx_complete;
	conn_req.max_send_queue_depth = ATH10K_NUM_CONTROL_TX_BUFFERS;
	conn_req.service_id = ATH10K_HTC_SVC_ID_RSVD_CTRL;

	/* connect fake service */
	status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
	if (status) {
		ath10k_err(ar, "could not connect to htc service (%d)\n",
			   status);
		return status;
	}

	return 0;
}

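/*
 * Connect a service to an HTC endpoint.  For the pseudo control service
 * everything is set up locally; for real services a CONNECT_SERVICE
 * message is sent on endpoint 0 and the response provides the assigned
 * endpoint id and the maximum message size.  Credit flow control is only
 * kept enabled for the WMI control service.
 */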
int ath10k_htc_connect_service(struct ath10k_htc *htc,
			       struct ath10k_htc_svc_conn_req *conn_req,
			       struct ath10k_htc_svc_conn_resp *conn_resp)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_msg *msg;
	struct ath10k_htc_conn_svc *req_msg;
	struct ath10k_htc_conn_svc_response resp_msg_dummy;
	struct ath10k_htc_conn_svc_response *resp_msg = &resp_msg_dummy;
	enum ath10k_htc_ep_id assigned_eid = ATH10K_HTC_EP_COUNT;
	struct ath10k_htc_ep *ep;
	struct sk_buff *skb;
	unsigned int max_msg_size = 0;
	int length, status;
	bool disable_credit_flow_ctrl = false;
	u16 message_id, service_id, flags = 0;
	u8 tx_alloc = 0;

	/* special case for HTC pseudo control service */
	if (conn_req->service_id == ATH10K_HTC_SVC_ID_RSVD_CTRL) {
		disable_credit_flow_ctrl = true;
		assigned_eid = ATH10K_HTC_EP_0;
		max_msg_size = ATH10K_HTC_MAX_CTRL_MSG_LEN;
		memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
		goto setup;
	}

	tx_alloc = ath10k_htc_get_credit_allocation(htc,
						    conn_req->service_id);
	if (!tx_alloc)
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "boot htc service %s does not allocate target credits\n",
			   htc_service_name(conn_req->service_id));

	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
	if (!skb) {
		ath10k_err(ar, "Failed to allocate HTC packet\n");
		return -ENOMEM;
	}

	length = sizeof(msg->hdr) + sizeof(msg->connect_service);
	skb_put(skb, length);
	memset(skb->data, 0, length);

	msg = (struct ath10k_htc_msg *)skb->data;
	msg->hdr.message_id =
		__cpu_to_le16(ATH10K_HTC_MSG_CONNECT_SERVICE_ID);

	flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);

	/* Only enable credit flow control for WMI ctrl service */
	if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
		flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
		disable_credit_flow_ctrl = true;
	}

	req_msg = &msg->connect_service;
	req_msg->flags = __cpu_to_le16(flags);
	req_msg->service_id = __cpu_to_le16(conn_req->service_id);

	reinit_completion(&htc->ctl_resp);

	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
	if (status) {
		kfree_skb(skb);
		return status;
	}

	/* wait for response */
	status = wait_for_completion_timeout(&htc->ctl_resp,
					     ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
	if (status == 0) {
		ath10k_err(ar, "Service connect timeout: %d\n", status);
		return -ETIMEDOUT;
	}

	/* we controlled the buffer creation, it's aligned */
	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
	resp_msg = &msg->connect_service_response;
	message_id = __le16_to_cpu(msg->hdr.message_id);
	service_id = __le16_to_cpu(resp_msg->service_id);

	if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
	    (htc->control_resp_len < sizeof(msg->hdr) +
	     sizeof(msg->connect_service_response))) {
		ath10k_err(ar, "Invalid resp message ID 0x%x\n", message_id);
		return -EPROTO;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTC,
		   "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
		   htc_service_name(service_id),
		   resp_msg->status, resp_msg->eid);

	conn_resp->connect_resp_code = resp_msg->status;

	/* check response status */
	if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
		ath10k_err(ar, "HTC Service %s connect request failed: 0x%x\n",
			   htc_service_name(service_id),
			   resp_msg->status);
		return -EPROTO;
	}

	assigned_eid = (enum ath10k_htc_ep_id)resp_msg->eid;
	max_msg_size = __le16_to_cpu(resp_msg->max_msg_size);

setup:

	if (assigned_eid >= ATH10K_HTC_EP_COUNT)
		return -EPROTO;

	if (max_msg_size == 0)
		return -EPROTO;

	ep = &htc->endpoint[assigned_eid];
	ep->eid = assigned_eid;

	if (ep->service_id != ATH10K_HTC_SVC_ID_UNUSED)
		return -EPROTO;

	/* return assigned endpoint to caller */
	conn_resp->eid = assigned_eid;
	conn_resp->max_msg_len = __le16_to_cpu(resp_msg->max_msg_size);

	/* setup the endpoint */
	ep->service_id = conn_req->service_id;
	ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
	ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
	ep->tx_credits = tx_alloc;
	ep->tx_credit_size = htc->target_credit_size;
	ep->tx_credits_per_max_message = ep->max_ep_message_len /
					 htc->target_credit_size;

	if (ep->max_ep_message_len % htc->target_credit_size)
		ep->tx_credits_per_max_message++;

	/* copy all the callbacks */
	ep->ep_ops = conn_req->ep_ops;

	status = ath10k_hif_map_service_to_pipe(htc->ar,
						ep->service_id,
						&ep->ul_pipe_id,
						&ep->dl_pipe_id,
						&ep->ul_is_polled,
						&ep->dl_is_polled);
	if (status)
		return status;

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
		   htc_service_name(ep->service_id), ep->ul_pipe_id,
		   ep->dl_pipe_id, ep->eid);

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot htc ep %d ul polled %d dl polled %d\n",
		   ep->eid, ep->ul_is_polled, ep->dl_is_polled);

	if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
		ep->tx_credit_flow_enabled = false;
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "boot htc service '%s' eid %d TX flow control disabled\n",
			   htc_service_name(ep->service_id), assigned_eid);
	}

	return status;
}

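/*
 * Allocate an skb for HTC TX: reserve headroom for the HTC header that
 * ath10k_htc_send() will push later and warn if the resulting data
 * pointer is not 4-byte aligned, since FW/HTC requires aligned streams.
 */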
struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr));
	if (!skb)
		return NULL;

	skb_reserve(skb, sizeof(struct ath10k_htc_hdr));

	/* FW/HTC requires 4-byte aligned streams */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "Unaligned HTC tx skb\n");

	return skb;
}

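/*
 * Tell the target that host-side HTC setup is complete by sending a
 * SETUP_COMPLETE_EX message on endpoint 0.  The message body is left
 * zeroed (no extended setup flags).
 */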
int ath10k_htc_start(struct ath10k_htc *htc)
{
	struct ath10k *ar = htc->ar;
	struct sk_buff *skb;
	int status = 0;
	struct ath10k_htc_msg *msg;

	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, sizeof(msg->hdr) + sizeof(msg->setup_complete_ext));
	memset(skb->data, 0, skb->len);

	msg = (struct ath10k_htc_msg *)skb->data;
	msg->hdr.message_id =
		__cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");

	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
	if (status) {
		kfree_skb(skb);
		return status;
	}

	return 0;
}

/* called on target arrival; registers HTC's completion callbacks with the
 * HIF layer
 */
int ath10k_htc_init(struct ath10k *ar)
{
	struct ath10k_hif_cb htc_callbacks;
	struct ath10k_htc_ep *ep = NULL;
	struct ath10k_htc *htc = &ar->htc;

	spin_lock_init(&htc->tx_lock);

	ath10k_htc_reset_endpoint_states(htc);

	/* setup HIF layer callbacks */
	htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
	htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
	htc->ar = ar;

	/* Get HIF default pipe for HTC message exchange */
	ep = &htc->endpoint[ATH10K_HTC_EP_0];

	ath10k_hif_set_callbacks(ar, &htc_callbacks);
	ath10k_hif_get_default_pipe(ar, &ep->ul_pipe_id, &ep->dl_pipe_id);

	init_completion(&htc->ctl_resp);

	return 0;
}