This source file includes following definitions.
- cmdq_sm_stopped_entry
- cmdq_sm_stopped
- cmdq_sm_init_wait_entry
- cmdq_sm_init_wait
- cmdq_sm_ready_entry
- cmdq_sm_ready
- cmdq_sm_dbell_wait_entry
- cmdq_sm_dbell_wait
- bfa_msgq_cmdq_dbell_ready
- bfa_msgq_cmdq_dbell
- __cmd_copy
- bfa_msgq_cmdq_ci_update
- bfa_msgq_cmdq_copy_next
- bfa_msgq_cmdq_copy_req
- bfa_msgq_cmdq_copy_rsp
- bfa_msgq_cmdq_attach
- rspq_sm_stopped_entry
- rspq_sm_stopped
- rspq_sm_init_wait_entry
- rspq_sm_init_wait
- rspq_sm_ready_entry
- rspq_sm_ready
- rspq_sm_dbell_wait_entry
- rspq_sm_dbell_wait
- bfa_msgq_rspq_dbell_ready
- bfa_msgq_rspq_dbell
- bfa_msgq_rspq_pi_update
- bfa_msgq_rspq_attach
- bfa_msgq_init_rsp
- bfa_msgq_init
- bfa_msgq_isr
- bfa_msgq_notify
- bfa_msgq_meminfo
- bfa_msgq_memclaim
- bfa_msgq_attach
- bfa_msgq_regisr
- bfa_msgq_cmd_post
- bfa_msgq_rsp_copy
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include "bfi.h"
15 #include "bfa_msgq.h"
16 #include "bfa_ioc.h"
17
/*
 * Invoke and clear a command entry's completion callback.
 *
 * Snapshots cbfn/cbarg, NULLs them on the entry first (so the callback
 * cannot be fired twice for the same entry), then calls the snapshot if
 * it was set.
 *
 * Wrapped in do { } while (0) so the macro behaves as a single statement:
 * the bare { } block form breaks `if (x) call_cmdq_ent_cbfn(...); else ...`
 * because the trailing semicolon becomes an extra empty statement.
 */
#define call_cmdq_ent_cbfn(_cmdq_ent, _status)				\
do {									\
	bfa_msgq_cmdcbfn_t cbfn;					\
	void *cbarg;							\
	cbfn = (_cmdq_ent)->cbfn;					\
	cbarg = (_cmdq_ent)->cbarg;					\
	(_cmdq_ent)->cbfn = NULL;					\
	(_cmdq_ent)->cbarg = NULL;					\
	if (cbfn) {							\
		cbfn(cbarg, (_status));					\
	}								\
} while (0)
30
31 static void bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq);
32 static void bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq);
33
/* Events driving the command-queue state machine. */
enum cmdq_event {
	CMDQ_E_START = 1,	/* IOC enabled; begin initialization */
	CMDQ_E_STOP = 2,	/* IOC disabled */
	CMDQ_E_FAIL = 3,	/* IOC failed */
	CMDQ_E_POST = 4,	/* command(s) written to the queue */
	CMDQ_E_INIT_RESP = 5,	/* firmware acknowledged msgq init */
	CMDQ_E_DB_READY = 6,	/* doorbell mailbox message consumed */
};
42
/* Forward declarations for the cmdq FSM states (entry fn + handler each). */
bfa_fsm_state_decl(cmdq, stopped, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, init_wait, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, ready, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, dbell_wait, struct bfa_msgq_cmdq,
		   enum cmdq_event);
48
49 static void
50 cmdq_sm_stopped_entry(struct bfa_msgq_cmdq *cmdq)
51 {
52 struct bfa_msgq_cmd_entry *cmdq_ent;
53
54 cmdq->producer_index = 0;
55 cmdq->consumer_index = 0;
56 cmdq->flags = 0;
57 cmdq->token = 0;
58 cmdq->offset = 0;
59 cmdq->bytes_to_copy = 0;
60 while (!list_empty(&cmdq->pending_q)) {
61 cmdq_ent = list_first_entry(&cmdq->pending_q,
62 struct bfa_msgq_cmd_entry, qe);
63 list_del(&cmdq_ent->qe);
64 call_cmdq_ent_cbfn(cmdq_ent, BFA_STATUS_FAILED);
65 }
66 }
67
68 static void
69 cmdq_sm_stopped(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
70 {
71 switch (event) {
72 case CMDQ_E_START:
73 bfa_fsm_set_state(cmdq, cmdq_sm_init_wait);
74 break;
75
76 case CMDQ_E_STOP:
77 case CMDQ_E_FAIL:
78
79 break;
80
81 case CMDQ_E_POST:
82 cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
83 break;
84
85 default:
86 bfa_sm_fault(event);
87 }
88 }
89
/*
 * On entering init_wait: drop the msgq init wait-counter.  When both
 * cmdq and rspq have entered init_wait the counter hits zero and the
 * wc resume callback (bfa_msgq_init) sends the INIT request to firmware.
 */
static void
cmdq_sm_init_wait_entry(struct bfa_msgq_cmdq *cmdq)
{
	bfa_wc_down(&cmdq->msgq->init_wc);
}
95
96 static void
97 cmdq_sm_init_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
98 {
99 switch (event) {
100 case CMDQ_E_STOP:
101 case CMDQ_E_FAIL:
102 bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
103 break;
104
105 case CMDQ_E_POST:
106 cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
107 break;
108
109 case CMDQ_E_INIT_RESP:
110 if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
111 cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
112 bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
113 } else
114 bfa_fsm_set_state(cmdq, cmdq_sm_ready);
115 break;
116
117 default:
118 bfa_sm_fault(event);
119 }
120 }
121
/* On entering ready: no work; the queue simply awaits the next post. */
static void
cmdq_sm_ready_entry(struct bfa_msgq_cmdq *cmdq)
{
}
126
127 static void
128 cmdq_sm_ready(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
129 {
130 switch (event) {
131 case CMDQ_E_STOP:
132 case CMDQ_E_FAIL:
133 bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
134 break;
135
136 case CMDQ_E_POST:
137 bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
138 break;
139
140 default:
141 bfa_sm_fault(event);
142 }
143 }
144
/* On entering dbell_wait: ring the producer-index doorbell to firmware. */
static void
cmdq_sm_dbell_wait_entry(struct bfa_msgq_cmdq *cmdq)
{
	bfa_msgq_cmdq_dbell(cmdq);
}
150
151 static void
152 cmdq_sm_dbell_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
153 {
154 switch (event) {
155 case CMDQ_E_STOP:
156 case CMDQ_E_FAIL:
157 bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
158 break;
159
160 case CMDQ_E_POST:
161 cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
162 break;
163
164 case CMDQ_E_DB_READY:
165 if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
166 cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
167 bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
168 } else
169 bfa_fsm_set_state(cmdq, cmdq_sm_ready);
170 break;
171
172 default:
173 bfa_sm_fault(event);
174 }
175 }
176
177 static void
178 bfa_msgq_cmdq_dbell_ready(void *arg)
179 {
180 struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg;
181 bfa_fsm_send_event(cmdq, CMDQ_E_DB_READY);
182 }
183
/*
 * Send the cmdq producer-index doorbell to firmware via the IOC mailbox.
 * If the mailbox slot is free (queue returns false), the message went out
 * immediately and we synthesize the ready callback ourselves.
 */
static void
bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq)
{
	struct bfi_msgq_h2i_db *dbell =
		(struct bfi_msgq_h2i_db *)(&cmdq->dbell_mb.msg[0]);

	memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
	bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_PI, 0);
	dbell->mh.mtag.i2htok = 0;
	/* Firmware expects big-endian indices. */
	dbell->idx.cmdq_pi = htons(cmdq->producer_index);

	if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->dbell_mb,
				bfa_msgq_cmdq_dbell_ready, cmdq)) {
		bfa_msgq_cmdq_dbell_ready(cmdq);
	}
}
200
201 static void
202 __cmd_copy(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq_cmd_entry *cmd)
203 {
204 size_t len = cmd->msg_size;
205 int num_entries = 0;
206 size_t to_copy;
207 u8 *src, *dst;
208
209 src = (u8 *)cmd->msg_hdr;
210 dst = (u8 *)cmdq->addr.kva;
211 dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);
212
213 while (len) {
214 to_copy = (len < BFI_MSGQ_CMD_ENTRY_SIZE) ?
215 len : BFI_MSGQ_CMD_ENTRY_SIZE;
216 memcpy(dst, src, to_copy);
217 len -= to_copy;
218 src += BFI_MSGQ_CMD_ENTRY_SIZE;
219 BFA_MSGQ_INDX_ADD(cmdq->producer_index, 1, cmdq->depth);
220 dst = (u8 *)cmdq->addr.kva;
221 dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);
222 num_entries++;
223 }
224
225 }
226
/*
 * Firmware consumed entries and sent us its new consumer index.
 * Record it, then drain as many pending commands as now fit; the queue
 * is kept in-order, so stop at the first command that still does not fit.
 */
static void
bfa_msgq_cmdq_ci_update(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb;
	struct bfa_msgq_cmd_entry *cmd;
	int posted = 0;

	cmdq->consumer_index = ntohs(dbell->idx.cmdq_ci);

	/* Walk the pending queue in FIFO order. */
	while (!list_empty(&cmdq->pending_q)) {
		cmd = list_first_entry(&cmdq->pending_q,
				       struct bfa_msgq_cmd_entry, qe);
		if (ntohs(cmd->msg_hdr->num_entries) <=
			BFA_MSGQ_FREE_CNT(cmdq)) {
			list_del(&cmd->qe);
			__cmd_copy(cmdq, cmd);
			posted = 1;
			/* Command is on the queue; complete it to the caller. */
			call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK);
		} else {
			/* Head does not fit yet; preserve ordering and stop. */
			break;
		}
	}

	/* Ring the doorbell only if something new was written. */
	if (posted)
		bfa_fsm_send_event(cmdq, CMDQ_E_POST);
}
254
255 static void
256 bfa_msgq_cmdq_copy_next(void *arg)
257 {
258 struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg;
259
260 if (cmdq->bytes_to_copy)
261 bfa_msgq_cmdq_copy_rsp(cmdq);
262 }
263
/*
 * Firmware asked us to re-send a region of the command queue (copy
 * request).  Capture the requested offset/length and start streaming
 * fragments back via bfa_msgq_cmdq_copy_rsp().
 */
static void
bfa_msgq_cmdq_copy_req(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_cmdq_copy_req *req =
		(struct bfi_msgq_i2h_cmdq_copy_req *)mb;

	cmdq->token = 0;	/* restart fragment sequence numbering */
	cmdq->offset = ntohs(req->offset);
	cmdq->bytes_to_copy = ntohs(req->len);
	bfa_msgq_cmdq_copy_rsp(cmdq);
}
275
/*
 * Send one fragment (up to BFI_CMD_COPY_SZ bytes) of the requested cmdq
 * region to firmware.  The token sequences the fragments; offset and
 * bytes_to_copy track progress across fragments.  If the mailbox slot was
 * free, chain directly to bfa_msgq_cmdq_copy_next() for the next fragment.
 */
static void
bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq)
{
	struct bfi_msgq_h2i_cmdq_copy_rsp *rsp =
		(struct bfi_msgq_h2i_cmdq_copy_rsp *)&cmdq->copy_mb.msg[0];
	int copied;
	u8 *addr = (u8 *)cmdq->addr.kva;

	memset(rsp, 0, sizeof(struct bfi_msgq_h2i_cmdq_copy_rsp));
	bfi_h2i_set(rsp->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_CMDQ_COPY_RSP, 0);
	rsp->mh.mtag.i2htok = htons(cmdq->token);
	/* Clamp this fragment to the per-message payload limit. */
	copied = (cmdq->bytes_to_copy >= BFI_CMD_COPY_SZ) ? BFI_CMD_COPY_SZ :
		cmdq->bytes_to_copy;
	addr += cmdq->offset;
	memcpy(rsp->data, addr, copied);

	/* Advance progress before queuing: the completion may fire inline. */
	cmdq->token++;
	cmdq->offset += copied;
	cmdq->bytes_to_copy -= copied;

	if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->copy_mb,
				bfa_msgq_cmdq_copy_next, cmdq)) {
		bfa_msgq_cmdq_copy_next(cmdq);
	}
}
301
302 static void
303 bfa_msgq_cmdq_attach(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq *msgq)
304 {
305 cmdq->depth = BFA_MSGQ_CMDQ_NUM_ENTRY;
306 INIT_LIST_HEAD(&cmdq->pending_q);
307 cmdq->msgq = msgq;
308 bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
309 }
310
311 static void bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq);
312
/* Events driving the response-queue state machine. */
enum rspq_event {
	RSPQ_E_START = 1,	/* IOC enabled; begin initialization */
	RSPQ_E_STOP = 2,	/* IOC disabled */
	RSPQ_E_FAIL = 3,	/* IOC failed */
	RSPQ_E_RESP = 4,	/* responses consumed; CI must be published */
	RSPQ_E_INIT_RESP = 5,	/* firmware acknowledged msgq init */
	RSPQ_E_DB_READY = 6,	/* doorbell mailbox message consumed */
};
321
/* Forward declarations for the rspq FSM states (entry fn + handler each). */
bfa_fsm_state_decl(rspq, stopped, struct bfa_msgq_rspq, enum rspq_event);
bfa_fsm_state_decl(rspq, init_wait, struct bfa_msgq_rspq,
		   enum rspq_event);
bfa_fsm_state_decl(rspq, ready, struct bfa_msgq_rspq, enum rspq_event);
bfa_fsm_state_decl(rspq, dbell_wait, struct bfa_msgq_rspq,
		   enum rspq_event);
328
/* On entering stopped: reset indices and flags to a pristine state. */
static void
rspq_sm_stopped_entry(struct bfa_msgq_rspq *rspq)
{
	rspq->producer_index = 0;
	rspq->consumer_index = 0;
	rspq->flags = 0;
}
336
337 static void
338 rspq_sm_stopped(struct bfa_msgq_rspq *rspq, enum rspq_event event)
339 {
340 switch (event) {
341 case RSPQ_E_START:
342 bfa_fsm_set_state(rspq, rspq_sm_init_wait);
343 break;
344
345 case RSPQ_E_STOP:
346 case RSPQ_E_FAIL:
347
348 break;
349
350 default:
351 bfa_sm_fault(event);
352 }
353 }
354
/*
 * On entering init_wait: drop the shared msgq init wait-counter; when
 * the cmdq has done the same, the INIT request is sent to firmware.
 */
static void
rspq_sm_init_wait_entry(struct bfa_msgq_rspq *rspq)
{
	bfa_wc_down(&rspq->msgq->init_wc);
}
360
361 static void
362 rspq_sm_init_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)
363 {
364 switch (event) {
365 case RSPQ_E_FAIL:
366 case RSPQ_E_STOP:
367 bfa_fsm_set_state(rspq, rspq_sm_stopped);
368 break;
369
370 case RSPQ_E_INIT_RESP:
371 bfa_fsm_set_state(rspq, rspq_sm_ready);
372 break;
373
374 default:
375 bfa_sm_fault(event);
376 }
377 }
378
/* On entering ready: no work; wait for the next batch of responses. */
static void
rspq_sm_ready_entry(struct bfa_msgq_rspq *rspq)
{
}
383
384 static void
385 rspq_sm_ready(struct bfa_msgq_rspq *rspq, enum rspq_event event)
386 {
387 switch (event) {
388 case RSPQ_E_STOP:
389 case RSPQ_E_FAIL:
390 bfa_fsm_set_state(rspq, rspq_sm_stopped);
391 break;
392
393 case RSPQ_E_RESP:
394 bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
395 break;
396
397 default:
398 bfa_sm_fault(event);
399 }
400 }
401
/*
 * On entering dbell_wait: publish the consumer index to firmware, but
 * skip the mailbox write entirely while the IOC is disabled.
 */
static void
rspq_sm_dbell_wait_entry(struct bfa_msgq_rspq *rspq)
{
	if (!bfa_nw_ioc_is_disabled(rspq->msgq->ioc))
		bfa_msgq_rspq_dbell(rspq);
}
408
409 static void
410 rspq_sm_dbell_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)
411 {
412 switch (event) {
413 case RSPQ_E_STOP:
414 case RSPQ_E_FAIL:
415 bfa_fsm_set_state(rspq, rspq_sm_stopped);
416 break;
417
418 case RSPQ_E_RESP:
419 rspq->flags |= BFA_MSGQ_RSPQ_F_DB_UPDATE;
420 break;
421
422 case RSPQ_E_DB_READY:
423 if (rspq->flags & BFA_MSGQ_RSPQ_F_DB_UPDATE) {
424 rspq->flags &= ~BFA_MSGQ_RSPQ_F_DB_UPDATE;
425 bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
426 } else
427 bfa_fsm_set_state(rspq, rspq_sm_ready);
428 break;
429
430 default:
431 bfa_sm_fault(event);
432 }
433 }
434
435 static void
436 bfa_msgq_rspq_dbell_ready(void *arg)
437 {
438 struct bfa_msgq_rspq *rspq = (struct bfa_msgq_rspq *)arg;
439 bfa_fsm_send_event(rspq, RSPQ_E_DB_READY);
440 }
441
/*
 * Send the rspq consumer-index doorbell to firmware via the IOC mailbox.
 * If the mailbox slot is free (queue returns false), the message went out
 * immediately and we synthesize the ready callback ourselves.
 */
static void
bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq)
{
	struct bfi_msgq_h2i_db *dbell =
		(struct bfi_msgq_h2i_db *)(&rspq->dbell_mb.msg[0]);

	memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
	bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_CI, 0);
	dbell->mh.mtag.i2htok = 0;
	/* Firmware expects big-endian indices. */
	dbell->idx.rspq_ci = htons(rspq->consumer_index);

	if (!bfa_nw_ioc_mbox_queue(rspq->msgq->ioc, &rspq->dbell_mb,
				bfa_msgq_rspq_dbell_ready, rspq)) {
		bfa_msgq_rspq_dbell_ready(rspq);
	}
}
458
/*
 * Firmware advanced the response-queue producer index.  Walk every
 * entry between our consumer index and the new producer index and
 * dispatch it to the handler registered for its message class.
 */
static void
bfa_msgq_rspq_pi_update(struct bfa_msgq_rspq *rspq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb;
	struct bfi_msgq_mhdr *msghdr;
	int num_entries;
	int mc;
	u8 *rspq_qe;

	rspq->producer_index = ntohs(dbell->idx.rspq_pi);

	while (rspq->consumer_index != rspq->producer_index) {
		rspq_qe = (u8 *)rspq->addr.kva;
		rspq_qe += (rspq->consumer_index * BFI_MSGQ_RSP_ENTRY_SIZE);
		msghdr = (struct bfi_msgq_mhdr *)rspq_qe;

		mc = msghdr->msg_class;
		num_entries = ntohs(msghdr->num_entries);

		/*
		 * Out-of-range class or no registered handler: stop
		 * draining; remaining entries stay unconsumed.
		 * NOTE(review): this leaves consumer_index behind
		 * producer_index permanently — presumably intentional as
		 * a defensive bail-out; confirm against firmware contract.
		 */
		if ((mc >= BFI_MC_MAX) || (rspq->rsphdlr[mc].cbfn == NULL))
			break;

		(rspq->rsphdlr[mc].cbfn)(rspq->rsphdlr[mc].cbarg, msghdr);

		/* A response may span multiple fixed-size entries. */
		BFA_MSGQ_INDX_ADD(rspq->consumer_index, num_entries,
				rspq->depth);
	}

	/* Publish the new consumer index back to firmware. */
	bfa_fsm_send_event(rspq, RSPQ_E_RESP);
}
489
490 static void
491 bfa_msgq_rspq_attach(struct bfa_msgq_rspq *rspq, struct bfa_msgq *msgq)
492 {
493 rspq->depth = BFA_MSGQ_RSPQ_NUM_ENTRY;
494 rspq->msgq = msgq;
495 bfa_fsm_set_state(rspq, rspq_sm_stopped);
496 }
497
/*
 * Firmware acknowledged the msgq INIT request; release both state
 * machines from their init_wait states.  The message payload (mb) is
 * not inspected here.
 */
static void
bfa_msgq_init_rsp(struct bfa_msgq *msgq,
		struct bfi_mbmsg *mb)
{
	bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_INIT_RESP);
	bfa_fsm_send_event(&msgq->rspq, RSPQ_E_INIT_RESP);
}
505
/*
 * Wait-counter resume callback: both cmdq and rspq have entered
 * init_wait.  Build the BFI_MSGQ_H2I_INIT_REQ describing each queue's
 * DMA address and depth and hand it to the IOC mailbox.
 */
static void
bfa_msgq_init(void *arg)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)arg;
	struct bfi_msgq_cfg_req *msgq_cfg =
		(struct bfi_msgq_cfg_req *)&msgq->init_mb.msg[0];

	memset(msgq_cfg, 0, sizeof(struct bfi_msgq_cfg_req));
	bfi_h2i_set(msgq_cfg->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_INIT_REQ, 0);
	msgq_cfg->mh.mtag.i2htok = 0;

	/* Addresses and depths are converted to firmware byte order. */
	bfa_dma_be_addr_set(msgq_cfg->cmdq.addr, msgq->cmdq.addr.pa);
	msgq_cfg->cmdq.q_depth = htons(msgq->cmdq.depth);
	bfa_dma_be_addr_set(msgq_cfg->rspq.addr, msgq->rspq.addr.pa);
	msgq_cfg->rspq.q_depth = htons(msgq->rspq.depth);

	/* No completion callback needed; the INIT response drives the FSMs. */
	bfa_nw_ioc_mbox_queue(msgq->ioc, &msgq->init_mb, NULL, NULL);
}
524
/*
 * Mailbox ISR for the BFI_MC_MSGQ message class: demultiplex incoming
 * firmware messages to the appropriate handler by message id.
 */
static void
bfa_msgq_isr(void *cbarg, struct bfi_mbmsg *msg)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;

	switch (msg->mh.msg_id) {
	case BFI_MSGQ_I2H_INIT_RSP:
		bfa_msgq_init_rsp(msgq, msg);
		break;

	case BFI_MSGQ_I2H_DOORBELL_PI:
		bfa_msgq_rspq_pi_update(&msgq->rspq, msg);
		break;

	case BFI_MSGQ_I2H_DOORBELL_CI:
		bfa_msgq_cmdq_ci_update(&msgq->cmdq, msg);
		break;

	case BFI_MSGQ_I2H_CMDQ_COPY_REQ:
		bfa_msgq_cmdq_copy_req(&msgq->cmdq, msg);
		break;

	default:
		/* Unknown message id indicates a driver/firmware mismatch. */
		BUG_ON(1);
	}
}
551
/*
 * IOC lifecycle notification.  On enable, arm the init wait-counter
 * with one count per queue *before* sending each START event (the
 * state entry functions call bfa_wc_down), then bfa_wc_wait drops the
 * initial reference so bfa_msgq_init fires once both queues are ready.
 * On disable/failure, stop or fail both state machines.
 */
static void
bfa_msgq_notify(void *cbarg, enum bfa_ioc_event event)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;

	switch (event) {
	case BFA_IOC_E_ENABLED:
		bfa_wc_init(&msgq->init_wc, bfa_msgq_init, msgq);
		bfa_wc_up(&msgq->init_wc);
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_START);
		bfa_wc_up(&msgq->init_wc);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_START);
		bfa_wc_wait(&msgq->init_wc);
		break;

	case BFA_IOC_E_DISABLED:
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_STOP);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_STOP);
		break;

	case BFA_IOC_E_FAILED:
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_FAIL);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_FAIL);
		break;

	default:
		break;
	}
}
581
582 u32
583 bfa_msgq_meminfo(void)
584 {
585 return roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ) +
586 roundup(BFA_MSGQ_RSPQ_SIZE, BFA_DMA_ALIGN_SZ);
587 }
588
589 void
590 bfa_msgq_memclaim(struct bfa_msgq *msgq, u8 *kva, u64 pa)
591 {
592 msgq->cmdq.addr.kva = kva;
593 msgq->cmdq.addr.pa = pa;
594
595 kva += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ);
596 pa += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ);
597
598 msgq->rspq.addr.kva = kva;
599 msgq->rspq.addr.pa = pa;
600 }
601
/*
 * Attach the message queue module to an IOC: initialize both queues,
 * register the mailbox ISR for the MSGQ message class, and register
 * for IOC enable/disable/fail notifications.
 */
void
bfa_msgq_attach(struct bfa_msgq *msgq, struct bfa_ioc *ioc)
{
	msgq->ioc    = ioc;

	bfa_msgq_cmdq_attach(&msgq->cmdq, msgq);
	bfa_msgq_rspq_attach(&msgq->rspq, msgq);

	bfa_nw_ioc_mbox_regisr(msgq->ioc, BFI_MC_MSGQ, bfa_msgq_isr, msgq);
	bfa_ioc_notify_init(&msgq->ioc_notify, bfa_msgq_notify, msgq);
	bfa_nw_ioc_notify_register(msgq->ioc, &msgq->ioc_notify);
}
614
/*
 * Register a response handler for a message class.  Responses of class
 * mc arriving on the rspq are dispatched to cbfn(cbarg, msghdr).
 * NOTE(review): mc is used to index rsphdlr[] unchecked — callers are
 * presumably trusted to pass mc < BFI_MC_MAX.
 */
void
bfa_msgq_regisr(struct bfa_msgq *msgq, enum bfi_mclass mc,
		bfa_msgq_mcfunc_t cbfn, void *cbarg)
{
	msgq->rspq.rsphdlr[mc].cbfn = cbfn;
	msgq->rspq.rsphdlr[mc].cbarg = cbarg;
}
622
623 void
624 bfa_msgq_cmd_post(struct bfa_msgq *msgq, struct bfa_msgq_cmd_entry *cmd)
625 {
626 if (ntohs(cmd->msg_hdr->num_entries) <=
627 BFA_MSGQ_FREE_CNT(&msgq->cmdq)) {
628 __cmd_copy(&msgq->cmdq, cmd);
629 call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK);
630 bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_POST);
631 } else {
632 list_add_tail(&cmd->qe, &msgq->cmdq.pending_q);
633 }
634 }
635
/*
 * Copy buf_len bytes of the current response (starting at the consumer
 * index) out of the circular response queue into buf, walking
 * fixed-size entries with wrap-around.  The queue's consumer_index
 * itself is not advanced — only the local copy (ci) moves.
 */
void
bfa_msgq_rsp_copy(struct bfa_msgq *msgq, u8 *buf, size_t buf_len)
{
	struct bfa_msgq_rspq *rspq = &msgq->rspq;
	size_t len = buf_len;
	size_t to_copy;
	int ci;
	u8 *src, *dst;

	ci = rspq->consumer_index;
	src = (u8 *)rspq->addr.kva;
	src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE);
	dst = buf;

	while (len) {
		/* Last fragment may be shorter than a full entry. */
		to_copy = (len < BFI_MSGQ_RSP_ENTRY_SIZE) ?
				len : BFI_MSGQ_RSP_ENTRY_SIZE;
		memcpy(dst, src, to_copy);
		len -= to_copy;
		dst += BFI_MSGQ_RSP_ENTRY_SIZE;
		/* Advance the local index with wrap; recompute source. */
		BFA_MSGQ_INDX_ADD(ci, 1, rspq->depth);
		src = (u8 *)rspq->addr.kva;
		src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE);
	}
}