1/*
2 * Linux network driver for QLogic BR-series Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
15 * Copyright (c) 2014-2015 QLogic Corporation
16 * All rights reserved
17 * www.qlogic.com
18 */
19
20/* MSGQ module source file. */
21
22#include "bfi.h"
23#include "bfa_msgq.h"
24#include "bfa_ioc.h"
25
/* Invoke and clear a cmdq entry's completion callback.
 *
 * The callback pointer and argument are latched into locals and NULLed
 * on the entry *before* the call, so the entry may be safely reused or
 * re-posted from inside the callback.  Wrapped in do { } while (0) so
 * the macro expands as a single statement (safe in unbraced if/else).
 */
#define call_cmdq_ent_cbfn(_cmdq_ent, _status)				\
do {									\
	bfa_msgq_cmdcbfn_t cbfn;					\
	void *cbarg;							\
	cbfn = (_cmdq_ent)->cbfn;					\
	cbarg = (_cmdq_ent)->cbarg;					\
	(_cmdq_ent)->cbfn = NULL;					\
	(_cmdq_ent)->cbarg = NULL;					\
	if (cbfn)							\
		cbfn(cbarg, (_status));					\
} while (0)
38
39static void bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq);
40static void bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq);
41
/* Events driving the command queue (cmdq) state machine. */
enum cmdq_event {
	CMDQ_E_START			= 1,	/* IOC enabled */
	CMDQ_E_STOP			= 2,	/* IOC disabled */
	CMDQ_E_FAIL			= 3,	/* IOC failed */
	CMDQ_E_POST			= 4,	/* command copied into the ring */
	CMDQ_E_INIT_RESP		= 5,	/* firmware MSGQ init response */
	CMDQ_E_DB_READY			= 6,	/* doorbell mailbox delivered */
};
50
51bfa_fsm_state_decl(cmdq, stopped, struct bfa_msgq_cmdq, enum cmdq_event);
52bfa_fsm_state_decl(cmdq, init_wait, struct bfa_msgq_cmdq, enum cmdq_event);
53bfa_fsm_state_decl(cmdq, ready, struct bfa_msgq_cmdq, enum cmdq_event);
54bfa_fsm_state_decl(cmdq, dbell_wait, struct bfa_msgq_cmdq,
55			enum cmdq_event);
56
/* Reset all cmdq soft state and fail any commands still pending.
 *
 * Entered on stop/failure and at attach time; every queued command is
 * dequeued and completed with BFA_STATUS_FAILED so no caller is left
 * waiting for a response that will never arrive.
 */
static void
cmdq_sm_stopped_entry(struct bfa_msgq_cmdq *cmdq)
{
	struct bfa_msgq_cmd_entry *cmdq_ent;

	cmdq->producer_index = 0;
	cmdq->consumer_index = 0;
	cmdq->flags = 0;
	cmdq->token = 0;
	cmdq->offset = 0;
	cmdq->bytes_to_copy = 0;
	/* Drain the pending list, failing each command's callback. */
	while (!list_empty(&cmdq->pending_q)) {
		bfa_q_deq(&cmdq->pending_q, &cmdq_ent);
		bfa_q_qe_init(&cmdq_ent->qe);
		call_cmdq_ent_cbfn(cmdq_ent, BFA_STATUS_FAILED);
	}
}
74
75static void
76cmdq_sm_stopped(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
77{
78	switch (event) {
79	case CMDQ_E_START:
80		bfa_fsm_set_state(cmdq, cmdq_sm_init_wait);
81		break;
82
83	case CMDQ_E_STOP:
84	case CMDQ_E_FAIL:
85		/* No-op */
86		break;
87
88	case CMDQ_E_POST:
89		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
90		break;
91
92	default:
93		bfa_sm_fault(event);
94	}
95}
96
/* Entering init_wait: drop our reference on the msgq init wait counter.
 * Once both cmdq and rspq reach their init_wait states the counter
 * completes and bfa_msgq_init() sends the MSGQ config request.
 */
static void
cmdq_sm_init_wait_entry(struct bfa_msgq_cmdq *cmdq)
{
	bfa_wc_down(&cmdq->msgq->init_wc);
}
102
/* Waiting for the firmware's MSGQ init response.  Posts that arrive in
 * the meantime are latched in BFA_MSGQ_CMDQ_F_DB_UPDATE so the doorbell
 * is rung as soon as the queue comes up.
 */
static void
cmdq_sm_init_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
		break;

	case CMDQ_E_POST:
		/* Can't ring the doorbell yet; remember for later. */
		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
		break;

	case CMDQ_E_INIT_RESP:
		/* Go straight to dbell_wait if a post was deferred. */
		if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
			cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
			bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
		} else
			bfa_fsm_set_state(cmdq, cmdq_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}
128
/* No entry action for the ready state. */
static void
cmdq_sm_ready_entry(struct bfa_msgq_cmdq *cmdq)
{
}
133
134static void
135cmdq_sm_ready(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
136{
137	switch (event) {
138	case CMDQ_E_STOP:
139	case CMDQ_E_FAIL:
140		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
141		break;
142
143	case CMDQ_E_POST:
144		bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
145		break;
146
147	default:
148		bfa_sm_fault(event);
149	}
150}
151
/* Entering dbell_wait: ring the producer-index doorbell toward the
 * firmware; CMDQ_E_DB_READY arrives once the mailbox is delivered.
 */
static void
cmdq_sm_dbell_wait_entry(struct bfa_msgq_cmdq *cmdq)
{
	bfa_msgq_cmdq_dbell(cmdq);
}
157
/* Doorbell in flight.  Posts during this window set the update flag;
 * when the doorbell completes we re-enter dbell_wait (ringing again
 * with the newer producer index) or settle into ready.
 */
static void
cmdq_sm_dbell_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
		break;

	case CMDQ_E_POST:
		/* Coalesce: one more doorbell after the current one. */
		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
		break;

	case CMDQ_E_DB_READY:
		if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
			cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
			/* Re-entry runs dbell_wait_entry -> ring again. */
			bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
		} else
			bfa_fsm_set_state(cmdq, cmdq_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}
183
/* Mailbox completion callback: the doorbell message has been sent. */
static void
bfa_msgq_cmdq_dbell_ready(void *arg)
{
	struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg;
	bfa_fsm_send_event(cmdq, CMDQ_E_DB_READY);
}
190
/* Build and send the host-to-firmware doorbell carrying the current
 * cmdq producer index (big-endian on the wire).
 */
static void
bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq)
{
	struct bfi_msgq_h2i_db *dbell =
		(struct bfi_msgq_h2i_db *)(&cmdq->dbell_mb.msg[0]);

	memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
	bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_PI, 0);
	dbell->mh.mtag.i2htok = 0;
	dbell->idx.cmdq_pi = htons(cmdq->producer_index);

	/* NOTE(review): presumably a zero return means the mailbox was
	 * written immediately rather than queued for deferred delivery,
	 * so the ready callback must be invoked inline — confirm against
	 * bfa_nw_ioc_mbox_queue().
	 */
	if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->dbell_mb,
				bfa_msgq_cmdq_dbell_ready, cmdq)) {
		bfa_msgq_cmdq_dbell_ready(cmdq);
	}
}
207
208static void
209__cmd_copy(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq_cmd_entry *cmd)
210{
211	size_t len = cmd->msg_size;
212	int num_entries = 0;
213	size_t to_copy;
214	u8 *src, *dst;
215
216	src = (u8 *)cmd->msg_hdr;
217	dst = (u8 *)cmdq->addr.kva;
218	dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);
219
220	while (len) {
221		to_copy = (len < BFI_MSGQ_CMD_ENTRY_SIZE) ?
222				len : BFI_MSGQ_CMD_ENTRY_SIZE;
223		memcpy(dst, src, to_copy);
224		len -= to_copy;
225		src += BFI_MSGQ_CMD_ENTRY_SIZE;
226		BFA_MSGQ_INDX_ADD(cmdq->producer_index, 1, cmdq->depth);
227		dst = (u8 *)cmdq->addr.kva;
228		dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);
229		num_entries++;
230	}
231
232}
233
234static void
235bfa_msgq_cmdq_ci_update(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
236{
237	struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb;
238	struct bfa_msgq_cmd_entry *cmd;
239	int posted = 0;
240
241	cmdq->consumer_index = ntohs(dbell->idx.cmdq_ci);
242
243	/* Walk through pending list to see if the command can be posted */
244	while (!list_empty(&cmdq->pending_q)) {
245		cmd =
246		(struct bfa_msgq_cmd_entry *)bfa_q_first(&cmdq->pending_q);
247		if (ntohs(cmd->msg_hdr->num_entries) <=
248			BFA_MSGQ_FREE_CNT(cmdq)) {
249			list_del(&cmd->qe);
250			__cmd_copy(cmdq, cmd);
251			posted = 1;
252			call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK);
253		} else {
254			break;
255		}
256	}
257
258	if (posted)
259		bfa_fsm_send_event(cmdq, CMDQ_E_POST);
260}
261
/* Mailbox completion for a copy-response chunk: keep sending chunks
 * until bytes_to_copy is exhausted.
 */
static void
bfa_msgq_cmdq_copy_next(void *arg)
{
	struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg;

	if (cmdq->bytes_to_copy)
		bfa_msgq_cmdq_copy_rsp(cmdq);
}
270
/* Firmware asks the host to copy back a region of the cmdq ring.
 * Record the requested offset/length, reset the chunk token, and send
 * the first copy-response chunk.
 */
static void
bfa_msgq_cmdq_copy_req(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_cmdq_copy_req *req =
		(struct bfi_msgq_i2h_cmdq_copy_req *)mb;

	cmdq->token = 0;
	cmdq->offset = ntohs(req->offset);
	cmdq->bytes_to_copy = ntohs(req->len);
	bfa_msgq_cmdq_copy_rsp(cmdq);
}
282
/* Send one chunk (at most BFI_CMD_COPY_SZ bytes) of the cmdq ring back
 * to the firmware, advancing token/offset/bytes_to_copy.  If the
 * mailbox call does not defer delivery, chain straight into
 * bfa_msgq_cmdq_copy_next() for the following chunk.
 */
static void
bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq)
{
	struct bfi_msgq_h2i_cmdq_copy_rsp *rsp =
		(struct bfi_msgq_h2i_cmdq_copy_rsp *)&cmdq->copy_mb.msg[0];
	int copied;
	u8 *addr = (u8 *)cmdq->addr.kva;

	memset(rsp, 0, sizeof(struct bfi_msgq_h2i_cmdq_copy_rsp));
	bfi_h2i_set(rsp->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_CMDQ_COPY_RSP, 0);
	/* Token sequences the chunks for the firmware. */
	rsp->mh.mtag.i2htok = htons(cmdq->token);
	/* Clamp this chunk to the per-message copy buffer size. */
	copied = (cmdq->bytes_to_copy >= BFI_CMD_COPY_SZ) ? BFI_CMD_COPY_SZ :
		cmdq->bytes_to_copy;
	addr += cmdq->offset;
	memcpy(rsp->data, addr, copied);

	cmdq->token++;
	cmdq->offset += copied;
	cmdq->bytes_to_copy -= copied;

	if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->copy_mb,
				bfa_msgq_cmdq_copy_next, cmdq)) {
		bfa_msgq_cmdq_copy_next(cmdq);
	}
}
308
/* One-time cmdq setup at msgq attach.
 *
 * pending_q must be initialized before entering the stopped state,
 * since cmdq_sm_stopped_entry() walks that list.
 */
static void
bfa_msgq_cmdq_attach(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq *msgq)
{
	cmdq->depth = BFA_MSGQ_CMDQ_NUM_ENTRY;
	INIT_LIST_HEAD(&cmdq->pending_q);
	cmdq->msgq = msgq;
	bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
}
317
318static void bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq);
319
/* Events driving the response queue (rspq) state machine. */
enum rspq_event {
	RSPQ_E_START			= 1,	/* IOC enabled */
	RSPQ_E_STOP			= 2,	/* IOC disabled */
	RSPQ_E_FAIL			= 3,	/* IOC failed */
	RSPQ_E_RESP			= 4,	/* responses consumed */
	RSPQ_E_INIT_RESP		= 5,	/* firmware MSGQ init response */
	RSPQ_E_DB_READY			= 6,	/* doorbell mailbox delivered */
};
328
329bfa_fsm_state_decl(rspq, stopped, struct bfa_msgq_rspq, enum rspq_event);
330bfa_fsm_state_decl(rspq, init_wait, struct bfa_msgq_rspq,
331			enum rspq_event);
332bfa_fsm_state_decl(rspq, ready, struct bfa_msgq_rspq, enum rspq_event);
333bfa_fsm_state_decl(rspq, dbell_wait, struct bfa_msgq_rspq,
334			enum rspq_event);
335
/* Reset rspq soft state on entering the stopped state. */
static void
rspq_sm_stopped_entry(struct bfa_msgq_rspq *rspq)
{
	rspq->producer_index = 0;
	rspq->consumer_index = 0;
	rspq->flags = 0;
}
343
344static void
345rspq_sm_stopped(struct bfa_msgq_rspq *rspq, enum rspq_event event)
346{
347	switch (event) {
348	case RSPQ_E_START:
349		bfa_fsm_set_state(rspq, rspq_sm_init_wait);
350		break;
351
352	case RSPQ_E_STOP:
353	case RSPQ_E_FAIL:
354		/* No-op */
355		break;
356
357	default:
358		bfa_sm_fault(event);
359	}
360}
361
/* Entering init_wait: drop our reference on the msgq init wait counter
 * (see cmdq_sm_init_wait_entry(); both queues gate bfa_msgq_init()).
 */
static void
rspq_sm_init_wait_entry(struct bfa_msgq_rspq *rspq)
{
	bfa_wc_down(&rspq->msgq->init_wc);
}
367
368static void
369rspq_sm_init_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)
370{
371	switch (event) {
372	case RSPQ_E_FAIL:
373	case RSPQ_E_STOP:
374		bfa_fsm_set_state(rspq, rspq_sm_stopped);
375		break;
376
377	case RSPQ_E_INIT_RESP:
378		bfa_fsm_set_state(rspq, rspq_sm_ready);
379		break;
380
381	default:
382		bfa_sm_fault(event);
383	}
384}
385
/* No entry action for the ready state. */
static void
rspq_sm_ready_entry(struct bfa_msgq_rspq *rspq)
{
}
390
/* Ready state: responses consumed (RSPQ_E_RESP) move the FSM to
 * dbell_wait, which acknowledges the new consumer index to firmware.
 */
static void
rspq_sm_ready(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_STOP:
	case RSPQ_E_FAIL:
		bfa_fsm_set_state(rspq, rspq_sm_stopped);
		break;

	case RSPQ_E_RESP:
		bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
408
/* Entering dbell_wait: ring the consumer-index doorbell, but skip it
 * when the IOC is disabled (no point talking to dead hardware).
 */
static void
rspq_sm_dbell_wait_entry(struct bfa_msgq_rspq *rspq)
{
	if (!bfa_nw_ioc_is_disabled(rspq->msgq->ioc))
		bfa_msgq_rspq_dbell(rspq);
}
415
/* Doorbell in flight.  Further consumed responses set the update flag;
 * on completion we either ring again (re-enter dbell_wait) or settle
 * into ready.
 */
static void
rspq_sm_dbell_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_STOP:
	case RSPQ_E_FAIL:
		bfa_fsm_set_state(rspq, rspq_sm_stopped);
		break;

	case RSPQ_E_RESP:
		/* Coalesce: one more doorbell after the current one. */
		rspq->flags |= BFA_MSGQ_RSPQ_F_DB_UPDATE;
		break;

	case RSPQ_E_DB_READY:
		if (rspq->flags & BFA_MSGQ_RSPQ_F_DB_UPDATE) {
			rspq->flags &= ~BFA_MSGQ_RSPQ_F_DB_UPDATE;
			/* Re-entry runs dbell_wait_entry -> ring again. */
			bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
		} else
			bfa_fsm_set_state(rspq, rspq_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}
441
/* Mailbox completion callback: the doorbell message has been sent. */
static void
bfa_msgq_rspq_dbell_ready(void *arg)
{
	struct bfa_msgq_rspq *rspq = (struct bfa_msgq_rspq *)arg;
	bfa_fsm_send_event(rspq, RSPQ_E_DB_READY);
}
448
/* Build and send the host-to-firmware doorbell carrying the current
 * rspq consumer index (big-endian on the wire).
 */
static void
bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq)
{
	struct bfi_msgq_h2i_db *dbell =
		(struct bfi_msgq_h2i_db *)(&rspq->dbell_mb.msg[0]);

	memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
	bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_CI, 0);
	dbell->mh.mtag.i2htok = 0;
	dbell->idx.rspq_ci = htons(rspq->consumer_index);

	/* If the mailbox call did not defer delivery, complete the
	 * doorbell inline (mirrors bfa_msgq_cmdq_dbell()).
	 */
	if (!bfa_nw_ioc_mbox_queue(rspq->msgq->ioc, &rspq->dbell_mb,
				bfa_msgq_rspq_dbell_ready, rspq)) {
		bfa_msgq_rspq_dbell_ready(rspq);
	}
}
465
/* Firmware producer-index doorbell: walk the rspq ring from the
 * consumer index to the new producer index, dispatching each response
 * to the handler registered for its message class.
 *
 * Processing stops early (break, consumer index NOT advanced past the
 * offending entry) when a message class is out of range or has no
 * registered handler.  RSPQ_E_RESP is then sent so the FSM acks the
 * consumed entries back to firmware.
 */
static void
bfa_msgq_rspq_pi_update(struct bfa_msgq_rspq *rspq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb;
	struct bfi_msgq_mhdr *msghdr;
	int num_entries;
	int mc;
	u8 *rspq_qe;

	rspq->producer_index = ntohs(dbell->idx.rspq_pi);

	while (rspq->consumer_index != rspq->producer_index) {
		rspq_qe = (u8 *)rspq->addr.kva;
		rspq_qe += (rspq->consumer_index * BFI_MSGQ_RSP_ENTRY_SIZE);
		msghdr = (struct bfi_msgq_mhdr *)rspq_qe;

		mc = msghdr->msg_class;
		num_entries = ntohs(msghdr->num_entries);

		/* Unknown class or no handler registered: stop here. */
		if ((mc >= BFI_MC_MAX) || (rspq->rsphdlr[mc].cbfn == NULL))
			break;

		(rspq->rsphdlr[mc].cbfn)(rspq->rsphdlr[mc].cbarg, msghdr);

		/* A response may span multiple ring entries. */
		BFA_MSGQ_INDX_ADD(rspq->consumer_index, num_entries,
				rspq->depth);
	}

	bfa_fsm_send_event(rspq, RSPQ_E_RESP);
}
496
/* One-time rspq setup at msgq attach. */
static void
bfa_msgq_rspq_attach(struct bfa_msgq_rspq *rspq, struct bfa_msgq *msgq)
{
	rspq->depth = BFA_MSGQ_RSPQ_NUM_ENTRY;
	rspq->msgq = msgq;
	bfa_fsm_set_state(rspq, rspq_sm_stopped);
}
504
/* Firmware acknowledged the MSGQ init request: wake both queue FSMs. */
static void
bfa_msgq_init_rsp(struct bfa_msgq *msgq,
		 struct bfi_mbmsg *mb)
{
	bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_INIT_RESP);
	bfa_fsm_send_event(&msgq->rspq, RSPQ_E_INIT_RESP);
}
512
/* Wait-counter resume callback: both queues have reached init_wait, so
 * send the MSGQ config request (ring DMA addresses and depths, in
 * big-endian) to the firmware.
 */
static void
bfa_msgq_init(void *arg)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)arg;
	struct bfi_msgq_cfg_req *msgq_cfg =
		(struct bfi_msgq_cfg_req *)&msgq->init_mb.msg[0];

	memset(msgq_cfg, 0, sizeof(struct bfi_msgq_cfg_req));
	bfi_h2i_set(msgq_cfg->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_INIT_REQ, 0);
	msgq_cfg->mh.mtag.i2htok = 0;

	bfa_dma_be_addr_set(msgq_cfg->cmdq.addr, msgq->cmdq.addr.pa);
	msgq_cfg->cmdq.q_depth = htons(msgq->cmdq.depth);
	bfa_dma_be_addr_set(msgq_cfg->rspq.addr, msgq->rspq.addr.pa);
	msgq_cfg->rspq.q_depth = htons(msgq->rspq.depth);

	/* No completion callback needed; the reply arrives via the ISR
	 * as BFI_MSGQ_I2H_INIT_RSP.
	 */
	bfa_nw_ioc_mbox_queue(msgq->ioc, &msgq->init_mb, NULL, NULL);
}
531
/* Mailbox ISR for the MSGQ message class: dispatch each incoming
 * firmware message by its message id.  An unknown id is a driver or
 * firmware bug, hence BUG_ON.
 */
static void
bfa_msgq_isr(void *cbarg, struct bfi_mbmsg *msg)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;

	switch (msg->mh.msg_id) {
	case BFI_MSGQ_I2H_INIT_RSP:
		bfa_msgq_init_rsp(msgq, msg);
		break;

	case BFI_MSGQ_I2H_DOORBELL_PI:
		bfa_msgq_rspq_pi_update(&msgq->rspq, msg);
		break;

	case BFI_MSGQ_I2H_DOORBELL_CI:
		bfa_msgq_cmdq_ci_update(&msgq->cmdq, msg);
		break;

	case BFI_MSGQ_I2H_CMDQ_COPY_REQ:
		bfa_msgq_cmdq_copy_req(&msgq->cmdq, msg);
		break;

	default:
		BUG_ON(1);
	}
}
558
/* IOC event notification: drive both queue state machines.
 *
 * On enable, the init wait counter takes one reference per queue; each
 * queue's init_wait entry action drops its reference, so bfa_msgq_init()
 * (registered as the counter's resume callback) fires only after both
 * FSMs have reached init_wait.
 */
static void
bfa_msgq_notify(void *cbarg, enum bfa_ioc_event event)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;

	switch (event) {
	case BFA_IOC_E_ENABLED:
		bfa_wc_init(&msgq->init_wc, bfa_msgq_init, msgq);
		bfa_wc_up(&msgq->init_wc);
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_START);
		bfa_wc_up(&msgq->init_wc);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_START);
		bfa_wc_wait(&msgq->init_wc);
		break;

	case BFA_IOC_E_DISABLED:
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_STOP);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_STOP);
		break;

	case BFA_IOC_E_FAILED:
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_FAIL);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_FAIL);
		break;

	default:
		break;
	}
}
588
589u32
590bfa_msgq_meminfo(void)
591{
592	return roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ) +
593		roundup(BFA_MSGQ_RSPQ_SIZE, BFA_DMA_ALIGN_SZ);
594}
595
/* Carve the caller-provided DMA region into the cmdq ring followed by
 * the rspq ring.  kva/pa must reference the same region whose size was
 * computed by bfa_msgq_meminfo().
 */
void
bfa_msgq_memclaim(struct bfa_msgq *msgq, u8 *kva, u64 pa)
{
	msgq->cmdq.addr.kva = kva;
	msgq->cmdq.addr.pa  = pa;

	/* rspq follows the aligned cmdq region. */
	kva += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ);
	pa += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ);

	msgq->rspq.addr.kva = kva;
	msgq->rspq.addr.pa = pa;
}
608
/* Attach the msgq to an IOC: initialize both queues, register the
 * MSGQ-class mailbox ISR, and subscribe to IOC enable/disable/fail
 * notifications.
 */
void
bfa_msgq_attach(struct bfa_msgq *msgq, struct bfa_ioc *ioc)
{
	msgq->ioc    = ioc;

	bfa_msgq_cmdq_attach(&msgq->cmdq, msgq);
	bfa_msgq_rspq_attach(&msgq->rspq, msgq);

	bfa_nw_ioc_mbox_regisr(msgq->ioc, BFI_MC_MSGQ, bfa_msgq_isr, msgq);
	bfa_q_qe_init(&msgq->ioc_notify);
	bfa_ioc_notify_init(&msgq->ioc_notify, bfa_msgq_notify, msgq);
	bfa_nw_ioc_notify_register(msgq->ioc, &msgq->ioc_notify);
}
622
/* Register the response handler for a message class; it is invoked by
 * bfa_msgq_rspq_pi_update() for each response of that class.
 */
void
bfa_msgq_regisr(struct bfa_msgq *msgq, enum bfi_mclass mc,
		bfa_msgq_mcfunc_t cbfn, void *cbarg)
{
	msgq->rspq.rsphdlr[mc].cbfn	= cbfn;
	msgq->rspq.rsphdlr[mc].cbarg	= cbarg;
}
630
631void
632bfa_msgq_cmd_post(struct bfa_msgq *msgq,  struct bfa_msgq_cmd_entry *cmd)
633{
634	if (ntohs(cmd->msg_hdr->num_entries) <=
635		BFA_MSGQ_FREE_CNT(&msgq->cmdq)) {
636		__cmd_copy(&msgq->cmdq, cmd);
637		call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK);
638		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_POST);
639	} else {
640		list_add_tail(&cmd->qe, &msgq->cmdq.pending_q);
641	}
642}
643
/* Copy buf_len bytes of response data out of the rspq ring into buf,
 * starting at the current consumer index and walking the ring entry by
 * entry with wrap-around.  Only a local copy of the consumer index is
 * advanced; rspq->consumer_index itself is left untouched.
 */
void
bfa_msgq_rsp_copy(struct bfa_msgq *msgq, u8 *buf, size_t buf_len)
{
	struct bfa_msgq_rspq *rspq = &msgq->rspq;
	size_t len = buf_len;
	size_t to_copy;
	int ci;
	u8 *src, *dst;

	ci = rspq->consumer_index;
	src = (u8 *)rspq->addr.kva;
	src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE);
	dst = buf;

	while (len) {
		/* The final chunk may be shorter than one ring entry. */
		to_copy = (len < BFI_MSGQ_RSP_ENTRY_SIZE) ?
				len : BFI_MSGQ_RSP_ENTRY_SIZE;
		memcpy(dst, src, to_copy);
		len -= to_copy;
		/* dst advances a full entry stride per iteration. */
		dst += BFI_MSGQ_RSP_ENTRY_SIZE;
		BFA_MSGQ_INDX_ADD(ci, 1, rspq->depth);
		src = (u8 *)rspq->addr.kva;
		src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE);
	}
}
669