/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"

/***************************************************************************
* Structures & Definitions
***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)
#define SPQ_BLOCK_SLEEP_LENGTH          (1000)

/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
				void *cookie,
				union event_ring_data *data,
				u8 fw_return_code)
{
	struct qed_spq_comp_done *comp_done;

	comp_done = (struct qed_spq_comp_done *)cookie;

	comp_done->done			= 0x1;
	comp_done->fw_return_code	= fw_return_code;

	/* make update visible to waiting thread */
	smp_wmb();
}

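/* Poll for the completion of a ramrod posted in EBLOCK/BLOCK mode.
 * Sleeps between polls of the completion flag written by
 * qed_spq_blocking_cb(); if the ramrod does not complete in time,
 * an MCP drain is requested and the wait is retried once more
 * before giving up with -EBUSY.
 */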
static int qed_spq_block(struct qed_hwfn *p_hwfn,
			 struct qed_spq_entry *p_ent,
			 u8 *p_fw_ret)
{
	int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
	struct qed_spq_comp_done *comp_done;
	int rc;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	while (sleep_count) {
		/* validate we receive completion update */
		smp_rmb();
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}
		usleep_range(5000, 10000);
		sleep_count--;
	}

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
	if (rc != 0)
		DP_NOTICE(p_hwfn, "MCP drain failed\n");

	/* Retry after drain */
	sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
	while (sleep_count) {
		/* validate we receive completion update */
		smp_rmb();
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}
		usleep_range(5000, 10000);
		sleep_count--;
	}

	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return 0;
	}

	DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");

	return -EBUSY;
}

/***************************************************************************
* SPQ entries inner API
***************************************************************************/
static int
qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
		   struct qed_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
	case QED_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = qed_spq_blocking_cb;
		break;
	case QED_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid,
		   p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi,
		   p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return 0;
}

/***************************************************************************
* HSI access
***************************************************************************/
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
				  struct qed_spq *p_spq)
{
	u16				pq;
	struct qed_cxt_info		cxt_info;
	struct core_conn_context	*p_cxt;
	union qed_qm_pq_params		pq_params;
	int				rc;

	cxt_info.iid = p_spq->cid;

	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

	if (rc < 0) {
		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
		  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

	/* QM physical queue */
	memset(&pq_params, 0, sizeof(pq_params));
	pq_params.core.tc = LB_TC;
	pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);

	p_cxt->xstorm_st_context.spq_base_lo =
		DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
		DMA_HI_LE(p_spq->chain.p_phys_addr);

	p_cxt->xstorm_st_context.consolid_base_addr.lo =
		DMA_LO_LE(p_hwfn->p_consq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.consolid_base_addr.hi =
		DMA_HI_LE(p_hwfn->p_consq->chain.p_phys_addr);
}

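/* Copy a prepared SPQ entry into the next free slot of the SPQ chain and
 * ring the XCM doorbell so firmware picks up the new producer value.
 * The entry's echo field is stamped with the chain producer index so the
 * EQ completion can later be matched back to this entry.
 */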
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
			   struct qed_spq *p_spq,
			   struct qed_spq_entry *p_ent)
{
	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
	u16 echo = qed_chain_get_prod_idx(p_chain);
	struct slow_path_element	*elem;
	struct core_db_data		db;

	p_ent->elem.hdr.echo	= cpu_to_le16(echo);
	elem = qed_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
		return -EINVAL;
	}

	*elem = p_ent->elem; /* struct assignment */

	/* send a doorbell on the slow hwfn session */
	memset(&db, 0, sizeof(db));
	SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;

	/* make sure the producer index is up to date */
	rmb();

	db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

	/* do not reorder */
	barrier();

	DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

	/* make sure doorbell is rung */
	mmiowb();

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
		   p_spq->cid, db.params, db.agg_flags,
		   qed_chain_get_prod_idx(p_chain));

	return 0;
}

/***************************************************************************
* Asynchronous events
***************************************************************************/
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
			   struct event_ring_entry *p_eqe)
{
	DP_NOTICE(p_hwfn,
		  "Unknown Async completion for protocol: %d\n",
		  p_eqe->protocol_id);
	return -EINVAL;
}

/***************************************************************************
* EQ API
***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
			u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	mmiowb();
}

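/* Slowpath status-block callback for the event queue: consumes EQ entries
 * up to the firmware consumer index, dispatching each one either to the
 * async-event handler or to qed_spq_completion(), and finally reports the
 * updated position back to firmware via qed_eq_prod_update().
 */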
int qed_eq_completion(struct qed_hwfn *p_hwfn,
		      void *cookie)
{
	struct qed_eq *p_eq = cookie;
	struct qed_chain *p_chain = &p_eq->chain;
	int rc = 0;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (to comply with our chain), so our macros would comply
	 */
	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
	    qed_chain_get_usable_per_page(p_chain))
		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

	/* Complete current segment of eq entries */
	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

		if (!p_eqe) {
			rc = -EINVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,
			   p_eqe->protocol_id,
			   p_eqe->reserved0,
			   le16_to_cpu(p_eqe->echo),
			   p_eqe->fw_return_code,
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (qed_async_event_completion(p_hwfn, p_eqe))
				rc = -EINVAL;
		} else if (qed_spq_completion(p_hwfn,
					      p_eqe->echo,
					      p_eqe->fw_return_code,
					      &p_eqe->data)) {
			rc = -EINVAL;
		}

		qed_chain_recycle_consumed(p_chain);
	}

	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

	return rc;
}

struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
			    u16 num_elem)
{
	struct qed_eq *p_eq;

	/* Allocate EQ struct */
	p_eq = kzalloc(sizeof(*p_eq), GFP_ATOMIC);
	if (!p_eq) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n");
		return NULL;
	}

	/* Allocate and initialize EQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    num_elem,
			    sizeof(union event_ring_element),
			    &p_eq->chain)) {
		DP_NOTICE(p_hwfn, "Failed to allocate eq chain\n");
		goto eq_allocate_fail;
	}

	/* register EQ completion on the SP SB */
	qed_int_register_cb(p_hwfn,
			    qed_eq_completion,
			    p_eq,
			    &p_eq->eq_sb_index,
			    &p_eq->p_fw_cons);

	return p_eq;

eq_allocate_fail:
	qed_eq_free(p_hwfn, p_eq);
	return NULL;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn,
		  struct qed_eq *p_eq)
{
	qed_chain_reset(&p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn,
		 struct qed_eq *p_eq)
{
	if (!p_eq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_eq->chain);
	kfree(p_eq);
}

/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static int qed_cqe_completion(
	struct qed_hwfn *p_hwfn,
	struct eth_slow_path_rx_cqe *cqe,
	enum protocol_type protocol)
{
	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe)
{
	int rc;

	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);

	return rc;
}

/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
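/* (Re)initialize an already-allocated SPQ: reset the pending lists and the
 * free pool, point each entry's data pointer at its DMA-able ramrod data,
 * clear the statistics and the completion bitmap, acquire the SPQ CID and
 * program the CORE connection context.
 */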
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_spq		*p_spq	= p_hwfn->p_spq;
	struct qed_spq_entry	*p_virt = NULL;
	dma_addr_t		p_phys	= 0;
	unsigned int		i	= 0;

	INIT_LIST_HEAD(&p_spq->pending);
	INIT_LIST_HEAD(&p_spq->completion_pending);
	INIT_LIST_HEAD(&p_spq->free_pool);
	INIT_LIST_HEAD(&p_spq->unlimited_pending);
	spin_lock_init(&p_spq->lock);

	/* SPQ empty pool */
	p_phys	= p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
	p_virt	= p_spq->p_virt;

	for (i = 0; i < p_spq->chain.capacity; i++) {
		p_virt->elem.data_ptr.hi = DMA_HI_LE(p_phys);
		p_virt->elem.data_ptr.lo = DMA_LO_LE(p_phys);

		list_add_tail(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct qed_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count		= 0;
	p_spq->comp_count		= 0;
	p_spq->comp_sent_count		= 0;
	p_spq->unlimited_pending_count	= 0;

	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	qed_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	qed_chain_reset(&p_spq->chain);
}

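/* Allocate the SPQ structure, its single-page ring and the coherent buffer
 * that holds the SPQ entries together with their ramrod data.
 */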
int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_spq		*p_spq	= NULL;
	dma_addr_t		p_phys	= 0;
	struct qed_spq_entry	*p_virt = NULL;

	/* SPQ struct */
	p_spq = kzalloc(sizeof(struct qed_spq), GFP_ATOMIC);
	if (!p_spq) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
		return -ENOMEM;
	}

	/* SPQ ring */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_SINGLE,
			    0,   /* N/A when the mode is SINGLE */
			    sizeof(struct slow_path_element),
			    &p_spq->chain)) {
		DP_NOTICE(p_hwfn, "Failed to allocate spq chain\n");
		goto spq_allocate_fail;
	}

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    p_spq->chain.capacity *
				    sizeof(struct qed_spq_entry),
				    &p_phys,
				    GFP_KERNEL);

	if (!p_virt)
		goto spq_allocate_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;
	p_hwfn->p_spq = p_spq;

	return 0;

spq_allocate_fail:
	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	return -ENOMEM;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (!p_spq)
		return;

	if (p_spq->p_virt)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_spq->chain.capacity *
				  sizeof(struct qed_spq_entry),
				  p_spq->p_virt,
				  p_spq->p_phys);

	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
}

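/* Acquire an SPQ entry for the caller to fill. Entries normally come from
 * the free pool; when the pool is exhausted a temporary entry is allocated
 * and destined for the unlimited_pending list instead.
 */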
int
qed_spq_get_entry(struct qed_hwfn *p_hwfn,
		  struct qed_spq_entry **pp_ent)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;
	int rc = 0;

	spin_lock_bh(&p_spq->lock);

	if (list_empty(&p_spq->free_pool)) {
		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
		if (!p_ent) {
			rc = -ENOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = list_first_entry(&p_spq->free_pool,
					 struct qed_spq_entry,
					 list);
		list_del(&p_ent->list);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	spin_unlock_bh(&p_spq->lock);
	return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
				   struct qed_spq_entry *p_ent)
{
	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
			  struct qed_spq_entry *p_ent)
{
	spin_lock_bh(&p_hwfn->p_spq->lock);
	__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_hwfn->p_spq->lock);
}

/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while the lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int
qed_spq_add_entry(struct qed_hwfn *p_hwfn,
		  struct qed_spq_entry *p_ent,
		  enum spq_priority priority)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		if (list_empty(&p_spq->free_pool)) {
			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return 0;
		} else {
			struct qed_spq_entry *p_en2;

			p_en2 = list_first_entry(&p_spq->free_pool,
						 struct qed_spq_entry,
						 list);
			list_del(&p_en2->list);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to overwrite the entire
			 * ring entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			kfree(p_ent);

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case QED_SPQ_PRIORITY_NORMAL:
		list_add_tail(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case QED_SPQ_PRIORITY_HIGH:
		list_add(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/***************************************************************************
* Accessor
***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;      /* illegal */
	return p_hwfn->p_spq->cid;
}

/***************************************************************************
* Posting new Ramrods
***************************************************************************/
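/* Post entries from the given pending list to hardware as long as the SPQ
 * ring has more than 'keep_reserve' free elements, moving each posted entry
 * to the completion_pending list.
 */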
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
			     struct list_head *head,
			     u32 keep_reserve)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	int rc;

	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !list_empty(head)) {
		struct qed_spq_entry *p_ent =
			list_first_entry(head, struct qed_spq_entry, list);
		list_del(&p_ent->list);
		list_add_tail(&p_ent->list, &p_spq->completion_pending);
		p_spq->comp_sent_count++;

		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
		if (rc) {
			list_del(&p_ent->list);
			__qed_spq_return_entry(p_hwfn, p_ent);
			return rc;
		}
	}

	return 0;
}

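/* Move entries from unlimited_pending into the regular pending list while
 * free-pool entries are available, then post the pending list to hardware,
 * keeping a small reserve for high-priority ramrods.
 */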
static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;

	while (!list_empty(&p_spq->free_pool)) {
		if (list_empty(&p_spq->unlimited_pending))
			break;

		p_ent = list_first_entry(&p_spq->unlimited_pending,
					 struct qed_spq_entry,
					 list);
		if (!p_ent)
			return -EINVAL;

		list_del(&p_ent->list);

		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return qed_spq_post_list(p_hwfn, &p_spq->pending,
				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

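/* Post a single ramrod. The entry is validated, queued and flushed towards
 * hardware; in EBLOCK mode the call also waits for the completion and
 * returns the entry to the free pool itself.
 *
 * Typical caller flow (sketch only; actual callers also fill the ramrod
 * header and data before posting):
 *
 *	struct qed_spq_entry *p_ent;
 *	u8 fw_ret;
 *
 *	if (!qed_spq_get_entry(p_hwfn, &p_ent)) {
 *		p_ent->comp_mode = QED_SPQ_MODE_EBLOCK;
 *		rc = qed_spq_post(p_hwfn, p_ent, &fw_ret);
 *	}
 */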
int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent,
		 u8 *fw_return_code)
{
	int rc = 0;
	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
	bool b_ret_ent = true;

	if (!p_hwfn)
		return -EINVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
		return -EINVAL;
	}

	/* Complete the entry */
	rc = qed_spq_fill_entry(p_hwfn, p_ent);

	spin_lock_bh(&p_spq->lock);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Add the request to the pending queue */
	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = qed_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	spin_unlock_bh(&p_spq->lock);

	if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
		/* For entries in QED BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		qed_spq_return_entry(p_hwfn, p_ent);
	}
	return rc;

spq_post_fail2:
	spin_lock_bh(&p_spq->lock);
	list_del(&p_ent->list);
	qed_chain_return_produced(&p_spq->chain);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

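/* Handle an EQ completion for a previously posted ramrod: locate the entry
 * on completion_pending by its echo value, account for out-of-order
 * completions via the completion bitmap, invoke the entry's callback and
 * finally try to post any pending ramrods that were waiting for room.
 */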
int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data)
{
	struct qed_spq		*p_spq;
	struct qed_spq_entry	*p_ent = NULL;
	struct qed_spq_entry	*tmp;
	struct qed_spq_entry	*found = NULL;
	int			rc;

	if (!p_hwfn)
		return -EINVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return -EINVAL;

	spin_lock_bh(&p_spq->lock);
	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
				 list) {
		if (p_ent->elem.hdr.echo == echo) {
			u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;

			list_del(&p_ent->list);

			/* Avoid overwriting of SPQ entries when getting
			 * out-of-order completions, by marking the completions
			 * in a bitmap and increasing the chain consumer only
			 * for the first successive completed entries.
			 */
			bitmap_set(p_spq->p_comp_bitmap, pos, 1);

			while (test_bit(p_spq->comp_bitmap_idx,
					p_spq->p_comp_bitmap)) {
				bitmap_clear(p_spq->p_comp_bitmap,
					     p_spq->comp_bitmap_idx, 1);
				p_spq->comp_bitmap_idx =
					(p_spq->comp_bitmap_idx + 1) %
					SPQ_RING_SIZE;
				qed_chain_return_produced(&p_spq->chain);
			}

			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is relatively uncommon - depends on scenarios
		 * which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
			   le16_to_cpu(echo),
			   le16_to_cpu(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	spin_unlock_bh(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an entry this EQE completes\n");
		return -EEXIST;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p\n",
		   found->comp_cb.function, found->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);

	if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
		/* EBLOCK is responsible for freeing its own entry */
		qed_spq_return_entry(p_hwfn, found);

	/* Attempt to post pending requests */
	spin_lock_bh(&p_spq->lock);
	rc = qed_spq_pend_post(p_hwfn);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq = kzalloc(sizeof(*p_consq), GFP_ATOMIC);
	if (!p_consq) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n");
		return NULL;
	}

	/* Allocate and initialize ConsQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_PAGE_SIZE / 0x80,
			    0x80,
			    &p_consq->chain)) {
		DP_NOTICE(p_hwfn, "Failed to allocate consq chain\n");
		goto consq_allocate_fail;
	}

	return p_consq;

consq_allocate_fail:
	qed_consq_free(p_hwfn, p_consq);
	return NULL;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn,
		     struct qed_consq *p_consq)
{
	qed_chain_reset(&p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn,
		    struct qed_consq *p_consq)
{
	if (!p_consq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_consq->chain);
	kfree(p_consq);
}