/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"

/* Max number of connection types in HW (DQ/CDU etc.) */
#define MAX_CONN_TYPES		PROTOCOLID_COMMON
#define NUM_TASK_TYPES		2
#define NUM_TASK_PF_SEGMENTS	4

/* QM constants */
#define QM_PQ_ELEMENT_SIZE	4 /* in bytes */

/* Doorbell-Queue constants */
#define DQ_RANGE_SHIFT		4
#define DQ_RANGE_ALIGN		BIT(DQ_RANGE_SHIFT)

/* ILT constants */
#define ILT_DEFAULT_HW_P_SIZE		3
#define ILT_PAGE_IN_BYTES(hw_p_size)	(1U << ((hw_p_size) + 12))
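/* e.g. the default hw_p_size of 3 gives ILT_PAGE_IN_BYTES(3) = 1 << 15,
 * i.e. a 32K ILT page.
 */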
#define ILT_CFG_REG(cli, reg)	PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET

/* ILT entry structure */
#define ILT_ENTRY_PHY_ADDR_MASK		0x000FFFFFFFFFFFULL
#define ILT_ENTRY_PHY_ADDR_SHIFT	0
#define ILT_ENTRY_VALID_MASK		0x1ULL
#define ILT_ENTRY_VALID_SHIFT		52
#define ILT_ENTRY_IN_REGS		2
#define ILT_REG_SIZE_IN_BYTES		4
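
/* An ILT entry is a 64-bit value spread over two 32-bit RT registers:
 * bits 0-51 hold the physical page address (stored >> 12) and bit 52
 * marks the entry valid. A sketch of how one is built (see
 * qed_ilt_init_pf() below):
 *
 *	u64 entry = 0;
 *
 *	SET_FIELD(entry, ILT_ENTRY_VALID, 1ULL);
 *	SET_FIELD(entry, ILT_ENTRY_PHY_ADDR, phys >> 12);
 */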

/* connection context union */
union conn_context {
	struct core_conn_context core_ctx;
	struct eth_conn_context eth_ctx;
};

#define CONN_CXT_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
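
/* ALIGNED_TYPE_SIZE() (from qed.h) rounds the union size up to the
 * device's alignment boundary, so every connection context occupies a
 * fixed-size slot in the ILT pages.
 */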

/* PF per-protocol configuration object */
struct qed_conn_type_cfg {
	u32 cid_count;
	u32 cid_start;
};

/* ILT Client configuration, Per connection type (protocol) resources. */
#define ILT_CLI_PF_BLOCKS	(1 + NUM_TASK_PF_SEGMENTS * 2)
#define CDUC_BLK		(0)

enum ilt_clients {
	ILT_CLI_CDUC,
	ILT_CLI_QM,
	ILT_CLI_MAX
};

struct ilt_cfg_pair {
	u32 reg;
	u32 val;
};

struct qed_ilt_cli_blk {
	u32 total_size; /* 0 means not active */
	u32 real_size_in_page;
	u32 start_line;
};

struct qed_ilt_client_cfg {
	bool active;

	/* ILT boundaries */
	struct ilt_cfg_pair first;
	struct ilt_cfg_pair last;
	struct ilt_cfg_pair p_size;

	/* ILT client blocks for PF */
	struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
	u32 pf_total_lines;
};

/* Per Path -
 *      ILT shadow table
 *      Protocol acquired CID lists
 *      PF start line in ILT
 */
struct qed_dma_mem {
	dma_addr_t p_phys;
	void *p_virt;
	size_t size;
};

struct qed_cid_acquired_map {
	u32		start_cid;
	u32		max_count;
	unsigned long	*cid_map;
};

struct qed_cxt_mngr {
	/* Per protocol configuration */
	struct qed_conn_type_cfg	conn_cfg[MAX_CONN_TYPES];

	/* computed ILT structure */
	struct qed_ilt_client_cfg	clients[ILT_CLI_MAX];

	/* Acquired CIDs */
	struct qed_cid_acquired_map	acquired[MAX_CONN_TYPES];

	/* ILT shadow table */
	struct qed_dma_mem		*ilt_shadow;
	u32				pf_start_line;
};

static u32 qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr)
{
	u32 type, pf_cids = 0;

	for (type = 0; type < MAX_CONN_TYPES; type++)
		pf_cids += p_mngr->conn_cfg[type].cid_count;

	return pf_cids;
}

static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
			    struct qed_qm_iids *iids)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	int type;

	for (type = 0; type < MAX_CONN_TYPES; type++)
		iids->cids += p_mngr->conn_cfg[type].cid_count;

	DP_VERBOSE(p_hwfn, QED_MSG_ILT, "iids: CIDS %08x\n", iids->cids);
}

/* set the iids count per protocol */
static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
					enum protocol_type type,
					u32 cid_count)
{
	struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
	struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];

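	/* Round up to DQ_RANGE_ALIGN (16) so every protocol's CID range
	 * stays aligned for the DORQ range registers programmed in
	 * qed_dq_init_pf(); e.g. a request for 100 CIDs becomes 112.
	 */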
	p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
}

static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
				 struct qed_ilt_cli_blk *p_blk,
				 u32 start_line, u32 total_size,
				 u32 elem_size)
{
	u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);

	/* verify that it's called only once for each block */
	if (p_blk->total_size)
		return;

	p_blk->total_size = total_size;
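	/* real_size_in_page is the largest multiple of elem_size that fits
	 * in one ILT page, so no element ever straddles a page boundary;
	 * e.g. (hypothetical sizes) a 32K page and 320-byte elements leave
	 * 102 * 320 = 32640 usable bytes per page.
	 */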
	p_blk->real_size_in_page = 0;
	if (elem_size)
		p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
	p_blk->start_line = start_line;
}

static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
				 struct qed_ilt_client_cfg *p_cli,
				 struct qed_ilt_cli_blk *p_blk,
				 u32 *p_line, enum ilt_clients client_id)
{
	if (!p_blk->total_size)
		return;

	if (!p_cli->active)
		p_cli->first.val = *p_line;

	p_cli->active = true;
	*p_line += DIV_ROUND_UP(p_blk->total_size,
				p_blk->real_size_in_page);
	p_cli->last.val = *p_line - 1;

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
		   client_id, p_cli->first.val,
		   p_cli->last.val, p_blk->total_size,
		   p_blk->real_size_in_page, p_blk->start_line);
}

int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_ilt_client_cfg *p_cli;
	struct qed_ilt_cli_blk *p_blk;
	u32 curr_line, total, pf_cids;
	struct qed_qm_iids qm_iids;

	memset(&qm_iids, 0, sizeof(qm_iids));

	p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
		   p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);

	/* CDUC */
	p_cli = &p_mngr->clients[ILT_CLI_CDUC];
	curr_line = p_mngr->pf_start_line;
	p_cli->pf_total_lines = 0;

	/* get the counters for the CDUC and QM clients */
	pf_cids = qed_cxt_cdu_iids(p_mngr);

	p_blk = &p_cli->pf_blks[CDUC_BLK];

	total = pf_cids * CONN_CXT_SIZE(p_hwfn);

	qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
			     total, CONN_CXT_SIZE(p_hwfn));

	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
	p_cli->pf_total_lines = curr_line - p_blk->start_line;

	/* QM */
	p_cli = &p_mngr->clients[ILT_CLI_QM];
	p_blk = &p_cli->pf_blks[0];

	qed_cxt_qm_iids(p_hwfn, &qm_iids);
	total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, 0, 0,
				   p_hwfn->qm_info.num_pqs, 0);

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "QM ILT Info, (cids=%d, num_pqs=%d, memory_size=%d)\n",
		   qm_iids.cids, p_hwfn->qm_info.num_pqs, total);

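	/* qed_qm_pf_mem_size() reports its result in 4K pages, hence the
	 * multiplication by 0x1000 below.
	 */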
	qed_ilt_cli_blk_fill(p_cli, p_blk,
			     curr_line, total * 0x1000,
			     QM_PQ_ELEMENT_SIZE);

	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
	p_cli->pf_total_lines = curr_line - p_blk->start_line;

	if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
	    RESC_NUM(p_hwfn, QED_ILT)) {
		DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
		       curr_line - p_hwfn->p_cxt_mngr->pf_start_line);
		return -EINVAL;
	}

	return 0;
}

#define for_each_ilt_valid_client(pos, clients)	\
		for (pos = 0; pos < ILT_CLI_MAX; pos++)
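
/* Note: the clients argument above is currently unused - the loop visits
 * every client, and callers skip inactive ones explicitly.
 */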

/* Total number of ILT lines used by this PF */
static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
{
	u32 size = 0;
	u32 i;

	for_each_ilt_valid_client(i, ilt_clients) {
		if (!ilt_clients[i].active)
			continue;
		size += (ilt_clients[i].last.val -
			 ilt_clients[i].first.val + 1);
	}

	return size;
}

static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 ilt_size, i;

	ilt_size = qed_cxt_ilt_shadow_size(p_cli);

	for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
		struct qed_dma_mem *p_dma = &p_mngr->ilt_shadow[i];

		if (p_dma->p_virt)
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  p_dma->size, p_dma->p_virt,
					  p_dma->p_phys);
		p_dma->p_virt = NULL;
	}
	kfree(p_mngr->ilt_shadow);
	p_mngr->ilt_shadow = NULL;
}

static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
			     struct qed_ilt_cli_blk *p_blk,
			     enum ilt_clients ilt_client,
			     u32 start_line_offset)
{
	struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
	u32 lines, line, sz_left;

	if (!p_blk->total_size)
		return 0;

	sz_left = p_blk->total_size;
	lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page);
	line = p_blk->start_line + start_line_offset -
	       p_hwfn->p_cxt_mngr->pf_start_line;

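	/* Allocate one DMA-coherent chunk per ILT line covered by this
	 * block; the last line may be shorter than a full page.
	 */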
	for (; lines; lines--) {
		dma_addr_t p_phys;
		void *p_virt;
		u32 size;

		size = min_t(u32, sz_left,
			     p_blk->real_size_in_page);
		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    size,
					    &p_phys,
					    GFP_KERNEL);
		if (!p_virt)
			return -ENOMEM;
		memset(p_virt, 0, size);

		ilt_shadow[line].p_phys = p_phys;
		ilt_shadow[line].p_virt = p_virt;
		ilt_shadow[line].size = size;

		DP_VERBOSE(p_hwfn, QED_MSG_ILT,
			   "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n",
			    line, (u64)p_phys, p_virt, size);

		sz_left -= size;
		line++;
	}

	return 0;
}

static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_ilt_client_cfg *clients = p_mngr->clients;
	struct qed_ilt_cli_blk *p_blk;
	u32 size, i, j;
	int rc;

	size = qed_cxt_ilt_shadow_size(clients);
	p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
				     GFP_KERNEL);
	if (!p_mngr->ilt_shadow) {
		DP_NOTICE(p_hwfn, "Failed to allocate ilt shadow table\n");
		rc = -ENOMEM;
		goto ilt_shadow_fail;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "Allocated 0x%x bytes for ilt shadow\n",
		   (u32)(size * sizeof(struct qed_dma_mem)));

	for_each_ilt_valid_client(i, clients) {
		if (!clients[i].active)
			continue;
		for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
			p_blk = &clients[i].pf_blks[j];
			rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
			if (rc != 0)
				goto ilt_shadow_fail;
		}
	}

	return 0;

ilt_shadow_fail:
	qed_ilt_shadow_free(p_hwfn);
	return rc;
}

static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 type;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		kfree(p_mngr->acquired[type].cid_map);
		p_mngr->acquired[type].cid_map = NULL;
		p_mngr->acquired[type].max_count = 0;
		p_mngr->acquired[type].start_cid = 0;
	}
}

static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 start_cid = 0;
	u32 type;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
		u32 size;

		if (cid_cnt == 0)
			continue;

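		/* Bitmap size in bytes, rounded up to a whole number of
		 * unsigned longs; equivalent to
		 * BITS_TO_LONGS(cid_cnt) * sizeof(unsigned long).
		 */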
		size = DIV_ROUND_UP(cid_cnt,
				    sizeof(unsigned long) * BITS_PER_BYTE) *
		       sizeof(unsigned long);
		p_mngr->acquired[type].cid_map = kzalloc(size, GFP_KERNEL);
		if (!p_mngr->acquired[type].cid_map)
			goto cid_map_fail;

		p_mngr->acquired[type].max_count = cid_cnt;
		p_mngr->acquired[type].start_cid = start_cid;

		p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid;

		DP_VERBOSE(p_hwfn, QED_MSG_CXT,
			   "Type %08x start: %08x count %08x\n",
			   type, p_mngr->acquired[type].start_cid,
			   p_mngr->acquired[type].max_count);
		start_cid += cid_cnt;
	}

	return 0;

cid_map_fail:
	qed_cid_map_free(p_hwfn);
	return -ENOMEM;
}

int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr;
	u32 i;

	p_mngr = kzalloc(sizeof(*p_mngr), GFP_ATOMIC);
	if (!p_mngr) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_cxt_mngr'\n");
		return -ENOMEM;
	}

	/* Initialize ILT client registers */
	p_mngr->clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
	p_mngr->clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
	p_mngr->clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);

	p_mngr->clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
	p_mngr->clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
	p_mngr->clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);

	/* default ILT page size for all clients is 32K */
	for (i = 0; i < ILT_CLI_MAX; i++)
		p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;

	/* Set the cxt manager pointer prior to further allocations */
	p_hwfn->p_cxt_mngr = p_mngr;

	return 0;
}

int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
{
	int rc;

	/* Allocate the ILT shadow table */
	rc = qed_ilt_shadow_alloc(p_hwfn);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ilt memory\n");
		goto tables_alloc_fail;
	}

	/* Allocate and initialize the acquired cids bitmaps */
	rc = qed_cid_map_alloc(p_hwfn);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate cid maps\n");
		goto tables_alloc_fail;
	}

	return 0;

tables_alloc_fail:
	qed_cxt_mngr_free(p_hwfn);
	return rc;
}

void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_cxt_mngr)
		return;

	qed_cid_map_free(p_hwfn);
	qed_ilt_shadow_free(p_hwfn);
	kfree(p_hwfn->p_cxt_mngr);

	p_hwfn->p_cxt_mngr = NULL;
}

void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	int type;

	/* Reset acquired cids */
	for (type = 0; type < MAX_CONN_TYPES; type++) {
		u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;

		if (cid_cnt == 0)
			continue;

		memset(p_mngr->acquired[type].cid_map, 0,
		       DIV_ROUND_UP(cid_cnt,
				    sizeof(unsigned long) * BITS_PER_BYTE) *
		       sizeof(unsigned long));
	}
}

/* CDU Common */
#define CDUC_CXT_SIZE_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT

#define CDUC_CXT_SIZE_MASK \
	(CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)

#define CDUC_BLOCK_WASTE_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT

#define CDUC_BLOCK_WASTE_MASK \
	(CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)

#define CDUC_NCIB_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT

#define CDUC_NCIB_MASK \
	(CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)
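
/* The CDU register field masks are defined in place (already shifted),
 * so the _MASK macros above shift them down to bit 0, which is the form
 * SET_FIELD() expects.
 */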

static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
{
	u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;

	/* CDUC - connection configuration */
	page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
	cxt_size = CONN_CXT_SIZE(p_hwfn);
	elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
	block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
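	/* For example (hypothetical sizes): a 32K ILT page and a 320-byte
	 * context give elems_per_page = 102 and
	 * block_waste = 32768 - 102 * 320 = 128.
	 */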

	SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
	SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
	SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
	STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
}

void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_pf_rt_init_params params;
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	struct qed_qm_iids iids;

	memset(&iids, 0, sizeof(iids));
	qed_cxt_qm_iids(p_hwfn, &iids);

	memset(&params, 0, sizeof(params));
	params.port_id = p_hwfn->port_id;
	params.pf_id = p_hwfn->rel_pf_id;
	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
	params.is_first_pf = p_hwfn->first_on_engine;
	params.num_pf_cids = iids.cids;
	params.start_pq = qm_info->start_pq;
	params.num_pf_pqs = qm_info->num_pqs;
	params.start_vport = qm_info->start_vport;
	params.pf_wfq = qm_info->pf_wfq;
	params.pf_rl = qm_info->pf_rl;
	params.pq_params = qm_info->qm_pq_params;
	params.vport_params = qm_info->qm_vport_params;

	qed_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, &params);
}

/* CM PF */
static int qed_cm_init_pf(struct qed_hwfn *p_hwfn)
{
	union qed_qm_pq_params pq_params;
	u16 pq;

	/* XCM pure-LB queue */
	memset(&pq_params, 0, sizeof(pq_params));
	pq_params.core.tc = LB_TC;
	pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
	STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pq);

	return 0;
}

/* DQ PF */
static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 dq_pf_max_cid = 0;

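	/* Each DORQ range register is written with the cumulative CID
	 * count so far, shifted by DQ_RANGE_SHIFT; the ranges act as
	 * upper bounds when the DORQ validates doorbells.
	 */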
	dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);

	dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);

	dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);

	dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);

	dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);

	/* 5 - PF */
	dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
}

static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *ilt_clients;
	int i;

	ilt_clients = p_hwfn->p_cxt_mngr->clients;
	for_each_ilt_valid_client(i, ilt_clients) {
		if (!ilt_clients[i].active)
			continue;
		STORE_RT_REG(p_hwfn,
			     ilt_clients[i].first.reg,
			     ilt_clients[i].first.val);
		STORE_RT_REG(p_hwfn,
			     ilt_clients[i].last.reg,
			     ilt_clients[i].last.val);
		STORE_RT_REG(p_hwfn,
			     ilt_clients[i].p_size.reg,
			     ilt_clients[i].p_size.val);
	}
}

/* ILT (PSWRQ2) PF */
static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *clients;
	struct qed_cxt_mngr *p_mngr;
	struct qed_dma_mem *p_shdw;
	u32 line, rt_offst, i;

	qed_ilt_bounds_init(p_hwfn);

	p_mngr = p_hwfn->p_cxt_mngr;
	p_shdw = p_mngr->ilt_shadow;
	clients = p_hwfn->p_cxt_mngr->clients;

	for_each_ilt_valid_client(i, clients) {
		if (!clients[i].active)
			continue;

		/* Client's first val and the RT array are absolute; the
		 * ILT shadow's lines are relative.
		 */
		line = clients[i].first.val - p_mngr->pf_start_line;
		rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
			   clients[i].first.val * ILT_ENTRY_IN_REGS;

		for (; line <= clients[i].last.val - p_mngr->pf_start_line;
		     line++, rt_offst += ILT_ENTRY_IN_REGS) {
			u64 ilt_hw_entry = 0;

			/* p_virt could be NULL in case of dynamic
			 * allocation
			 */
			if (p_shdw[line].p_virt) {
				SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
				SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
					  (p_shdw[line].p_phys >> 12));

				DP_VERBOSE(p_hwfn, QED_MSG_ILT,
					   "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
					   rt_offst, line, i,
					   (u64)(p_shdw[line].p_phys >> 12));
			}

			STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
		}
	}
}

void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
{
	qed_cdu_init_common(p_hwfn);
}

void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
{
	qed_qm_init_pf(p_hwfn);
	qed_cm_init_pf(p_hwfn);
	qed_dq_init_pf(p_hwfn);
	qed_ilt_init_pf(p_hwfn);
}

int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
			enum protocol_type type,
			u32 *p_cid)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 rel_cid;

	if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) {
		DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
		return -EINVAL;
	}

	rel_cid = find_first_zero_bit(p_mngr->acquired[type].cid_map,
				      p_mngr->acquired[type].max_count);

	if (rel_cid >= p_mngr->acquired[type].max_count) {
		DP_NOTICE(p_hwfn, "no CID available for protocol %d\n",
			  type);
		return -EINVAL;
	}

	__set_bit(rel_cid, p_mngr->acquired[type].cid_map);

	*p_cid = rel_cid + p_mngr->acquired[type].start_cid;

	return 0;
}

static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
				      u32 cid,
				      enum protocol_type *p_type)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_cid_acquired_map *p_map;
	enum protocol_type p;
	u32 rel_cid;

	/* Iterate over protocols and find matching cid range */
	for (p = 0; p < MAX_CONN_TYPES; p++) {
		p_map = &p_mngr->acquired[p];

		if (!p_map->cid_map)
			continue;
		if (cid >= p_map->start_cid &&
		    cid < p_map->start_cid + p_map->max_count)
			break;
	}
	*p_type = p;

	if (p == MAX_CONN_TYPES) {
		DP_NOTICE(p_hwfn, "Invalid CID %d", cid);
		return false;
	}

	rel_cid = cid - p_map->start_cid;
	if (!test_bit(rel_cid, p_map->cid_map)) {
		DP_NOTICE(p_hwfn, "CID %d not acquired", cid);
		return false;
	}
	return true;
}

void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
			 u32 cid)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	enum protocol_type type;
	bool b_acquired;
	u32 rel_cid;

	/* Test acquired and find matching per-protocol map */
	b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, &type);

	if (!b_acquired)
		return;

	rel_cid = cid - p_mngr->acquired[type].start_cid;
	__clear_bit(rel_cid, p_mngr->acquired[type].cid_map);
}
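
/* A minimal usage sketch of the CID pool (assuming an ETH connection):
 *
 *	u32 cid;
 *
 *	if (!qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid)) {
 *		... configure and use the connection ...
 *		qed_cxt_release_cid(p_hwfn, cid);
 *	}
 */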

int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
			 struct qed_cxt_info *p_info)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
	enum protocol_type type;
	bool b_acquired;

	/* Test acquired and find matching per-protocol map */
	b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type);

	if (!b_acquired)
		return -EINVAL;

	/* set the protocol type */
	p_info->type = type;

	/* compute context virtual pointer */
	hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;

	conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
	cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
	line = p_info->iid / cxts_per_p;
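	/* Indexing ilt_shadow[] directly works because CDUC is the first
	 * ILT client, so its block starts at shadow line 0.
	 */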

	/* Make sure context is allocated (dynamic allocation) */
	if (!p_mngr->ilt_shadow[line].p_virt)
		return -EINVAL;

	p_info->p_cxt = p_mngr->ilt_shadow[line].p_virt +
			(p_info->iid % cxts_per_p) * conn_cxt_size;

	DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
		   "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
		   p_info->iid / cxts_per_p, p_info->p_cxt, p_info->iid);

	return 0;
}

int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
{
	struct qed_eth_pf_params *p_params = &p_hwfn->pf_params.eth_pf_params;

	/* Set the number of required CORE connections */
	u32 core_cids = 1; /* SPQ */

	qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids);

	qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
				    p_params->num_cons);

	return 0;
}