/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/slab.h>

#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "icp_qat_hal.h"
#include "icp_qat_uclo.h"

#define BAD_REGADDR               0xffff
#define MAX_RETRY_TIMES           10000
#define INIT_CTX_ARB_VALUE        0x0
#define INIT_CTX_ENABLE_VALUE     0x0
#define INIT_PC_VALUE             0x0
#define INIT_WAKEUP_EVENTS_VALUE  0x1
#define INIT_SIG_EVENTS_VALUE     0x1
#define INIT_CCENABLE_VALUE       0x2000
#define RST_CSR_QAT_LSB           20
#define RST_CSR_AE_LSB		  0
#define MC_TIMESTAMP_ENABLE       (0x1 << 7)

#define IGNORE_W1C_MASK ((~(1 << CE_BREAKPOINT_BITPOS)) & \
	(~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \
	(~(1 << CE_REG_PAR_ERR_BITPOS)))
#define INSERT_IMMED_GPRA_CONST(inst, const_val) \
	(inst = ((inst & 0xFFFF00C03FFull) | \
		((((const_val) << 12) & 0x0FF00000ull) | \
		(((const_val) << 10) & 0x0003FC00ull))))
#define INSERT_IMMED_GPRB_CONST(inst, const_val) \
	(inst = ((inst & 0xFFFF00FFF00ull) | \
		((((const_val) << 12) & 0x0FF00000ull) | \
		(((const_val) <<  0) & 0x000000FFull))))

#define AE(handle, ae) handle->hal_handle->aes[ae]

static const uint64_t inst_4b[] = {
	0x0F0400C0000ull, 0x0F4400C0000ull, 0x0F040000300ull, 0x0F440000300ull,
	0x0FC066C0000ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
	0x0A021000000ull
};

static const uint64_t inst[] = {
	0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull,
	0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
	0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull,
	0x0FC066C0001ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
	0x0F000400300ull, 0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D804400101ull,
	0x0A0580C0000ull, 0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull,
	0x0A0584C0000ull, 0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull,
	0x0A0588C0000ull, 0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull,
	0x0A058CC0000ull, 0x0A058DC0000ull, 0x0A058EC0000ull, 0x0A058FC0000ull,
	0x0A05C0C0000ull, 0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull,
	0x0A05C4C0000ull, 0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull,
	0x0A05C8C0000ull, 0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull,
	0x0A05CCC0000ull, 0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull,
	0x0A0400C0000ull, 0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull,
	0x0A0402C0000ull, 0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull,
	0x0A0404C0000ull, 0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull,
	0x0A0406C0000ull, 0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull,
	0x0A0408C0000ull, 0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull,
	0x0A040AC0000ull, 0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull,
	0x0A040CC0000ull, 0x0B040CC0000ull, 0x0A040DC0000ull, 0x0B040DC0000ull,
	0x0A040EC0000ull, 0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull,
	0x0D81581C010ull, 0x0E000010000ull, 0x0E000010000ull,
};

void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
			  unsigned char ae, unsigned int ctx_mask)
{
	AE(handle, ae).live_ctx_mask = ctx_mask;
}

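/*
 * Local CSR access helpers: each read or write of an AE-local CSR is retried
 * up to CSR_RETRY_TIMES, polling LOCAL_CSR_STATUS after every attempt and
 * returning only once LCS_STATUS reports the access as successful.
 */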
#define CSR_RETRY_TIMES 500
static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle,
			     unsigned char ae, unsigned int csr,
			     unsigned int *value)
{
	unsigned int iterations = CSR_RETRY_TIMES;

	do {
		*value = GET_AE_CSR(handle, ae, csr);
		if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
			return 0;
	} while (iterations--);

	pr_err("QAT: Read CSR timeout\n");
	return -EFAULT;
}

static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle,
			     unsigned char ae, unsigned int csr,
			     unsigned int value)
{
	unsigned int iterations = CSR_RETRY_TIMES;

	do {
		SET_AE_CSR(handle, ae, csr, value);
		if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
			return 0;
	} while (iterations--);

	pr_err("QAT: Write CSR Timeout\n");
	return -EFAULT;
}

static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle,
				     unsigned char ae, unsigned char ctx,
				     unsigned int *events)
{
	unsigned int cur_ctx;

	qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
	qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, events);
	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}

static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle,
			       unsigned char ae, unsigned int cycles,
			       int chk_inactive)
{
	unsigned int base_cnt = 0, cur_cnt = 0;
	unsigned int csr = (1 << ACS_ABO_BITPOS);
	int times = MAX_RETRY_TIMES;
	int elapsed_cycles = 0;

	qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &base_cnt);
	base_cnt &= 0xffff;
	while ((int)cycles > elapsed_cycles && times--) {
		if (chk_inactive)
			qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &csr);

		qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &cur_cnt);
		cur_cnt &= 0xffff;
		elapsed_cycles = cur_cnt - base_cnt;

		if (elapsed_cycles < 0)
			elapsed_cycles += 0x10000;

		/* ensure at least 8 cycles have elapsed in wait_cycles */
		if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS)))
			return 0;
	}
	if (!times) {
		pr_err("QAT: wait_num_cycles time out\n");
		return -EFAULT;
	}
	return 0;
}

#define CLR_BIT(wrd, bit) (wrd & ~(1 << bit))
#define SET_BIT(wrd, bit) (wrd | 1 << bit)

int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
			    unsigned char ae, unsigned char mode)
{
	unsigned int csr, new_csr;

	if ((mode != 4) && (mode != 8)) {
		pr_err("QAT: bad ctx mode=%d\n", mode);
		return -EINVAL;
	}

	/* Set the acceleration engine context mode to either four or eight */
	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
	csr = IGNORE_W1C_MASK & csr;
	new_csr = (mode == 4) ?
		SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) :
		CLR_BIT(csr, CE_INUSE_CONTEXTS_BITPOS);
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
	return 0;
}

int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
			   unsigned char ae, unsigned char mode)
{
	unsigned int csr, new_csr;

	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
	csr &= IGNORE_W1C_MASK;

	new_csr = (mode) ?
		SET_BIT(csr, CE_NN_MODE_BITPOS) :
		CLR_BIT(csr, CE_NN_MODE_BITPOS);

	if (new_csr != csr)
		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);

	return 0;
}

int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
			   unsigned char ae, enum icp_qat_uof_regtype lm_type,
			   unsigned char mode)
{
	unsigned int csr, new_csr;

	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
	csr &= IGNORE_W1C_MASK;
	switch (lm_type) {
	case ICP_LMEM0:
		new_csr = (mode) ?
			SET_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS) :
			CLR_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS);
		break;
	case ICP_LMEM1:
		new_csr = (mode) ?
			SET_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS) :
			CLR_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS);
		break;
	default:
		pr_err("QAT: lmType = 0x%x\n", lm_type);
		return -EINVAL;
	}

	if (new_csr != csr)
		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
	return 0;
}

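/*
 * Map a register type and register number to the address encoding used in
 * microwords and register accesses; returns BAD_REGADDR for register types
 * this helper does not handle.
 */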
static unsigned short qat_hal_get_reg_addr(unsigned int type,
					   unsigned short reg_num)
{
	unsigned short reg_addr;

	switch (type) {
	case ICP_GPA_ABS:
	case ICP_GPB_ABS:
		reg_addr = 0x80 | (reg_num & 0x7f);
		break;
	case ICP_GPA_REL:
	case ICP_GPB_REL:
		reg_addr = reg_num & 0x1f;
		break;
	case ICP_SR_RD_REL:
	case ICP_SR_WR_REL:
	case ICP_SR_REL:
		reg_addr = 0x180 | (reg_num & 0x1f);
		break;
	case ICP_SR_ABS:
		reg_addr = 0x140 | ((reg_num & 0x3) << 1);
		break;
	case ICP_DR_RD_REL:
	case ICP_DR_WR_REL:
	case ICP_DR_REL:
		reg_addr = 0x1c0 | (reg_num & 0x1f);
		break;
	case ICP_DR_ABS:
		reg_addr = 0x100 | ((reg_num & 0x3) << 1);
		break;
	case ICP_NEIGH_REL:
		reg_addr = 0x280 | (reg_num & 0x1f);
		break;
	case ICP_LMEM0:
		reg_addr = 0x200;
		break;
	case ICP_LMEM1:
		reg_addr = 0x220;
		break;
	case ICP_NO_DEST:
		reg_addr = 0x300 | (reg_num & 0xff);
		break;
	default:
		reg_addr = BAD_REGADDR;
		break;
	}
	return reg_addr;
}

void qat_hal_reset(struct icp_qat_fw_loader_handle *handle)
{
	unsigned int ae_reset_csr;

	ae_reset_csr = GET_GLB_CSR(handle, ICP_RESET);
	ae_reset_csr |= handle->hal_handle->ae_mask << RST_CSR_AE_LSB;
	ae_reset_csr |= handle->hal_handle->slice_mask << RST_CSR_QAT_LSB;
	SET_GLB_CSR(handle, ICP_RESET, ae_reset_csr);
}

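/*
 * Indirect (per-context) CSR helpers: select each requested context via
 * CSR_CTX_POINTER, access the context-relative CSR, then restore the
 * previously selected context pointer.
 */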
static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
				unsigned char ae, unsigned int ctx_mask,
				unsigned int ae_csr, unsigned int csr_val)
{
	unsigned int ctx, cur_ctx;

	qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);

	for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
		if (!(ctx_mask & (1 << ctx)))
			continue;
		qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
		qat_hal_wr_ae_csr(handle, ae, ae_csr, csr_val);
	}

	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}

static void qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
				unsigned char ae, unsigned char ctx,
				unsigned int ae_csr, unsigned int *csr_val)
{
	unsigned int cur_ctx;

	qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
	qat_hal_rd_ae_csr(handle, ae, ae_csr, csr_val);
	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}

static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
				  unsigned char ae, unsigned int ctx_mask,
				  unsigned int events)
{
	unsigned int ctx, cur_ctx;

	qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
	for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
		if (!(ctx_mask & (1 << ctx)))
			continue;
		qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
		qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_INDIRECT, events);
	}
	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}

static void qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle,
				     unsigned char ae, unsigned int ctx_mask,
				     unsigned int events)
{
	unsigned int ctx, cur_ctx;

	qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
	for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
		if (!(ctx_mask & (1 << ctx)))
			continue;
		qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
		qat_hal_wr_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT,
				  events);
	}
	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}

static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle)
{
	unsigned int base_cnt, cur_cnt;
	unsigned char ae;
	unsigned int times = MAX_RETRY_TIMES;

	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		if (!(handle->hal_handle->ae_mask & (1 << ae)))
			continue;

		qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT,
				  (unsigned int *)&base_cnt);
		base_cnt &= 0xffff;

		do {
			qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT,
					  (unsigned int *)&cur_cnt);
			cur_cnt &= 0xffff;
		} while (times-- && (cur_cnt == base_cnt));

		if (!times) {
			pr_err("QAT: AE%d is inactive!!\n", ae);
			return -EFAULT;
		}
	}

	return 0;
}

static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle)
{
	unsigned int misc_ctl;
	unsigned char ae;

	/* stop the timestamp timers */
	misc_ctl = GET_GLB_CSR(handle, MISC_CONTROL);
	if (misc_ctl & MC_TIMESTAMP_ENABLE)
		SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl &
			    (~MC_TIMESTAMP_ENABLE));

	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		if (!(handle->hal_handle->ae_mask & (1 << ae)))
			continue;
		qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0);
		qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0);
	}
	/* start timestamp timers */
	SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl | MC_TIMESTAMP_ENABLE);
}

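/*
 * Kick off eSRAM auto-initialization through the EP CSR at
 * ESRAM_AUTO_INIT_CSR_OFFSET and poll for the done flag, waiting roughly
 * ESRAM_AUTO_INIT_USED_CYCLES AE cycles between polls.
 */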
#define ESRAM_AUTO_TINIT	BIT(2)
#define ESRAM_AUTO_TINIT_DONE	BIT(3)
#define ESRAM_AUTO_INIT_USED_CYCLES (1640)
#define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C
static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
{
	void __iomem *csr_addr = handle->hal_ep_csr_addr_v +
				 ESRAM_AUTO_INIT_CSR_OFFSET;
	unsigned int csr_val, times = 30;

	csr_val = ADF_CSR_RD(csr_addr, 0);
	if ((csr_val & ESRAM_AUTO_TINIT) && (csr_val & ESRAM_AUTO_TINIT_DONE))
		return 0;

	csr_val = ADF_CSR_RD(csr_addr, 0);
	csr_val |= ESRAM_AUTO_TINIT;
	ADF_CSR_WR(csr_addr, 0, csr_val);

	do {
		qat_hal_wait_cycles(handle, 0, ESRAM_AUTO_INIT_USED_CYCLES, 0);
		csr_val = ADF_CSR_RD(csr_addr, 0);
	} while (!(csr_val & ESRAM_AUTO_TINIT_DONE) && times--);
	if (!times) {
		pr_err("QAT: Failed to init eSRAM!\n");
		return -EFAULT;
	}
	return 0;
}

#define SHRAM_INIT_CYCLES 2060
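/*
 * Take the acceleration engines and slices out of reset, enable their clocks,
 * verify the AEs are actually executing, then program sane default context
 * state and wait for eSRAM and shared RAM initialization to complete.
 */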
int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle)
{
	unsigned int ae_reset_csr;
	unsigned char ae;
	unsigned int clk_csr;
	unsigned int times = 100;
	unsigned int csr;

	/* write to the reset csr */
	ae_reset_csr = GET_GLB_CSR(handle, ICP_RESET);
	ae_reset_csr &= ~(handle->hal_handle->ae_mask << RST_CSR_AE_LSB);
	ae_reset_csr &= ~(handle->hal_handle->slice_mask << RST_CSR_QAT_LSB);
	do {
		SET_GLB_CSR(handle, ICP_RESET, ae_reset_csr);
		if (!(times--))
			goto out_err;
		csr = GET_GLB_CSR(handle, ICP_RESET);
	} while ((handle->hal_handle->ae_mask |
		 (handle->hal_handle->slice_mask << RST_CSR_QAT_LSB)) & csr);
	/* enable clock */
	clk_csr = GET_GLB_CSR(handle, ICP_GLOBAL_CLK_ENABLE);
	clk_csr |= handle->hal_handle->ae_mask << 0;
	clk_csr |= handle->hal_handle->slice_mask << 20;
	SET_GLB_CSR(handle, ICP_GLOBAL_CLK_ENABLE, clk_csr);
	if (qat_hal_check_ae_alive(handle))
		goto out_err;

	/* Set undefined power-up/reset states to reasonable default values */
	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		if (!(handle->hal_handle->ae_mask & (1 << ae)))
			continue;
		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
				  INIT_CTX_ENABLE_VALUE);
		qat_hal_wr_indr_csr(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX,
				    CTX_STS_INDIRECT,
				    handle->hal_handle->upc_mask &
				    INIT_PC_VALUE);
		qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
		qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
		qat_hal_put_wakeup_event(handle, ae,
					 ICP_QAT_UCLO_AE_ALL_CTX,
					 INIT_WAKEUP_EVENTS_VALUE);
		qat_hal_put_sig_event(handle, ae,
				      ICP_QAT_UCLO_AE_ALL_CTX,
				      INIT_SIG_EVENTS_VALUE);
	}
	if (qat_hal_init_esram(handle))
		goto out_err;
	if (qat_hal_wait_cycles(handle, 0, SHRAM_INIT_CYCLES, 0))
		goto out_err;
	qat_hal_reset_timestamp(handle);

	return 0;
out_err:
	pr_err("QAT: failed to get device out of reset\n");
	return -EFAULT;
}

static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle,
				unsigned char ae, unsigned int ctx_mask)
{
	unsigned int ctx;

	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
	ctx &= IGNORE_W1C_MASK &
		(~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS));
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
}

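/*
 * Microstore words carry a 7-bit ECC in bits 44-50. qat_hal_set_uword_ecc()
 * recomputes that ECC as the parity of seven fixed bit masks applied to the
 * instruction word before it is written to the ustore.
 */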
static uint64_t qat_hal_parity_64bit(uint64_t word)
{
	word ^= word >> 1;
	word ^= word >> 2;
	word ^= word >> 4;
	word ^= word >> 8;
	word ^= word >> 16;
	word ^= word >> 32;
	return word & 1;
}

static uint64_t qat_hal_set_uword_ecc(uint64_t uword)
{
	uint64_t bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL,
		bit2_mask = 0xe387e0781e1ULL, bit3_mask = 0x7cb8e388e22ULL,
		bit4_mask = 0xaf5b2c93244ULL, bit5_mask = 0xf56d5525488ULL,
		bit6_mask = 0xdaf69a46910ULL;

	/* clear the ecc bits */
	uword &= ~(0x7fULL << 0x2C);
	uword |= qat_hal_parity_64bit(bit0_mask & uword) << 0x2C;
	uword |= qat_hal_parity_64bit(bit1_mask & uword) << 0x2D;
	uword |= qat_hal_parity_64bit(bit2_mask & uword) << 0x2E;
	uword |= qat_hal_parity_64bit(bit3_mask & uword) << 0x2F;
	uword |= qat_hal_parity_64bit(bit4_mask & uword) << 0x30;
	uword |= qat_hal_parity_64bit(bit5_mask & uword) << 0x31;
	uword |= qat_hal_parity_64bit(bit6_mask & uword) << 0x32;
	return uword;
}

void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
		       unsigned char ae, unsigned int uaddr,
		       unsigned int words_num, uint64_t *uword)
{
	unsigned int ustore_addr;
	unsigned int i;

	qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
	uaddr |= UA_ECS;
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
	for (i = 0; i < words_num; i++) {
		unsigned int uwrd_lo, uwrd_hi;
		uint64_t tmp;

		tmp = qat_hal_set_uword_ecc(uword[i]);
		uwrd_lo = (unsigned int)(tmp & 0xffffffff);
		uwrd_hi = (unsigned int)(tmp >> 0x20);
		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
	}
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
}

static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle,
			       unsigned char ae, unsigned int ctx_mask)
{
	unsigned int ctx;

	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
	ctx &= IGNORE_W1C_MASK;
	ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF;
	ctx |= (ctx_mask << CE_ENABLE_BITPOS);
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
}

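/*
 * Reset the GPRs and read transfer registers of every enabled AE: download
 * and run the clearing microprogram in inst[] on all contexts, wait for each
 * AE to go idle, then restore the initial context state.
 */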
static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
{
	unsigned char ae;
	unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX;
	int times = MAX_RETRY_TIMES;
	unsigned int csr_val = 0;
	unsigned short reg;
	unsigned int savctx = 0;
	int ret = 0;

	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		if (!(handle->hal_handle->ae_mask & (1 << ae)))
			continue;
		for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) {
			qat_hal_init_rd_xfer(handle, ae, 0, ICP_SR_RD_ABS,
					     reg, 0);
			qat_hal_init_rd_xfer(handle, ae, 0, ICP_DR_RD_ABS,
					     reg, 0);
		}
		qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
		csr_val &= ~(1 << MMC_SHARE_CS_BITPOS);
		qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
		qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr_val);
		csr_val &= IGNORE_W1C_MASK;
		csr_val |= CE_NN_MODE;
		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val);
		qat_hal_wr_uwords(handle, ae, 0, ARRAY_SIZE(inst),
				  (uint64_t *)inst);
		qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
				    handle->hal_handle->upc_mask &
				    INIT_PC_VALUE);
		qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
		qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0);
		qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY);
		qat_hal_wr_indr_csr(handle, ae, ctx_mask,
				    CTX_SIG_EVENTS_INDIRECT, 0);
		qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
		qat_hal_enable_ctx(handle, ae, ctx_mask);
	}
	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		if (!(handle->hal_handle->ae_mask & (1 << ae)))
			continue;
		/* wait for AE to finish */
		do {
			ret = qat_hal_wait_cycles(handle, ae, 20, 1);
		} while (ret && times--);

		if (!times) {
			pr_err("QAT: clear GPR of AE %d failed\n", ae);
			return -EINVAL;
		}
		qat_hal_disable_ctx(handle, ae, ctx_mask);
		qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
				  savctx & ACS_ACNO);
		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
				  INIT_CTX_ENABLE_VALUE);
		qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
				    handle->hal_handle->upc_mask &
				    INIT_PC_VALUE);
		qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
		qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
		qat_hal_put_wakeup_event(handle, ae, ctx_mask,
					 INIT_WAKEUP_EVENTS_VALUE);
		qat_hal_put_sig_event(handle, ae, ctx_mask,
				      INIT_SIG_EVENTS_VALUE);
	}
	return 0;
}

#define ICP_DH895XCC_AE_OFFSET      0x20000
#define ICP_DH895XCC_CAP_OFFSET     (ICP_DH895XCC_AE_OFFSET + 0x10000)
#define LOCAL_TO_XFER_REG_OFFSET    0x800
#define ICP_DH895XCC_EP_OFFSET      0x3a000
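/*
 * Map the DH895xCC CAP, AE and EP CSR regions, build the per-AE bookkeeping
 * in hal_handle, bring the AEs out of reset, clear their GPRs and enable the
 * ALU_OUT signature CSR on every enabled AE.
 */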
int qat_hal_init(struct adf_accel_dev *accel_dev)
{
	unsigned char ae;
	unsigned int max_en_ae_id = 0;
	struct icp_qat_fw_loader_handle *handle;
	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *misc_bar =
			&pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)];
	struct adf_bar *sram_bar =
			&pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	handle->hal_cap_g_ctl_csr_addr_v = misc_bar->virt_addr +
						ICP_DH895XCC_CAP_OFFSET;
	handle->hal_cap_ae_xfer_csr_addr_v = misc_bar->virt_addr +
						ICP_DH895XCC_AE_OFFSET;
	handle->hal_ep_csr_addr_v = misc_bar->virt_addr +
				    ICP_DH895XCC_EP_OFFSET;
	handle->hal_cap_ae_local_csr_addr_v =
		handle->hal_cap_ae_xfer_csr_addr_v + LOCAL_TO_XFER_REG_OFFSET;
	handle->hal_sram_addr_v = sram_bar->virt_addr;
	handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL);
	if (!handle->hal_handle)
		goto out_hal_handle;
	handle->hal_handle->revision_id = accel_dev->accel_pci_dev.revid;
	handle->hal_handle->ae_mask = hw_data->ae_mask;
	handle->hal_handle->slice_mask = hw_data->accel_mask;
	/* create AE objects */
	handle->hal_handle->upc_mask = 0x1ffff;
	handle->hal_handle->max_ustore = 0x4000;
	for (ae = 0; ae < ICP_QAT_UCLO_MAX_AE; ae++) {
		if (!(hw_data->ae_mask & (1 << ae)))
			continue;
		handle->hal_handle->aes[ae].free_addr = 0;
		handle->hal_handle->aes[ae].free_size =
		    handle->hal_handle->max_ustore;
		handle->hal_handle->aes[ae].ustore_size =
		    handle->hal_handle->max_ustore;
		handle->hal_handle->aes[ae].live_ctx_mask =
						ICP_QAT_UCLO_AE_ALL_CTX;
		max_en_ae_id = ae;
	}
	handle->hal_handle->ae_max_num = max_en_ae_id + 1;
	/* take all AEs out of reset */
	if (qat_hal_clr_reset(handle)) {
		dev_err(&GET_DEV(accel_dev), "qat_hal_clr_reset error\n");
		goto out_err;
	}
	if (qat_hal_clear_gpr(handle))
		goto out_err;
	/* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */
	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		unsigned int csr_val = 0;

		if (!(hw_data->ae_mask & (1 << ae)))
			continue;
		qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val);
		csr_val |= 0x1;
		qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val);
	}
	accel_dev->fw_loader->fw_loader = handle;
	return 0;

out_err:
	kfree(handle->hal_handle);
out_hal_handle:
	kfree(handle);
	return -EFAULT;
}

void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle)
{
	if (!handle)
		return;
	kfree(handle->hal_handle);
	kfree(handle);
}

void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
		   unsigned int ctx_mask)
{
	qat_hal_put_wakeup_event(handle, ae, (~ctx_mask) &
				 ICP_QAT_UCLO_AE_ALL_CTX, 0x10000);
	qat_hal_enable_ctx(handle, ae, ctx_mask);
}

void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
		  unsigned int ctx_mask)
{
	qat_hal_disable_ctx(handle, ae, ctx_mask);
}

void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
		    unsigned char ae, unsigned int ctx_mask, unsigned int upc)
{
	qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
			    handle->hal_handle->upc_mask & upc);
}

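/*
 * Read back words_num microwords starting at uaddr through the USTORE_DATA
 * CSRs, with bit 2 of AE_MISC_CONTROL (apparently the shared control store
 * setting, cf. MMC_SHARE_CS_BITPOS above) masked off for the duration of the
 * read and restored afterwards.
 */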
static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle,
			       unsigned char ae, unsigned int uaddr,
			       unsigned int words_num, uint64_t *uword)
{
	unsigned int i, uwrd_lo, uwrd_hi;
	unsigned int ustore_addr, misc_control;

	qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &misc_control);
	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL,
			  misc_control & 0xfffffffb);
	qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
	uaddr |= UA_ECS;
	for (i = 0; i < words_num; i++) {
		qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
		uaddr++;
		qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER, &uwrd_lo);
		qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER, &uwrd_hi);
		uword[i] = uwrd_hi;
		uword[i] = (uword[i] << 0x20) | uwrd_lo;
	}
	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, misc_control);
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
}

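/*
 * Write 32-bit data words into the micro-store as immediate-load microwords:
 * each word is split into byte fields plus per-16-bit-half parity bits before
 * being written through USTORE_DATA_LOWER/UPPER.
 */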
void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle,
		     unsigned char ae, unsigned int uaddr,
		     unsigned int words_num, unsigned int *data)
{
	unsigned int i, ustore_addr;

	qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
	uaddr |= UA_ECS;
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
	for (i = 0; i < words_num; i++) {
		unsigned int uwrd_lo, uwrd_hi, tmp;

		uwrd_lo = ((data[i] & 0xfff0000) << 4) | (0x3 << 18) |
			  ((data[i] & 0xff00) << 2) |
			  (0x3 << 8) | (data[i] & 0xff);
		uwrd_hi = (0xf << 4) | ((data[i] & 0xf0000000) >> 28);
		uwrd_hi |= (hweight32(data[i] & 0xffff) & 0x1) << 8;
		tmp = ((data[i] >> 0x10) & 0xffff);
		uwrd_hi |= (hweight32(tmp) & 0x1) << 9;
		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
	}
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
}

#define MAX_EXEC_INST 100
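/*
 * Execute an arbitrary snippet of microcode on one context: save the context
 * state and the ustore words that will be overwritten (up to MAX_EXEC_INST
 * of them), download and run the snippet, wait up to max_cycle cycles for it
 * to finish, then restore everything that was saved.
 */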
static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
				   unsigned char ae, unsigned char ctx,
				   uint64_t *micro_inst, unsigned int inst_num,
				   int code_off, unsigned int max_cycle,
				   unsigned int *endpc)
{
	uint64_t savuwords[MAX_EXEC_INST];
	unsigned int ind_lm_addr0, ind_lm_addr1;
	unsigned int ind_lm_addr_byte0, ind_lm_addr_byte1;
	unsigned int ind_cnt_sig;
	unsigned int ind_sig, act_sig;
	unsigned int csr_val = 0, newcsr_val;
	unsigned int savctx;
	unsigned int savcc, wakeup_events, savpc;
	unsigned int ctxarb_ctl, ctx_enables;

	if ((inst_num > handle->hal_handle->max_ustore) || !micro_inst) {
		pr_err("QAT: invalid instruction num %d\n", inst_num);
		return -EINVAL;
	}
	/* save current context */
	qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT, &ind_lm_addr0);
	qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT, &ind_lm_addr1);
	qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX,
			    &ind_lm_addr_byte0);
	qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX,
			    &ind_lm_addr_byte1);
	if (inst_num <= MAX_EXEC_INST)
		qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords);
	qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events);
	qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, &savpc);
	savpc = (savpc & handle->hal_handle->upc_mask) >> 0;
	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
	ctx_enables &= IGNORE_W1C_MASK;
	qat_hal_rd_ae_csr(handle, ae, CC_ENABLE, &savcc);
	qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
	qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_ctl);
	qat_hal_rd_indr_csr(handle, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT,
			    &ind_cnt_sig);
	qat_hal_rd_indr_csr(handle, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &ind_sig);
	qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, &act_sig);
	/* execute micro codes */
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
	qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, 0);
	qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, ctx & ACS_ACNO);
	if (code_off)
		qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc & 0xffffdfff);
	qat_hal_put_wakeup_event(handle, ae, (1 << ctx), XCWE_VOLUNTARY);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, 0);
	qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
	qat_hal_enable_ctx(handle, ae, (1 << ctx));
	/* wait for micro codes to finish */
	if (qat_hal_wait_cycles(handle, ae, max_cycle, 1) != 0)
		return -EFAULT;
	if (endpc) {
		unsigned int ctx_status;

		qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT,
				    &ctx_status);
		*endpc = ctx_status & handle->hal_handle->upc_mask;
	}
	/* restore to saved context */
	qat_hal_disable_ctx(handle, ae, (1 << ctx));
	if (inst_num <= MAX_EXEC_INST)
		qat_hal_wr_uwords(handle, ae, 0, inst_num, savuwords);
	qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT,
			    handle->hal_handle->upc_mask & savpc);
	qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
	newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
	qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc);
	qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, savctx & ACS_ACNO);
	qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_ctl);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
			    LM_ADDR_0_INDIRECT, ind_lm_addr0);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
			    LM_ADDR_1_INDIRECT, ind_lm_addr1);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
			    INDIRECT_LM_ADDR_0_BYTE_INDEX, ind_lm_addr_byte0);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
			    INDIRECT_LM_ADDR_1_BYTE_INDEX, ind_lm_addr_byte1);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
			    FUTURE_COUNT_SIGNAL_INDIRECT, ind_cnt_sig);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
			    CTX_SIG_EVENTS_INDIRECT, ind_sig);
	qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, act_sig);
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);

	return 0;
}

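/*
 * Read a context-relative register by writing a single microword that moves
 * the register through the ALU, executing it, and reading the result back
 * from the ALU_OUT CSR.
 */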
static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
			      unsigned char ae, unsigned char ctx,
			      enum icp_qat_uof_regtype reg_type,
			      unsigned short reg_num, unsigned int *data)
{
	unsigned int savctx, uaddr, uwrd_lo, uwrd_hi;
	unsigned int ctxarb_cntl, ustore_addr, ctx_enables;
	unsigned short reg_addr;
	int status = 0;
	uint64_t insts, savuword;

	reg_addr = qat_hal_get_reg_addr(reg_type, reg_num);
	if (reg_addr == BAD_REGADDR) {
		pr_err("QAT: bad regaddr=0x%x\n", reg_addr);
		return -EINVAL;
	}
	switch (reg_type) {
	case ICP_GPA_REL:
		insts = 0xA070000000ull | (reg_addr & 0x3ff);
		break;
	default:
		insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10);
		break;
	}
	qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
	qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_cntl);
	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
	ctx_enables &= IGNORE_W1C_MASK;
	if (ctx != (savctx & ACS_ACNO))
		qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
				  ctx & ACS_ACNO);
	qat_hal_get_uwords(handle, ae, 0, 1, &savuword);
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
	qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
	uaddr = UA_ECS;
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
	insts = qat_hal_set_uword_ecc(insts);
	uwrd_lo = (unsigned int)(insts & 0xffffffff);
	uwrd_hi = (unsigned int)(insts >> 0x20);
	qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
	qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
	/* delay for at least 8 cycles */
	qat_hal_wait_cycles(handle, ae, 0x8, 0);
	/*
	 * read ALU output
	 * the instruction should have been executed
	 * prior to clearing the ECS in putUwords
	 */
	qat_hal_rd_ae_csr(handle, ae, ALU_OUT, data);
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
	qat_hal_wr_uwords(handle, ae, 0, 1, &savuword);
	if (ctx != (savctx & ACS_ACNO))
		qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
				  savctx & ACS_ACNO);
	qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_cntl);
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);

	return status;
}

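/*
 * Write a context-relative register by patching a four-instruction immediate
 * load sequence with the destination address and the two 16-bit halves of
 * the value, then running it with qat_hal_exec_micro_inst().
 */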
static int qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle,
			      unsigned char ae, unsigned char ctx,
			      enum icp_qat_uof_regtype reg_type,
			      unsigned short reg_num, unsigned int data)
{
	unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo;
	uint64_t insts[] = {
		0x0F440000000ull,
		0x0F040000000ull,
		0x0F0000C0300ull,
		0x0E000010000ull
	};
	const int num_inst = ARRAY_SIZE(insts), code_off = 1;
	const int imm_w1 = 0, imm_w0 = 1;

	dest_addr = qat_hal_get_reg_addr(reg_type, reg_num);
	if (dest_addr == BAD_REGADDR) {
		pr_err("QAT: bad destAddr=0x%x\n", dest_addr);
		return -EINVAL;
	}

	data16lo = 0xffff & data;
	data16hi = 0xffff & (data >> 0x10);
	src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
					  (0xff & data16hi));
	src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
					   (0xff & data16lo));
	switch (reg_type) {
	case ICP_GPA_REL:
		insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
		    ((src_hiaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
		insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
		    ((src_lowaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
		break;
	default:
		insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
		    ((dest_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);

		insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
		    ((dest_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
		break;
	}

	return qat_hal_exec_micro_inst(handle, ae, ctx, insts, num_inst,
				       code_off, num_inst * 0x5, NULL);
}

int qat_hal_get_ins_num(void)
{
	return ARRAY_SIZE(inst_4b);
}

static int qat_hal_concat_micro_code(uint64_t *micro_inst,
				     unsigned int inst_num, unsigned int size,
				     unsigned int addr, unsigned int *value)
{
	int i;
	unsigned int cur_value;
	const uint64_t *inst_arr;
	int fixup_offset;
	int usize = 0;
	int orig_num;

	orig_num = inst_num;
	cur_value = value[0];
	inst_arr = inst_4b;
	usize = ARRAY_SIZE(inst_4b);
	fixup_offset = inst_num;
	for (i = 0; i < usize; i++)
		micro_inst[inst_num++] = inst_arr[i];
	INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], (addr));
	fixup_offset++;
	INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], 0);
	fixup_offset++;
	INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0));
	fixup_offset++;
	INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0x10));

	return inst_num - orig_num;
}

static int qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle,
				      unsigned char ae, unsigned char ctx,
				      int *pfirst_exec, uint64_t *micro_inst,
				      unsigned int inst_num)
{
	int stat = 0;
	unsigned int gpra0 = 0, gpra1 = 0, gpra2 = 0;
	unsigned int gprb0 = 0, gprb1 = 0;

	if (*pfirst_exec) {
		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, &gpra0);
		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, &gpra1);
		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, &gpra2);
		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, &gprb0);
		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, &gprb1);
		*pfirst_exec = 0;
	}
	stat = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, inst_num, 1,
				       inst_num * 0x5, NULL);
	if (stat != 0)
		return -EFAULT;
	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, gpra0);
	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, gpra1);
	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, gpra2);
	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, gprb0);
	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, gprb1);

	return 0;
}

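/*
 * Batch-initialize local memory: walk the lm_init_header list, append one
 * inst_4b store fragment per entry (the buffer is capped at max_ustore
 * words), terminate the program and execute it once via
 * qat_hal_exec_micro_init_lm().
 */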
int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
			unsigned char ae,
			struct icp_qat_uof_batch_init *lm_init_header)
{
	struct icp_qat_uof_batch_init *plm_init;
	uint64_t *micro_inst_arry;
	int micro_inst_num;
	int alloc_inst_size;
	int first_exec = 1;
	int stat = 0;

	plm_init = lm_init_header->next;
	alloc_inst_size = lm_init_header->size;
	if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore)
		alloc_inst_size = handle->hal_handle->max_ustore;
	micro_inst_arry = kmalloc_array(alloc_inst_size, sizeof(uint64_t),
					GFP_KERNEL);
	if (!micro_inst_arry)
		return -ENOMEM;
	micro_inst_num = 0;
	while (plm_init) {
		unsigned int addr, *value, size;

		ae = plm_init->ae;
		addr = plm_init->addr;
		value = plm_init->value;
		size = plm_init->size;
		micro_inst_num += qat_hal_concat_micro_code(micro_inst_arry,
							    micro_inst_num,
							    size, addr, value);
		plm_init = plm_init->next;
	}
	/* exec micro codes */
	if (micro_inst_arry && (micro_inst_num > 0)) {
		micro_inst_arry[micro_inst_num++] = 0x0E000010000ull;
		stat = qat_hal_exec_micro_init_lm(handle, ae, 0, &first_exec,
						  micro_inst_arry,
						  micro_inst_num);
	}
	kfree(micro_inst_arry);
	return stat;
}

static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle,
				   unsigned char ae, unsigned char ctx,
				   enum icp_qat_uof_regtype reg_type,
				   unsigned short reg_num, unsigned int val)
{
	int status = 0;
	unsigned int reg_addr;
	unsigned int ctx_enables;
	unsigned short mask;
	unsigned short dr_offset = 0x10;

	status = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
	if (CE_INUSE_CONTEXTS & ctx_enables) {
		if (ctx & 0x1) {
			pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx);
			return -EINVAL;
		}
		mask = 0x1f;
		dr_offset = 0x20;
	} else {
		mask = 0x0f;
	}
	if (reg_num & ~mask)
		return -EINVAL;
	reg_addr = reg_num + (ctx << 0x5);
	switch (reg_type) {
	case ICP_SR_RD_REL:
	case ICP_SR_REL:
		SET_AE_XFER(handle, ae, reg_addr, val);
		break;
	case ICP_DR_RD_REL:
	case ICP_DR_REL:
		SET_AE_XFER(handle, ae, (reg_addr + dr_offset), val);
		break;
	default:
		status = -EINVAL;
		break;
	}
	return status;
}

static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
				   unsigned char ae, unsigned char ctx,
				   enum icp_qat_uof_regtype reg_type,
				   unsigned short reg_num, unsigned int data)
{
	unsigned int gprval, ctx_enables;
	unsigned short src_hiaddr, src_lowaddr, gpr_addr, xfr_addr, data16hi,
	    data16low;
	unsigned short reg_mask;
	int status = 0;
	uint64_t micro_inst[] = {
		0x0F440000000ull,
		0x0F040000000ull,
		0x0A000000000ull,
		0x0F0000C0300ull,
		0x0E000010000ull
	};
	const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1;
	const unsigned short gprnum = 0, dly = num_inst * 0x5;

	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
	if (CE_INUSE_CONTEXTS & ctx_enables) {
		if (ctx & 0x1) {
			pr_err("QAT: 4-ctx mode,ctx=0x%x\n", ctx);
			return -EINVAL;
		}
		reg_mask = (unsigned short)~0x1f;
	} else {
		reg_mask = (unsigned short)~0xf;
	}
	if (reg_num & reg_mask)
		return -EINVAL;
	xfr_addr = qat_hal_get_reg_addr(reg_type, reg_num);
	if (xfr_addr == BAD_REGADDR) {
		pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr);
		return -EINVAL;
	}
	qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
	gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum);
	data16low = 0xffff & data;
	data16hi = 0xffff & (data >> 0x10);
	src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
					  (unsigned short)(0xff & data16hi));
	src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
					   (unsigned short)(0xff & data16low));
	micro_inst[0] = micro_inst[0x0] | ((data16hi >> 8) << 20) |
	    ((gpr_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
	micro_inst[1] = micro_inst[0x1] | ((data16low >> 8) << 20) |
	    ((gpr_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
	micro_inst[0x2] = micro_inst[0x2] |
	    ((xfr_addr & 0x3ff) << 20) | ((gpr_addr & 0x3ff) << 10);
	status = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, num_inst,
					 code_off, dly, NULL);
	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, gprval);
	return status;
}

static int qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle,
			      unsigned char ae, unsigned char ctx,
			      unsigned short nn, unsigned int val)
{
	unsigned int ctx_enables;
	int stat = 0;

	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
	ctx_enables &= IGNORE_W1C_MASK;
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE);

	stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, ICP_NEIGH_REL, nn, val);
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
	return stat;
}

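/*
 * Convert an absolute register number into its context-relative register
 * number and owning context, taking the AE's current 4- or 8-context mode
 * into account.
 */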
static int qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle
				      *handle, unsigned char ae,
				      unsigned short absreg_num,
				      unsigned short *relreg,
				      unsigned char *ctx)
{
	unsigned int ctx_enables;

	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
	if (ctx_enables & CE_INUSE_CONTEXTS) {
		/* 4-ctx mode */
		*relreg = absreg_num & 0x1F;
		*ctx = (absreg_num >> 0x4) & 0x6;
	} else {
		/* 8-ctx mode */
		*relreg = absreg_num & 0x0F;
		*ctx = (absreg_num >> 0x4) & 0x7;
	}
	return 0;
}

int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
		     unsigned char ae, unsigned char ctx_mask,
		     enum icp_qat_uof_regtype reg_type,
		     unsigned short reg_num, unsigned int regdata)
{
	int stat = 0;
	unsigned short reg;
	unsigned char ctx = 0;
	enum icp_qat_uof_regtype type;

	if (reg_num >= ICP_QAT_UCLO_MAX_GPR_REG)
		return -EINVAL;

	do {
		if (ctx_mask == 0) {
			qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
						   &ctx);
			type = reg_type - 1;
		} else {
			reg = reg_num;
			type = reg_type;
			if (!test_bit(ctx, (unsigned long *)&ctx_mask))
				continue;
		}
		stat = qat_hal_wr_rel_reg(handle, ae, ctx, type, reg, regdata);
		if (stat) {
			pr_err("QAT: write gpr fail\n");
			return -EINVAL;
		}
	} while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));

	return 0;
}

int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
			 unsigned char ae, unsigned char ctx_mask,
			 enum icp_qat_uof_regtype reg_type,
			 unsigned short reg_num, unsigned int regdata)
{
	int stat = 0;
	unsigned short reg;
	unsigned char ctx = 0;
	enum icp_qat_uof_regtype type;

	if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
		return -EINVAL;

	do {
		if (ctx_mask == 0) {
			qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
						   &ctx);
			type = reg_type - 3;
		} else {
			reg = reg_num;
			type = reg_type;
			if (!test_bit(ctx, (unsigned long *)&ctx_mask))
				continue;
		}
		stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, type, reg,
					       regdata);
		if (stat) {
			pr_err("QAT: write wr xfer fail\n");
			return -EINVAL;
		}
	} while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));

	return 0;
}

int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
			 unsigned char ae, unsigned char ctx_mask,
			 enum icp_qat_uof_regtype reg_type,
			 unsigned short reg_num, unsigned int regdata)
{
	int stat = 0;
	unsigned short reg;
	unsigned char ctx = 0;
	enum icp_qat_uof_regtype type;

	if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
		return -EINVAL;

	do {
		if (ctx_mask == 0) {
			qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
						   &ctx);
			type = reg_type - 3;
		} else {
			reg = reg_num;
			type = reg_type;
			if (!test_bit(ctx, (unsigned long *)&ctx_mask))
				continue;
		}
		stat = qat_hal_put_rel_rd_xfer(handle, ae, ctx, type, reg,
					       regdata);
		if (stat) {
			pr_err("QAT: write rd xfer fail\n");
			return -EINVAL;
		}
	} while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));

	return 0;
}

int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
		    unsigned char ae, unsigned char ctx_mask,
		    unsigned short reg_num, unsigned int regdata)
{
	int stat = 0;
	unsigned char ctx;

	if (ctx_mask == 0)
		return -EINVAL;

	for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
		if (!test_bit(ctx, (unsigned long *)&ctx_mask))
			continue;
		stat = qat_hal_put_rel_nn(handle, ae, ctx, reg_num, regdata);
		if (stat) {
			pr_err("QAT: write neigh error\n");
			return -EINVAL;
		}
	}

	return 0;
}