1 /*
2   This file is provided under a dual BSD/GPLv2 license.  When using or
3   redistributing this file, you may do so under either license.
4 
5   GPL LICENSE SUMMARY
6   Copyright(c) 2014 Intel Corporation.
7   This program is free software; you can redistribute it and/or modify
8   it under the terms of version 2 of the GNU General Public License as
9   published by the Free Software Foundation.
10 
11   This program is distributed in the hope that it will be useful, but
12   WITHOUT ANY WARRANTY; without even the implied warranty of
13   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14   General Public License for more details.
15 
16   Contact Information:
17   qat-linux@intel.com
18 
19   BSD LICENSE
20   Copyright(c) 2014 Intel Corporation.
21   Redistribution and use in source and binary forms, with or without
22   modification, are permitted provided that the following conditions
23   are met:
24 
25     * Redistributions of source code must retain the above copyright
26       notice, this list of conditions and the following disclaimer.
27     * Redistributions in binary form must reproduce the above copyright
28       notice, this list of conditions and the following disclaimer in
29       the documentation and/or other materials provided with the
30       distribution.
31     * Neither the name of Intel Corporation nor the names of its
32       contributors may be used to endorse or promote products derived
33       from this software without specific prior written permission.
34 
35   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46 */
47 #include <linux/module.h>
48 #include <linux/slab.h>
49 #include <linux/crypto.h>
50 #include <crypto/aead.h>
51 #include <crypto/aes.h>
52 #include <crypto/sha.h>
53 #include <crypto/hash.h>
54 #include <crypto/algapi.h>
55 #include <crypto/authenc.h>
56 #include <crypto/rng.h>
57 #include <linux/dma-mapping.h>
58 #include "adf_accel_devices.h"
59 #include "adf_transport.h"
60 #include "adf_common_drv.h"
61 #include "qat_crypto.h"
62 #include "icp_qat_hw.h"
63 #include "icp_qat_fw.h"
64 #include "icp_qat_fw_la.h"
65 
66 #define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
67 	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
68 				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
69 				       ICP_QAT_HW_CIPHER_ENCRYPT)
70 
71 #define QAT_AES_HW_CONFIG_CBC_DEC(alg) \
72 	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
73 				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
74 				       ICP_QAT_HW_CIPHER_DECRYPT)
75 
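/*
 * Algorithms are registered with the crypto API when the first accelerator
 * device comes up and unregistered when the last one goes away; active_devs
 * keeps that reference count under algs_lock.
 */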
76 static DEFINE_MUTEX(algs_lock);
77 static unsigned int active_devs;
78 
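/*
 * Flat buffer list handed to the firmware: each qat_alg_buf carries the
 * DMA address and length of one data buffer (assoc, IV or payload fragment).
 */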
79 struct qat_alg_buf {
80 	uint32_t len;
81 	uint32_t resrvd;
82 	uint64_t addr;
83 } __packed;
84 
85 struct qat_alg_buf_list {
86 	uint64_t resrvd;
87 	uint32_t num_bufs;
88 	uint32_t num_mapped_bufs;
89 	struct qat_alg_buf bufers[];
90 } __packed __aligned(64);
91 
92 /* Common content descriptor */
93 struct qat_alg_cd {
94 	union {
95 		struct qat_enc { /* Encrypt content desc */
96 			struct icp_qat_hw_cipher_algo_blk cipher;
97 			struct icp_qat_hw_auth_algo_blk hash;
98 		} qat_enc_cd;
99 		struct qat_dec { /* Decrypt content desc */
100 			struct icp_qat_hw_auth_algo_blk hash;
101 			struct icp_qat_hw_cipher_algo_blk cipher;
102 		} qat_dec_cd;
103 	};
104 } __aligned(64);
105 
106 struct qat_alg_aead_ctx {
107 	struct qat_alg_cd *enc_cd;
108 	struct qat_alg_cd *dec_cd;
109 	dma_addr_t enc_cd_paddr;
110 	dma_addr_t dec_cd_paddr;
111 	struct icp_qat_fw_la_bulk_req enc_fw_req;
112 	struct icp_qat_fw_la_bulk_req dec_fw_req;
113 	struct crypto_shash *hash_tfm;
114 	enum icp_qat_hw_auth_algo qat_hash_alg;
115 	struct qat_crypto_instance *inst;
116 	struct crypto_tfm *tfm;
117 	uint8_t salt[AES_BLOCK_SIZE];
118 	spinlock_t lock;	/* protects qat_alg_aead_ctx struct */
119 };
120 
121 struct qat_alg_ablkcipher_ctx {
122 	struct icp_qat_hw_cipher_algo_blk *enc_cd;
123 	struct icp_qat_hw_cipher_algo_blk *dec_cd;
124 	dma_addr_t enc_cd_paddr;
125 	dma_addr_t dec_cd_paddr;
126 	struct icp_qat_fw_la_bulk_req enc_fw_req;
127 	struct icp_qat_fw_la_bulk_req dec_fw_req;
128 	struct qat_crypto_instance *inst;
129 	struct crypto_tfm *tfm;
130 	spinlock_t lock;	/* protects qat_alg_ablkcipher_ctx struct */
131 };
132 
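/*
 * Physical package of the CPU submitting the request; used in setkey to
 * pick a QAT crypto instance on the same node.
 */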
133 static int get_current_node(void)
134 {
135 	return cpu_data(current_thread_info()->cpu).phys_proc_id;
136 }
137 
138 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
139 {
140 	switch (qat_hash_alg) {
141 	case ICP_QAT_HW_AUTH_ALGO_SHA1:
142 		return ICP_QAT_HW_SHA1_STATE1_SZ;
143 	case ICP_QAT_HW_AUTH_ALGO_SHA256:
144 		return ICP_QAT_HW_SHA256_STATE1_SZ;
145 	case ICP_QAT_HW_AUTH_ALGO_SHA512:
146 		return ICP_QAT_HW_SHA512_STATE1_SZ;
147 	default:
148 		return -EFAULT;
149 	}
150 	return -EFAULT;
151 }
152 
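/*
 * Precompute the HMAC inner (ipad) and outer (opad) partial hash states for
 * the session authentication key and store them in the hardware auth block,
 * so the device only has to continue the hash over the request data.
 */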
153 static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
154 				  struct qat_alg_aead_ctx *ctx,
155 				  const uint8_t *auth_key,
156 				  unsigned int auth_keylen)
157 {
158 	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
159 	struct sha1_state sha1;
160 	struct sha256_state sha256;
161 	struct sha512_state sha512;
162 	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
163 	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
164 	char ipad[block_size];
165 	char opad[block_size];
166 	__be32 *hash_state_out;
167 	__be64 *hash512_state_out;
168 	int i, offset;
169 
170 	memset(ipad, 0, block_size);
171 	memset(opad, 0, block_size);
172 	shash->tfm = ctx->hash_tfm;
173 	shash->flags = 0x0;
174 
175 	if (auth_keylen > block_size) {
176 		int ret = crypto_shash_digest(shash, auth_key,
177 					      auth_keylen, ipad);
178 		if (ret)
179 			return ret;
180 
181 		memcpy(opad, ipad, digest_size);
182 	} else {
183 		memcpy(ipad, auth_key, auth_keylen);
184 		memcpy(opad, auth_key, auth_keylen);
185 	}
186 
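	/* XOR in the HMAC ipad/opad constants from RFC 2104 */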
187 	for (i = 0; i < block_size; i++) {
188 		char *ipad_ptr = ipad + i;
189 		char *opad_ptr = opad + i;
190 		*ipad_ptr ^= 0x36;
191 		*opad_ptr ^= 0x5C;
192 	}
193 
194 	if (crypto_shash_init(shash))
195 		return -EFAULT;
196 
197 	if (crypto_shash_update(shash, ipad, block_size))
198 		return -EFAULT;
199 
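	/*
	 * Export the partially hashed ipad block and store it big-endian in
	 * state1 as the inner hash initial state for the hardware.
	 */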
200 	hash_state_out = (__be32 *)hash->sha.state1;
201 	hash512_state_out = (__be64 *)hash_state_out;
202 
203 	switch (ctx->qat_hash_alg) {
204 	case ICP_QAT_HW_AUTH_ALGO_SHA1:
205 		if (crypto_shash_export(shash, &sha1))
206 			return -EFAULT;
207 		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
208 			*hash_state_out = cpu_to_be32(*(sha1.state + i));
209 		break;
210 	case ICP_QAT_HW_AUTH_ALGO_SHA256:
211 		if (crypto_shash_export(shash, &sha256))
212 			return -EFAULT;
213 		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
214 			*hash_state_out = cpu_to_be32(*(sha256.state + i));
215 		break;
216 	case ICP_QAT_HW_AUTH_ALGO_SHA512:
217 		if (crypto_shash_export(shash, &sha512))
218 			return -EFAULT;
219 		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
220 			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
221 		break;
222 	default:
223 		return -EFAULT;
224 	}
225 
226 	if (crypto_shash_init(shash))
227 		return -EFAULT;
228 
229 	if (crypto_shash_update(shash, opad, block_size))
230 		return -EFAULT;
231 
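	/*
	 * The outer (opad) partial state is stored right after the inner
	 * state, rounded up to an 8-byte boundary.
	 */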
232 	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
233 	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
234 	hash512_state_out = (__be64 *)hash_state_out;
235 
236 	switch (ctx->qat_hash_alg) {
237 	case ICP_QAT_HW_AUTH_ALGO_SHA1:
238 		if (crypto_shash_export(shash, &sha1))
239 			return -EFAULT;
240 		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
241 			*hash_state_out = cpu_to_be32(*(sha1.state + i));
242 		break;
243 	case ICP_QAT_HW_AUTH_ALGO_SHA256:
244 		if (crypto_shash_export(shash, &sha256))
245 			return -EFAULT;
246 		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
247 			*hash_state_out = cpu_to_be32(*(sha256.state + i));
248 		break;
249 	case ICP_QAT_HW_AUTH_ALGO_SHA512:
250 		if (crypto_shash_export(shash, &sha512))
251 			return -EFAULT;
252 		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
253 			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
254 		break;
255 	default:
256 		return -EFAULT;
257 	}
258 	memzero_explicit(ipad, block_size);
259 	memzero_explicit(opad, block_size);
260 	return 0;
261 }
262 
263 static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
264 {
265 	header->hdr_flags =
266 		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
267 	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
268 	header->comn_req_flags =
269 		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
270 					    QAT_COMN_PTR_TYPE_SGL);
271 	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
272 				  ICP_QAT_FW_LA_PARTIAL_NONE);
273 	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
274 					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
275 	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
276 				ICP_QAT_FW_LA_NO_PROTO);
277 	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
278 				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
279 }
280 
281 static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx,
282 					 int alg,
283 					 struct crypto_authenc_keys *keys)
284 {
285 	struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
286 	unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
287 	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
288 	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
289 	struct icp_qat_hw_auth_algo_blk *hash =
290 		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
291 		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
292 	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
293 	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
294 	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
295 	void *ptr = &req_tmpl->cd_ctrl;
296 	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
297 	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
298 
299 	/* CD setup */
300 	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
301 	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
302 	hash->sha.inner_setup.auth_config.config =
303 		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
304 					     ctx->qat_hash_alg, digestsize);
305 	hash->sha.inner_setup.auth_counter.counter =
306 		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
307 
308 	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
309 		return -EFAULT;
310 
311 	/* Request setup */
312 	qat_alg_init_common_hdr(header);
313 	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
314 	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
315 					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
316 	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
317 				   ICP_QAT_FW_LA_RET_AUTH_RES);
318 	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
319 				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
320 	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
321 	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
322 
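	/*
	 * Sizes and offsets in the content descriptor control blocks are in
	 * 8-byte quad words, hence the >> 3 conversions below.
	 */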
323 	/* Cipher CD config setup */
324 	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
325 	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
326 	cipher_cd_ctrl->cipher_cfg_offset = 0;
327 	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
328 	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
329 	/* Auth CD config setup */
330 	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
331 	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
332 	hash_cd_ctrl->inner_res_sz = digestsize;
333 	hash_cd_ctrl->final_sz = digestsize;
334 
335 	switch (ctx->qat_hash_alg) {
336 	case ICP_QAT_HW_AUTH_ALGO_SHA1:
337 		hash_cd_ctrl->inner_state1_sz =
338 			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
339 		hash_cd_ctrl->inner_state2_sz =
340 			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
341 		break;
342 	case ICP_QAT_HW_AUTH_ALGO_SHA256:
343 		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
344 		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
345 		break;
346 	case ICP_QAT_HW_AUTH_ALGO_SHA512:
347 		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
348 		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
349 		break;
350 	default:
351 		break;
352 	}
353 	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
354 			((sizeof(struct icp_qat_hw_auth_setup) +
355 			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
356 	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
357 	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
358 	return 0;
359 }
360 
361 static int qat_alg_aead_init_dec_session(struct qat_alg_aead_ctx *ctx,
362 					 int alg,
363 					 struct crypto_authenc_keys *keys)
364 {
365 	struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
366 	unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
367 	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
368 	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
369 	struct icp_qat_hw_cipher_algo_blk *cipher =
370 		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
371 		sizeof(struct icp_qat_hw_auth_setup) +
372 		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
373 	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
374 	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
375 	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
376 	void *ptr = &req_tmpl->cd_ctrl;
377 	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
378 	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
379 	struct icp_qat_fw_la_auth_req_params *auth_param =
380 		(struct icp_qat_fw_la_auth_req_params *)
381 		((char *)&req_tmpl->serv_specif_rqpars +
382 		sizeof(struct icp_qat_fw_la_cipher_req_params));
383 
384 	/* CD setup */
385 	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
386 	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
387 	hash->sha.inner_setup.auth_config.config =
388 		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
389 					     ctx->qat_hash_alg,
390 					     digestsize);
391 	hash->sha.inner_setup.auth_counter.counter =
392 		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
393 
394 	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
395 		return -EFAULT;
396 
397 	/* Request setup */
398 	qat_alg_init_common_hdr(header);
399 	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
400 	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
401 					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
402 	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
403 				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
404 	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
405 				   ICP_QAT_FW_LA_CMP_AUTH_RES);
406 	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
407 	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
408 
409 	/* Cipher CD config setup */
410 	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
411 	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
412 	cipher_cd_ctrl->cipher_cfg_offset =
413 		(sizeof(struct icp_qat_hw_auth_setup) +
414 		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
415 	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
416 	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
417 
418 	/* Auth CD config setup */
419 	hash_cd_ctrl->hash_cfg_offset = 0;
420 	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
421 	hash_cd_ctrl->inner_res_sz = digestsize;
422 	hash_cd_ctrl->final_sz = digestsize;
423 
424 	switch (ctx->qat_hash_alg) {
425 	case ICP_QAT_HW_AUTH_ALGO_SHA1:
426 		hash_cd_ctrl->inner_state1_sz =
427 			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
428 		hash_cd_ctrl->inner_state2_sz =
429 			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
430 		break;
431 	case ICP_QAT_HW_AUTH_ALGO_SHA256:
432 		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
433 		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
434 		break;
435 	case ICP_QAT_HW_AUTH_ALGO_SHA512:
436 		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
437 		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
438 		break;
439 	default:
440 		break;
441 	}
442 
443 	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
444 			((sizeof(struct icp_qat_hw_auth_setup) +
445 			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
446 	auth_param->auth_res_sz = digestsize;
447 	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
448 	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
449 	return 0;
450 }
451 
452 static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
453 					struct icp_qat_fw_la_bulk_req *req,
454 					struct icp_qat_hw_cipher_algo_blk *cd,
455 					const uint8_t *key, unsigned int keylen)
456 {
457 	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
458 	struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
459 	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
460 
461 	memcpy(cd->aes.key, key, keylen);
462 	qat_alg_init_common_hdr(header);
463 	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
464 	cd_pars->u.s.content_desc_params_sz =
465 				sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
466 	/* Cipher CD config setup */
467 	cd_ctrl->cipher_key_sz = keylen >> 3;
468 	cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
469 	cd_ctrl->cipher_cfg_offset = 0;
470 	ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
471 	ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
472 }
473 
474 static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
475 					int alg, const uint8_t *key,
476 					unsigned int keylen)
477 {
478 	struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
479 	struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
480 	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
481 
482 	qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
483 	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
484 	enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
485 }
486 
487 static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
488 					int alg, const uint8_t *key,
489 					unsigned int keylen)
490 {
491 	struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
492 	struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
493 	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
494 
495 	qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
496 	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
497 	dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
498 }
499 
500 static int qat_alg_validate_key(int key_len, int *alg)
501 {
502 	switch (key_len) {
503 	case AES_KEYSIZE_128:
504 		*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
505 		break;
506 	case AES_KEYSIZE_192:
507 		*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
508 		break;
509 	case AES_KEYSIZE_256:
510 		*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
511 		break;
512 	default:
513 		return -EINVAL;
514 	}
515 	return 0;
516 }
517 
518 static int qat_alg_aead_init_sessions(struct qat_alg_aead_ctx *ctx,
519 				      const uint8_t *key, unsigned int keylen)
520 {
521 	struct crypto_authenc_keys keys;
522 	int alg;
523 
524 	if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
525 		return -EFAULT;
526 
527 	if (crypto_authenc_extractkeys(&keys, key, keylen))
528 		goto bad_key;
529 
530 	if (qat_alg_validate_key(keys.enckeylen, &alg))
531 		goto bad_key;
532 
533 	if (qat_alg_aead_init_enc_session(ctx, alg, &keys))
534 		goto error;
535 
536 	if (qat_alg_aead_init_dec_session(ctx, alg, &keys))
537 		goto error;
538 
539 	return 0;
540 bad_key:
541 	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
542 	return -EINVAL;
543 error:
544 	return -EFAULT;
545 }
546 
547 static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
548 					    const uint8_t *key,
549 					    unsigned int keylen)
550 {
551 	int alg;
552 
553 	if (qat_alg_validate_key(keylen, &alg))
554 		goto bad_key;
555 
556 	qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen);
557 	qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen);
558 	return 0;
559 bad_key:
560 	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
561 	return -EINVAL;
562 }
563 
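/*
 * The content descriptors live in DMA-coherent memory shared with the
 * device; they are allocated on the first setkey and only cleared and
 * rebuilt when rekeying.
 */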
564 static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
565 			       unsigned int keylen)
566 {
567 	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
568 	struct device *dev;
569 
570 	spin_lock(&ctx->lock);
571 	if (ctx->enc_cd) {
572 		/* rekeying */
573 		dev = &GET_DEV(ctx->inst->accel_dev);
574 		memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
575 		memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
576 		memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
577 		memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
578 	} else {
579 		/* new key */
580 		int node = get_current_node();
581 		struct qat_crypto_instance *inst =
582 				qat_crypto_get_instance_node(node);
583 		if (!inst) {
584 			spin_unlock(&ctx->lock);
585 			return -EINVAL;
586 		}
587 
588 		dev = &GET_DEV(inst->accel_dev);
589 		ctx->inst = inst;
590 		ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
591 						  &ctx->enc_cd_paddr,
592 						  GFP_ATOMIC);
593 		if (!ctx->enc_cd) {
594 			spin_unlock(&ctx->lock);
595 			return -ENOMEM;
596 		}
597 		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
598 						  &ctx->dec_cd_paddr,
599 						  GFP_ATOMIC);
600 		if (!ctx->dec_cd) {
601 			spin_unlock(&ctx->lock);
602 			goto out_free_enc;
603 		}
604 	}
605 	spin_unlock(&ctx->lock);
606 	if (qat_alg_aead_init_sessions(ctx, key, keylen))
607 		goto out_free_all;
608 
609 	return 0;
610 
611 out_free_all:
612 	memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
613 	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
614 			  ctx->dec_cd, ctx->dec_cd_paddr);
615 	ctx->dec_cd = NULL;
616 out_free_enc:
617 	memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
618 	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
619 			  ctx->enc_cd, ctx->enc_cd_paddr);
620 	ctx->enc_cd = NULL;
621 	return -ENOMEM;
622 }
623 
624 static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
625 			      struct qat_crypto_request *qat_req)
626 {
627 	struct device *dev = &GET_DEV(inst->accel_dev);
628 	struct qat_alg_buf_list *bl = qat_req->buf.bl;
629 	struct qat_alg_buf_list *blout = qat_req->buf.blout;
630 	dma_addr_t blp = qat_req->buf.blp;
631 	dma_addr_t blpout = qat_req->buf.bloutp;
632 	size_t sz = qat_req->buf.sz;
633 	size_t sz_out = qat_req->buf.sz_out;
634 	int i;
635 
636 	for (i = 0; i < bl->num_bufs; i++)
637 		dma_unmap_single(dev, bl->bufers[i].addr,
638 				 bl->bufers[i].len, DMA_BIDIRECTIONAL);
639 
640 	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
641 	kfree(bl);
642 	if (blp != blpout) {
643 		/* For out-of-place operations, DMA unmap only the data buffers */
644 		int bufless = blout->num_bufs - blout->num_mapped_bufs;
645 
646 		for (i = bufless; i < blout->num_bufs; i++) {
647 			dma_unmap_single(dev, blout->bufers[i].addr,
648 					 blout->bufers[i].len,
649 					 DMA_BIDIRECTIONAL);
650 		}
651 		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
652 		kfree(blout);
653 	}
654 }
655 
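/*
 * Build the flat buffer lists the firmware expects: one entry each for the
 * assoc data, the IV and every non-empty src scatterlist element, all DMA
 * mapped. For out of place requests a second list is built for dst, reusing
 * the assoc and IV mappings.
 */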
656 static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
657 			       struct scatterlist *assoc,
658 			       struct scatterlist *sgl,
659 			       struct scatterlist *sglout, uint8_t *iv,
660 			       uint8_t ivlen,
661 			       struct qat_crypto_request *qat_req)
662 {
663 	struct device *dev = &GET_DEV(inst->accel_dev);
664 	int i, bufs = 0, sg_nctr = 0;
665 	int n = sg_nents(sgl), assoc_n = sg_nents(assoc);
666 	struct qat_alg_buf_list *bufl;
667 	struct qat_alg_buf_list *buflout = NULL;
668 	dma_addr_t blp;
669 	dma_addr_t bloutp = 0;
670 	struct scatterlist *sg;
671 	size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
672 			((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
673 
674 	if (unlikely(!n))
675 		return -EINVAL;
676 
677 	bufl = kzalloc_node(sz, GFP_ATOMIC,
678 			    dev_to_node(&GET_DEV(inst->accel_dev)));
679 	if (unlikely(!bufl))
680 		return -ENOMEM;
681 
682 	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
683 	if (unlikely(dma_mapping_error(dev, blp)))
684 		goto err;
685 
686 	for_each_sg(assoc, sg, assoc_n, i) {
687 		if (!sg->length)
688 			continue;
689 		bufl->bufers[bufs].addr = dma_map_single(dev,
690 							 sg_virt(sg),
691 							 sg->length,
692 							 DMA_BIDIRECTIONAL);
693 		bufl->bufers[bufs].len = sg->length;
694 		if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
695 			goto err;
696 		bufs++;
697 	}
698 	if (ivlen) {
699 		bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
700 							 DMA_BIDIRECTIONAL);
701 		bufl->bufers[bufs].len = ivlen;
702 		if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
703 			goto err;
704 		bufs++;
705 	}
706 
707 	for_each_sg(sgl, sg, n, i) {
708 		int y = sg_nctr + bufs;
709 
710 		if (!sg->length)
711 			continue;
712 
713 		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
714 						      sg->length,
715 						      DMA_BIDIRECTIONAL);
716 		bufl->bufers[y].len = sg->length;
717 		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
718 			goto err;
719 		sg_nctr++;
720 	}
721 	bufl->num_bufs = sg_nctr + bufs;
722 	qat_req->buf.bl = bufl;
723 	qat_req->buf.blp = blp;
724 	qat_req->buf.sz = sz;
725 	/* Handle out of place operation */
726 	if (sgl != sglout) {
727 		struct qat_alg_buf *bufers;
728 
729 		n = sg_nents(sglout);
730 		sz_out = sizeof(struct qat_alg_buf_list) +
731 			((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
732 		sg_nctr = 0;
733 		buflout = kzalloc_node(sz_out, GFP_ATOMIC,
734 				       dev_to_node(&GET_DEV(inst->accel_dev)));
735 		if (unlikely(!buflout))
736 			goto err;
737 		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
738 		if (unlikely(dma_mapping_error(dev, bloutp)))
739 			goto err;
740 		bufers = buflout->bufers;
741 		/* For out-of-place operations, DMA map only the data
742 		 * buffers and reuse the assoc and iv mappings */
743 		for (i = 0; i < bufs; i++) {
744 			bufers[i].len = bufl->bufers[i].len;
745 			bufers[i].addr = bufl->bufers[i].addr;
746 		}
747 		for_each_sg(sglout, sg, n, i) {
748 			int y = sg_nctr + bufs;
749 
750 			if (!sg->length)
751 				continue;
752 
753 			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
754 							sg->length,
755 							DMA_BIDIRECTIONAL);
756 			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
757 				goto err;
758 			bufers[y].len = sg->length;
759 			sg_nctr++;
760 		}
761 		buflout->num_bufs = sg_nctr + bufs;
762 		buflout->num_mapped_bufs = sg_nctr;
763 		qat_req->buf.blout = buflout;
764 		qat_req->buf.bloutp = bloutp;
765 		qat_req->buf.sz_out = sz_out;
766 	} else {
767 		/* Otherwise set the src and dst to the same address */
768 		qat_req->buf.bloutp = qat_req->buf.blp;
769 		qat_req->buf.sz_out = 0;
770 	}
771 	return 0;
772 err:
773 	dev_err(dev, "Failed to map buf for dma\n");
774 	sg_nctr = 0;
775 	for (i = 0; i < n + bufs; i++)
776 		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
777 			dma_unmap_single(dev, bufl->bufers[i].addr,
778 					 bufl->bufers[i].len,
779 					 DMA_BIDIRECTIONAL);
780 
781 	if (!dma_mapping_error(dev, blp))
782 		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
783 	kfree(bufl);
784 	if (sgl != sglout && buflout) {
785 		n = sg_nents(sglout);
786 		for (i = bufs; i < n + bufs; i++)
787 			if (!dma_mapping_error(dev, buflout->bufers[i].addr))
788 				dma_unmap_single(dev, buflout->bufers[i].addr,
789 						 buflout->bufers[i].len,
790 						 DMA_BIDIRECTIONAL);
791 		if (!dma_mapping_error(dev, bloutp))
792 			dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
793 		kfree(buflout);
794 	}
795 	return -ENOMEM;
796 }
797 
798 static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
799 				  struct qat_crypto_request *qat_req)
800 {
801 	struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
802 	struct qat_crypto_instance *inst = ctx->inst;
803 	struct aead_request *areq = qat_req->aead_req;
804 	uint8_t stat_field = qat_resp->comn_resp.comn_status;
805 	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
806 
807 	qat_alg_free_bufl(inst, qat_req);
808 	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
809 		res = -EBADMSG;
810 	areq->base.complete(&areq->base, res);
811 }
812 
813 static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
814 					struct qat_crypto_request *qat_req)
815 {
816 	struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
817 	struct qat_crypto_instance *inst = ctx->inst;
818 	struct ablkcipher_request *areq = qat_req->ablkcipher_req;
819 	uint8_t stat_field = qat_resp->comn_resp.comn_status;
820 	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
821 
822 	qat_alg_free_bufl(inst, qat_req);
823 	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
824 		res = -EINVAL;
825 	areq->base.complete(&areq->base, res);
826 }
827 
828 void qat_alg_callback(void *resp)
829 {
830 	struct icp_qat_fw_la_resp *qat_resp = resp;
831 	struct qat_crypto_request *qat_req =
832 				(void *)(__force long)qat_resp->opaque_data;
833 
834 	qat_req->cb(qat_resp, qat_req);
835 }
836 
837 static int qat_alg_aead_dec(struct aead_request *areq)
838 {
839 	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
840 	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
841 	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
842 	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
843 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
844 	struct icp_qat_fw_la_auth_req_params *auth_param;
845 	struct icp_qat_fw_la_bulk_req *msg;
846 	int digst_size = crypto_aead_crt(aead_tfm)->authsize;
847 	int ret, ctr = 0;
848 
849 	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
850 				  areq->iv, AES_BLOCK_SIZE, qat_req);
851 	if (unlikely(ret))
852 		return ret;
853 
854 	msg = &qat_req->req;
855 	*msg = ctx->dec_fw_req;
856 	qat_req->aead_ctx = ctx;
857 	qat_req->aead_req = areq;
858 	qat_req->cb = qat_aead_alg_callback;
859 	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
860 	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
861 	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
862 	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
863 	cipher_param->cipher_length = areq->cryptlen - digst_size;
864 	cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
865 	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
866 	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
867 	auth_param->auth_off = 0;
868 	auth_param->auth_len = areq->assoclen +
869 				cipher_param->cipher_length + AES_BLOCK_SIZE;
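	/*
	 * Put the request on the instance's symmetric TX ring, retrying a few
	 * times if the ring is full before reporting -EBUSY.
	 */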
870 	do {
871 		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
872 	} while (ret == -EAGAIN && ctr++ < 10);
873 
874 	if (ret == -EAGAIN) {
875 		qat_alg_free_bufl(ctx->inst, qat_req);
876 		return -EBUSY;
877 	}
878 	return -EINPROGRESS;
879 }
880 
881 static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
882 				     int enc_iv)
883 {
884 	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
885 	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
886 	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
887 	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
888 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
889 	struct icp_qat_fw_la_auth_req_params *auth_param;
890 	struct icp_qat_fw_la_bulk_req *msg;
891 	int ret, ctr = 0;
892 
893 	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
894 				  iv, AES_BLOCK_SIZE, qat_req);
895 	if (unlikely(ret))
896 		return ret;
897 
898 	msg = &qat_req->req;
899 	*msg = ctx->enc_fw_req;
900 	qat_req->aead_ctx = ctx;
901 	qat_req->aead_req = areq;
902 	qat_req->cb = qat_aead_alg_callback;
903 	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
904 	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
905 	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
906 	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
907 	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
908 
909 	if (enc_iv) {
910 		cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
911 		cipher_param->cipher_offset = areq->assoclen;
912 	} else {
913 		memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
914 		cipher_param->cipher_length = areq->cryptlen;
915 		cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
916 	}
917 	auth_param->auth_off = 0;
918 	auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;
919 
920 	do {
921 		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
922 	} while (ret == -EAGAIN && ctr++ < 10);
923 
924 	if (ret == -EAGAIN) {
925 		qat_alg_free_bufl(ctx->inst, qat_req);
926 		return -EBUSY;
927 	}
928 	return -EINPROGRESS;
929 }
930 
931 static int qat_alg_aead_enc(struct aead_request *areq)
932 {
933 	return qat_alg_aead_enc_internal(areq, areq->iv, 0);
934 }
935 
936 static int qat_alg_aead_genivenc(struct aead_givcrypt_request *req)
937 {
938 	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
939 	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
940 	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
941 	__be64 seq;
942 
943 	memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
944 	seq = cpu_to_be64(req->seq);
945 	memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
946 	       &seq, sizeof(uint64_t));
947 	return qat_alg_aead_enc_internal(&req->areq, req->giv, 1);
948 }
949 
950 static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
951 				     const uint8_t *key,
952 				     unsigned int keylen)
953 {
954 	struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
955 	struct device *dev;
956 
957 	spin_lock(&ctx->lock);
958 	if (ctx->enc_cd) {
959 		/* rekeying */
960 		dev = &GET_DEV(ctx->inst->accel_dev);
961 		memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
962 		memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
963 		memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
964 		memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
965 	} else {
966 		/* new key */
967 		int node = get_current_node();
968 		struct qat_crypto_instance *inst =
969 				qat_crypto_get_instance_node(node);
970 		if (!inst) {
971 			spin_unlock(&ctx->lock);
972 			return -EINVAL;
973 		}
974 
975 		dev = &GET_DEV(inst->accel_dev);
976 		ctx->inst = inst;
977 		ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
978 						  &ctx->enc_cd_paddr,
979 						  GFP_ATOMIC);
980 		if (!ctx->enc_cd) {
981 			spin_unlock(&ctx->lock);
982 			return -ENOMEM;
983 		}
984 		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
985 						  &ctx->dec_cd_paddr,
986 						  GFP_ATOMIC);
987 		if (!ctx->dec_cd) {
988 			spin_unlock(&ctx->lock);
989 			goto out_free_enc;
990 		}
991 	}
992 	spin_unlock(&ctx->lock);
993 	if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen))
994 		goto out_free_all;
995 
996 	return 0;
997 
998 out_free_all:
999 	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
1000 	dma_free_coherent(dev, sizeof(*ctx->dec_cd),
1001 			  ctx->dec_cd, ctx->dec_cd_paddr);
1002 	ctx->dec_cd = NULL;
1003 out_free_enc:
1004 	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
1005 	dma_free_coherent(dev, sizeof(*ctx->enc_cd),
1006 			  ctx->enc_cd, ctx->enc_cd_paddr);
1007 	ctx->enc_cd = NULL;
1008 	return -ENOMEM;
1009 }
1010 
1011 static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
1012 {
1013 	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
1014 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
1015 	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1016 	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
1017 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
1018 	struct icp_qat_fw_la_bulk_req *msg;
1019 	int ret, ctr = 0;
1020 
1021 	ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst,
1022 				  NULL, 0, qat_req);
1023 	if (unlikely(ret))
1024 		return ret;
1025 
1026 	msg = &qat_req->req;
1027 	*msg = ctx->enc_fw_req;
1028 	qat_req->ablkcipher_ctx = ctx;
1029 	qat_req->ablkcipher_req = req;
1030 	qat_req->cb = qat_ablkcipher_alg_callback;
1031 	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
1032 	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1033 	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1034 	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1035 	cipher_param->cipher_length = req->nbytes;
1036 	cipher_param->cipher_offset = 0;
1037 	memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
1038 	do {
1039 		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
1040 	} while (ret == -EAGAIN && ctr++ < 10);
1041 
1042 	if (ret == -EAGAIN) {
1043 		qat_alg_free_bufl(ctx->inst, qat_req);
1044 		return -EBUSY;
1045 	}
1046 	return -EINPROGRESS;
1047 }
1048 
1049 static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
1050 {
1051 	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
1052 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
1053 	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1054 	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
1055 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
1056 	struct icp_qat_fw_la_bulk_req *msg;
1057 	int ret, ctr = 0;
1058 
1059 	ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst,
1060 				  NULL, 0, qat_req);
1061 	if (unlikely(ret))
1062 		return ret;
1063 
1064 	msg = &qat_req->req;
1065 	*msg = ctx->dec_fw_req;
1066 	qat_req->ablkcipher_ctx = ctx;
1067 	qat_req->ablkcipher_req = req;
1068 	qat_req->cb = qat_ablkcipher_alg_callback;
1069 	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
1070 	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1071 	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1072 	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1073 	cipher_param->cipher_length = req->nbytes;
1074 	cipher_param->cipher_offset = 0;
1075 	memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
1076 	do {
1077 		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
1078 	} while (ret == -EAGAIN && ctr++ < 10);
1079 
1080 	if (ret == -EAGAIN) {
1081 		qat_alg_free_bufl(ctx->inst, qat_req);
1082 		return -EBUSY;
1083 	}
1084 	return -EINPROGRESS;
1085 }
1086 
1087 static int qat_alg_aead_init(struct crypto_tfm *tfm,
1088 			     enum icp_qat_hw_auth_algo hash,
1089 			     const char *hash_name)
1090 {
1091 	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
1092 
1093 	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
1094 	if (IS_ERR(ctx->hash_tfm))
1095 		return -EFAULT;
1096 	spin_lock_init(&ctx->lock);
1097 	ctx->qat_hash_alg = hash;
1098 	tfm->crt_aead.reqsize = sizeof(struct aead_request) +
1099 				sizeof(struct qat_crypto_request);
1100 	ctx->tfm = tfm;
1101 	return 0;
1102 }
1103 
1104 static int qat_alg_aead_sha1_init(struct crypto_tfm *tfm)
1105 {
1106 	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
1107 }
1108 
1109 static int qat_alg_aead_sha256_init(struct crypto_tfm *tfm)
1110 {
1111 	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
1112 }
1113 
1114 static int qat_alg_aead_sha512_init(struct crypto_tfm *tfm)
1115 {
1116 	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
1117 }
1118 
1119 static void qat_alg_aead_exit(struct crypto_tfm *tfm)
1120 {
1121 	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
1122 	struct qat_crypto_instance *inst = ctx->inst;
1123 	struct device *dev;
1124 
1125 	if (!IS_ERR(ctx->hash_tfm))
1126 		crypto_free_shash(ctx->hash_tfm);
1127 
1128 	if (!inst)
1129 		return;
1130 
1131 	dev = &GET_DEV(inst->accel_dev);
1132 	if (ctx->enc_cd) {
1133 		memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
1134 		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1135 				  ctx->enc_cd, ctx->enc_cd_paddr);
1136 	}
1137 	if (ctx->dec_cd) {
1138 		memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
1139 		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1140 				  ctx->dec_cd, ctx->dec_cd_paddr);
1141 	}
1142 	qat_crypto_put_instance(inst);
1143 }
1144 
1145 static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
1146 {
1147 	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1148 
1149 	spin_lock_init(&ctx->lock);
1150 	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
1151 					sizeof(struct qat_crypto_request);
1152 	ctx->tfm = tfm;
1153 	return 0;
1154 }
1155 
1156 static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
1157 {
1158 	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1159 	struct qat_crypto_instance *inst = ctx->inst;
1160 	struct device *dev;
1161 
1162 	if (!inst)
1163 		return;
1164 
1165 	dev = &GET_DEV(inst->accel_dev);
1166 	if (ctx->enc_cd) {
1167 		memset(ctx->enc_cd, 0,
1168 		       sizeof(struct icp_qat_hw_cipher_algo_blk));
1169 		dma_free_coherent(dev,
1170 				  sizeof(struct icp_qat_hw_cipher_algo_blk),
1171 				  ctx->enc_cd, ctx->enc_cd_paddr);
1172 	}
1173 	if (ctx->dec_cd) {
1174 		memset(ctx->dec_cd, 0,
1175 		       sizeof(struct icp_qat_hw_cipher_algo_blk));
1176 		dma_free_coherent(dev,
1177 				  sizeof(struct icp_qat_hw_cipher_algo_blk),
1178 				  ctx->dec_cd, ctx->dec_cd_paddr);
1179 	}
1180 	qat_crypto_put_instance(inst);
1181 }
1182 
1183 static struct crypto_alg qat_algs[] = { {
1184 	.cra_name = "authenc(hmac(sha1),cbc(aes))",
1185 	.cra_driver_name = "qat_aes_cbc_hmac_sha1",
1186 	.cra_priority = 4001,
1187 	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1188 	.cra_blocksize = AES_BLOCK_SIZE,
1189 	.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1190 	.cra_alignmask = 0,
1191 	.cra_type = &crypto_aead_type,
1192 	.cra_module = THIS_MODULE,
1193 	.cra_init = qat_alg_aead_sha1_init,
1194 	.cra_exit = qat_alg_aead_exit,
1195 	.cra_u = {
1196 		.aead = {
1197 			.setkey = qat_alg_aead_setkey,
1198 			.decrypt = qat_alg_aead_dec,
1199 			.encrypt = qat_alg_aead_enc,
1200 			.givencrypt = qat_alg_aead_genivenc,
1201 			.ivsize = AES_BLOCK_SIZE,
1202 			.maxauthsize = SHA1_DIGEST_SIZE,
1203 		},
1204 	},
1205 }, {
1206 	.cra_name = "authenc(hmac(sha256),cbc(aes))",
1207 	.cra_driver_name = "qat_aes_cbc_hmac_sha256",
1208 	.cra_priority = 4001,
1209 	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1210 	.cra_blocksize = AES_BLOCK_SIZE,
1211 	.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1212 	.cra_alignmask = 0,
1213 	.cra_type = &crypto_aead_type,
1214 	.cra_module = THIS_MODULE,
1215 	.cra_init = qat_alg_aead_sha256_init,
1216 	.cra_exit = qat_alg_aead_exit,
1217 	.cra_u = {
1218 		.aead = {
1219 			.setkey = qat_alg_aead_setkey,
1220 			.decrypt = qat_alg_aead_dec,
1221 			.encrypt = qat_alg_aead_enc,
1222 			.givencrypt = qat_alg_aead_genivenc,
1223 			.ivsize = AES_BLOCK_SIZE,
1224 			.maxauthsize = SHA256_DIGEST_SIZE,
1225 		},
1226 	},
1227 }, {
1228 	.cra_name = "authenc(hmac(sha512),cbc(aes))",
1229 	.cra_driver_name = "qat_aes_cbc_hmac_sha512",
1230 	.cra_priority = 4001,
1231 	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1232 	.cra_blocksize = AES_BLOCK_SIZE,
1233 	.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1234 	.cra_alignmask = 0,
1235 	.cra_type = &crypto_aead_type,
1236 	.cra_module = THIS_MODULE,
1237 	.cra_init = qat_alg_aead_sha512_init,
1238 	.cra_exit = qat_alg_aead_exit,
1239 	.cra_u = {
1240 		.aead = {
1241 			.setkey = qat_alg_aead_setkey,
1242 			.decrypt = qat_alg_aead_dec,
1243 			.encrypt = qat_alg_aead_enc,
1244 			.givencrypt = qat_alg_aead_genivenc,
1245 			.ivsize = AES_BLOCK_SIZE,
1246 			.maxauthsize = SHA512_DIGEST_SIZE,
1247 		},
1248 	},
1249 }, {
1250 	.cra_name = "cbc(aes)",
1251 	.cra_driver_name = "qat_aes_cbc",
1252 	.cra_priority = 4001,
1253 	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1254 	.cra_blocksize = AES_BLOCK_SIZE,
1255 	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
1256 	.cra_alignmask = 0,
1257 	.cra_type = &crypto_ablkcipher_type,
1258 	.cra_module = THIS_MODULE,
1259 	.cra_init = qat_alg_ablkcipher_init,
1260 	.cra_exit = qat_alg_ablkcipher_exit,
1261 	.cra_u = {
1262 		.ablkcipher = {
1263 			.setkey = qat_alg_ablkcipher_setkey,
1264 			.decrypt = qat_alg_ablkcipher_decrypt,
1265 			.encrypt = qat_alg_ablkcipher_encrypt,
1266 			.min_keysize = AES_MIN_KEY_SIZE,
1267 			.max_keysize = AES_MAX_KEY_SIZE,
1268 			.ivsize = AES_BLOCK_SIZE,
1269 		},
1270 	},
1271 } };
1272 
1273 int qat_algs_register(void)
1274 {
1275 	int ret = 0;
1276 
1277 	mutex_lock(&algs_lock);
1278 	if (++active_devs == 1) {
1279 		int i;
1280 
1281 		for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
1282 			qat_algs[i].cra_flags =
1283 				(qat_algs[i].cra_type == &crypto_aead_type) ?
1284 				CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
1285 				CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
1286 
1287 		ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
1288 	}
1289 	mutex_unlock(&algs_lock);
1290 	return ret;
1291 }
1292 
1293 int qat_algs_unregister(void)
1294 {
1295 	int ret = 0;
1296 
1297 	mutex_lock(&algs_lock);
1298 	if (--active_devs == 0)
1299 		ret = crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
1300 	mutex_unlock(&algs_lock);
1301 	return ret;
1302 }
1303 
1304 int qat_algs_init(void)
1305 {
1306 	crypto_get_default_rng();
1307 	return 0;
1308 }
1309 
1310 void qat_algs_exit(void)
1311 {
1312 	crypto_put_default_rng();
1313 }
1314