/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |    |-------->| (operation) |
 *       .              |    |         | (load ctx2) |
 *       .              |    |         ---------------
 * ---------------      |    |
 * | JobDesc #3  |------|    |
 * | *(packet 3) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #4  |------------
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_AHASH_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

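/* List of registered hash algorithms, linked via caam_hash_alg->entry */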
static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	struct device *jrdev;
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
	dma_addr_t sh_desc_update_dma;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	dma_addr_t sh_desc_finup_dma;
	u32 alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	dma_addr_t key_dma;
	int ctx_len;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
};

/*
 * ahash state
 *
 * buf_0 and buf_1 are used in ping-pong fashion (selected by
 * current_buf) to carry the partial block left over from one
 * update call into the next.
 */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN];
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}

/* Map current buffer in state and put it in link table */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
					    struct sec4_sg_entry *sec4_sg,
					    u8 *buf, int buflen)
{
	dma_addr_t buf_dma;

	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);

	return buf_dma;
}

/* Map req->src and put it in link table */
static inline void src_map_to_sec4_sg(struct device *jrdev,
				      struct scatterlist *src, int src_nents,
				      struct sec4_sg_entry *sec4_sg,
				      bool chained)
{
	dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
	sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
}

/*
 * Only put the buffer in the link table if it contains data; a buffer
 * that was mapped for a previous job must be unmapped first.
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
		       u8 *buf, dma_addr_t buf_dma, int buflen,
		       int last_buflen)
{
	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
	if (buflen)
		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
	else
		buf_dma = 0;

	return buf_dma;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Common shared descriptor commands */
static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
			  ctx->split_key_len, CLASS_2 |
			  KEY_DEST_MDHA_SPLIT | KEY_ENC);
}

/* Append key if it has been set */
static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	if (ctx->split_key_len) {
		/* Skip if already shared */
		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					   JUMP_COND_SHRD);

		append_key_ahash(desc, ctx);

		set_jump_tgt_here(desc, key_jump_cmd);
	}

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}

/*
 * For ahash read data from seqin following state->caam_ctx,
 * and write resulting class2 context to seqout, which may be state->caam_ctx
 * or req->result
 */
static inline void ahash_append_load_str(u32 *desc, int digestsize)
{
	/* Calculate remaining bytes to read */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);

	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}

/*
 * For ahash update, final and finup, import context, read and write to seqout
 */
static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
					 int digestsize,
					 struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}

/* For ahash update_first and digest, read data and write to seqout */
static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
				     int digestsize, struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 have_key = 0;
	u32 *desc;

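	/*
	 * If an HMAC split key has been set, run the MDHA in
	 * HMAC-with-precomputes mode; otherwise perform a plain hash.
	 */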
	if (ctx->split_key_len)
		have_key = OP_ALG_AAI_HMAC_PRECOMP;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
			 OP_ALG_ENCRYPT);

	/* Load data and write to result or context */
	ahash_append_load_str(desc, ctx->ctx_len);

	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
			  ctx->ctx_len, ctx);

	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
						       desc_bytes(desc),
						       DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_finup shared descriptor */
	desc = ctx->sh_desc_finup;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
			  digestsize, ctx);

	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			      u32 keylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			       ctx->split_key_pad_len, key_in, keylen,
			       ctx->alg_op);
}

/* Digest the key if it is too long, yielding a digestsize-byte key */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret = 0;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

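	/*
	 * Enqueue the one-shot job and wait for split_key_done() to
	 * signal completion; result.err then holds the job status.
	 */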
	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_out,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}

static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret = 0;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
				     GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto badkey;
		key = hashed_key;
	}

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
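	/*
	 * The split key holds the HMAC ipad and opad precomputes, hence
	 * twice the running-digest size, padded to a 16-byte multiple.
	 */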

#ifdef DEBUG
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_hash_key(ctx, key, keylen);
	if (ret)
		goto badkey;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		ret = -ENOMEM;
		goto map_err;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len, 1);
#endif

	ret = ahash_set_sh_desc(ahash);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
				 DMA_TO_DEVICE);
	}

map_err:
	kfree(hashed_key);
	return ret;
badkey:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @chained: if source is chained
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	bool chained;
	int src_nents;
	int sec4_sg_bytes;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

static inline void ahash_unmap(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len)
{
	if (edesc->src_nents)
		dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
				     DMA_TO_DEVICE, edesc->chained);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}

static inline void ahash_unmap_ctx(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len, u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma)
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
	ahash_unmap(dev, edesc, req, dst_len);
}

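/*
 * Job completion callbacks. The four variants differ only in how the
 * request was mapped: ahash_done() unmaps just the result,
 * ahash_done_bi() a bidirectionally-mapped context, ahash_done_ctx_src()
 * a to-device context plus the result, and ahash_done_ctx_dst() a
 * from-device context.
 */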
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			    void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1, last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *sh_desc = ctx->sh_desc_update, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_dma;
	int src_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

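	/*
	 * Only a whole number of blocks is hashed; the remainder of
	 * in_len is stored in next_buf and prepended on the next call.
	 */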
	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
				       &chained);
		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;

		ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			return ret;

		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
							edesc->sec4_sg + 1,
							buf, state->buf_dma,
							*buflen, last_buflen);

		if (src_nents) {
			src_map_to_sec4_sg(jrdev, req->src, src_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   chained);
			if (*next_buflen) {
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
				state->current_buf = !state->current_buf;
			}
		} else {
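			/* no new data: flag the previous entry as the last */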
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
							SEC4_SG_LEN_FIN;
		}

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						     sec4_sg_bytes,
						     DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				       to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (!ret) {
			ret = -EINPROGRESS;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					   DMA_BIDIRECTIONAL);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_fin, *desc;
	dma_addr_t ptr = ctx->sh_desc_fin_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

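	/* one link-table entry for the context, plus one if data is buffered */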
	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->src_nents = 0;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		return ret;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_finup, *desc;
	dma_addr_t ptr = ctx->sh_desc_finup_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	src_nents = __sg_count(req->src, req->nbytes, &chained);
	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->chained = chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		return ret;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
			   sec4_sg_src_index, chained);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
			       buflen + req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, sec4_sg_bytes;
	dma_addr_t src_dma;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	u32 options;
	int sh_len;

	src_nents = sg_count(req->src, req->nbytes, &chained);
	dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
			   chained);
	sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
			DESC_JOB_IO_LEN, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			  DESC_JOB_IO_LEN;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->src_nents = src_nents;
	edesc->chained = chained;

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

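	/*
	 * sg_count() returns 0 for a single contiguous segment, in which
	 * case the data is referenced directly instead of via a link table.
	 */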
	if (src_nents) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}
		src_dma = edesc->sec4_sg_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, options);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	edesc->sec4_sg_bytes = 0;
	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map src\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}
	edesc->src_nents = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1;
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents;
	struct ahash_edesc *edesc;
	u32 *desc, *sh_desc = ctx->sh_desc_update_first;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	bool chained = false;
	int ret = 0;
	int sh_len;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
				       &chained);
		sec4_sg_bytes = (1 + src_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->dst_dma = 0;

		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
						    buf, *buflen);
		src_map_to_sec4_sg(jrdev, req->src, src_nents,
				   edesc->sec4_sg + 1, chained);
		if (*next_buflen) {
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
			state->current_buf = !state->current_buf;
		}

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			return ret;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (!ret) {
			ret = -EINPROGRESS;
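			/*
			 * A hardware context now exists; switch to the
			 * context-aware handlers for subsequent requests.
			 */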
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	bool chained = false;
	int sh_len;
	int ret = 0;

	src_nents = __sg_count(req->src, req->nbytes, &chained);
	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->chained = chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
			   chained);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
			       req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *next_buflen = state->current_buf ?
		&state->buflen_1 : &state->buflen_0;
	int to_hash;
	u32 *sh_desc = ctx->sh_desc_update_first, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	int sec4_sg_bytes, src_nents;
	dma_addr_t src_dma;
	u32 options;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
				     &chained);
		dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
				   DMA_TO_DEVICE, chained);
		sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->dst_dma = 0;

		if (src_nents) {
			sg_to_sec4_sg_last(req->src, src_nents,
					   edesc->sec4_sg, 0);
			edesc->sec4_sg_dma = dma_map_single(jrdev,
							    edesc->sec4_sg,
							    sec4_sg_bytes,
							    DMA_TO_DEVICE);
			if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
				dev_err(jrdev, "unable to map S/G table\n");
				return -ENOMEM;
			}
			src_dma = edesc->sec4_sg_dma;
			options = LDST_SGF;
		} else {
			src_dma = sg_dma_address(req->src);
			options = 0;
		}

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, src_dma, to_hash, options);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			return ret;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
				      req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

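	/* start without a hardware context; the first job creates one */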
	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}

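/*
 * The export blob is the software context followed by the request
 * state; import expects a buffer with the same layout.
 */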
1575static int ahash_export(struct ahash_request *req, void *out)
1576{
1577	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1578	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1579	struct caam_hash_state *state = ahash_request_ctx(req);
1580
1581	memcpy(out, ctx, sizeof(struct caam_hash_ctx));
1582	memcpy(out + sizeof(struct caam_hash_ctx), state,
1583	       sizeof(struct caam_hash_state));
1584	return 0;
1585}
1586
1587static int ahash_import(struct ahash_request *req, const void *in)
1588{
1589	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1590	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1591	struct caam_hash_state *state = ahash_request_ctx(req);
1592
1593	memcpy(ctx, in, sizeof(struct caam_hash_ctx));
1594	memcpy(state, in + sizeof(struct caam_hash_ctx),
1595	       sizeof(struct caam_hash_state));
1596	return 0;
1597}
1598
1599struct caam_hash_template {
1600	char name[CRYPTO_MAX_ALG_NAME];
1601	char driver_name[CRYPTO_MAX_ALG_NAME];
1602	char hmac_name[CRYPTO_MAX_ALG_NAME];
1603	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
1604	unsigned int blocksize;
1605	struct ahash_alg template_ahash;
1606	u32 alg_type;
1607	u32 alg_op;
1608};
1609
1610/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
};
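
/*
 * Each template above yields two registered algorithms: a keyed
 * "hmac(<hash>)" variant and an unkeyed "<hash>" variant (see
 * caam_algapi_hash_init() below). For illustration only, a consumer
 * would reach the keyed sha256 entry with:
 *
 *	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *
 * and the crypto core resolves it to "hmac-sha256-caam" whenever this
 * driver's CAAM_CRA_PRIORITY (3000) beats the competing implementations.
 */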

struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	int alg_op;
	struct ahash_alg ahash_alg;
};
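
/*
 * caam_hash_cra_init() recovers this wrapper from the generic tfm by
 * walking container_of() back up the chain:
 *
 *	crypto_alg -> hash_alg_common -> ahash_alg -> caam_hash_alg
 *
 * which is how the per-algorithm alg_type/alg_op reach the session ctx.
 */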

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/*
	 * Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512.
	 * SHA-224 and SHA-384 are truncations of SHA-256 and SHA-512, so
	 * MDHA carries the full 32-/64-byte internal state for them, not
	 * the truncated digest size.
	 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	/* copy descriptor header template value */
	ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;

	ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(ahash, sizeof(struct caam_hash_state));

	return ahash_set_sh_desc(ahash);
}
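
/*
 * The runninglen[] index above is the low nibble of the ALGSEL field
 * (OP_ALG_ALGSEL_SUBMASK): per the encodings in desc.h, MD5 maps to 0,
 * SHA1 to 1, ..., SHA512 to 5, matching the table order. A worked
 * example: for hmac(sha384), ALGSEL yields index 4, so
 *
 *	ctx->ctx_len = HASH_MSG_LEN + SHA512_DIGEST_SIZE = 8 + 64 = 72
 *
 * bytes of running state are carried between job descriptors.
 */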

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_update_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
				 desc_bytes(ctx->sh_desc_update),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_update_first_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
				 desc_bytes(ctx->sh_desc_update_first),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_fin_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
	if (ctx->sh_desc_digest_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
				 desc_bytes(ctx->sh_desc_digest),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_finup_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
				 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}
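
/*
 * The five unmap blocks above repeat one pattern. A small helper along
 * these lines (a sketch, not part of this driver) would factor it out:
 *
 *	static void unmap_sh_desc(struct device *dev, u32 *desc,
 *				  dma_addr_t dma)
 *	{
 *		if (dma && !dma_mapping_error(dev, dma))
 *			dma_unmap_single(dev, dma, desc_bytes(desc),
 *					 DMA_TO_DEVICE);
 *	}
 */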

static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	/* hash_list is only initialized once the init path finds a device */
	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

/*
 * Allocate and fill a caam_hash_alg from @template; @keyed selects the
 * "hmac(<hash>)" variant over the plain, unkeyed hash.
 */
static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
	alg->cra_type = &crypto_ahash_type;

	t_alg->alg_type = template->alg_type;
	t_alg->alg_op = template->alg_op;

	return t_alg;
}
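
/*
 * For example, caam_hash_alloc(&driver_hash[2], true) produces an alg
 * with cra_name "hmac(sha256)", cra_driver_name "hmac-sha256-caam",
 * cra_blocksize SHA256_BLOCK_SIZE and cra_priority 3000, so once
 * registered it is preferred over the generic software hmac(sha256).
 */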

static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	void *priv;
	int i = 0, err = 0;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_hash_alg *t_alg;

		/* register hmac version */
		t_alg = caam_hash_alloc(&driver_hash[i], true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				driver_hash[i].driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->ahash_alg.halg.base.cra_driver_name);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}

		/* register unkeyed version */
		t_alg = caam_hash_alloc(&driver_hash[i], false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				driver_hash[i].driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->ahash_alg.halg.base.cra_driver_name);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}
	}

	return err;
}
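
/*
 * A minimal consumer sketch (illustration only, not part of this
 * driver). It assumes the crypto_wait_req()/DECLARE_CRYPTO_WAIT helpers
 * available in later kernels; key, msg and digest must be DMA-able
 * (e.g. kmalloc'd) buffers, and digest must hold SHA256_DIGEST_SIZE
 * bytes:
 *
 *	struct crypto_ahash *tfm;
 *	struct ahash_request *req;
 *	struct scatterlist sg;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int err;
 *
 *	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	err = crypto_ahash_setkey(tfm, key, keylen);
 *	if (!err) {
 *		req = ahash_request_alloc(tfm, GFP_KERNEL);
 *		if (req) {
 *			sg_init_one(&sg, msg, msglen);
 *			ahash_request_set_callback(req,
 *					CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					crypto_req_done, &wait);
 *			ahash_request_set_crypt(req, &sg, digest, msglen);
 *			err = crypto_wait_req(crypto_ahash_digest(req),
 *					      &wait);
 *			ahash_request_free(req);
 *		} else {
 *			err = -ENOMEM;
 *		}
 *	}
 *	crypto_free_ahash(tfm);
 *	return err;
 */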

module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");
