1/*
2 * Cryptographic API.
3 * Support for Nomadik hardware crypto engine.
4
5 * Copyright (C) ST-Ericsson SA 2010
6 * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson
7 * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson
8 * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
9 * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
10 * Author: Andreas Westin <andreas.westin@stericsson.com> for ST-Ericsson.
11 * License terms: GNU General Public License (GPL) version 2
12 */
13
14#define pr_fmt(fmt) "hashX hashX: " fmt
15
16#include <linux/clk.h>
17#include <linux/device.h>
18#include <linux/err.h>
19#include <linux/init.h>
20#include <linux/io.h>
21#include <linux/klist.h>
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
25#include <linux/crypto.h>
26
27#include <linux/regulator/consumer.h>
28#include <linux/dmaengine.h>
29#include <linux/bitops.h>
30
31#include <crypto/internal/hash.h>
32#include <crypto/sha.h>
33#include <crypto/scatterwalk.h>
34#include <crypto/algapi.h>
35
36#include <linux/platform_data/crypto-ux500.h>
37
38#include "hash_alg.h"
39
40static int hash_mode;
41module_param(hash_mode, int, 0);
42MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1");
43
44/**
45 * Pre-calculated empty message digests.
46 */
47static const u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = {
48	0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
49	0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
50	0xaf, 0xd8, 0x07, 0x09
51};
52
53static const u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = {
54	0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
55	0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
56	0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
57	0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
58};
59
60/* HMAC-SHA1, no key */
61static const u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = {
62	0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08,
63	0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63,
64	0x70, 0x69, 0x0e, 0x1d
65};
66
67/* HMAC-SHA256, no key */
68static const u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = {
69	0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec,
70	0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5,
71	0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53,
72	0xc6, 0xc7, 0x12, 0x14, 0x42, 0x92, 0xc5, 0xad
73};
74
75/**
76 * struct hash_driver_data - data specific to the driver.
77 *
78 * @device_list:	A list of registered devices to choose from.
79 * @device_allocation:	A semaphore initialized with number of devices.
80 */
81struct hash_driver_data {
82	struct klist		device_list;
83	struct semaphore	device_allocation;
84};
85
86static struct hash_driver_data	driver_data;
87
88/* Declaration of functions */
89/**
90 * hash_messagepad - Pads a message and write the nblw bits.
91 * @device_data:	Structure for the hash device.
92 * @message:		Last word of a message
93 * @index_bytes:	The number of bytes in the last message
94 *
95 * This function manages the final part of the digest calculation, when less
96 * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
97 *
98 */
99static void hash_messagepad(struct hash_device_data *device_data,
100			    const u32 *message, u8 index_bytes);
101
102/**
103 * release_hash_device - Releases a previously allocated hash device.
104 * @device_data:	Structure for the hash device.
105 *
106 */
107static void release_hash_device(struct hash_device_data *device_data)
108{
109	spin_lock(&device_data->ctx_lock);
110	device_data->current_ctx->device = NULL;
111	device_data->current_ctx = NULL;
112	spin_unlock(&device_data->ctx_lock);
113
114	/*
115	 * The down_interruptible part for this semaphore is called in
116	 * cryp_get_device_data.
117	 */
118	up(&driver_data.device_allocation);
119}
120
121static void hash_dma_setup_channel(struct hash_device_data *device_data,
122				   struct device *dev)
123{
124	struct hash_platform_data *platform_data = dev->platform_data;
125	struct dma_slave_config conf = {
126		.direction = DMA_MEM_TO_DEV,
127		.dst_addr = device_data->phybase + HASH_DMA_FIFO,
128		.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
129		.dst_maxburst = 16,
130	};
131
132	dma_cap_zero(device_data->dma.mask);
133	dma_cap_set(DMA_SLAVE, device_data->dma.mask);
134
135	device_data->dma.cfg_mem2hash = platform_data->mem_to_engine;
136	device_data->dma.chan_mem2hash =
137		dma_request_channel(device_data->dma.mask,
138				    platform_data->dma_filter,
139				    device_data->dma.cfg_mem2hash);
140
141	dmaengine_slave_config(device_data->dma.chan_mem2hash, &conf);
142
143	init_completion(&device_data->dma.complete);
144}
145
146static void hash_dma_callback(void *data)
147{
148	struct hash_ctx *ctx = data;
149
150	complete(&ctx->device->dma.complete);
151}
152
153static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
154				 int len, enum dma_data_direction direction)
155{
156	struct dma_async_tx_descriptor *desc = NULL;
157	struct dma_chan *channel = NULL;
158	dma_cookie_t cookie;
159
160	if (direction != DMA_TO_DEVICE) {
161		dev_err(ctx->device->dev, "%s: Invalid DMA direction\n",
162			__func__);
163		return -EFAULT;
164	}
165
166	sg->length = ALIGN(sg->length, HASH_DMA_ALIGN_SIZE);
167
168	channel = ctx->device->dma.chan_mem2hash;
169	ctx->device->dma.sg = sg;
170	ctx->device->dma.sg_len = dma_map_sg(channel->device->dev,
171			ctx->device->dma.sg, ctx->device->dma.nents,
172			direction);
173
174	if (!ctx->device->dma.sg_len) {
175		dev_err(ctx->device->dev, "%s: Could not map the sg list (TO_DEVICE)\n",
176			__func__);
177		return -EFAULT;
178	}
179
180	dev_dbg(ctx->device->dev, "%s: Setting up DMA for buffer (TO_DEVICE)\n",
181		__func__);
182	desc = dmaengine_prep_slave_sg(channel,
183			ctx->device->dma.sg, ctx->device->dma.sg_len,
184			direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
185	if (!desc) {
186		dev_err(ctx->device->dev,
187			"%s: dmaengine_prep_slave_sg() failed!\n", __func__);
188		return -EFAULT;
189	}
190
191	desc->callback = hash_dma_callback;
192	desc->callback_param = ctx;
193
194	cookie = dmaengine_submit(desc);
195	dma_async_issue_pending(channel);
196
197	return 0;
198}
199
200static void hash_dma_done(struct hash_ctx *ctx)
201{
202	struct dma_chan *chan;
203
204	chan = ctx->device->dma.chan_mem2hash;
205	dmaengine_terminate_all(chan);
206	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
207		     ctx->device->dma.sg_len, DMA_TO_DEVICE);
208}
209
210static int hash_dma_write(struct hash_ctx *ctx,
211			  struct scatterlist *sg, int len)
212{
213	int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
214	if (error) {
215		dev_dbg(ctx->device->dev,
216			"%s: hash_set_dma_transfer() failed\n", __func__);
217		return error;
218	}
219
220	return len;
221}
222
223/**
224 * get_empty_message_digest - Returns a pre-calculated digest for
225 * the empty message.
226 * @device_data:	Structure for the hash device.
227 * @zero_hash:		Buffer to return the empty message digest.
228 * @zero_hash_size:	Hash size of the empty message digest.
229 * @zero_digest:	True if zero_digest returned.
230 */
231static int get_empty_message_digest(
232		struct hash_device_data *device_data,
233		u8 *zero_hash, u32 *zero_hash_size, bool *zero_digest)
234{
235	int ret = 0;
236	struct hash_ctx *ctx = device_data->current_ctx;
237	*zero_digest = false;
238
239	/**
240	 * Caller responsible for ctx != NULL.
241	 */
242
243	if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) {
244		if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
245			memcpy(zero_hash, &zero_message_hash_sha1[0],
246			       SHA1_DIGEST_SIZE);
247			*zero_hash_size = SHA1_DIGEST_SIZE;
248			*zero_digest = true;
249		} else if (HASH_ALGO_SHA256 ==
250				ctx->config.algorithm) {
251			memcpy(zero_hash, &zero_message_hash_sha256[0],
252			       SHA256_DIGEST_SIZE);
253			*zero_hash_size = SHA256_DIGEST_SIZE;
254			*zero_digest = true;
255		} else {
256			dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
257				__func__);
258			ret = -EINVAL;
259			goto out;
260		}
261	} else if (HASH_OPER_MODE_HMAC == ctx->config.oper_mode) {
262		if (!ctx->keylen) {
263			if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
264				memcpy(zero_hash, &zero_message_hmac_sha1[0],
265				       SHA1_DIGEST_SIZE);
266				*zero_hash_size = SHA1_DIGEST_SIZE;
267				*zero_digest = true;
268			} else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
269				memcpy(zero_hash, &zero_message_hmac_sha256[0],
270				       SHA256_DIGEST_SIZE);
271				*zero_hash_size = SHA256_DIGEST_SIZE;
272				*zero_digest = true;
273			} else {
274				dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
275					__func__);
276				ret = -EINVAL;
277				goto out;
278			}
279		} else {
280			dev_dbg(device_data->dev,
281				"%s: Continue hash calculation, since hmac key available\n",
282				__func__);
283		}
284	}
285out:
286
287	return ret;
288}
289
290/**
291 * hash_disable_power - Request to disable power and clock.
292 * @device_data:	Structure for the hash device.
293 * @save_device_state:	If true, saves the current hw state.
294 *
295 * This function request for disabling power (regulator) and clock,
296 * and could also save current hw state.
297 */
298static int hash_disable_power(struct hash_device_data *device_data,
299			      bool save_device_state)
300{
301	int ret = 0;
302	struct device *dev = device_data->dev;
303
304	spin_lock(&device_data->power_state_lock);
305	if (!device_data->power_state)
306		goto out;
307
308	if (save_device_state) {
309		hash_save_state(device_data,
310				&device_data->state);
311		device_data->restore_dev_state = true;
312	}
313
314	clk_disable(device_data->clk);
315	ret = regulator_disable(device_data->regulator);
316	if (ret)
317		dev_err(dev, "%s: regulator_disable() failed!\n", __func__);
318
319	device_data->power_state = false;
320
321out:
322	spin_unlock(&device_data->power_state_lock);
323
324	return ret;
325}
326
327/**
328 * hash_enable_power - Request to enable power and clock.
329 * @device_data:		Structure for the hash device.
330 * @restore_device_state:	If true, restores a previous saved hw state.
331 *
332 * This function request for enabling power (regulator) and clock,
333 * and could also restore a previously saved hw state.
334 */
335static int hash_enable_power(struct hash_device_data *device_data,
336			     bool restore_device_state)
337{
338	int ret = 0;
339	struct device *dev = device_data->dev;
340
341	spin_lock(&device_data->power_state_lock);
342	if (!device_data->power_state) {
343		ret = regulator_enable(device_data->regulator);
344		if (ret) {
345			dev_err(dev, "%s: regulator_enable() failed!\n",
346				__func__);
347			goto out;
348		}
349		ret = clk_enable(device_data->clk);
350		if (ret) {
351			dev_err(dev, "%s: clk_enable() failed!\n", __func__);
352			ret = regulator_disable(
353					device_data->regulator);
354			goto out;
355		}
356		device_data->power_state = true;
357	}
358
359	if (device_data->restore_dev_state) {
360		if (restore_device_state) {
361			device_data->restore_dev_state = false;
362			hash_resume_state(device_data, &device_data->state);
363		}
364	}
365out:
366	spin_unlock(&device_data->power_state_lock);
367
368	return ret;
369}
370
371/**
372 * hash_get_device_data - Checks for an available hash device and return it.
373 * @hash_ctx:		Structure for the hash context.
374 * @device_data:	Structure for the hash device.
375 *
376 * This function check for an available hash device and return it to
377 * the caller.
378 * Note! Caller need to release the device, calling up().
379 */
380static int hash_get_device_data(struct hash_ctx *ctx,
381				struct hash_device_data **device_data)
382{
383	int			ret;
384	struct klist_iter	device_iterator;
385	struct klist_node	*device_node;
386	struct hash_device_data *local_device_data = NULL;
387
388	/* Wait until a device is available */
389	ret = down_interruptible(&driver_data.device_allocation);
390	if (ret)
391		return ret;  /* Interrupted */
392
393	/* Select a device */
394	klist_iter_init(&driver_data.device_list, &device_iterator);
395	device_node = klist_next(&device_iterator);
396	while (device_node) {
397		local_device_data = container_of(device_node,
398					   struct hash_device_data, list_node);
399		spin_lock(&local_device_data->ctx_lock);
400		/* current_ctx allocates a device, NULL = unallocated */
401		if (local_device_data->current_ctx) {
402			device_node = klist_next(&device_iterator);
403		} else {
404			local_device_data->current_ctx = ctx;
405			ctx->device = local_device_data;
406			spin_unlock(&local_device_data->ctx_lock);
407			break;
408		}
409		spin_unlock(&local_device_data->ctx_lock);
410	}
411	klist_iter_exit(&device_iterator);
412
413	if (!device_node) {
414		/**
415		 * No free device found.
416		 * Since we allocated a device with down_interruptible, this
417		 * should not be able to happen.
418		 * Number of available devices, which are contained in
419		 * device_allocation, is therefore decremented by not doing
420		 * an up(device_allocation).
421		 */
422		return -EBUSY;
423	}
424
425	*device_data = local_device_data;
426
427	return 0;
428}
429
430/**
431 * hash_hw_write_key - Writes the key to the hardware registries.
432 *
433 * @device_data:	Structure for the hash device.
434 * @key:		Key to be written.
435 * @keylen:		The lengt of the key.
436 *
437 * Note! This function DOES NOT write to the NBLW registry, even though
438 * specified in the the hw design spec. Either due to incorrect info in the
439 * spec or due to a bug in the hw.
440 */
441static void hash_hw_write_key(struct hash_device_data *device_data,
442			      const u8 *key, unsigned int keylen)
443{
444	u32 word = 0;
445	int nwords = 1;
446
447	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
448
449	while (keylen >= 4) {
450		u32 *key_word = (u32 *)key;
451
452		HASH_SET_DIN(key_word, nwords);
453		keylen -= 4;
454		key += 4;
455	}
456
457	/* Take care of the remaining bytes in the last word */
458	if (keylen) {
459		word = 0;
460		while (keylen) {
461			word |= (key[keylen - 1] << (8 * (keylen - 1)));
462			keylen--;
463		}
464
465		HASH_SET_DIN(&word, nwords);
466	}
467
468	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
469		cpu_relax();
470
471	HASH_SET_DCAL;
472
473	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
474		cpu_relax();
475}
476
477/**
478 * init_hash_hw - Initialise the hash hardware for a new calculation.
479 * @device_data:	Structure for the hash device.
480 * @ctx:		The hash context.
481 *
482 * This function will enable the bits needed to clear and start a new
483 * calculation.
484 */
485static int init_hash_hw(struct hash_device_data *device_data,
486			struct hash_ctx *ctx)
487{
488	int ret = 0;
489
490	ret = hash_setconfiguration(device_data, &ctx->config);
491	if (ret) {
492		dev_err(device_data->dev, "%s: hash_setconfiguration() failed!\n",
493			__func__);
494		return ret;
495	}
496
497	hash_begin(device_data, ctx);
498
499	if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
500		hash_hw_write_key(device_data, ctx->key, ctx->keylen);
501
502	return ret;
503}
504
505/**
506 * hash_get_nents - Return number of entries (nents) in scatterlist (sg).
507 *
508 * @sg:		Scatterlist.
509 * @size:	Size in bytes.
510 * @aligned:	True if sg data aligned to work in DMA mode.
511 *
512 */
513static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned)
514{
515	int nents = 0;
516	bool aligned_data = true;
517
518	while (size > 0 && sg) {
519		nents++;
520		size -= sg->length;
521
522		/* hash_set_dma_transfer will align last nent */
523		if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) ||
524		    (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && size > 0))
525			aligned_data = false;
526
527		sg = sg_next(sg);
528	}
529
530	if (aligned)
531		*aligned = aligned_data;
532
533	if (size != 0)
534		return -EFAULT;
535
536	return nents;
537}
538
539/**
540 * hash_dma_valid_data - checks for dma valid sg data.
541 * @sg:		Scatterlist.
542 * @datasize:	Datasize in bytes.
543 *
544 * NOTE! This function checks for dma valid sg data, since dma
545 * only accept datasizes of even wordsize.
546 */
547static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
548{
549	bool aligned;
550
551	/* Need to include at least one nent, else error */
552	if (hash_get_nents(sg, datasize, &aligned) < 1)
553		return false;
554
555	return aligned;
556}
557
558/**
559 * hash_init - Common hash init function for SHA1/SHA2 (SHA256).
560 * @req: The hash request for the job.
561 *
562 * Initialize structures.
563 */
564static int hash_init(struct ahash_request *req)
565{
566	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
567	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
568	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
569
570	if (!ctx->key)
571		ctx->keylen = 0;
572
573	memset(&req_ctx->state, 0, sizeof(struct hash_state));
574	req_ctx->updated = 0;
575	if (hash_mode == HASH_MODE_DMA) {
576		if (req->nbytes < HASH_DMA_ALIGN_SIZE) {
577			req_ctx->dma_mode = false; /* Don't use DMA */
578
579			pr_debug("%s: DMA mode, but direct to CPU mode for data size < %d\n",
580				 __func__, HASH_DMA_ALIGN_SIZE);
581		} else {
582			if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE &&
583			    hash_dma_valid_data(req->src, req->nbytes)) {
584				req_ctx->dma_mode = true;
585			} else {
586				req_ctx->dma_mode = false;
587				pr_debug("%s: DMA mode, but use CPU mode for datalength < %d or non-aligned data, except in last nent\n",
588					 __func__,
589					 HASH_DMA_PERFORMANCE_MIN_SIZE);
590			}
591		}
592	}
593	return 0;
594}
595
596/**
597 * hash_processblock - This function processes a single block of 512 bits (64
598 *                     bytes), word aligned, starting at message.
599 * @device_data:	Structure for the hash device.
600 * @message:		Block (512 bits) of message to be written to
601 *			the HASH hardware.
602 *
603 */
604static void hash_processblock(struct hash_device_data *device_data,
605			      const u32 *message, int length)
606{
607	int len = length / HASH_BYTES_PER_WORD;
608	/*
609	 * NBLW bits. Reset the number of bits in last word (NBLW).
610	 */
611	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
612
613	/*
614	 * Write message data to the HASH_DIN register.
615	 */
616	HASH_SET_DIN(message, len);
617}
618
619/**
620 * hash_messagepad - Pads a message and write the nblw bits.
621 * @device_data:	Structure for the hash device.
622 * @message:		Last word of a message.
623 * @index_bytes:	The number of bytes in the last message.
624 *
625 * This function manages the final part of the digest calculation, when less
626 * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
627 *
628 */
629static void hash_messagepad(struct hash_device_data *device_data,
630			    const u32 *message, u8 index_bytes)
631{
632	int nwords = 1;
633
634	/*
635	 * Clear hash str register, only clear NBLW
636	 * since DCAL will be reset by hardware.
637	 */
638	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
639
640	/* Main loop */
641	while (index_bytes >= 4) {
642		HASH_SET_DIN(message, nwords);
643		index_bytes -= 4;
644		message++;
645	}
646
647	if (index_bytes)
648		HASH_SET_DIN(message, nwords);
649
650	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
651		cpu_relax();
652
653	/* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */
654	HASH_SET_NBLW(index_bytes * 8);
655	dev_dbg(device_data->dev, "%s: DIN=0x%08x NBLW=%lu\n",
656		__func__, readl_relaxed(&device_data->base->din),
657		readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);
658	HASH_SET_DCAL;
659	dev_dbg(device_data->dev, "%s: after dcal -> DIN=0x%08x NBLW=%lu\n",
660		__func__, readl_relaxed(&device_data->base->din),
661		readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);
662
663	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
664		cpu_relax();
665}
666
667/**
668 * hash_incrementlength - Increments the length of the current message.
669 * @ctx: Hash context
670 * @incr: Length of message processed already
671 *
672 * Overflow cannot occur, because conditions for overflow are checked in
673 * hash_hw_update.
674 */
675static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr)
676{
677	ctx->state.length.low_word += incr;
678
679	/* Check for wrap-around */
680	if (ctx->state.length.low_word < incr)
681		ctx->state.length.high_word++;
682}
683
684/**
685 * hash_setconfiguration - Sets the required configuration for the hash
686 *                         hardware.
687 * @device_data:	Structure for the hash device.
688 * @config:		Pointer to a configuration structure.
689 */
690int hash_setconfiguration(struct hash_device_data *device_data,
691			  struct hash_config *config)
692{
693	int ret = 0;
694
695	if (config->algorithm != HASH_ALGO_SHA1 &&
696	    config->algorithm != HASH_ALGO_SHA256)
697		return -EPERM;
698
699	/*
700	 * DATAFORM bits. Set the DATAFORM bits to 0b11, which means the data
701	 * to be written to HASH_DIN is considered as 32 bits.
702	 */
703	HASH_SET_DATA_FORMAT(config->data_format);
704
705	/*
706	 * ALGO bit. Set to 0b1 for SHA-1 and 0b0 for SHA-256
707	 */
708	switch (config->algorithm) {
709	case HASH_ALGO_SHA1:
710		HASH_SET_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
711		break;
712
713	case HASH_ALGO_SHA256:
714		HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
715		break;
716
717	default:
718		dev_err(device_data->dev, "%s: Incorrect algorithm\n",
719			__func__);
720		return -EPERM;
721	}
722
723	/*
724	 * MODE bit. This bit selects between HASH or HMAC mode for the
725	 * selected algorithm. 0b0 = HASH and 0b1 = HMAC.
726	 */
727	if (HASH_OPER_MODE_HASH == config->oper_mode)
728		HASH_CLEAR_BITS(&device_data->base->cr,
729				HASH_CR_MODE_MASK);
730	else if (HASH_OPER_MODE_HMAC == config->oper_mode) {
731		HASH_SET_BITS(&device_data->base->cr, HASH_CR_MODE_MASK);
732		if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) {
733			/* Truncate key to blocksize */
734			dev_dbg(device_data->dev, "%s: LKEY set\n", __func__);
735			HASH_SET_BITS(&device_data->base->cr,
736				      HASH_CR_LKEY_MASK);
737		} else {
738			dev_dbg(device_data->dev, "%s: LKEY cleared\n",
739				__func__);
740			HASH_CLEAR_BITS(&device_data->base->cr,
741					HASH_CR_LKEY_MASK);
742		}
743	} else {	/* Wrong hash mode */
744		ret = -EPERM;
745		dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
746			__func__);
747	}
748	return ret;
749}
750
751/**
752 * hash_begin - This routine resets some globals and initializes the hash
753 *              hardware.
754 * @device_data:	Structure for the hash device.
755 * @ctx:		Hash context.
756 */
757void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
758{
759	/* HW and SW initializations */
760	/* Note: there is no need to initialize buffer and digest members */
761
762	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
763		cpu_relax();
764
765	/*
766	 * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
767	 * prepare the initialize the HASH accelerator to compute the message
768	 * digest of a new message.
769	 */
770	HASH_INITIALIZE;
771
772	/*
773	 * NBLW bits. Reset the number of bits in last word (NBLW).
774	 */
775	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
776}
777
778static int hash_process_data(struct hash_device_data *device_data,
779			     struct hash_ctx *ctx, struct hash_req_ctx *req_ctx,
780			     int msg_length, u8 *data_buffer, u8 *buffer,
781			     u8 *index)
782{
783	int ret = 0;
784	u32 count;
785
786	do {
787		if ((*index + msg_length) < HASH_BLOCK_SIZE) {
788			for (count = 0; count < msg_length; count++) {
789				buffer[*index + count] =
790					*(data_buffer + count);
791			}
792			*index += msg_length;
793			msg_length = 0;
794		} else {
795			if (req_ctx->updated) {
796				ret = hash_resume_state(device_data,
797						&device_data->state);
798				memmove(req_ctx->state.buffer,
799					device_data->state.buffer,
800					HASH_BLOCK_SIZE / sizeof(u32));
801				if (ret) {
802					dev_err(device_data->dev,
803						"%s: hash_resume_state() failed!\n",
804						__func__);
805					goto out;
806				}
807			} else {
808				ret = init_hash_hw(device_data, ctx);
809				if (ret) {
810					dev_err(device_data->dev,
811						"%s: init_hash_hw() failed!\n",
812						__func__);
813					goto out;
814				}
815				req_ctx->updated = 1;
816			}
817			/*
818			 * If 'data_buffer' is four byte aligned and
819			 * local buffer does not have any data, we can
820			 * write data directly from 'data_buffer' to
821			 * HW peripheral, otherwise we first copy data
822			 * to a local buffer
823			 */
824			if ((0 == (((u32)data_buffer) % 4)) &&
825			    (0 == *index))
826				hash_processblock(device_data,
827						  (const u32 *)data_buffer,
828						  HASH_BLOCK_SIZE);
829			else {
830				for (count = 0;
831				     count < (u32)(HASH_BLOCK_SIZE - *index);
832				     count++) {
833					buffer[*index + count] =
834						*(data_buffer + count);
835				}
836				hash_processblock(device_data,
837						  (const u32 *)buffer,
838						  HASH_BLOCK_SIZE);
839			}
840			hash_incrementlength(req_ctx, HASH_BLOCK_SIZE);
841			data_buffer += (HASH_BLOCK_SIZE - *index);
842
843			msg_length -= (HASH_BLOCK_SIZE - *index);
844			*index = 0;
845
846			ret = hash_save_state(device_data,
847					&device_data->state);
848
849			memmove(device_data->state.buffer,
850				req_ctx->state.buffer,
851				HASH_BLOCK_SIZE / sizeof(u32));
852			if (ret) {
853				dev_err(device_data->dev, "%s: hash_save_state() failed!\n",
854					__func__);
855				goto out;
856			}
857		}
858	} while (msg_length != 0);
859out:
860
861	return ret;
862}
863
864/**
865 * hash_dma_final - The hash dma final function for SHA1/SHA256.
866 * @req:	The hash request for the job.
867 */
868static int hash_dma_final(struct ahash_request *req)
869{
870	int ret = 0;
871	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
872	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
873	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
874	struct hash_device_data *device_data;
875	u8 digest[SHA256_DIGEST_SIZE];
876	int bytes_written = 0;
877
878	ret = hash_get_device_data(ctx, &device_data);
879	if (ret)
880		return ret;
881
882	dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx);
883
884	if (req_ctx->updated) {
885		ret = hash_resume_state(device_data, &device_data->state);
886
887		if (ret) {
888			dev_err(device_data->dev, "%s: hash_resume_state() failed!\n",
889				__func__);
890			goto out;
891		}
892	}
893
894	if (!req_ctx->updated) {
895		ret = hash_setconfiguration(device_data, &ctx->config);
896		if (ret) {
897			dev_err(device_data->dev,
898				"%s: hash_setconfiguration() failed!\n",
899				__func__);
900			goto out;
901		}
902
903		/* Enable DMA input */
904		if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode) {
905			HASH_CLEAR_BITS(&device_data->base->cr,
906					HASH_CR_DMAE_MASK);
907		} else {
908			HASH_SET_BITS(&device_data->base->cr,
909				      HASH_CR_DMAE_MASK);
910			HASH_SET_BITS(&device_data->base->cr,
911				      HASH_CR_PRIVN_MASK);
912		}
913
914		HASH_INITIALIZE;
915
916		if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
917			hash_hw_write_key(device_data, ctx->key, ctx->keylen);
918
919		/* Number of bits in last word = (nbytes * 8) % 32 */
920		HASH_SET_NBLW((req->nbytes * 8) % 32);
921		req_ctx->updated = 1;
922	}
923
924	/* Store the nents in the dma struct. */
925	ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL);
926	if (!ctx->device->dma.nents) {
927		dev_err(device_data->dev, "%s: ctx->device->dma.nents = 0\n",
928			__func__);
929		ret = ctx->device->dma.nents;
930		goto out;
931	}
932
933	bytes_written = hash_dma_write(ctx, req->src, req->nbytes);
934	if (bytes_written != req->nbytes) {
935		dev_err(device_data->dev, "%s: hash_dma_write() failed!\n",
936			__func__);
937		ret = bytes_written;
938		goto out;
939	}
940
941	wait_for_completion(&ctx->device->dma.complete);
942	hash_dma_done(ctx);
943
944	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
945		cpu_relax();
946
947	if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
948		unsigned int keylen = ctx->keylen;
949		u8 *key = ctx->key;
950
951		dev_dbg(device_data->dev, "%s: keylen: %d\n",
952			__func__, ctx->keylen);
953		hash_hw_write_key(device_data, key, keylen);
954	}
955
956	hash_get_digest(device_data, digest, ctx->config.algorithm);
957	memcpy(req->result, digest, ctx->digestsize);
958
959out:
960	release_hash_device(device_data);
961
962	/**
963	 * Allocated in setkey, and only used in HMAC.
964	 */
965	kfree(ctx->key);
966
967	return ret;
968}
969
970/**
971 * hash_hw_final - The final hash calculation function
972 * @req:	The hash request for the job.
973 */
974static int hash_hw_final(struct ahash_request *req)
975{
976	int ret = 0;
977	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
978	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
979	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
980	struct hash_device_data *device_data;
981	u8 digest[SHA256_DIGEST_SIZE];
982
983	ret = hash_get_device_data(ctx, &device_data);
984	if (ret)
985		return ret;
986
987	dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx);
988
989	if (req_ctx->updated) {
990		ret = hash_resume_state(device_data, &device_data->state);
991
992		if (ret) {
993			dev_err(device_data->dev,
994				"%s: hash_resume_state() failed!\n", __func__);
995			goto out;
996		}
997	} else if (req->nbytes == 0 && ctx->keylen == 0) {
998		u8 zero_hash[SHA256_DIGEST_SIZE];
999		u32 zero_hash_size = 0;
1000		bool zero_digest = false;
1001		/**
1002		 * Use a pre-calculated empty message digest
1003		 * (workaround since hw return zeroes, hw bug!?)
1004		 */
1005		ret = get_empty_message_digest(device_data, &zero_hash[0],
1006				&zero_hash_size, &zero_digest);
1007		if (!ret && likely(zero_hash_size == ctx->digestsize) &&
1008		    zero_digest) {
1009			memcpy(req->result, &zero_hash[0], ctx->digestsize);
1010			goto out;
1011		} else if (!ret && !zero_digest) {
1012			dev_dbg(device_data->dev,
1013				"%s: HMAC zero msg with key, continue...\n",
1014				__func__);
1015		} else {
1016			dev_err(device_data->dev,
1017				"%s: ret=%d, or wrong digest size? %s\n",
1018				__func__, ret,
1019				zero_hash_size == ctx->digestsize ?
1020				"true" : "false");
1021			/* Return error */
1022			goto out;
1023		}
1024	} else if (req->nbytes == 0 && ctx->keylen > 0) {
1025		dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n",
1026			__func__);
1027		goto out;
1028	}
1029
1030	if (!req_ctx->updated) {
1031		ret = init_hash_hw(device_data, ctx);
1032		if (ret) {
1033			dev_err(device_data->dev,
1034				"%s: init_hash_hw() failed!\n", __func__);
1035			goto out;
1036		}
1037	}
1038
1039	if (req_ctx->state.index) {
1040		hash_messagepad(device_data, req_ctx->state.buffer,
1041				req_ctx->state.index);
1042	} else {
1043		HASH_SET_DCAL;
1044		while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
1045			cpu_relax();
1046	}
1047
1048	if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
1049		unsigned int keylen = ctx->keylen;
1050		u8 *key = ctx->key;
1051
1052		dev_dbg(device_data->dev, "%s: keylen: %d\n",
1053			__func__, ctx->keylen);
1054		hash_hw_write_key(device_data, key, keylen);
1055	}
1056
1057	hash_get_digest(device_data, digest, ctx->config.algorithm);
1058	memcpy(req->result, digest, ctx->digestsize);
1059
1060out:
1061	release_hash_device(device_data);
1062
1063	/**
1064	 * Allocated in setkey, and only used in HMAC.
1065	 */
1066	kfree(ctx->key);
1067
1068	return ret;
1069}
1070
1071/**
1072 * hash_hw_update - Updates current HASH computation hashing another part of
1073 *                  the message.
1074 * @req:	Byte array containing the message to be hashed (caller
1075 *		allocated).
1076 */
1077int hash_hw_update(struct ahash_request *req)
1078{
1079	int ret = 0;
1080	u8 index = 0;
1081	u8 *buffer;
1082	struct hash_device_data *device_data;
1083	u8 *data_buffer;
1084	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1085	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1086	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
1087	struct crypto_hash_walk walk;
1088	int msg_length = crypto_hash_walk_first(req, &walk);
1089
1090	/* Empty message ("") is correct indata */
1091	if (msg_length == 0)
1092		return ret;
1093
1094	index = req_ctx->state.index;
1095	buffer = (u8 *)req_ctx->state.buffer;
1096
1097	/* Check if ctx->state.length + msg_length
1098	   overflows */
1099	if (msg_length > (req_ctx->state.length.low_word + msg_length) &&
1100	    HASH_HIGH_WORD_MAX_VAL == req_ctx->state.length.high_word) {
1101		pr_err("%s: HASH_MSG_LENGTH_OVERFLOW!\n", __func__);
1102		return -EPERM;
1103	}
1104
1105	ret = hash_get_device_data(ctx, &device_data);
1106	if (ret)
1107		return ret;
1108
1109	/* Main loop */
1110	while (0 != msg_length) {
1111		data_buffer = walk.data;
1112		ret = hash_process_data(device_data, ctx, req_ctx, msg_length,
1113				data_buffer, buffer, &index);
1114
1115		if (ret) {
1116			dev_err(device_data->dev, "%s: hash_internal_hw_update() failed!\n",
1117				__func__);
1118			goto out;
1119		}
1120
1121		msg_length = crypto_hash_walk_done(&walk, 0);
1122	}
1123
1124	req_ctx->state.index = index;
1125	dev_dbg(device_data->dev, "%s: indata length=%d, bin=%d\n",
1126		__func__, req_ctx->state.index, req_ctx->state.bit_index);
1127
1128out:
1129	release_hash_device(device_data);
1130
1131	return ret;
1132}
1133
1134/**
1135 * hash_resume_state - Function that resumes the state of an calculation.
1136 * @device_data:	Pointer to the device structure.
1137 * @device_state:	The state to be restored in the hash hardware
1138 */
1139int hash_resume_state(struct hash_device_data *device_data,
1140		      const struct hash_state *device_state)
1141{
1142	u32 temp_cr;
1143	s32 count;
1144	int hash_mode = HASH_OPER_MODE_HASH;
1145
1146	if (NULL == device_state) {
1147		dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
1148			__func__);
1149		return -EPERM;
1150	}
1151
1152	/* Check correctness of index and length members */
1153	if (device_state->index > HASH_BLOCK_SIZE ||
1154	    (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) {
1155		dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
1156			__func__);
1157		return -EPERM;
1158	}
1159
1160	/*
1161	 * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
1162	 * prepare the initialize the HASH accelerator to compute the message
1163	 * digest of a new message.
1164	 */
1165	HASH_INITIALIZE;
1166
1167	temp_cr = device_state->temp_cr;
1168	writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr);
1169
1170	if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
1171		hash_mode = HASH_OPER_MODE_HMAC;
1172	else
1173		hash_mode = HASH_OPER_MODE_HASH;
1174
1175	for (count = 0; count < HASH_CSR_COUNT; count++) {
1176		if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
1177			break;
1178
1179		writel_relaxed(device_state->csr[count],
1180			       &device_data->base->csrx[count]);
1181	}
1182
1183	writel_relaxed(device_state->csfull, &device_data->base->csfull);
1184	writel_relaxed(device_state->csdatain, &device_data->base->csdatain);
1185
1186	writel_relaxed(device_state->str_reg, &device_data->base->str);
1187	writel_relaxed(temp_cr, &device_data->base->cr);
1188
1189	return 0;
1190}
1191
1192/**
1193 * hash_save_state - Function that saves the state of hardware.
1194 * @device_data:	Pointer to the device structure.
1195 * @device_state:	The strucure where the hardware state should be saved.
1196 */
1197int hash_save_state(struct hash_device_data *device_data,
1198		    struct hash_state *device_state)
1199{
1200	u32 temp_cr;
1201	u32 count;
1202	int hash_mode = HASH_OPER_MODE_HASH;
1203
1204	if (NULL == device_state) {
1205		dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
1206			__func__);
1207		return -ENOTSUPP;
1208	}
1209
1210	/* Write dummy value to force digest intermediate calculation. This
1211	 * actually makes sure that there isn't any ongoing calculation in the
1212	 * hardware.
1213	 */
1214	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
1215		cpu_relax();
1216
1217	temp_cr = readl_relaxed(&device_data->base->cr);
1218
1219	device_state->str_reg = readl_relaxed(&device_data->base->str);
1220
1221	device_state->din_reg = readl_relaxed(&device_data->base->din);
1222
1223	if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
1224		hash_mode = HASH_OPER_MODE_HMAC;
1225	else
1226		hash_mode = HASH_OPER_MODE_HASH;
1227
1228	for (count = 0; count < HASH_CSR_COUNT; count++) {
1229		if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
1230			break;
1231
1232		device_state->csr[count] =
1233			readl_relaxed(&device_data->base->csrx[count]);
1234	}
1235
1236	device_state->csfull = readl_relaxed(&device_data->base->csfull);
1237	device_state->csdatain = readl_relaxed(&device_data->base->csdatain);
1238
1239	device_state->temp_cr = temp_cr;
1240
1241	return 0;
1242}
1243
1244/**
1245 * hash_check_hw - This routine checks for peripheral Ids and PCell Ids.
1246 * @device_data:
1247 *
1248 */
1249int hash_check_hw(struct hash_device_data *device_data)
1250{
1251	/* Checking Peripheral Ids  */
1252	if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0) &&
1253	    HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1) &&
1254	    HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2) &&
1255	    HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3) &&
1256	    HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0) &&
1257	    HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1) &&
1258	    HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2) &&
1259	    HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)) {
1260		return 0;
1261	}
1262
1263	dev_err(device_data->dev, "%s: HASH_UNSUPPORTED_HW!\n", __func__);
1264	return -ENOTSUPP;
1265}
1266
1267/**
1268 * hash_get_digest - Gets the digest.
1269 * @device_data:	Pointer to the device structure.
1270 * @digest:		User allocated byte array for the calculated digest.
1271 * @algorithm:		The algorithm in use.
1272 */
1273void hash_get_digest(struct hash_device_data *device_data,
1274		     u8 *digest, int algorithm)
1275{
1276	u32 temp_hx_val, count;
1277	int loop_ctr;
1278
1279	if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) {
1280		dev_err(device_data->dev, "%s: Incorrect algorithm %d\n",
1281			__func__, algorithm);
1282		return;
1283	}
1284
1285	if (algorithm == HASH_ALGO_SHA1)
1286		loop_ctr = SHA1_DIGEST_SIZE / sizeof(u32);
1287	else
1288		loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32);
1289
1290	dev_dbg(device_data->dev, "%s: digest array:(0x%x)\n",
1291		__func__, (u32) digest);
1292
1293	/* Copy result into digest array */
1294	for (count = 0; count < loop_ctr; count++) {
1295		temp_hx_val = readl_relaxed(&device_data->base->hx[count]);
1296		digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF);
1297		digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF);
1298		digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF);
1299		digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF);
1300	}
1301}
1302
1303/**
1304 * hash_update - The hash update function for SHA1/SHA2 (SHA256).
1305 * @req: The hash request for the job.
1306 */
1307static int ahash_update(struct ahash_request *req)
1308{
1309	int ret = 0;
1310	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
1311
1312	if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode)
1313		ret = hash_hw_update(req);
1314	/* Skip update for DMA, all data will be passed to DMA in final */
1315
1316	if (ret) {
1317		pr_err("%s: hash_hw_update() failed!\n", __func__);
1318	}
1319
1320	return ret;
1321}
1322
1323/**
1324 * hash_final - The hash final function for SHA1/SHA2 (SHA256).
1325 * @req:	The hash request for the job.
1326 */
1327static int ahash_final(struct ahash_request *req)
1328{
1329	int ret = 0;
1330	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
1331
1332	pr_debug("%s: data size: %d\n", __func__, req->nbytes);
1333
1334	if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode)
1335		ret = hash_dma_final(req);
1336	else
1337		ret = hash_hw_final(req);
1338
1339	if (ret) {
1340		pr_err("%s: hash_hw/dma_final() failed\n", __func__);
1341	}
1342
1343	return ret;
1344}
1345
1346static int hash_setkey(struct crypto_ahash *tfm,
1347		       const u8 *key, unsigned int keylen, int alg)
1348{
1349	int ret = 0;
1350	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1351
1352	/**
1353	 * Freed in final.
1354	 */
1355	ctx->key = kmemdup(key, keylen, GFP_KERNEL);
1356	if (!ctx->key) {
1357		pr_err("%s: Failed to allocate ctx->key for %d\n",
1358		       __func__, alg);
1359		return -ENOMEM;
1360	}
1361	ctx->keylen = keylen;
1362
1363	return ret;
1364}
1365
1366static int ahash_sha1_init(struct ahash_request *req)
1367{
1368	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1369	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1370
1371	ctx->config.data_format = HASH_DATA_8_BITS;
1372	ctx->config.algorithm = HASH_ALGO_SHA1;
1373	ctx->config.oper_mode = HASH_OPER_MODE_HASH;
1374	ctx->digestsize = SHA1_DIGEST_SIZE;
1375
1376	return hash_init(req);
1377}
1378
1379static int ahash_sha256_init(struct ahash_request *req)
1380{
1381	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1382	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1383
1384	ctx->config.data_format = HASH_DATA_8_BITS;
1385	ctx->config.algorithm = HASH_ALGO_SHA256;
1386	ctx->config.oper_mode = HASH_OPER_MODE_HASH;
1387	ctx->digestsize = SHA256_DIGEST_SIZE;
1388
1389	return hash_init(req);
1390}
1391
1392static int ahash_sha1_digest(struct ahash_request *req)
1393{
1394	int ret2, ret1;
1395
1396	ret1 = ahash_sha1_init(req);
1397	if (ret1)
1398		goto out;
1399
1400	ret1 = ahash_update(req);
1401	ret2 = ahash_final(req);
1402
1403out:
1404	return ret1 ? ret1 : ret2;
1405}
1406
1407static int ahash_sha256_digest(struct ahash_request *req)
1408{
1409	int ret2, ret1;
1410
1411	ret1 = ahash_sha256_init(req);
1412	if (ret1)
1413		goto out;
1414
1415	ret1 = ahash_update(req);
1416	ret2 = ahash_final(req);
1417
1418out:
1419	return ret1 ? ret1 : ret2;
1420}
1421
1422static int hmac_sha1_init(struct ahash_request *req)
1423{
1424	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1425	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1426
1427	ctx->config.data_format	= HASH_DATA_8_BITS;
1428	ctx->config.algorithm	= HASH_ALGO_SHA1;
1429	ctx->config.oper_mode	= HASH_OPER_MODE_HMAC;
1430	ctx->digestsize		= SHA1_DIGEST_SIZE;
1431
1432	return hash_init(req);
1433}
1434
1435static int hmac_sha256_init(struct ahash_request *req)
1436{
1437	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1438	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1439
1440	ctx->config.data_format	= HASH_DATA_8_BITS;
1441	ctx->config.algorithm	= HASH_ALGO_SHA256;
1442	ctx->config.oper_mode	= HASH_OPER_MODE_HMAC;
1443	ctx->digestsize		= SHA256_DIGEST_SIZE;
1444
1445	return hash_init(req);
1446}
1447
1448static int hmac_sha1_digest(struct ahash_request *req)
1449{
1450	int ret2, ret1;
1451
1452	ret1 = hmac_sha1_init(req);
1453	if (ret1)
1454		goto out;
1455
1456	ret1 = ahash_update(req);
1457	ret2 = ahash_final(req);
1458
1459out:
1460	return ret1 ? ret1 : ret2;
1461}
1462
1463static int hmac_sha256_digest(struct ahash_request *req)
1464{
1465	int ret2, ret1;
1466
1467	ret1 = hmac_sha256_init(req);
1468	if (ret1)
1469		goto out;
1470
1471	ret1 = ahash_update(req);
1472	ret2 = ahash_final(req);
1473
1474out:
1475	return ret1 ? ret1 : ret2;
1476}
1477
1478static int hmac_sha1_setkey(struct crypto_ahash *tfm,
1479			    const u8 *key, unsigned int keylen)
1480{
1481	return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1);
1482}
1483
1484static int hmac_sha256_setkey(struct crypto_ahash *tfm,
1485			      const u8 *key, unsigned int keylen)
1486{
1487	return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256);
1488}
1489
1490struct hash_algo_template {
1491	struct hash_config conf;
1492	struct ahash_alg hash;
1493};
1494
1495static int hash_cra_init(struct crypto_tfm *tfm)
1496{
1497	struct hash_ctx *ctx = crypto_tfm_ctx(tfm);
1498	struct crypto_alg *alg = tfm->__crt_alg;
1499	struct hash_algo_template *hash_alg;
1500
1501	hash_alg = container_of(__crypto_ahash_alg(alg),
1502			struct hash_algo_template,
1503			hash);
1504
1505	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1506				 sizeof(struct hash_req_ctx));
1507
1508	ctx->config.data_format = HASH_DATA_8_BITS;
1509	ctx->config.algorithm = hash_alg->conf.algorithm;
1510	ctx->config.oper_mode = hash_alg->conf.oper_mode;
1511
1512	ctx->digestsize = hash_alg->hash.halg.digestsize;
1513
1514	return 0;
1515}
1516
1517static struct hash_algo_template hash_algs[] = {
1518	{
1519		.conf.algorithm = HASH_ALGO_SHA1,
1520		.conf.oper_mode = HASH_OPER_MODE_HASH,
1521		.hash = {
1522			.init = hash_init,
1523			.update = ahash_update,
1524			.final = ahash_final,
1525			.digest = ahash_sha1_digest,
1526			.halg.digestsize = SHA1_DIGEST_SIZE,
1527			.halg.statesize = sizeof(struct hash_ctx),
1528			.halg.base = {
1529				.cra_name = "sha1",
1530				.cra_driver_name = "sha1-ux500",
1531				.cra_flags = (CRYPTO_ALG_TYPE_AHASH |
1532					      CRYPTO_ALG_ASYNC),
1533				.cra_blocksize = SHA1_BLOCK_SIZE,
1534				.cra_ctxsize = sizeof(struct hash_ctx),
1535				.cra_init = hash_cra_init,
1536				.cra_module = THIS_MODULE,
1537			}
1538		}
1539	},
1540	{
1541		.conf.algorithm	= HASH_ALGO_SHA256,
1542		.conf.oper_mode	= HASH_OPER_MODE_HASH,
1543		.hash = {
1544			.init = hash_init,
1545			.update	= ahash_update,
1546			.final = ahash_final,
1547			.digest = ahash_sha256_digest,
1548			.halg.digestsize = SHA256_DIGEST_SIZE,
1549			.halg.statesize = sizeof(struct hash_ctx),
1550			.halg.base = {
1551				.cra_name = "sha256",
1552				.cra_driver_name = "sha256-ux500",
1553				.cra_flags = (CRYPTO_ALG_TYPE_AHASH |
1554					      CRYPTO_ALG_ASYNC),
1555				.cra_blocksize = SHA256_BLOCK_SIZE,
1556				.cra_ctxsize = sizeof(struct hash_ctx),
1557				.cra_type = &crypto_ahash_type,
1558				.cra_init = hash_cra_init,
1559				.cra_module = THIS_MODULE,
1560			}
1561		}
1562	},
1563	{
1564		.conf.algorithm = HASH_ALGO_SHA1,
1565		.conf.oper_mode = HASH_OPER_MODE_HMAC,
1566			.hash = {
1567			.init = hash_init,
1568			.update = ahash_update,
1569			.final = ahash_final,
1570			.digest = hmac_sha1_digest,
1571			.setkey = hmac_sha1_setkey,
1572			.halg.digestsize = SHA1_DIGEST_SIZE,
1573			.halg.statesize = sizeof(struct hash_ctx),
1574			.halg.base = {
1575				.cra_name = "hmac(sha1)",
1576				.cra_driver_name = "hmac-sha1-ux500",
1577				.cra_flags = (CRYPTO_ALG_TYPE_AHASH |
1578					      CRYPTO_ALG_ASYNC),
1579				.cra_blocksize = SHA1_BLOCK_SIZE,
1580				.cra_ctxsize = sizeof(struct hash_ctx),
1581				.cra_type = &crypto_ahash_type,
1582				.cra_init = hash_cra_init,
1583				.cra_module = THIS_MODULE,
1584			}
1585		}
1586	},
1587	{
1588		.conf.algorithm = HASH_ALGO_SHA256,
1589		.conf.oper_mode = HASH_OPER_MODE_HMAC,
1590		.hash = {
1591			.init = hash_init,
1592			.update = ahash_update,
1593			.final = ahash_final,
1594			.digest = hmac_sha256_digest,
1595			.setkey = hmac_sha256_setkey,
1596			.halg.digestsize = SHA256_DIGEST_SIZE,
1597			.halg.statesize = sizeof(struct hash_ctx),
1598			.halg.base = {
1599				.cra_name = "hmac(sha256)",
1600				.cra_driver_name = "hmac-sha256-ux500",
1601				.cra_flags = (CRYPTO_ALG_TYPE_AHASH |
1602					      CRYPTO_ALG_ASYNC),
1603				.cra_blocksize = SHA256_BLOCK_SIZE,
1604				.cra_ctxsize = sizeof(struct hash_ctx),
1605				.cra_type = &crypto_ahash_type,
1606				.cra_init = hash_cra_init,
1607				.cra_module = THIS_MODULE,
1608			}
1609		}
1610	}
1611};
1612
1613/**
1614 * hash_algs_register_all -
1615 */
1616static int ahash_algs_register_all(struct hash_device_data *device_data)
1617{
1618	int ret;
1619	int i;
1620	int count;
1621
1622	for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
1623		ret = crypto_register_ahash(&hash_algs[i].hash);
1624		if (ret) {
1625			count = i;
1626			dev_err(device_data->dev, "%s: alg registration failed\n",
1627				hash_algs[i].hash.halg.base.cra_driver_name);
1628			goto unreg;
1629		}
1630	}
1631	return 0;
1632unreg:
1633	for (i = 0; i < count; i++)
1634		crypto_unregister_ahash(&hash_algs[i].hash);
1635	return ret;
1636}
1637
1638/**
1639 * hash_algs_unregister_all -
1640 */
1641static void ahash_algs_unregister_all(struct hash_device_data *device_data)
1642{
1643	int i;
1644
1645	for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
1646		crypto_unregister_ahash(&hash_algs[i].hash);
1647}
1648
1649/**
1650 * ux500_hash_probe - Function that probes the hash hardware.
1651 * @pdev: The platform device.
1652 */
1653static int ux500_hash_probe(struct platform_device *pdev)
1654{
1655	int			ret = 0;
1656	struct resource		*res = NULL;
1657	struct hash_device_data *device_data;
1658	struct device		*dev = &pdev->dev;
1659
1660	device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_ATOMIC);
1661	if (!device_data) {
1662		ret = -ENOMEM;
1663		goto out;
1664	}
1665
1666	device_data->dev = dev;
1667	device_data->current_ctx = NULL;
1668
1669	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1670	if (!res) {
1671		dev_dbg(dev, "%s: platform_get_resource() failed!\n", __func__);
1672		ret = -ENODEV;
1673		goto out;
1674	}
1675
1676	device_data->phybase = res->start;
1677	device_data->base = devm_ioremap_resource(dev, res);
1678	if (IS_ERR(device_data->base)) {
1679		dev_err(dev, "%s: ioremap() failed!\n", __func__);
1680		ret = PTR_ERR(device_data->base);
1681		goto out;
1682	}
1683	spin_lock_init(&device_data->ctx_lock);
1684	spin_lock_init(&device_data->power_state_lock);
1685
1686	/* Enable power for HASH1 hardware block */
1687	device_data->regulator = regulator_get(dev, "v-ape");
1688	if (IS_ERR(device_data->regulator)) {
1689		dev_err(dev, "%s: regulator_get() failed!\n", __func__);
1690		ret = PTR_ERR(device_data->regulator);
1691		device_data->regulator = NULL;
1692		goto out;
1693	}
1694
1695	/* Enable the clock for HASH1 hardware block */
1696	device_data->clk = devm_clk_get(dev, NULL);
1697	if (IS_ERR(device_data->clk)) {
1698		dev_err(dev, "%s: clk_get() failed!\n", __func__);
1699		ret = PTR_ERR(device_data->clk);
1700		goto out_regulator;
1701	}
1702
1703	ret = clk_prepare(device_data->clk);
1704	if (ret) {
1705		dev_err(dev, "%s: clk_prepare() failed!\n", __func__);
1706		goto out_regulator;
1707	}
1708
1709	/* Enable device power (and clock) */
1710	ret = hash_enable_power(device_data, false);
1711	if (ret) {
1712		dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);
1713		goto out_clk_unprepare;
1714	}
1715
1716	ret = hash_check_hw(device_data);
1717	if (ret) {
1718		dev_err(dev, "%s: hash_check_hw() failed!\n", __func__);
1719		goto out_power;
1720	}
1721
1722	if (hash_mode == HASH_MODE_DMA)
1723		hash_dma_setup_channel(device_data, dev);
1724
1725	platform_set_drvdata(pdev, device_data);
1726
1727	/* Put the new device into the device list... */
1728	klist_add_tail(&device_data->list_node, &driver_data.device_list);
1729	/* ... and signal that a new device is available. */
1730	up(&driver_data.device_allocation);
1731
1732	ret = ahash_algs_register_all(device_data);
1733	if (ret) {
1734		dev_err(dev, "%s: ahash_algs_register_all() failed!\n",
1735			__func__);
1736		goto out_power;
1737	}
1738
1739	dev_info(dev, "successfully registered\n");
1740	return 0;
1741
1742out_power:
1743	hash_disable_power(device_data, false);
1744
1745out_clk_unprepare:
1746	clk_unprepare(device_data->clk);
1747
1748out_regulator:
1749	regulator_put(device_data->regulator);
1750
1751out:
1752	return ret;
1753}
1754
1755/**
1756 * ux500_hash_remove - Function that removes the hash device from the platform.
1757 * @pdev: The platform device.
1758 */
1759static int ux500_hash_remove(struct platform_device *pdev)
1760{
1761	struct hash_device_data *device_data;
1762	struct device		*dev = &pdev->dev;
1763
1764	device_data = platform_get_drvdata(pdev);
1765	if (!device_data) {
1766		dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
1767		return -ENOMEM;
1768	}
1769
1770	/* Try to decrease the number of available devices. */
1771	if (down_trylock(&driver_data.device_allocation))
1772		return -EBUSY;
1773
1774	/* Check that the device is free */
1775	spin_lock(&device_data->ctx_lock);
1776	/* current_ctx allocates a device, NULL = unallocated */
1777	if (device_data->current_ctx) {
1778		/* The device is busy */
1779		spin_unlock(&device_data->ctx_lock);
1780		/* Return the device to the pool. */
1781		up(&driver_data.device_allocation);
1782		return -EBUSY;
1783	}
1784
1785	spin_unlock(&device_data->ctx_lock);
1786
1787	/* Remove the device from the list */
1788	if (klist_node_attached(&device_data->list_node))
1789		klist_remove(&device_data->list_node);
1790
1791	/* If this was the last device, remove the services */
1792	if (list_empty(&driver_data.device_list.k_list))
1793		ahash_algs_unregister_all(device_data);
1794
1795	if (hash_disable_power(device_data, false))
1796		dev_err(dev, "%s: hash_disable_power() failed\n",
1797			__func__);
1798
1799	clk_unprepare(device_data->clk);
1800	regulator_put(device_data->regulator);
1801
1802	return 0;
1803}
1804
1805/**
1806 * ux500_hash_shutdown - Function that shutdown the hash device.
1807 * @pdev: The platform device
1808 */
1809static void ux500_hash_shutdown(struct platform_device *pdev)
1810{
1811	struct hash_device_data *device_data;
1812
1813	device_data = platform_get_drvdata(pdev);
1814	if (!device_data) {
1815		dev_err(&pdev->dev, "%s: platform_get_drvdata() failed!\n",
1816			__func__);
1817		return;
1818	}
1819
1820	/* Check that the device is free */
1821	spin_lock(&device_data->ctx_lock);
1822	/* current_ctx allocates a device, NULL = unallocated */
1823	if (!device_data->current_ctx) {
1824		if (down_trylock(&driver_data.device_allocation))
1825			dev_dbg(&pdev->dev, "%s: Cryp still in use! Shutting down anyway...\n",
1826				__func__);
1827		/**
1828		 * (Allocate the device)
1829		 * Need to set this to non-null (dummy) value,
1830		 * to avoid usage if context switching.
1831		 */
1832		device_data->current_ctx++;
1833	}
1834	spin_unlock(&device_data->ctx_lock);
1835
1836	/* Remove the device from the list */
1837	if (klist_node_attached(&device_data->list_node))
1838		klist_remove(&device_data->list_node);
1839
1840	/* If this was the last device, remove the services */
1841	if (list_empty(&driver_data.device_list.k_list))
1842		ahash_algs_unregister_all(device_data);
1843
1844	if (hash_disable_power(device_data, false))
1845		dev_err(&pdev->dev, "%s: hash_disable_power() failed\n",
1846			__func__);
1847}
1848
1849#ifdef CONFIG_PM_SLEEP
1850/**
1851 * ux500_hash_suspend - Function that suspends the hash device.
1852 * @dev:	Device to suspend.
1853 */
1854static int ux500_hash_suspend(struct device *dev)
1855{
1856	int ret;
1857	struct hash_device_data *device_data;
1858	struct hash_ctx *temp_ctx = NULL;
1859
1860	device_data = dev_get_drvdata(dev);
1861	if (!device_data) {
1862		dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
1863		return -ENOMEM;
1864	}
1865
1866	spin_lock(&device_data->ctx_lock);
1867	if (!device_data->current_ctx)
1868		device_data->current_ctx++;
1869	spin_unlock(&device_data->ctx_lock);
1870
1871	if (device_data->current_ctx == ++temp_ctx) {
1872		if (down_interruptible(&driver_data.device_allocation))
1873			dev_dbg(dev, "%s: down_interruptible() failed\n",
1874				__func__);
1875		ret = hash_disable_power(device_data, false);
1876
1877	} else {
1878		ret = hash_disable_power(device_data, true);
1879	}
1880
1881	if (ret)
1882		dev_err(dev, "%s: hash_disable_power()\n", __func__);
1883
1884	return ret;
1885}
1886
1887/**
1888 * ux500_hash_resume - Function that resume the hash device.
1889 * @dev:	Device to resume.
1890 */
1891static int ux500_hash_resume(struct device *dev)
1892{
1893	int ret = 0;
1894	struct hash_device_data *device_data;
1895	struct hash_ctx *temp_ctx = NULL;
1896
1897	device_data = dev_get_drvdata(dev);
1898	if (!device_data) {
1899		dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
1900		return -ENOMEM;
1901	}
1902
1903	spin_lock(&device_data->ctx_lock);
1904	if (device_data->current_ctx == ++temp_ctx)
1905		device_data->current_ctx = NULL;
1906	spin_unlock(&device_data->ctx_lock);
1907
1908	if (!device_data->current_ctx)
1909		up(&driver_data.device_allocation);
1910	else
1911		ret = hash_enable_power(device_data, true);
1912
1913	if (ret)
1914		dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);
1915
1916	return ret;
1917}
1918#endif
1919
1920static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume);
1921
1922static const struct of_device_id ux500_hash_match[] = {
1923	{ .compatible = "stericsson,ux500-hash" },
1924	{ },
1925};
1926MODULE_DEVICE_TABLE(of, ux500_hash_match);
1927
1928static struct platform_driver hash_driver = {
1929	.probe  = ux500_hash_probe,
1930	.remove = ux500_hash_remove,
1931	.shutdown = ux500_hash_shutdown,
1932	.driver = {
1933		.name  = "hash1",
1934		.of_match_table = ux500_hash_match,
1935		.pm    = &ux500_hash_pm,
1936	}
1937};
1938
1939/**
1940 * ux500_hash_mod_init - The kernel module init function.
1941 */
1942static int __init ux500_hash_mod_init(void)
1943{
1944	klist_init(&driver_data.device_list, NULL, NULL);
1945	/* Initialize the semaphore to 0 devices (locked state) */
1946	sema_init(&driver_data.device_allocation, 0);
1947
1948	return platform_driver_register(&hash_driver);
1949}
1950
1951/**
1952 * ux500_hash_mod_fini - The kernel module exit function.
1953 */
1954static void __exit ux500_hash_mod_fini(void)
1955{
1956	platform_driver_unregister(&hash_driver);
1957}
1958
1959module_init(ux500_hash_mod_init);
1960module_exit(ux500_hash_mod_fini);
1961
1962MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
1963MODULE_LICENSE("GPL");
1964
1965MODULE_ALIAS_CRYPTO("sha1-all");
1966MODULE_ALIAS_CRYPTO("sha256-all");
1967MODULE_ALIAS_CRYPTO("hmac-sha1-all");
1968MODULE_ALIAS_CRYPTO("hmac-sha256-all");
1969