This source file includes the following definitions.
- qce_ablkcipher_done
- qce_ablkcipher_async_req_handle
- qce_ablkcipher_setkey
- qce_des_setkey
- qce_des3_setkey
- qce_ablkcipher_crypt
- qce_ablkcipher_encrypt
- qce_ablkcipher_decrypt
- qce_ablkcipher_init
- qce_ablkcipher_exit
- qce_ablkcipher_register_one
- qce_ablkcipher_unregister
- qce_ablkcipher_register
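
A consumer never calls these functions directly; it reaches the hardware through the generic kernel crypto API, which may pick one of the "*-qce" implementations registered below. A minimal sketch of such a caller (the helper name, key-size choice, and error handling are illustrative, not part of this file):

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: encrypt buf in place with AES-128-CBC; len must
 * be a multiple of AES_BLOCK_SIZE. */
static int example_cbc_aes_encrypt(u8 *buf, unsigned int len,
				   const u8 *key, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	ret = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (ret)
		goto out_req;

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);
	/* wait for completion even if the driver handles it asynchronously */
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

out_req:
	skcipher_request_free(req);
out_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}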
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>

#include "cipher.h"

static LIST_HEAD(ablkcipher_algs);

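/*
 * DMA completion callback: tear down the DMA channel and the
 * scatterlist mappings for a finished cipher request, check the
 * hardware status and report the result to the core.
 */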
static void qce_ablkcipher_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	u32 status;
	int error;
	bool diff_dst;

	diff_dst = (req->src != req->dst);
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n",
			error);

	if (diff_dst)
		dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);

	sg_free_table(&rctx->dst_tbl);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status);

	qce->async_req_done(tmpl->qce, error);
}

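/*
 * Prepare a cipher request for the hardware: build a destination
 * scatter/gather table that appends the device result buffer, map the
 * source and destination lists for DMA, then kick off the transfer and
 * program the crypto engine.
 */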
static int
qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	struct scatterlist *sg;
	bool diff_dst;
	gfp_t gfp;
	int ret;

	rctx->iv = req->info;
	rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	rctx->cryptlen = req->nbytes;

	diff_dst = (req->src != req->dst);
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (diff_dst)
		rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
	else
		rctx->dst_nents = rctx->src_nents;
	if (rctx->src_nents < 0) {
		dev_err(qce->dev, "Invalid number of src SG entries\n");
		return rctx->src_nents;
	}
	if (rctx->dst_nents < 0) {
		dev_err(qce->dev, "Invalid number of dst SG entries\n");
		/* dst_nents already holds a negative errno; don't negate it */
		return rctx->dst_nents;
	}

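	/* one extra entry so the result buffer can be appended to dst */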
	rctx->dst_nents += 1;

	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
						GFP_KERNEL : GFP_ATOMIC;

	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
	if (ret)
		return ret;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg_mark_end(sg);
	rctx->dst_sg = rctx->dst_tbl.sgl;

	/* dma_map_sg() returns 0 on failure, never a negative errno */
	ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
	if (!ret) {
		ret = -EIO;
		goto error_free;
	}

	if (diff_dst) {
		ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
		if (!ret) {
			ret = -EIO;
			goto error_unmap_dst;
		}
		rctx->src_sg = req->src;
	} else {
		rctx->src_sg = rctx->dst_sg;
	}

	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
			       rctx->dst_sg, rctx->dst_nents,
			       qce_ablkcipher_done, async_req);
	if (ret)
		goto error_unmap_src;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_src:
	if (diff_dst)
		dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
error_unmap_dst:
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
error_free:
	sg_free_table(&rctx->dst_tbl);
	return ret;
}

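/*
 * Set the AES key. Key lengths the engine handles natively (128 and
 * 256 bit) are cached in the context; anything else (e.g. 192-bit AES)
 * is handed to the software fallback tfm.
 */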
static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
				 unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk);
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	if (!key || !keylen)
		return -EINVAL;

	switch (keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_256:
		break;
	default:
		goto fallback;
	}

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;
fallback:
	ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
	if (!ret)
		ctx->enc_keylen = keylen;
	return ret;
}

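/* Set a DES key after the kernel's DES key checks (rejects weak keys). */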
static int qce_des_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
			  unsigned int keylen)
{
	struct qce_cipher_ctx *ctx = crypto_ablkcipher_ctx(ablk);
	int err;

	err = verify_ablkcipher_des_key(ablk, key);
	if (err)
		return err;

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;
}

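/* Set a triple-DES key after the kernel's 3DES key verification. */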
static int qce_des3_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
			   unsigned int keylen)
{
	struct qce_cipher_ctx *ctx = crypto_ablkcipher_ctx(ablk);
	int err;

	err = verify_ablkcipher_des3_key(ablk, key);
	if (err)
		return err;

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;
}

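/*
 * Common encrypt/decrypt path. AES requests whose key length the
 * hardware cannot handle are processed synchronously through the
 * fallback tfm; everything else is queued to the engine.
 */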
static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
	int ret;

	rctx->flags = tmpl->alg_flags;
	rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;

	if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
	    ctx->enc_keylen != AES_KEYSIZE_256) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

		skcipher_request_set_sync_tfm(subreq, ctx->fallback);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->nbytes, req->info);
		ret = encrypt ? crypto_skcipher_encrypt(subreq) :
				crypto_skcipher_decrypt(subreq);
		skcipher_request_zero(subreq);
		return ret;
	}

	return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}

static int qce_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	return qce_ablkcipher_crypt(req, 1);
}

static int qce_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	return qce_ablkcipher_crypt(req, 0);
}

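/*
 * Per-tfm init: zero the context, size the request context and
 * allocate the software fallback cipher by the same algorithm name.
 */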
static int qce_ablkcipher_init(struct crypto_tfm *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
	tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);

	ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(tfm),
						   0, CRYPTO_ALG_NEED_FALLBACK);
	return PTR_ERR_OR_ZERO(ctx->fallback);
}

static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_sync_skcipher(ctx->fallback);
}

struct qce_ablkcipher_def {
	unsigned long flags;
	const char *name;
	const char *drv_name;
	unsigned int blocksize;
	unsigned int ivsize;
	unsigned int min_keysize;
	unsigned int max_keysize;
};

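/* Algorithm definitions registered with the crypto API, one per mode. */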
static const struct qce_ablkcipher_def ablkcipher_def[] = {
	{
		.flags = QCE_ALG_AES | QCE_MODE_ECB,
		.name = "ecb(aes)",
		.drv_name = "ecb-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_AES | QCE_MODE_CBC,
		.name = "cbc(aes)",
		.drv_name = "cbc-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_AES | QCE_MODE_CTR,
		.name = "ctr(aes)",
		.drv_name = "ctr-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_AES | QCE_MODE_XTS,
		.name = "xts(aes)",
		.drv_name = "xts-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_DES | QCE_MODE_ECB,
		.name = "ecb(des)",
		.drv_name = "ecb-des-qce",
		.blocksize = DES_BLOCK_SIZE,
		.ivsize = 0,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_DES | QCE_MODE_CBC,
		.name = "cbc(des)",
		.drv_name = "cbc-des-qce",
		.blocksize = DES_BLOCK_SIZE,
		.ivsize = DES_BLOCK_SIZE,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_3DES | QCE_MODE_ECB,
		.name = "ecb(des3_ede)",
		.drv_name = "ecb-3des-qce",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.ivsize = 0,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_3DES | QCE_MODE_CBC,
		.name = "cbc(des3_ede)",
		.drv_name = "cbc-3des-qce",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
	},
};

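/*
 * Allocate a template for one algorithm definition, fill in the
 * crypto_alg callbacks and register it with the crypto API.
 */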
static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
				       struct qce_device *qce)
{
	struct qce_alg_template *tmpl;
	struct crypto_alg *alg;
	int ret;

	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;

	alg = &tmpl->alg.crypto;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);

	alg->cra_blocksize = def->blocksize;
	alg->cra_ablkcipher.ivsize = def->ivsize;
	alg->cra_ablkcipher.min_keysize = def->min_keysize;
	alg->cra_ablkcipher.max_keysize = def->max_keysize;
	alg->cra_ablkcipher.setkey = IS_3DES(def->flags) ? qce_des3_setkey :
				     IS_DES(def->flags) ? qce_des_setkey :
				     qce_ablkcipher_setkey;
	alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
	alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;

	alg->cra_priority = 300;
	alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
			 CRYPTO_ALG_NEED_FALLBACK;
	alg->cra_ctxsize = sizeof(struct qce_cipher_ctx);
	alg->cra_alignmask = 0;
	alg->cra_type = &crypto_ablkcipher_type;
	alg->cra_module = THIS_MODULE;
	alg->cra_init = qce_ablkcipher_init;
	alg->cra_exit = qce_ablkcipher_exit;

	INIT_LIST_HEAD(&tmpl->entry);
	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER;
	tmpl->alg_flags = def->flags;
	tmpl->qce = qce;

	ret = crypto_register_alg(alg);
	if (ret) {
		kfree(tmpl);
		dev_err(qce->dev, "%s registration failed\n", alg->cra_name);
		return ret;
	}

	list_add_tail(&tmpl->entry, &ablkcipher_algs);
	dev_dbg(qce->dev, "%s is registered\n", alg->cra_name);
	return 0;
}

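/* Unregister and free every algorithm this driver registered. */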
static void qce_ablkcipher_unregister(struct qce_device *qce)
{
	struct qce_alg_template *tmpl, *n;

	list_for_each_entry_safe(tmpl, n, &ablkcipher_algs, entry) {
		crypto_unregister_alg(&tmpl->alg.crypto);
		list_del(&tmpl->entry);
		kfree(tmpl);
	}
}

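/* Register all table entries; on failure roll back what succeeded. */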
static int qce_ablkcipher_register(struct qce_device *qce)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(ablkcipher_def); i++) {
		ret = qce_ablkcipher_register_one(&ablkcipher_def[i], qce);
		if (ret)
			goto err;
	}

	return 0;
err:
	qce_ablkcipher_unregister(qce);
	return ret;
}

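/*
 * Ops exported to the qce core, which is expected to call
 * ->register_algs() at probe time and ->async_req_handle() for each
 * request it dequeues (the core side is not shown in this file).
 */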
const struct qce_algo_ops ablkcipher_ops = {
	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	.register_algs = qce_ablkcipher_register,
	.unregister_algs = qce_ablkcipher_unregister,
	.async_req_handle = qce_ablkcipher_async_req_handle,
};