/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include <linux/module.h>
#include <crypto/internal/rsa.h>
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <crypto/scatterwalk.h>
#include "qat_rsapubkey-asn1.h"
#include "qat_rsaprivkey-asn1.h"
#include "icp_qat_fw_pke.h"
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"

static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

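/*
 * Input/output parameter blocks handed to the PKE firmware.  Each member is
 * the DMA address of one big-endian operand; the in_tab/out_tab members pad
 * the unions out to a fixed eight-entry table, and the slot after the last
 * used entry is cleared before a request is sent.
 */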
struct qat_rsa_input_params {
	union {
		struct {
			dma_addr_t m;
			dma_addr_t e;
			dma_addr_t n;
		} enc;
		struct {
			dma_addr_t c;
			dma_addr_t d;
			dma_addr_t n;
		} dec;
		u64 in_tab[8];
	};
} __packed __aligned(64);

struct qat_rsa_output_params {
	union {
		struct {
			dma_addr_t c;
		} enc;
		struct {
			dma_addr_t m;
		} dec;
		u64 out_tab[8];
	};
} __packed __aligned(64);

struct qat_rsa_ctx {
	char *n;
	char *e;
	char *d;
	dma_addr_t dma_n;
	dma_addr_t dma_e;
	dma_addr_t dma_d;
	unsigned int key_sz;
	struct qat_crypto_instance *inst;
} __packed __aligned(64);

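/*
 * Per-request state, carved out of the akcipher request context.  It must
 * start on a 64-byte boundary, so .reqsize below reserves an extra 64 bytes
 * and users locate it with PTR_ALIGN(akcipher_request_ctx(req), 64).
 */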
struct qat_rsa_request {
	struct qat_rsa_input_params in;
	struct qat_rsa_output_params out;
	dma_addr_t phy_in;
	dma_addr_t phy_out;
	char *src_align;
	char *dst_align;
	struct icp_qat_fw_pke_request req;
	struct qat_rsa_ctx *ctx;
	int err;
} __aligned(64);

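/*
 * Completion callback for PKE responses.  The enc and dec views of the
 * parameter unions share their first slot (in.enc.m/in.dec.c and
 * out.enc.c/out.dec.m), so the cleanup below is written in terms of the
 * enc members and covers both directions.
 */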
static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
{
	struct akcipher_request *areq = (void *)(__force long)resp->opaque;
	struct qat_rsa_request *req = PTR_ALIGN(akcipher_request_ctx(areq), 64);
	struct device *dev = &GET_DEV(req->ctx->inst->accel_dev);
	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
			resp->pke_resp_hdr.comn_resp_flags);

	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;

	if (req->src_align)
		dma_free_coherent(dev, req->ctx->key_sz, req->src_align,
				  req->in.enc.m);
	else
		dma_unmap_single(dev, req->in.enc.m, req->ctx->key_sz,
				 DMA_TO_DEVICE);

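	/*
	 * The firmware produces a key_sz-byte big-endian result; strip
	 * leading zero bytes so dst_len reflects the minimal representation
	 * before handing the data back to the caller.
	 */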
	areq->dst_len = req->ctx->key_sz;
	if (req->dst_align) {
		char *ptr = req->dst_align;

		while (!(*ptr) && areq->dst_len) {
			areq->dst_len--;
			ptr++;
		}

		if (areq->dst_len != req->ctx->key_sz)
			memmove(req->dst_align, ptr, areq->dst_len);

		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
					 areq->dst_len, 1);

		dma_free_coherent(dev, req->ctx->key_sz, req->dst_align,
				  req->out.enc.c);
	} else {
		char *ptr = sg_virt(areq->dst);

		while (!(*ptr) && areq->dst_len) {
			areq->dst_len--;
			ptr++;
		}

		if (sg_virt(areq->dst) != ptr && areq->dst_len)
			memmove(sg_virt(areq->dst), ptr, areq->dst_len);

		dma_unmap_single(dev, req->out.enc.c, req->ctx->key_sz,
				 DMA_FROM_DEVICE);
	}

	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, req->phy_out,
			 sizeof(struct qat_rsa_output_params),
			 DMA_TO_DEVICE);

	akcipher_request_complete(areq, err);
}

void qat_alg_asym_callback(void *_resp)
{
	struct icp_qat_fw_pke_resp *resp = _resp;

	qat_rsa_cb(resp);
}

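/*
 * Firmware function IDs for the RSA public-key operation (m^e mod n), one
 * per supported modulus size.  qat_rsa_enc_fn_id() maps the key size in
 * bytes to the matching ID and returns 0 for unsupported sizes.
 */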
#define PKE_RSA_EP_512 0x1c161b21
#define PKE_RSA_EP_1024 0x35111bf7
#define PKE_RSA_EP_1536 0x4d111cdc
#define PKE_RSA_EP_2048 0x6e111dba
#define PKE_RSA_EP_3072 0x7d111ea3
#define PKE_RSA_EP_4096 0xa5101f7e

static unsigned long qat_rsa_enc_fn_id(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_EP_512;
	case 1024:
		return PKE_RSA_EP_1024;
	case 1536:
		return PKE_RSA_EP_1536;
	case 2048:
		return PKE_RSA_EP_2048;
	case 3072:
		return PKE_RSA_EP_3072;
	case 4096:
		return PKE_RSA_EP_4096;
	default:
		return 0;
	}
}

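/*
 * Firmware function IDs for the RSA private-key operation.  This driver
 * supplies only the (n, d) form of the key, not the CRT representation.
 */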
#define PKE_RSA_DP1_512 0x1c161b3c
#define PKE_RSA_DP1_1024 0x35111c12
#define PKE_RSA_DP1_1536 0x4d111cf7
#define PKE_RSA_DP1_2048 0x6e111dda
#define PKE_RSA_DP1_3072 0x7d111ebe
#define PKE_RSA_DP1_4096 0xa5101f98

static unsigned long qat_rsa_dec_fn_id(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_DP1_512;
	case 1024:
		return PKE_RSA_DP1_1024;
	case 1536:
		return PKE_RSA_DP1_1536;
	case 2048:
		return PKE_RSA_DP1_2048;
	case 3072:
		return PKE_RSA_DP1_3072;
	case 4096:
		return PKE_RSA_DP1_4096;
	default:
		return 0;
	}
}

static int qat_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_rsa_request *qat_req =
			PTR_ALIGN(akcipher_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret, ctr = 0;

	if (unlikely(!ctx->n || !ctx->e))
		return -EINVAL;

	if (req->dst_len < ctx->key_sz) {
		req->dst_len = ctx->key_sz;
		return -EOVERFLOW;
	}
	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
	msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->ctx = ctx;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	qat_req->in.enc.e = ctx->dma_e;
	qat_req->in.enc.n = ctx->dma_n;
	ret = -ENOMEM;

	/*
	 * The source can be any length in the valid range, but the hardware
	 * expects a contiguous buffer exactly as long as the modulus n.  If
	 * the caller's buffer differs, allocate a new buffer and copy the
	 * source data into it right-aligned; otherwise just DMA-map the
	 * caller's buffer directly.
	 */
	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
		qat_req->src_align = NULL;
		qat_req->in.enc.m = dma_map_single(dev, sg_virt(req->src),
						   req->src_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, qat_req->in.enc.m)))
			return ret;

	} else {
		int shift = ctx->key_sz - req->src_len;

		qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
							 &qat_req->in.enc.m,
							 GFP_KERNEL);
		if (unlikely(!qat_req->src_align))
			return ret;

		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
					 0, req->src_len, 0);
	}
	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
		qat_req->dst_align = NULL;
		qat_req->out.enc.c = dma_map_single(dev, sg_virt(req->dst),
						    req->dst_len,
						    DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(dev, qat_req->out.enc.c)))
			goto unmap_src;

	} else {
		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
							 &qat_req->out.enc.c,
							 GFP_KERNEL);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;

	}
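	/*
	 * Terminate the parameter tables after the last used slot (three
	 * inputs: m, e, n; one output: c) and DMA-map the tables themselves
	 * so the firmware can fetch the operand addresses.
	 */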
	qat_req->in.in_tab[3] = 0;
	qat_req->out.out_tab[1] = 0;
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.enc.m,
					 sizeof(struct qat_rsa_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.enc.c,
					  sizeof(struct qat_rsa_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (uint64_t)(__force long)req;
	msg->input_param_count = 3;
	msg->output_param_count = 1;
	do {
		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
	} while (ret == -EBUSY && ctr++ < 100);

	if (!ret)
		return -EINPROGRESS;
unmap_src:
	if (qat_req->src_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
				  qat_req->in.enc.m);
	else
		if (!dma_mapping_error(dev, qat_req->in.enc.m))
			dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz,
					 DMA_TO_DEVICE);
unmap_dst:
	if (qat_req->dst_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
				  qat_req->out.enc.c);
	else
		if (!dma_mapping_error(dev, qat_req->out.enc.c))
			dma_unmap_single(dev, qat_req->out.enc.c, ctx->key_sz,
					 DMA_FROM_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_rsa_input_params),
				 DMA_TO_DEVICE);
	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_rsa_output_params),
				 DMA_TO_DEVICE);
	return ret;
}

static int qat_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_rsa_request *qat_req =
			PTR_ALIGN(akcipher_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret, ctr = 0;

	if (unlikely(!ctx->n || !ctx->d))
		return -EINVAL;

	if (req->dst_len < ctx->key_sz) {
		req->dst_len = ctx->key_sz;
		return -EOVERFLOW;
	}
	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
	msg->pke_hdr.cd_pars.func_id = qat_rsa_dec_fn_id(ctx->key_sz);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->ctx = ctx;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	qat_req->in.dec.d = ctx->dma_d;
	qat_req->in.dec.n = ctx->dma_n;
	ret = -ENOMEM;

	/*
	 * The source can be any length in the valid range, but the hardware
	 * expects a contiguous buffer exactly as long as the modulus n.  If
	 * the caller's buffer differs, allocate a new buffer and copy the
	 * source data into it right-aligned; otherwise just DMA-map the
	 * caller's buffer directly.
	 */
	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
		qat_req->src_align = NULL;
		qat_req->in.dec.c = dma_map_single(dev, sg_virt(req->src),
						   req->src_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, qat_req->in.dec.c)))
			return ret;

	} else {
		int shift = ctx->key_sz - req->src_len;

		qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
							 &qat_req->in.dec.c,
							 GFP_KERNEL);
		if (unlikely(!qat_req->src_align))
			return ret;

		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
					 0, req->src_len, 0);
	}
	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
		qat_req->dst_align = NULL;
		qat_req->out.dec.m = dma_map_single(dev, sg_virt(req->dst),
						    req->dst_len,
						    DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(dev, qat_req->out.dec.m)))
			goto unmap_src;

	} else {
		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
							 &qat_req->out.dec.m,
							 GFP_KERNEL);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;

	}

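	/*
	 * Terminate the parameter tables after the last used slot (three
	 * inputs: c, d, n; one output: m) and DMA-map the tables themselves
	 * so the firmware can fetch the operand addresses.
	 */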
	qat_req->in.in_tab[3] = 0;
	qat_req->out.out_tab[1] = 0;
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.dec.c,
					 sizeof(struct qat_rsa_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.dec.m,
					  sizeof(struct qat_rsa_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (uint64_t)(__force long)req;
	msg->input_param_count = 3;
	msg->output_param_count = 1;
	do {
		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
	} while (ret == -EBUSY && ctr++ < 100);

	if (!ret)
		return -EINPROGRESS;
unmap_src:
	if (qat_req->src_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
				  qat_req->in.dec.c);
	else
		if (!dma_mapping_error(dev, qat_req->in.dec.c))
			dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz,
					 DMA_TO_DEVICE);
unmap_dst:
	if (qat_req->dst_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
				  qat_req->out.dec.m);
	else
		if (!dma_mapping_error(dev, qat_req->out.dec.m))
			dma_unmap_single(dev, qat_req->out.dec.m, ctx->key_sz,
					 DMA_FROM_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_rsa_input_params),
				 DMA_TO_DEVICE);
	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_rsa_output_params),
				 DMA_TO_DEVICE);
	return ret;
}

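/*
 * Callbacks invoked by the ASN.1 BER decoder (generated from the
 * qat_rsapubkey/qat_rsaprivkey ASN.1 descriptions) as it walks a key.
 * Leading zero bytes are stripped from each integer; the byte length of n
 * defines key_sz for the transform.
 */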
int qat_rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
		  const void *value, size_t vlen)
{
	struct qat_rsa_ctx *ctx = context;
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;
	int ret;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	ctx->key_sz = vlen;
	ret = -EINVAL;
	/* In FIPS mode only allow key size 2K & 3K */
	if (fips_enabled && (ctx->key_sz != 256 && ctx->key_sz != 384)) {
		pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
		goto err;
	}
	/* invalid key size provided */
	if (!qat_rsa_enc_fn_id(ctx->key_sz))
		goto err;

	ret = -ENOMEM;
	ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
	if (!ctx->n)
		goto err;

	memcpy(ctx->n, ptr, ctx->key_sz);
	return 0;
err:
	ctx->key_sz = 0;
	ctx->n = NULL;
	return ret;
}

int qat_rsa_get_e(void *context, size_t hdrlen, unsigned char tag,
		  const void *value, size_t vlen)
{
	struct qat_rsa_ctx *ctx = context;
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
		ctx->e = NULL;
		return -EINVAL;
	}

	ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
	if (!ctx->e) {
		ctx->e = NULL;
		return -ENOMEM;
	}
	memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
	return 0;
}

int qat_rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
		  const void *value, size_t vlen)
{
	struct qat_rsa_ctx *ctx = context;
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;
	int ret;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	ret = -EINVAL;
	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		goto err;

	/* In FIPS mode only allow key size 2K & 3K */
	if (fips_enabled && (vlen != 256 && vlen != 384)) {
		pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
		goto err;
	}

	ret = -ENOMEM;
	ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
	if (!ctx->d)
		goto err;

	memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
	return 0;
err:
	ctx->d = NULL;
	return ret;
}

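/*
 * Replace any previously programmed key, then run the BER decoder over the
 * caller's key material; the decoder fills in n, e and (for private keys) d
 * through the callbacks above.  On failure all key buffers are released so
 * the transform is left without a key.
 */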
static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
			  unsigned int keylen, bool private)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	int ret;

	/* Free the old key if any */
	if (ctx->n)
		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
	if (ctx->e)
		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
	if (ctx->d) {
		memset(ctx->d, '\0', ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
	}

	ctx->n = NULL;
	ctx->e = NULL;
	ctx->d = NULL;

	if (private)
		ret = asn1_ber_decoder(&qat_rsaprivkey_decoder, ctx, key,
				       keylen);
	else
		ret = asn1_ber_decoder(&qat_rsapubkey_decoder, ctx, key,
				       keylen);
	if (ret < 0)
		goto free;

	if (!ctx->n || !ctx->e) {
		/* invalid key provided */
		ret = -EINVAL;
		goto free;
	}
	if (private && !ctx->d) {
		/* invalid private key provided */
		ret = -EINVAL;
		goto free;
	}

	return 0;
free:
	if (ctx->d) {
		memset(ctx->d, '\0', ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
		ctx->d = NULL;
	}
	if (ctx->e) {
		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
		ctx->e = NULL;
	}
	if (ctx->n) {
		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
		ctx->n = NULL;
		ctx->key_sz = 0;
	}
	return ret;
}

static int qat_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
			     unsigned int keylen)
{
	return qat_rsa_setkey(tfm, key, keylen, false);
}

static int qat_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
			      unsigned int keylen)
{
	return qat_rsa_setkey(tfm, key, keylen, true);
}

static int qat_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return (ctx->n) ? ctx->key_sz : -EINVAL;
}

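/*
 * Bind the transform to a QAT crypto instance on the caller's NUMA node;
 * the instance is released again in qat_rsa_exit_tfm().
 */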
static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst =
			qat_crypto_get_instance_node(get_current_node());

	if (!inst)
		return -EINVAL;

	ctx->key_sz = 0;
	ctx->inst = inst;
	return 0;
}

static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);

	if (ctx->n)
		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
	if (ctx->e)
		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
	if (ctx->d) {
		memset(ctx->d, '\0', ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
	}
	qat_crypto_put_instance(ctx->inst);
	ctx->n = NULL;
	ctx->e = NULL;
	ctx->d = NULL;
}

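/*
 * .sign and .verify reuse the raw private/public-key primitives, leaving
 * any padding scheme to the callers.  cra_priority is set high so this
 * hardware implementation is preferred over the software "rsa" provider,
 * and .reqsize reserves room to 64-byte-align the per-request state
 * (see struct qat_rsa_request).
 */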
static struct akcipher_alg rsa = {
	.encrypt = qat_rsa_enc,
	.decrypt = qat_rsa_dec,
	.sign = qat_rsa_dec,
	.verify = qat_rsa_enc,
	.set_pub_key = qat_rsa_setpubkey,
	.set_priv_key = qat_rsa_setprivkey,
	.max_size = qat_rsa_max_size,
	.init = qat_rsa_init_tfm,
	.exit = qat_rsa_exit_tfm,
	.reqsize = sizeof(struct qat_rsa_request) + 64,
	.base = {
		.cra_name = "rsa",
		.cra_driver_name = "qat-rsa",
		.cra_priority = 1000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct qat_rsa_ctx),
	},
};

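/*
 * Registration is reference counted across accelerator devices: the
 * algorithm is registered with the crypto API when the first device comes
 * up and unregistered when the last one goes away.
 */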
int qat_asym_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs == 1) {
		rsa.base.cra_flags = 0;
		ret = crypto_register_akcipher(&rsa);
	}
	mutex_unlock(&algs_lock);
	return ret;
}

void qat_asym_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs == 0)
		crypto_unregister_akcipher(&rsa);
	mutex_unlock(&algs_lock);
}