/*
 * algif_skcipher: User-space interface for skcipher algorithms
 *
 * This file provides the user-space API for symmetric key ciphers.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>
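
/*
 * Overview: an AF_ALG "skcipher" socket wraps one asynchronous block
 * cipher transform.  Plaintext or ciphertext is queued with sendmsg()
 * or sendpage() into a chained scatterlist (ctx->tsgl) and is only
 * transformed when user space calls recvmsg(), either synchronously or
 * through the AIO path.
 *
 * A minimal sketch of the user-space side, assuming AES-CBC with a
 * 16-byte key buffer `key' (illustrative only, error handling omitted):
 *
 *	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *	struct sockaddr_alg sa = {
 *		.salg_family = AF_ALG,
 *		.salg_type = "skcipher",
 *		.salg_name = "cbc(aes)",
 *	};
 *
 *	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 16);
 *	int opfd = accept(tfmfd, NULL, 0);
 *
 * The direction and IV are then passed as ancillary data on
 * sendmsg(opfd, ...) together with the data itself, and the result is
 * read back via recvmsg(opfd, ...).
 */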

struct skcipher_sg_list {
	struct list_head list;

	int cur;

	struct scatterlist sg[0];
};

struct skcipher_tfm {
	struct crypto_ablkcipher *skcipher;
	bool has_key;
};

struct skcipher_ctx {
	struct list_head tsgl;
	struct af_alg_sgl rsgl;

	void *iv;

	struct af_alg_completion completion;

	atomic_t inflight;
	unsigned used;

	unsigned int len;
	bool more;
	bool merge;
	bool enc;

	struct ablkcipher_request req;
};

struct skcipher_async_rsgl {
	struct af_alg_sgl sgl;
	struct list_head list;
};

struct skcipher_async_req {
	struct kiocb *iocb;
	struct skcipher_async_rsgl first_sgl;
	struct list_head list;
	struct scatterlist *tsg;
	char iv[];
};

#define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \
	crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req)))

#define GET_REQ_SIZE(ctx) \
	crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req))

#define GET_IV_SIZE(ctx) \
	crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(&ctx->req))

#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
		      sizeof(struct scatterlist) - 1)

static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
{
	struct skcipher_async_rsgl *rsgl, *tmp;
	struct scatterlist *sgl;
	struct scatterlist *sg;
	int i, n;

	list_for_each_entry_safe(rsgl, tmp, &sreq->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &sreq->first_sgl)
			kfree(rsgl);
	}
	sgl = sreq->tsg;
	n = sg_nents(sgl);
	for_each_sg(sgl, sg, n, i)
		put_page(sg_page(sg));

	kfree(sreq->tsg);
}

static void skcipher_async_cb(struct crypto_async_request *req, int err)
{
	struct sock *sk = req->data;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_async_req *sreq = GET_SREQ(req, ctx);
	struct kiocb *iocb = sreq->iocb;

	atomic_dec(&ctx->inflight);
	skcipher_free_async_sgls(sreq);
	kfree(req);
	iocb->ki_complete(iocb, err, err);
}

static inline int skcipher_sndbuf(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;

	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
			  ctx->used, 0);
}

static inline bool skcipher_writable(struct sock *sk)
{
	return PAGE_SIZE <= skcipher_sndbuf(sk);
}
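
/*
 * Transmit-side scatterlist bookkeeping.  skcipher_alloc_sgl() grows
 * ctx->tsgl by one roughly page-sized (4096-byte) skcipher_sg_list
 * block at a time and chains it onto the previous block;
 * skcipher_pull_sgl() walks the list from the front and releases the
 * first `used' bytes once a cipher operation has consumed them.
 */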

static int skcipher_alloc_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg = NULL;

	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
	if (!list_empty(&ctx->tsgl))
		sg = sgl->sg;

	if (!sg || sgl->cur >= MAX_SGL_ENTS) {
		sgl = sock_kmalloc(sk, sizeof(*sgl) +
				       sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
				   GFP_KERNEL);
		if (!sgl)
			return -ENOMEM;

		sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
		sgl->cur = 0;

		if (sg)
			scatterwalk_sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);

		list_add_tail(&sgl->list, &ctx->tsgl);
	}

	return 0;
}

static void skcipher_pull_sgl(struct sock *sk, int used, int put)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int i;

	while (!list_empty(&ctx->tsgl)) {
		sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list,
				       list);
		sg = sgl->sg;

		for (i = 0; i < sgl->cur; i++) {
			int plen = min_t(int, used, sg[i].length);

			if (!sg_page(sg + i))
				continue;

			sg[i].length -= plen;
			sg[i].offset += plen;

			used -= plen;
			ctx->used -= plen;

			if (sg[i].length)
				return;
			if (put)
				put_page(sg_page(sg + i));
			sg_assign_page(sg + i, NULL);
		}

		list_del(&sgl->list);
		sock_kfree_s(sk, sgl,
			     sizeof(*sgl) + sizeof(sgl->sg[0]) *
					    (MAX_SGL_ENTS + 1));
	}

	if (!ctx->used)
		ctx->merge = 0;
}

static void skcipher_free_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;

	skcipher_pull_sgl(sk, ctx->used, 1);
}

static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
{
	long timeout;
	DEFINE_WAIT(wait);
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	for (;;) {
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, skcipher_writable(sk))) {
			err = 0;
			break;
		}
	}
	finish_wait(sk_sleep(sk), &wait);

	return err;
}

static void skcipher_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!skcipher_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	long timeout;
	DEFINE_WAIT(wait);
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);

	for (;;) {
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, ctx->used)) {
			err = 0;
			break;
		}
	}
	finish_wait(sk_sleep(sk), &wait);

	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);

	return err;
}

static void skcipher_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}
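
/*
 * skcipher_sendmsg() only queues data; nothing is transformed until
 * recvmsg().  The first sendmsg() of an operation may carry ancillary
 * data selecting the direction (ALG_SET_OP with ALG_OP_ENCRYPT or
 * ALG_OP_DECRYPT) and the IV (ALG_SET_IV), which af_alg_cmsg_send()
 * parses into struct af_alg_control.  MSG_MORE keeps the operation
 * open across several sendmsg()/sendpage() calls.
 *
 * A sketch of the matching user-space control message (illustrative
 * only, buffer name hypothetical, error handling omitted):
 *
 *	char cbuf[CMSG_SPACE(4)] = {};
 *	struct msghdr msg = {
 *		.msg_control = cbuf,
 *		.msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_ALG;
 *	cmsg->cmsg_type = ALG_SET_OP;
 *	cmsg->cmsg_len = CMSG_LEN(4);
 *	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;
 */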

static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
	unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
	struct skcipher_sg_list *sgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = 0;
	bool init = 0;
	int err;
	int i;

	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = 1;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = 1;
			break;
		case ALG_OP_DECRYPT:
			enc = 0;
			break;
		default:
			return -EINVAL;
		}

		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	err = -EINVAL;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);
	}

	while (size) {
		struct scatterlist *sg;
		unsigned long len = size;
		int plen;

		if (ctx->merge) {
			sgl = list_entry(ctx->tsgl.prev,
					 struct skcipher_sg_list, list);
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(unsigned long, len,
				    PAGE_SIZE - sg->offset - sg->length);

			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!skcipher_writable(sk)) {
			err = skcipher_wait_for_wmem(sk, msg->msg_flags);
			if (err)
				goto unlock;
		}

		len = min_t(unsigned long, len, skcipher_sndbuf(sk));

		err = skcipher_alloc_sgl(sk);
		if (err)
			goto unlock;

		sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
		sg = sgl->sg;
		if (sgl->cur)
			sg_unmark_end(sg + sgl->cur - 1);
		do {
			i = sgl->cur;
			plen = min_t(int, len, PAGE_SIZE);

			sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
			err = -ENOMEM;
			if (!sg_page(sg + i))
				goto unlock;

			err = memcpy_from_msg(page_address(sg_page(sg + i)),
					      msg, plen);
			if (err) {
				__free_page(sg_page(sg + i));
				sg_assign_page(sg + i, NULL);
				goto unlock;
			}

			sg[i].length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			size -= plen;
			sgl->cur++;
		} while (len && sgl->cur < MAX_SGL_ENTS);

		if (!size)
			sg_mark_end(sg + sgl->cur - 1);

		ctx->merge = plen & (PAGE_SIZE - 1);
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;

unlock:
	skcipher_data_wakeup(sk);
	release_sock(sk);

	return copied ?: err;
}

static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
				 int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!skcipher_writable(sk)) {
		err = skcipher_wait_for_wmem(sk, flags);
		if (err)
			goto unlock;
	}

	err = skcipher_alloc_sgl(sk);
	if (err)
		goto unlock;

	ctx->merge = 0;
	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);

	if (sgl->cur)
		sg_unmark_end(sgl->sg + sgl->cur - 1);

	sg_mark_end(sgl->sg + sgl->cur);
	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

	err = 0;

done:
	ctx->more = flags & MSG_MORE;

unlock:
	skcipher_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}

static int skcipher_all_sg_nents(struct skcipher_ctx *ctx)
{
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int nents = 0;

	list_for_each_entry(sgl, &ctx->tsgl, list) {
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		nents += sg_nents(sg);
	}
	return nents;
}
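
/*
 * Asynchronous (AIO) receive path.  The tx scatterlist is handed over
 * from ctx->tsgl to a freshly allocated skcipher_async_req so that the
 * pages stay pinned while the driver works on them; when the request
 * completes, skcipher_async_cb() drops ctx->inflight, frees everything
 * and completes the iocb.
 */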

static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
				  int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	struct skcipher_async_req *sreq;
	struct ablkcipher_request *req;
	struct skcipher_async_rsgl *last_rsgl = NULL;
	unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx);
	unsigned int reqlen = sizeof(struct skcipher_async_req) +
				GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx);
	int err = -ENOMEM;
	bool mark = false;

	lock_sock(sk);
	req = kmalloc(reqlen, GFP_KERNEL);
	if (unlikely(!req))
		goto unlock;

	sreq = GET_SREQ(req, ctx);
	sreq->iocb = msg->msg_iocb;
	memset(&sreq->first_sgl, '\0', sizeof(struct skcipher_async_rsgl));
	INIT_LIST_HEAD(&sreq->list);
	sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
	if (unlikely(!sreq->tsg)) {
		kfree(req);
		goto unlock;
	}
	sg_init_table(sreq->tsg, tx_nents);
	memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx));
	ablkcipher_request_set_tfm(req, crypto_ablkcipher_reqtfm(&ctx->req));
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					skcipher_async_cb, sk);

	while (iov_iter_count(&msg->msg_iter)) {
		struct skcipher_async_rsgl *rsgl;
		int used;

		if (!ctx->used) {
			err = skcipher_wait_for_data(sk, flags);
			if (err)
				goto free;
		}
		sgl = list_first_entry(&ctx->tsgl,
				       struct skcipher_sg_list, list);
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		used = min_t(unsigned long, ctx->used,
			     iov_iter_count(&msg->msg_iter));
		used = min_t(unsigned long, used, sg->length);

		if (txbufs == tx_nents) {
			struct scatterlist *tmp;
			int x;

			/* Ran out of tx slots in async request
			 * need to expand */
			tmp = kcalloc(tx_nents * 2, sizeof(*tmp),
				      GFP_KERNEL);
			if (!tmp)
				goto free;

			sg_init_table(tmp, tx_nents * 2);
			for (x = 0; x < tx_nents; x++)
				sg_set_page(&tmp[x], sg_page(&sreq->tsg[x]),
					    sreq->tsg[x].length,
					    sreq->tsg[x].offset);
			kfree(sreq->tsg);
			sreq->tsg = tmp;
			tx_nents *= 2;
			mark = true;
		}
		/* Need to take over the tx sgl from ctx
		 * to the asynch req - these sgls will be freed later */
		sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length,
			    sg->offset);

		if (list_empty(&sreq->list)) {
			rsgl = &sreq->first_sgl;
			list_add_tail(&rsgl->list, &sreq->list);
		} else {
			rsgl = kmalloc(sizeof(*rsgl), GFP_KERNEL);
			if (!rsgl) {
				err = -ENOMEM;
				goto free;
			}
			list_add_tail(&rsgl->list, &sreq->list);
		}

		used = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, used);
		err = used;
		if (used < 0)
			goto free;
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;
		len += used;
		skcipher_pull_sgl(sk, used, 0);
		iov_iter_advance(&msg->msg_iter, used);
	}

	if (mark)
		sg_mark_end(sreq->tsg + txbufs - 1);

	ablkcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
				     len, sreq->iv);
	err = ctx->enc ? crypto_ablkcipher_encrypt(req) :
			 crypto_ablkcipher_decrypt(req);
	if (err == -EINPROGRESS) {
		atomic_inc(&ctx->inflight);
		err = -EIOCBQUEUED;
		goto unlock;
	}

free:
	skcipher_free_async_sgls(sreq);
	kfree(req);
unlock:
	skcipher_wmem_wakeup(sk);
	release_sock(sk);
	return err;
}
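
/*
 * Synchronous receive path.  Each iteration maps part of the caller's
 * iovec with af_alg_make_sg() and ciphers directly from ctx->tsgl into
 * it.  Unless this chunk finishes the whole operation (no MSG_MORE
 * pending and the queue fully consumed), the length processed per
 * iteration is rounded down to the cipher block size.
 */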

static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
				 int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	unsigned bs = crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(
		&ctx->req));
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int err = -EAGAIN;
	int used;
	long copied = 0;

	lock_sock(sk);
	while (msg_data_left(msg)) {
		sgl = list_first_entry(&ctx->tsgl,
				       struct skcipher_sg_list, list);
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		if (!ctx->used) {
			err = skcipher_wait_for_data(sk, flags);
			if (err)
				goto unlock;
		}

		used = min_t(unsigned long, ctx->used, msg_data_left(msg));

		used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used);
		err = used;
		if (err < 0)
			goto unlock;

		if (ctx->more || used < ctx->used)
			used -= used % bs;

		err = -EINVAL;
		if (!used)
			goto free;

		ablkcipher_request_set_crypt(&ctx->req, sg,
					     ctx->rsgl.sg, used,
					     ctx->iv);

		err = af_alg_wait_for_completion(
				ctx->enc ?
					crypto_ablkcipher_encrypt(&ctx->req) :
					crypto_ablkcipher_decrypt(&ctx->req),
				&ctx->completion);

free:
		af_alg_free_sg(&ctx->rsgl);

		if (err)
			goto unlock;

		copied += used;
		skcipher_pull_sgl(sk, used, 1);
		iov_iter_advance(&msg->msg_iter, used);
	}

	err = 0;

unlock:
	skcipher_wmem_wakeup(sk);
	release_sock(sk);

	return copied ?: err;
}

static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t ignored, int flags)
{
	return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
		skcipher_recvmsg_async(sock, msg, flags) :
		skcipher_recvmsg_sync(sock, msg, flags);
}

static unsigned int skcipher_poll(struct file *file, struct socket *sock,
				  poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (ctx->used)
		mask |= POLLIN | POLLRDNORM;

	if (skcipher_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static struct proto_ops algif_skcipher_ops = {
	.family		= PF_ALG,

	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.getname	= sock_no_getname,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.getsockopt	= sock_no_getsockopt,
	.mmap		= sock_no_mmap,
	.bind		= sock_no_bind,
	.accept		= sock_no_accept,
	.setsockopt	= sock_no_setsockopt,

	.release	= af_alg_release,
	.sendmsg	= skcipher_sendmsg,
	.sendpage	= skcipher_sendpage,
	.recvmsg	= skcipher_recvmsg,
	.poll		= skcipher_poll,
};
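
/*
 * Key enforcement for sockets accepted before a key was set: such
 * sockets use algif_skcipher_ops_nokey, whose handlers call
 * skcipher_check_key() first.  Once the parent tfm has a key, the
 * child takes a reference on the parent via pask->refcnt and all
 * subsequent calls skip the check.
 */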

static int skcipher_check_key(struct socket *sock)
{
	int err = 0;
	struct sock *psk;
	struct alg_sock *pask;
	struct skcipher_tfm *tfm;
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);

	lock_sock(sk);
	if (ask->refcnt)
		goto unlock_child;

	psk = ask->parent;
	pask = alg_sk(ask->parent);
	tfm = pask->private;

	err = -ENOKEY;
	lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
	if (!tfm->has_key)
		goto unlock;

	if (!pask->refcnt++)
		sock_hold(psk);

	ask->refcnt = 1;
	sock_put(psk);

	err = 0;

unlock:
	release_sock(psk);
unlock_child:
	release_sock(sk);

	return err;
}

static int skcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
				  size_t size)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return skcipher_sendmsg(sock, msg, size);
}

static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page,
				       int offset, size_t size, int flags)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return skcipher_sendpage(sock, page, offset, size, flags);
}

static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
				  size_t ignored, int flags)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return skcipher_recvmsg(sock, msg, ignored, flags);
}

static struct proto_ops algif_skcipher_ops_nokey = {
	.family		= PF_ALG,

	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.getname	= sock_no_getname,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.getsockopt	= sock_no_getsockopt,
	.mmap		= sock_no_mmap,
	.bind		= sock_no_bind,
	.accept		= sock_no_accept,
	.setsockopt	= sock_no_setsockopt,

	.release	= af_alg_release,
	.sendmsg	= skcipher_sendmsg_nokey,
	.sendpage	= skcipher_sendpage_nokey,
	.recvmsg	= skcipher_recvmsg_nokey,
	.poll		= skcipher_poll,
};

static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
	struct skcipher_tfm *tfm;
	struct crypto_ablkcipher *skcipher;

	tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
	if (!tfm)
		return ERR_PTR(-ENOMEM);

	skcipher = crypto_alloc_ablkcipher(name, type, mask);
	if (IS_ERR(skcipher)) {
		kfree(tfm);
		return ERR_CAST(skcipher);
	}

	tfm->skcipher = skcipher;

	return tfm;
}

static void skcipher_release(void *private)
{
	struct skcipher_tfm *tfm = private;

	crypto_free_ablkcipher(tfm->skcipher);
	kfree(tfm);
}

static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
	struct skcipher_tfm *tfm = private;
	int err;

	err = crypto_ablkcipher_setkey(tfm->skcipher, key, keylen);
	tfm->has_key = !err;

	return err;
}

static void skcipher_wait(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	int ctr = 0;

	while (atomic_read(&ctx->inflight) && ctr++ < 100)
		msleep(100);
}

static void skcipher_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);

	if (atomic_read(&ctx->inflight))
		skcipher_wait(sk);

	skcipher_free_sgl(sk);
	sock_kzfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm));
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
{
	struct skcipher_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_tfm *tfm = private;
	struct crypto_ablkcipher *skcipher = tfm->skcipher;
	unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(skcipher);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(skcipher),
			       GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}

	memset(ctx->iv, 0, crypto_ablkcipher_ivsize(skcipher));

	INIT_LIST_HEAD(&ctx->tsgl);
	ctx->len = len;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	atomic_set(&ctx->inflight, 0);
	af_alg_init_completion(&ctx->completion);

	ask->private = ctx;

	ablkcipher_request_set_tfm(&ctx->req, skcipher);
	ablkcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					af_alg_complete, &ctx->completion);

	sk->sk_destruct = skcipher_sock_destruct;

	return 0;
}

static int skcipher_accept_parent(void *private, struct sock *sk)
{
	struct skcipher_tfm *tfm = private;

	if (!tfm->has_key)
		return -ENOKEY;

	return skcipher_accept_parent_nokey(private, sk);
}

static const struct af_alg_type algif_type_skcipher = {
	.bind		= skcipher_bind,
	.release	= skcipher_release,
	.setkey		= skcipher_setkey,
	.accept		= skcipher_accept_parent,
	.accept_nokey	= skcipher_accept_parent_nokey,
	.ops		= &algif_skcipher_ops,
	.ops_nokey	= &algif_skcipher_ops_nokey,
	.name		= "skcipher",
	.owner		= THIS_MODULE
};

static int __init algif_skcipher_init(void)
{
	return af_alg_register_type(&algif_type_skcipher);
}

static void __exit algif_skcipher_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_skcipher);

	BUG_ON(err);
}

module_init(algif_skcipher_init);
module_exit(algif_skcipher_exit);
MODULE_LICENSE("GPL");