This source file includes the following definitions.
- any_tfm
- any_tfm_aead
- crypt_iv_plain_gen
- crypt_iv_plain64_gen
- crypt_iv_plain64be_gen
- crypt_iv_essiv_gen
- crypt_iv_benbi_ctr
- crypt_iv_benbi_dtr
- crypt_iv_benbi_gen
- crypt_iv_null_gen
- crypt_iv_lmk_dtr
- crypt_iv_lmk_ctr
- crypt_iv_lmk_init
- crypt_iv_lmk_wipe
- crypt_iv_lmk_one
- crypt_iv_lmk_gen
- crypt_iv_lmk_post
- crypt_iv_tcw_dtr
- crypt_iv_tcw_ctr
- crypt_iv_tcw_init
- crypt_iv_tcw_wipe
- crypt_iv_tcw_whitening
- crypt_iv_tcw_gen
- crypt_iv_tcw_post
- crypt_iv_random_gen
- crypt_iv_eboiv_ctr
- crypt_iv_eboiv_gen
- crypt_integrity_aead
- crypt_integrity_hmac
- crypt_get_sg_data
- dm_crypt_integrity_io_alloc
- crypt_integrity_ctr
- crypt_convert_init
- dmreq_of_req
- req_of_dmreq
- iv_of_dmreq
- org_iv_of_dmreq
- org_sector_of_dmreq
- org_tag_of_dmreq
- tag_from_dmreq
- iv_tag_from_dmreq
- crypt_convert_block_aead
- crypt_convert_block_skcipher
- crypt_alloc_req_skcipher
- crypt_alloc_req_aead
- crypt_alloc_req
- crypt_free_req_skcipher
- crypt_free_req_aead
- crypt_free_req
- crypt_convert
- crypt_alloc_buffer
- crypt_free_buffer_pages
- crypt_io_init
- crypt_inc_pending
- crypt_dec_pending
- crypt_endio
- clone_init
- kcryptd_io_read
- kcryptd_io_read_work
- kcryptd_queue_read
- kcryptd_io_write
- dmcrypt_write
- kcryptd_crypt_write_io_submit
- kcryptd_crypt_write_convert
- kcryptd_crypt_read_done
- kcryptd_crypt_read_convert
- kcryptd_async_done
- kcryptd_crypt
- kcryptd_queue_crypt
- crypt_free_tfms_aead
- crypt_free_tfms_skcipher
- crypt_free_tfms
- crypt_alloc_tfms_skcipher
- crypt_alloc_tfms_aead
- crypt_alloc_tfms
- crypt_subkey_size
- crypt_authenckey_size
- crypt_copy_authenckey
- crypt_setkey
- contains_whitespace
- crypt_set_keyring_key
- get_key_size
- crypt_set_keyring_key
- get_key_size
- crypt_set_key
- crypt_wipe_key
- crypt_calculate_pages_per_client
- crypt_page_alloc
- crypt_page_free
- crypt_dtr
- crypt_ctr_ivmode
- crypt_ctr_auth_cipher
- crypt_ctr_cipher_new
- crypt_ctr_cipher_old
- crypt_ctr_cipher
- crypt_ctr_optional
- crypt_ctr
- crypt_map
- crypt_status
- crypt_postsuspend
- crypt_preresume
- crypt_resume
- crypt_message
- crypt_iterate_devices
- crypt_io_hints
- dm_crypt_init
- dm_crypt_exit
1
2
3
4
5
6
7
8
9
10 #include <linux/completion.h>
11 #include <linux/err.h>
12 #include <linux/module.h>
13 #include <linux/init.h>
14 #include <linux/kernel.h>
15 #include <linux/key.h>
16 #include <linux/bio.h>
17 #include <linux/blkdev.h>
18 #include <linux/mempool.h>
19 #include <linux/slab.h>
20 #include <linux/crypto.h>
21 #include <linux/workqueue.h>
22 #include <linux/kthread.h>
23 #include <linux/backing-dev.h>
24 #include <linux/atomic.h>
25 #include <linux/scatterlist.h>
26 #include <linux/rbtree.h>
27 #include <linux/ctype.h>
28 #include <asm/page.h>
29 #include <asm/unaligned.h>
30 #include <crypto/hash.h>
31 #include <crypto/md5.h>
32 #include <crypto/algapi.h>
33 #include <crypto/skcipher.h>
34 #include <crypto/aead.h>
35 #include <crypto/authenc.h>
36 #include <linux/rtnetlink.h>
37 #include <keys/user-type.h>
38
39 #include <linux/device-mapper.h>
40
41 #define DM_MSG_PREFIX "crypt"
42
43
44
45
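/*
 * context holding the current state of a multi-part conversion
 */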
46 struct convert_context {
47 struct completion restart;
48 struct bio *bio_in;
49 struct bio *bio_out;
50 struct bvec_iter iter_in;
51 struct bvec_iter iter_out;
52 u64 cc_sector;
53 atomic_t cc_pending;
54 union {
55 struct skcipher_request *req;
56 struct aead_request *req_aead;
57 } r;
58
59 };
60
61
62
63
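/*
 * per bio private data
 */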
64 struct dm_crypt_io {
65 struct crypt_config *cc;
66 struct bio *base_bio;
67 u8 *integrity_metadata;
68 bool integrity_metadata_from_pool;
69 struct work_struct work;
70
71 struct convert_context ctx;
72
73 atomic_t io_pending;
74 blk_status_t error;
75 sector_t sector;
76
77 struct rb_node rb_node;
78 } CRYPTO_MINALIGN_ATTR;
79
80 struct dm_crypt_request {
81 struct convert_context *ctx;
82 struct scatterlist sg_in[4];
83 struct scatterlist sg_out[4];
84 u64 iv_sector;
85 };
86
87 struct crypt_config;
88
89 struct crypt_iv_operations {
90 int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
91 const char *opts);
92 void (*dtr)(struct crypt_config *cc);
93 int (*init)(struct crypt_config *cc);
94 int (*wipe)(struct crypt_config *cc);
95 int (*generator)(struct crypt_config *cc, u8 *iv,
96 struct dm_crypt_request *dmreq);
97 int (*post)(struct crypt_config *cc, u8 *iv,
98 struct dm_crypt_request *dmreq);
99 };
100
101 struct iv_benbi_private {
102 int shift;
103 };
104
105 #define LMK_SEED_SIZE 64
106 struct iv_lmk_private {
107 struct crypto_shash *hash_tfm;
108 u8 *seed;
109 };
110
111 #define TCW_WHITENING_SIZE 16
112 struct iv_tcw_private {
113 struct crypto_shash *crc32_tfm;
114 u8 *iv_seed;
115 u8 *whitening;
116 };
117
118
119
120
121
122 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
123 DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
124
125 enum cipher_flags {
126 CRYPT_MODE_INTEGRITY_AEAD,
127 CRYPT_IV_LARGE_SECTORS,
128 };
129
130
131
132
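/*
 * The fields in here must be read only after initialization.
 */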
133 struct crypt_config {
134 struct dm_dev *dev;
135 sector_t start;
136
137 struct percpu_counter n_allocated_pages;
138
139 struct workqueue_struct *io_queue;
140 struct workqueue_struct *crypt_queue;
141
142 spinlock_t write_thread_lock;
143 struct task_struct *write_thread;
144 struct rb_root write_tree;
145
146 char *cipher_string;
147 char *cipher_auth;
148 char *key_string;
149
150 const struct crypt_iv_operations *iv_gen_ops;
151 union {
152 struct iv_benbi_private benbi;
153 struct iv_lmk_private lmk;
154 struct iv_tcw_private tcw;
155 } iv_gen_private;
156 u64 iv_offset;
157 unsigned int iv_size;
158 unsigned short int sector_size;
159 unsigned char sector_shift;
160
161 union {
162 struct crypto_skcipher **tfms;
163 struct crypto_aead **tfms_aead;
164 } cipher_tfm;
165 unsigned tfms_count;
166 unsigned long cipher_flags;
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181 unsigned int dmreq_start;
182
183 unsigned int per_bio_data_size;
184
185 unsigned long flags;
186 unsigned int key_size;
187 unsigned int key_parts;
188 unsigned int key_extra_size;
189 unsigned int key_mac_size;
190
191 unsigned int integrity_tag_size;
192 unsigned int integrity_iv_size;
193 unsigned int on_disk_tag_size;
194
195
196
197
198
199 unsigned tag_pool_max_sectors;
200 mempool_t tag_pool;
201 mempool_t req_pool;
202 mempool_t page_pool;
203
204 struct bio_set bs;
205 struct mutex bio_alloc_lock;
206
207 u8 *authenc_key;
208 u8 key[0];
209 };
210
211 #define MIN_IOS 64
212 #define MAX_TAG_SIZE 480
213 #define POOL_ENTRY_SIZE 512
214
215 static DEFINE_SPINLOCK(dm_crypt_clients_lock);
216 static unsigned dm_crypt_clients_n = 0;
217 static volatile unsigned long dm_crypt_pages_per_client;
218 #define DM_CRYPT_MEMORY_PERCENT 2
219 #define DM_CRYPT_MIN_PAGES_PER_CLIENT (BIO_MAX_PAGES * 16)
220
221 static void clone_init(struct dm_crypt_io *, struct bio *);
222 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
223 static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
224 struct scatterlist *sg);
225
226
227
228
229 static struct crypto_skcipher *any_tfm(struct crypt_config *cc)
230 {
231 return cc->cipher_tfm.tfms[0];
232 }
233
234 static struct crypto_aead *any_tfm_aead(struct crypt_config *cc)
235 {
236 return cc->cipher_tfm.tfms_aead[0];
237 }
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
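/*
 * Different IV generation algorithms implemented below:
 *
 * plain: the IV is the 32-bit little-endian sector number, zero padded.
 *
 * plain64: the IV is the 64-bit little-endian sector number, zero padded.
 *
 * plain64be: the IV is the 64-bit big-endian sector number, zero padded.
 *
 * essiv: "encrypted sector|salt initial vector"; handled by the crypto API
 *        essiv template, so the generator only supplies a plain64-style IV.
 *
 * benbi: the 64-bit big-endian narrow-block count, starting at 1
 *        (used with narrow-block modes such as LRW).
 *
 * null: the IV is always zero.
 *
 * lmk: IV chaining mode compatible with loop-AES (MD5 based, optional seed).
 *
 * tcw: IV and CRC32-based sector whitening compatible with old TrueCrypt
 *      volumes.
 *
 * random: a random IV per request, only usable when the IV is stored in
 *         the per-sector integrity metadata.
 *
 * eboiv: encrypted byte-offset IV, the sector byte offset encrypted with
 *        the bulk cipher key.
 */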
290 static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
291 struct dm_crypt_request *dmreq)
292 {
293 memset(iv, 0, cc->iv_size);
294 *(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);
295
296 return 0;
297 }
298
299 static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
300 struct dm_crypt_request *dmreq)
301 {
302 memset(iv, 0, cc->iv_size);
303 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
304
305 return 0;
306 }
307
308 static int crypt_iv_plain64be_gen(struct crypt_config *cc, u8 *iv,
309 struct dm_crypt_request *dmreq)
310 {
311 memset(iv, 0, cc->iv_size);
312
313 *(__be64 *)&iv[cc->iv_size - sizeof(u64)] = cpu_to_be64(dmreq->iv_sector);
314
315 return 0;
316 }
317
318 static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
319 struct dm_crypt_request *dmreq)
320 {
321
322
323
324
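/*
 * ESSIV encryption of the IV is handled by the crypto API essiv template,
 * so just pass the plain64 sector number here.
 */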
325 memset(iv, 0, cc->iv_size);
326 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
327
328 return 0;
329 }
330
331 static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
332 const char *opts)
333 {
334 unsigned bs;
335 int log;
336
337 if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags))
338 bs = crypto_aead_blocksize(any_tfm_aead(cc));
339 else
340 bs = crypto_skcipher_blocksize(any_tfm(cc));
341 log = ilog2(bs);
342
343
344
345
346 if (1 << log != bs) {
347 ti->error = "cypher blocksize is not a power of 2";
348 return -EINVAL;
349 }
350
351 if (log > 9) {
352 ti->error = "cypher blocksize is > 512";
353 return -EINVAL;
354 }
355
356 cc->iv_gen_private.benbi.shift = 9 - log;
357
358 return 0;
359 }
360
361 static void crypt_iv_benbi_dtr(struct crypt_config *cc)
362 {
363 }
364
365 static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
366 struct dm_crypt_request *dmreq)
367 {
368 __be64 val;
369
370 memset(iv, 0, cc->iv_size - sizeof(u64));
371
372 val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
373 put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
374
375 return 0;
376 }
377
378 static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
379 struct dm_crypt_request *dmreq)
380 {
381 memset(iv, 0, cc->iv_size);
382
383 return 0;
384 }
385
386 static void crypt_iv_lmk_dtr(struct crypt_config *cc)
387 {
388 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
389
390 if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
391 crypto_free_shash(lmk->hash_tfm);
392 lmk->hash_tfm = NULL;
393
394 kzfree(lmk->seed);
395 lmk->seed = NULL;
396 }
397
398 static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
399 const char *opts)
400 {
401 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
402
403 if (cc->sector_size != (1 << SECTOR_SHIFT)) {
404 ti->error = "Unsupported sector size for LMK";
405 return -EINVAL;
406 }
407
408 lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
409 if (IS_ERR(lmk->hash_tfm)) {
410 ti->error = "Error initializing LMK hash";
411 return PTR_ERR(lmk->hash_tfm);
412 }
413
414
415 if (cc->key_parts == cc->tfms_count) {
416 lmk->seed = NULL;
417 return 0;
418 }
419
420 lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
421 if (!lmk->seed) {
422 crypt_iv_lmk_dtr(cc);
423 ti->error = "Error kmallocing seed storage in LMK";
424 return -ENOMEM;
425 }
426
427 return 0;
428 }
429
430 static int crypt_iv_lmk_init(struct crypt_config *cc)
431 {
432 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
433 int subkey_size = cc->key_size / cc->key_parts;
434
435
436 if (lmk->seed)
437 memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
438 crypto_shash_digestsize(lmk->hash_tfm));
439
440 return 0;
441 }
442
443 static int crypt_iv_lmk_wipe(struct crypt_config *cc)
444 {
445 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
446
447 if (lmk->seed)
448 memset(lmk->seed, 0, LMK_SEED_SIZE);
449
450 return 0;
451 }
452
453 static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
454 struct dm_crypt_request *dmreq,
455 u8 *data)
456 {
457 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
458 SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
459 struct md5_state md5state;
460 __le32 buf[4];
461 int i, r;
462
463 desc->tfm = lmk->hash_tfm;
464
465 r = crypto_shash_init(desc);
466 if (r)
467 return r;
468
469 if (lmk->seed) {
470 r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
471 if (r)
472 return r;
473 }
474
475
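/* Sector is always 512B, block size 16; hash the data of blocks 1-31 */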
476 r = crypto_shash_update(desc, data + 16, 16 * 31);
477 if (r)
478 return r;
479
480
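/* Mix in the sector number (low 32 bits, then the masked high bits) and fixed padding words before exporting the MD5 state as the IV */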
481 buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
482 buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
483 buf[2] = cpu_to_le32(4024);
484 buf[3] = 0;
485 r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
486 if (r)
487 return r;
488
489
490 r = crypto_shash_export(desc, &md5state);
491 if (r)
492 return r;
493
494 for (i = 0; i < MD5_HASH_WORDS; i++)
495 __cpu_to_le32s(&md5state.hash[i]);
496 memcpy(iv, &md5state.hash, cc->iv_size);
497
498 return 0;
499 }
500
501 static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
502 struct dm_crypt_request *dmreq)
503 {
504 struct scatterlist *sg;
505 u8 *src;
506 int r = 0;
507
508 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
509 sg = crypt_get_sg_data(cc, dmreq->sg_in);
510 src = kmap_atomic(sg_page(sg));
511 r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset);
512 kunmap_atomic(src);
513 } else
514 memset(iv, 0, cc->iv_size);
515
516 return r;
517 }
518
519 static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
520 struct dm_crypt_request *dmreq)
521 {
522 struct scatterlist *sg;
523 u8 *dst;
524 int r;
525
526 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
527 return 0;
528
529 sg = crypt_get_sg_data(cc, dmreq->sg_out);
530 dst = kmap_atomic(sg_page(sg));
531 r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);
532
533
534 if (!r)
535 crypto_xor(dst + sg->offset, iv, cc->iv_size);
536
537 kunmap_atomic(dst);
538 return r;
539 }
540
541 static void crypt_iv_tcw_dtr(struct crypt_config *cc)
542 {
543 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
544
545 kzfree(tcw->iv_seed);
546 tcw->iv_seed = NULL;
547 kzfree(tcw->whitening);
548 tcw->whitening = NULL;
549
550 if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
551 crypto_free_shash(tcw->crc32_tfm);
552 tcw->crc32_tfm = NULL;
553 }
554
555 static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
556 const char *opts)
557 {
558 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
559
560 if (cc->sector_size != (1 << SECTOR_SHIFT)) {
561 ti->error = "Unsupported sector size for TCW";
562 return -EINVAL;
563 }
564
565 if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
566 ti->error = "Wrong key size for TCW";
567 return -EINVAL;
568 }
569
570 tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
571 if (IS_ERR(tcw->crc32_tfm)) {
572 ti->error = "Error initializing CRC32 in TCW";
573 return PTR_ERR(tcw->crc32_tfm);
574 }
575
576 tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
577 tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
578 if (!tcw->iv_seed || !tcw->whitening) {
579 crypt_iv_tcw_dtr(cc);
580 ti->error = "Error allocating seed storage in TCW";
581 return -ENOMEM;
582 }
583
584 return 0;
585 }
586
587 static int crypt_iv_tcw_init(struct crypt_config *cc)
588 {
589 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
590 int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;
591
592 memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
593 memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
594 TCW_WHITENING_SIZE);
595
596 return 0;
597 }
598
599 static int crypt_iv_tcw_wipe(struct crypt_config *cc)
600 {
601 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
602
603 memset(tcw->iv_seed, 0, cc->iv_size);
604 memset(tcw->whitening, 0, TCW_WHITENING_SIZE);
605
606 return 0;
607 }
608
609 static int crypt_iv_tcw_whitening(struct crypt_config *cc,
610 struct dm_crypt_request *dmreq,
611 u8 *data)
612 {
613 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
614 __le64 sector = cpu_to_le64(dmreq->iv_sector);
615 u8 buf[TCW_WHITENING_SIZE];
616 SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
617 int i, r;
618
619
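/*
 * Mix the sector number into the whitening seed, run CRC32 over each
 * 32-bit word, fold the result and xor it over the whole 512-byte sector.
 */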
620 crypto_xor_cpy(buf, tcw->whitening, (u8 *)&sector, 8);
621 crypto_xor_cpy(&buf[8], tcw->whitening + 8, (u8 *)&sector, 8);
622
623
624 desc->tfm = tcw->crc32_tfm;
625 for (i = 0; i < 4; i++) {
626 r = crypto_shash_init(desc);
627 if (r)
628 goto out;
629 r = crypto_shash_update(desc, &buf[i * 4], 4);
630 if (r)
631 goto out;
632 r = crypto_shash_final(desc, &buf[i * 4]);
633 if (r)
634 goto out;
635 }
636 crypto_xor(&buf[0], &buf[12], 4);
637 crypto_xor(&buf[4], &buf[8], 4);
638
639
640 for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
641 crypto_xor(data + i * 8, buf, 8);
642 out:
643 memzero_explicit(buf, sizeof(buf));
644 return r;
645 }
646
647 static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
648 struct dm_crypt_request *dmreq)
649 {
650 struct scatterlist *sg;
651 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
652 __le64 sector = cpu_to_le64(dmreq->iv_sector);
653 u8 *src;
654 int r = 0;
655
656
657 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
658 sg = crypt_get_sg_data(cc, dmreq->sg_in);
659 src = kmap_atomic(sg_page(sg));
660 r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
661 kunmap_atomic(src);
662 }
663
664
665 crypto_xor_cpy(iv, tcw->iv_seed, (u8 *)&sector, 8);
666 if (cc->iv_size > 8)
667 crypto_xor_cpy(&iv[8], tcw->iv_seed + 8, (u8 *)&sector,
668 cc->iv_size - 8);
669
670 return r;
671 }
672
673 static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
674 struct dm_crypt_request *dmreq)
675 {
676 struct scatterlist *sg;
677 u8 *dst;
678 int r;
679
680 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
681 return 0;
682
683
684 sg = crypt_get_sg_data(cc, dmreq->sg_out);
685 dst = kmap_atomic(sg_page(sg));
686 r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
687 kunmap_atomic(dst);
688
689 return r;
690 }
691
692 static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv,
693 struct dm_crypt_request *dmreq)
694 {
695
696 get_random_bytes(iv, cc->iv_size);
697 return 0;
698 }
699
700 static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti,
701 const char *opts)
702 {
703 if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags)) {
704 ti->error = "AEAD transforms not supported for EBOIV";
705 return -EINVAL;
706 }
707
708 if (crypto_skcipher_blocksize(any_tfm(cc)) != cc->iv_size) {
709 ti->error = "Block size of EBOIV cipher does "
710 "not match IV size of block cipher";
711 return -EINVAL;
712 }
713
714 return 0;
715 }
716
717 static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
718 struct dm_crypt_request *dmreq)
719 {
720 u8 buf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64));
721 struct skcipher_request *req;
722 struct scatterlist src, dst;
723 struct crypto_wait wait;
724 int err;
725
726 req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO);
727 if (!req)
728 return -ENOMEM;
729
730 memset(buf, 0, cc->iv_size);
731 *(__le64 *)buf = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
732
733 sg_init_one(&src, page_address(ZERO_PAGE(0)), cc->iv_size);
734 sg_init_one(&dst, iv, cc->iv_size);
735 skcipher_request_set_crypt(req, &src, &dst, cc->iv_size, buf);
736 skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
737 err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
738 skcipher_request_free(req);
739
740 return err;
741 }
742
743 static const struct crypt_iv_operations crypt_iv_plain_ops = {
744 .generator = crypt_iv_plain_gen
745 };
746
747 static const struct crypt_iv_operations crypt_iv_plain64_ops = {
748 .generator = crypt_iv_plain64_gen
749 };
750
751 static const struct crypt_iv_operations crypt_iv_plain64be_ops = {
752 .generator = crypt_iv_plain64be_gen
753 };
754
755 static const struct crypt_iv_operations crypt_iv_essiv_ops = {
756 .generator = crypt_iv_essiv_gen
757 };
758
759 static const struct crypt_iv_operations crypt_iv_benbi_ops = {
760 .ctr = crypt_iv_benbi_ctr,
761 .dtr = crypt_iv_benbi_dtr,
762 .generator = crypt_iv_benbi_gen
763 };
764
765 static const struct crypt_iv_operations crypt_iv_null_ops = {
766 .generator = crypt_iv_null_gen
767 };
768
769 static const struct crypt_iv_operations crypt_iv_lmk_ops = {
770 .ctr = crypt_iv_lmk_ctr,
771 .dtr = crypt_iv_lmk_dtr,
772 .init = crypt_iv_lmk_init,
773 .wipe = crypt_iv_lmk_wipe,
774 .generator = crypt_iv_lmk_gen,
775 .post = crypt_iv_lmk_post
776 };
777
778 static const struct crypt_iv_operations crypt_iv_tcw_ops = {
779 .ctr = crypt_iv_tcw_ctr,
780 .dtr = crypt_iv_tcw_dtr,
781 .init = crypt_iv_tcw_init,
782 .wipe = crypt_iv_tcw_wipe,
783 .generator = crypt_iv_tcw_gen,
784 .post = crypt_iv_tcw_post
785 };
786
787 static struct crypt_iv_operations crypt_iv_random_ops = {
788 .generator = crypt_iv_random_gen
789 };
790
791 static struct crypt_iv_operations crypt_iv_eboiv_ops = {
792 .ctr = crypt_iv_eboiv_ctr,
793 .generator = crypt_iv_eboiv_gen
794 };
795
796
797
798
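/*
 * Integrity/AEAD helpers: when the cipher is an AEAD, extra per-sector
 * metadata (authentication tag and, optionally, a stored IV) travels in
 * the bio integrity payload.
 */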
799 static bool crypt_integrity_aead(struct crypt_config *cc)
800 {
801 return test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
802 }
803
804 static bool crypt_integrity_hmac(struct crypt_config *cc)
805 {
806 return crypt_integrity_aead(cc) && cc->key_mac_size;
807 }
808
809
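/* Get the scatterlist entry holding the sector data (AEAD requests prepend sector number and IV entries) */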
810 static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
811 struct scatterlist *sg)
812 {
813 if (unlikely(crypt_integrity_aead(cc)))
814 return &sg[2];
815
816 return sg;
817 }
818
819 static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
820 {
821 struct bio_integrity_payload *bip;
822 unsigned int tag_len;
823 int ret;
824
825 if (!bio_sectors(bio) || !io->cc->on_disk_tag_size)
826 return 0;
827
828 bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
829 if (IS_ERR(bip))
830 return PTR_ERR(bip);
831
832 tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);
833
834 bip->bip_iter.bi_size = tag_len;
835 bip->bip_iter.bi_sector = io->cc->start + io->sector;
836
837 ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata),
838 tag_len, offset_in_page(io->integrity_metadata));
839 if (unlikely(ret != tag_len))
840 return -ENOMEM;
841
842 return 0;
843 }
844
845 static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
846 {
847 #ifdef CONFIG_BLK_DEV_INTEGRITY
848 struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
849 struct mapped_device *md = dm_table_get_md(ti->table);
850
851
852 if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) {
853 ti->error = "Integrity profile not supported.";
854 return -EINVAL;
855 }
856
857 if (bi->tag_size != cc->on_disk_tag_size ||
858 bi->tuple_size != cc->on_disk_tag_size) {
859 ti->error = "Integrity profile tag size mismatch.";
860 return -EINVAL;
861 }
862 if (1 << bi->interval_exp != cc->sector_size) {
863 ti->error = "Integrity profile sector size mismatch.";
864 return -EINVAL;
865 }
866
867 if (crypt_integrity_aead(cc)) {
868 cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
869 DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md),
870 cc->integrity_tag_size, cc->integrity_iv_size);
871
872 if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) {
873 ti->error = "Integrity AEAD auth tag size is not supported.";
874 return -EINVAL;
875 }
876 } else if (cc->integrity_iv_size)
877 DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md),
878 cc->integrity_iv_size);
879
880 if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) {
881 ti->error = "Not enough space for integrity tag in the profile.";
882 return -EINVAL;
883 }
884
885 return 0;
886 #else
887 ti->error = "Integrity profile not supported.";
888 return -EINVAL;
889 #endif
890 }
891
892 static void crypt_convert_init(struct crypt_config *cc,
893 struct convert_context *ctx,
894 struct bio *bio_out, struct bio *bio_in,
895 sector_t sector)
896 {
897 ctx->bio_in = bio_in;
898 ctx->bio_out = bio_out;
899 if (bio_in)
900 ctx->iter_in = bio_in->bi_iter;
901 if (bio_out)
902 ctx->iter_out = bio_out->bi_iter;
903 ctx->cc_sector = sector + cc->iv_offset;
904 init_completion(&ctx->restart);
905 }
906
907 static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
908 void *req)
909 {
910 return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
911 }
912
913 static void *req_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq)
914 {
915 return (void *)((char *)dmreq - cc->dmreq_start);
916 }
917
918 static u8 *iv_of_dmreq(struct crypt_config *cc,
919 struct dm_crypt_request *dmreq)
920 {
921 if (crypt_integrity_aead(cc))
922 return (u8 *)ALIGN((unsigned long)(dmreq + 1),
923 crypto_aead_alignmask(any_tfm_aead(cc)) + 1);
924 else
925 return (u8 *)ALIGN((unsigned long)(dmreq + 1),
926 crypto_skcipher_alignmask(any_tfm(cc)) + 1);
927 }
928
929 static u8 *org_iv_of_dmreq(struct crypt_config *cc,
930 struct dm_crypt_request *dmreq)
931 {
932 return iv_of_dmreq(cc, dmreq) + cc->iv_size;
933 }
934
935 static __le64 *org_sector_of_dmreq(struct crypt_config *cc,
936 struct dm_crypt_request *dmreq)
937 {
938 u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;
939 return (__le64 *) ptr;
940 }
941
942 static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
943 struct dm_crypt_request *dmreq)
944 {
945 u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size +
946 cc->iv_size + sizeof(uint64_t);
947 return (unsigned int*)ptr;
948 }
949
950 static void *tag_from_dmreq(struct crypt_config *cc,
951 struct dm_crypt_request *dmreq)
952 {
953 struct convert_context *ctx = dmreq->ctx;
954 struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
955
956 return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) *
957 cc->on_disk_tag_size];
958 }
959
960 static void *iv_tag_from_dmreq(struct crypt_config *cc,
961 struct dm_crypt_request *dmreq)
962 {
963 return tag_from_dmreq(cc, dmreq) + cc->integrity_tag_size;
964 }
965
966 static int crypt_convert_block_aead(struct crypt_config *cc,
967 struct convert_context *ctx,
968 struct aead_request *req,
969 unsigned int tag_offset)
970 {
971 struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
972 struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
973 struct dm_crypt_request *dmreq;
974 u8 *iv, *org_iv, *tag_iv, *tag;
975 __le64 *sector;
976 int r = 0;
977
978 BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);
979
980
981 if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
982 return -EIO;
983
984 dmreq = dmreq_of_req(cc, req);
985 dmreq->iv_sector = ctx->cc_sector;
986 if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
987 dmreq->iv_sector >>= cc->sector_shift;
988 dmreq->ctx = ctx;
989
990 *org_tag_of_dmreq(cc, dmreq) = tag_offset;
991
992 sector = org_sector_of_dmreq(cc, dmreq);
993 *sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);
994
995 iv = iv_of_dmreq(cc, dmreq);
996 org_iv = org_iv_of_dmreq(cc, dmreq);
997 tag = tag_from_dmreq(cc, dmreq);
998 tag_iv = iv_tag_from_dmreq(cc, dmreq);
999
1000
1001
1002
1003
1004
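/*
 * AEAD request layout:
 * sg[0] - associated sector number (authenticated only)
 * sg[1] - original IV (authenticated only)
 * sg[2] - sector data in/out (authenticated and encrypted)
 * sg[3] - authentication tag in/out
 */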
1005 sg_init_table(dmreq->sg_in, 4);
1006 sg_set_buf(&dmreq->sg_in[0], sector, sizeof(uint64_t));
1007 sg_set_buf(&dmreq->sg_in[1], org_iv, cc->iv_size);
1008 sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
1009 sg_set_buf(&dmreq->sg_in[3], tag, cc->integrity_tag_size);
1010
1011 sg_init_table(dmreq->sg_out, 4);
1012 sg_set_buf(&dmreq->sg_out[0], sector, sizeof(uint64_t));
1013 sg_set_buf(&dmreq->sg_out[1], org_iv, cc->iv_size);
1014 sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
1015 sg_set_buf(&dmreq->sg_out[3], tag, cc->integrity_tag_size);
1016
1017 if (cc->iv_gen_ops) {
1018
1019 if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
1020 memcpy(org_iv, tag_iv, cc->iv_size);
1021 } else {
1022 r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
1023 if (r < 0)
1024 return r;
1025
1026 if (cc->integrity_iv_size)
1027 memcpy(tag_iv, org_iv, cc->iv_size);
1028 }
1029
1030 memcpy(iv, org_iv, cc->iv_size);
1031 }
1032
1033 aead_request_set_ad(req, sizeof(uint64_t) + cc->iv_size);
1034 if (bio_data_dir(ctx->bio_in) == WRITE) {
1035 aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
1036 cc->sector_size, iv);
1037 r = crypto_aead_encrypt(req);
1038 if (cc->integrity_tag_size + cc->integrity_iv_size != cc->on_disk_tag_size)
1039 memset(tag + cc->integrity_tag_size + cc->integrity_iv_size, 0,
1040 cc->on_disk_tag_size - (cc->integrity_tag_size + cc->integrity_iv_size));
1041 } else {
1042 aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
1043 cc->sector_size + cc->integrity_tag_size, iv);
1044 r = crypto_aead_decrypt(req);
1045 }
1046
1047 if (r == -EBADMSG) {
1048 char b[BDEVNAME_SIZE];
1049 DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b),
1050 (unsigned long long)le64_to_cpu(*sector));
1051 }
1052
1053 if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
1054 r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
1055
1056 bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
1057 bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
1058
1059 return r;
1060 }
1061
1062 static int crypt_convert_block_skcipher(struct crypt_config *cc,
1063 struct convert_context *ctx,
1064 struct skcipher_request *req,
1065 unsigned int tag_offset)
1066 {
1067 struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
1068 struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
1069 struct scatterlist *sg_in, *sg_out;
1070 struct dm_crypt_request *dmreq;
1071 u8 *iv, *org_iv, *tag_iv;
1072 __le64 *sector;
1073 int r = 0;
1074
1075
1076 if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
1077 return -EIO;
1078
1079 dmreq = dmreq_of_req(cc, req);
1080 dmreq->iv_sector = ctx->cc_sector;
1081 if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
1082 dmreq->iv_sector >>= cc->sector_shift;
1083 dmreq->ctx = ctx;
1084
1085 *org_tag_of_dmreq(cc, dmreq) = tag_offset;
1086
1087 iv = iv_of_dmreq(cc, dmreq);
1088 org_iv = org_iv_of_dmreq(cc, dmreq);
1089 tag_iv = iv_tag_from_dmreq(cc, dmreq);
1090
1091 sector = org_sector_of_dmreq(cc, dmreq);
1092 *sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);
1093
1094
1095 sg_in = &dmreq->sg_in[0];
1096 sg_out = &dmreq->sg_out[0];
1097
1098 sg_init_table(sg_in, 1);
1099 sg_set_page(sg_in, bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
1100
1101 sg_init_table(sg_out, 1);
1102 sg_set_page(sg_out, bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
1103
1104 if (cc->iv_gen_ops) {
1105
1106 if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
1107 memcpy(org_iv, tag_iv, cc->integrity_iv_size);
1108 } else {
1109 r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
1110 if (r < 0)
1111 return r;
1112
1113 if (cc->integrity_iv_size)
1114 memcpy(tag_iv, org_iv, cc->integrity_iv_size);
1115 }
1116
1117 memcpy(iv, org_iv, cc->iv_size);
1118 }
1119
1120 skcipher_request_set_crypt(req, sg_in, sg_out, cc->sector_size, iv);
1121
1122 if (bio_data_dir(ctx->bio_in) == WRITE)
1123 r = crypto_skcipher_encrypt(req);
1124 else
1125 r = crypto_skcipher_decrypt(req);
1126
1127 if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
1128 r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
1129
1130 bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
1131 bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
1132
1133 return r;
1134 }
1135
1136 static void kcryptd_async_done(struct crypto_async_request *async_req,
1137 int error);
1138
1139 static void crypt_alloc_req_skcipher(struct crypt_config *cc,
1140 struct convert_context *ctx)
1141 {
1142 unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
1143
1144 if (!ctx->r.req)
1145 ctx->r.req = mempool_alloc(&cc->req_pool, GFP_NOIO);
1146
1147 skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);
1148
1149
1150
1151
1152
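/*
 * Use CRYPTO_TFM_REQ_MAY_BACKLOG so the cipher driver can queue the
 * request internally instead of failing when its queue is full.
 */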
1153 skcipher_request_set_callback(ctx->r.req,
1154 CRYPTO_TFM_REQ_MAY_BACKLOG,
1155 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
1156 }
1157
1158 static void crypt_alloc_req_aead(struct crypt_config *cc,
1159 struct convert_context *ctx)
1160 {
1161 if (!ctx->r.req_aead)
1162 ctx->r.req_aead = mempool_alloc(&cc->req_pool, GFP_NOIO);
1163
1164 aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);
1165
1166
1167
1168
1169
1170 aead_request_set_callback(ctx->r.req_aead,
1171 CRYPTO_TFM_REQ_MAY_BACKLOG,
1172 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
1173 }
1174
1175 static void crypt_alloc_req(struct crypt_config *cc,
1176 struct convert_context *ctx)
1177 {
1178 if (crypt_integrity_aead(cc))
1179 crypt_alloc_req_aead(cc, ctx);
1180 else
1181 crypt_alloc_req_skcipher(cc, ctx);
1182 }
1183
1184 static void crypt_free_req_skcipher(struct crypt_config *cc,
1185 struct skcipher_request *req, struct bio *base_bio)
1186 {
1187 struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
1188
1189 if ((struct skcipher_request *)(io + 1) != req)
1190 mempool_free(req, &cc->req_pool);
1191 }
1192
1193 static void crypt_free_req_aead(struct crypt_config *cc,
1194 struct aead_request *req, struct bio *base_bio)
1195 {
1196 struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
1197
1198 if ((struct aead_request *)(io + 1) != req)
1199 mempool_free(req, &cc->req_pool);
1200 }
1201
1202 static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
1203 {
1204 if (crypt_integrity_aead(cc))
1205 crypt_free_req_aead(cc, req, base_bio);
1206 else
1207 crypt_free_req_skcipher(cc, req, base_bio);
1208 }
1209
1210
1211
1212
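/*
 * Encrypt / decrypt data from one bio to another one (can be the same one).
 */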
1213 static blk_status_t crypt_convert(struct crypt_config *cc,
1214 struct convert_context *ctx)
1215 {
1216 unsigned int tag_offset = 0;
1217 unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
1218 int r;
1219
1220 atomic_set(&ctx->cc_pending, 1);
1221
1222 while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
1223
1224 crypt_alloc_req(cc, ctx);
1225 atomic_inc(&ctx->cc_pending);
1226
1227 if (crypt_integrity_aead(cc))
1228 r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset);
1229 else
1230 r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset);
1231
1232 switch (r) {
1233
1234
1235
1236
1237 case -EBUSY:
1238 wait_for_completion(&ctx->restart);
1239 reinit_completion(&ctx->restart);
1240
1241
1242
1243
1244
1245 case -EINPROGRESS:
1246 ctx->r.req = NULL;
1247 ctx->cc_sector += sector_step;
1248 tag_offset++;
1249 continue;
1250
1251
1252
1253 case 0:
1254 atomic_dec(&ctx->cc_pending);
1255 ctx->cc_sector += sector_step;
1256 tag_offset++;
1257 cond_resched();
1258 continue;
1259
1260
1261
1262 case -EBADMSG:
1263 atomic_dec(&ctx->cc_pending);
1264 return BLK_STS_PROTECTION;
1265
1266
1267
1268 default:
1269 atomic_dec(&ctx->cc_pending);
1270 return BLK_STS_IOERR;
1271 }
1272 }
1273
1274 return 0;
1275 }
1276
1277 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
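/*
 * Generate a new unfragmented bio with the given size
 * (needed for the whole encrypted write request).
 *
 * Pages are first allocated from the page pool with GFP_NOWAIT; if that
 * fails, the partially built bio is freed and the allocation is retried
 * with __GFP_DIRECT_RECLAIM while holding bio_alloc_lock, so that only one
 * such allocation at a time can wait on the shared mempool.
 */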
1296 static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
1297 {
1298 struct crypt_config *cc = io->cc;
1299 struct bio *clone;
1300 unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1301 gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
1302 unsigned i, len, remaining_size;
1303 struct page *page;
1304
1305 retry:
1306 if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
1307 mutex_lock(&cc->bio_alloc_lock);
1308
1309 clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, &cc->bs);
1310 if (!clone)
1311 goto out;
1312
1313 clone_init(io, clone);
1314
1315 remaining_size = size;
1316
1317 for (i = 0; i < nr_iovecs; i++) {
1318 page = mempool_alloc(&cc->page_pool, gfp_mask);
1319 if (!page) {
1320 crypt_free_buffer_pages(cc, clone);
1321 bio_put(clone);
1322 gfp_mask |= __GFP_DIRECT_RECLAIM;
1323 goto retry;
1324 }
1325
1326 len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;
1327
1328 bio_add_page(clone, page, len, 0);
1329
1330 remaining_size -= len;
1331 }
1332
1333
1334 if (dm_crypt_integrity_io_alloc(io, clone)) {
1335 crypt_free_buffer_pages(cc, clone);
1336 bio_put(clone);
1337 clone = NULL;
1338 }
1339 out:
1340 if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
1341 mutex_unlock(&cc->bio_alloc_lock);
1342
1343 return clone;
1344 }
1345
1346 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
1347 {
1348 struct bio_vec *bv;
1349 struct bvec_iter_all iter_all;
1350
1351 bio_for_each_segment_all(bv, clone, iter_all) {
1352 BUG_ON(!bv->bv_page);
1353 mempool_free(bv->bv_page, &cc->page_pool);
1354 }
1355 }
1356
1357 static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
1358 struct bio *bio, sector_t sector)
1359 {
1360 io->cc = cc;
1361 io->base_bio = bio;
1362 io->sector = sector;
1363 io->error = 0;
1364 io->ctx.r.req = NULL;
1365 io->integrity_metadata = NULL;
1366 io->integrity_metadata_from_pool = false;
1367 atomic_set(&io->io_pending, 0);
1368 }
1369
1370 static void crypt_inc_pending(struct dm_crypt_io *io)
1371 {
1372 atomic_inc(&io->io_pending);
1373 }
1374
1375
1376
1377
1378
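/*
 * One of the bios was finished. Check for completion of the whole
 * request, clean up the per-request resources and end the base bio.
 */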
1379 static void crypt_dec_pending(struct dm_crypt_io *io)
1380 {
1381 struct crypt_config *cc = io->cc;
1382 struct bio *base_bio = io->base_bio;
1383 blk_status_t error = io->error;
1384
1385 if (!atomic_dec_and_test(&io->io_pending))
1386 return;
1387
1388 if (io->ctx.r.req)
1389 crypt_free_req(cc, io->ctx.r.req, base_bio);
1390
1391 if (unlikely(io->integrity_metadata_from_pool))
1392 mempool_free(io->integrity_metadata, &io->cc->tag_pool);
1393 else
1394 kfree(io->integrity_metadata);
1395
1396 base_bio->bi_status = error;
1397 bio_endio(base_bio);
1398 }
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
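/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption;
 * kcryptd_io performs the IO submission. They are kept separate so the
 * final stages are not starved by new requests blocking on memory
 * allocation in the first stages.
 */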
1417 static void crypt_endio(struct bio *clone)
1418 {
1419 struct dm_crypt_io *io = clone->bi_private;
1420 struct crypt_config *cc = io->cc;
1421 unsigned rw = bio_data_dir(clone);
1422 blk_status_t error;
1423
1424
1425
1426
1427 if (rw == WRITE)
1428 crypt_free_buffer_pages(cc, clone);
1429
1430 error = clone->bi_status;
1431 bio_put(clone);
1432
1433 if (rw == READ && !error) {
1434 kcryptd_queue_crypt(io);
1435 return;
1436 }
1437
1438 if (unlikely(error))
1439 io->error = error;
1440
1441 crypt_dec_pending(io);
1442 }
1443
1444 static void clone_init(struct dm_crypt_io *io, struct bio *clone)
1445 {
1446 struct crypt_config *cc = io->cc;
1447
1448 clone->bi_private = io;
1449 clone->bi_end_io = crypt_endio;
1450 bio_set_dev(clone, cc->dev->bdev);
1451 clone->bi_opf = io->base_bio->bi_opf;
1452 }
1453
1454 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
1455 {
1456 struct crypt_config *cc = io->cc;
1457 struct bio *clone;
1458
1459
1460
1461
1462
1463
1464
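/*
 * The original biovec array is needed to decrypt the data in place after
 * the read completes; thanks to immutable biovecs a lightweight clone
 * sharing the biovecs (bio_clone_fast) is sufficient here.
 */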
1465 clone = bio_clone_fast(io->base_bio, gfp, &cc->bs);
1466 if (!clone)
1467 return 1;
1468
1469 crypt_inc_pending(io);
1470
1471 clone_init(io, clone);
1472 clone->bi_iter.bi_sector = cc->start + io->sector;
1473
1474 if (dm_crypt_integrity_io_alloc(io, clone)) {
1475 crypt_dec_pending(io);
1476 bio_put(clone);
1477 return 1;
1478 }
1479
1480 generic_make_request(clone);
1481 return 0;
1482 }
1483
1484 static void kcryptd_io_read_work(struct work_struct *work)
1485 {
1486 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
1487
1488 crypt_inc_pending(io);
1489 if (kcryptd_io_read(io, GFP_NOIO))
1490 io->error = BLK_STS_RESOURCE;
1491 crypt_dec_pending(io);
1492 }
1493
1494 static void kcryptd_queue_read(struct dm_crypt_io *io)
1495 {
1496 struct crypt_config *cc = io->cc;
1497
1498 INIT_WORK(&io->work, kcryptd_io_read_work);
1499 queue_work(cc->io_queue, &io->work);
1500 }
1501
1502 static void kcryptd_io_write(struct dm_crypt_io *io)
1503 {
1504 struct bio *clone = io->ctx.bio_out;
1505
1506 generic_make_request(clone);
1507 }
1508
1509 #define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
1510
1511 static int dmcrypt_write(void *data)
1512 {
1513 struct crypt_config *cc = data;
1514 struct dm_crypt_io *io;
1515
1516 while (1) {
1517 struct rb_root write_tree;
1518 struct blk_plug plug;
1519
1520 spin_lock_irq(&cc->write_thread_lock);
1521 continue_locked:
1522
1523 if (!RB_EMPTY_ROOT(&cc->write_tree))
1524 goto pop_from_list;
1525
1526 set_current_state(TASK_INTERRUPTIBLE);
1527
1528 spin_unlock_irq(&cc->write_thread_lock);
1529
1530 if (unlikely(kthread_should_stop())) {
1531 set_current_state(TASK_RUNNING);
1532 break;
1533 }
1534
1535 schedule();
1536
1537 set_current_state(TASK_RUNNING);
1538 spin_lock_irq(&cc->write_thread_lock);
1539 goto continue_locked;
1540
1541 pop_from_list:
1542 write_tree = cc->write_tree;
1543 cc->write_tree = RB_ROOT;
1544 spin_unlock_irq(&cc->write_thread_lock);
1545
1546 BUG_ON(rb_parent(write_tree.rb_node));
1547
1548
1549
1550
1551
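/*
 * Pop the collected writes and submit them in sector order
 * (the rbtree is keyed by sector) under a single plug.
 */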
1552 blk_start_plug(&plug);
1553 do {
1554 io = crypt_io_from_node(rb_first(&write_tree));
1555 rb_erase(&io->rb_node, &write_tree);
1556 kcryptd_io_write(io);
1557 } while (!RB_EMPTY_ROOT(&write_tree));
1558 blk_finish_plug(&plug);
1559 }
1560 return 0;
1561 }
1562
1563 static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
1564 {
1565 struct bio *clone = io->ctx.bio_out;
1566 struct crypt_config *cc = io->cc;
1567 unsigned long flags;
1568 sector_t sector;
1569 struct rb_node **rbp, *parent;
1570
1571 if (unlikely(io->error)) {
1572 crypt_free_buffer_pages(cc, clone);
1573 bio_put(clone);
1574 crypt_dec_pending(io);
1575 return;
1576 }
1577
1578
1579 BUG_ON(io->ctx.iter_out.bi_size);
1580
1581 clone->bi_iter.bi_sector = cc->start + io->sector;
1582
1583 if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) {
1584 generic_make_request(clone);
1585 return;
1586 }
1587
1588 spin_lock_irqsave(&cc->write_thread_lock, flags);
1589 if (RB_EMPTY_ROOT(&cc->write_tree))
1590 wake_up_process(cc->write_thread);
1591 rbp = &cc->write_tree.rb_node;
1592 parent = NULL;
1593 sector = io->sector;
1594 while (*rbp) {
1595 parent = *rbp;
1596 if (sector < crypt_io_from_node(parent)->sector)
1597 rbp = &(*rbp)->rb_left;
1598 else
1599 rbp = &(*rbp)->rb_right;
1600 }
1601 rb_link_node(&io->rb_node, parent, rbp);
1602 rb_insert_color(&io->rb_node, &cc->write_tree);
1603 spin_unlock_irqrestore(&cc->write_thread_lock, flags);
1604 }
1605
1606 static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
1607 {
1608 struct crypt_config *cc = io->cc;
1609 struct bio *clone;
1610 int crypt_finished;
1611 sector_t sector = io->sector;
1612 blk_status_t r;
1613
1614
1615
1616
1617 crypt_inc_pending(io);
1618 crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
1619
1620 clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
1621 if (unlikely(!clone)) {
1622 io->error = BLK_STS_IOERR;
1623 goto dec;
1624 }
1625
1626 io->ctx.bio_out = clone;
1627 io->ctx.iter_out = clone->bi_iter;
1628
1629 sector += bio_sectors(clone);
1630
1631 crypt_inc_pending(io);
1632 r = crypt_convert(cc, &io->ctx);
1633 if (r)
1634 io->error = r;
1635 crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
1636
1637
1638 if (crypt_finished) {
1639 kcryptd_crypt_write_io_submit(io, 0);
1640 io->sector = sector;
1641 }
1642
1643 dec:
1644 crypt_dec_pending(io);
1645 }
1646
1647 static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
1648 {
1649 crypt_dec_pending(io);
1650 }
1651
1652 static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
1653 {
1654 struct crypt_config *cc = io->cc;
1655 blk_status_t r;
1656
1657 crypt_inc_pending(io);
1658
1659 crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
1660 io->sector);
1661
1662 r = crypt_convert(cc, &io->ctx);
1663 if (r)
1664 io->error = r;
1665
1666 if (atomic_dec_and_test(&io->ctx.cc_pending))
1667 kcryptd_crypt_read_done(io);
1668
1669 crypt_dec_pending(io);
1670 }
1671
1672 static void kcryptd_async_done(struct crypto_async_request *async_req,
1673 int error)
1674 {
1675 struct dm_crypt_request *dmreq = async_req->data;
1676 struct convert_context *ctx = dmreq->ctx;
1677 struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
1678 struct crypt_config *cc = io->cc;
1679
1680
1681
1682
1683
1684
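/*
 * A request from the crypto driver backlog is going to be processed now;
 * wake up crypt_convert(), which is waiting on ctx->restart.
 */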
1685 if (error == -EINPROGRESS) {
1686 complete(&ctx->restart);
1687 return;
1688 }
1689
1690 if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
1691 error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);
1692
1693 if (error == -EBADMSG) {
1694 char b[BDEVNAME_SIZE];
1695 DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b),
1696 (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)));
1697 io->error = BLK_STS_PROTECTION;
1698 } else if (error < 0)
1699 io->error = BLK_STS_IOERR;
1700
1701 crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
1702
1703 if (!atomic_dec_and_test(&ctx->cc_pending))
1704 return;
1705
1706 if (bio_data_dir(io->base_bio) == READ)
1707 kcryptd_crypt_read_done(io);
1708 else
1709 kcryptd_crypt_write_io_submit(io, 1);
1710 }
1711
1712 static void kcryptd_crypt(struct work_struct *work)
1713 {
1714 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
1715
1716 if (bio_data_dir(io->base_bio) == READ)
1717 kcryptd_crypt_read_convert(io);
1718 else
1719 kcryptd_crypt_write_convert(io);
1720 }
1721
1722 static void kcryptd_queue_crypt(struct dm_crypt_io *io)
1723 {
1724 struct crypt_config *cc = io->cc;
1725
1726 INIT_WORK(&io->work, kcryptd_crypt);
1727 queue_work(cc->crypt_queue, &io->work);
1728 }
1729
1730 static void crypt_free_tfms_aead(struct crypt_config *cc)
1731 {
1732 if (!cc->cipher_tfm.tfms_aead)
1733 return;
1734
1735 if (cc->cipher_tfm.tfms_aead[0] && !IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
1736 crypto_free_aead(cc->cipher_tfm.tfms_aead[0]);
1737 cc->cipher_tfm.tfms_aead[0] = NULL;
1738 }
1739
1740 kfree(cc->cipher_tfm.tfms_aead);
1741 cc->cipher_tfm.tfms_aead = NULL;
1742 }
1743
1744 static void crypt_free_tfms_skcipher(struct crypt_config *cc)
1745 {
1746 unsigned i;
1747
1748 if (!cc->cipher_tfm.tfms)
1749 return;
1750
1751 for (i = 0; i < cc->tfms_count; i++)
1752 if (cc->cipher_tfm.tfms[i] && !IS_ERR(cc->cipher_tfm.tfms[i])) {
1753 crypto_free_skcipher(cc->cipher_tfm.tfms[i]);
1754 cc->cipher_tfm.tfms[i] = NULL;
1755 }
1756
1757 kfree(cc->cipher_tfm.tfms);
1758 cc->cipher_tfm.tfms = NULL;
1759 }
1760
1761 static void crypt_free_tfms(struct crypt_config *cc)
1762 {
1763 if (crypt_integrity_aead(cc))
1764 crypt_free_tfms_aead(cc);
1765 else
1766 crypt_free_tfms_skcipher(cc);
1767 }
1768
1769 static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
1770 {
1771 unsigned i;
1772 int err;
1773
1774 cc->cipher_tfm.tfms = kcalloc(cc->tfms_count,
1775 sizeof(struct crypto_skcipher *),
1776 GFP_KERNEL);
1777 if (!cc->cipher_tfm.tfms)
1778 return -ENOMEM;
1779
1780 for (i = 0; i < cc->tfms_count; i++) {
1781 cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0, 0);
1782 if (IS_ERR(cc->cipher_tfm.tfms[i])) {
1783 err = PTR_ERR(cc->cipher_tfm.tfms[i]);
1784 crypt_free_tfms(cc);
1785 return err;
1786 }
1787 }
1788
1789
1790
1791
1792
1793
1794 DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
1795 crypto_skcipher_alg(any_tfm(cc))->base.cra_driver_name);
1796 return 0;
1797 }
1798
1799 static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode)
1800 {
1801 int err;
1802
1803 cc->cipher_tfm.tfms = kmalloc(sizeof(struct crypto_aead *), GFP_KERNEL);
1804 if (!cc->cipher_tfm.tfms)
1805 return -ENOMEM;
1806
1807 cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0, 0);
1808 if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
1809 err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]);
1810 crypt_free_tfms(cc);
1811 return err;
1812 }
1813
1814 DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
1815 crypto_aead_alg(any_tfm_aead(cc))->base.cra_driver_name);
1816 return 0;
1817 }
1818
1819 static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
1820 {
1821 if (crypt_integrity_aead(cc))
1822 return crypt_alloc_tfms_aead(cc, ciphermode);
1823 else
1824 return crypt_alloc_tfms_skcipher(cc, ciphermode);
1825 }
1826
1827 static unsigned crypt_subkey_size(struct crypt_config *cc)
1828 {
1829 return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
1830 }
1831
1832 static unsigned crypt_authenckey_size(struct crypt_config *cc)
1833 {
1834 return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param));
1835 }
1836
1837
1838
1839
1840
1841
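/*
 * Pack the key into the authenc() format expected by the crypto API:
 * an rtattr carrying enckeylen, followed by the authentication key
 * and then the encryption key.
 */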
1842 static void crypt_copy_authenckey(char *p, const void *key,
1843 unsigned enckeylen, unsigned authkeylen)
1844 {
1845 struct crypto_authenc_key_param *param;
1846 struct rtattr *rta;
1847
1848 rta = (struct rtattr *)p;
1849 param = RTA_DATA(rta);
1850 param->enckeylen = cpu_to_be32(enckeylen);
1851 rta->rta_len = RTA_LENGTH(sizeof(*param));
1852 rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
1853 p += RTA_SPACE(sizeof(*param));
1854 memcpy(p, key + enckeylen, authkeylen);
1855 p += authkeylen;
1856 memcpy(p, key, enckeylen);
1857 }
1858
1859 static int crypt_setkey(struct crypt_config *cc)
1860 {
1861 unsigned subkey_size;
1862 int err = 0, i, r;
1863
1864
1865 subkey_size = crypt_subkey_size(cc);
1866
1867 if (crypt_integrity_hmac(cc)) {
1868 if (subkey_size < cc->key_mac_size)
1869 return -EINVAL;
1870
1871 crypt_copy_authenckey(cc->authenc_key, cc->key,
1872 subkey_size - cc->key_mac_size,
1873 cc->key_mac_size);
1874 }
1875
1876 for (i = 0; i < cc->tfms_count; i++) {
1877 if (crypt_integrity_hmac(cc))
1878 r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
1879 cc->authenc_key, crypt_authenckey_size(cc));
1880 else if (crypt_integrity_aead(cc))
1881 r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
1882 cc->key + (i * subkey_size),
1883 subkey_size);
1884 else
1885 r = crypto_skcipher_setkey(cc->cipher_tfm.tfms[i],
1886 cc->key + (i * subkey_size),
1887 subkey_size);
1888 if (r)
1889 err = r;
1890 }
1891
1892 if (crypt_integrity_hmac(cc))
1893 memzero_explicit(cc->authenc_key, crypt_authenckey_size(cc));
1894
1895 return err;
1896 }
1897
1898 #ifdef CONFIG_KEYS
1899
1900 static bool contains_whitespace(const char *str)
1901 {
1902 while (*str)
1903 if (isspace(*str++))
1904 return true;
1905 return false;
1906 }
1907
1908 static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
1909 {
1910 char *new_key_string, *key_desc;
1911 int ret;
1912 struct key *key;
1913 const struct user_key_payload *ukp;
1914
1915
1916
1917
1918
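/*
 * Reject key strings containing whitespace, since dm table arguments
 * are whitespace separated and cannot be escaped on the status path.
 */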
1919 if (contains_whitespace(key_string)) {
1920 DMERR("whitespace chars not allowed in key string");
1921 return -EINVAL;
1922 }
1923
1924
1925 key_desc = strpbrk(key_string, ":");
1926 if (!key_desc || key_desc == key_string || !strlen(key_desc + 1))
1927 return -EINVAL;
1928
1929 if (strncmp(key_string, "logon:", key_desc - key_string + 1) &&
1930 strncmp(key_string, "user:", key_desc - key_string + 1))
1931 return -EINVAL;
1932
1933 new_key_string = kstrdup(key_string, GFP_KERNEL);
1934 if (!new_key_string)
1935 return -ENOMEM;
1936
1937 key = request_key(key_string[0] == 'l' ? &key_type_logon : &key_type_user,
1938 key_desc + 1, NULL);
1939 if (IS_ERR(key)) {
1940 kzfree(new_key_string);
1941 return PTR_ERR(key);
1942 }
1943
1944 down_read(&key->sem);
1945
1946 ukp = user_key_payload_locked(key);
1947 if (!ukp) {
1948 up_read(&key->sem);
1949 key_put(key);
1950 kzfree(new_key_string);
1951 return -EKEYREVOKED;
1952 }
1953
1954 if (cc->key_size != ukp->datalen) {
1955 up_read(&key->sem);
1956 key_put(key);
1957 kzfree(new_key_string);
1958 return -EINVAL;
1959 }
1960
1961 memcpy(cc->key, ukp->data, cc->key_size);
1962
1963 up_read(&key->sem);
1964 key_put(key);
1965
1966
1967 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
1968
1969 ret = crypt_setkey(cc);
1970
1971 if (!ret) {
1972 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
1973 kzfree(cc->key_string);
1974 cc->key_string = new_key_string;
1975 } else
1976 kzfree(new_key_string);
1977
1978 return ret;
1979 }
1980
1981 static int get_key_size(char **key_string)
1982 {
1983 char *colon, dummy;
1984 int ret;
1985
1986 if (*key_string[0] != ':')
1987 return strlen(*key_string) >> 1;
1988
1989
1990 colon = strpbrk(*key_string + 1, ":");
1991 if (!colon)
1992 return -EINVAL;
1993
1994 if (sscanf(*key_string + 1, "%u%c", &ret, &dummy) != 2 || dummy != ':')
1995 return -EINVAL;
1996
1997 *key_string = colon;
1998
1999
2000
2001 return ret;
2002 }
2003
2004 #else
2005
2006 static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
2007 {
2008 return -EINVAL;
2009 }
2010
2011 static int get_key_size(char **key_string)
2012 {
2013 return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1;
2014 }
2015
2016 #endif
2017
2018 static int crypt_set_key(struct crypt_config *cc, char *key)
2019 {
2020 int r = -EINVAL;
2021 int key_string_len = strlen(key);
2022
2023
2024 if (!cc->key_size && strcmp(key, "-"))
2025 goto out;
2026
2027
2028 if (key[0] == ':') {
2029 r = crypt_set_keyring_key(cc, key + 1);
2030 goto out;
2031 }
2032
2033
2034 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2035
2036
2037 kzfree(cc->key_string);
2038 cc->key_string = NULL;
2039
2040
2041 if (cc->key_size && hex2bin(cc->key, key, cc->key_size) < 0)
2042 goto out;
2043
2044 r = crypt_setkey(cc);
2045 if (!r)
2046 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2047
2048 out:
2049
2050 memset(key, '0', key_string_len);
2051
2052 return r;
2053 }
2054
2055 static int crypt_wipe_key(struct crypt_config *cc)
2056 {
2057 int r;
2058
2059 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2060 get_random_bytes(&cc->key, cc->key_size);
2061
2062
2063 if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
2064 r = cc->iv_gen_ops->wipe(cc);
2065 if (r)
2066 return r;
2067 }
2068
2069 kzfree(cc->key_string);
2070 cc->key_string = NULL;
2071 r = crypt_setkey(cc);
2072 memset(&cc->key, 0, cc->key_size * sizeof(u8));
2073
2074 return r;
2075 }
2076
2077 static void crypt_calculate_pages_per_client(void)
2078 {
2079 unsigned long pages = (totalram_pages() - totalhigh_pages()) * DM_CRYPT_MEMORY_PERCENT / 100;
2080
2081 if (!dm_crypt_clients_n)
2082 return;
2083
2084 pages /= dm_crypt_clients_n;
2085 if (pages < DM_CRYPT_MIN_PAGES_PER_CLIENT)
2086 pages = DM_CRYPT_MIN_PAGES_PER_CLIENT;
2087 dm_crypt_pages_per_client = pages;
2088 }
2089
2090 static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data)
2091 {
2092 struct crypt_config *cc = pool_data;
2093 struct page *page;
2094
2095 if (unlikely(percpu_counter_compare(&cc->n_allocated_pages, dm_crypt_pages_per_client) >= 0) &&
2096 likely(gfp_mask & __GFP_NORETRY))
2097 return NULL;
2098
2099 page = alloc_page(gfp_mask);
2100 if (likely(page != NULL))
2101 percpu_counter_add(&cc->n_allocated_pages, 1);
2102
2103 return page;
2104 }
2105
2106 static void crypt_page_free(void *page, void *pool_data)
2107 {
2108 struct crypt_config *cc = pool_data;
2109
2110 __free_page(page);
2111 percpu_counter_sub(&cc->n_allocated_pages, 1);
2112 }
2113
2114 static void crypt_dtr(struct dm_target *ti)
2115 {
2116 struct crypt_config *cc = ti->private;
2117
2118 ti->private = NULL;
2119
2120 if (!cc)
2121 return;
2122
2123 if (cc->write_thread)
2124 kthread_stop(cc->write_thread);
2125
2126 if (cc->io_queue)
2127 destroy_workqueue(cc->io_queue);
2128 if (cc->crypt_queue)
2129 destroy_workqueue(cc->crypt_queue);
2130
2131 crypt_free_tfms(cc);
2132
2133 bioset_exit(&cc->bs);
2134
2135 mempool_exit(&cc->page_pool);
2136 mempool_exit(&cc->req_pool);
2137 mempool_exit(&cc->tag_pool);
2138
2139 WARN_ON(percpu_counter_sum(&cc->n_allocated_pages) != 0);
2140 percpu_counter_destroy(&cc->n_allocated_pages);
2141
2142 if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
2143 cc->iv_gen_ops->dtr(cc);
2144
2145 if (cc->dev)
2146 dm_put_device(ti, cc->dev);
2147
2148 kzfree(cc->cipher_string);
2149 kzfree(cc->key_string);
2150 kzfree(cc->cipher_auth);
2151 kzfree(cc->authenc_key);
2152
2153 mutex_destroy(&cc->bio_alloc_lock);
2154
2155
2156 kzfree(cc);
2157
2158 spin_lock(&dm_crypt_clients_lock);
2159 WARN_ON(!dm_crypt_clients_n);
2160 dm_crypt_clients_n--;
2161 crypt_calculate_pages_per_client();
2162 spin_unlock(&dm_crypt_clients_lock);
2163 }
2164
2165 static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
2166 {
2167 struct crypt_config *cc = ti->private;
2168
2169 if (crypt_integrity_aead(cc))
2170 cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
2171 else
2172 cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
2173
2174 if (cc->iv_size)
2175
2176 cc->iv_size = max(cc->iv_size,
2177 (unsigned int)(sizeof(u64) / sizeof(u8)));
2178 else if (ivmode) {
2179 DMWARN("Selected cipher does not support IVs");
2180 ivmode = NULL;
2181 }
2182
2183
2184 if (ivmode == NULL)
2185 cc->iv_gen_ops = NULL;
2186 else if (strcmp(ivmode, "plain") == 0)
2187 cc->iv_gen_ops = &crypt_iv_plain_ops;
2188 else if (strcmp(ivmode, "plain64") == 0)
2189 cc->iv_gen_ops = &crypt_iv_plain64_ops;
2190 else if (strcmp(ivmode, "plain64be") == 0)
2191 cc->iv_gen_ops = &crypt_iv_plain64be_ops;
2192 else if (strcmp(ivmode, "essiv") == 0)
2193 cc->iv_gen_ops = &crypt_iv_essiv_ops;
2194 else if (strcmp(ivmode, "benbi") == 0)
2195 cc->iv_gen_ops = &crypt_iv_benbi_ops;
2196 else if (strcmp(ivmode, "null") == 0)
2197 cc->iv_gen_ops = &crypt_iv_null_ops;
2198 else if (strcmp(ivmode, "eboiv") == 0)
2199 cc->iv_gen_ops = &crypt_iv_eboiv_ops;
2200 else if (strcmp(ivmode, "lmk") == 0) {
2201 cc->iv_gen_ops = &crypt_iv_lmk_ops;
2202 /*
2203 * Version 2 and 3 is recognised according
2204 * to length of provided multi-key string.
2205 * If present (version 3), last key is used as IV seed.
2206 * All keys (including IV seed) are always the same size.
2207 */
2208 if (cc->key_size % cc->key_parts) {
2209 cc->key_parts++;
2210 cc->key_extra_size = cc->key_size / cc->key_parts;
2211 }
2212 } else if (strcmp(ivmode, "tcw") == 0) {
2213 cc->iv_gen_ops = &crypt_iv_tcw_ops;
2214 cc->key_parts += 2;
2215 cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
2216 } else if (strcmp(ivmode, "random") == 0) {
2217 cc->iv_gen_ops = &crypt_iv_random_ops;
2218 /* Need storage space in integrity fields. */
2219 cc->integrity_iv_size = cc->iv_size;
2220 } else {
2221 ti->error = "Invalid IV mode";
2222 return -EINVAL;
2223 }
2224
2225 return 0;
2226 }
2227
2228 /*
2229 * Workaround to parse the HMAC algorithm from an AEAD crypto API spec.
2230 * The HMAC is needed to calculate the tag size (HMAC digest size).
2231 * This should probably be done by crypto API calls once they are available.
2232 */
2233 static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api)
2234 {
2235 char *start, *end, *mac_alg = NULL;
2236 struct crypto_ahash *mac;
2237
2238 if (!strstarts(cipher_api, "authenc("))
2239 return 0;
2240
2241 start = strchr(cipher_api, '(');
2242 end = strchr(cipher_api, ',');
2243 if (!start || !end || ++start > end)
2244 return -EINVAL;
2245
2246 mac_alg = kzalloc(end - start + 1, GFP_KERNEL);
2247 if (!mac_alg)
2248 return -ENOMEM;
2249 strncpy(mac_alg, start, end - start);
2250
2251 mac = crypto_alloc_ahash(mac_alg, 0, 0);
2252 kfree(mac_alg);
2253
2254 if (IS_ERR(mac))
2255 return PTR_ERR(mac);
2256
2257 cc->key_mac_size = crypto_ahash_digestsize(mac);
2258 crypto_free_ahash(mac);
2259
2260 cc->authenc_key = kmalloc(crypt_authenckey_size(cc), GFP_KERNEL);
2261 if (!cc->authenc_key)
2262 return -ENOMEM;
2263
2264 return 0;
2265 }
2266
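/*
 * Parse the "capi:" cipher syntax, where the crypto API spec is passed
 * through verbatim. Illustrative specs (placeholders, not taken from a
 * table in this file):
 *   capi:xts(aes)-plain64
 *   capi:authenc(hmac(sha256),cbc(aes))-essiv:sha256
 */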
2267 static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key,
2268 char **ivmode, char **ivopts)
2269 {
2270 struct crypt_config *cc = ti->private;
2271 char *tmp, *cipher_api, buf[CRYPTO_MAX_ALG_NAME];
2272 int ret = -EINVAL;
2273
2274 cc->tfms_count = 1;
2275
2276 /*
2277 * New format (capi: prefix)
2278 * capi:cipher_api_spec-iv:ivopts
2279 */
2280 tmp = &cipher_in[strlen("capi:")];
2281
2282 /* Separate IV options if present, it can contain another '-' in hash name */
2283 *ivopts = strrchr(tmp, ':');
2284 if (*ivopts) {
2285 **ivopts = '\0';
2286 (*ivopts)++;
2287 }
2288
2289 *ivmode = strrchr(tmp, '-');
2290 if (*ivmode) {
2291 **ivmode = '\0';
2292 (*ivmode)++;
2293 }
2294
2295 cipher_api = tmp;
2296
2297 /* Alloc AEAD, can be used only in new format. */
2298 if (crypt_integrity_aead(cc)) {
2299 ret = crypt_ctr_auth_cipher(cc, cipher_api);
2300 if (ret < 0) {
2301 ti->error = "Invalid AEAD cipher spec";
2302 return -ENOMEM;
2303 }
2304 }
2305
2306 if (*ivmode && !strcmp(*ivmode, "lmk"))
2307 cc->tfms_count = 64;
2308
2309 if (*ivmode && !strcmp(*ivmode, "essiv")) {
2310 if (!*ivopts) {
2311 ti->error = "Digest algorithm missing for ESSIV mode";
2312 return -EINVAL;
2313 }
2314 ret = snprintf(buf, CRYPTO_MAX_ALG_NAME, "essiv(%s,%s)",
2315 cipher_api, *ivopts);
2316 if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
2317 ti->error = "Cannot allocate cipher string";
2318 return -ENOMEM;
2319 }
2320 cipher_api = buf;
2321 }
2322
2323 cc->key_parts = cc->tfms_count;
2324
2325 /* Allocate cipher */
2326 ret = crypt_alloc_tfms(cc, cipher_api);
2327 if (ret < 0) {
2328 ti->error = "Error allocating crypto tfm";
2329 return ret;
2330 }
2331
2332 if (crypt_integrity_aead(cc))
2333 cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
2334 else
2335 cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
2336
2337 return 0;
2338 }
2339
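/*
 * Parse the legacy cipher syntax and translate it into a crypto API spec.
 * Illustrative specs (placeholders): "aes-cbc-essiv:sha256" becomes
 * "essiv(cbc(aes),sha256)", and "aes-xts-plain64" becomes "xts(aes)".
 */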
2340 static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key,
2341 char **ivmode, char **ivopts)
2342 {
2343 struct crypt_config *cc = ti->private;
2344 char *tmp, *cipher, *chainmode, *keycount;
2345 char *cipher_api = NULL;
2346 int ret = -EINVAL;
2347 char dummy;
2348
2349 if (strchr(cipher_in, '(') || crypt_integrity_aead(cc)) {
2350 ti->error = "Bad cipher specification";
2351 return -EINVAL;
2352 }
2353
2354 /*
2355 * Legacy dm-crypt cipher specification
2356 * cipher[:keycount]-mode-iv:ivopts
2357 */
2358 tmp = cipher_in;
2359 keycount = strsep(&tmp, "-");
2360 cipher = strsep(&keycount, ":");
2361
2362 if (!keycount)
2363 cc->tfms_count = 1;
2364 else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
2365 !is_power_of_2(cc->tfms_count)) {
2366 ti->error = "Bad cipher key count specification";
2367 return -EINVAL;
2368 }
2369 cc->key_parts = cc->tfms_count;
2370
2371 chainmode = strsep(&tmp, "-");
2372 *ivmode = strsep(&tmp, ":");
2373 *ivopts = tmp;
2374
2375 /*
2376 * For compatibility with the original dm-crypt mapping format, if
2377 * only the cipher name is supplied, use cbc-plain.
2378 */
2379 if (!chainmode || (!strcmp(chainmode, "plain") && !*ivmode)) {
2380 chainmode = "cbc";
2381 *ivmode = "plain";
2382 }
2383
2384 if (strcmp(chainmode, "ecb") && !*ivmode) {
2385 ti->error = "IV mechanism required";
2386 return -EINVAL;
2387 }
2388
2389 cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
2390 if (!cipher_api)
2391 goto bad_mem;
2392
2393 if (*ivmode && !strcmp(*ivmode, "essiv")) {
2394 if (!*ivopts) {
2395 ti->error = "Digest algorithm missing for ESSIV mode";
2396 kfree(cipher_api);
2397 return -EINVAL;
2398 }
2399 ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
2400 "essiv(%s(%s),%s)", chainmode, cipher, *ivopts);
2401 } else {
2402 ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
2403 "%s(%s)", chainmode, cipher);
2404 }
2405 if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
2406 kfree(cipher_api);
2407 goto bad_mem;
2408 }
2409
2410 /* Allocate cipher */
2411 ret = crypt_alloc_tfms(cc, cipher_api);
2412 if (ret < 0) {
2413 ti->error = "Error allocating crypto tfm";
2414 kfree(cipher_api);
2415 return ret;
2416 }
2417 kfree(cipher_api);
2418
2419 return 0;
2420 bad_mem:
2421 ti->error = "Cannot allocate cipher strings";
2422 return -ENOMEM;
2423 }
2424
2425 static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key)
2426 {
2427 struct crypt_config *cc = ti->private;
2428 char *ivmode = NULL, *ivopts = NULL;
2429 int ret;
2430
2431 cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
2432 if (!cc->cipher_string) {
2433 ti->error = "Cannot allocate cipher strings";
2434 return -ENOMEM;
2435 }
2436
2437 if (strstarts(cipher_in, "capi:"))
2438 ret = crypt_ctr_cipher_new(ti, cipher_in, key, &ivmode, &ivopts);
2439 else
2440 ret = crypt_ctr_cipher_old(ti, cipher_in, key, &ivmode, &ivopts);
2441 if (ret)
2442 return ret;
2443
2444 /* Initialize IV */
2445 ret = crypt_ctr_ivmode(ti, ivmode);
2446 if (ret < 0)
2447 return ret;
2448
2449 /* Initialize and set key */
2450 ret = crypt_set_key(cc, key);
2451 if (ret < 0) {
2452 ti->error = "Error decoding and setting key";
2453 return ret;
2454 }
2455
2456 /* Allocate IV */
2457 if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
2458 ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
2459 if (ret < 0) {
2460 ti->error = "Error creating IV";
2461 return ret;
2462 }
2463 }
2464
2465 /* Initialize IV (set keys for ESSIV etc.) */
2466 if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
2467 ret = cc->iv_gen_ops->init(cc);
2468 if (ret < 0) {
2469 ti->error = "Error initialising IV";
2470 return ret;
2471 }
2472 }
2473
2474 /* wipe the kernel key payload copy */
2475 if (cc->key_string)
2476 memset(cc->key, 0, cc->key_size * sizeof(u8));
2477
2478 return ret;
2479 }
2480
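/*
 * Parse optional feature arguments appended to the table line:
 *   allow_discards, same_cpu_crypt, submit_from_crypt_cpus,
 *   integrity:<bytes>:<type>, sector_size:<bytes>, iv_large_sectors
 */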
2481 static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv)
2482 {
2483 struct crypt_config *cc = ti->private;
2484 struct dm_arg_set as;
2485 static const struct dm_arg _args[] = {
2486 {0, 6, "Invalid number of feature args"},
2487 };
2488 unsigned int opt_params, val;
2489 const char *opt_string, *sval;
2490 char dummy;
2491 int ret;
2492
2493 /* Optional parameters */
2494 as.argc = argc;
2495 as.argv = argv;
2496
2497 ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
2498 if (ret)
2499 return ret;
2500
2501 while (opt_params--) {
2502 opt_string = dm_shift_arg(&as);
2503 if (!opt_string) {
2504 ti->error = "Not enough feature arguments";
2505 return -EINVAL;
2506 }
2507
2508 if (!strcasecmp(opt_string, "allow_discards"))
2509 ti->num_discard_bios = 1;
2510
2511 else if (!strcasecmp(opt_string, "same_cpu_crypt"))
2512 set_bit(DM_CRYPT_SAME_CPU, &cc->flags);
2513
2514 else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
2515 set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
2516 else if (sscanf(opt_string, "integrity:%u:", &val) == 1) {
2517 if (val == 0 || val > MAX_TAG_SIZE) {
2518 ti->error = "Invalid integrity arguments";
2519 return -EINVAL;
2520 }
2521 cc->on_disk_tag_size = val;
2522 sval = strchr(opt_string + strlen("integrity:"), ':') + 1;
2523 if (!strcasecmp(sval, "aead")) {
2524 set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
2525 } else if (strcasecmp(sval, "none")) {
2526 ti->error = "Unknown integrity profile";
2527 return -EINVAL;
2528 }
2529
2530 cc->cipher_auth = kstrdup(sval, GFP_KERNEL);
2531 if (!cc->cipher_auth)
2532 return -ENOMEM;
2533 } else if (sscanf(opt_string, "sector_size:%hu%c", &cc->sector_size, &dummy) == 1) {
2534 if (cc->sector_size < (1 << SECTOR_SHIFT) ||
2535 cc->sector_size > 4096 ||
2536 (cc->sector_size & (cc->sector_size - 1))) {
2537 ti->error = "Invalid feature value for sector_size";
2538 return -EINVAL;
2539 }
2540 if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) {
2541 ti->error = "Device size is not multiple of sector_size feature";
2542 return -EINVAL;
2543 }
2544 cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
2545 } else if (!strcasecmp(opt_string, "iv_large_sectors"))
2546 set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
2547 else {
2548 ti->error = "Invalid feature arguments";
2549 return -EINVAL;
2550 }
2551 }
2552
2553 return 0;
2554 }
2555
2556 /*
2557 * Construct an encryption mapping:
2558 * <cipher> [<key>|:<key_size>:<user|logon>:<key_description>] <iv_offset> <dev_path> <start>
2559 */
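/*
 * Illustrative table line (device, length and key are placeholders):
 *   0 2097152 crypt aes-xts-plain64 <64-hex-char key> 0 /dev/sdb 0 1 allow_discards
 */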
2560 static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2561 {
2562 struct crypt_config *cc;
2563 const char *devname = dm_table_device_name(ti->table);
2564 int key_size;
2565 unsigned int align_mask;
2566 unsigned long long tmpll;
2567 int ret;
2568 size_t iv_size_padding, additional_req_size;
2569 char dummy;
2570
2571 if (argc < 5) {
2572 ti->error = "Not enough arguments";
2573 return -EINVAL;
2574 }
2575
2576 key_size = get_key_size(&argv[1]);
2577 if (key_size < 0) {
2578 ti->error = "Cannot parse key size";
2579 return -EINVAL;
2580 }
2581
2582 cc = kzalloc(struct_size(cc, key, key_size), GFP_KERNEL);
2583 if (!cc) {
2584 ti->error = "Cannot allocate encryption context";
2585 return -ENOMEM;
2586 }
2587 cc->key_size = key_size;
2588 cc->sector_size = (1 << SECTOR_SHIFT);
2589 cc->sector_shift = 0;
2590
2591 ti->private = cc;
2592
2593 spin_lock(&dm_crypt_clients_lock);
2594 dm_crypt_clients_n++;
2595 crypt_calculate_pages_per_client();
2596 spin_unlock(&dm_crypt_clients_lock);
2597
2598 ret = percpu_counter_init(&cc->n_allocated_pages, 0, GFP_KERNEL);
2599 if (ret < 0)
2600 goto bad;
2601
2602 /* Optional parameters need to be read before cipher constructor */
2603 if (argc > 5) {
2604 ret = crypt_ctr_optional(ti, argc - 5, &argv[5]);
2605 if (ret)
2606 goto bad;
2607 }
2608
2609 ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
2610 if (ret < 0)
2611 goto bad;
2612
2613 if (crypt_integrity_aead(cc)) {
2614 cc->dmreq_start = sizeof(struct aead_request);
2615 cc->dmreq_start += crypto_aead_reqsize(any_tfm_aead(cc));
2616 align_mask = crypto_aead_alignmask(any_tfm_aead(cc));
2617 } else {
2618 cc->dmreq_start = sizeof(struct skcipher_request);
2619 cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
2620 align_mask = crypto_skcipher_alignmask(any_tfm(cc));
2621 }
2622 cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
2623
2624 if (align_mask < CRYPTO_MINALIGN) {
2625 /* Allocate the padding exactly */
2626 iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
2627 & align_mask;
2628 } else {
2629 /*
2630 * If the cipher requires greater alignment than kmalloc
2631 * alignment, we don't know the exact position of the
2632 * initialization vector. We must assume worst case.
2633 */
2634 iv_size_padding = align_mask;
2635 }
2636
2637 /* ...| IV + padding | original IV | original sec. number | bio tag offset | */
2638 additional_req_size = sizeof(struct dm_crypt_request) +
2639 iv_size_padding + cc->iv_size +
2640 cc->iv_size +
2641 sizeof(uint64_t) +
2642 sizeof(unsigned int);
2643
2644 ret = mempool_init_kmalloc_pool(&cc->req_pool, MIN_IOS, cc->dmreq_start + additional_req_size);
2645 if (ret) {
2646 ti->error = "Cannot allocate crypt request mempool";
2647 goto bad;
2648 }
2649
2650 cc->per_bio_data_size = ti->per_io_data_size =
2651 ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
2652 ARCH_KMALLOC_MINALIGN);
2653
2654 ret = mempool_init(&cc->page_pool, BIO_MAX_PAGES, crypt_page_alloc, crypt_page_free, cc);
2655 if (ret) {
2656 ti->error = "Cannot allocate page mempool";
2657 goto bad;
2658 }
2659
2660 ret = bioset_init(&cc->bs, MIN_IOS, 0, BIOSET_NEED_BVECS);
2661 if (ret) {
2662 ti->error = "Cannot allocate crypt bioset";
2663 goto bad;
2664 }
2665
2666 mutex_init(&cc->bio_alloc_lock);
2667
2668 ret = -EINVAL;
2669 if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) ||
2670 (tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) {
2671 ti->error = "Invalid iv_offset sector";
2672 goto bad;
2673 }
2674 cc->iv_offset = tmpll;
2675
2676 ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev);
2677 if (ret) {
2678 ti->error = "Device lookup failed";
2679 goto bad;
2680 }
2681
2682 ret = -EINVAL;
2683 if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
2684 ti->error = "Invalid device sector";
2685 goto bad;
2686 }
2687 cc->start = tmpll;
2688
2689 if (crypt_integrity_aead(cc) || cc->integrity_iv_size) {
2690 ret = crypt_integrity_ctr(cc, ti);
2691 if (ret)
2692 goto bad;
2693
2694 cc->tag_pool_max_sectors = POOL_ENTRY_SIZE / cc->on_disk_tag_size;
2695 if (!cc->tag_pool_max_sectors)
2696 cc->tag_pool_max_sectors = 1;
2697
2698 ret = mempool_init_kmalloc_pool(&cc->tag_pool, MIN_IOS,
2699 cc->tag_pool_max_sectors * cc->on_disk_tag_size);
2700 if (ret) {
2701 ti->error = "Cannot allocate integrity tags mempool";
2702 goto bad;
2703 }
2704
2705 cc->tag_pool_max_sectors <<= cc->sector_shift;
2706 }
2707
2708 ret = -ENOMEM;
2709 cc->io_queue = alloc_workqueue("kcryptd_io/%s", WQ_MEM_RECLAIM, 1, devname);
2710 if (!cc->io_queue) {
2711 ti->error = "Couldn't create kcryptd io queue";
2712 goto bad;
2713 }
2714
2715 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
2716 cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
2717 1, devname);
2718 else
2719 cc->crypt_queue = alloc_workqueue("kcryptd/%s",
2720 WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
2721 num_online_cpus(), devname);
2722 if (!cc->crypt_queue) {
2723 ti->error = "Couldn't create kcryptd queue";
2724 goto bad;
2725 }
2726
2727 spin_lock_init(&cc->write_thread_lock);
2728 cc->write_tree = RB_ROOT;
2729
2730 cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write/%s", devname);
2731 if (IS_ERR(cc->write_thread)) {
2732 ret = PTR_ERR(cc->write_thread);
2733 cc->write_thread = NULL;
2734 ti->error = "Couldn't spawn write thread";
2735 goto bad;
2736 }
2737 wake_up_process(cc->write_thread);
2738
2739 ti->num_flush_bios = 1;
2740
2741 return 0;
2742
2743 bad:
2744 crypt_dtr(ti);
2745 return ret;
2746 }
2747
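/*
 * Map entry point: remap flushes and discards directly to the underlying
 * device, split bios that exceed BIO_MAX_PAGES worth of data or the
 * integrity tag pool, reject bios not aligned to the encryption sector
 * size, then hand reads and writes to the kcryptd queues.
 */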
2748 static int crypt_map(struct dm_target *ti, struct bio *bio)
2749 {
2750 struct dm_crypt_io *io;
2751 struct crypt_config *cc = ti->private;
2752
2753 /*
2754 * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
2755 * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
2756 * - for REQ_OP_DISCARD caller must use flush if IO ordering matters
2757 */
2758 if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
2759 bio_op(bio) == REQ_OP_DISCARD)) {
2760 bio_set_dev(bio, cc->dev->bdev);
2761 if (bio_sectors(bio))
2762 bio->bi_iter.bi_sector = cc->start +
2763 dm_target_offset(ti, bio->bi_iter.bi_sector);
2764 return DM_MAPIO_REMAPPED;
2765 }
2766
2767 /*
2768 * Check if bio is too large, split as needed.
2769 */
2770 if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_PAGES << PAGE_SHIFT)) &&
2771 (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size))
2772 dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT));
2773
2774 /*
2775 * Ensure that bio is a multiple of internal sector encryption size
2776 * and is aligned to this size as defined in IO hints.
2777 */
2778 if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
2779 return DM_MAPIO_KILL;
2780
2781 if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
2782 return DM_MAPIO_KILL;
2783
2784 io = dm_per_bio_data(bio, cc->per_bio_data_size);
2785 crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
2786
2787 if (cc->on_disk_tag_size) {
2788 unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);
2789
2790 if (unlikely(tag_len > KMALLOC_MAX_SIZE) ||
2791 unlikely(!(io->integrity_metadata = kmalloc(tag_len,
2792 GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
2793 if (bio_sectors(bio) > cc->tag_pool_max_sectors)
2794 dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
2795 io->integrity_metadata = mempool_alloc(&cc->tag_pool, GFP_NOIO);
2796 io->integrity_metadata_from_pool = true;
2797 }
2798 }
2799
2800 if (crypt_integrity_aead(cc))
2801 io->ctx.r.req_aead = (struct aead_request *)(io + 1);
2802 else
2803 io->ctx.r.req = (struct skcipher_request *)(io + 1);
2804
2805 if (bio_data_dir(io->base_bio) == READ) {
2806 if (kcryptd_io_read(io, GFP_NOWAIT))
2807 kcryptd_queue_read(io);
2808 } else
2809 kcryptd_queue_crypt(io);
2810
2811 return DM_MAPIO_SUBMITTED;
2812 }
2813
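/*
 * STATUSTYPE_INFO reports nothing; STATUSTYPE_TABLE re-emits the
 * constructor line, including either the hex key or the ":size:type:desc"
 * keyring reference, followed by any feature arguments.
 */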
2814 static void crypt_status(struct dm_target *ti, status_type_t type,
2815 unsigned status_flags, char *result, unsigned maxlen)
2816 {
2817 struct crypt_config *cc = ti->private;
2818 unsigned i, sz = 0;
2819 int num_feature_args = 0;
2820
2821 switch (type) {
2822 case STATUSTYPE_INFO:
2823 result[0] = '\0';
2824 break;
2825
2826 case STATUSTYPE_TABLE:
2827 DMEMIT("%s ", cc->cipher_string);
2828
2829 if (cc->key_size > 0) {
2830 if (cc->key_string)
2831 DMEMIT(":%u:%s", cc->key_size, cc->key_string);
2832 else
2833 for (i = 0; i < cc->key_size; i++)
2834 DMEMIT("%02x", cc->key[i]);
2835 } else
2836 DMEMIT("-");
2837
2838 DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
2839 cc->dev->name, (unsigned long long)cc->start);
2840
2841 num_feature_args += !!ti->num_discard_bios;
2842 num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
2843 num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
2844 num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
2845 num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
2846 if (cc->on_disk_tag_size)
2847 num_feature_args++;
2848 if (num_feature_args) {
2849 DMEMIT(" %d", num_feature_args);
2850 if (ti->num_discard_bios)
2851 DMEMIT(" allow_discards");
2852 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
2853 DMEMIT(" same_cpu_crypt");
2854 if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
2855 DMEMIT(" submit_from_crypt_cpus");
2856 if (cc->on_disk_tag_size)
2857 DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth);
2858 if (cc->sector_size != (1 << SECTOR_SHIFT))
2859 DMEMIT(" sector_size:%d", cc->sector_size);
2860 if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
2861 DMEMIT(" iv_large_sectors");
2862 }
2863
2864 break;
2865 }
2866 }
2867
2868 static void crypt_postsuspend(struct dm_target *ti)
2869 {
2870 struct crypt_config *cc = ti->private;
2871
2872 set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
2873 }
2874
2875 static int crypt_preresume(struct dm_target *ti)
2876 {
2877 struct crypt_config *cc = ti->private;
2878
2879 if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
2880 DMERR("aborting resume - crypt key is not set.");
2881 return -EAGAIN;
2882 }
2883
2884 return 0;
2885 }
2886
2887 static void crypt_resume(struct dm_target *ti)
2888 {
2889 struct crypt_config *cc = ti->private;
2890
2891 clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
2892 }
2893
2894 /* Message interface
2895 *	key set <key>
2896 *	key wipe
2897 */
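/*
 * Illustrative usage (device name is a placeholder):
 *   dmsetup message cr0 0 key wipe
 *   dmsetup message cr0 0 key set <new key>
 * The target must be suspended while the key is changed.
 */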
2898 static int crypt_message(struct dm_target *ti, unsigned argc, char **argv,
2899 char *result, unsigned maxlen)
2900 {
2901 struct crypt_config *cc = ti->private;
2902 int key_size, ret = -EINVAL;
2903
2904 if (argc < 2)
2905 goto error;
2906
2907 if (!strcasecmp(argv[0], "key")) {
2908 if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
2909 DMWARN("not suspended during key manipulation.");
2910 return -EINVAL;
2911 }
2912 if (argc == 3 && !strcasecmp(argv[1], "set")) {
2913 /* The key size may not be changed. */
2914 key_size = get_key_size(&argv[2]);
2915 if (key_size < 0 || cc->key_size != key_size) {
2916 memset(argv[2], '0', strlen(argv[2]));
2917 return -EINVAL;
2918 }
2919
2920 ret = crypt_set_key(cc, argv[2]);
2921 if (ret)
2922 return ret;
2923 if (cc->iv_gen_ops && cc->iv_gen_ops->init)
2924 ret = cc->iv_gen_ops->init(cc);
2925
2926 if (cc->key_string)
2927 memset(cc->key, 0, cc->key_size * sizeof(u8));
2928 return ret;
2929 }
2930 if (argc == 2 && !strcasecmp(argv[1], "wipe"))
2931 return crypt_wipe_key(cc);
2932 }
2933
2934 error:
2935 DMWARN("unrecognised message received.");
2936 return -EINVAL;
2937 }
2938
2939 static int crypt_iterate_devices(struct dm_target *ti,
2940 iterate_devices_callout_fn fn, void *data)
2941 {
2942 struct crypt_config *cc = ti->private;
2943
2944 return fn(ti, cc->dev, cc->start, ti->len, data);
2945 }
2946
2947 static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
2948 {
2949 struct crypt_config *cc = ti->private;
2950
2951 /*
2952 * Unfortunate constraint that is required to avoid the potential
2953 * for exceeding underlying device's max_segments limits -- due to
2954 * crypt_alloc_buffer() possibly allocating pages for the encryption
2955 * bio that are not as physically contiguous as the original bio.
2956 */
2957 limits->max_segment_size = PAGE_SIZE;
2958
2959 limits->logical_block_size =
2960 max_t(unsigned short, limits->logical_block_size, cc->sector_size);
2961 limits->physical_block_size =
2962 max_t(unsigned, limits->physical_block_size, cc->sector_size);
2963 limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
2964 }
2965
2966 static struct target_type crypt_target = {
2967 .name = "crypt",
2968 .version = {1, 19, 0},
2969 .module = THIS_MODULE,
2970 .ctr = crypt_ctr,
2971 .dtr = crypt_dtr,
2972 .map = crypt_map,
2973 .status = crypt_status,
2974 .postsuspend = crypt_postsuspend,
2975 .preresume = crypt_preresume,
2976 .resume = crypt_resume,
2977 .message = crypt_message,
2978 .iterate_devices = crypt_iterate_devices,
2979 .io_hints = crypt_io_hints,
2980 };
2981
2982 static int __init dm_crypt_init(void)
2983 {
2984 int r;
2985
2986 r = dm_register_target(&crypt_target);
2987 if (r < 0)
2988 DMERR("register failed %d", r);
2989
2990 return r;
2991 }
2992
2993 static void __exit dm_crypt_exit(void)
2994 {
2995 dm_unregister_target(&crypt_target);
2996 }
2997
2998 module_init(dm_crypt_init);
2999 module_exit(dm_crypt_exit);
3000
3001 MODULE_AUTHOR("Jana Saout <jana@saout.de>");
3002 MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
3003 MODULE_LICENSE("GPL");