This source file includes the following definitions:
- dmz_bio_endio
- dmz_clone_endio
- dmz_submit_bio
- dmz_handle_read_zero
- dmz_handle_read
- dmz_handle_direct_write
- dmz_handle_buffered_write
- dmz_handle_write
- dmz_handle_discard
- dmz_handle_bio
- dmz_get_chunk_work
- dmz_put_chunk_work
- dmz_chunk_work
- dmz_flush_work
- dmz_queue_chunk_work
- dmz_bdev_is_dying
- dmz_check_bdev
- dmz_map
- dmz_get_zoned_device
- dmz_put_zoned_device
- dmz_ctr
- dmz_dtr
- dmz_io_hints
- dmz_prepare_ioctl
- dmz_suspend
- dmz_resume
- dmz_iterate_devices
- dmz_init
- dmz_exit

#include "dm-zoned.h"

#include <linux/module.h>

#define	DM_MSG_PREFIX		"zoned"

#define DMZ_MIN_BIOS		8192

/*
 * Zone BIO context.
 */
struct dmz_bioctx {
	struct dmz_target	*target;
	struct dm_zone		*zone;
	struct bio		*bio;
	refcount_t		ref;
};

/*
 * Chunk work descriptor.
 */
struct dm_chunk_work {
	struct work_struct	work;
	refcount_t		refcount;
	struct dmz_target	*target;
	unsigned int		chunk;
	struct bio_list		bio_list;
};

/*
 * Target descriptor.
 */
struct dmz_target {
	struct dm_dev		*ddev;

	unsigned long		flags;

	/* Zoned block device information */
	struct dmz_dev		*dev;

	/* For metadata handling */
	struct dmz_metadata	*metadata;

	/* For reclaim */
	struct dmz_reclaim	*reclaim;

	/* For chunk work */
	struct radix_tree_root	chunk_rxtree;
	struct workqueue_struct *chunk_wq;
	struct mutex		chunk_lock;

	/* For cloned BIOs to zones */
	struct bio_set		bio_set;

	/* For flush */
	spinlock_t		flush_lock;
	struct bio_list		flush_list;
	struct delayed_work	flush_work;
	struct workqueue_struct *flush_wq;
};

/*
 * Periodic metadata flush interval (10 seconds, in jiffies).
 */
#define DMZ_FLUSH_PERIOD	(10 * HZ)

/*
 * Target BIO completion.
 */
static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
{
	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));

	if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
		bio->bi_status = status;
	if (bio->bi_status != BLK_STS_OK)
		bioctx->target->dev->flags |= DMZ_CHECK_BDEV;

	if (refcount_dec_and_test(&bioctx->ref)) {
		struct dm_zone *zone = bioctx->zone;

		if (zone) {
			if (bio->bi_status != BLK_STS_OK &&
			    bio_op(bio) == REQ_OP_WRITE &&
			    dmz_is_seq(zone))
				set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
			dmz_deactivate_zone(zone);
		}
		bio_endio(bio);
	}
}

/*
 * Completion callback for an internally cloned target BIO. This terminates the
 * target BIO when there are no more references to its context.
 */
static void dmz_clone_endio(struct bio *clone)
{
	struct dmz_bioctx *bioctx = clone->bi_private;
	blk_status_t status = clone->bi_status;

	bio_put(clone);
	dmz_bio_endio(bioctx->bio, status);
}

/*
 * Issue a clone of a target BIO. The clone may only partially process the
 * original target BIO.
 */
static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
			  struct bio *bio, sector_t chunk_block,
			  unsigned int nr_blocks)
{
	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
	struct bio *clone;

	clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
	if (!clone)
		return -ENOMEM;

	bio_set_dev(clone, dmz->dev->bdev);
	clone->bi_iter.bi_sector =
		dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
	clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;
	clone->bi_end_io = dmz_clone_endio;
	clone->bi_private = bioctx;

	bio_advance(bio, clone->bi_iter.bi_size);

	refcount_inc(&bioctx->ref);
	generic_make_request(clone);

	if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
		zone->wp_block += nr_blocks;

	return 0;
}

/*
 * Zero out the part of a read BIO covering unmapped or invalid blocks.
 */
static void dmz_handle_read_zero(struct dmz_target *dmz, struct bio *bio,
				 sector_t chunk_block, unsigned int nr_blocks)
{
	unsigned int size = nr_blocks << DMZ_BLOCK_SHIFT;

	/* Clear nr_blocks blocks of the BIO buffer */
	swap(bio->bi_iter.bi_size, size);
	zero_fill_bio(bio);
	swap(bio->bi_iter.bi_size, size);

	bio_advance(bio, size);
}

/*
 * Process a read BIO.
 */
static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
			   struct bio *bio)
{
	sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
	unsigned int nr_blocks = dmz_bio_blocks(bio);
	sector_t end_block = chunk_block + nr_blocks;
	struct dm_zone *rzone, *bzone;
	int ret;

	/* Reads of unmapped chunks only need to zero the BIO buffer */
	if (!zone) {
		zero_fill_bio(bio);
		return 0;
	}

	dmz_dev_debug(dmz->dev, "READ chunk %llu -> %s zone %u, block %llu, %u blocks",
		      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
		      (dmz_is_rnd(zone) ? "RND" : "SEQ"),
		      dmz_id(dmz->metadata, zone),
		      (unsigned long long)chunk_block, nr_blocks);

	/* Check block validity to determine the read location */
	bzone = zone->bzone;
	while (chunk_block < end_block) {
		nr_blocks = 0;
		if (dmz_is_rnd(zone) || chunk_block < zone->wp_block) {
			/* Test block validity in the data zone */
			ret = dmz_block_valid(dmz->metadata, zone, chunk_block);
			if (ret < 0)
				return ret;
			if (ret > 0) {
				/* Read data zone blocks */
				nr_blocks = ret;
				rzone = zone;
			}
		}

		/*
		 * No valid blocks found in the data zone:
		 * check the buffer zone, if there is one.
		 */
		if (!nr_blocks && bzone) {
			ret = dmz_block_valid(dmz->metadata, bzone, chunk_block);
			if (ret < 0)
				return ret;
			if (ret > 0) {
				/* Read buffer zone blocks */
				nr_blocks = ret;
				rzone = bzone;
			}
		}

		if (nr_blocks) {
			/* Valid blocks found: read them */
			nr_blocks = min_t(unsigned int, nr_blocks, end_block - chunk_block);
			ret = dmz_submit_bio(dmz, rzone, bio, chunk_block, nr_blocks);
			if (ret)
				return ret;
			chunk_block += nr_blocks;
		} else {
			/* No valid block: zero out the current BIO block */
			dmz_handle_read_zero(dmz, bio, chunk_block, 1);
			chunk_block++;
		}
	}

	return 0;
}

/*
 * Write blocks directly into a data zone, at the write pointer.
 * If a buffer zone is assigned, invalidate the blocks it holds
 * for the written range.
 */
static int dmz_handle_direct_write(struct dmz_target *dmz,
				   struct dm_zone *zone, struct bio *bio,
				   sector_t chunk_block,
				   unsigned int nr_blocks)
{
	struct dmz_metadata *zmd = dmz->metadata;
	struct dm_zone *bzone = zone->bzone;
	int ret;

	if (dmz_is_readonly(zone))
		return -EROFS;

	/* Submit write */
	ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks);
	if (ret)
		return ret;

	/*
	 * Validate the blocks in the data zone and invalidate them
	 * in the buffer zone, if there is one.
	 */
	ret = dmz_validate_blocks(zmd, zone, chunk_block, nr_blocks);
	if (ret == 0 && bzone)
		ret = dmz_invalidate_blocks(zmd, bzone, chunk_block, nr_blocks);

	return ret;
}

/*
 * Write blocks to the buffer zone of @zone,
 * allocating a buffer zone if none is assigned yet.
 */
static int dmz_handle_buffered_write(struct dmz_target *dmz,
				     struct dm_zone *zone, struct bio *bio,
				     sector_t chunk_block,
				     unsigned int nr_blocks)
{
	struct dmz_metadata *zmd = dmz->metadata;
	struct dm_zone *bzone;
	int ret;

	/* Get the buffer zone. One will be allocated if needed */
	bzone = dmz_get_chunk_buffer(zmd, zone);
	if (IS_ERR(bzone))
		return PTR_ERR(bzone);

	if (dmz_is_readonly(bzone))
		return -EROFS;

	/* Submit write */
	ret = dmz_submit_bio(dmz, bzone, bio, chunk_block, nr_blocks);
	if (ret)
		return ret;

	/*
	 * Validate the blocks in the buffer zone and invalidate
	 * any written copies in the data zone.
	 */
	ret = dmz_validate_blocks(zmd, bzone, chunk_block, nr_blocks);
	if (ret == 0 && chunk_block < zone->wp_block)
		ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);

	return ret;
}

/*
 * Process a write BIO.
 */
static int dmz_handle_write(struct dmz_target *dmz, struct dm_zone *zone,
			    struct bio *bio)
{
	sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
	unsigned int nr_blocks = dmz_bio_blocks(bio);

	if (!zone)
		return -ENOSPC;

	dmz_dev_debug(dmz->dev, "WRITE chunk %llu -> %s zone %u, block %llu, %u blocks",
		      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
		      (dmz_is_rnd(zone) ? "RND" : "SEQ"),
		      dmz_id(dmz->metadata, zone),
		      (unsigned long long)chunk_block, nr_blocks);

	if (dmz_is_rnd(zone) || chunk_block == zone->wp_block) {
		/*
		 * The zone is a random zone, or it is a sequential zone
		 * and the BIO is aligned to the zone write pointer:
		 * write directly to the zone.
		 */
		return dmz_handle_direct_write(dmz, zone, bio, chunk_block, nr_blocks);
	}

	/*
	 * This is an unaligned write in a sequential zone:
	 * use a buffered write.
	 */
	return dmz_handle_buffered_write(dmz, zone, bio, chunk_block, nr_blocks);
}

/*
 * Process a discard BIO.
 */
static int dmz_handle_discard(struct dmz_target *dmz, struct dm_zone *zone,
			      struct bio *bio)
{
	struct dmz_metadata *zmd = dmz->metadata;
	sector_t block = dmz_bio_block(bio);
	unsigned int nr_blocks = dmz_bio_blocks(bio);
	sector_t chunk_block = dmz_chunk_block(dmz->dev, block);
	int ret = 0;

	/* For unmapped chunks, there is nothing to do */
	if (!zone)
		return 0;

	if (dmz_is_readonly(zone))
		return -EROFS;

	dmz_dev_debug(dmz->dev, "DISCARD chunk %llu -> zone %u, block %llu, %u blocks",
		      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
		      dmz_id(zmd, zone),
		      (unsigned long long)chunk_block, nr_blocks);

	/*
	 * Invalidate the blocks in the data zone and in its
	 * buffer zone, if one is mapped.
	 */
	if (dmz_is_rnd(zone) || chunk_block < zone->wp_block)
		ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);
	if (ret == 0 && zone->bzone)
		ret = dmz_invalidate_blocks(zmd, zone->bzone,
					    chunk_block, nr_blocks);
	return ret;
}

/*
 * Process a BIO.
 */
static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
			   struct bio *bio)
{
	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
	struct dmz_metadata *zmd = dmz->metadata;
	struct dm_zone *zone;
	int ret;

	/*
	 * Writes may require a new zone mapping: give reclaim a chance
	 * to free zones so that the allocation can succeed.
	 */
	if (bio_op(bio) == REQ_OP_WRITE)
		dmz_schedule_reclaim(dmz->reclaim);

	dmz_lock_metadata(zmd);

	if (dmz->dev->flags & DMZ_BDEV_DYING) {
		ret = -EIO;
		goto out;
	}

	/*
	 * Get the data zone mapping the chunk. There may be
	 * no mapping for read and discard operations.
	 */
	zone = dmz_get_chunk_mapping(zmd, dmz_bio_chunk(dmz->dev, bio),
				     bio_op(bio));
	if (IS_ERR(zone)) {
		ret = PTR_ERR(zone);
		goto out;
	}

	/* Keep the mapped zone active while the BIO is processed */
	if (zone) {
		dmz_activate_zone(zone);
		bioctx->zone = zone;
	}

	switch (bio_op(bio)) {
	case REQ_OP_READ:
		ret = dmz_handle_read(dmz, zone, bio);
		break;
	case REQ_OP_WRITE:
		ret = dmz_handle_write(dmz, zone, bio);
		break;
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		ret = dmz_handle_discard(dmz, zone, bio);
		break;
	default:
		dmz_dev_err(dmz->dev, "Unsupported BIO operation 0x%x",
			    bio_op(bio));
		ret = -EIO;
	}

	/* Release the chunk mapping, if one was obtained */
	if (zone)
		dmz_put_chunk_mapping(zmd, zone);
out:
	dmz_bio_endio(bio, errno_to_blk_status(ret));

	dmz_unlock_metadata(zmd);
}

/*
 * Increment a chunk work reference counter.
 */
static inline void dmz_get_chunk_work(struct dm_chunk_work *cw)
{
	refcount_inc(&cw->refcount);
}

/*
 * Decrement a chunk work reference counter and
 * free the work if the counter reaches 0.
 */
static void dmz_put_chunk_work(struct dm_chunk_work *cw)
{
	if (refcount_dec_and_test(&cw->refcount)) {
		WARN_ON(!bio_list_empty(&cw->bio_list));
		radix_tree_delete(&cw->target->chunk_rxtree, cw->chunk);
		kfree(cw);
	}
}

/*
 * Chunk BIO work function.
 */
static void dmz_chunk_work(struct work_struct *work)
{
	struct dm_chunk_work *cw = container_of(work, struct dm_chunk_work, work);
	struct dmz_target *dmz = cw->target;
	struct bio *bio;

	mutex_lock(&dmz->chunk_lock);

	/* Process the chunk BIOs */
	while ((bio = bio_list_pop(&cw->bio_list))) {
		mutex_unlock(&dmz->chunk_lock);
		dmz_handle_bio(dmz, cw, bio);
		mutex_lock(&dmz->chunk_lock);
		dmz_put_chunk_work(cw);
	}

	/* Drop the reference taken when the work was queued */
	dmz_put_chunk_work(cw);

	mutex_unlock(&dmz->chunk_lock);
}

/*
 * Flush work.
 */
static void dmz_flush_work(struct work_struct *work)
{
	struct dmz_target *dmz = container_of(work, struct dmz_target, flush_work.work);
	struct bio *bio;
	int ret;

	/* Flush dirty metadata blocks */
	ret = dmz_flush_metadata(dmz->metadata);
	if (ret)
		dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret);

	/* Process queued flush requests */
	while (1) {
		spin_lock(&dmz->flush_lock);
		bio = bio_list_pop(&dmz->flush_list);
		spin_unlock(&dmz->flush_lock);

		if (!bio)
			break;

		dmz_bio_endio(bio, errno_to_blk_status(ret));
	}

	queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
}

/*
 * Get a chunk work and start it to process a new BIO.
 * If the BIO chunk has no work yet, create one.
 */
static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
{
	unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
	struct dm_chunk_work *cw;
	int ret = 0;

	mutex_lock(&dmz->chunk_lock);

	/* Get the BIO chunk work. If one is not active yet, create one */
	cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
	if (cw) {
		dmz_get_chunk_work(cw);
	} else {
		/* Create a new chunk work */
		cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
		if (unlikely(!cw)) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_WORK(&cw->work, dmz_chunk_work);
		refcount_set(&cw->refcount, 1);
		cw->target = dmz;
		cw->chunk = chunk;
		bio_list_init(&cw->bio_list);

		ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
		if (unlikely(ret)) {
			kfree(cw);
			goto out;
		}
	}

	bio_list_add(&cw->bio_list, bio);

	dmz_reclaim_bio_acc(dmz->reclaim);
	if (queue_work(dmz->chunk_wq, &cw->work))
		dmz_get_chunk_work(cw);
out:
	mutex_unlock(&dmz->chunk_lock);
	return ret;
}

/*
 * Check whether the backing device is being torn down or has been
 * flagged for a full availability check.
 */
bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev)
{
	if (dmz_dev->flags & DMZ_BDEV_DYING)
		return true;

	if (dmz_dev->flags & DMZ_CHECK_BDEV)
		return !dmz_check_bdev(dmz_dev);

	if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
		dmz_dev_warn(dmz_dev, "Backing device queue dying");
		dmz_dev->flags |= DMZ_BDEV_DYING;
	}

	return dmz_dev->flags & DMZ_BDEV_DYING;
}

/*
 * Check backing device availability: clear DMZ_CHECK_BDEV and probe
 * the disk for media change events. Returns false if the device can
 * no longer be used.
 */
bool dmz_check_bdev(struct dmz_dev *dmz_dev)
{
	struct gendisk *disk;

	dmz_dev->flags &= ~DMZ_CHECK_BDEV;

	if (dmz_bdev_is_dying(dmz_dev))
		return false;

	disk = dmz_dev->bdev->bd_disk;
	if (disk->fops->check_events &&
	    disk->fops->check_events(disk, 0) & DISK_EVENT_MEDIA_CHANGE) {
		dmz_dev_warn(dmz_dev, "Backing device offline");
		dmz_dev->flags |= DMZ_BDEV_DYING;
	}

	return !(dmz_dev->flags & DMZ_BDEV_DYING);
}

/*
 * Process a new BIO.
 */
static int dmz_map(struct dm_target *ti, struct bio *bio)
{
	struct dmz_target *dmz = ti->private;
	struct dmz_dev *dev = dmz->dev;
	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
	sector_t sector = bio->bi_iter.bi_sector;
	unsigned int nr_sectors = bio_sectors(bio);
	sector_t chunk_sector;
	int ret;

	if (dmz_bdev_is_dying(dmz->dev))
		return DM_MAPIO_KILL;

	dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
		      bio_op(bio), (unsigned long long)sector, nr_sectors,
		      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
		      (unsigned long long)dmz_chunk_block(dmz->dev, dmz_bio_block(bio)),
		      (unsigned int)dmz_bio_blocks(bio));

	bio_set_dev(bio, dev->bdev);

	if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)
		return DM_MAPIO_REMAPPED;

	/* The BIO should be block aligned */
	if ((nr_sectors & DMZ_BLOCK_SECTORS_MASK) || (sector & DMZ_BLOCK_SECTORS_MASK))
		return DM_MAPIO_KILL;

	/* Initialize the BIO context */
	bioctx->target = dmz;
	bioctx->zone = NULL;
	bioctx->bio = bio;
	refcount_set(&bioctx->ref, 1);

	/* Set the BIO pending in the flush list */
	if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
		spin_lock(&dmz->flush_lock);
		bio_list_add(&dmz->flush_list, bio);
		spin_unlock(&dmz->flush_lock);
		mod_delayed_work(dmz->flush_wq, &dmz->flush_work, 0);
		return DM_MAPIO_SUBMITTED;
	}

	/* Split zone BIOs to fit entirely into a zone */
	chunk_sector = sector & (dev->zone_nr_sectors - 1);
	if (chunk_sector + nr_sectors > dev->zone_nr_sectors)
		dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);

	/* Now ready to handle this BIO */
	ret = dmz_queue_chunk_work(dmz, bio);
	if (ret) {
		dmz_dev_debug(dmz->dev,
			      "BIO op %d, can't process chunk %llu, err %i\n",
			      bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
			      ret);
		return DM_MAPIO_REQUEUE;
	}

	return DM_MAPIO_SUBMITTED;
}

/*
 * Get zoned device information.
 */
static int dmz_get_zoned_device(struct dm_target *ti, char *path)
{
	struct dmz_target *dmz = ti->private;
	struct request_queue *q;
	struct dmz_dev *dev;
	sector_t aligned_capacity;
	int ret;

	/* Get the target device */
	ret = dm_get_device(ti, path, dm_table_get_mode(ti->table), &dmz->ddev);
	if (ret) {
		ti->error = "Get target device failed";
		dmz->ddev = NULL;
		return ret;
	}

	dev = kzalloc(sizeof(struct dmz_dev), GFP_KERNEL);
	if (!dev) {
		ret = -ENOMEM;
		goto err;
	}

	dev->bdev = dmz->ddev->bdev;
	(void)bdevname(dev->bdev, dev->name);

	if (bdev_zoned_model(dev->bdev) == BLK_ZONED_NONE) {
		ti->error = "Not a zoned block device";
		ret = -EINVAL;
		goto err;
	}

	q = bdev_get_queue(dev->bdev);
	dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
	aligned_capacity = dev->capacity &
			   ~((sector_t)blk_queue_zone_sectors(q) - 1);
	if (ti->begin ||
	    ((ti->len != dev->capacity) && (ti->len != aligned_capacity))) {
		ti->error = "Partial mapping not supported";
		ret = -EINVAL;
		goto err;
	}

	dev->zone_nr_sectors = blk_queue_zone_sectors(q);
	dev->zone_nr_sectors_shift = ilog2(dev->zone_nr_sectors);

	dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors);
	dev->zone_nr_blocks_shift = ilog2(dev->zone_nr_blocks);

	dev->nr_zones = blkdev_nr_zones(dev->bdev);

	dmz->dev = dev;

	return 0;
err:
	dm_put_device(ti, dmz->ddev);
	kfree(dev);

	return ret;
}

/*
 * Cleanup zoned device information.
 */
static void dmz_put_zoned_device(struct dm_target *ti)
{
	struct dmz_target *dmz = ti->private;

	dm_put_device(ti, dmz->ddev);
	kfree(dmz->dev);
	dmz->dev = NULL;
}

/*
 * Setup target.
 */
static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dmz_target *dmz;
	struct dmz_dev *dev;
	int ret;

	/* Check arguments */
	if (argc != 1) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	/* Allocate and initialize the target descriptor */
	dmz = kzalloc(sizeof(struct dmz_target), GFP_KERNEL);
	if (!dmz) {
		ti->error = "Unable to allocate the zoned target descriptor";
		return -ENOMEM;
	}
	ti->private = dmz;

	/* Get the target zoned block device */
	ret = dmz_get_zoned_device(ti, argv[0]);
	if (ret) {
		dmz->ddev = NULL;
		goto err;
	}

	/* Initialize metadata */
	dev = dmz->dev;
	ret = dmz_ctr_metadata(dev, &dmz->metadata);
	if (ret) {
		ti->error = "Metadata initialization failed";
		goto err_dev;
	}

	/* Set target properties */
	ti->max_io_len = dev->zone_nr_sectors << 9;
	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_zeroes_bios = 1;
	ti->per_io_data_size = sizeof(struct dmz_bioctx);
	ti->flush_supported = true;
	ti->discards_supported = true;

	/* The exposed capacity is the number of mappable chunks, each one zone in size */
	ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) << dev->zone_nr_sectors_shift;

	/* BIO set for cloning BIOs to zones */
	ret = bioset_init(&dmz->bio_set, DMZ_MIN_BIOS, 0, 0);
	if (ret) {
		ti->error = "Create BIO set failed";
		goto err_meta;
	}

	/* Chunk BIO work */
	mutex_init(&dmz->chunk_lock);
	INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
	dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
					0, dev->name);
	if (!dmz->chunk_wq) {
		ti->error = "Create chunk workqueue failed";
		ret = -ENOMEM;
		goto err_bio;
	}

	/* Flush work */
	spin_lock_init(&dmz->flush_lock);
	bio_list_init(&dmz->flush_list);
	INIT_DELAYED_WORK(&dmz->flush_work, dmz_flush_work);
	dmz->flush_wq = alloc_ordered_workqueue("dmz_fwq_%s", WQ_MEM_RECLAIM,
						dev->name);
	if (!dmz->flush_wq) {
		ti->error = "Create flush workqueue failed";
		ret = -ENOMEM;
		goto err_cwq;
	}
	mod_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);

	/* Initialize reclaim */
	ret = dmz_ctr_reclaim(dev, dmz->metadata, &dmz->reclaim);
	if (ret) {
		ti->error = "Zone reclaim initialization failed";
		goto err_fwq;
	}

	dmz_dev_info(dev, "Target device: %llu 512-byte logical sectors (%llu blocks)",
		     (unsigned long long)ti->len,
		     (unsigned long long)dmz_sect2blk(ti->len));

	return 0;
err_fwq:
	destroy_workqueue(dmz->flush_wq);
err_cwq:
	destroy_workqueue(dmz->chunk_wq);
err_bio:
	mutex_destroy(&dmz->chunk_lock);
	bioset_exit(&dmz->bio_set);
err_meta:
	dmz_dtr_metadata(dmz->metadata);
err_dev:
	dmz_put_zoned_device(ti);
err:
	kfree(dmz);

	return ret;
}

/*
 * Cleanup target.
 */
static void dmz_dtr(struct dm_target *ti)
{
	struct dmz_target *dmz = ti->private;

	flush_workqueue(dmz->chunk_wq);
	destroy_workqueue(dmz->chunk_wq);

	dmz_dtr_reclaim(dmz->reclaim);

	cancel_delayed_work_sync(&dmz->flush_work);
	destroy_workqueue(dmz->flush_wq);

	(void) dmz_flush_metadata(dmz->metadata);

	dmz_dtr_metadata(dmz->metadata);

	bioset_exit(&dmz->bio_set);

	dmz_put_zoned_device(ti);

	mutex_destroy(&dmz->chunk_lock);

	kfree(dmz);
}

/*
 * Setup target request queue limits.
 */
static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dmz_target *dmz = ti->private;
	unsigned int chunk_sectors = dmz->dev->zone_nr_sectors;

	limits->logical_block_size = DMZ_BLOCK_SIZE;
	limits->physical_block_size = DMZ_BLOCK_SIZE;

	blk_limits_io_min(limits, DMZ_BLOCK_SIZE);
	blk_limits_io_opt(limits, DMZ_BLOCK_SIZE);

	limits->discard_alignment = DMZ_BLOCK_SIZE;
	limits->discard_granularity = DMZ_BLOCK_SIZE;
	limits->max_discard_sectors = chunk_sectors;
	limits->max_hw_discard_sectors = chunk_sectors;
	limits->max_write_zeroes_sectors = chunk_sectors;

	/* FS hint to try to align to the device zone size */
	limits->chunk_sectors = chunk_sectors;
	limits->max_sectors = chunk_sectors;

	/* The target is exposed as a regular (non-zoned) block device */
	limits->zoned = BLK_ZONED_NONE;
}

/*
 * Pass on ioctl to the backend device.
 */
static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
	struct dmz_target *dmz = ti->private;

	if (!dmz_check_bdev(dmz->dev))
		return -EIO;

	*bdev = dmz->dev->bdev;

	return 0;
}

/*
 * Stop works on suspend.
 */
static void dmz_suspend(struct dm_target *ti)
{
	struct dmz_target *dmz = ti->private;

	flush_workqueue(dmz->chunk_wq);
	dmz_suspend_reclaim(dmz->reclaim);
	cancel_delayed_work_sync(&dmz->flush_work);
}

/*
 * Restart works on resume or if suspend failed.
 */
static void dmz_resume(struct dm_target *ti)
{
	struct dmz_target *dmz = ti->private;

	queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
	dmz_resume_reclaim(dmz->reclaim);
}

static int dmz_iterate_devices(struct dm_target *ti,
			       iterate_devices_callout_fn fn, void *data)
{
	struct dmz_target *dmz = ti->private;
	struct dmz_dev *dev = dmz->dev;
	sector_t capacity = dev->capacity & ~(dev->zone_nr_sectors - 1);

	return fn(ti, dmz->ddev, 0, capacity, data);
}

static struct target_type dmz_type = {
	.name		 = "zoned",
	.version	 = {1, 0, 0},
	.features	 = DM_TARGET_SINGLETON | DM_TARGET_ZONED_HM,
	.module		 = THIS_MODULE,
	.ctr		 = dmz_ctr,
	.dtr		 = dmz_dtr,
	.map		 = dmz_map,
	.io_hints	 = dmz_io_hints,
	.prepare_ioctl	 = dmz_prepare_ioctl,
	.postsuspend	 = dmz_suspend,
	.resume		 = dmz_resume,
	.iterate_devices = dmz_iterate_devices,
};

static int __init dmz_init(void)
{
	return dm_register_target(&dmz_type);
}

static void __exit dmz_exit(void)
{
	dm_unregister_target(&dmz_type);
}

module_init(dmz_init);
module_exit(dmz_exit);

MODULE_DESCRIPTION(DM_NAME " target for zoned block devices");
MODULE_AUTHOR("Damien Le Moal <damien.lemoal@wdc.com>");
MODULE_LICENSE("GPL");