dmz 117 drivers/md/dm-zoned-target.c static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
dmz 124 drivers/md/dm-zoned-target.c clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
dmz 128 drivers/md/dm-zoned-target.c bio_set_dev(clone, dmz->dev->bdev);
dmz 130 drivers/md/dm-zoned-target.c dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
dmz 149 drivers/md/dm-zoned-target.c static void dmz_handle_read_zero(struct dmz_target *dmz, struct bio *bio,
dmz 165 drivers/md/dm-zoned-target.c static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
dmz 168 drivers/md/dm-zoned-target.c sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
dmz 180 drivers/md/dm-zoned-target.c dmz_dev_debug(dmz->dev, "READ chunk %llu -> %s zone %u, block %llu, %u blocks",
dmz 181 drivers/md/dm-zoned-target.c (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
dmz 183 drivers/md/dm-zoned-target.c dmz_id(dmz->metadata, zone),
dmz 192 drivers/md/dm-zoned-target.c ret = dmz_block_valid(dmz->metadata, zone, chunk_block);
dmz 207 drivers/md/dm-zoned-target.c ret = dmz_block_valid(dmz->metadata, bzone, chunk_block);
dmz 220 drivers/md/dm-zoned-target.c ret = dmz_submit_bio(dmz, rzone, bio, chunk_block, nr_blocks);
dmz 226 drivers/md/dm-zoned-target.c dmz_handle_read_zero(dmz, bio, chunk_block, 1);
dmz 239 drivers/md/dm-zoned-target.c static int dmz_handle_direct_write(struct dmz_target *dmz,
dmz 244 drivers/md/dm-zoned-target.c struct dmz_metadata *zmd = dmz->metadata;
dmz 252 drivers/md/dm-zoned-target.c ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks);
dmz 272 drivers/md/dm-zoned-target.c static int dmz_handle_buffered_write(struct dmz_target *dmz,
dmz 277 drivers/md/dm-zoned-target.c struct dmz_metadata *zmd = dmz->metadata;
dmz 290 drivers/md/dm-zoned-target.c ret = dmz_submit_bio(dmz, bzone, bio, chunk_block, nr_blocks);
dmz 308 drivers/md/dm-zoned-target.c static int dmz_handle_write(struct dmz_target *dmz, struct dm_zone *zone,
dmz 311 drivers/md/dm-zoned-target.c sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
dmz 317 drivers/md/dm-zoned-target.c dmz_dev_debug(dmz->dev, "WRITE chunk %llu -> %s zone %u, block %llu, %u blocks",
dmz 318 drivers/md/dm-zoned-target.c (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
dmz 320 drivers/md/dm-zoned-target.c dmz_id(dmz->metadata, zone),
dmz 329 drivers/md/dm-zoned-target.c return dmz_handle_direct_write(dmz, zone, bio, chunk_block, nr_blocks);
dmz 336 drivers/md/dm-zoned-target.c return dmz_handle_buffered_write(dmz, zone, bio, chunk_block, nr_blocks);
dmz 342 drivers/md/dm-zoned-target.c static int dmz_handle_discard(struct dmz_target *dmz, struct dm_zone *zone,
dmz 345 drivers/md/dm-zoned-target.c struct dmz_metadata *zmd = dmz->metadata;
dmz 348 drivers/md/dm-zoned-target.c sector_t chunk_block = dmz_chunk_block(dmz->dev, block);
dmz 358 drivers/md/dm-zoned-target.c dmz_dev_debug(dmz->dev, "DISCARD chunk %llu -> zone %u, block %llu, %u blocks",
dmz 359 drivers/md/dm-zoned-target.c (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
dmz 378 drivers/md/dm-zoned-target.c static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
dmz 382 drivers/md/dm-zoned-target.c struct dmz_metadata *zmd = dmz->metadata;
dmz 391 drivers/md/dm-zoned-target.c dmz_schedule_reclaim(dmz->reclaim);
dmz 395 drivers/md/dm-zoned-target.c if (dmz->dev->flags & DMZ_BDEV_DYING) {
dmz 405 drivers/md/dm-zoned-target.c zone = dmz_get_chunk_mapping(zmd, dmz_bio_chunk(dmz->dev, bio),
dmz 420 drivers/md/dm-zoned-target.c ret = dmz_handle_read(dmz, zone, bio);
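The entries above around dmz_submit_bio() show the I/O path's clone-and-remap pattern: the incoming bio is cloned from the target's private bio_set, redirected to the backing zoned block device, and its start sector rebased to the mapped zone. Below is a minimal sketch of that pattern, assuming the bio_clone_fast() API visible in the listing; example_submit_clone() and example_clone_endio() are hypothetical names, and the real target completes the parent through a refcounted per-bio context rather than directly in the clone's endio.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical endio: propagate the clone's status to the parent bio. */
static void example_clone_endio(struct bio *clone)
{
	struct bio *parent = clone->bi_private;

	if (clone->bi_status)
		parent->bi_status = clone->bi_status;
	bio_put(clone);
	bio_endio(parent);	/* the real target defers this via a refcount */
}

/* Clone @bio from @bs, redirect it to @bdev, rebase it into the zone. */
static int example_submit_clone(struct bio *bio, struct bio_set *bs,
				struct block_device *bdev,
				sector_t zone_start, sector_t sect_offset)
{
	struct bio *clone;

	clone = bio_clone_fast(bio, GFP_NOIO, bs);	/* shares the parent's pages */
	if (!clone)
		return -ENOMEM;

	bio_set_dev(clone, bdev);			/* redirect to backing device */
	clone->bi_iter.bi_sector = zone_start + sect_offset;
	clone->bi_end_io = example_clone_endio;
	clone->bi_private = bio;

	submit_bio(clone);
	return 0;
}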
dmz 423 drivers/md/dm-zoned-target.c ret = dmz_handle_write(dmz, zone, bio);
dmz 427 drivers/md/dm-zoned-target.c ret = dmz_handle_discard(dmz, zone, bio);
dmz 430 drivers/md/dm-zoned-target.c dmz_dev_err(dmz->dev, "Unsupported BIO operation 0x%x",
dmz 474 drivers/md/dm-zoned-target.c struct dmz_target *dmz = cw->target;
dmz 477 drivers/md/dm-zoned-target.c mutex_lock(&dmz->chunk_lock);
dmz 481 drivers/md/dm-zoned-target.c mutex_unlock(&dmz->chunk_lock);
dmz 482 drivers/md/dm-zoned-target.c dmz_handle_bio(dmz, cw, bio);
dmz 483 drivers/md/dm-zoned-target.c mutex_lock(&dmz->chunk_lock);
dmz 490 drivers/md/dm-zoned-target.c mutex_unlock(&dmz->chunk_lock);
dmz 498 drivers/md/dm-zoned-target.c struct dmz_target *dmz = container_of(work, struct dmz_target, flush_work.work);
dmz 503 drivers/md/dm-zoned-target.c ret = dmz_flush_metadata(dmz->metadata);
dmz 505 drivers/md/dm-zoned-target.c dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret);
dmz 509 drivers/md/dm-zoned-target.c spin_lock(&dmz->flush_lock);
dmz 510 drivers/md/dm-zoned-target.c bio = bio_list_pop(&dmz->flush_list);
dmz 511 drivers/md/dm-zoned-target.c spin_unlock(&dmz->flush_lock);
dmz 519 drivers/md/dm-zoned-target.c queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
dmz 526 drivers/md/dm-zoned-target.c static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
dmz 528 drivers/md/dm-zoned-target.c unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
dmz 532 drivers/md/dm-zoned-target.c mutex_lock(&dmz->chunk_lock);
dmz 535 drivers/md/dm-zoned-target.c cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
dmz 548 drivers/md/dm-zoned-target.c cw->target = dmz;
dmz 552 drivers/md/dm-zoned-target.c ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
dmz 561 drivers/md/dm-zoned-target.c dmz_reclaim_bio_acc(dmz->reclaim);
dmz 562 drivers/md/dm-zoned-target.c if (queue_work(dmz->chunk_wq, &cw->work))
dmz 565 drivers/md/dm-zoned-target.c mutex_unlock(&dmz->chunk_lock);
dmz 620 drivers/md/dm-zoned-target.c struct dmz_target *dmz = ti->private;
dmz 621 drivers/md/dm-zoned-target.c struct dmz_dev *dev = dmz->dev;
dmz 628 drivers/md/dm-zoned-target.c if (dmz_bdev_is_dying(dmz->dev))
dmz 633 drivers/md/dm-zoned-target.c (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
dmz 634 drivers/md/dm-zoned-target.c (unsigned long long)dmz_chunk_block(dmz->dev, dmz_bio_block(bio)),
dmz 647 drivers/md/dm-zoned-target.c bioctx->target = dmz;
dmz 654 drivers/md/dm-zoned-target.c spin_lock(&dmz->flush_lock);
dmz 655 drivers/md/dm-zoned-target.c bio_list_add(&dmz->flush_list, bio);
dmz 656 drivers/md/dm-zoned-target.c spin_unlock(&dmz->flush_lock);
dmz 657 drivers/md/dm-zoned-target.c mod_delayed_work(dmz->flush_wq, &dmz->flush_work, 0);
dmz 667 drivers/md/dm-zoned-target.c ret = dmz_queue_chunk_work(dmz, bio);
dmz 669 drivers/md/dm-zoned-target.c dmz_dev_debug(dmz->dev,
dmz 671 drivers/md/dm-zoned-target.c bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
dmz 684 drivers/md/dm-zoned-target.c struct dmz_target *dmz = ti->private;
dmz 691 drivers/md/dm-zoned-target.c ret = dm_get_device(ti, path, dm_table_get_mode(ti->table), &dmz->ddev);
dmz 694 drivers/md/dm-zoned-target.c dmz->ddev = NULL;
dmz 704 drivers/md/dm-zoned-target.c dev->bdev = dmz->ddev->bdev;
dmz 732 drivers/md/dm-zoned-target.c dmz->dev = dev;
dmz 736 drivers/md/dm-zoned-target.c dm_put_device(ti, dmz->ddev);
dmz 747 drivers/md/dm-zoned-target.c struct dmz_target *dmz = ti->private;
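The dmz_queue_chunk_work() entries above show per-chunk work deduplication: a radix tree keyed by chunk number tracks the work item for each chunk with bios in flight, so concurrent bios to the same chunk funnel into one worker. A sketch of that lookup-or-insert pattern follows; struct example_chunk_work and example_queue_chunk() are illustrative names, not the target's real ones, and the real code also holds a reference per queued bio.

#include <linux/bio.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Illustrative per-chunk work item; the real struct dm_chunk_work differs. */
struct example_chunk_work {
	struct work_struct work;
	refcount_t refcount;
	struct bio_list bio_list;	/* bios waiting on this chunk */
};

static int example_queue_chunk(struct radix_tree_root *tree, struct mutex *lock,
			       struct workqueue_struct *wq, work_func_t fn,
			       unsigned int chunk, struct bio *bio)
{
	struct example_chunk_work *cw;
	int ret = 0;

	mutex_lock(lock);

	/* Reuse the in-flight work item for this chunk if one exists. */
	cw = radix_tree_lookup(tree, chunk);
	if (!cw) {
		cw = kmalloc(sizeof(*cw), GFP_NOIO);
		if (!cw) {
			ret = -ENOMEM;
			goto out;
		}
		INIT_WORK(&cw->work, fn);
		refcount_set(&cw->refcount, 1);
		bio_list_init(&cw->bio_list);

		ret = radix_tree_insert(tree, chunk, cw);
		if (ret) {
			kfree(cw);
			goto out;
		}
	}

	bio_list_add(&cw->bio_list, bio);	/* the worker drains this list */

	/* Hold an extra reference for the queued work instance. */
	if (queue_work(wq, &cw->work))
		refcount_inc(&cw->refcount);
out:
	mutex_unlock(lock);
	return ret;
}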
dmz 749 drivers/md/dm-zoned-target.c dm_put_device(ti, dmz->ddev);
dmz 750 drivers/md/dm-zoned-target.c kfree(dmz->dev);
dmz 751 drivers/md/dm-zoned-target.c dmz->dev = NULL;
dmz 759 drivers/md/dm-zoned-target.c struct dmz_target *dmz;
dmz 770 drivers/md/dm-zoned-target.c dmz = kzalloc(sizeof(struct dmz_target), GFP_KERNEL);
dmz 771 drivers/md/dm-zoned-target.c if (!dmz) {
dmz 775 drivers/md/dm-zoned-target.c ti->private = dmz;
dmz 780 drivers/md/dm-zoned-target.c dmz->ddev = NULL;
dmz 785 drivers/md/dm-zoned-target.c dev = dmz->dev;
dmz 786 drivers/md/dm-zoned-target.c ret = dmz_ctr_metadata(dev, &dmz->metadata);
dmz 802 drivers/md/dm-zoned-target.c ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) << dev->zone_nr_sectors_shift;
dmz 805 drivers/md/dm-zoned-target.c ret = bioset_init(&dmz->bio_set, DMZ_MIN_BIOS, 0, 0);
dmz 812 drivers/md/dm-zoned-target.c mutex_init(&dmz->chunk_lock);
dmz 813 drivers/md/dm-zoned-target.c INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
dmz 814 drivers/md/dm-zoned-target.c dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
dmz 816 drivers/md/dm-zoned-target.c if (!dmz->chunk_wq) {
dmz 823 drivers/md/dm-zoned-target.c spin_lock_init(&dmz->flush_lock);
dmz 824 drivers/md/dm-zoned-target.c bio_list_init(&dmz->flush_list);
dmz 825 drivers/md/dm-zoned-target.c INIT_DELAYED_WORK(&dmz->flush_work, dmz_flush_work);
dmz 826 drivers/md/dm-zoned-target.c dmz->flush_wq = alloc_ordered_workqueue("dmz_fwq_%s", WQ_MEM_RECLAIM,
dmz 828 drivers/md/dm-zoned-target.c if (!dmz->flush_wq) {
dmz 833 drivers/md/dm-zoned-target.c mod_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
dmz 836 drivers/md/dm-zoned-target.c ret = dmz_ctr_reclaim(dev, dmz->metadata, &dmz->reclaim);
dmz 848 drivers/md/dm-zoned-target.c destroy_workqueue(dmz->flush_wq);
dmz 850 drivers/md/dm-zoned-target.c destroy_workqueue(dmz->chunk_wq);
dmz 852 drivers/md/dm-zoned-target.c mutex_destroy(&dmz->chunk_lock);
dmz 853 drivers/md/dm-zoned-target.c bioset_exit(&dmz->bio_set);
dmz 855 drivers/md/dm-zoned-target.c dmz_dtr_metadata(dmz->metadata);
dmz 859 drivers/md/dm-zoned-target.c kfree(dmz);
dmz 869 drivers/md/dm-zoned-target.c struct dmz_target *dmz = ti->private;
dmz 871 drivers/md/dm-zoned-target.c flush_workqueue(dmz->chunk_wq);
dmz 872 drivers/md/dm-zoned-target.c destroy_workqueue(dmz->chunk_wq);
dmz 874 drivers/md/dm-zoned-target.c dmz_dtr_reclaim(dmz->reclaim);
dmz 876 drivers/md/dm-zoned-target.c cancel_delayed_work_sync(&dmz->flush_work);
dmz 877 drivers/md/dm-zoned-target.c destroy_workqueue(dmz->flush_wq);
dmz 879 drivers/md/dm-zoned-target.c (void) dmz_flush_metadata(dmz->metadata);
dmz 881 drivers/md/dm-zoned-target.c dmz_dtr_metadata(dmz->metadata);
dmz 883 drivers/md/dm-zoned-target.c bioset_exit(&dmz->bio_set);
dmz 887 drivers/md/dm-zoned-target.c mutex_destroy(&dmz->chunk_lock);
dmz 889 drivers/md/dm-zoned-target.c kfree(dmz);
dmz 897 drivers/md/dm-zoned-target.c struct dmz_target *dmz = ti->private;
dmz 898 drivers/md/dm-zoned-target.c unsigned int chunk_sectors = dmz->dev->zone_nr_sectors;
dmz 925 drivers/md/dm-zoned-target.c struct dmz_target *dmz = ti->private;
dmz 927 drivers/md/dm-zoned-target.c if (!dmz_check_bdev(dmz->dev))
dmz 930 drivers/md/dm-zoned-target.c *bdev = dmz->dev->bdev;
dmz 940 drivers/md/dm-zoned-target.c struct dmz_target *dmz = ti->private;
dmz 942 drivers/md/dm-zoned-target.c flush_workqueue(dmz->chunk_wq);
dmz 943 drivers/md/dm-zoned-target.c dmz_suspend_reclaim(dmz->reclaim);
dmz 944 drivers/md/dm-zoned-target.c cancel_delayed_work_sync(&dmz->flush_work);
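The constructor entries above wire up the flush path: an ordered workqueue runs a delayed work item that flushes metadata, completes queued flush bios, and re-arms itself every DMZ_FLUSH_PERIOD, while the map path kicks it immediately with mod_delayed_work(..., 0) when a flush bio arrives (lines 654-657). A sketch of that self-re-arming pattern, under illustrative names and an assumed period, with error propagation to the waiting bios elided:

#include <linux/bio.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#define EXAMPLE_FLUSH_PERIOD	(10 * HZ)	/* illustrative stand-in for DMZ_FLUSH_PERIOD */

struct example_flusher {
	struct workqueue_struct *wq;	/* ordered, so flushes serialize */
	struct delayed_work work;
	spinlock_t lock;
	struct bio_list pending;	/* flush bios awaiting metadata writeback */
};

static void example_flush_fn(struct work_struct *work)
{
	struct example_flusher *f =
		container_of(work, struct example_flusher, work.work);
	struct bio *bio;

	/* Flush metadata here; on failure, set bi_status before bio_endio(). */

	while (1) {
		spin_lock(&f->lock);
		bio = bio_list_pop(&f->pending);
		spin_unlock(&f->lock);
		if (!bio)
			break;
		bio_endio(bio);		/* complete each waiting flush bio */
	}

	/* Re-arm: metadata is flushed at least once per period. */
	queue_delayed_work(f->wq, &f->work, EXAMPLE_FLUSH_PERIOD);
}

In this scheme an incoming flush bio is appended to pending under the lock and the work is forced to run now with mod_delayed_work(f->wq, &f->work, 0), mirroring the dmz_map entries in the listing.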
dmz 952 drivers/md/dm-zoned-target.c struct dmz_target *dmz = ti->private;
dmz 954 drivers/md/dm-zoned-target.c queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
dmz 955 drivers/md/dm-zoned-target.c dmz_resume_reclaim(dmz->reclaim);
dmz 961 drivers/md/dm-zoned-target.c struct dmz_target *dmz = ti->private;
dmz 962 drivers/md/dm-zoned-target.c struct dmz_dev *dev = dmz->dev;
dmz 965 drivers/md/dm-zoned-target.c return fn(ti, dmz->ddev, 0, capacity, data);
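The final entries show the suspend/resume pairing: presuspend drains the chunk workqueue and cancels the delayed flush work (with reclaim suspended alongside), and resume re-queues the flush work and resumes reclaim. A minimal sketch of that pairing, with illustrative function and parameter names:

#include <linux/workqueue.h>

/* Presuspend: let queued chunk work finish, then park the flusher. */
static void example_presuspend(struct workqueue_struct *chunk_wq,
			       struct delayed_work *flush_work)
{
	flush_workqueue(chunk_wq);		/* wait out in-flight chunk work */
	cancel_delayed_work_sync(flush_work);	/* stop periodic flushing */
}

/* Resume: re-arm periodic flushing on the flush workqueue. */
static void example_resume(struct workqueue_struct *flush_wq,
			   struct delayed_work *flush_work,
			   unsigned long period)
{
	queue_delayed_work(flush_wq, flush_work, period);
}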