Lines matching refs:dc (references to struct cached_dev *dc in drivers/md/bcache/super.c)
203 struct cached_dev *dc = bio->bi_private; in write_bdev_super_endio() local
206 closure_put(&dc->sb_write); in write_bdev_super_endio()
246 struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write); in bch_write_bdev_super_unlock() local
248 up(&dc->sb_write_mutex); in bch_write_bdev_super_unlock()
251 void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent) in bch_write_bdev_super() argument
253 struct closure *cl = &dc->sb_write; in bch_write_bdev_super()
254 struct bio *bio = &dc->sb_bio; in bch_write_bdev_super()
256 down(&dc->sb_write_mutex); in bch_write_bdev_super()
260 bio->bi_bdev = dc->bdev; in bch_write_bdev_super()
262 bio->bi_private = dc; in bch_write_bdev_super()
265 __write_super(&dc->sb, bio); in bch_write_bdev_super()
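The three fragments above (write_bdev_super_endio(), bch_write_bdev_super_unlock(), bch_write_bdev_super()) form bcache's serialized superblock write: down(&dc->sb_write_mutex) is taken before the asynchronous write is issued, the bio's endio handler drops the sb_write closure reference, and the closure's unlock function releases the semaphore. A minimal userspace sketch of the same pattern, assuming POSIX semaphores and hypothetical names (sb_writer, async_write, ...):

/* Sketch: a semaphore held across an async write; the completion path,
 * not the submitter, releases it. All names here are hypothetical. */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <unistd.h>

struct sb_writer {
    sem_t sb_write_mutex;   /* counting semaphore, like dc->sb_write_mutex */
    pthread_t io_thread;
};

/* stands in for write_bdev_super_endio() + bch_write_bdev_super_unlock() */
static void *async_write(void *p)
{
    struct sb_writer *w = p;
    usleep(10000);                 /* pretend the device is writing */
    printf("superblock write completed\n");
    sem_post(&w->sb_write_mutex);  /* up(&dc->sb_write_mutex) */
    return NULL;
}

/* stands in for bch_write_bdev_super(): serialize, then fire and forget */
static void write_bdev_super(struct sb_writer *w)
{
    sem_wait(&w->sb_write_mutex);  /* down(&dc->sb_write_mutex) */
    pthread_create(&w->io_thread, NULL, async_write, w);
}

int main(void)
{
    struct sb_writer w;
    sem_init(&w.sb_write_mutex, 0, 1);
    write_bdev_super(&w);          /* a 2nd call here would block until done */
    pthread_join(w.io_thread, NULL);
    sem_destroy(&w.sb_write_mutex);
    return 0;
}

A semaphore rather than a mutex fits here because the release happens in I/O completion context, not in the task that took the lock; POSIX sem_t has the same owner-free semantics, which is why the sketch avoids pthread_mutex_t.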
829 struct cached_dev *dc; in calc_cached_dev_sectors() local
831 list_for_each_entry(dc, &c->cached_devs, list) in calc_cached_dev_sectors()
832 sectors += bdev_sectors(dc->bdev); in calc_cached_dev_sectors()
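calc_cached_dev_sectors() simply walks c->cached_devs and sums each backing device's size. The list_for_each_entry() idiom it uses steps from an embedded list_head back to the containing structure via container_of(). A self-contained userspace reimplementation of that idiom (simplified: the kernel macro infers the type with typeof(), so the explicit type parameter below is an artifact of portable C):

/* Userspace sketch of the list_for_each_entry/container_of idiom used by
 * calc_cached_dev_sectors(). Types and field names are simplified. */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, type, member)              \
    for (pos = container_of((head)->next, type, member);          \
         &pos->member != (head);                                  \
         pos = container_of(pos->member.next, type, member))

struct cached_dev { unsigned long long sectors; struct list_head list; };

static void list_add_tail(struct list_head *n, struct list_head *h)
{
    n->prev = h->prev; n->next = h;
    h->prev->next = n; h->prev = n;
}

int main(void)
{
    struct list_head cached_devs = { &cached_devs, &cached_devs };
    struct cached_dev a = { .sectors = 1000 }, b = { .sectors = 2000 };
    struct cached_dev *dc;
    unsigned long long sectors = 0;

    list_add_tail(&a.list, &cached_devs);
    list_add_tail(&b.list, &cached_devs);

    list_for_each_entry(dc, &cached_devs, struct cached_dev, list)
        sectors += dc->sectors;          /* like bdev_sectors(dc->bdev) */

    printf("total: %llu sectors\n", sectors);  /* prints 3000 */
    return 0;
}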
837 void bch_cached_dev_run(struct cached_dev *dc) in bch_cached_dev_run() argument
839 struct bcache_device *d = &dc->disk; in bch_cached_dev_run()
843 kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid), in bch_cached_dev_run()
848 memcpy(buf, dc->sb.label, SB_LABEL_SIZE); in bch_cached_dev_run()
852 if (atomic_xchg(&dc->running, 1)) { in bch_cached_dev_run()
859 BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) { in bch_cached_dev_run()
863 SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE); in bch_cached_dev_run()
864 bch_write_bdev_super(dc, &cl); in bch_cached_dev_run()
869 bd_link_disk_holder(dc->bdev, dc->disk.disk); in bch_cached_dev_run()
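The atomic_xchg(&dc->running, 1) test in bch_cached_dev_run() is a run-once guard: the first caller swaps 0 for 1 and brings the device up; any later or concurrent caller sees the old value 1 and returns early. The SET_BDEV_STATE(..., BDEV_STATE_STALE) just before records that a device which was previously attached is starting without its cache set, so cached data can no longer be trusted. A C11 sketch of the guard alone:

/* Run-once guard in the style of atomic_xchg(&dc->running, 1). */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int running = 0;

static void cached_dev_run(void)
{
    if (atomic_exchange(&running, 1)) {
        printf("already running, nothing to do\n");
        return;
    }
    printf("first caller: bring the device up\n");
}

int main(void)
{
    cached_dev_run();   /* does the work */
    cached_dev_run();   /* no-op */
    return 0;
}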
883 struct cached_dev *dc = container_of(w, struct cached_dev, detach); in cached_dev_detach_finish() local
888 BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)); in cached_dev_detach_finish()
889 BUG_ON(atomic_read(&dc->count)); in cached_dev_detach_finish()
893 memset(&dc->sb.set_uuid, 0, 16); in cached_dev_detach_finish()
894 SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE); in cached_dev_detach_finish()
896 bch_write_bdev_super(dc, &cl); in cached_dev_detach_finish()
899 bcache_device_detach(&dc->disk); in cached_dev_detach_finish()
900 list_move(&dc->list, &uncached_devices); in cached_dev_detach_finish()
902 clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags); in cached_dev_detach_finish()
903 clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags); in cached_dev_detach_finish()
907 pr_info("Caching disabled for %s", bdevname(dc->bdev, buf)); in cached_dev_detach_finish()
910 closure_put(&dc->disk.cl); in cached_dev_detach_finish()
913 void bch_cached_dev_detach(struct cached_dev *dc) in bch_cached_dev_detach() argument
917 if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags)) in bch_cached_dev_detach()
920 if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) in bch_cached_dev_detach()
927 closure_get(&dc->disk.cl); in bch_cached_dev_detach()
929 bch_writeback_queue(dc); in bch_cached_dev_detach()
930 cached_dev_put(dc); in bch_cached_dev_detach()
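bch_cached_dev_detach() applies the same single-entry discipline with flag bits instead of a counter: a device already closing is left alone, and test_and_set_bit(BCACHE_DEV_DETACHING, ...) lets exactly one caller own the detach; the heavy lifting then happens asynchronously in cached_dev_detach_finish() once writeback drains and dc->count reaches zero. A C11 sketch of the two-flag guard (bit numbers are illustrative):

/* Sketch of the test_and_set_bit() single-entry guard used by
 * bch_cached_dev_detach(); flag names and bit numbers are illustrative. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define BCACHE_DEV_CLOSING   0
#define BCACHE_DEV_DETACHING 1

static atomic_ulong flags = 0;

static bool test_bit(int nr, atomic_ulong *addr)
{
    return atomic_load(addr) & (1UL << nr);
}

static bool test_and_set_bit(int nr, atomic_ulong *addr)
{
    return atomic_fetch_or(addr, 1UL << nr) & (1UL << nr);
}

static void cached_dev_detach(void)
{
    if (test_bit(BCACHE_DEV_CLOSING, &flags))
        return;                       /* device is already going away */
    if (test_and_set_bit(BCACHE_DEV_DETACHING, &flags))
        return;                       /* someone else owns the detach */
    printf("this caller owns the detach; queue writeback, drop ref\n");
}

int main(void)
{
    cached_dev_detach();  /* wins the flag */
    cached_dev_detach();  /* no-op */
    return 0;
}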
933 int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c) in bch_cached_dev_attach() argument
939 bdevname(dc->bdev, buf); in bch_cached_dev_attach()
941 if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)) in bch_cached_dev_attach()
944 if (dc->disk.c) { in bch_cached_dev_attach()
954 if (dc->sb.block_size < c->sb.block_size) { in bch_cached_dev_attach()
961 u = uuid_find(c, dc->sb.uuid); in bch_cached_dev_attach()
964 (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE || in bch_cached_dev_attach()
965 BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) { in bch_cached_dev_attach()
972 if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { in bch_cached_dev_attach()
992 memcpy(u->uuid, dc->sb.uuid, 16); in bch_cached_dev_attach()
993 memcpy(u->label, dc->sb.label, SB_LABEL_SIZE); in bch_cached_dev_attach()
997 memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16); in bch_cached_dev_attach()
998 SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN); in bch_cached_dev_attach()
1000 bch_write_bdev_super(dc, &cl); in bch_cached_dev_attach()
1007 bcache_device_attach(&dc->disk, c, u - c->uuids); in bch_cached_dev_attach()
1008 list_move(&dc->list, &c->cached_devs); in bch_cached_dev_attach()
1016 atomic_set(&dc->count, 1); in bch_cached_dev_attach()
1019 down_write(&dc->writeback_lock); in bch_cached_dev_attach()
1020 if (bch_cached_dev_writeback_start(dc)) { in bch_cached_dev_attach()
1021 up_write(&dc->writeback_lock); in bch_cached_dev_attach()
1025 if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { in bch_cached_dev_attach()
1026 bch_sectors_dirty_init(dc); in bch_cached_dev_attach()
1027 atomic_set(&dc->has_dirty, 1); in bch_cached_dev_attach()
1028 atomic_inc(&dc->count); in bch_cached_dev_attach()
1029 bch_writeback_queue(dc); in bch_cached_dev_attach()
1032 bch_cached_dev_run(dc); in bch_cached_dev_attach()
1033 bcache_device_link(&dc->disk, c, "bdev"); in bch_cached_dev_attach()
1036 up_write(&dc->writeback_lock); in bch_cached_dev_attach()
1039 bdevname(dc->bdev, buf), dc->disk.disk->disk_name, in bch_cached_dev_attach()
1040 dc->disk.c->sb.set_uuid); in bch_cached_dev_attach()
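The attach path first checks that the backing device's set_uuid and block size are compatible with the cache set, then branches on the superblock's BDEV_STATE: a STALE or NONE entry found by uuid_find() is discarded and a fresh one written with the state forced to CLEAN, while DIRTY means cached writes survived a shutdown, so dirty accounting is initialized and writeback queued before the device is run. A compact decision sketch under those rules (helpers and return conventions are hypothetical simplifications):

/* Decision sketch for bch_cached_dev_attach(); helpers are hypothetical. */
#include <stdio.h>

enum bdev_state { BDEV_STATE_NONE, BDEV_STATE_CLEAN,
                  BDEV_STATE_DIRTY, BDEV_STATE_STALE };

static int attach(enum bdev_state state, int found_uuid)
{
    if (found_uuid && (state == BDEV_STATE_STALE || state == BDEV_STATE_NONE))
        found_uuid = 0;      /* stale entry: treat as not found */

    if (!found_uuid) {
        if (state == BDEV_STATE_DIRTY) {
            printf("error: dirty data but no uuid entry in this set\n");
            return -1;       /* cannot attach without losing data */
        }
        printf("fresh attach: allocate uuid entry, state -> CLEAN\n");
    }
    if (state == BDEV_STATE_DIRTY)
        printf("resume writeback of dirty data before going live\n");

    printf("run device and link it into the cache set\n");
    return 0;
}

int main(void)
{
    attach(BDEV_STATE_NONE, 0);   /* fresh attach */
    attach(BDEV_STATE_DIRTY, 1);  /* crash recovery */
    attach(BDEV_STATE_DIRTY, 0);  /* refused */
    return 0;
}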
1046 struct cached_dev *dc = container_of(kobj, struct cached_dev, in bch_cached_dev_release() local
1048 kfree(dc); in bch_cached_dev_release()
1054 struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); in cached_dev_free() local
1056 cancel_delayed_work_sync(&dc->writeback_rate_update); in cached_dev_free()
1057 if (!IS_ERR_OR_NULL(dc->writeback_thread)) in cached_dev_free()
1058 kthread_stop(dc->writeback_thread); in cached_dev_free()
1062 if (atomic_read(&dc->running)) in cached_dev_free()
1063 bd_unlink_disk_holder(dc->bdev, dc->disk.disk); in cached_dev_free()
1064 bcache_device_free(&dc->disk); in cached_dev_free()
1065 list_del(&dc->list); in cached_dev_free()
1069 if (!IS_ERR_OR_NULL(dc->bdev)) in cached_dev_free()
1070 blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); in cached_dev_free()
1074 kobject_put(&dc->disk.kobj); in cached_dev_free()
1079 struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); in cached_dev_flush() local
1080 struct bcache_device *d = &dc->disk; in cached_dev_flush()
1086 bch_cache_accounting_destroy(&dc->accounting); in cached_dev_flush()
1092 static int cached_dev_init(struct cached_dev *dc, unsigned block_size) in cached_dev_init() argument
1096 struct request_queue *q = bdev_get_queue(dc->bdev); in cached_dev_init()
1099 INIT_LIST_HEAD(&dc->list); in cached_dev_init()
1100 closure_init(&dc->disk.cl, NULL); in cached_dev_init()
1101 set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq); in cached_dev_init()
1102 kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype); in cached_dev_init()
1103 INIT_WORK(&dc->detach, cached_dev_detach_finish); in cached_dev_init()
1104 sema_init(&dc->sb_write_mutex, 1); in cached_dev_init()
1105 INIT_LIST_HEAD(&dc->io_lru); in cached_dev_init()
1106 spin_lock_init(&dc->io_lock); in cached_dev_init()
1107 bch_cache_accounting_init(&dc->accounting, &dc->disk.cl); in cached_dev_init()
1109 dc->sequential_cutoff = 4 << 20; in cached_dev_init()
1111 for (io = dc->io; io < dc->io + RECENT_IO; io++) { in cached_dev_init()
1112 list_add(&io->lru, &dc->io_lru); in cached_dev_init()
1113 hlist_add_head(&io->hash, dc->io_hash + RECENT_IO); in cached_dev_init()
1116 dc->disk.stripe_size = q->limits.io_opt >> 9; in cached_dev_init()
1118 if (dc->disk.stripe_size) in cached_dev_init()
1119 dc->partial_stripes_expensive = in cached_dev_init()
1122 ret = bcache_device_init(&dc->disk, block_size, in cached_dev_init()
1123 dc->bdev->bd_part->nr_sects - dc->sb.data_offset); in cached_dev_init()
1127 set_capacity(dc->disk.disk, in cached_dev_init()
1128 dc->bdev->bd_part->nr_sects - dc->sb.data_offset); in cached_dev_init()
1130 dc->disk.disk->queue->backing_dev_info.ra_pages = in cached_dev_init()
1131 max(dc->disk.disk->queue->backing_dev_info.ra_pages, in cached_dev_init()
1134 bch_cached_dev_request_init(dc); in cached_dev_init()
1135 bch_cached_dev_writeback_init(dc); in cached_dev_init()
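cached_dev_init() seeds the sequential-I/O tracker with a fixed pool: each of the RECENT_IO entries is put on dc->io_lru, and its hash node is parked in dc->io_hash + RECENT_IO, one slot past the real buckets, so lookups miss until an entry is actually populated. A simplified sketch of that pool setup (types reduced to the bare minimum, singly linked for brevity):

/* Sketch of cached_dev_init()'s RECENT_IO pool: a fixed array of entries
 * strung onto an LRU list, with hash chains parked in an extra
 * "never matches" bucket until first use. Types are simplified. */
#include <stdio.h>

#define RECENT_IO 16

struct entry { struct entry *lru_next; int bucket; };

struct tracker {
    struct entry io[RECENT_IO];
    struct entry *lru;               /* LRU list, newest first */
};

static void tracker_init(struct tracker *t)
{
    for (int i = 0; i < RECENT_IO; i++) {
        t->io[i].lru_next = t->lru;  /* list_add(&io->lru, &dc->io_lru) */
        t->lru = &t->io[i];
        t->io[i].bucket = RECENT_IO; /* parked in the overflow bucket */
    }
}

int main(void)
{
    struct tracker t = { 0 };
    tracker_init(&t);
    printf("pool of %d entries ready, all in bucket %d\n",
           RECENT_IO, t.io[0].bucket);
    return 0;
}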
1143 struct cached_dev *dc) in register_bdev() argument
1149 memcpy(&dc->sb, sb, sizeof(struct cache_sb)); in register_bdev()
1150 dc->bdev = bdev; in register_bdev()
1151 dc->bdev->bd_holder = dc; in register_bdev()
1153 bio_init(&dc->sb_bio); in register_bdev()
1154 dc->sb_bio.bi_max_vecs = 1; in register_bdev()
1155 dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs; in register_bdev()
1156 dc->sb_bio.bi_io_vec[0].bv_page = sb_page; in register_bdev()
1159 if (cached_dev_init(dc, sb->block_size << 9)) in register_bdev()
1163 if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj, in register_bdev()
1166 if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj)) in register_bdev()
1171 list_add(&dc->list, &uncached_devices); in register_bdev()
1173 bch_cached_dev_attach(dc, c); in register_bdev()
1175 if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE || in register_bdev()
1176 BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) in register_bdev()
1177 bch_cached_dev_run(dc); in register_bdev()
1182 bcache_device_stop(&dc->disk); in register_bdev()
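register_bdev() copies the probed superblock into dc->sb, wires up a one-segment sb_bio pointing at the superblock page, adds the kobject beneath the block device, and offers the device to every known cache set; any failure falls through to a single path that stops the half-constructed device. The listing only shows the failure sink (bcache_device_stop()), but the shape is the kernel's usual goto-unwind error handling, sketched here with hypothetical helpers:

/* Miniature of the goto-unwind error handling register_bdev() follows.
 * All helper names are hypothetical. */
#include <stdio.h>
#include <stdlib.h>

static int init_bio(void)    { return 0; }
static int add_kobject(void) { return -1; /* pretend this fails */ }

static int register_bdev(void)
{
    int ret;

    ret = init_bio();
    if (ret)
        goto err;
    ret = add_kobject();
    if (ret)
        goto err;

    printf("registered; try attaching to each cache set\n");
    return 0;
err:
    fprintf(stderr, "error: stopping half-constructed device\n");
    return ret;
}

int main(void) { return register_bdev() ? EXIT_FAILURE : 0; }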
1414 struct cached_dev *dc; in __cache_set_unregister() local
1423 dc = container_of(c->devices[i], in __cache_set_unregister()
1425 bch_cached_dev_detach(dc); in __cache_set_unregister()
1541 struct cached_dev *dc, *t; in run_cache_set() local
1691 list_for_each_entry_safe(dc, t, &uncached_devices, list) in run_cache_set()
1692 bch_cached_dev_attach(dc, c); in run_cache_set()
1902 struct cached_dev *dc, *t; in bch_is_open_backing() local
1905 list_for_each_entry_safe(dc, t, &c->cached_devs, list) in bch_is_open_backing()
1906 if (dc->bdev == bdev) in bch_is_open_backing()
1908 list_for_each_entry_safe(dc, t, &uncached_devices, list) in bch_is_open_backing()
1909 if (dc->bdev == bdev) in bch_is_open_backing()
1975 struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); in register_bcache() local
1976 if (!dc) in register_bcache()
1980 register_bdev(sb, sb_page, bdev, dc); in register_bcache()
2016 struct cached_dev *dc, *tdc; in bcache_reboot() local
2029 list_for_each_entry_safe(dc, tdc, &uncached_devices, list) in bcache_reboot()
2030 bcache_device_stop(&dc->disk); in bcache_reboot()
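bcache_reboot() is the shutdown side: invoked from a reboot notifier, it walks both the attached (per cache set) and uncached device lists and stops every device so superblocks land in a consistent state. A loose userspace analogue is a process-exit hook draining a registry (names are illustrative):

/* Userspace analogue of the bcache_reboot() shutdown hook: a cleanup
 * handler that stops every registered device. Names are illustrative. */
#include <stdio.h>
#include <stdlib.h>

#define MAX_DEVS 8
static const char *devices[MAX_DEVS];
static int ndevices;

static void stop_all_devices(void)
{
    for (int i = 0; i < ndevices; i++)
        printf("stopping %s\n", devices[i]);   /* bcache_device_stop() */
}

int main(void)
{
    atexit(stop_all_devices);     /* like register_reboot_notifier() */
    devices[ndevices++] = "bcache0";
    devices[ndevices++] = "bcache1";
    return 0;                     /* handler runs on exit */
}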