Lines Matching refs:dc
(one entry per reference: the source line number, the matched line, and the enclosing function; "local" and "argument" mark the lines where dc is declared)
226 struct cached_dev *dc = bio->bi_private; in write_bdev_super_endio() local
229 closure_put(&dc->sb_write); in write_bdev_super_endio()
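
Lines 226-229 are the completion side of the superblock write path below. A minimal sketch of the whole callback, assuming only a closure_put() sits in the elided lines (the original may also check bio status):

static void write_bdev_super_endio(struct bio *bio)
{
        struct cached_dev *dc = bio->bi_private;

        /* any error checking in the elided lines is omitted here */
        closure_put(&dc->sb_write);
}
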
269 struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write); in bch_write_bdev_super_unlock() local
271 up(&dc->sb_write_mutex); in bch_write_bdev_super_unlock()
274 void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent) in bch_write_bdev_super() argument
276 struct closure *cl = &dc->sb_write; in bch_write_bdev_super()
277 struct bio *bio = &dc->sb_bio; in bch_write_bdev_super()
279 down(&dc->sb_write_mutex); in bch_write_bdev_super()
283 bio->bi_bdev = dc->bdev; in bch_write_bdev_super()
285 bio->bi_private = dc; in bch_write_bdev_super()
288 __write_super(&dc->sb, bio); in bch_write_bdev_super()
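
Lines 269-288 together form the serialized superblock write: sb_write_mutex admits one writer at a time, the embedded closure tracks the in-flight bio, and the closure destructor releases the semaphore once the write (and any parent waiting on it) completes. A minimal reconstruction; bio_reset(), the bi_end_io hookup, and closure_return_with_destructor() fill elided lines and are assumptions consistent with the matched code:

static void bch_write_bdev_super_unlock(struct closure *cl)
{
        struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);

        up(&dc->sb_write_mutex);
}

void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
{
        struct closure *cl = &dc->sb_write;
        struct bio *bio = &dc->sb_bio;

        down(&dc->sb_write_mutex);      /* one superblock write at a time */
        closure_init(cl, parent);

        bio_reset(bio);
        bio->bi_bdev    = dc->bdev;
        bio->bi_end_io  = write_bdev_super_endio;
        bio->bi_private = dc;

        closure_get(cl);
        __write_super(&dc->sb, bio);

        /* dropping the last ref runs the destructor, releasing the mutex */
        closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
}
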
860 struct cached_dev *dc; in calc_cached_dev_sectors() local
862 list_for_each_entry(dc, &c->cached_devs, list) in calc_cached_dev_sectors()
863 sectors += bdev_sectors(dc->bdev); in calc_cached_dev_sectors()
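
Lines 860-863 sum the capacity of every backing device attached to the set. Sketch, assuming the total is stored in c->cached_dev_sectors (the field name is an inference from the function name):

static void calc_cached_dev_sectors(struct cache_set *c)
{
        uint64_t sectors = 0;
        struct cached_dev *dc;

        list_for_each_entry(dc, &c->cached_devs, list)
                sectors += bdev_sectors(dc->bdev);

        c->cached_dev_sectors = sectors;
}
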
868 void bch_cached_dev_run(struct cached_dev *dc) in bch_cached_dev_run() argument
870 struct bcache_device *d = &dc->disk; in bch_cached_dev_run()
874 kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid), in bch_cached_dev_run()
879 memcpy(buf, dc->sb.label, SB_LABEL_SIZE); in bch_cached_dev_run()
883 if (atomic_xchg(&dc->running, 1)) { in bch_cached_dev_run()
890 BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) { in bch_cached_dev_run()
894 SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE); in bch_cached_dev_run()
895 bch_write_bdev_super(dc, &cl); in bch_cached_dev_run()
900 bd_link_disk_holder(dc->bdev, dc->disk.disk); in bch_cached_dev_run()
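
Lines 868-900 bring the bcache block device itself online: lines 874-879 build a udev environment (CACHED_UUID plus the label copied into a NUL-terminated buffer), line 883 makes the function idempotent, and lines 890-895 persist a STALE state when the device runs without a cache set. A condensed sketch of that control flow, as a fragment inside bch_cached_dev_run(); add_disk() and closure_sync() fill elided lines and are assumptions:

        /* only the first caller actually starts the device */
        if (atomic_xchg(&dc->running, 1))
                return;

        if (!d->c &&
            BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
                struct closure cl;
                closure_init_stack(&cl);

                /*
                 * Running detached: whatever the cache still holds for
                 * this device can no longer be trusted.
                 */
                SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
                bch_write_bdev_super(dc, &cl);
                closure_sync(&cl);
        }

        add_disk(d->disk);
        bd_link_disk_holder(dc->bdev, dc->disk.disk);
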
914 struct cached_dev *dc = container_of(w, struct cached_dev, detach); in cached_dev_detach_finish() local
919 BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)); in cached_dev_detach_finish()
920 BUG_ON(atomic_read(&dc->count)); in cached_dev_detach_finish()
924 memset(&dc->sb.set_uuid, 0, 16); in cached_dev_detach_finish()
925 SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE); in cached_dev_detach_finish()
927 bch_write_bdev_super(dc, &cl); in cached_dev_detach_finish()
930 bcache_device_detach(&dc->disk); in cached_dev_detach_finish()
931 list_move(&dc->list, &uncached_devices); in cached_dev_detach_finish()
933 clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags); in cached_dev_detach_finish()
934 clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags); in cached_dev_detach_finish()
938 pr_info("Caching disabled for %s", bdevname(dc->bdev, buf)); in cached_dev_detach_finish()
941 closure_put(&dc->disk.cl); in cached_dev_detach_finish()
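
Lines 914-941 are the workqueue half of detach. The two BUG_ON()s document the contract: the DETACHING flag must be set, and every I/O reference (dc->count) must have drained before the work item runs. Condensed sketch; the bch_register_lock critical section and closure_sync() are assumptions about the elided lines:

static void cached_dev_detach_finish(struct work_struct *w)
{
        struct cached_dev *dc = container_of(w, struct cached_dev, detach);
        char buf[BDEVNAME_SIZE];
        struct closure cl;
        closure_init_stack(&cl);

        BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
        BUG_ON(atomic_read(&dc->count));

        mutex_lock(&bch_register_lock);

        /* scrub the set uuid so the device comes back unattached */
        memset(&dc->sb.set_uuid, 0, 16);
        SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);

        bch_write_bdev_super(dc, &cl);
        closure_sync(&cl);

        bcache_device_detach(&dc->disk);
        list_move(&dc->list, &uncached_devices);

        clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
        clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);

        mutex_unlock(&bch_register_lock);

        pr_info("Caching disabled for %s", bdevname(dc->bdev, buf));

        /* drop the ref bch_cached_dev_detach() took on disk.cl */
        closure_put(&dc->disk.cl);
}
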
944 void bch_cached_dev_detach(struct cached_dev *dc) in bch_cached_dev_detach() argument
948 if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags)) in bch_cached_dev_detach()
951 if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) in bch_cached_dev_detach()
958 closure_get(&dc->disk.cl); in bch_cached_dev_detach()
960 bch_writeback_queue(dc); in bch_cached_dev_detach()
961 cached_dev_put(dc); in bch_cached_dev_detach()
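
Lines 944-961 only initiate the detach: closure_get() pins the device until cached_dev_detach_finish() has run, and cached_dev_put() drops what is normally the last dc->count reference so writeback can flush and then schedule the finish work. Sketch, assuming early-return guards around the matched tests; the lockdep annotation is also an assumption:

void bch_cached_dev_detach(struct cached_dev *dc)
{
        lockdep_assert_held(&bch_register_lock);

        if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
                return;

        if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
                return;

        /* keep the device alive until the detach work has run */
        closure_get(&dc->disk.cl);

        bch_writeback_queue(dc);
        cached_dev_put(dc);
}
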
964 int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c) in bch_cached_dev_attach() argument
970 bdevname(dc->bdev, buf); in bch_cached_dev_attach()
972 if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)) in bch_cached_dev_attach()
975 if (dc->disk.c) { in bch_cached_dev_attach()
985 if (dc->sb.block_size < c->sb.block_size) { in bch_cached_dev_attach()
992 u = uuid_find(c, dc->sb.uuid); in bch_cached_dev_attach()
995 (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE || in bch_cached_dev_attach()
996 BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) { in bch_cached_dev_attach()
1003 if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { in bch_cached_dev_attach()
1023 memcpy(u->uuid, dc->sb.uuid, 16); in bch_cached_dev_attach()
1024 memcpy(u->label, dc->sb.label, SB_LABEL_SIZE); in bch_cached_dev_attach()
1028 memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16); in bch_cached_dev_attach()
1029 SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN); in bch_cached_dev_attach()
1031 bch_write_bdev_super(dc, &cl); in bch_cached_dev_attach()
1038 bcache_device_attach(&dc->disk, c, u - c->uuids); in bch_cached_dev_attach()
1039 list_move(&dc->list, &c->cached_devs); in bch_cached_dev_attach()
1047 atomic_set(&dc->count, 1); in bch_cached_dev_attach()
1050 down_write(&dc->writeback_lock); in bch_cached_dev_attach()
1051 if (bch_cached_dev_writeback_start(dc)) { in bch_cached_dev_attach()
1052 up_write(&dc->writeback_lock); in bch_cached_dev_attach()
1056 if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { in bch_cached_dev_attach()
1057 bch_sectors_dirty_init(dc); in bch_cached_dev_attach()
1058 atomic_set(&dc->has_dirty, 1); in bch_cached_dev_attach()
1059 atomic_inc(&dc->count); in bch_cached_dev_attach()
1060 bch_writeback_queue(dc); in bch_cached_dev_attach()
1063 bch_cached_dev_run(dc); in bch_cached_dev_attach()
1064 bcache_device_link(&dc->disk, c, "bdev"); in bch_cached_dev_attach()
1067 up_write(&dc->writeback_lock); in bch_cached_dev_attach()
1070 bdevname(dc->bdev, buf), dc->disk.disk->disk_name, in bch_cached_dev_attach()
1071 dc->disk.c->sb.set_uuid); in bch_cached_dev_attach()
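
Lines 964-1071 are the attach state machine. Reading the matches in order: reject a foreign set uuid, a double attach, or a backing device whose block size is smaller than the cache's; invalidate a stale uuid entry; refuse a DIRTY device whose uuid entry is gone (its dirty data would be unrecoverable); otherwise claim a slot, persist the CLEAN state, and wire the device into the set, taking one extra refcount and kicking writeback immediately when dirty data exists. A condensed fragment from inside bch_cached_dev_attach(), with struct uuid_entry *u as a local; the error codes, bch_is_zero(), and invalid_uuid are assumptions, and the pr_err()/pr_info() diagnostics (lines 1070-1071) are omitted:

        if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))
                return -ENOENT;         /* superblock names another set */
        if (dc->disk.c)
                return -EINVAL;         /* already attached */
        if (dc->sb.block_size < c->sb.block_size)
                return -EINVAL;         /* cache blocks would straddle backing blocks */

        u = uuid_find(c, dc->sb.uuid);

        if (u &&
            (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
             BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
                /* stale cached data: retire the old entry, force a fresh one */
                memcpy(u->uuid, invalid_uuid, 16);
                u->invalidated = cpu_to_le32(get_seconds());
                u = NULL;
        }

        if (!u) {
                if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY)
                        return -ENOENT; /* dirty data, but its uuid entry is gone */
                u = uuid_find_empty(c);
                if (!u)
                        return -EINVAL; /* no room in the uuid array */
        }

        if (bch_is_zero(u->uuid, 16)) {
                /* fresh slot: record the device, persist CLEAN on disk */
                struct closure cl;
                closure_init_stack(&cl);

                memcpy(u->uuid, dc->sb.uuid, 16);
                memcpy(u->label, dc->sb.label, SB_LABEL_SIZE);
                bch_uuid_write(c);

                memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16);
                SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);

                bch_write_bdev_super(dc, &cl);
                closure_sync(&cl);
        } else {
                bch_uuid_write(c);      /* re-registration of a known device */
        }

        bcache_device_attach(&dc->disk, c, u - c->uuids);
        list_move(&dc->list, &c->cached_devs);
        calc_cached_dev_sectors(c);

        atomic_set(&dc->count, 1);      /* writeback's reference */

        down_write(&dc->writeback_lock);
        if (bch_cached_dev_writeback_start(dc)) {
                up_write(&dc->writeback_lock);
                return -ENOMEM;
        }

        if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
                bch_sectors_dirty_init(dc);
                atomic_set(&dc->has_dirty, 1);
                atomic_inc(&dc->count);
                bch_writeback_queue(dc);        /* start flushing dirty data */
        }

        bch_cached_dev_run(dc);
        bcache_device_link(&dc->disk, c, "bdev");

        up_write(&dc->writeback_lock);
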
1077 struct cached_dev *dc = container_of(kobj, struct cached_dev, in bch_cached_dev_release() local
1079 kfree(dc); in bch_cached_dev_release()
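
Lines 1077-1079 are the kobject release callback, the standard pattern for freeing a refcounted container: kfree() only runs once the embedded kobject's last reference is dropped. Sketch (the module_put() is an assumption, paired with a module get during init):

static void bch_cached_dev_release(struct kobject *kobj)
{
        struct cached_dev *dc = container_of(kobj, struct cached_dev,
                                             disk.kobj);
        kfree(dc);
        module_put(THIS_MODULE);
}
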
1085 struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); in cached_dev_free() local
1087 cancel_delayed_work_sync(&dc->writeback_rate_update); in cached_dev_free()
1088 if (!IS_ERR_OR_NULL(dc->writeback_thread)) in cached_dev_free()
1089 kthread_stop(dc->writeback_thread); in cached_dev_free()
1093 if (atomic_read(&dc->running)) in cached_dev_free()
1094 bd_unlink_disk_holder(dc->bdev, dc->disk.disk); in cached_dev_free()
1095 bcache_device_free(&dc->disk); in cached_dev_free()
1096 list_del(&dc->list); in cached_dev_free()
1100 if (!IS_ERR_OR_NULL(dc->bdev)) in cached_dev_free()
1101 blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); in cached_dev_free()
1105 kobject_put(&dc->disk.kobj); in cached_dev_free()
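
Lines 1085-1105 tear down in reverse order of setup: stop the writeback machinery first, unlink and free the generic device under bch_register_lock, release the exclusively opened backing bdev, and finally drop the kobject ref so bch_cached_dev_release() can kfree(). Sketch; the locking and wake_up(&unregister_wait) fill elided lines and are assumptions:

static void cached_dev_free(struct closure *cl)
{
        struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);

        cancel_delayed_work_sync(&dc->writeback_rate_update);
        if (!IS_ERR_OR_NULL(dc->writeback_thread))
                kthread_stop(dc->writeback_thread);

        mutex_lock(&bch_register_lock);

        if (atomic_read(&dc->running))
                bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
        bcache_device_free(&dc->disk);
        list_del(&dc->list);

        mutex_unlock(&bch_register_lock);

        if (!IS_ERR_OR_NULL(dc->bdev))
                blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

        wake_up(&unregister_wait);

        kobject_put(&dc->disk.kobj);
}
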
1110 struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); in cached_dev_flush() local
1111 struct bcache_device *d = &dc->disk; in cached_dev_flush()
1117 bch_cache_accounting_destroy(&dc->accounting); in cached_dev_flush()
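
Lines 1110-1117 are the first stage of shutdown, chained to cached_dev_free() through the device closure. Sketch, assuming the usual unlink-then-free split; continue_at() is the bcache closure idiom for handing the closure to the next stage on a workqueue:

static void cached_dev_flush(struct closure *cl)
{
        struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
        struct bcache_device *d = &dc->disk;

        mutex_lock(&bch_register_lock);
        bcache_device_unlink(d);
        mutex_unlock(&bch_register_lock);

        bch_cache_accounting_destroy(&dc->accounting);
        kobject_del(&d->kobj);

        continue_at(cl, cached_dev_free, system_wq);
}
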
1123 static int cached_dev_init(struct cached_dev *dc, unsigned block_size) in cached_dev_init() argument
1127 struct request_queue *q = bdev_get_queue(dc->bdev); in cached_dev_init()
1130 INIT_LIST_HEAD(&dc->list); in cached_dev_init()
1131 closure_init(&dc->disk.cl, NULL); in cached_dev_init()
1132 set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq); in cached_dev_init()
1133 kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype); in cached_dev_init()
1134 INIT_WORK(&dc->detach, cached_dev_detach_finish); in cached_dev_init()
1135 sema_init(&dc->sb_write_mutex, 1); in cached_dev_init()
1136 INIT_LIST_HEAD(&dc->io_lru); in cached_dev_init()
1137 spin_lock_init(&dc->io_lock); in cached_dev_init()
1138 bch_cache_accounting_init(&dc->accounting, &dc->disk.cl); in cached_dev_init()
1140 dc->sequential_cutoff = 4 << 20; in cached_dev_init()
1142 for (io = dc->io; io < dc->io + RECENT_IO; io++) { in cached_dev_init()
1143 list_add(&io->lru, &dc->io_lru); in cached_dev_init()
1144 hlist_add_head(&io->hash, dc->io_hash + RECENT_IO); in cached_dev_init()
1147 dc->disk.stripe_size = q->limits.io_opt >> 9; in cached_dev_init()
1149 if (dc->disk.stripe_size) in cached_dev_init()
1150 dc->partial_stripes_expensive = in cached_dev_init()
1153 ret = bcache_device_init(&dc->disk, block_size, in cached_dev_init()
1154 dc->bdev->bd_part->nr_sects - dc->sb.data_offset); in cached_dev_init()
1158 set_capacity(dc->disk.disk, in cached_dev_init()
1159 dc->bdev->bd_part->nr_sects - dc->sb.data_offset); in cached_dev_init()
1161 dc->disk.disk->queue->backing_dev_info.ra_pages = in cached_dev_init()
1162 max(dc->disk.disk->queue->backing_dev_info.ra_pages, in cached_dev_init()
1165 bch_cached_dev_request_init(dc); in cached_dev_init()
1166 bch_cached_dev_writeback_init(dc); in cached_dev_init()
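
Lines 1123-1166 initialize everything that does not need a cache set: lists, locks, the closure/kobject lifetime plumbing, the RECENT_IO tracker used for sequential-I/O detection, stripe geometry taken from the queue limits, and a device sized to the backing partition minus the superblock's data_offset. Condensed sketch; the __module_get() and the exact error unwinding are assumptions:

static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
{
        int ret;
        struct io *io;
        struct request_queue *q = bdev_get_queue(dc->bdev);

        __module_get(THIS_MODULE);      /* paired with the release callback */
        INIT_LIST_HEAD(&dc->list);
        closure_init(&dc->disk.cl, NULL);
        set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
        kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
        INIT_WORK(&dc->detach, cached_dev_detach_finish);
        sema_init(&dc->sb_write_mutex, 1);
        INIT_LIST_HEAD(&dc->io_lru);
        spin_lock_init(&dc->io_lock);
        bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);

        dc->sequential_cutoff = 4 << 20;        /* 4 MiB */

        /* all RECENT_IO trackers start unused: on the LRU, hashed into
         * the overflow bucket just past the real buckets */
        for (io = dc->io; io < dc->io + RECENT_IO; io++) {
                list_add(&io->lru, &dc->io_lru);
                hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
        }

        dc->disk.stripe_size = q->limits.io_opt >> 9;
        if (dc->disk.stripe_size)
                dc->partial_stripes_expensive =
                        q->limits.raid_partial_stripes_expensive;

        ret = bcache_device_init(&dc->disk, block_size,
                        dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
        if (ret)
                return ret;

        set_capacity(dc->disk.disk,
                     dc->bdev->bd_part->nr_sects - dc->sb.data_offset);

        /* inherit at least the backing device's readahead window */
        dc->disk.disk->queue->backing_dev_info.ra_pages =
                max(dc->disk.disk->queue->backing_dev_info.ra_pages,
                    q->backing_dev_info.ra_pages);

        bch_cached_dev_request_init(dc);
        bch_cached_dev_writeback_init(dc);
        return 0;
}
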
1174 struct cached_dev *dc) in register_bdev() argument
1180 memcpy(&dc->sb, sb, sizeof(struct cache_sb)); in register_bdev()
1181 dc->bdev = bdev; in register_bdev()
1182 dc->bdev->bd_holder = dc; in register_bdev()
1184 bio_init(&dc->sb_bio); in register_bdev()
1185 dc->sb_bio.bi_max_vecs = 1; in register_bdev()
1186 dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs; in register_bdev()
1187 dc->sb_bio.bi_io_vec[0].bv_page = sb_page; in register_bdev()
1190 if (cached_dev_init(dc, sb->block_size << 9)) in register_bdev()
1194 if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj, in register_bdev()
1197 if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj)) in register_bdev()
1202 list_add(&dc->list, &uncached_devices); in register_bdev()
1204 bch_cached_dev_attach(dc, c); in register_bdev()
1206 if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE || in register_bdev()
1207 BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) in register_bdev()
1208 bch_cached_dev_run(dc); in register_bdev()
1213 bcache_device_stop(&dc->disk); in register_bdev()
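
Lines 1174-1213 register a freshly probed backing device: copy the on-disk superblock into dc, claim bdev ownership, point the single-page sb_bio at the superblock page, then initialize, hook up sysfs, and either attach to an existing set or run passthrough. Condensed sketch; the error labels, get_page(), and the attach loop over bch_cache_sets are assumptions about the elided lines:

static void register_bdev(struct cache_sb *sb, struct page *sb_page,
                          struct block_device *bdev,
                          struct cached_dev *dc)
{
        char name[BDEVNAME_SIZE];
        struct cache_set *c;

        memcpy(&dc->sb, sb, sizeof(struct cache_sb));
        dc->bdev = bdev;
        dc->bdev->bd_holder = dc;

        /* one inline bvec is enough: the superblock lives in one page */
        bio_init(&dc->sb_bio);
        dc->sb_bio.bi_max_vecs = 1;
        dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs;
        dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
        get_page(sb_page);

        if (cached_dev_init(dc, sb->block_size << 9))
                goto err;

        if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
                        "bcache"))
                goto err;
        if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
                goto err;

        list_add(&dc->list, &uncached_devices);
        list_for_each_entry(c, &bch_cache_sets, list)
                bch_cached_dev_attach(dc, c);

        /* no cache set (yet): NONE/STALE devices may run passthrough */
        if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
            BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
                bch_cached_dev_run(dc);

        return;
err:
        pr_notice("error opening %s", bdevname(bdev, name));
        bcache_device_stop(&dc->disk);
}
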
1445 struct cached_dev *dc; in __cache_set_unregister() local
1454 dc = container_of(c->devices[i], in __cache_set_unregister()
1456 bch_cached_dev_detach(dc); in __cache_set_unregister()
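
Lines 1445-1456: when a cache set is unregistered, each attached backing device is recovered from its generic device slot with container_of() and detached rather than stopped, so it can keep running uncached. Fragment sketch from inside __cache_set_unregister(); the UUID_FLASH_ONLY() split and the loop bounds are assumptions:

        for (i = 0; i < c->nr_uuids; i++)
                if (c->devices[i]) {
                        if (!UUID_FLASH_ONLY(&c->uuids[i])) {
                                dc = container_of(c->devices[i],
                                                  struct cached_dev, disk);
                                bch_cached_dev_detach(dc);
                        } else {
                                bcache_device_stop(c->devices[i]);
                        }
                }
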
1572 struct cached_dev *dc, *t; in run_cache_set() local
1722 list_for_each_entry_safe(dc, t, &uncached_devices, list) in run_cache_set()
1723 bch_cached_dev_attach(dc, c); in run_cache_set()
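
Lines 1572-1723: once a cache set is up, it adopts every backing device still waiting on uncached_devices. The _safe iterator is what makes this correct: a successful attach does list_move(&dc->list, &c->cached_devs), unlinking dc from the very list being walked.

        /* near the end of run_cache_set() */
        list_for_each_entry_safe(dc, t, &uncached_devices, list)
                bch_cached_dev_attach(dc, c);
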
1936 struct cached_dev *dc, *t; in bch_is_open_backing() local
1939 list_for_each_entry_safe(dc, t, &c->cached_devs, list) in bch_is_open_backing()
1940 if (dc->bdev == bdev) in bch_is_open_backing()
1942 list_for_each_entry_safe(dc, t, &uncached_devices, list) in bch_is_open_backing()
1943 if (dc->bdev == bdev) in bch_is_open_backing()
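
Lines 1936-1943 answer "is this block device already a bcache backing device?", scanning both attached and unattached devices. Sketch, assuming the outer walk over bch_cache_sets that the per-set c->cached_devs access implies:

static bool bch_is_open_backing(struct block_device *bdev)
{
        struct cache_set *c, *tc;
        struct cached_dev *dc, *t;

        list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
                list_for_each_entry_safe(dc, t, &c->cached_devs, list)
                        if (dc->bdev == bdev)
                                return true;
        list_for_each_entry_safe(dc, t, &uncached_devices, list)
                if (dc->bdev == bdev)
                        return true;
        return false;
}
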
2009 struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); in register_bcache() local
2010 if (!dc) in register_bcache()
2014 register_bdev(sb, sb_page, bdev, dc); in register_bcache()
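
Lines 2009-2014, inside register_bcache(): a superblock that identifies a backing device gets a zeroed cached_dev and is handed to register_bdev() under the registration lock. Fragment sketch; SB_IS_BDEV() and the error-path label are assumptions:

        if (SB_IS_BDEV(sb)) {
                struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);

                if (!dc)
                        goto err_close;

                mutex_lock(&bch_register_lock);
                register_bdev(sb, sb_page, bdev, dc);
                mutex_unlock(&bch_register_lock);
        }
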
2050 struct cached_dev *dc, *tdc; in bcache_reboot() local
2063 list_for_each_entry_safe(dc, tdc, &uncached_devices, list) in bcache_reboot()
2064 bcache_device_stop(&dc->disk); in bcache_reboot()
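
Lines 2050-2064: a reboot notifier stops every bcache device before the system goes down, so superblocks land in a consistent state. Condensed sketch; the notifier boilerplate and the cache-set loop are assumptions around the matched lines:

static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
{
        if (code == SYS_DOWN ||
            code == SYS_HALT ||
            code == SYS_POWER_OFF) {
                struct cache_set *c, *tc;
                struct cached_dev *dc, *tdc;

                mutex_lock(&bch_register_lock);

                list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
                        bch_cache_set_stop(c);

                /* teardown is asynchronous; _safe matches the original */
                list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
                        bcache_device_stop(&dc->disk);

                mutex_unlock(&bch_register_lock);
        }

        return NOTIFY_DONE;
}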