Lines matching refs: dc (struct cached_dev *, bcache cached-device request path, drivers/md/bcache/request.c)
28 static unsigned cache_mode(struct cached_dev *dc, struct bio *bio) in cache_mode() argument
30 return BDEV_CACHE_MODE(&dc->sb); in cache_mode()
33 static bool verify(struct cached_dev *dc, struct bio *bio) in verify() argument
35 return dc->verify; in verify()
360 static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k) in iohash() argument
362 return &dc->io_hash[hash_64(k, RECENT_IO_BITS)]; in iohash()
365 static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) in check_should_bypass() argument
367 struct cache_set *c = dc->disk.c; in check_should_bypass()
368 unsigned mode = cache_mode(dc, bio); in check_should_bypass()
373 if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || in check_should_bypass()
389 if (bypass_torture_test(dc)) { in check_should_bypass()
396 if (!congested && !dc->sequential_cutoff) in check_should_bypass()
405 spin_lock(&dc->io_lock); in check_should_bypass()
407 hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash) in check_should_bypass()
412 i = list_first_entry(&dc->io_lru, struct io, lru); in check_should_bypass()
425 hlist_add_head(&i->hash, iohash(dc, i->last)); in check_should_bypass()
426 list_move_tail(&i->lru, &dc->io_lru); in check_should_bypass()
428 spin_unlock(&dc->io_lock); in check_should_bypass()
433 if (dc->sequential_cutoff && in check_should_bypass()
434 sectors >= dc->sequential_cutoff >> 9) { in check_should_bypass()
448 bch_mark_sectors_bypassed(c, dc, bio_sectors(bio)); in check_should_bypass()
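The check_should_bypass() excerpts above show the bypass decision: detached devices, torture-test bypass, congestion, and above all sequential streams longer than dc->sequential_cutoff cause the request to skip the cache. The sequential detection keeps a small hash (iohash over dc->io_hash) plus LRU of recent request end sectors, and extends a stream's byte count when a new request starts exactly where an earlier one ended. Below is a minimal user-space sketch of that idea only; the table size, hash constant, and names are illustrative, and the real function also handles locking, per-task tracking, congestion, and the bypassed-sectors statistics.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NBUCKETS 16                      /* illustrative; not the kernel's size */

struct stream {
    uint64_t next_sector;                /* sector where the stream would continue */
    uint64_t sequential;                 /* bytes seen so far in this stream */
    bool     used;
};

static struct stream table[NBUCKETS];

static unsigned bucket(uint64_t sector)
{
    /* Fibonacci-style fold of a 64-bit sector into a small bucket index. */
    return (unsigned)((sector * 0x9E3779B97F4A7C15ULL) >> 60) & (NBUCKETS - 1);
}

/* Returns true if this request should bypass the cache. */
static bool should_bypass(uint64_t sector, unsigned sectors, uint64_t cutoff_bytes)
{
    struct stream *s = &table[bucket(sector)];
    uint64_t seq = 0;

    /* Continue an existing stream only if it ends exactly where we start. */
    if (s->used && s->next_sector == sector)
        seq = s->sequential;

    seq += (uint64_t)sectors << 9;       /* 512-byte sectors -> bytes */

    /* Remember where this stream now ends, keyed by its new tail sector. */
    struct stream *t = &table[bucket(sector + sectors)];
    t->used        = true;
    t->next_sector = sector + sectors;
    t->sequential  = seq;

    return cutoff_bytes && seq >= cutoff_bytes;
}

int main(void)
{
    uint64_t cutoff = 4 << 20;           /* e.g. 4 MiB cutoff */
    uint64_t sector = 0;

    for (int i = 0; i < 6; i++) {
        bool bypass = should_bypass(sector, 2048, cutoff);   /* 1 MiB requests */
        printf("req %d at sector %llu: %s\n", i,
               (unsigned long long)sector, bypass ? "bypass" : "cache");
        sector += 2048;
    }
    return 0;
}

Running it shows the first few requests being cached and the stream bypassing the cache once the accumulated bytes reach the cutoff, which is the behaviour the listing's sequential_cutoff comparison implements.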
673 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); in cached_dev_bio_complete() local
676 cached_dev_put(dc); in cached_dev_bio_complete()
722 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); in cached_dev_read_done() local
745 if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data) in cached_dev_read_done()
746 bch_data_verify(dc, s->orig_bio); in cached_dev_read_done()
762 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); in cached_dev_read_done_bh() local
770 else if (s->iop.bio || verify(dc, &s->bio.bio)) in cached_dev_read_done_bh()
781 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); in cached_dev_cache_miss() local
793 reada = min_t(sector_t, dc->readahead >> 9, in cached_dev_cache_miss()
815 dc->disk.bio_split); in cached_dev_cache_miss()
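The cached_dev_cache_miss() lines show the miss path reading from the backing device and possibly extending the read with dc->readahead, clamped with min_t() so the extra sectors never run past the end of the device. A tiny sketch of that clamping arithmetic with hypothetical names (the real function also respects key boundaries and allocates a replacement key for the cache insert):

#include <stdint.h>
#include <stdio.h>

/* Simplified view of the readahead clamp on a cache miss: extend the
 * backing-device read by up to 'readahead_bytes', but never past the end
 * of the device. Assumes bio_sector + bio_sectors <= capacity_sectors. */
static uint64_t miss_read_sectors(uint64_t bio_sector, uint64_t bio_sectors,
                                  uint64_t readahead_bytes,
                                  uint64_t capacity_sectors)
{
    uint64_t end   = bio_sector + bio_sectors;
    uint64_t reada = readahead_bytes >> 9;            /* bytes -> 512-byte sectors */

    if (reada > capacity_sectors - end)               /* clamp at end of device */
        reada = capacity_sectors - end;

    return bio_sectors + reada;
}

int main(void)
{
    /* 8-sector read near the end of a 1000-sector device, 1 MiB readahead:
     * only 2 extra sectors fit, so the read becomes 10 sectors. */
    printf("%llu sectors\n",
           (unsigned long long)miss_read_sectors(990, 8, 1 << 20, 1000));
    return 0;
}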
848 static void cached_dev_read(struct cached_dev *dc, struct search *s) in cached_dev_read() argument
861 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); in cached_dev_write_complete() local
863 up_read_non_owner(&dc->writeback_lock); in cached_dev_write_complete()
867 static void cached_dev_write(struct cached_dev *dc, struct search *s) in cached_dev_write() argument
871 struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0); in cached_dev_write()
872 struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0); in cached_dev_write()
876 down_read_non_owner(&dc->writeback_lock); in cached_dev_write()
877 if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) { in cached_dev_write()
896 if (should_writeback(dc, s->orig_bio, in cached_dev_write()
897 cache_mode(dc, bio), in cached_dev_write()
908 blk_queue_discard(bdev_get_queue(dc->bdev))) in cached_dev_write()
911 bch_writeback_add(dc); in cached_dev_write()
917 dc->disk.bio_split); in cached_dev_write()
927 s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split); in cached_dev_write()
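The cached_dev_write() excerpts take dc->writeback_lock for read, check whether the write overlaps keys currently being flushed by the writeback thread, and then pick one of three paths: writeback into the cache (bch_writeback_add marks the device dirty), bypass straight to the backing device, or the default writethrough that clones the bio and writes both. The sketch below is a hypothetical condensation of that choice; the parameters stand in for bch_keybuf_check_overlapping(), the bypass flag set earlier by check_should_bypass(), and the should_writeback() heuristic, and it ignores the discard and flush special cases in the listing.

#include <stdbool.h>
#include <stdio.h>

enum write_path { PATH_BYPASS, PATH_WRITEBACK, PATH_WRITETHROUGH };

static enum write_path choose_write_path(bool overlaps_writeback_keys,
                                         bool bypass,
                                         bool wants_writeback)
{
    /* The range is being flushed by the writeback thread right now:
     * keep the new data in the cache so it stays coherent. */
    if (overlaps_writeback_keys)
        return PATH_WRITEBACK;

    /* should_writeback() may override a bypass for writes the cache
     * wants to absorb when the cache mode is writeback. */
    if (wants_writeback)
        return PATH_WRITEBACK;

    if (bypass)
        return PATH_BYPASS;               /* straight to the backing device */

    /* Default: writethrough - clone the bio, write cache and backing. */
    return PATH_WRITETHROUGH;
}

int main(void)
{
    printf("%d %d %d\n",
           choose_write_path(true,  false, false),   /* writeback    */
           choose_write_path(false, true,  false),   /* bypass       */
           choose_write_path(false, false, false));  /* writethrough */
    return 0;
}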
956 struct cached_dev *dc = container_of(d, struct cached_dev, disk); in cached_dev_make_request() local
961 bio->bi_bdev = dc->bdev; in cached_dev_make_request()
962 bio->bi_iter.bi_sector += dc->sb.data_offset; in cached_dev_make_request()
964 if (cached_dev_get(dc)) { in cached_dev_make_request()
977 s->iop.bypass = check_should_bypass(dc, bio); in cached_dev_make_request()
980 cached_dev_write(dc, s); in cached_dev_make_request()
982 cached_dev_read(dc, s); in cached_dev_make_request()
986 !blk_queue_discard(bdev_get_queue(dc->bdev))) in cached_dev_make_request()
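cached_dev_make_request() is the entry point shown above: it points the bio at the backing device and shifts it by dc->sb.data_offset (so the bcache superblock at the front of the backing device is skipped), then, if a reference on the cached_dev can be taken, records the bypass decision and routes writes to cached_dev_write() and reads to cached_dev_read(); otherwise the bio goes straight to the backing device, with discards completed immediately when the backing queue does not support them. A schematic user-space rendering of that flow with stand-in types, not the kernel's struct bio:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_bio {
    uint64_t sector;
    bool     is_write;
    bool     is_discard;
};

struct fake_cached_dev {
    uint64_t data_offset;       /* sectors reserved for the superblock */
    bool     attached;          /* models cached_dev_get() succeeding */
    bool     backing_discard;   /* backing queue supports discard */
};

static void submit_to_backing(struct fake_bio *b) { printf("backing    @%llu\n", (unsigned long long)b->sector); }
static void cached_read(struct fake_bio *b)       { printf("read path  @%llu\n", (unsigned long long)b->sector); }
static void cached_write(struct fake_bio *b)      { printf("write path @%llu\n", (unsigned long long)b->sector); }

static void make_request(struct fake_cached_dev *dc, struct fake_bio *bio)
{
    bio->sector += dc->data_offset;           /* remap past the superblock */

    if (dc->attached) {
        /* The real code allocates a search here and consults
         * check_should_bypass() before picking a path. */
        if (bio->is_write)
            cached_write(bio);
        else
            cached_read(bio);
    } else if (bio->is_discard && !dc->backing_discard) {
        /* nothing to discard against: complete without issuing it */
    } else {
        submit_to_backing(bio);
    }
}

int main(void)
{
    struct fake_cached_dev dc = { .data_offset = 16, .attached = true, .backing_discard = false };
    struct fake_bio r = { .sector = 100, .is_write = false };
    struct fake_bio w = { .sector = 200, .is_write = true  };

    make_request(&dc, &r);
    make_request(&dc, &w);
    return 0;
}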
996 struct cached_dev *dc = container_of(d, struct cached_dev, disk); in cached_dev_ioctl() local
997 return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg); in cached_dev_ioctl()
1003 struct cached_dev *dc = container_of(d, struct cached_dev, disk); in cached_dev_congested() local
1004 struct request_queue *q = bdev_get_queue(dc->bdev); in cached_dev_congested()
1010 if (cached_dev_get(dc)) { in cached_dev_congested()
1019 cached_dev_put(dc); in cached_dev_congested()
1025 void bch_cached_dev_request_init(struct cached_dev *dc) in bch_cached_dev_request_init() argument
1027 struct gendisk *g = dc->disk.disk; in bch_cached_dev_request_init()
1031 dc->disk.cache_miss = cached_dev_cache_miss; in bch_cached_dev_request_init()
1032 dc->disk.ioctl = cached_dev_ioctl; in bch_cached_dev_request_init()
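Finally, bch_cached_dev_request_init() wires the per-device hooks on dc->disk, pointing the generic bcache_device at the cached_dev implementations of cache_miss and ioctl (alongside the gendisk's request entry point). A small sketch of that style of ops wiring with function pointers and hypothetical names:

#include <stdio.h>

/* Hypothetical ops table mirroring the idea of dc->disk.cache_miss / ioctl. */
struct fake_bcache_device {
    int  (*cache_miss)(struct fake_bcache_device *d, unsigned sectors);
    long (*ioctl)(struct fake_bcache_device *d, unsigned cmd);
};

struct fake_cached_dev {
    struct fake_bcache_device disk;
};

static int  my_cache_miss(struct fake_bcache_device *d, unsigned sectors)
{
    (void)d;
    printf("miss: %u sectors\n", sectors);
    return 0;
}

static long my_ioctl(struct fake_bcache_device *d, unsigned cmd)
{
    (void)d;
    printf("ioctl 0x%x\n", cmd);
    return 0;
}

/* Analogue of bch_cached_dev_request_init(): point the generic device's
 * hooks at this device type's implementations. */
static void request_init(struct fake_cached_dev *dc)
{
    dc->disk.cache_miss = my_cache_miss;
    dc->disk.ioctl      = my_ioctl;
}

int main(void)
{
    struct fake_cached_dev dc;

    request_init(&dc);
    dc.disk.cache_miss(&dc.disk, 8);
    dc.disk.ioctl(&dc.disk, 0x1234);
    return 0;
}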