Lines matching refs: dc (drivers/md/bcache/request.c)

29 static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)  in cache_mode()  argument
31 return BDEV_CACHE_MODE(&dc->sb); in cache_mode()
34 static bool verify(struct cached_dev *dc, struct bio *bio) in verify() argument
36 return dc->verify; in verify()
366 static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k) in iohash() argument
368 return &dc->io_hash[hash_64(k, RECENT_IO_BITS)]; in iohash()
371 static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) in check_should_bypass() argument
373 struct cache_set *c = dc->disk.c; in check_should_bypass()
374 unsigned mode = cache_mode(dc, bio); in check_should_bypass()
379 if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || in check_should_bypass()
395 if (bypass_torture_test(dc)) { in check_should_bypass()
402 if (!congested && !dc->sequential_cutoff) in check_should_bypass()
411 spin_lock(&dc->io_lock); in check_should_bypass()
413 hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash) in check_should_bypass()
418 i = list_first_entry(&dc->io_lru, struct io, lru); in check_should_bypass()
431 hlist_add_head(&i->hash, iohash(dc, i->last)); in check_should_bypass()
432 list_move_tail(&i->lru, &dc->io_lru); in check_should_bypass()
434 spin_unlock(&dc->io_lock); in check_should_bypass()
439 if (dc->sequential_cutoff && in check_should_bypass()
440 sectors >= dc->sequential_cutoff >> 9) { in check_should_bypass()
454 bch_mark_sectors_bypassed(c, dc, bio_sectors(bio)); in check_should_bypass()
682 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); in cached_dev_bio_complete() local
685 cached_dev_put(dc); in cached_dev_bio_complete()
731 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); in cached_dev_read_done() local
754 if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data) in cached_dev_read_done()
755 bch_data_verify(dc, s->orig_bio); in cached_dev_read_done()
771 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); in cached_dev_read_done_bh() local
779 else if (s->iop.bio || verify(dc, &s->bio.bio)) in cached_dev_read_done_bh()
790 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); in cached_dev_cache_miss() local
802 reada = min_t(sector_t, dc->readahead >> 9, in cached_dev_cache_miss()
824 dc->disk.bio_split); in cached_dev_cache_miss()
857 static void cached_dev_read(struct cached_dev *dc, struct search *s) in cached_dev_read() argument
870 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); in cached_dev_write_complete() local
872 up_read_non_owner(&dc->writeback_lock); in cached_dev_write_complete()
876 static void cached_dev_write(struct cached_dev *dc, struct search *s) in cached_dev_write() argument
880 struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0); in cached_dev_write()
881 struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0); in cached_dev_write()
885 down_read_non_owner(&dc->writeback_lock); in cached_dev_write()
886 if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) { in cached_dev_write()
905 if (should_writeback(dc, s->orig_bio, in cached_dev_write()
906 cache_mode(dc, bio), in cached_dev_write()
917 blk_queue_discard(bdev_get_queue(dc->bdev))) in cached_dev_write()
920 bch_writeback_add(dc); in cached_dev_write()
926 dc->disk.bio_split); in cached_dev_write()
936 s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split); in cached_dev_write()
966 struct cached_dev *dc = container_of(d, struct cached_dev, disk); in cached_dev_make_request() local
971 bio->bi_bdev = dc->bdev; in cached_dev_make_request()
972 bio->bi_iter.bi_sector += dc->sb.data_offset; in cached_dev_make_request()
974 if (cached_dev_get(dc)) { in cached_dev_make_request()
987 s->iop.bypass = check_should_bypass(dc, bio); in cached_dev_make_request()
990 cached_dev_write(dc, s); in cached_dev_make_request()
992 cached_dev_read(dc, s); in cached_dev_make_request()
996 !blk_queue_discard(bdev_get_queue(dc->bdev))) in cached_dev_make_request()
1008 struct cached_dev *dc = container_of(d, struct cached_dev, disk); in cached_dev_ioctl() local
1009 return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg); in cached_dev_ioctl()
1015 struct cached_dev *dc = container_of(d, struct cached_dev, disk); in cached_dev_congested() local
1016 struct request_queue *q = bdev_get_queue(dc->bdev); in cached_dev_congested()
1022 if (cached_dev_get(dc)) { in cached_dev_congested()
1031 cached_dev_put(dc); in cached_dev_congested()
1037 void bch_cached_dev_request_init(struct cached_dev *dc) in bch_cached_dev_request_init() argument
1039 struct gendisk *g = dc->disk.disk; in bch_cached_dev_request_init()
1043 dc->disk.cache_miss = cached_dev_cache_miss; in bch_cached_dev_request_init()
1044 dc->disk.ioctl = cached_dev_ioctl; in bch_cached_dev_request_init()
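
The check_should_bypass() lines in the listing above show bcache's sequential-I/O detection: dc->io_hash and dc->io_lru (under dc->io_lock) remember where recent requests ended, a request that starts where an earlier one ended is treated as a continuation of that stream, and once the stream's accumulated length crosses dc->sequential_cutoff the request bypasses the cache. Below is a minimal userspace sketch of that idea only; the names (recent_io, should_bypass) and the small round-robin array standing in for the kernel's hash/LRU are illustrative assumptions, not the kernel code.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RECENT_IO 8                      /* a few recently-seen streams */

struct recent_io {
	uint64_t last;                   /* sector just past the previous request */
	uint64_t sequential;             /* bytes accumulated in this stream */
};

static struct recent_io recent[RECENT_IO];
static unsigned next_victim;             /* round-robin eviction, stand-in for the LRU */

static bool should_bypass(uint64_t sector, uint64_t bytes, uint64_t cutoff)
{
	struct recent_io *io = NULL;

	/* A request that starts where an earlier one ended continues that stream. */
	for (unsigned i = 0; i < RECENT_IO; i++)
		if (recent[i].last == sector) {
			io = &recent[i];
			break;
		}

	if (!io) {                       /* new stream: reuse an old slot */
		io = &recent[next_victim];
		next_victim = (next_victim + 1) % RECENT_IO;
		io->sequential = 0;
	}

	io->last = sector + (bytes >> 9);
	io->sequential += bytes;

	/* Long sequential streams go straight to the backing device. */
	return cutoff && io->sequential >= cutoff;
}

int main(void)
{
	uint64_t sector = 0;

	/* 16 back-to-back 64 KiB writes: the stream crosses a 512 KiB cutoff. */
	for (int i = 0; i < 16; i++) {
		bool bypass = should_bypass(sector, 64 * 1024, 512 * 1024);
		printf("request %2d at sector %6llu: %s\n", i,
		       (unsigned long long)sector, bypass ? "bypass" : "cache");
		sector += (64 * 1024) >> 9;
	}
	return 0;
}
```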
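
The cached_dev_write() lines show the write-path policy: a write overlapping keys queued in dc->writeback_keys must go to the cache and cannot bypass (so background writeback does not race with it); otherwise should_writeback() with cache_mode() chooses writeback (data to the cache only, marked dirty, bch_writeback_add()) over the bypass verdict from check_should_bypass(), and the remaining case is writethrough (the bio is cloned so cache and backing device are both written). The sketch below condenses that decision order; route_write() and its boolean parameters are hypothetical stand-ins for the kernel's checks, not part of bcache.

```c
#include <stdbool.h>
#include <stdio.h>

/* Possible destinations for a write, mirroring the three branches
 * visible in cached_dev_write() above. */
enum write_route {
	ROUTE_WRITEBACK,     /* cache only, marked dirty, written back later */
	ROUTE_WRITETHROUGH,  /* cache and backing device */
	ROUTE_BYPASS,        /* backing device only */
};

/*
 * Hypothetical condensation of cached_dev_write():
 *   overlaps_writeback_keys ~ bch_keybuf_check_overlapping(&dc->writeback_keys, ...)
 *   writeback_ok            ~ should_writeback(dc, bio, cache_mode(dc, bio), bypass)
 *   bypass                  ~ the verdict from check_should_bypass()
 */
static enum write_route route_write(bool overlaps_writeback_keys,
				    bool writeback_ok, bool bypass)
{
	if (overlaps_writeback_keys)
		return ROUTE_WRITEBACK;   /* must not race with in-flight writeback */
	if (writeback_ok)
		return ROUTE_WRITEBACK;   /* should_writeback() overrides a bypass hint */
	if (bypass)
		return ROUTE_BYPASS;
	return ROUTE_WRITETHROUGH;
}

int main(void)
{
	/* A long sequential write (bypass hint set) that should_writeback rejects... */
	printf("%d\n", route_write(false, false, true));   /* ROUTE_BYPASS */
	/* ...versus one that should_writeback accepts despite the hint. */
	printf("%d\n", route_write(false, true, true));    /* ROUTE_WRITEBACK */
	return 0;
}
```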