Lines Matching refs:bio

124 static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,  in dm_hook_bio()  argument
127 h->bi_end_io = bio->bi_end_io; in dm_hook_bio()
128 h->bi_private = bio->bi_private; in dm_hook_bio()
130 bio->bi_end_io = bi_end_io; in dm_hook_bio()
131 bio->bi_private = bi_private; in dm_hook_bio()
134 static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio) in dm_unhook_bio() argument
136 bio->bi_end_io = h->bi_end_io; in dm_unhook_bio()
137 bio->bi_private = h->bi_private; in dm_unhook_bio()
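
Pieced together from the matches above, the hook/unhook pair in full: dm_hook_bio() stashes a bio's completion callback and private data and substitutes the caller's, and dm_unhook_bio() puts them back. The two-field dm_hook_info layout is inferred from the fields used here; the listing as a whole uses the pre-4.8 bio API (bi_bdev, bi_rw, bi_error), and so do the sketches below.

struct dm_hook_info {
        bio_end_io_t *bi_end_io;
        void *bi_private;
};

static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
                        bio_end_io_t *bi_end_io, void *bi_private)
{
        /* Save the bio's original completion context... */
        h->bi_end_io = bio->bi_end_io;
        h->bi_private = bio->bi_private;

        /* ...and redirect completion to the caller's handler. */
        bio->bi_end_io = bi_end_io;
        bio->bi_private = bi_private;
}

static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
{
        bio->bi_end_io = h->bi_end_io;
        bio->bi_private = h->bi_private;
}
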
529 struct bio *bio, struct dm_bio_prison_cell *cell_prealloc, in bio_detain_range() argument
537 r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result); in bio_detain_range()
545 struct bio *bio, struct dm_bio_prison_cell *cell_prealloc, in bio_detain() argument
550 return bio_detain_range(cache, oblock, end, bio, in bio_detain()
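
A sketch of how bio_detain_range() plausibly completes. Only the dm_bio_detain() call and the signatures are confirmed by the listing; build_key(), the cell_free_fn typedef and the error-path hand-back of the preallocated cell are assumptions.

static int bio_detain_range(struct cache *cache,
                            dm_oblock_t oblock_begin, dm_oblock_t oblock_end,
                            struct bio *bio, struct dm_bio_prison_cell *cell_prealloc,
                            cell_free_fn free_fn, void *free_context,
                            struct dm_bio_prison_cell **cell_result)
{
        int r;
        struct dm_cell_key key;

        /* Assumed helper: maps the [begin, end) oblock range to a prison key. */
        build_key(oblock_begin, oblock_end, &key);

        r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result);
        if (r)
                /* Range already locked by someone else: return the unused cell. */
                free_fn(free_context, cell_prealloc);

        return r;
}
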
742 static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size) in get_per_bio_data() argument
744 struct per_bio_data *pb = dm_per_bio_data(bio, data_size); in get_per_bio_data()
749 static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size) in init_per_bio_data() argument
751 struct per_bio_data *pb = get_per_bio_data(bio, data_size); in init_per_bio_data()
754 pb->req_nr = dm_bio_get_target_bio_nr(bio); in init_per_bio_data()
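
The per-bio-data accessors sketched whole. dm_per_bio_data() and dm_bio_get_target_bio_nr() are real DM core calls; the BUG_ON and the tick/all_io_entry/len initialisations are assumptions based on how those fields are used elsewhere in the listing.

static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
{
        struct per_bio_data *pb = dm_per_bio_data(bio, data_size);
        BUG_ON(!pb);
        return pb;
}

static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
{
        struct per_bio_data *pb = get_per_bio_data(bio, data_size);

        pb->tick = false;                   /* assumed field, see check_if_tick_bio_needed() */
        pb->req_nr = dm_bio_get_target_bio_nr(bio);
        pb->all_io_entry = NULL;            /* assumed field, see inc_ds() */
        pb->len = 0;                        /* see accounted_begin() */

        return pb;
}
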
764 static void remap_to_origin(struct cache *cache, struct bio *bio) in remap_to_origin() argument
766 bio->bi_bdev = cache->origin_dev->bdev; in remap_to_origin()
769 static void remap_to_cache(struct cache *cache, struct bio *bio, in remap_to_cache() argument
772 sector_t bi_sector = bio->bi_iter.bi_sector; in remap_to_cache()
775 bio->bi_bdev = cache->cache_dev->bdev; in remap_to_cache()
777 bio->bi_iter.bi_sector = in remap_to_cache()
781 bio->bi_iter.bi_sector = in remap_to_cache()
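
The remap arithmetic, sketched assuming a block_size_is_power_of_two() predicate and a sectors_per_block_shift field: with a power-of-two block size the target sector is rebuilt by shift and mask, otherwise sector_div() splits the original sector into a block-relative offset.

static void remap_to_cache(struct cache *cache, struct bio *bio,
                           dm_cblock_t cblock)
{
        sector_t bi_sector = bio->bi_iter.bi_sector;
        sector_t block = from_cblock(cblock);   /* assumed converter */

        bio->bi_bdev = cache->cache_dev->bdev;
        if (!block_size_is_power_of_two(cache))
                bio->bi_iter.bi_sector =
                        (block * cache->sectors_per_block) +
                        sector_div(bi_sector, cache->sectors_per_block);
        else
                bio->bi_iter.bi_sector =
                        (block << cache->sectors_per_block_shift) |
                        (bi_sector & (cache->sectors_per_block - 1));
}

sector_div() divides bi_sector in place and returns the remainder, so the second operand of the addition is the offset within the block.
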
786 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) in check_if_tick_bio_needed() argument
790 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); in check_if_tick_bio_needed()
794 !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) { in check_if_tick_bio_needed()
801 static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio, in remap_to_origin_clear_discard() argument
804 check_if_tick_bio_needed(cache, bio); in remap_to_origin_clear_discard()
805 remap_to_origin(cache, bio); in remap_to_origin_clear_discard()
806 if (bio_data_dir(bio) == WRITE) in remap_to_origin_clear_discard()
810 static void remap_to_cache_dirty(struct cache *cache, struct bio *bio, in remap_to_cache_dirty() argument
813 check_if_tick_bio_needed(cache, bio); in remap_to_cache_dirty()
814 remap_to_cache(cache, bio, cblock); in remap_to_cache_dirty()
815 if (bio_data_dir(bio) == WRITE) { in remap_to_cache_dirty()
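
remap_to_cache_dirty() as it plausibly completes: writes dirty the cache block and clear any discard state for the origin block. set_dirty(), clear_discard() and oblock_to_dblock() are assumed helpers.

static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
                                 dm_oblock_t oblock, dm_cblock_t cblock)
{
        check_if_tick_bio_needed(cache, bio);
        remap_to_cache(cache, bio, cblock);
        if (bio_data_dir(bio) == WRITE) {
                set_dirty(cache, oblock, cblock);
                clear_discard(cache, oblock_to_dblock(cache, oblock));
        }
}
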
821 static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio) in get_bio_block() argument
823 sector_t block_nr = bio->bi_iter.bi_sector; in get_bio_block()
833 static int bio_triggers_commit(struct cache *cache, struct bio *bio) in bio_triggers_commit() argument
835 return bio->bi_rw & (REQ_FLUSH | REQ_FUA); in bio_triggers_commit()
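
get_bio_block(), assuming the same power-of-two split as above: it reduces a bio's start sector to an origin block number.

static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
{
        sector_t block_nr = bio->bi_iter.bi_sector;

        if (!block_size_is_power_of_two(cache))
                (void) sector_div(block_nr, cache->sectors_per_block);
        else
                block_nr >>= cache->sectors_per_block_shift;

        return to_oblock(block_nr);
}
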
842 static void inc_ds(struct cache *cache, struct bio *bio, in inc_ds() argument
846 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); in inc_ds()
854 static bool accountable_bio(struct cache *cache, struct bio *bio) in accountable_bio() argument
856 return ((bio->bi_bdev == cache->origin_dev->bdev) && in accountable_bio()
857 !(bio->bi_rw & REQ_DISCARD)); in accountable_bio()
860 static void accounted_begin(struct cache *cache, struct bio *bio) in accounted_begin() argument
863 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); in accounted_begin()
865 if (accountable_bio(cache, bio)) { in accounted_begin()
866 pb->len = bio_sectors(bio); in accounted_begin()
871 static void accounted_complete(struct cache *cache, struct bio *bio) in accounted_complete() argument
874 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); in accounted_complete()
879 static void accounted_request(struct cache *cache, struct bio *bio) in accounted_request() argument
881 accounted_begin(cache, bio); in accounted_request()
882 generic_make_request(bio); in accounted_request()
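
A sketch of the accounting pair, assuming a get_per_bio_data_size() helper and an iot_io_begin()/iot_io_end() in-flight tracker on cache->origin_tracker; only the accountable_bio() test and the pb->len bookkeeping are visible in the listing.

static void accounted_begin(struct cache *cache, struct bio *bio)
{
        size_t pb_data_size = get_per_bio_data_size(cache);
        struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

        pb->len = 0;
        if (accountable_bio(cache, bio)) {
                pb->len = bio_sectors(bio);
                iot_io_begin(&cache->origin_tracker, pb->len);  /* assumed tracker */
        }
}

static void accounted_complete(struct cache *cache, struct bio *bio)
{
        size_t pb_data_size = get_per_bio_data_size(cache);
        struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

        iot_io_end(&cache->origin_tracker, pb->len);
}
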
885 static void issue(struct cache *cache, struct bio *bio) in issue() argument
889 if (!bio_triggers_commit(cache, bio)) { in issue()
890 accounted_request(cache, bio); in issue()
900 bio_list_add(&cache->deferred_flush_bios, bio); in issue()
904 static void inc_and_issue(struct cache *cache, struct bio *bio, struct dm_bio_prison_cell *cell) in inc_and_issue() argument
906 inc_ds(cache, bio, cell); in inc_and_issue()
907 issue(cache, bio); in inc_and_issue()
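
issue() reassembled: bios that don't trigger a commit go straight out through accounted_request(); FLUSH/FUA bios are parked on deferred_flush_bios so the worker can batch one metadata commit for all of them. The cache->commit_requested flag is an assumption.

static void issue(struct cache *cache, struct bio *bio)
{
        unsigned long flags;

        if (!bio_triggers_commit(cache, bio)) {
                accounted_request(cache, bio);
                return;
        }

        /*
         * Batch bios that need a commit and let the worker thread
         * issue a single commit for the lot.
         */
        spin_lock_irqsave(&cache->lock, flags);
        cache->commit_requested = true;
        bio_list_add(&cache->deferred_flush_bios, bio);
        spin_unlock_irqrestore(&cache->lock, flags);
}
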
910 static void defer_writethrough_bio(struct cache *cache, struct bio *bio) in defer_writethrough_bio() argument
915 bio_list_add(&cache->deferred_writethrough_bios, bio); in defer_writethrough_bio()
921 static void writethrough_endio(struct bio *bio) in writethrough_endio() argument
923 struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT); in writethrough_endio()
925 dm_unhook_bio(&pb->hook_info, bio); in writethrough_endio()
927 if (bio->bi_error) { in writethrough_endio()
928 bio_endio(bio); in writethrough_endio()
932 dm_bio_restore(&pb->bio_details, bio); in writethrough_endio()
933 remap_to_cache(pb->cache, bio, pb->cblock); in writethrough_endio()
940 defer_writethrough_bio(pb->cache, bio); in writethrough_endio()
949 static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio, in remap_to_origin_then_cache() argument
952 struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT); in remap_to_origin_then_cache()
956 dm_hook_bio(&pb->hook_info, bio, writethrough_endio, NULL); in remap_to_origin_then_cache()
957 dm_bio_record(&pb->bio_details, bio); in remap_to_origin_then_cache()
959 remap_to_origin_clear_discard(pb->cache, bio, oblock); in remap_to_origin_then_cache()
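
The writethrough round trip, reassembled from the fragments: remap_to_origin_then_cache() records the bio, hooks its endio and sends it to the origin; on successful completion writethrough_endio() restores the recorded bio, remaps it at the cache device and defers it to the worker, since endio runs in interrupt context and cannot submit IO directly. Only the pb->cache/pb->cblock assignments are assumed (their use is confirmed by the endio).

static void writethrough_endio(struct bio *bio)
{
        struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);

        dm_unhook_bio(&pb->hook_info, bio);

        if (bio->bi_error) {
                bio_endio(bio);         /* origin write failed: complete with error */
                return;
        }

        dm_bio_restore(&pb->bio_details, bio);
        remap_to_cache(pb->cache, bio, pb->cblock);

        /* Can't issue from interrupt context; hand off to the worker. */
        defer_writethrough_bio(pb->cache, bio);
}

static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
                                       dm_oblock_t oblock, dm_cblock_t cblock)
{
        struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);

        pb->cache = cache;
        pb->cblock = cblock;
        dm_hook_bio(&pb->hook_info, bio, writethrough_endio, NULL);
        dm_bio_record(&pb->bio_details, bio);

        remap_to_origin_clear_discard(pb->cache, bio, oblock);
}
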
1071 static bool discard_or_flush(struct bio *bio) in discard_or_flush() argument
1073 return bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD); in discard_or_flush()
1285 static void overwrite_endio(struct bio *bio) in overwrite_endio() argument
1287 struct dm_cache_migration *mg = bio->bi_private; in overwrite_endio()
1290 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); in overwrite_endio()
1293 dm_unhook_bio(&pb->hook_info, bio); in overwrite_endio()
1295 if (bio->bi_error) in overwrite_endio()
1307 static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio) in issue_overwrite() argument
1310 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); in issue_overwrite()
1312 dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg); in issue_overwrite()
1313 remap_to_cache_dirty(mg->cache, bio, mg->new_oblock, mg->cblock); in issue_overwrite()
1319 accounted_request(mg->cache, bio); in issue_overwrite()
1322 static bool bio_writes_complete_block(struct cache *cache, struct bio *bio) in bio_writes_complete_block() argument
1324 return (bio_data_dir(bio) == WRITE) && in bio_writes_complete_block()
1325 (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); in bio_writes_complete_block()
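
overwrite_endio() sketched to completion. The listing confirms the unhook and the bi_error test; the mg->err/mg->requeue_holder flags, the completed_migrations list and the wake_worker() kick are assumptions about how the migration is handed back to the worker.

static void overwrite_endio(struct bio *bio)
{
        struct dm_cache_migration *mg = bio->bi_private;
        struct cache *cache = mg->cache;
        size_t pb_data_size = get_per_bio_data_size(cache);
        struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
        unsigned long flags;

        dm_unhook_bio(&pb->hook_info, bio);

        if (bio->bi_error)
                mg->err = true;         /* assumed flag */

        mg->requeue_holder = false;     /* assumed flag */

        spin_lock_irqsave(&cache->lock, flags);
        list_add_tail(&mg->list, &cache->completed_migrations);
        spin_unlock_irqrestore(&cache->lock, flags);

        wake_worker(cache);             /* assumed helper */
}
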
1334 static void calc_discard_block_range(struct cache *cache, struct bio *bio, in calc_discard_block_range() argument
1337 sector_t sb = bio->bi_iter.bi_sector; in calc_discard_block_range()
1338 sector_t se = bio_end_sector(bio); in calc_discard_block_range()
1351 struct bio *bio = mg->new_ocell->holder; in issue_discard() local
1354 calc_discard_block_range(cache, bio, &b, &e); in issue_discard()
1360 bio_endio(bio); in issue_discard()
1380 struct bio *bio = mg->new_ocell->holder; in issue_copy_or_discard() local
1385 !avoid && bio_writes_complete_block(cache, bio)) { in issue_copy_or_discard()
1386 issue_overwrite(mg, bio); in issue_copy_or_discard()
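
calc_discard_block_range() plausibly rounds the bio's start sector up and its end sector down to discard-block granularity, so only blocks the bio covers completely land in [*b, *e). dm_sector_div_up() is a real DM macro; block_div(), to_dblock() and discard_block_size are assumptions.

static void calc_discard_block_range(struct cache *cache, struct bio *bio,
                                     dm_dblock_t *b, dm_dblock_t *e)
{
        sector_t sb = bio->bi_iter.bi_sector;
        sector_t se = bio_end_sector(bio);

        *b = to_dblock(dm_sector_div_up(sb, cache->discard_block_size));

        if (se - sb < cache->discard_block_size)
                *e = *b;                /* too small to cover a whole block */
        else
                *e = to_dblock(block_div(se, cache->discard_block_size));
}
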
1595 static void defer_bio(struct cache *cache, struct bio *bio) in defer_bio() argument
1600 bio_list_add(&cache->deferred_bios, bio); in defer_bio()
1606 static void process_flush_bio(struct cache *cache, struct bio *bio) in process_flush_bio() argument
1609 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); in process_flush_bio()
1611 BUG_ON(bio->bi_iter.bi_size); in process_flush_bio()
1613 remap_to_origin(cache, bio); in process_flush_bio()
1615 remap_to_cache(cache, bio, 0); in process_flush_bio()
1622 issue(cache, bio); in process_flush_bio()
1626 struct bio *bio) in process_discard_bio() argument
1632 calc_discard_block_range(cache, bio, &b, &e); in process_discard_bio()
1634 bio_endio(bio); in process_discard_bio()
…r = bio_detain_range(cache, dblock_to_oblock(cache, b), dblock_to_oblock(cache, e), bio, cell_prealloc, … in process_discard_bio()
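
process_flush_bio() sketched whole: a flush carries no data (hence the BUG_ON on bi_size), so there is no block to remap by offset; pb->req_nr steers duplicate flushes to the origin or the cache device, and issue() applies the commit batching above. The req_nr test is an assumption based on init_per_bio_data().

static void process_flush_bio(struct cache *cache, struct bio *bio)
{
        size_t pb_data_size = get_per_bio_data_size(cache);
        struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

        BUG_ON(bio->bi_iter.bi_size);
        if (!pb->req_nr)
                remap_to_origin(cache, bio);
        else
                remap_to_cache(cache, bio, 0);

        /* Flushes target no particular block, so no inc_ds() here. */
        issue(cache, bio);
}
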
1655 static void inc_hit_counter(struct cache *cache, struct bio *bio) in inc_hit_counter() argument
1657 atomic_inc(bio_data_dir(bio) == READ ? in inc_hit_counter()
1661 static void inc_miss_counter(struct cache *cache, struct bio *bio) in inc_miss_counter() argument
1663 atomic_inc(bio_data_dir(bio) == READ ? in inc_miss_counter()
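
The counters reassembled; the four atomic stats fields are inferred from the READ/WRITE split.

static void inc_hit_counter(struct cache *cache, struct bio *bio)
{
        atomic_inc(bio_data_dir(bio) == READ ?
                   &cache->stats.read_hit : &cache->stats.write_hit);
}

static void inc_miss_counter(struct cache *cache, struct bio *bio)
{
        atomic_inc(bio_data_dir(bio) == READ ?
                   &cache->stats.read_miss : &cache->stats.write_miss);
}
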
1678 struct bio *bio; in inc_fn() local
1686 while ((bio = bio_list_pop(&cell->bios))) { in inc_fn()
1687 if (discard_or_flush(bio)) { in inc_fn()
1688 bio_list_add(&detail->unhandled_bios, bio); in inc_fn()
1692 if (bio_data_dir(bio) == WRITE) in inc_fn()
1695 bio_list_add(&detail->bios_for_issue, bio); in inc_fn()
1696 inc_ds(cache, bio, cell); in inc_fn()
1705 struct bio *bio; in remap_cell_to_origin_clear_discard() local
1728 while ((bio = bio_list_pop(&detail.bios_for_issue))) { in remap_cell_to_origin_clear_discard()
1729 remap_to_origin(cache, bio); in remap_cell_to_origin_clear_discard()
1730 issue(cache, bio); in remap_cell_to_origin_clear_discard()
1739 struct bio *bio; in remap_cell_to_cache_dirty() local
1764 while ((bio = bio_list_pop(&detail.bios_for_issue))) { in remap_cell_to_cache_dirty()
1765 remap_to_cache(cache, bio, cblock); in remap_cell_to_cache_dirty()
1766 issue(cache, bio); in remap_cell_to_cache_dirty()
1803 struct bio *bio = new_ocell->holder; in process_cell() local
1804 dm_oblock_t block = get_bio_block(cache, bio); in process_cell()
1810 fast_promotion = is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio); in process_cell()
1818 bio, &ool.locker, &lookup_result); in process_cell()
1827 inc_miss_counter(cache, bio); in process_cell()
1835 if (bio_data_dir(bio) == WRITE) { in process_cell()
1842 remap_to_origin_clear_discard(cache, bio, block); in process_cell()
1843 inc_and_issue(cache, bio, new_ocell); in process_cell()
1846 inc_hit_counter(cache, bio); in process_cell()
1848 if (bio_data_dir(bio) == WRITE && in process_cell()
1851 remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); in process_cell()
1852 inc_and_issue(cache, bio, new_ocell); in process_cell()
1863 inc_miss_counter(cache, bio); in process_cell()
1887 bio_io_error(bio); in process_cell()
1895 struct bio *bio) in process_bio() argument
1898 dm_oblock_t block = get_bio_block(cache, bio); in process_bio()
1905 r = bio_detain(cache, block, bio, cell_prealloc, in process_bio()
1957 struct bio *bio; in process_deferred_bios() local
1982 bio = bio_list_pop(&bios); in process_deferred_bios()
1984 if (bio->bi_rw & REQ_FLUSH) in process_deferred_bios()
1985 process_flush_bio(cache, bio); in process_deferred_bios()
1986 else if (bio->bi_rw & REQ_DISCARD) in process_deferred_bios()
1987 process_discard_bio(cache, &structs, bio); in process_deferred_bios()
1989 process_bio(cache, &structs, bio); in process_deferred_bios()
2037 struct bio *bio; in process_deferred_flush_bios() local
2049 while ((bio = bio_list_pop(&bios))) in process_deferred_flush_bios()
2050 submit_bios ? accounted_request(cache, bio) : bio_io_error(bio); in process_deferred_flush_bios()
2057 struct bio *bio; in process_deferred_writethrough_bios() local
2069 while ((bio = bio_list_pop(&bios))) in process_deferred_writethrough_bios()
2070 accounted_request(cache, bio); in process_deferred_writethrough_bios()
2217 struct bio *bio; in requeue_deferred_bios() local
2224 while ((bio = bio_list_pop(&bios))) { in requeue_deferred_bios()
2225 bio->bi_error = DM_ENDIO_REQUEUE; in requeue_deferred_bios()
2226 bio_endio(bio); in requeue_deferred_bios()
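
requeue_deferred_bios() sketched whole, assuming the usual splice-then-drain pattern for the shared deferred list; the DM_ENDIO_REQUEUE completion is confirmed by the listing.

static void requeue_deferred_bios(struct cache *cache)
{
        struct bio *bio;
        struct bio_list bios;

        bio_list_init(&bios);
        bio_list_merge(&bios, &cache->deferred_bios);
        bio_list_init(&cache->deferred_bios);

        while ((bio = bio_list_pop(&bios))) {
                bio->bi_error = DM_ENDIO_REQUEUE;  /* ask DM core to requeue */
                bio_endio(bio);
        }
}
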
3021 static int cache_map(struct dm_target *ti, struct bio *bio) in cache_map() argument
3027 dm_oblock_t block = get_bio_block(cache, bio); in cache_map()
3032 struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size); in cache_map()
3043 remap_to_origin(cache, bio); in cache_map()
3044 accounted_begin(cache, bio); in cache_map()
3048 if (discard_or_flush(bio)) { in cache_map()
3049 defer_bio(cache, bio); in cache_map()
3058 defer_bio(cache, bio); in cache_map()
3062 r = bio_detain(cache, block, bio, cell, in cache_map()
3067 defer_bio(cache, bio); in cache_map()
3072 fast_promotion = is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio); in cache_map()
3075 bio, &ool.locker, &lookup_result); in cache_map()
3084 bio_io_error(bio); in cache_map()
3092 if (bio_data_dir(bio) == WRITE) { in cache_map()
3101 inc_miss_counter(cache, bio); in cache_map()
3102 remap_to_origin_clear_discard(cache, bio, block); in cache_map()
3103 accounted_begin(cache, bio); in cache_map()
3104 inc_ds(cache, bio, cell); in cache_map()
3111 inc_hit_counter(cache, bio); in cache_map()
3112 if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) && in cache_map()
3114 remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); in cache_map()
3115 accounted_begin(cache, bio); in cache_map()
3116 inc_ds(cache, bio, cell); in cache_map()
3125 inc_miss_counter(cache, bio); in cache_map()
3131 bio_endio(bio); in cache_map()
3145 bio_io_error(bio); in cache_map()
3152 static int cache_end_io(struct dm_target *ti, struct bio *bio, int error) in cache_end_io() argument
3157 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); in cache_end_io()
3168 accounted_complete(cache, bio); in cache_end_io()
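
cache_end_io() sketched to completion. The accounted_complete() call is confirmed; the policy_tick()/need_tick_bio re-arm implied by pb->tick, and the check_for_quiesced_migrations() step, are assumptions.

static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
{
        struct cache *cache = ti->private;
        unsigned long flags;
        size_t pb_data_size = get_per_bio_data_size(cache);
        struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

        if (pb->tick) {
                policy_tick(cache->policy, false);  /* assumed call */

                spin_lock_irqsave(&cache->lock, flags);
                cache->need_tick_bio = true;        /* re-arm for the next bio */
                spin_unlock_irqrestore(&cache->lock, flags);
        }

        check_for_quiesced_migrations(cache, pb);   /* assumed helper */
        accounted_complete(cache, bio);

        return 0;
}
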