Lines matching refs: bio. Each entry gives the source line number, the matching line, and the enclosing function; the trailing "argument", "local" and "member" tags note how the identifier is used at that site. A short illustrative sketch of the recurring drain-and-reissue pattern follows the listing.

217 typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
338 struct bio *parent_bio) in __blkdev_issue_discard_async()
342 struct bio *bio; in __blkdev_issue_discard_async() local
359 bio = bio_alloc(gfp_mask, 1); in __blkdev_issue_discard_async()
360 if (!bio) in __blkdev_issue_discard_async()
363 bio_chain(bio, parent_bio); in __blkdev_issue_discard_async()
365 bio->bi_iter.bi_sector = sector; in __blkdev_issue_discard_async()
366 bio->bi_bdev = bdev; in __blkdev_issue_discard_async()
367 bio->bi_iter.bi_size = nr_sects << 9; in __blkdev_issue_discard_async()
369 submit_bio(type, bio); in __blkdev_issue_discard_async()
387 struct bio *parent_bio) in issue_discard()
409 static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio, in bio_detain() argument
421 r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result); in bio_detain()
558 struct bio *bio; in error_bio_list() local
560 while ((bio = bio_list_pop(bios))) { in error_bio_list()
561 bio->bi_error = error; in error_bio_list()
562 bio_endio(bio); in error_bio_list()
635 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio) in get_bio_block() argument
638 sector_t block_nr = bio->bi_iter.bi_sector; in get_bio_block()
651 static void get_bio_block_range(struct thin_c *tc, struct bio *bio, in get_bio_block_range() argument
655 sector_t b = bio->bi_iter.bi_sector; in get_bio_block_range()
656 sector_t e = b + (bio->bi_iter.bi_size >> SECTOR_SHIFT); in get_bio_block_range()
676 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block) in remap() argument
679 sector_t bi_sector = bio->bi_iter.bi_sector; in remap()
681 bio->bi_bdev = tc->pool_dev->bdev; in remap()
683 bio->bi_iter.bi_sector = in remap()
687 bio->bi_iter.bi_sector = (block * pool->sectors_per_block) + in remap()
691 static void remap_to_origin(struct thin_c *tc, struct bio *bio) in remap_to_origin() argument
693 bio->bi_bdev = tc->origin_dev->bdev; in remap_to_origin()
696 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio) in bio_triggers_commit() argument
698 return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && in bio_triggers_commit()
702 static void inc_all_io_entry(struct pool *pool, struct bio *bio) in inc_all_io_entry() argument
706 if (bio->bi_rw & REQ_DISCARD) in inc_all_io_entry()
709 h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in inc_all_io_entry()
713 static void issue(struct thin_c *tc, struct bio *bio) in issue() argument
718 if (!bio_triggers_commit(tc, bio)) { in issue()
719 generic_make_request(bio); in issue()
729 bio_io_error(bio); in issue()
738 bio_list_add(&pool->deferred_flush_bios, bio); in issue()
742 static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio) in remap_to_origin_and_issue() argument
744 remap_to_origin(tc, bio); in remap_to_origin_and_issue()
745 issue(tc, bio); in remap_to_origin_and_issue()
748 static void remap_and_issue(struct thin_c *tc, struct bio *bio, in remap_and_issue() argument
751 remap(tc, bio, block); in remap_and_issue()
752 issue(tc, bio); in remap_and_issue()
785 struct bio *bio; member
817 static void overwrite_endio(struct bio *bio) in overwrite_endio() argument
819 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in overwrite_endio()
822 bio->bi_end_io = m->saved_bi_end_io; in overwrite_endio()
824 m->err = bio->bi_error; in overwrite_endio()
854 static void thin_defer_bio(struct thin_c *tc, struct bio *bio);
866 struct bio *bio; in __inc_remap_and_issue_cell() local
868 while ((bio = bio_list_pop(&cell->bios))) { in __inc_remap_and_issue_cell()
869 if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) in __inc_remap_and_issue_cell()
870 bio_list_add(&info->defer_bios, bio); in __inc_remap_and_issue_cell()
872 inc_all_io_entry(info->tc->pool, bio); in __inc_remap_and_issue_cell()
879 bio_list_add(&info->issue_bios, bio); in __inc_remap_and_issue_cell()
888 struct bio *bio; in inc_remap_and_issue_cell() local
903 while ((bio = bio_list_pop(&info.defer_bios))) in inc_remap_and_issue_cell()
904 thin_defer_bio(tc, bio); in inc_remap_and_issue_cell()
906 while ((bio = bio_list_pop(&info.issue_bios))) in inc_remap_and_issue_cell()
907 remap_and_issue(info.tc, bio, block); in inc_remap_and_issue_cell()
921 struct bio *bio = m->bio; in process_prepared_mapping() local
947 if (bio) { in process_prepared_mapping()
949 bio_endio(bio); in process_prepared_mapping()
973 bio_io_error(m->bio); in process_prepared_discard_fail()
979 bio_endio(m->bio); in process_prepared_discard_success()
991 bio_io_error(m->bio); in process_prepared_discard_no_passdown()
993 bio_endio(m->bio); in process_prepared_discard_no_passdown()
1035 r = issue_discard(tc, b, e, m->bio); in passdown_double_checking_shared_status()
1058 r = issue_discard(tc, m->data_block, m->data_block + (m->virt_end - m->virt_begin), m->bio); in process_prepared_discard_passdown()
1064 m->bio->bi_error = r; in process_prepared_discard_passdown()
1065 bio_endio(m->bio); in process_prepared_discard_passdown()
1089 static int io_overlaps_block(struct pool *pool, struct bio *bio) in io_overlaps_block() argument
1091 return bio->bi_iter.bi_size == in io_overlaps_block()
1095 static int io_overwrites_block(struct pool *pool, struct bio *bio) in io_overwrites_block() argument
1097 return (bio_data_dir(bio) == WRITE) && in io_overwrites_block()
1098 io_overlaps_block(pool, bio); in io_overwrites_block()
1101 static void save_and_set_endio(struct bio *bio, bio_end_io_t **save, in save_and_set_endio() argument
1104 *save = bio->bi_end_io; in save_and_set_endio()
1105 bio->bi_end_io = fn; in save_and_set_endio()
1126 m->bio = NULL; in get_next_mapping()
1150 static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio, in remap_and_issue_overwrite() argument
1155 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in remap_and_issue_overwrite()
1158 m->bio = bio; in remap_and_issue_overwrite()
1159 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio); in remap_and_issue_overwrite()
1160 inc_all_io_entry(pool, bio); in remap_and_issue_overwrite()
1161 remap_and_issue(tc, bio, data_begin); in remap_and_issue_overwrite()
1170 struct dm_bio_prison_cell *cell, struct bio *bio, in schedule_copy() argument
1199 if (io_overwrites_block(pool, bio)) in schedule_copy()
1200 remap_and_issue_overwrite(tc, bio, data_dest, m); in schedule_copy()
1242 struct dm_bio_prison_cell *cell, struct bio *bio) in schedule_internal_copy() argument
1245 data_origin, data_dest, cell, bio, in schedule_internal_copy()
1251 struct bio *bio) in schedule_zero() argument
1269 if (io_overwrites_block(pool, bio)) in schedule_zero()
1270 remap_and_issue_overwrite(tc, bio, data_block, m); in schedule_zero()
1280 struct dm_bio_prison_cell *cell, struct bio *bio) in schedule_external_copy() argument
1288 virt_block, data_dest, cell, bio, in schedule_external_copy()
1293 virt_block, data_dest, cell, bio, in schedule_external_copy()
1297 schedule_zero(tc, virt_block, data_dest, cell, bio); in schedule_external_copy()
1403 static void retry_on_resume(struct bio *bio) in retry_on_resume() argument
1405 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in retry_on_resume()
1410 bio_list_add(&tc->retry_on_resume_list, bio); in retry_on_resume()
1437 static void handle_unserviceable_bio(struct pool *pool, struct bio *bio) in handle_unserviceable_bio() argument
1442 bio->bi_error = error; in handle_unserviceable_bio()
1443 bio_endio(bio); in handle_unserviceable_bio()
1445 retry_on_resume(bio); in handle_unserviceable_bio()
1450 struct bio *bio; in retry_bios_on_resume() local
1463 while ((bio = bio_list_pop(&bios))) in retry_bios_on_resume()
1464 retry_on_resume(bio); in retry_bios_on_resume()
1481 m->bio = virt_cell->holder; in process_discard_cell_no_passdown()
1491 static inline void __bio_inc_remaining(struct bio *bio) in __bio_inc_remaining() argument
1493 bio->bi_flags |= (1 << BIO_CHAIN); in __bio_inc_remaining()
1495 atomic_inc(&bio->__bi_remaining); in __bio_inc_remaining()
1499 struct bio *bio) in break_up_discard_bio() argument
1543 m->bio = bio; in break_up_discard_bio()
1553 __bio_inc_remaining(bio); in break_up_discard_bio()
1563 struct bio *bio = virt_cell->holder; in process_discard_cell_passdown() local
1564 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in process_discard_cell_passdown()
1572 break_up_discard_bio(tc, virt_cell->key.block_begin, virt_cell->key.block_end, bio); in process_discard_cell_passdown()
1579 bio_endio(bio); in process_discard_cell_passdown()
1582 static void process_discard_bio(struct thin_c *tc, struct bio *bio) in process_discard_bio() argument
1588 get_bio_block_range(tc, bio, &begin, &end); in process_discard_bio()
1593 bio_endio(bio); in process_discard_bio()
1598 if (bio_detain(tc->pool, &virt_key, bio, &virt_cell)) in process_discard_bio()
1611 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block, in break_sharing() argument
1624 data_block, cell, bio); in break_sharing()
1643 struct bio *bio; in __remap_and_issue_shared_cell() local
1645 while ((bio = bio_list_pop(&cell->bios))) { in __remap_and_issue_shared_cell()
1646 if ((bio_data_dir(bio) == WRITE) || in __remap_and_issue_shared_cell()
1647 (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA))) in __remap_and_issue_shared_cell()
1648 bio_list_add(&info->defer_bios, bio); in __remap_and_issue_shared_cell()
1650 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));; in __remap_and_issue_shared_cell()
1653 inc_all_io_entry(info->tc->pool, bio); in __remap_and_issue_shared_cell()
1654 bio_list_add(&info->issue_bios, bio); in __remap_and_issue_shared_cell()
1663 struct bio *bio; in remap_and_issue_shared_cell() local
1673 while ((bio = bio_list_pop(&info.defer_bios))) in remap_and_issue_shared_cell()
1674 thin_defer_bio(tc, bio); in remap_and_issue_shared_cell()
1676 while ((bio = bio_list_pop(&info.issue_bios))) in remap_and_issue_shared_cell()
1677 remap_and_issue(tc, bio, block); in remap_and_issue_shared_cell()
1680 static void process_shared_bio(struct thin_c *tc, struct bio *bio, in process_shared_bio() argument
1694 if (bio_detain(pool, &key, bio, &data_cell)) { in process_shared_bio()
1699 if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) { in process_shared_bio()
1700 break_sharing(tc, bio, block, &key, lookup_result, data_cell); in process_shared_bio()
1703 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in process_shared_bio()
1706 inc_all_io_entry(pool, bio); in process_shared_bio()
1707 remap_and_issue(tc, bio, lookup_result->block); in process_shared_bio()
1714 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block, in provision_block() argument
1724 if (!bio->bi_iter.bi_size) { in provision_block()
1725 inc_all_io_entry(pool, bio); in provision_block()
1728 remap_and_issue(tc, bio, 0); in provision_block()
1735 if (bio_data_dir(bio) == READ) { in provision_block()
1736 zero_fill_bio(bio); in provision_block()
1738 bio_endio(bio); in provision_block()
1746 schedule_external_copy(tc, block, data_block, cell, bio); in provision_block()
1748 schedule_zero(tc, block, data_block, cell, bio); in provision_block()
1767 struct bio *bio = cell->holder; in process_cell() local
1768 dm_block_t block = get_bio_block(tc, bio); in process_cell()
1780 process_shared_bio(tc, bio, block, &lookup_result, cell); in process_cell()
1782 inc_all_io_entry(pool, bio); in process_cell()
1783 remap_and_issue(tc, bio, lookup_result.block); in process_cell()
1789 if (bio_data_dir(bio) == READ && tc->origin_dev) { in process_cell()
1790 inc_all_io_entry(pool, bio); in process_cell()
1793 if (bio_end_sector(bio) <= tc->origin_size) in process_cell()
1794 remap_to_origin_and_issue(tc, bio); in process_cell()
1796 else if (bio->bi_iter.bi_sector < tc->origin_size) { in process_cell()
1797 zero_fill_bio(bio); in process_cell()
1798 bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT; in process_cell()
1799 remap_to_origin_and_issue(tc, bio); in process_cell()
1802 zero_fill_bio(bio); in process_cell()
1803 bio_endio(bio); in process_cell()
1806 provision_block(tc, bio, block, cell); in process_cell()
1813 bio_io_error(bio); in process_cell()
1818 static void process_bio(struct thin_c *tc, struct bio *bio) in process_bio() argument
1821 dm_block_t block = get_bio_block(tc, bio); in process_bio()
1830 if (bio_detain(pool, &key, bio, &cell)) in process_bio()
1836 static void __process_bio_read_only(struct thin_c *tc, struct bio *bio, in __process_bio_read_only() argument
1840 int rw = bio_data_dir(bio); in __process_bio_read_only()
1841 dm_block_t block = get_bio_block(tc, bio); in __process_bio_read_only()
1847 if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) { in __process_bio_read_only()
1848 handle_unserviceable_bio(tc->pool, bio); in __process_bio_read_only()
1852 inc_all_io_entry(tc->pool, bio); in __process_bio_read_only()
1853 remap_and_issue(tc, bio, lookup_result.block); in __process_bio_read_only()
1863 handle_unserviceable_bio(tc->pool, bio); in __process_bio_read_only()
1868 inc_all_io_entry(tc->pool, bio); in __process_bio_read_only()
1869 remap_to_origin_and_issue(tc, bio); in __process_bio_read_only()
1873 zero_fill_bio(bio); in __process_bio_read_only()
1874 bio_endio(bio); in __process_bio_read_only()
1882 bio_io_error(bio); in __process_bio_read_only()
1887 static void process_bio_read_only(struct thin_c *tc, struct bio *bio) in process_bio_read_only() argument
1889 __process_bio_read_only(tc, bio, NULL); in process_bio_read_only()
1897 static void process_bio_success(struct thin_c *tc, struct bio *bio) in process_bio_success() argument
1899 bio_endio(bio); in process_bio_success()
1902 static void process_bio_fail(struct thin_c *tc, struct bio *bio) in process_bio_fail() argument
1904 bio_io_error(bio); in process_bio_fail()
1930 static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio) in __thin_bio_rb_add() argument
1934 sector_t bi_sector = bio->bi_iter.bi_sector; in __thin_bio_rb_add()
1948 pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in __thin_bio_rb_add()
1957 struct bio *bio; in __extract_sorted_bios() local
1961 bio = thin_bio(pbd); in __extract_sorted_bios()
1963 bio_list_add(&tc->deferred_bio_list, bio); in __extract_sorted_bios()
1972 struct bio *bio; in __sort_thin_deferred_bios() local
1980 while ((bio = bio_list_pop(&bios))) in __sort_thin_deferred_bios()
1981 __thin_bio_rb_add(tc, bio); in __sort_thin_deferred_bios()
1995 struct bio *bio; in process_thin_deferred_bios() local
2022 while ((bio = bio_list_pop(&bios))) { in process_thin_deferred_bios()
2030 bio_list_add(&tc->deferred_bio_list, bio); in process_thin_deferred_bios()
2036 if (bio->bi_rw & REQ_DISCARD) in process_thin_deferred_bios()
2037 pool->process_discard(tc, bio); in process_thin_deferred_bios()
2039 pool->process_bio(tc, bio); in process_thin_deferred_bios()
2173 struct bio *bio; in process_deferred_bios() local
2199 while ((bio = bio_list_pop(&bios))) in process_deferred_bios()
2200 bio_io_error(bio); in process_deferred_bios()
2205 while ((bio = bio_list_pop(&bios))) in process_deferred_bios()
2206 generic_make_request(bio); in process_deferred_bios()
2487 static void thin_defer_bio(struct thin_c *tc, struct bio *bio) in thin_defer_bio() argument
2493 bio_list_add(&tc->deferred_bio_list, bio); in thin_defer_bio()
2499 static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio) in thin_defer_bio_with_throttle() argument
2504 thin_defer_bio(tc, bio); in thin_defer_bio_with_throttle()
2522 static void thin_hook_bio(struct thin_c *tc, struct bio *bio) in thin_hook_bio() argument
2524 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in thin_hook_bio()
2536 static int thin_bio_map(struct dm_target *ti, struct bio *bio) in thin_bio_map() argument
2540 dm_block_t block = get_bio_block(tc, bio); in thin_bio_map()
2546 thin_hook_bio(tc, bio); in thin_bio_map()
2549 bio->bi_error = DM_ENDIO_REQUEUE; in thin_bio_map()
2550 bio_endio(bio); in thin_bio_map()
2555 bio_io_error(bio); in thin_bio_map()
2559 if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) { in thin_bio_map()
2560 thin_defer_bio_with_throttle(tc, bio); in thin_bio_map()
2569 if (bio_detain(tc->pool, &key, bio, &virt_cell)) in thin_bio_map()
2599 if (bio_detain(tc->pool, &key, bio, &data_cell)) { in thin_bio_map()
2604 inc_all_io_entry(tc->pool, bio); in thin_bio_map()
2608 remap(tc, bio, result.block); in thin_bio_map()
2622 bio_io_error(bio); in thin_bio_map()
3228 static int pool_map(struct dm_target *ti, struct bio *bio) in pool_map() argument
3239 bio->bi_bdev = pt->data_dev->bdev; in pool_map()
4095 static int thin_map(struct dm_target *ti, struct bio *bio) in thin_map() argument
4097 bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector); in thin_map()
4099 return thin_bio_map(ti, bio); in thin_map()
4102 static int thin_endio(struct dm_target *ti, struct bio *bio, int err) in thin_endio() argument
4105 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in thin_endio()
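
Many of the sites above repeat one pattern: bios parked on a bio_list (in a prison cell or on a deferred list) are popped one at a time and either handed back to the worker or remapped to the pool's data device and reissued. The sketch below is illustrative only, not the driver's actual code; it assumes the remap_and_issue() and thin_defer_bio() helpers and the bi_rw request flags exactly as they appear in the listing.

	/*
	 * Illustrative sketch (not dm-thin's actual code): the drain-and-reissue
	 * loop seen in __inc_remap_and_issue_cell() / inc_remap_and_issue_cell()
	 * above.  FLUSH, FUA and DISCARD bios need the worker thread, so they are
	 * re-deferred; ordinary reads and writes are remapped to the mapped data
	 * block and sent straight down.
	 */
	static void drain_and_reissue(struct thin_c *tc, struct bio_list *parked,
				      dm_block_t block)
	{
		struct bio *bio;

		while ((bio = bio_list_pop(parked))) {
			if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA))
				thin_defer_bio(tc, bio);	 /* worker path */
			else
				remap_and_issue(tc, bio, block); /* remap() + issue() */
		}
	}

inc_remap_and_issue_cell() (lines 888-907) and remap_and_issue_shared_cell() (lines 1663-1677) both take this shape; the shared-cell variant additionally defers writes, since a write to a shared block must first break sharing (break_sharing(), line 1611).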