Lines matching refs:bio in drivers/md/dm-thin.c
209 typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
327 static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio, in bio_detain() argument
339 r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result); in bio_detain()
475 struct bio *bio; in error_bio_list() local
477 while ((bio = bio_list_pop(bios))) in error_bio_list()
478 bio_endio(bio, error); in error_bio_list()
550 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio) in get_bio_block() argument
553 sector_t block_nr = bio->bi_iter.bi_sector; in get_bio_block()
563 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block) in remap() argument
566 sector_t bi_sector = bio->bi_iter.bi_sector; in remap()
568 bio->bi_bdev = tc->pool_dev->bdev; in remap()
570 bio->bi_iter.bi_sector = in remap()
574 bio->bi_iter.bi_sector = (block * pool->sectors_per_block) + in remap()
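
The two helpers above are the virtual-block mapping in both directions: get_bio_block() divides the bio's start sector by the pool's block size to find the virtual block, and remap() rebuilds a pool-device sector from a data block plus the offset within the block, taking a shift/mask fast path when the block size is a power of two (the two branches visible at 570 and 574). A minimal userspace sketch of that arithmetic, with hypothetical names modeled on the pool fields in the listing:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;
    typedef uint64_t dm_block_t;

    /* Hypothetical model of the pool geometry fields used above. */
    struct pool_geom {
        sector_t sectors_per_block;
        int      sectors_per_block_shift; /* valid for power-of-two sizes */
    };

    static int power_of_two(sector_t x)
    {
        return x && !(x & (x - 1));
    }

    /* get_bio_block(): which virtual block does this sector fall in? */
    static dm_block_t sector_to_block(const struct pool_geom *g, sector_t s)
    {
        if (power_of_two(g->sectors_per_block))
            return s >> g->sectors_per_block_shift;
        return s / g->sectors_per_block;
    }

    /* remap(): rebuild a pool-device sector from block number + offset. */
    static sector_t block_to_sector(const struct pool_geom *g, dm_block_t b,
                                    sector_t orig_sector)
    {
        if (power_of_two(g->sectors_per_block))
            return (b << g->sectors_per_block_shift) |
                   (orig_sector & (g->sectors_per_block - 1));
        return b * g->sectors_per_block + orig_sector % g->sectors_per_block;
    }

    int main(void)
    {
        struct pool_geom g = { .sectors_per_block = 128,
                               .sectors_per_block_shift = 7 };
        sector_t s = 1000;
        dm_block_t blk = sector_to_block(&g, s);

        printf("sector %llu -> block %llu -> pool sector %llu\n",
               (unsigned long long)s, (unsigned long long)blk,
               (unsigned long long)block_to_sector(&g, blk, s));
        return 0;
    }
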
578 static void remap_to_origin(struct thin_c *tc, struct bio *bio) in remap_to_origin() argument
580 bio->bi_bdev = tc->origin_dev->bdev; in remap_to_origin()
583 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio) in bio_triggers_commit() argument
585 return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && in bio_triggers_commit()
589 static void inc_all_io_entry(struct pool *pool, struct bio *bio) in inc_all_io_entry() argument
593 if (bio->bi_rw & REQ_DISCARD) in inc_all_io_entry()
596 h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in inc_all_io_entry()
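
inc_all_io_entry() reaches its per-bio context through dm_per_bio_data(), which returns the private data area device-mapper allocates alongside each cloned bio; dm-thin keeps its struct dm_thin_endio_hook there. A rough userspace model of the idea follows; placing the hook immediately in front of the bio is an illustrative layout, not DM core's exact one:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stddef.h>

    struct bio { unsigned long sector; };  /* stand-in */
    struct endio_hook { int tracked; };    /* stand-in for dm_thin_endio_hook */

    /* Illustrative layout: per-bio data sits just in front of the bio. */
    struct bio_with_hook {
        struct endio_hook hook;
        struct bio bio;
    };

    /* Model of dm_per_bio_data(): step back from the bio to its data. */
    static struct endio_hook *per_bio_data(struct bio *bio)
    {
        struct bio_with_hook *c = (struct bio_with_hook *)
            ((char *)bio - offsetof(struct bio_with_hook, bio));
        return &c->hook;
    }

    int main(void)
    {
        struct bio_with_hook *c = calloc(1, sizeof(*c));

        if (!c)
            return 1;
        per_bio_data(&c->bio)->tracked = 1; /* as inc_all_io_entry() would */
        printf("tracked=%d\n", per_bio_data(&c->bio)->tracked);
        free(c);
        return 0;
    }
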
600 static void issue(struct thin_c *tc, struct bio *bio) in issue() argument
605 if (!bio_triggers_commit(tc, bio)) { in issue()
606 generic_make_request(bio); in issue()
616 bio_io_error(bio); in issue()
625 bio_list_add(&pool->deferred_flush_bios, bio); in issue()
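
bio_triggers_commit() marks FLUSH/FUA bios whose guarantees only hold once the pool metadata is committed, so issue() submits ordinary bios straight away and parks the rest on pool->deferred_flush_bios; the worker commits and then drains that list with generic_make_request() (lines 1981-1988 below). A compressed userspace sketch of the gate, with the flag bits as stand-ins and the commit itself stubbed out:

    #include <stdio.h>

    #define REQ_FLUSH (1u << 0)  /* stand-ins for the bi_rw flag bits */
    #define REQ_FUA   (1u << 1)

    struct bio { unsigned rw; struct bio *next; };

    static struct bio *deferred_flush_bios; /* pool->deferred_flush_bios */
    static int metadata_dirty = 1;          /* stand-in for "commit needed" */

    static void submit(struct bio *bio) { printf("submit rw=%#x\n", bio->rw); }

    static int bio_triggers_commit(struct bio *bio)
    {
        return (bio->rw & (REQ_FLUSH | REQ_FUA)) && metadata_dirty;
    }

    static void issue(struct bio *bio)
    {
        if (!bio_triggers_commit(bio)) {
            submit(bio);                 /* generic_make_request() */
            return;
        }
        bio->next = deferred_flush_bios; /* park until the commit lands */
        deferred_flush_bios = bio;
    }

    /* Worker side: commit the metadata, then release the parked bios. */
    static void process_deferred(void)
    {
        metadata_dirty = 0;              /* stand-in for commit() succeeding */
        while (deferred_flush_bios) {
            struct bio *bio = deferred_flush_bios;
            deferred_flush_bios = bio->next;
            submit(bio);
        }
    }

    int main(void)
    {
        struct bio a = { .rw = 0 }, b = { .rw = REQ_FUA };
        issue(&a);          /* goes straight out */
        issue(&b);          /* parked: FUA with uncommitted metadata */
        process_deferred(); /* commit, then submit the FUA bio */
        return 0;
    }
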
629 static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio) in remap_to_origin_and_issue() argument
631 remap_to_origin(tc, bio); in remap_to_origin_and_issue()
632 issue(tc, bio); in remap_to_origin_and_issue()
635 static void remap_and_issue(struct thin_c *tc, struct bio *bio, in remap_and_issue() argument
638 remap(tc, bio, block); in remap_and_issue()
639 issue(tc, bio); in remap_and_issue()
672 struct bio *bio; member
704 static void overwrite_endio(struct bio *bio, int err) in overwrite_endio() argument
706 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in overwrite_endio()
739 static void thin_defer_bio(struct thin_c *tc, struct bio *bio);
751 struct bio *bio; in __inc_remap_and_issue_cell() local
753 while ((bio = bio_list_pop(&cell->bios))) { in __inc_remap_and_issue_cell()
754 if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) in __inc_remap_and_issue_cell()
755 bio_list_add(&info->defer_bios, bio); in __inc_remap_and_issue_cell()
757 inc_all_io_entry(info->tc->pool, bio); in __inc_remap_and_issue_cell()
764 bio_list_add(&info->issue_bios, bio); in __inc_remap_and_issue_cell()
773 struct bio *bio; in inc_remap_and_issue_cell() local
788 while ((bio = bio_list_pop(&info.defer_bios))) in inc_remap_and_issue_cell()
789 thin_defer_bio(tc, bio); in inc_remap_and_issue_cell()
791 while ((bio = bio_list_pop(&info.issue_bios))) in inc_remap_and_issue_cell()
792 remap_and_issue(info.tc, bio, block); in inc_remap_and_issue_cell()
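
__inc_remap_and_issue_cell() drains a prison cell and partitions its bios: discard/flush/FUA bios are re-deferred to the worker via thin_defer_bio(), everything else gets its in-flight entry bumped and is remapped and issued as a batch. A self-contained sketch of the same partition over a minimal head/tail list modeled on the kernel's struct bio_list:

    #include <stdio.h>

    #define REQ_DISCARD (1u << 0)  /* stand-ins for the bi_rw bits tested above */
    #define REQ_FLUSH   (1u << 1)
    #define REQ_FUA     (1u << 2)

    struct bio { unsigned rw; struct bio *next; };

    /* Minimal model of struct bio_list (head/tail singly linked). */
    struct bio_list { struct bio *head, *tail; };

    static void bio_list_add(struct bio_list *l, struct bio *bio)
    {
        bio->next = NULL;
        if (l->tail)
            l->tail->next = bio;
        else
            l->head = bio;
        l->tail = bio;
    }

    static struct bio *bio_list_pop(struct bio_list *l)
    {
        struct bio *bio = l->head;
        if (bio) {
            l->head = bio->next;
            if (!l->head)
                l->tail = NULL;
        }
        return bio;
    }

    /* Partition a cell's bios: special ops re-deferred, the rest issued. */
    static void partition(struct bio_list *cell, struct bio_list *defer,
                          struct bio_list *issue)
    {
        struct bio *bio;

        while ((bio = bio_list_pop(cell))) {
            if (bio->rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA))
                bio_list_add(defer, bio);
            else
                bio_list_add(issue, bio); /* after inc_all_io_entry() */
        }
    }

    int main(void)
    {
        struct bio a = { .rw = 0 }, b = { .rw = REQ_DISCARD };
        struct bio_list cell = { 0 }, defer = { 0 }, issue = { 0 };

        bio_list_add(&cell, &a);
        bio_list_add(&cell, &b);
        partition(&cell, &defer, &issue);
        printf("deferred: %d, issued: %d\n", defer.head == &b, issue.head == &a);
        return 0;
    }
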
797 if (m->bio) { in process_prepared_mapping_fail()
798 m->bio->bi_end_io = m->saved_bi_end_io; in process_prepared_mapping_fail()
799 atomic_inc(&m->bio->bi_remaining); in process_prepared_mapping_fail()
810 struct bio *bio; in process_prepared_mapping() local
813 bio = m->bio; in process_prepared_mapping()
814 if (bio) { in process_prepared_mapping()
815 bio->bi_end_io = m->saved_bi_end_io; in process_prepared_mapping()
816 atomic_inc(&bio->bi_remaining); in process_prepared_mapping()
842 if (bio) { in process_prepared_mapping()
844 bio_endio(bio, 0); in process_prepared_mapping()
860 bio_io_error(m->bio); in process_prepared_discard_fail()
870 inc_all_io_entry(tc->pool, m->bio); in process_prepared_discard_passdown()
876 remap_and_issue(tc, m->bio, m->data_block); in process_prepared_discard_passdown()
880 bio_endio(m->bio, 0); in process_prepared_discard_passdown()
882 remap_and_issue(tc, m->bio, m->data_block); in process_prepared_discard_passdown()
885 bio_endio(m->bio, 0); in process_prepared_discard_passdown()
921 static int io_overlaps_block(struct pool *pool, struct bio *bio) in io_overlaps_block() argument
923 return bio->bi_iter.bi_size == in io_overlaps_block()
927 static int io_overwrites_block(struct pool *pool, struct bio *bio) in io_overwrites_block() argument
929 return (bio_data_dir(bio) == WRITE) && in io_overwrites_block()
930 io_overlaps_block(pool, bio); in io_overwrites_block()
933 static void save_and_set_endio(struct bio *bio, bio_end_io_t **save, in save_and_set_endio() argument
936 *save = bio->bi_end_io; in save_and_set_endio()
937 bio->bi_end_io = fn; in save_and_set_endio()
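
save_and_set_endio() is the hook half of an endio trampoline: the original bi_end_io is saved in the mapping, overwrite_endio() fires when the data write completes, and process_prepared_mapping() restores the saved pointer (lines 814-816 above) before ending the bio. The pattern in miniature, with a plain function pointer standing in for bio_end_io_t:

    #include <stdio.h>

    struct bio;
    typedef void (*end_io_t)(struct bio *, int);

    struct bio { end_io_t bi_end_io; };

    static void original_endio(struct bio *bio, int err)
    {
        printf("original endio, err=%d\n", err);
    }

    static void overwrite_endio(struct bio *bio, int err)
    {
        /* In dm-thin this records err and kicks the pool worker. */
        printf("overwrite endio intercepted, err=%d\n", err);
    }

    /* save_and_set_endio(): remember the old callback, install ours. */
    static void save_and_set_endio(struct bio *bio, end_io_t *save, end_io_t fn)
    {
        *save = bio->bi_end_io;
        bio->bi_end_io = fn;
    }

    int main(void)
    {
        struct bio bio = { .bi_end_io = original_endio };
        end_io_t saved;

        save_and_set_endio(&bio, &saved, overwrite_endio);
        bio.bi_end_io(&bio, 0);  /* I/O completes: trampoline fires */
        bio.bi_end_io = saved;   /* worker restores the original */
        bio.bi_end_io(&bio, 0);  /* final completion */
        return 0;
    }
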
958 m->bio = NULL; in get_next_mapping()
982 static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio, in remap_and_issue_overwrite() argument
987 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in remap_and_issue_overwrite()
990 m->bio = bio; in remap_and_issue_overwrite()
991 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio); in remap_and_issue_overwrite()
992 inc_all_io_entry(pool, bio); in remap_and_issue_overwrite()
993 remap_and_issue(tc, bio, data_block); in remap_and_issue_overwrite()
1002 struct dm_bio_prison_cell *cell, struct bio *bio, in schedule_copy() argument
1030 if (io_overwrites_block(pool, bio)) in schedule_copy()
1031 remap_and_issue_overwrite(tc, bio, data_dest, m); in schedule_copy()
1073 struct dm_bio_prison_cell *cell, struct bio *bio) in schedule_internal_copy() argument
1076 data_origin, data_dest, cell, bio, in schedule_internal_copy()
1082 struct bio *bio) in schedule_zero() argument
1101 else if (io_overwrites_block(pool, bio)) in schedule_zero()
1102 remap_and_issue_overwrite(tc, bio, data_block, m); in schedule_zero()
1112 struct dm_bio_prison_cell *cell, struct bio *bio) in schedule_external_copy() argument
1120 virt_block, data_dest, cell, bio, in schedule_external_copy()
1125 virt_block, data_dest, cell, bio, in schedule_external_copy()
1129 schedule_zero(tc, virt_block, data_dest, cell, bio); in schedule_external_copy()
1235 static void retry_on_resume(struct bio *bio) in retry_on_resume() argument
1237 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in retry_on_resume()
1242 bio_list_add(&tc->retry_on_resume_list, bio); in retry_on_resume()
1269 static void handle_unserviceable_bio(struct pool *pool, struct bio *bio) in handle_unserviceable_bio() argument
1274 bio_endio(bio, error); in handle_unserviceable_bio()
1276 retry_on_resume(bio); in handle_unserviceable_bio()
1281 struct bio *bio; in retry_bios_on_resume() local
1294 while ((bio = bio_list_pop(&bios))) in retry_bios_on_resume()
1295 retry_on_resume(bio); in retry_bios_on_resume()
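
handle_unserviceable_bio() implements the pool's error_if_no_space policy: fail the bio immediately when the feature is set, otherwise park it via retry_on_resume() on the thin device's retry_on_resume_list for replay once the pool is resumed with space available. A compact sketch of that decision; the list handling and the error value are simplified stand-ins:

    #include <stdio.h>
    #include <errno.h>

    struct bio { int done_err; struct bio *next; };

    static int error_if_no_space = 1;  /* pool feature flag */
    static struct bio *retry_on_resume_list;

    /* Stand-in for bio_endio(bio, error). */
    static void bio_endio(struct bio *bio, int err) { bio->done_err = err; }

    static void handle_unserviceable_bio(struct bio *bio)
    {
        if (error_if_no_space) {
            bio_endio(bio, -ENOSPC);          /* fail fast */
        } else {
            bio->next = retry_on_resume_list; /* replay after resume */
            retry_on_resume_list = bio;
        }
    }

    int main(void)
    {
        struct bio bio = { 0 };

        handle_unserviceable_bio(&bio);
        printf("completed with %d, queued: %s\n", bio.done_err,
               retry_on_resume_list ? "yes" : "no");
        return 0;
    }
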
1301 struct bio *bio = cell->holder; in process_discard_cell() local
1305 dm_block_t block = get_bio_block(tc, bio); in process_discard_cell()
1323 if (bio_detain(tc->pool, &key2, bio, &cell2)) { in process_discard_cell()
1328 if (io_overlaps_block(pool, bio)) { in process_discard_cell()
1341 m->bio = bio; in process_discard_cell()
1347 inc_all_io_entry(pool, bio); in process_discard_cell()
1357 remap_and_issue(tc, bio, lookup_result.block); in process_discard_cell()
1359 bio_endio(bio, 0); in process_discard_cell()
1368 bio_endio(bio, 0); in process_discard_cell()
1375 bio_io_error(bio); in process_discard_cell()
1380 static void process_discard_bio(struct thin_c *tc, struct bio *bio) in process_discard_bio() argument
1384 dm_block_t block = get_bio_block(tc, bio); in process_discard_bio()
1387 if (bio_detain(tc->pool, &key, bio, &cell)) in process_discard_bio()
1393 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block, in break_sharing() argument
1406 data_block, cell, bio); in break_sharing()
1425 struct bio *bio; in __remap_and_issue_shared_cell() local
1427 while ((bio = bio_list_pop(&cell->bios))) { in __remap_and_issue_shared_cell()
1428 if ((bio_data_dir(bio) == WRITE) || in __remap_and_issue_shared_cell()
1429 (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA))) in __remap_and_issue_shared_cell()
1430 bio_list_add(&info->defer_bios, bio); in __remap_and_issue_shared_cell()
1432 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in __remap_and_issue_shared_cell()
1435 inc_all_io_entry(info->tc->pool, bio); in __remap_and_issue_shared_cell()
1436 bio_list_add(&info->issue_bios, bio); in __remap_and_issue_shared_cell()
1445 struct bio *bio; in remap_and_issue_shared_cell() local
1455 while ((bio = bio_list_pop(&info.defer_bios))) in remap_and_issue_shared_cell()
1456 thin_defer_bio(tc, bio); in remap_and_issue_shared_cell()
1458 while ((bio = bio_list_pop(&info.issue_bios))) in remap_and_issue_shared_cell()
1459 remap_and_issue(tc, bio, block); in remap_and_issue_shared_cell()
1462 static void process_shared_bio(struct thin_c *tc, struct bio *bio, in process_shared_bio() argument
1476 if (bio_detain(pool, &key, bio, &data_cell)) { in process_shared_bio()
1481 if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) { in process_shared_bio()
1482 break_sharing(tc, bio, block, &key, lookup_result, data_cell); in process_shared_bio()
1485 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in process_shared_bio()
1488 inc_all_io_entry(pool, bio); in process_shared_bio()
1489 remap_and_issue(tc, bio, lookup_result->block); in process_shared_bio()
1496 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block, in provision_block() argument
1506 if (!bio->bi_iter.bi_size) { in provision_block()
1507 inc_all_io_entry(pool, bio); in provision_block()
1510 remap_and_issue(tc, bio, 0); in provision_block()
1517 if (bio_data_dir(bio) == READ) { in provision_block()
1518 zero_fill_bio(bio); in provision_block()
1520 bio_endio(bio, 0); in provision_block()
1528 schedule_external_copy(tc, block, data_block, cell, bio); in provision_block()
1530 schedule_zero(tc, block, data_block, cell, bio); in provision_block()
1549 struct bio *bio = cell->holder; in process_cell() local
1550 dm_block_t block = get_bio_block(tc, bio); in process_cell()
1562 process_shared_bio(tc, bio, block, &lookup_result, cell); in process_cell()
1564 inc_all_io_entry(pool, bio); in process_cell()
1565 remap_and_issue(tc, bio, lookup_result.block); in process_cell()
1571 if (bio_data_dir(bio) == READ && tc->origin_dev) { in process_cell()
1572 inc_all_io_entry(pool, bio); in process_cell()
1575 if (bio_end_sector(bio) <= tc->origin_size) in process_cell()
1576 remap_to_origin_and_issue(tc, bio); in process_cell()
1578 else if (bio->bi_iter.bi_sector < tc->origin_size) { in process_cell()
1579 zero_fill_bio(bio); in process_cell()
1580 bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT; in process_cell()
1581 remap_to_origin_and_issue(tc, bio); in process_cell()
1584 zero_fill_bio(bio); in process_cell()
1585 bio_endio(bio, 0); in process_cell()
1588 provision_block(tc, bio, block, cell); in process_cell()
1595 bio_io_error(bio); in process_cell()
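
The unmapped-read branch of process_cell() (lines 1571-1585) shows how a thin device with an external origin answers reads against a possibly shorter origin: reads entirely inside the origin are remapped whole, reads straddling the end are zero-filled and then truncated so only the in-range prefix is re-read from the origin, and reads entirely past it are zero-filled and completed. The size arithmetic, modeled in userspace with 512-byte sectors:

    #include <stdio.h>
    #include <stdint.h>

    #define SECTOR_SHIFT 9

    typedef uint64_t sector_t;

    struct bio { sector_t sector; uint32_t size; }; /* bi_sector / bi_size */

    static sector_t bio_end_sector(const struct bio *b)
    {
        return b->sector + (b->size >> SECTOR_SHIFT);
    }

    /* 0 = remap whole bio, 1 = zero then remap prefix, 2 = all zeroes. */
    static int classify_origin_read(struct bio *bio, sector_t origin_size)
    {
        if (bio_end_sector(bio) <= origin_size)
            return 0;                  /* fully inside the origin */

        if (bio->sector < origin_size) {
            /* Straddles the end: zero the payload, then truncate the
             * bio so only the overlapping prefix hits the origin. */
            bio->size = (uint32_t)((origin_size - bio->sector)
                                   << SECTOR_SHIFT);
            return 1;
        }
        return 2;                      /* entirely past the origin */
    }

    int main(void)
    {
        struct bio bio = { .sector = 100, .size = 64 << SECTOR_SHIFT };

        /* 64-sector read at 100 against a 128-sector origin: 28-sector prefix. */
        printf("case=%d, new size=%u bytes\n",
               classify_origin_read(&bio, 128), (unsigned)bio.size);
        return 0;
    }
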
1600 static void process_bio(struct thin_c *tc, struct bio *bio) in process_bio() argument
1603 dm_block_t block = get_bio_block(tc, bio); in process_bio()
1612 if (bio_detain(pool, &key, bio, &cell)) in process_bio()
1618 static void __process_bio_read_only(struct thin_c *tc, struct bio *bio, in __process_bio_read_only() argument
1622 int rw = bio_data_dir(bio); in __process_bio_read_only()
1623 dm_block_t block = get_bio_block(tc, bio); in __process_bio_read_only()
1629 if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) { in __process_bio_read_only()
1630 handle_unserviceable_bio(tc->pool, bio); in __process_bio_read_only()
1634 inc_all_io_entry(tc->pool, bio); in __process_bio_read_only()
1635 remap_and_issue(tc, bio, lookup_result.block); in __process_bio_read_only()
1645 handle_unserviceable_bio(tc->pool, bio); in __process_bio_read_only()
1650 inc_all_io_entry(tc->pool, bio); in __process_bio_read_only()
1651 remap_to_origin_and_issue(tc, bio); in __process_bio_read_only()
1655 zero_fill_bio(bio); in __process_bio_read_only()
1656 bio_endio(bio, 0); in __process_bio_read_only()
1664 bio_io_error(bio); in __process_bio_read_only()
1669 static void process_bio_read_only(struct thin_c *tc, struct bio *bio) in process_bio_read_only() argument
1671 __process_bio_read_only(tc, bio, NULL); in process_bio_read_only()
1679 static void process_bio_success(struct thin_c *tc, struct bio *bio) in process_bio_success() argument
1681 bio_endio(bio, 0); in process_bio_success()
1684 static void process_bio_fail(struct thin_c *tc, struct bio *bio) in process_bio_fail() argument
1686 bio_io_error(bio); in process_bio_fail()
1712 static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio) in __thin_bio_rb_add() argument
1716 sector_t bi_sector = bio->bi_iter.bi_sector; in __thin_bio_rb_add()
1730 pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in __thin_bio_rb_add()
1739 struct bio *bio; in __extract_sorted_bios() local
1743 bio = thin_bio(pbd); in __extract_sorted_bios()
1745 bio_list_add(&tc->deferred_bio_list, bio); in __extract_sorted_bios()
1754 struct bio *bio; in __sort_thin_deferred_bios() local
1762 while ((bio = bio_list_pop(&bios))) in __sort_thin_deferred_bios()
1763 __thin_bio_rb_add(tc, bio); in __sort_thin_deferred_bios()
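
__thin_bio_rb_add() inserts each deferred bio into a per-device red-black tree keyed on bi_sector, and __extract_sorted_bios() walks it in order back onto deferred_bio_list, so the worker issues each batch in roughly ascending-sector order. The effect, sketched with a plain qsort over start sectors instead of an rbtree:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    static int cmp_sector(const void *a, const void *b)
    {
        sector_t x = *(const sector_t *)a, y = *(const sector_t *)b;
        return (x > y) - (x < y);
    }

    int main(void)
    {
        /* Arrival order of deferred bios (by start sector). */
        sector_t sectors[] = { 4096, 8, 1024, 16, 2048 };
        size_t i, n = sizeof(sectors) / sizeof(sectors[0]);

        /* The rbtree in __thin_bio_rb_add() yields this ordering for free;
         * sorting the batch keeps the issued I/O mostly sequential. */
        qsort(sectors, n, sizeof(sectors[0]), cmp_sector);

        for (i = 0; i < n; i++)
            printf("issue bio at sector %llu\n",
                   (unsigned long long)sectors[i]);
        return 0;
    }
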
1777 struct bio *bio; in process_thin_deferred_bios() local
1804 while ((bio = bio_list_pop(&bios))) { in process_thin_deferred_bios()
1812 bio_list_add(&tc->deferred_bio_list, bio); in process_thin_deferred_bios()
1818 if (bio->bi_rw & REQ_DISCARD) in process_thin_deferred_bios()
1819 pool->process_discard(tc, bio); in process_thin_deferred_bios()
1821 pool->process_bio(tc, bio); in process_thin_deferred_bios()
1955 struct bio *bio; in process_deferred_bios() local
1981 while ((bio = bio_list_pop(&bios))) in process_deferred_bios()
1982 bio_io_error(bio); in process_deferred_bios()
1987 while ((bio = bio_list_pop(&bios))) in process_deferred_bios()
1988 generic_make_request(bio); in process_deferred_bios()
2239 static void thin_defer_bio(struct thin_c *tc, struct bio *bio) in thin_defer_bio() argument
2245 bio_list_add(&tc->deferred_bio_list, bio); in thin_defer_bio()
2251 static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio) in thin_defer_bio_with_throttle() argument
2256 thin_defer_bio(tc, bio); in thin_defer_bio_with_throttle()
2274 static void thin_hook_bio(struct thin_c *tc, struct bio *bio) in thin_hook_bio() argument
2276 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in thin_hook_bio()
2287 static int thin_bio_map(struct dm_target *ti, struct bio *bio) in thin_bio_map() argument
2291 dm_block_t block = get_bio_block(tc, bio); in thin_bio_map()
2297 thin_hook_bio(tc, bio); in thin_bio_map()
2300 bio_endio(bio, DM_ENDIO_REQUEUE); in thin_bio_map()
2305 bio_io_error(bio); in thin_bio_map()
2309 if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) { in thin_bio_map()
2310 thin_defer_bio_with_throttle(tc, bio); in thin_bio_map()
2319 if (bio_detain(tc->pool, &key, bio, &virt_cell)) in thin_bio_map()
2349 if (bio_detain(tc->pool, &key, bio, &data_cell)) { in thin_bio_map()
2354 inc_all_io_entry(tc->pool, bio); in thin_bio_map()
2358 remap(tc, bio, result.block); in thin_bio_map()
2372 bio_io_error(bio); in thin_bio_map()
2985 static int pool_map(struct dm_target *ti, struct bio *bio) in pool_map() argument
2996 bio->bi_bdev = pt->data_dev->bdev; in pool_map()
3877 static int thin_map(struct dm_target *ti, struct bio *bio) in thin_map() argument
3879 bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector); in thin_map()
3881 return thin_bio_map(ti, bio); in thin_map()
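
thin_map() rebases the bio into target-relative coordinates before handing it to thin_bio_map(): dm_target_offset() subtracts the target's start sector within the mapped device (ti->begin), so the block arithmetic above counts sectors from the start of the thin device. The rebasing in isolation:

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    /* Model of dm_target_offset(ti, sector): rebase to the target start. */
    static sector_t dm_target_offset(sector_t ti_begin, sector_t bi_sector)
    {
        return bi_sector - ti_begin;
    }

    int main(void)
    {
        sector_t ti_begin = 2048; /* target starts 1 MiB into the table */

        printf("%llu\n",
               (unsigned long long)dm_target_offset(ti_begin, 4096));
        return 0;
    }
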
3884 static int thin_endio(struct dm_target *ti, struct bio *bio, int err) in thin_endio() argument
3887 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in thin_endio()