Lines matching refs: bio (each hit shows the source line number, the matching code, and its enclosing function)
73 struct bio *bio; member
104 struct bio *orig;
106 struct bio clone;
216 struct bio flush_bio;
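The three hits above are struct members. This listing appears to come from drivers/md/dm.c (circa Linux v4.4, judging by bi_rw, bi_error and the blk_qc_t make_request signature); the sketches added throughout below are reconstructions from memory of that era's code, assume dm.c's internal headers and types rather than being standalone programs, and may drift from the exact source in minor details. The containing structures, trimmed to the fields of interest:

/* Bio-based path: one dm_io per original bio submitted to the device. */
struct dm_io {
        struct mapped_device *md;
        int error;
        atomic_t io_count;              /* outstanding clones, plus one */
        struct bio *bio;                /* line 73: the original bio */
        unsigned long start_time;
        spinlock_t endio_lock;
        struct dm_stats_aux stats_aux;
};

/* Request-based path: ties each cloned bio back to its original. */
struct dm_rq_clone_bio_info {
        struct bio *orig;               /* line 104 */
        struct dm_rq_target_io *tio;
        struct bio clone;               /* line 106: bioset-allocated, must stay last */
};

/* line 216: struct mapped_device embeds a bio used to issue empty flushes. */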
676 struct bio *bio = io->bio; in start_io_acct() local
678 int rw = bio_data_dir(bio); in start_io_acct()
689 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, in start_io_acct()
690 bio_sectors(bio), false, 0, &io->stats_aux); in start_io_acct()
696 struct bio *bio = io->bio; in end_io_acct() local
699 int rw = bio_data_dir(bio); in end_io_acct()
704 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, in end_io_acct()
705 bio_sectors(bio), true, duration, &io->stats_aux); in end_io_acct()
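start_io_acct() and end_io_acct() bracket every original bio for both the generic per-disk statistics and dm-stats; the dm_stats_account_io() calls at lines 689 and 704 record the same direction and sector range, with the duration filled in only at completion. A condensed reconstruction of the pair (details approximate):

static void start_io_acct(struct dm_io *io)
{
        struct mapped_device *md = io->md;
        struct bio *bio = io->bio;
        int cpu;
        int rw = bio_data_dir(bio);

        io->start_time = jiffies;

        cpu = part_stat_lock();
        part_round_stats(cpu, &dm_disk(md)->part0);
        part_stat_unlock();
        atomic_set(&dm_disk(md)->part0.in_flight[rw],
                   atomic_inc_return(&md->pending[rw]));

        if (unlikely(dm_stats_used(&md->stats)))
                dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
                                    bio_sectors(bio), false, 0, &io->stats_aux);
}

static void end_io_acct(struct dm_io *io)
{
        struct mapped_device *md = io->md;
        struct bio *bio = io->bio;
        unsigned long duration = jiffies - io->start_time;
        int pending;
        int rw = bio_data_dir(bio);

        generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time);

        if (unlikely(dm_stats_used(&md->stats)))
                dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
                                    bio_sectors(bio), true, duration, &io->stats_aux);

        /* after this decrement a flush bio must not be touched again */
        pending = atomic_dec_return(&md->pending[rw]);
        atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
        pending += atomic_read(&md->pending[rw ^ 0x1]);

        /* nudge anyone waiting on the suspend queue */
        if (!pending)
                wake_up(&md->wait);
}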
723 static void queue_io(struct mapped_device *md, struct bio *bio) in queue_io() argument
728 bio_list_add(&md->deferred, bio); in queue_io()
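queue_io() at line 723 parks a bio on the per-device deferred list under its lock and kicks the workqueue that runs dm_wq_work() (see the end of this listing). The whole function is short; reconstructed:

static void queue_io(struct mapped_device *md, struct bio *bio)
{
        unsigned long flags;

        spin_lock_irqsave(&md->deferred_lock, flags);
        bio_list_add(&md->deferred, bio);
        spin_unlock_irqrestore(&md->deferred_lock, flags);
        queue_work(md->wq, &md->work);
}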
934 struct bio *bio; in dec_pending() local
952 bio_list_add_head(&md->deferred, io->bio); in dec_pending()
960 bio = io->bio; in dec_pending()
967 if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) { in dec_pending()
972 bio->bi_rw &= ~REQ_FLUSH; in dec_pending()
973 queue_io(md, bio); in dec_pending()
976 trace_block_bio_complete(md->queue, bio, io_error); in dec_pending()
977 bio->bi_error = io_error; in dec_pending()
978 bio_endio(bio); in dec_pending()
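The dec_pending() hits above are the completion path for the original bio: the last clone to finish either pushes the bio back onto the deferred list (DM_ENDIO_REQUEUE during a noflush suspend, line 952), reissues a flush-with-data without REQ_FLUSH once the preflush is done (lines 967 to 973), or ends the bio (lines 976 to 978). A condensed reconstruction of that tail, with the error-recording prologue elided:

static void dec_pending(struct dm_io *io, int error)
{
        struct mapped_device *md = io->md;
        struct bio *bio;
        int io_error;
        unsigned long flags;

        /* recording of 'error' into io->error under io->endio_lock elided */

        if (atomic_dec_and_test(&io->io_count)) {
                if (io->error == DM_ENDIO_REQUEUE) {
                        /* target asked to push the I/O back for a noflush suspend */
                        spin_lock_irqsave(&md->deferred_lock, flags);
                        if (__noflush_suspending(md))
                                bio_list_add_head(&md->deferred, io->bio);
                        else
                                io->error = -EIO; /* noflush suspend interrupted */
                        spin_unlock_irqrestore(&md->deferred_lock, flags);
                }

                io_error = io->error;
                bio = io->bio;
                end_io_acct(io);
                free_io(md, io);

                if (io_error == DM_ENDIO_REQUEUE)
                        return;

                if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
                        /* preflush done for a flush with data: reissue it plain */
                        bio->bi_rw &= ~REQ_FLUSH;
                        queue_io(md, bio);
                } else {
                        /* done with normal I/O or an empty flush */
                        trace_block_bio_complete(md->queue, bio, io_error);
                        bio->bi_error = io_error;
                        bio_endio(bio);
                }
        }
}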
991 static void clone_endio(struct bio *bio) in clone_endio() argument
993 int error = bio->bi_error; in clone_endio()
995 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); in clone_endio()
1001 r = endio(tio->ti, bio, error); in clone_endio()
1017 if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) && in clone_endio()
1018 !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors)) in clone_endio()
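clone_endio() runs when a cloned bio completes: the target's optional ->end_io hook (line 1001) gets first say over the result, and the -EREMOTEIO test at lines 1017 to 1018 disables WRITE SAME on the mapped device if the underlying queue turned out not to support it. A condensed reconstruction:

static void clone_endio(struct bio *bio)
{
        int error = bio->bi_error;
        int r = error;
        struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
        struct dm_io *io = tio->io;
        struct mapped_device *md = tio->io->md;
        dm_endio_fn endio = tio->ti->type->end_io;

        if (endio) {
                r = endio(tio->ti, bio, error);
                if (r < 0 || r == DM_ENDIO_REQUEUE)
                        error = r;
                else if (r == DM_ENDIO_INCOMPLETE)
                        return; /* the target will complete the io itself */
        }

        if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) &&
                     !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
                disable_write_same(md);

        free_tio(md, tio);
        dec_pending(io, error);
}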
1028 static void end_clone_bio(struct bio *clone) in end_clone_bio()
1033 struct bio *bio = info->orig; in end_clone_bio() local
1066 if (tio->orig->bio != bio) in end_clone_bio()
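On the request-based path, end_clone_bio() maps a completing cloned bio back to info->orig and completes that many bytes of the original request; the line-1066 check asserts that bios complete head-first, since they are processed from the head of the request's bio list. A condensed reconstruction:

static void end_clone_bio(struct bio *clone)
{
        struct dm_rq_clone_bio_info *info =
                container_of(clone, struct dm_rq_clone_bio_info, clone);
        struct dm_rq_target_io *tio = info->tio;
        struct bio *bio = info->orig;
        unsigned int nr_bytes = info->orig->bi_iter.bi_size;
        int error = clone->bi_error;

        bio_put(clone);

        if (tio->error)
                return; /* the request already failed; report once */

        tio->error = error;

        /* the completing bio should always be rq->bio */
        if (tio->orig->bio != bio)
                DMERR("bio completion is going in the middle of the request");

        /* complete nr_bytes of the original request, preserving ordering */
        blk_update_request(tio->orig, 0, nr_bytes);
}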
1458 void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) in dm_accept_partial_bio() argument
1460 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); in dm_accept_partial_bio()
1461 unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT; in dm_accept_partial_bio()
1462 BUG_ON(bio->bi_rw & REQ_FLUSH); in dm_accept_partial_bio()
1466 bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT; in dm_accept_partial_bio()
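dm_accept_partial_bio() is an exported helper a target's ->map routine may call to accept only the first n_sectors of the clone; dm adjusts *tio->len_ptr so the splitting loop resubmits the remainder, and the BUG_ON at line 1462 reflects that flush bios must never be split. A hypothetical target map function sketching the intended usage (my_target_map, struct my_target and its fields are invented for illustration):

/* Hypothetical ->map: never let an I/O cross a fixed 1 MiB boundary. */
static int my_target_map(struct dm_target *ti, struct bio *bio)
{
        struct my_target *mt = ti->private;     /* hypothetical per-target state */
        sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
        sector_t boundary = 2048;               /* 1 MiB in 512-byte sectors */
        sector_t max = boundary - (offset & (boundary - 1));

        if (bio_sectors(bio) > max)
                dm_accept_partial_bio(bio, max); /* dm resubmits the rest */

        bio->bi_bdev = mt->dev->bdev;
        bio->bi_iter.bi_sector = mt->start + offset;
        return DM_MAPIO_REMAPPED;
}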
1475 struct bio *clone = &tio->clone; in __map_bio()
1492 tio->io->bio->bi_bdev->bd_dev, sector); in __map_bio()
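__map_bio() hands the clone to the target's ->map; the hit at line 1492 is the remap tracepoint, which records the original device (tio->io->bio->bi_bdev) so blktrace can attribute the remapped clone. Reconstructed core:

static void __map_bio(struct dm_target_io *tio)
{
        int r;
        sector_t sector;
        struct mapped_device *md;
        struct bio *clone = &tio->clone;
        struct dm_target *ti = tio->ti;

        clone->bi_end_io = clone_endio;

        /* one io_count reference per clone in flight */
        atomic_inc(&tio->io->io_count);
        sector = clone->bi_iter.bi_sector;
        r = ti->type->map(ti, clone);
        if (r == DM_MAPIO_REMAPPED) {
                /* the target redirected the clone: trace and dispatch it */
                trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
                                      tio->io->bio->bi_bdev->bd_dev, sector);
                generic_make_request(clone);
        } else if (r < 0 || r == DM_MAPIO_REQUEUE) {
                /* error or requeue: drop this clone's reference */
                md = tio->io->md;
                dec_pending(tio->io, r);
                free_tio(md, tio);
        }
        /* DM_MAPIO_SUBMITTED: the target owns the clone now */
}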
1509 struct bio *bio; member
1515 static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len) in bio_setup_sector() argument
1517 bio->bi_iter.bi_sector = sector; in bio_setup_sector()
1518 bio->bi_iter.bi_size = to_bytes(len); in bio_setup_sector()
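The member at line 1509 belongs to struct clone_info, the cursor threaded through the splitting code; bio_setup_sector() (lines 1515 to 1518, shown complete above) merely positions a clone within that range. The struct, reconstructed:

struct clone_info {
        struct mapped_device *md;
        struct dm_table *map;
        struct bio *bio;        /* bio being cloned; &md->flush_bio for flushes */
        struct dm_io *io;
        sector_t sector;        /* next sector to map */
        unsigned sector_count;  /* sectors still to map */
};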
1524 static void clone_bio(struct dm_target_io *tio, struct bio *bio, in clone_bio() argument
1527 struct bio *clone = &tio->clone; in clone_bio()
1529 __bio_clone_fast(clone, bio); in clone_bio()
1531 if (bio_integrity(bio)) in clone_bio()
1532 bio_integrity_clone(clone, bio, GFP_NOIO); in clone_bio()
1537 if (bio_integrity(bio)) in clone_bio()
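clone_bio() creates a partial clone: __bio_clone_fast() shares the original's bvecs, then the clone is advanced to the requested start sector and its size capped at len; integrity metadata is cloned and trimmed to match. Reconstructed (the integrity-trim call follows the three-argument form I recall from this era, which may differ in other versions):

static void clone_bio(struct dm_target_io *tio, struct bio *bio,
                      sector_t sector, unsigned len)
{
        struct bio *clone = &tio->clone;

        __bio_clone_fast(clone, bio);

        if (bio_integrity(bio))
                bio_integrity_clone(clone, bio, GFP_NOIO);

        /* position the clone: skip to 'sector', keep only 'len' sectors */
        bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
        clone->bi_iter.bi_size = to_bytes(len);

        if (bio_integrity(bio))
                bio_integrity_trim(clone, 0, len);
}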
1546 struct bio *clone; in alloc_tio()
1563 struct bio *clone = &tio->clone; in __clone_and_map_simple_bio()
1567 __bio_clone_fast(clone, ci->bio); in __clone_and_map_simple_bio()
1588 BUG_ON(bio_has_data(ci->bio)); in __send_empty_flush()
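alloc_tio() allocates the per-clone dm_target_io from the device's bioset with the clone embedded, __clone_and_map_simple_bio() builds a full (unsplit) clone from it, and __send_empty_flush() fans one such clone out to every target, num_flush_bios copies each. Condensed reconstruction:

static struct dm_target_io *alloc_tio(struct clone_info *ci,
                                      struct dm_target *ti, unsigned target_bio_nr)
{
        struct dm_target_io *tio;
        struct bio *clone;

        clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
        tio = container_of(clone, struct dm_target_io, clone);

        tio->io = ci->io;
        tio->ti = ti;
        tio->target_bio_nr = target_bio_nr;
        return tio;
}

static void __clone_and_map_simple_bio(struct clone_info *ci,
                                       struct dm_target *ti,
                                       unsigned target_bio_nr, unsigned *len)
{
        struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr);
        struct bio *clone = &tio->clone;

        tio->len_ptr = len;
        __bio_clone_fast(clone, ci->bio);
        if (len)
                bio_setup_sector(clone, ci->sector, *len);

        __map_bio(tio);
}

static int __send_empty_flush(struct clone_info *ci)
{
        unsigned target_nr = 0;
        struct dm_target *ti;

        BUG_ON(bio_has_data(ci->bio));  /* ci->bio is md->flush_bio here */
        while ((ti = dm_table_get_target(ci->map, target_nr++)))
                __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
        return 0;
}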
1598 struct bio *bio = ci->bio; in __clone_and_map_data_bio() local
1606 if (bio_data_dir(bio) == WRITE && ti->num_write_bios) in __clone_and_map_data_bio()
1607 num_target_bios = ti->num_write_bios(ti, bio); in __clone_and_map_data_bio()
1612 clone_bio(tio, bio, sector, *len); in __clone_and_map_data_bio()
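__clone_and_map_data_bio() is the data-path counterpart: one partial clone per target bio, with ti->num_write_bios (lines 1606 to 1607) letting a target request duplicate copies of a write. Reconstructed:

static void __clone_and_map_data_bio(struct clone_info *ci,
                                     struct dm_target *ti,
                                     sector_t sector, unsigned *len)
{
        struct bio *bio = ci->bio;
        struct dm_target_io *tio;
        unsigned target_bio_nr;
        unsigned num_target_bios = 1;

        /* does the target want duplicate copies of this write? */
        if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
                num_target_bios = ti->num_write_bios(ti, bio);

        for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
                tio = alloc_tio(ci, ti, target_bio_nr);
                tio->len_ptr = len;
                clone_bio(tio, bio, sector, *len);
                __map_bio(tio);
        }
}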
1688 struct bio *bio = ci->bio; in __split_and_process_non_flush() local
1692 if (unlikely(bio->bi_rw & REQ_DISCARD)) in __split_and_process_non_flush()
1694 else if (unlikely(bio->bi_rw & REQ_WRITE_SAME)) in __split_and_process_non_flush()
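__split_and_process_non_flush() advances the clone_info cursor one target at a time; the discard and WRITE SAME bios at lines 1692 and 1694 take dedicated paths because their splitting limits come from different queue limits. Reconstructed:

static int __split_and_process_non_flush(struct clone_info *ci)
{
        struct bio *bio = ci->bio;
        struct dm_target *ti;
        unsigned len;

        if (unlikely(bio->bi_rw & REQ_DISCARD))
                return __send_discard(ci);
        else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
                return __send_write_same(ci);

        ti = dm_table_find_target(ci->map, ci->sector);
        if (!dm_target_is_valid(ti))
                return -EIO;

        /* map at most up to the target's max_io_len boundary */
        len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);

        __clone_and_map_data_bio(ci, ti, ci->sector, &len);

        ci->sector += len;
        ci->sector_count -= len;
        return 0;
}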
1715 struct dm_table *map, struct bio *bio) in __split_and_process_bio() argument
1721 bio_io_error(bio); in __split_and_process_bio()
1730 ci.io->bio = bio; in __split_and_process_bio()
1733 ci.sector = bio->bi_iter.bi_sector; in __split_and_process_bio()
1737 if (bio->bi_rw & REQ_FLUSH) { in __split_and_process_bio()
1738 ci.bio = &ci.md->flush_bio; in __split_and_process_bio()
1743 ci.bio = bio; in __split_and_process_bio()
1744 ci.sector_count = bio_sectors(bio); in __split_and_process_bio()
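__split_and_process_bio() ties the hits above together: it allocates the dm_io, and for a REQ_FLUSH bio substitutes the device's embedded flush_bio with sector_count 0 (lines 1737 to 1738), so __send_empty_flush() runs and dec_pending() later reissues any attached data. Condensed reconstruction:

static void __split_and_process_bio(struct mapped_device *md,
                                    struct dm_table *map, struct bio *bio)
{
        struct clone_info ci;
        int error = 0;

        if (unlikely(!map)) {
                bio_io_error(bio);
                return;
        }

        ci.map = map;
        ci.md = md;
        ci.io = alloc_io(md);
        ci.io->error = 0;
        atomic_set(&ci.io->io_count, 1); /* dropped by dec_pending() below */
        ci.io->bio = bio;
        ci.io->md = md;
        spin_lock_init(&ci.io->endio_lock);
        ci.sector = bio->bi_iter.bi_sector;

        start_io_acct(ci.io);

        if (bio->bi_rw & REQ_FLUSH) {
                ci.bio = &ci.md->flush_bio;
                ci.sector_count = 0;
                error = __send_empty_flush(&ci);
                /* dec_pending() submits any data attached to the flush */
        } else {
                ci.bio = bio;
                ci.sector_count = bio_sectors(bio);
                while (ci.sector_count && !error)
                        error = __split_and_process_non_flush(&ci);
        }

        /* drop the extra reference count */
        dec_pending(ci.io, error);
}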
1760 static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) in dm_make_request() argument
1762 int rw = bio_data_dir(bio); in dm_make_request()
1769 generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0); in dm_make_request()
1775 if (bio_rw(bio) != READA) in dm_make_request()
1776 queue_io(md, bio); in dm_make_request()
1778 bio_io_error(bio); in dm_make_request()
1782 __split_and_process_bio(md, map, bio); in dm_make_request()
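dm_make_request() is the queue's entry point: while the device is suspended it defers everything except readahead, which simply fails at line 1778 because readahead is best-effort. Reconstructed:

static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
{
        int rw = bio_data_dir(bio);
        struct mapped_device *md = q->queuedata;
        int srcu_idx;
        struct dm_table *map;

        map = dm_get_live_table(md, &srcu_idx);

        generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0);

        /* if we're suspended, queue this io for later */
        if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
                dm_put_live_table(md, srcu_idx);

                if (bio_rw(bio) != READA)
                        queue_io(md, bio);
                else
                        bio_io_error(bio); /* readahead is best-effort */
                return BLK_QC_T_NONE;
        }

        __split_and_process_bio(md, map, bio);
        dm_put_live_table(md, srcu_idx);
        return BLK_QC_T_NONE;
}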
1806 static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig, in dm_rq_bio_constructor() argument
1811 container_of(bio, struct dm_rq_clone_bio_info, clone); in dm_rq_bio_constructor()
1815 bio->bi_end_io = end_clone_bio; in dm_rq_bio_constructor()
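dm_rq_bio_constructor() is the per-bio callback dm passes to blk_rq_prep_clone() when cloning a request; it is where info->orig and the end_clone_bio completion (both hit earlier in this listing) get wired up. Reconstructed:

static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
                                 void *data)
{
        struct dm_rq_target_io *tio = data;
        struct dm_rq_clone_bio_info *info =
                container_of(bio, struct dm_rq_clone_bio_info, clone);

        info->orig = bio_orig;
        info->tio = tio;
        bio->bi_end_io = end_clone_bio;

        return 0;
}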
2117 md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 && in dm_request_fn()
2968 struct bio *c; in dm_wq_work()
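dm_wq_work() is the consumer of the deferred list that queue_io() feeds: it pops bios one at a time and pushes them back through the normal submission path once the device is unblocked. (The line-2117 hit in dm_request_fn() is separate plumbing: a merge heuristic on the request-based path that delays dispatch when the just-peeked single-bvec request is sequential with the previous one.) Reconstructed:

static void dm_wq_work(struct work_struct *work)
{
        struct mapped_device *md = container_of(work, struct mapped_device, work);
        struct bio *c;
        int srcu_idx;
        struct dm_table *map;

        map = dm_get_live_table(md, &srcu_idx);

        while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
                spin_lock_irq(&md->deferred_lock);
                c = bio_list_pop(&md->deferred);
                spin_unlock_irq(&md->deferred_lock);

                if (!c)
                        break;

                if (dm_request_based(md))
                        generic_make_request(c);
                else
                        __split_and_process_bio(md, map, c);
        }

        dm_put_live_table(md, srcu_idx);
}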