Lines Matching refs:bio (cross-reference hits in drivers/md/dm.c)
72 struct bio *bio; member
100 struct bio *orig;
102 struct bio clone;
213 struct bio flush_bio;
643 struct bio *bio = io->bio; in start_io_acct() local
645 int rw = bio_data_dir(bio); in start_io_acct()
656 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, in start_io_acct()
657 bio_sectors(bio), false, 0, &io->stats_aux); in start_io_acct()
663 struct bio *bio = io->bio; in end_io_acct() local
666 int rw = bio_data_dir(bio); in end_io_acct()
671 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, in end_io_acct()
672 bio_sectors(bio), true, duration, &io->stats_aux); in end_io_acct()
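The two accounting helpers above follow a simple start/end pattern: dm_stats_account_io() is called once at submission with end=false and a zero duration (line 656), and once more at completion with end=true and the measured duration (line 671). A minimal user-space sketch of that pattern, with illustrative names that are not the kernel API:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Illustrative stand-ins for struct dm_io and dm_stats_account_io(). */
struct io_acct {
	time_t start_time;	/* recorded at submission */
	unsigned sectors;	/* I/O size in 512-byte sectors */
};

static void account_io(unsigned sectors, bool end, long duration)
{
	printf("%s: %u sectors, duration %lds\n",
	       end ? "end" : "start", sectors, duration);
}

static void start_io(struct io_acct *io, unsigned sectors)
{
	io->start_time = time(NULL);
	io->sectors = sectors;
	account_io(io->sectors, false, 0);		/* end=false, no duration yet */
}

static void end_io(struct io_acct *io)
{
	long duration = (long)(time(NULL) - io->start_time);
	account_io(io->sectors, true, duration);	/* end=true, with duration */
}

int main(void)
{
	struct io_acct io;
	start_io(&io, 8);
	end_io(&io);
	return 0;
}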
690 static void queue_io(struct mapped_device *md, struct bio *bio) in queue_io() argument
695 bio_list_add(&md->deferred, bio); in queue_io()
901 struct bio *bio; in dec_pending() local
919 bio_list_add_head(&md->deferred, io->bio); in dec_pending()
927 bio = io->bio; in dec_pending()
934 if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) { in dec_pending()
939 bio->bi_rw &= ~REQ_FLUSH; in dec_pending()
940 queue_io(md, bio); in dec_pending()
943 trace_block_bio_complete(md->queue, bio, io_error); in dec_pending()
944 bio_endio(bio, io_error); in dec_pending()
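Lines 934 through 944 of dec_pending() handle the flush-with-data case: once the preflush part of a REQ_FLUSH bio that also carries data has completed, the flush flag is cleared and the bio is requeued so that its payload gets written; any other bio is completed on the spot. A condensed, hedged restatement of that control flow (the real function's locking, error bookkeeping, and statistics calls are omitted; queue_io() is the internal deferral helper listed at line 690):

static void finish_or_requeue(struct mapped_device *md, struct bio *bio,
			      int io_error)
{
	if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
		/*
		 * Preflush finished for a flush-with-data bio: re-issue it
		 * without REQ_FLUSH so the data payload itself is processed.
		 */
		bio->bi_rw &= ~REQ_FLUSH;
		queue_io(md, bio);
	} else {
		/* Nothing left to do: complete the original bio. */
		trace_block_bio_complete(md->queue, bio, io_error);
		bio_endio(bio, io_error);
	}
}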
957 static void clone_endio(struct bio *bio, int error) in clone_endio() argument
960 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); in clone_endio()
965 if (!bio_flagged(bio, BIO_UPTODATE) && !error) in clone_endio()
969 r = endio(tio->ti, bio, error); in clone_endio()
985 if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) && in clone_endio()
986 !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors)) in clone_endio()
996 static void end_clone_bio(struct bio *clone, int error) in end_clone_bio()
1001 struct bio *bio = info->orig; in end_clone_bio() local
1033 if (tio->orig->bio != bio) in end_clone_bio()
1418 void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) in dm_accept_partial_bio() argument
1420 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); in dm_accept_partial_bio()
1421 unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT; in dm_accept_partial_bio()
1422 BUG_ON(bio->bi_rw & REQ_FLUSH); in dm_accept_partial_bio()
1426 bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT; in dm_accept_partial_bio()
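dm_accept_partial_bio() (line 1418) lets a target's map function tell the DM core to process only the first n_sectors of the bio it was handed; the core then resubmits the remainder as a fresh bio. A hedged sketch of how a hypothetical target might use it, matching the bi_rw/REQ_FLUSH era of this listing (example_map, max_sectors and the use of ti->private are illustrative assumptions, not part of the DM API):

#include <linux/bio.h>
#include <linux/device-mapper.h>

/* Hypothetical pass-through target that handles at most max_sectors per map call. */
static int example_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_dev *dev = ti->private;	/* assumed to be set up in .ctr */
	unsigned max_sectors = 8;		/* illustrative split limit */

	/* Must not be used on flush bios (BUG_ON at line 1422 above). */
	if (!(bio->bi_rw & REQ_FLUSH) && bio_sectors(bio) > max_sectors)
		dm_accept_partial_bio(bio, max_sectors);

	bio->bi_bdev = dev->bdev;		/* remap to the underlying device */
	return DM_MAPIO_REMAPPED;
}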
1435 struct bio *clone = &tio->clone; in __map_bio()
1452 tio->io->bio->bi_bdev->bd_dev, sector); in __map_bio()
1469 struct bio *bio; member
1475 static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len) in bio_setup_sector() argument
1477 bio->bi_iter.bi_sector = sector; in bio_setup_sector()
1478 bio->bi_iter.bi_size = to_bytes(len); in bio_setup_sector()
1484 static void clone_bio(struct dm_target_io *tio, struct bio *bio, in clone_bio() argument
1487 struct bio *clone = &tio->clone; in clone_bio()
1489 __bio_clone_fast(clone, bio); in clone_bio()
1491 if (bio_integrity(bio)) in clone_bio()
1492 bio_integrity_clone(clone, bio, GFP_NOIO); in clone_bio()
1497 if (bio_integrity(bio)) in clone_bio()
1506 struct bio *clone; in alloc_tio()
1523 struct bio *clone = &tio->clone; in __clone_and_map_simple_bio()
1527 __bio_clone_fast(clone, ci->bio); in __clone_and_map_simple_bio()
1548 BUG_ON(bio_has_data(ci->bio)); in __send_empty_flush()
1558 struct bio *bio = ci->bio; in __clone_and_map_data_bio() local
1566 if (bio_data_dir(bio) == WRITE && ti->num_write_bios) in __clone_and_map_data_bio()
1567 num_target_bios = ti->num_write_bios(ti, bio); in __clone_and_map_data_bio()
1572 clone_bio(tio, bio, sector, *len); in __clone_and_map_data_bio()
1648 struct bio *bio = ci->bio; in __split_and_process_non_flush() local
1652 if (unlikely(bio->bi_rw & REQ_DISCARD)) in __split_and_process_non_flush()
1654 else if (unlikely(bio->bi_rw & REQ_WRITE_SAME)) in __split_and_process_non_flush()
1675 struct dm_table *map, struct bio *bio) in __split_and_process_bio() argument
1681 bio_io_error(bio); in __split_and_process_bio()
1690 ci.io->bio = bio; in __split_and_process_bio()
1693 ci.sector = bio->bi_iter.bi_sector; in __split_and_process_bio()
1697 if (bio->bi_rw & REQ_FLUSH) { in __split_and_process_bio()
1698 ci.bio = &ci.md->flush_bio; in __split_and_process_bio()
1703 ci.bio = bio; in __split_and_process_bio()
1704 ci.sector_count = bio_sectors(bio); in __split_and_process_bio()
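__split_and_process_bio() (lines 1675 through 1704) fills in a clone_info and then takes one of two paths: an empty flush is redirected to the device's preallocated flush_bio (member at line 213) and sent with __send_empty_flush() (line 1548), while a data-carrying bio keeps its own sector count and is fed to __split_and_process_non_flush() (line 1648) until every sector has been issued. A condensed, hedged restatement of that branch:

	if (bio->bi_rw & REQ_FLUSH) {
		/* Empty flush: use the device's dedicated flush_bio. */
		ci.bio = &ci.md->flush_bio;
		ci.sector_count = 0;
		error = __send_empty_flush(&ci);
	} else {
		/* Data bio: clone and map pieces until all sectors are issued. */
		ci.bio = bio;
		ci.sector_count = bio_sectors(bio);
		while (ci.sector_count && !error)
			error = __split_and_process_non_flush(&ci);
	}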
1774 static void dm_make_request(struct request_queue *q, struct bio *bio) in dm_make_request() argument
1776 int rw = bio_data_dir(bio); in dm_make_request()
1783 generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0); in dm_make_request()
1789 if (bio_rw(bio) != READA) in dm_make_request()
1790 queue_io(md, bio); in dm_make_request()
1792 bio_io_error(bio); in dm_make_request()
1796 __split_and_process_bio(md, map, bio); in dm_make_request()
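dm_make_request() (lines 1774 through 1796) accounts for the incoming bio and then either processes it immediately or, when the device is suspended or has no live table, defers it: readahead bios are simply failed, everything else is queued with queue_io() to be replayed later by dm_wq_work() (line 3020). A hedged sketch of that flow (device_suspended_or_no_map() is an illustrative stand-in for the real suspend-flag and table checks, and the SRCU-protected table lookup of the real function is omitted):

static void example_make_request(struct request_queue *q, struct bio *bio,
				 struct dm_table *map)
{
	struct mapped_device *md = q->queuedata;
	int rw = bio_data_dir(bio);

	generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0);

	if (device_suspended_or_no_map(md)) {		/* illustrative stand-in */
		if (bio_rw(bio) != READA)
			queue_io(md, bio);	/* defer; replayed by dm_wq_work() */
		else
			bio_io_error(bio);	/* readahead may simply fail */
		return;
	}

	__split_and_process_bio(md, map, bio);
}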
1820 static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig, in dm_rq_bio_constructor() argument
1825 container_of(bio, struct dm_rq_clone_bio_info, clone); in dm_rq_bio_constructor()
1829 bio->bi_end_io = end_clone_bio; in dm_rq_bio_constructor()
2123 md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 && in dm_request_fn()
3020 struct bio *c; in dm_wq_work()