Lines matching references to tio
101 struct dm_rq_target_io *tio; member
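The tio objects referenced throughout this listing come in two flavors: struct dm_target_io for bio-based targets and struct dm_rq_target_io for request-based targets; the member at line 101 belongs to a third, smaller per-clone-bio struct that ties each bio of a cloned request back to its dm_rq_target_io (see info->tio at lines 1000 and 1828). The outlines below are reconstructed purely from the fields this listing touches, as an orientation aid; member order and any fields not referenced here are omitted, so these are not the authoritative definitions.

/* Illustrative outlines only: fields gathered from the references below. */
struct dm_target_io {               /* per-clone-bio bookkeeping, bio-based dm */
    struct dm_io *io;               /* owning dm_io (tio->io, tio->io->md) */
    struct dm_target *ti;           /* target this clone maps to */
    unsigned target_bio_nr;         /* which duplicate of the bio this is */
    unsigned *len_ptr;              /* sectors planned for this clone; see dm_accept_partial_bio() */
    struct bio clone;               /* embedded clone bio; container_of() recovers the tio */
};

struct dm_rq_target_io {            /* per-clone-request bookkeeping, request-based dm */
    struct mapped_device *md;       /* owning device */
    struct dm_target *ti;           /* target chosen in dm_request_fn()/dm_mq_queue_rq() */
    struct request *orig;           /* original request submitted to dm */
    struct request *clone;          /* clone dispatched to the underlying device */
    struct kthread_work work;       /* queued to md->kworker, runs map_tio_request() */
    int error;                      /* status latched by end_clone_bio()/dm_complete_request() */
    union map_info info;            /* per-target private mapping info */
};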
607 static void free_tio(struct mapped_device *md, struct dm_target_io *tio) in free_tio() argument
609 bio_put(&tio->clone); in free_tio()
618 static void free_rq_tio(struct dm_rq_target_io *tio) in free_rq_tio() argument
620 mempool_free(tio, tio->md->io_pool); in free_rq_tio()
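free_tio() and free_rq_tio() mirror the two allocation schemes: a bio-based dm_target_io lives in the front-padded space of its embedded clone bio, so dropping the bio with bio_put() returns the whole object to the device's bioset, while a request-based dm_rq_target_io on the old request_fn path comes from the per-device io_pool mempool. The allocation side, alloc_rq_tio() (used at line 1905 below), is presumably the thin mempool counterpart; a minimal sketch, assuming exactly that:

/* Likely counterpart to free_rq_tio(); the exact body in dm.c may differ. */
static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
                                            gfp_t gfp_mask)
{
    return mempool_alloc(md->io_pool, gfp_mask);
}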
960 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); in clone_endio() local
961 struct dm_io *io = tio->io; in clone_endio()
962 struct mapped_device *md = tio->io->md; in clone_endio()
963 dm_endio_fn endio = tio->ti->type->end_io; in clone_endio()
969 r = endio(tio->ti, bio, error); in clone_endio()
989 free_tio(md, tio); in clone_endio()
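clone_endio() receives only the completed clone bio, yet needs the surrounding dm_target_io; because the clone is embedded inside the tio, container_of() steps back from the member to the containing object. A self-contained user-space sketch of the same pattern, with toy types rather than the kernel structs:

#include <stdio.h>
#include <stddef.h>

/* Same idea as the kernel's container_of(): subtract the member's offset
 * from a pointer to that member to recover the enclosing structure. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_bio { int sectors; };

struct toy_target_io {
    int target_bio_nr;
    struct toy_bio clone;           /* embedded member, like dm_target_io::clone */
};

static void toy_endio(struct toy_bio *bio)
{
    /* recover the bookkeeping object from the embedded bio, as line 960 does */
    struct toy_target_io *tio = container_of(bio, struct toy_target_io, clone);

    printf("completed clone #%d (%d sectors)\n", tio->target_bio_nr, bio->sectors);
}

int main(void)
{
    struct toy_target_io tio = { .target_bio_nr = 3, .clone = { .sectors = 8 } };

    toy_endio(&tio.clone);          /* only the embedded bio is passed around */
    return 0;
}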
1000 struct dm_rq_target_io *tio = info->tio; in end_clone_bio() local
1006 if (tio->error) in end_clone_bio()
1019 tio->error = error; in end_clone_bio()
1033 if (tio->orig->bio != bio) in end_clone_bio()
1041 blk_update_request(tio->orig, 0, nr_bytes); in end_clone_bio()
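The end_clone_bio() references show how per-bio completions of a cloned request feed back into the original: once an error is latched in tio->error (lines 1006/1019), later per-bio accounting is skipped and the decision is deferred to request-level completion; a successful bio advances the original request's byte count with blk_update_request() without completing it. A condensed paraphrase of that flow, with the head-of-request sanity check at line 1033 and other details not shown here elided:

/* Condensed paraphrase of end_clone_bio(); not the verbatim kernel code. */
static void end_clone_bio_sketch(struct dm_rq_target_io *tio, int error,
                                 unsigned int nr_bytes)
{
    if (tio->error)
        return;                 /* an earlier bio already failed; leave the
                                 * rest to request-level completion */
    if (error) {
        tio->error = error;     /* latch it; the target decides the fate of
                                 * the whole request when the clone finishes */
        return;
    }

    /* success: advance the original request's byte count, but do not
     * complete it -- that happens when the clone request ends */
    blk_update_request(tio->orig, 0, nr_bytes);
}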
1079 struct dm_rq_target_io *tio = clone->end_io_data; in free_rq_clone() local
1080 struct mapped_device *md = tio->md; in free_rq_clone()
1086 tio->ti->type->release_clone_rq(clone); in free_rq_clone()
1097 free_rq_tio(tio); in free_rq_clone()
1108 struct dm_rq_target_io *tio = clone->end_io_data; in dm_end_request() local
1109 struct mapped_device *md = tio->md; in dm_end_request()
1110 struct request *rq = tio->orig; in dm_end_request()
1135 struct dm_rq_target_io *tio = tio_from_request(rq); in dm_unprep_request() local
1136 struct request *clone = tio->clone; in dm_unprep_request()
1145 else if (!tio->md->queue->mq_ops) in dm_unprep_request()
1146 free_rq_tio(tio); in dm_unprep_request()
1182 struct dm_rq_target_io *tio = clone->end_io_data; in dm_requeue_unmapped_request() local
1184 dm_requeue_unmapped_original_request(tio->md, tio->orig); in dm_requeue_unmapped_request()
1228 struct dm_rq_target_io *tio = clone->end_io_data; in dm_done() local
1231 if (tio->ti) { in dm_done()
1232 rq_end_io = tio->ti->type->rq_end_io; in dm_done()
1235 r = rq_end_io(tio->ti, clone, error, &tio->info); in dm_done()
1240 disable_write_same(tio->md); in dm_done()
1263 struct dm_rq_target_io *tio = tio_from_request(rq); in dm_softirq_done() local
1264 struct request *clone = tio->clone; in dm_softirq_done()
1270 blk_end_request_all(rq, tio->error); in dm_softirq_done()
1271 rq_completed(tio->md, rw, false); in dm_softirq_done()
1272 free_rq_tio(tio); in dm_softirq_done()
1274 blk_mq_end_request(rq, tio->error); in dm_softirq_done()
1275 rq_completed(tio->md, rw, false); in dm_softirq_done()
1283 dm_done(clone, tio->error, mapped); in dm_softirq_done()
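dm_softirq_done() takes two completion paths: a request that never acquired a clone (for instance one killed or requeued before it was mapped) is ended directly, with blk_end_request_all() on the old request_fn path or blk_mq_end_request() under blk-mq, freeing the mempool-backed tio only in the former case since blk-mq keeps the tio in the request pdu; a request with a clone is handed to dm_done() so the target's rq_end_io hook can inspect the result first. A condensed sketch of that branching, assuming tio_from_request() and rq_completed() behave as their uses in this listing suggest:

/* Condensed sketch of dm_softirq_done(); the check that feeds the 'mapped'
 * flag to dm_done() is not shown in this listing and is elided here. */
static void dm_softirq_done_sketch(struct request *rq)
{
    struct dm_rq_target_io *tio = tio_from_request(rq);
    struct request *clone = tio->clone;
    int rw = rq_data_dir(rq);

    if (!clone) {
        /* never mapped: complete the original request directly */
        if (!rq->q->mq_ops) {
            blk_end_request_all(rq, tio->error);
            rq_completed(tio->md, rw, false);
            free_rq_tio(tio);           /* mempool-backed on this path */
        } else {
            blk_mq_end_request(rq, tio->error);
            rq_completed(tio->md, rw, false);
            /* tio lives in the blk-mq pdu; nothing to free */
        }
        return;
    }

    /* mapped: let the target's rq_end_io see the result via dm_done() */
    dm_done(clone, tio->error, true);
}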
1292 struct dm_rq_target_io *tio = tio_from_request(rq); in dm_complete_request() local
1294 tio->error = error; in dm_complete_request()
1318 struct dm_rq_target_io *tio = clone->end_io_data; in end_clone_request() local
1338 dm_complete_request(tio->orig, error); in end_clone_request()
1420 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); in dm_accept_partial_bio() local
1423 BUG_ON(bi_size > *tio->len_ptr); in dm_accept_partial_bio()
1425 *tio->len_ptr -= bi_size - n_sectors; in dm_accept_partial_bio()
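dm_accept_partial_bio() lets a target's map callback report that it only handled the first n_sectors of a clone covering bi_size sectors; the unaccepted tail, bi_size - n_sectors, is subtracted from *tio->len_ptr so the core splitting loop re-issues it. A tiny stand-alone model of that arithmetic (toy variables, not the kernel API):

#include <assert.h>
#include <stdio.h>

/* Toy model of the accounting at lines 1423/1425: the clone covered bi_size
 * sectors, the target accepted n_sectors, so the planned length for this
 * clone shrinks to just what was consumed. */
static void accept_partial(unsigned *len_ptr, unsigned bi_size, unsigned n_sectors)
{
    assert(bi_size <= *len_ptr);        /* mirrors BUG_ON(bi_size > *tio->len_ptr) */
    assert(n_sectors <= bi_size);
    *len_ptr -= bi_size - n_sectors;
}

int main(void)
{
    unsigned len = 128;                 /* sectors planned for this clone */

    accept_partial(&len, 128, 40);      /* target took only the first 40 */
    printf("sectors now accounted to this clone: %u\n", len);  /* prints 40 */
    return 0;
}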
1430 static void __map_bio(struct dm_target_io *tio) in __map_bio() argument
1435 struct bio *clone = &tio->clone; in __map_bio()
1436 struct dm_target *ti = tio->ti; in __map_bio()
1445 atomic_inc(&tio->io->io_count); in __map_bio()
1452 tio->io->bio->bi_bdev->bd_dev, sector); in __map_bio()
1457 md = tio->io->md; in __map_bio()
1458 dec_pending(tio->io, r); in __map_bio()
1459 free_tio(md, tio); in __map_bio()
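__map_bio() bumps the owning dm_io's reference count before invoking the target's map hook, because the clone may complete and drop that reference before the hook even returns; the hook's return value then steers dispatch according to the usual device-mapper convention. The map call and the remapped-dispatch branch below are that standard convention rather than lines from this listing; only the error/requeue branch (lines 1457-1459) appears above:

/* Sketch of the dispatch decision in __map_bio(); tracing and the
 * unknown-return-value case are omitted. */
    atomic_inc(&tio->io->io_count);     /* clone may finish before map() returns */
    r = ti->type->map(ti, clone);

    if (r == DM_MAPIO_REMAPPED) {
        generic_make_request(clone);    /* target redirected it; dm submits the clone */
    } else if (r < 0 || r == DM_MAPIO_REQUEUE) {
        dec_pending(tio->io, r);        /* drop the reference taken above */
        free_tio(tio->io->md, tio);     /* the clone never leaves dm */
    }
    /* DM_MAPIO_SUBMITTED: the target owns the clone now; nothing to do */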
1484 static void clone_bio(struct dm_target_io *tio, struct bio *bio, in clone_bio() argument
1487 struct bio *clone = &tio->clone; in clone_bio()
1505 struct dm_target_io *tio; in alloc_tio() local
1509 tio = container_of(clone, struct dm_target_io, clone); in alloc_tio()
1511 tio->io = ci->io; in alloc_tio()
1512 tio->ti = ti; in alloc_tio()
1513 tio->target_bio_nr = target_bio_nr; in alloc_tio()
1515 return tio; in alloc_tio()
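alloc_tio() gets its dm_target_io by allocating the embedded clone bio from the device's bioset and stepping back with container_of() (line 1509); that only works because dm creates the bioset with front padding sized to hold the tio in front of every bio. The bioset-creation line below is an assumption about how dm sizes that padding, not a line from this listing:

/* Front-pad trick behind alloc_tio(); creation details are an assumption. */
    /* at mapped_device setup: every bio from this set carries a
     * struct dm_target_io immediately in front of it */
    md->bs = bioset_create(pool_size, offsetof(struct dm_target_io, clone));

    /* in alloc_tio(): allocate the bio, then step back to the tio */
    clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, ci->md->bs);
    tio = container_of(clone, struct dm_target_io, clone);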
1522 struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr); in __clone_and_map_simple_bio() local
1523 struct bio *clone = &tio->clone; in __clone_and_map_simple_bio()
1525 tio->len_ptr = len; in __clone_and_map_simple_bio()
1531 __map_bio(tio); in __clone_and_map_simple_bio()
1559 struct dm_target_io *tio; in __clone_and_map_data_bio() local
1570 tio = alloc_tio(ci, ti, target_bio_nr); in __clone_and_map_data_bio()
1571 tio->len_ptr = len; in __clone_and_map_data_bio()
1572 clone_bio(tio, bio, sector, *len); in __clone_and_map_data_bio()
1573 __map_bio(tio); in __clone_and_map_data_bio()
1823 struct dm_rq_target_io *tio = data; in dm_rq_bio_constructor() local
1828 info->tio = tio; in dm_rq_bio_constructor()
1835 struct dm_rq_target_io *tio, gfp_t gfp_mask) in setup_clone() argument
1839 r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask, in setup_clone()
1840 dm_rq_bio_constructor, tio); in setup_clone()
1848 clone->end_io_data = tio; in setup_clone()
1850 tio->clone = clone; in setup_clone()
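setup_clone() relies on blk_rq_prep_clone() (line 1839) to clone each bio of the original request out of the device's bioset and to call the supplied constructor once per cloned bio; the constructor records a back-pointer to the tio in the front-padded per-bio info, which is how end_clone_bio() finds it later (line 1000). A sketch of that constructor; the name and layout of the per-clone-bio info struct are assumptions beyond what this listing shows:

/* Sketch of the bio constructor handed to blk_rq_prep_clone(). */
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
                                 void *data)
{
    struct dm_rq_target_io *tio = data;
    struct dm_rq_clone_bio_info *info =
        container_of(bio, struct dm_rq_clone_bio_info, clone);

    info->orig = bio_orig;              /* remember the source bio */
    info->tio = tio;                    /* back-pointer used by end_clone_bio() */
    bio->bi_end_io = end_clone_bio;

    return 0;
}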
1856 struct dm_rq_target_io *tio, gfp_t gfp_mask) in clone_rq() argument
1862 bool alloc_clone = !tio->clone; in clone_rq()
1870 clone = tio->clone; in clone_rq()
1873 if (setup_clone(clone, rq, tio, gfp_mask)) { in clone_rq()
1885 static void init_tio(struct dm_rq_target_io *tio, struct request *rq, in init_tio() argument
1888 tio->md = md; in init_tio()
1889 tio->ti = NULL; in init_tio()
1890 tio->clone = NULL; in init_tio()
1891 tio->orig = rq; in init_tio()
1892 tio->error = 0; in init_tio()
1893 memset(&tio->info, 0, sizeof(tio->info)); in init_tio()
1895 init_kthread_work(&tio->work, map_tio_request); in init_tio()
1901 struct dm_rq_target_io *tio; in prep_tio() local
1905 tio = alloc_rq_tio(md, gfp_mask); in prep_tio()
1906 if (!tio) in prep_tio()
1909 init_tio(tio, rq, md); in prep_tio()
1913 if (!clone_rq(rq, md, tio, gfp_mask)) { in prep_tio()
1915 free_rq_tio(tio); in prep_tio()
1921 return tio; in prep_tio()
1930 struct dm_rq_target_io *tio; in dm_prep_fn() local
1937 tio = prep_tio(rq, md, GFP_ATOMIC); in dm_prep_fn()
1938 if (!tio) in dm_prep_fn()
1941 rq->special = tio; in dm_prep_fn()
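dm_prep_fn() stashes the freshly prepared tio in rq->special (line 1941) so later stages can find it again; under blk-mq the tio instead lives in the request pdu (line 2694). tio_from_request(), used at lines 1135, 1263, 1292 and 2132, presumably just picks whichever of the two applies:

/* Presumable body of tio_from_request(); grounded in lines 1941 and 2694
 * but not quoted from dm.c. */
static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
    return rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special;
}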
1953 static int map_request(struct dm_rq_target_io *tio, struct request *rq, in map_request() argument
1957 struct dm_target *ti = tio->ti; in map_request()
1960 if (tio->clone) { in map_request()
1961 clone = tio->clone; in map_request()
1962 r = ti->type->map_rq(ti, clone, &tio->info); in map_request()
1964 r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone); in map_request()
1972 if (setup_clone(clone, rq, tio, GFP_ATOMIC)) { in map_request()
2009 struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work); in map_tio_request() local
2010 struct request *rq = tio->orig; in map_tio_request()
2011 struct mapped_device *md = tio->md; in map_tio_request()
2013 if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) in map_tio_request()
2091 struct dm_rq_target_io *tio; in dm_request_fn() local
2132 tio = tio_from_request(rq); in dm_request_fn()
2134 tio->ti = ti; in dm_request_fn()
2135 queue_kthread_work(&md->kworker, &tio->work); in dm_request_fn()
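map_request() may sleep (it can allocate a clone request and call target map hooks), so dm_request_fn() does not map inline: it records the chosen target in tio->ti and queues tio->work, initialized to run map_tio_request() at line 1895, onto the device's kthread worker. The worker setup that implies is not part of this listing; the lines below are the generic kthread_work pattern of this kernel vintage, with illustrative names:

/* Generic kthread_worker setup behind md->kworker (illustrative, not dm.c). */
    struct kthread_worker kworker;          /* md->kworker in dm.c */
    struct task_struct *kworker_task;

    init_kthread_worker(&kworker);
    kworker_task = kthread_run(kthread_worker_fn, &kworker, "kdmwork");
    /* then, per request: init_kthread_work() + queue_kthread_work(),
     * as at lines 1895 and 2135 above */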
2694 struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq); in dm_mq_init_request() local
2700 tio->md = md; in dm_mq_init_request()
2709 struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq); in dm_mq_queue_rq() local
2710 struct mapped_device *md = tio->md; in dm_mq_queue_rq()
2740 init_tio(tio, rq, md); in dm_mq_queue_rq()
2746 tio->ti = ti; in dm_mq_queue_rq()
2751 tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io); in dm_mq_queue_rq()
2752 (void) clone_rq(rq, md, tio, GFP_ATOMIC); in dm_mq_queue_rq()
2753 queue_kthread_work(&md->kworker, &tio->work); in dm_mq_queue_rq()
2756 if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) { in dm_mq_queue_rq()
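On the blk-mq path the tio is never mempool-allocated: dm_mq_queue_rq() treats the request's pdu as the dm_rq_target_io (line 2709), re-initializes it per dispatch with init_tio(), and places the clone request's storage directly behind it in the same allocation (line 2751); mapping then either goes through the same kworker as the old path or happens inline via map_request(). The pdu layout that line 2751 implies, with the cmd_size sizing stated as a presumption:

/* Layout implied by line 2751: the per-request payload holds the tio followed
 * by storage for the clone, so the tag set's cmd_size presumably covers
 * sizeof(struct dm_rq_target_io) + sizeof(struct request). */
    struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
    struct request *clone = (void *)(tio + 1);   /* same address as line 2751 */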