Lines matching refs:tio, a cross-reference of the identifier tio. All hits are in the device-mapper core (drivers/md/dm.c); each entry shows the source line number, the matching code, and the enclosing function, with declarations tagged as member, argument, or local.

105 	struct dm_rq_target_io *tio;  member
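
The line-105 hit is a back-pointer member: every clone bio of a request carries a small info struct that points back at the owning request-level tio, which is how end_clone_bio() further down recovers it. A hedged reconstruction of that struct; the exact field set varies by kernel version:

struct dm_rq_clone_bio_info {
	struct bio *orig;		/* matching bio of the original request */
	struct dm_rq_target_io *tio;	/* owning request-level tio (the line-105 member) */
	struct bio clone;		/* the clone bio itself, embedded */
};
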
640 static void free_tio(struct mapped_device *md, struct dm_target_io *tio) in free_tio() argument
642 bio_put(&tio->clone); in free_tio()
651 static void free_rq_tio(struct dm_rq_target_io *tio) in free_rq_tio() argument
653 mempool_free(tio, tio->md->io_pool); in free_rq_tio()
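
Both free helpers are one-liners, and the hits show their full bodies. Reconstructed, with comments added:

/* Bio-level tios are embedded in their clone bio (see alloc_tio() below),
 * so dropping the last bio reference frees the tio along with it. */
static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	bio_put(&tio->clone);
}

/* Request-level tios come from the mapped device's io mempool. */
static void free_rq_tio(struct dm_rq_target_io *tio)
{
	mempool_free(tio, tio->md->io_pool);
}
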
995 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); in clone_endio() local
996 struct dm_io *io = tio->io; in clone_endio()
997 struct mapped_device *md = tio->io->md; in clone_endio()
998 dm_endio_fn endio = tio->ti->type->end_io; in clone_endio()
1001 r = endio(tio->ti, bio, error); in clone_endio()
1021 free_tio(md, tio); in clone_endio()
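
clone_endio() is the completion hook installed on every clone bio by __map_bio(). A sketch of the flow implied by the hits; the signature and the DM_ENDIO_* handling vary across kernel versions and are assumptions here:

static void clone_endio(struct bio *bio, int error)
{
	/* the tio is embedded in the clone, so container_of() recovers it */
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (endio) {
		int r = endio(tio->ti, bio, error);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			error = r;	/* sketch: DM_ENDIO_INCOMPLETE handling elided */
	}

	free_tio(md, tio);
	dec_pending(io, error);	/* assumption: per-dm_io completion accounting */
}
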
1032 struct dm_rq_target_io *tio = info->tio; in end_clone_bio() local
1039 if (tio->error) in end_clone_bio()
1052 tio->error = error; in end_clone_bio()
1066 if (tio->orig->bio != bio) in end_clone_bio()
1074 blk_update_request(tio->orig, 0, nr_bytes); in end_clone_bio()
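
end_clone_bio() completes one clone bio of a request clone. The hits show its key policy: a detected error is parked in tio->error rather than propagated immediately (the target driver decides at request completion time), and successful bytes are pushed to the original request with blk_update_request() instead of blk_end_request(), so the original cannot complete before its clone. A condensed sketch, with the info-struct plumbing assumed as in the reconstruction near line 105:

static void end_clone_bio(struct bio *clone, int error)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;

	bio_put(clone);

	if (tio->error)
		return;		/* request already failed; nothing more to report */

	if (error) {
		tio->error = error;	/* park it; decide at request completion */
		return;
	}

	/* bios complete in order from rq->bio, so this should be the head */
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	/* advance the original request without completing it */
	blk_update_request(tio->orig, 0, nr_bytes);
}
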
1085 struct dm_rq_target_io *tio = tio_from_request(orig); in rq_end_stats() local
1086 tio->duration_jiffies = jiffies - tio->duration_jiffies; in rq_end_stats()
1088 tio->n_sectors, true, tio->duration_jiffies, in rq_end_stats()
1089 &tio->stats_aux); in rq_end_stats()
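
rq_end_stats() closes the statistics window that dm_start_request() opens (lines 2019-2022 below): duration_jiffies holds the start stamp until completion, when it is overwritten with the elapsed time. A hedged reconstruction:

static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
	if (unlikely(dm_stats_used(&md->stats))) {	/* assumption: stats are opt-in */
		struct dm_rq_target_io *tio = tio_from_request(orig);

		tio->duration_jiffies = jiffies - tio->duration_jiffies;
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors,
				    true, tio->duration_jiffies,
				    &tio->stats_aux);
	}
}
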
1123 struct dm_rq_target_io *tio = clone->end_io_data; in free_rq_clone() local
1124 struct mapped_device *md = tio->md; in free_rq_clone()
1130 tio->ti->type->release_clone_rq(clone); in free_rq_clone()
1141 free_rq_tio(tio); in free_rq_clone()
1152 struct dm_rq_target_io *tio = clone->end_io_data; in dm_end_request() local
1153 struct mapped_device *md = tio->md; in dm_end_request()
1154 struct request *rq = tio->orig; in dm_end_request()
1180 struct dm_rq_target_io *tio = tio_from_request(rq); in dm_unprep_request() local
1181 struct request *clone = tio->clone; in dm_unprep_request()
1190 else if (!tio->md->queue->mq_ops) in dm_unprep_request()
1191 free_rq_tio(tio); in dm_unprep_request()
1267 struct dm_rq_target_io *tio = clone->end_io_data; in dm_done() local
1270 if (tio->ti) { in dm_done()
1271 rq_end_io = tio->ti->type->rq_end_io; in dm_done()
1274 r = rq_end_io(tio->ti, clone, error, &tio->info); in dm_done()
1279 disable_write_same(tio->md); in dm_done()
1289 dm_requeue_original_request(tio->md, tio->orig); in dm_done()
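
dm_done() routes a finished clone through the target's optional rq_end_io hook, then either ends the original request or requeues it. A sketch of the branch structure visible in the hits; the helper names present in the hits (disable_write_same, dm_end_request, dm_requeue_original_request) are grounded, the rest is an assumption:

static void dm_done(struct request *clone, int error, bool mapped)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	int r = error;

	if (tio->ti) {
		dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME)))
		disable_write_same(tio->md);	/* device lacks WRITE SAME after all */

	if (r <= 0)
		dm_end_request(clone, r);	/* complete the original */
	else if (r == DM_ENDIO_REQUEUE)
		dm_requeue_original_request(tio->md, tio->orig);
	/* sketch: DM_ENDIO_INCOMPLETE and unknown-return handling elided */
}
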
1302 struct dm_rq_target_io *tio = tio_from_request(rq); in dm_softirq_done() local
1303 struct request *clone = tio->clone; in dm_softirq_done()
1307 rq_end_stats(tio->md, rq); in dm_softirq_done()
1310 blk_end_request_all(rq, tio->error); in dm_softirq_done()
1311 rq_completed(tio->md, rw, false); in dm_softirq_done()
1312 free_rq_tio(tio); in dm_softirq_done()
1314 blk_mq_end_request(rq, tio->error); in dm_softirq_done()
1315 rq_completed(tio->md, rw, false); in dm_softirq_done()
1323 dm_done(clone, tio->error, mapped); in dm_softirq_done()
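
dm_softirq_done() is the softirq completion half. The hits show its two shapes: with no clone (the request was never mapped) it finishes the original directly, on the legacy path additionally freeing the separately allocated tio; with a clone it defers to dm_done(). A hedged sketch:

static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct dm_rq_target_io *tio = tio_from_request(rq);
	struct request *clone = tio->clone;
	int rw;

	if (!clone) {
		rq_end_stats(tio->md, rq);
		rw = rq_data_dir(rq);
		if (!rq->q->mq_ops) {		/* legacy request_fn queue */
			blk_end_request_all(rq, tio->error);
			rq_completed(tio->md, rw, false);
			free_rq_tio(tio);	/* tio was mempool-allocated */
		} else {			/* blk-mq: tio lives in the pdu */
			blk_mq_end_request(rq, tio->error);
			rq_completed(tio->md, rw, false);
		}
		return;
	}

	if (rq->cmd_flags & REQ_FAILED)	/* assumption: set when the clone was killed unmapped */
		mapped = false;

	dm_done(clone, tio->error, mapped);
}
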
1332 struct dm_rq_target_io *tio = tio_from_request(rq); in dm_complete_request() local
1334 tio->error = error; in dm_complete_request()
1358 struct dm_rq_target_io *tio = clone->end_io_data; in end_clone_request() local
1378 dm_complete_request(tio->orig, error); in end_clone_request()
1460 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); in dm_accept_partial_bio() local
1463 BUG_ON(bi_size > *tio->len_ptr); in dm_accept_partial_bio()
1465 *tio->len_ptr -= bi_size - n_sectors; in dm_accept_partial_bio()
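
dm_accept_partial_bio() lets a target accept only the first n_sectors of a clone bio; the remainder gets re-issued because *tio->len_ptr is shared with the splitting logic in the caller. A reconstruction:

void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;

	BUG_ON(bi_size > *tio->len_ptr);
	BUG_ON(n_sectors > bi_size);

	/* shrink the shared length and this clone's own size together */
	*tio->len_ptr -= bi_size - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
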
1470 static void __map_bio(struct dm_target_io *tio) in __map_bio() argument
1475 struct bio *clone = &tio->clone; in __map_bio()
1476 struct dm_target *ti = tio->ti; in __map_bio()
1485 atomic_inc(&tio->io->io_count); in __map_bio()
1492 tio->io->bio->bi_bdev->bd_dev, sector); in __map_bio()
1497 md = tio->io->md; in __map_bio()
1498 dec_pending(tio->io, r); in __map_bio()
1499 free_tio(md, tio); in __map_bio()
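
__map_bio() hands a clone to the target's map function and disposes of it according to the return code. A sketch; the extra io_count reference (line 1485) exists so the target may complete the clone from inside map without the dm_io vanishing underneath:

static void __map_bio(struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct bio *clone = &tio->clone;
	struct dm_target *ti = tio->ti;

	clone->bi_end_io = clone_endio;

	atomic_inc(&tio->io->io_count);		/* map may complete the clone itself */
	sector = clone->bi_iter.bi_sector;
	r = ti->type->map(ti, clone);

	if (r == DM_MAPIO_REMAPPED) {
		trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
				      tio->io->bio->bi_bdev->bd_dev, sector);
		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		struct mapped_device *md = tio->io->md;
		dec_pending(tio->io, r);
		free_tio(md, tio);
	}
	/* DM_MAPIO_SUBMITTED: the target now owns the clone */
}
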
1524 static void clone_bio(struct dm_target_io *tio, struct bio *bio, in clone_bio() argument
1527 struct bio *clone = &tio->clone; in clone_bio()
1545 struct dm_target_io *tio; in alloc_tio() local
1549 tio = container_of(clone, struct dm_target_io, clone); in alloc_tio()
1551 tio->io = ci->io; in alloc_tio()
1552 tio->ti = ti; in alloc_tio()
1553 tio->target_bio_nr = target_bio_nr; in alloc_tio()
1555 return tio; in alloc_tio()
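
alloc_tio() shows where bio-level tios live: struct dm_target_io embeds its clone bio as the last member, so allocating a zero-payload bio from the md bioset (whose front-pad is sized for the tio) yields both at once, and container_of() walks back from the bio. A reconstruction; the bio_alloc_bioset() call is an assumption inferred from that layout:

static struct dm_target_io *alloc_tio(struct clone_info *ci,
				      struct dm_target *ti,
				      unsigned target_bio_nr)
{
	struct dm_target_io *tio;
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
	tio = container_of(clone, struct dm_target_io, clone);

	tio->io = ci->io;
	tio->ti = ti;
	tio->target_bio_nr = target_bio_nr;

	return tio;
}
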
1562 struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr); in __clone_and_map_simple_bio() local
1563 struct bio *clone = &tio->clone; in __clone_and_map_simple_bio()
1565 tio->len_ptr = len; in __clone_and_map_simple_bio()
1571 __map_bio(tio); in __clone_and_map_simple_bio()
1599 struct dm_target_io *tio; in __clone_and_map_data_bio() local
1610 tio = alloc_tio(ci, ti, target_bio_nr); in __clone_and_map_data_bio()
1611 tio->len_ptr = len; in __clone_and_map_data_bio()
1612 clone_bio(tio, bio, sector, *len); in __clone_and_map_data_bio()
1613 __map_bio(tio); in __clone_and_map_data_bio()
1809 struct dm_rq_target_io *tio = data; in dm_rq_bio_constructor() local
1814 info->tio = tio; in dm_rq_bio_constructor()
1821 struct dm_rq_target_io *tio, gfp_t gfp_mask) in setup_clone() argument
1825 r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask, in setup_clone()
1826 dm_rq_bio_constructor, tio); in setup_clone()
1834 clone->end_io_data = tio; in setup_clone()
1836 tio->clone = clone; in setup_clone()
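
setup_clone() is nearly complete in the hits: blk_rq_prep_clone() copies the request and rebuilds its bios through dm_rq_bio_constructor(), then the clone's completion is wired to end_clone_request() with the tio as end_io_data, which is how every clone-side path above recovered its tio. A reconstruction; the mirrored SCSI command fields are an assumption for this kernel era:

static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;

	clone->cmd = rq->cmd;		/* assumption: pass-through cmd mirroring */
	clone->cmd_len = rq->cmd_len;
	clone->sense = rq->sense;
	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

	tio->clone = clone;

	return 0;
}
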
1842 struct dm_rq_target_io *tio, gfp_t gfp_mask) in clone_rq() argument
1848 bool alloc_clone = !tio->clone; in clone_rq()
1856 clone = tio->clone; in clone_rq()
1859 if (setup_clone(clone, rq, tio, gfp_mask)) { in clone_rq()
1871 static void init_tio(struct dm_rq_target_io *tio, struct request *rq, in init_tio() argument
1874 tio->md = md; in init_tio()
1875 tio->ti = NULL; in init_tio()
1876 tio->clone = NULL; in init_tio()
1877 tio->orig = rq; in init_tio()
1878 tio->error = 0; in init_tio()
1879 memset(&tio->info, 0, sizeof(tio->info)); in init_tio()
1881 init_kthread_work(&tio->work, map_tio_request); in init_tio()
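
init_tio() is shown almost in full; only the guard between lines 1879 and 1881 is missing. A reconstruction, with that guard an assumption (queues that map directly from blk-mq context would not need the kworker):

static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
		     struct mapped_device *md)
{
	tio->md = md;
	tio->ti = NULL;
	tio->clone = NULL;
	tio->orig = rq;
	tio->error = 0;
	memset(&tio->info, 0, sizeof(tio->info));
	if (md->kworker_task)	/* assumption: kworker is optional */
		init_kthread_work(&tio->work, map_tio_request);
}
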
1887 struct dm_rq_target_io *tio; in prep_tio() local
1891 tio = alloc_rq_tio(md, gfp_mask); in prep_tio()
1892 if (!tio) in prep_tio()
1895 init_tio(tio, rq, md); in prep_tio()
1899 if (!clone_rq(rq, md, tio, gfp_mask)) { in prep_tio()
1901 free_rq_tio(tio); in prep_tio()
1907 return tio; in prep_tio()
1916 struct dm_rq_target_io *tio; in dm_prep_fn() local
1923 tio = prep_tio(rq, md, GFP_ATOMIC); in dm_prep_fn()
1924 if (!tio) in dm_prep_fn()
1927 rq->special = tio; in dm_prep_fn()
1939 static int map_request(struct dm_rq_target_io *tio, struct request *rq, in map_request() argument
1943 struct dm_target *ti = tio->ti; in map_request()
1946 if (tio->clone) { in map_request()
1947 clone = tio->clone; in map_request()
1948 r = ti->type->map_rq(ti, clone, &tio->info); in map_request()
1950 r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone); in map_request()
1958 if (setup_clone(clone, rq, tio, GFP_ATOMIC)) { in map_request()
1977 dm_requeue_original_request(md, tio->orig); in map_request()
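
map_request() covers both cloning styles: a pre-allocated clone goes through map_rq(), while clone_and_map_rq() lets the target allocate the clone itself, which then still needs setup_clone() (line 1958). The return-code dispatch below is a sketch; helper names not present in the hits (dm_dispatch_clone_request, dm_kill_unmapped_request) are assumptions from this kernel era:

static int map_request(struct dm_rq_target_io *tio, struct request *rq,
		       struct mapped_device *md)
{
	int r;
	struct dm_target *ti = tio->ti;
	struct request *clone = NULL;

	if (tio->clone) {
		clone = tio->clone;
		r = ti->type->map_rq(ti, clone, &tio->info);
	} else {
		r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
		if (r == DM_MAPIO_REMAPPED &&
		    setup_clone(clone, rq, tio, GFP_ATOMIC))
			r = DM_MAPIO_REQUEUE;	/* -ENOMEM: fall back to requeue */
	}

	switch (r) {
	case DM_MAPIO_SUBMITTED:
		break;				/* target took over the request */
	case DM_MAPIO_REMAPPED:
		dm_dispatch_clone_request(clone, rq);
		break;
	case DM_MAPIO_REQUEUE:
		dm_requeue_original_request(md, tio->orig);
		break;
	default:
		if (r < 0)
			dm_kill_unmapped_request(rq, r);
		break;
	}

	return r;
}
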
1995 struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work); in map_tio_request() local
1996 struct request *rq = tio->orig; in map_tio_request()
1997 struct mapped_device *md = tio->md; in map_tio_request()
1999 if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) in map_tio_request()
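
map_tio_request() is the kworker body that init_tio() registered; the hits give it in full apart from the requeue call:

static void map_tio_request(struct kthread_work *work)
{
	struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
	struct request *rq = tio->orig;
	struct mapped_device *md = tio->md;

	if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
		dm_requeue_original_request(md, rq);
}
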
2018 struct dm_rq_target_io *tio = tio_from_request(orig); in dm_start_request() local
2019 tio->duration_jiffies = jiffies; in dm_start_request()
2020 tio->n_sectors = blk_rq_sectors(orig); in dm_start_request()
2022 tio->n_sectors, false, 0, &tio->stats_aux); in dm_start_request()
2085 struct dm_rq_target_io *tio; in dm_request_fn() local
2126 tio = tio_from_request(rq); in dm_request_fn()
2128 tio->ti = ti; in dm_request_fn()
2129 queue_kthread_work(&md->kworker, &tio->work); in dm_request_fn()
2636 struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq); in dm_mq_init_request() local
2642 tio->md = md; in dm_mq_init_request()
2651 struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq); in dm_mq_queue_rq() local
2652 struct mapped_device *md = tio->md; in dm_mq_queue_rq()
2682 init_tio(tio, rq, md); in dm_mq_queue_rq()
2688 tio->ti = ti; in dm_mq_queue_rq()
2693 tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io); in dm_mq_queue_rq()
2694 (void) clone_rq(rq, md, tio, GFP_ATOMIC); in dm_mq_queue_rq()
2695 queue_kthread_work(&md->kworker, &tio->work); in dm_mq_queue_rq()
2698 if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) { in dm_mq_queue_rq()
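
dm_mq_queue_rq() ties the blk-mq path together: here the tio is the request pdu (so no mempool and no free_rq_tio()), and when the table stacks on legacy request_fn devices the clone is carved out of the same pdu right behind the tio (line 2693) and the mapping deferred to the kworker; otherwise map_request() runs inline. A sketch with table lookup, suspension checks, and error paths elided; the branch condition and the BLK_MQ_RQ_QUEUE_* returns are assumptions for this era:

static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
			  const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
	struct mapped_device *md = tio->md;
	struct dm_table *map;		/* sketch: from dm_get_live_table() */
	struct dm_target *ti;		/* sketch: looked up in the live table */

	dm_start_request(md, rq);
	init_tio(tio, rq, md);		/* re-initialize the per-request pdu */
	tio->ti = ti;

	if (dm_table_get_type(map) == DM_TYPE_REQUEST_BASED) {
		/* stacked on request_fn device(s): clone lives in the pdu */
		tio->clone = (void *)blk_mq_rq_to_pdu(rq) +
			     sizeof(struct dm_rq_target_io);
		(void) clone_rq(rq, md, tio, GFP_ATOMIC);
		queue_kthread_work(&md->kworker, &tio->work);
	} else {
		/* stacked on blk-mq device(s): map directly from ->queue_rq */
		if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
			rq_end_stats(md, rq);
			rq_completed(md, rq_data_dir(rq), false);
			return BLK_MQ_RQ_QUEUE_BUSY;
		}
	}

	return BLK_MQ_RQ_QUEUE_OK;
}
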