Searched refs:tio (Results 1 - 9 of 9) sorted by relevance

/linux-4.4.14/drivers/md/
dm.c
105 struct dm_rq_target_io *tio; member in struct:dm_rq_clone_bio_info
640 static void free_tio(struct mapped_device *md, struct dm_target_io *tio) free_tio() argument
642 bio_put(&tio->clone); free_tio()
651 static void free_rq_tio(struct dm_rq_target_io *tio) free_rq_tio() argument
653 mempool_free(tio, tio->md->io_pool); free_rq_tio()
995 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); clone_endio() local
996 struct dm_io *io = tio->io; clone_endio()
997 struct mapped_device *md = tio->io->md; clone_endio()
998 dm_endio_fn endio = tio->ti->type->end_io; clone_endio()
1001 r = endio(tio->ti, bio, error); clone_endio()
1021 free_tio(md, tio); clone_endio()
1032 struct dm_rq_target_io *tio = info->tio; end_clone_bio() local
1039 if (tio->error) end_clone_bio()
1052 tio->error = error; end_clone_bio()
1066 if (tio->orig->bio != bio) end_clone_bio()
1074 blk_update_request(tio->orig, 0, nr_bytes); end_clone_bio()
1085 struct dm_rq_target_io *tio = tio_from_request(orig); rq_end_stats() local
1086 tio->duration_jiffies = jiffies - tio->duration_jiffies; rq_end_stats()
1088 tio->n_sectors, true, tio->duration_jiffies, rq_end_stats()
1089 &tio->stats_aux); rq_end_stats()
1123 struct dm_rq_target_io *tio = clone->end_io_data; free_rq_clone() local
1124 struct mapped_device *md = tio->md; free_rq_clone()
1130 tio->ti->type->release_clone_rq(clone); free_rq_clone()
1141 free_rq_tio(tio); free_rq_clone()
1152 struct dm_rq_target_io *tio = clone->end_io_data; dm_end_request() local
1153 struct mapped_device *md = tio->md; dm_end_request()
1154 struct request *rq = tio->orig; dm_end_request()
1180 struct dm_rq_target_io *tio = tio_from_request(rq); dm_unprep_request() local
1181 struct request *clone = tio->clone; dm_unprep_request()
1190 else if (!tio->md->queue->mq_ops) dm_unprep_request()
1191 free_rq_tio(tio); dm_unprep_request()
1267 struct dm_rq_target_io *tio = clone->end_io_data; dm_done() local
1270 if (tio->ti) { dm_done()
1271 rq_end_io = tio->ti->type->rq_end_io; dm_done()
1274 r = rq_end_io(tio->ti, clone, error, &tio->info); dm_done()
1279 disable_write_same(tio->md); dm_done()
1289 dm_requeue_original_request(tio->md, tio->orig); dm_done()
1302 struct dm_rq_target_io *tio = tio_from_request(rq); dm_softirq_done() local
1303 struct request *clone = tio->clone; dm_softirq_done()
1307 rq_end_stats(tio->md, rq); dm_softirq_done()
1310 blk_end_request_all(rq, tio->error); dm_softirq_done()
1311 rq_completed(tio->md, rw, false); dm_softirq_done()
1312 free_rq_tio(tio); dm_softirq_done()
1314 blk_mq_end_request(rq, tio->error); dm_softirq_done()
1315 rq_completed(tio->md, rw, false); dm_softirq_done()
1323 dm_done(clone, tio->error, mapped); dm_softirq_done()
1332 struct dm_rq_target_io *tio = tio_from_request(rq); dm_complete_request() local
1334 tio->error = error; dm_complete_request()
1358 struct dm_rq_target_io *tio = clone->end_io_data; end_clone_request() local
1378 dm_complete_request(tio->orig, error); end_clone_request()
1443 * <-------------- *tio->len_ptr --------------->
1460 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); dm_accept_partial_bio() local
1463 BUG_ON(bi_size > *tio->len_ptr); dm_accept_partial_bio()
1465 *tio->len_ptr -= bi_size - n_sectors; dm_accept_partial_bio()
1470 static void __map_bio(struct dm_target_io *tio) __map_bio() argument
1475 struct bio *clone = &tio->clone; __map_bio()
1476 struct dm_target *ti = tio->ti; __map_bio()
1485 atomic_inc(&tio->io->io_count); __map_bio()
1492 tio->io->bio->bi_bdev->bd_dev, sector); __map_bio()
1497 md = tio->io->md; __map_bio()
1498 dec_pending(tio->io, r); __map_bio()
1499 free_tio(md, tio); __map_bio()
1524 static void clone_bio(struct dm_target_io *tio, struct bio *bio, clone_bio() argument
1527 struct bio *clone = &tio->clone; clone_bio()
1545 struct dm_target_io *tio; alloc_tio() local
1549 tio = container_of(clone, struct dm_target_io, clone); alloc_tio()
1551 tio->io = ci->io; alloc_tio()
1552 tio->ti = ti; alloc_tio()
1553 tio->target_bio_nr = target_bio_nr; alloc_tio()
1555 return tio; alloc_tio()
1562 struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr); __clone_and_map_simple_bio() local
1563 struct bio *clone = &tio->clone; __clone_and_map_simple_bio()
1565 tio->len_ptr = len; __clone_and_map_simple_bio()
1571 __map_bio(tio); __clone_and_map_simple_bio()
1599 struct dm_target_io *tio; __clone_and_map_data_bio() local
1610 tio = alloc_tio(ci, ti, target_bio_nr); __clone_and_map_data_bio()
1611 tio->len_ptr = len; __clone_and_map_data_bio()
1612 clone_bio(tio, bio, sector, *len); __clone_and_map_data_bio()
1613 __map_bio(tio); __clone_and_map_data_bio()
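
The hits above, from dm_accept_partial_bio() through __clone_and_map_data_bio(), all sit on dm's bio-based path: alloc_tio() embeds a struct dm_target_io directly in front of each cloned bio, and the completion side recovers it from the bio pointer alone via container_of(). Below is a minimal userspace sketch of that pattern; all types are simplified stand-ins (this struct bio is a stub, not the kernel's), so it illustrates the layout trick rather than the real dm API.

    /* Minimal userspace sketch of the container_of pattern used by the
     * bio-based hits above.  NOTE: all types here are simplified
     * stand-ins for illustration; this struct bio is a stub.
     */
    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct bio {                        /* stub, not the kernel struct */
            int bi_error;
    };

    struct dm_target_io {               /* simplified stand-in */
            int target_bio_nr;
            struct bio clone;           /* clone embedded, as in alloc_tio() */
    };

    static struct dm_target_io *alloc_tio(int target_bio_nr)
    {
            struct dm_target_io *tio = malloc(sizeof(*tio));

            if (!tio)
                    abort();
            tio->target_bio_nr = target_bio_nr;
            tio->clone.bi_error = 0;
            return tio;
    }

    /* The endio path receives only the bio, yet recovers its tio. */
    static void clone_endio(struct bio *bio)
    {
            struct dm_target_io *tio =
                    container_of(bio, struct dm_target_io, clone);

            printf("completed clone for target bio %d\n", tio->target_bio_nr);
            free(tio);
    }

    int main(void)
    {
            struct dm_target_io *tio = alloc_tio(3);

            clone_endio(&tio->clone);   /* only the bio is handed around */
            return 0;
    }

Because the clone is embedded in the tio, the per-clone context travels with the bio for free and needs no lookup table on completion.
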
1809 struct dm_rq_target_io *tio = data; dm_rq_bio_constructor() local
1814 info->tio = tio; dm_rq_bio_constructor()
1821 struct dm_rq_target_io *tio, gfp_t gfp_mask) setup_clone()
1825 r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask, setup_clone()
1826 dm_rq_bio_constructor, tio); setup_clone()
1834 clone->end_io_data = tio; setup_clone()
1836 tio->clone = clone; setup_clone()
1842 struct dm_rq_target_io *tio, gfp_t gfp_mask) clone_rq()
1845 * Do not allocate a clone if tio->clone was already set clone_rq()
1848 bool alloc_clone = !tio->clone; clone_rq()
1856 clone = tio->clone; clone_rq()
1859 if (setup_clone(clone, rq, tio, gfp_mask)) { clone_rq()
1871 static void init_tio(struct dm_rq_target_io *tio, struct request *rq, init_tio() argument
1874 tio->md = md; init_tio()
1875 tio->ti = NULL; init_tio()
1876 tio->clone = NULL; init_tio()
1877 tio->orig = rq; init_tio()
1878 tio->error = 0; init_tio()
1879 memset(&tio->info, 0, sizeof(tio->info)); init_tio()
1881 init_kthread_work(&tio->work, map_tio_request); init_tio()
1887 struct dm_rq_target_io *tio; prep_tio() local
1891 tio = alloc_rq_tio(md, gfp_mask); prep_tio()
1892 if (!tio) prep_tio()
1895 init_tio(tio, rq, md); prep_tio()
1899 if (!clone_rq(rq, md, tio, gfp_mask)) { prep_tio()
1901 free_rq_tio(tio); prep_tio()
1907 return tio; prep_tio()
1916 struct dm_rq_target_io *tio; dm_prep_fn() local
1923 tio = prep_tio(rq, md, GFP_ATOMIC); dm_prep_fn()
1924 if (!tio) dm_prep_fn()
1927 rq->special = tio; dm_prep_fn()
1939 static int map_request(struct dm_rq_target_io *tio, struct request *rq, map_request() argument
1943 struct dm_target *ti = tio->ti; map_request()
1946 if (tio->clone) { map_request()
1947 clone = tio->clone; map_request()
1948 r = ti->type->map_rq(ti, clone, &tio->info); map_request()
1950 r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone); map_request()
1958 if (setup_clone(clone, rq, tio, GFP_ATOMIC)) { map_request()
1977 dm_requeue_original_request(md, tio->orig); map_request()
1995 struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work); map_tio_request() local
1996 struct request *rq = tio->orig; map_tio_request()
1997 struct mapped_device *md = tio->md; map_tio_request()
1999 if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) map_tio_request()
2018 struct dm_rq_target_io *tio = tio_from_request(orig); dm_start_request() local
2019 tio->duration_jiffies = jiffies; dm_start_request()
2020 tio->n_sectors = blk_rq_sectors(orig); dm_start_request()
2022 tio->n_sectors, false, 0, &tio->stats_aux); dm_start_request()
2085 struct dm_rq_target_io *tio; dm_request_fn() local
2126 tio = tio_from_request(rq); dm_request_fn()
2127 /* Establish tio->ti before queuing work (map_tio_request) */ dm_request_fn()
2128 tio->ti = ti; dm_request_fn()
2129 queue_kthread_work(&md->kworker, &tio->work); dm_request_fn()
2636 struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq); dm_mq_init_request() local
2639 * Must initialize md member of tio, otherwise it won't dm_mq_init_request()
2642 tio->md = md; dm_mq_init_request()
2651 struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq); dm_mq_queue_rq() local
2652 struct mapped_device *md = tio->md; dm_mq_queue_rq()
2681 /* Init tio using md established in .init_request */ dm_mq_queue_rq()
2682 init_tio(tio, rq, md); dm_mq_queue_rq()
2685 * Establish tio->ti before queuing work (map_tio_request) dm_mq_queue_rq()
2688 tio->ti = ti; dm_mq_queue_rq()
2693 tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io); dm_mq_queue_rq()
2694 (void) clone_rq(rq, md, tio, GFP_ATOMIC); dm_mq_queue_rq()
2695 queue_kthread_work(&md->kworker, &tio->work); dm_mq_queue_rq()
2698 if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) { dm_mq_queue_rq()
1820 setup_clone(struct request *clone, struct request *rq, struct dm_rq_target_io *tio, gfp_t gfp_mask) setup_clone() argument
1841 clone_rq(struct request *rq, struct mapped_device *md, struct dm_rq_target_io *tio, gfp_t gfp_mask) clone_rq() argument
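
The remaining dm.c hits trace the request-based path: init_tio() resets the per-request state with tio->ti left NULL, dm_request_fn() and dm_mq_queue_rq() establish tio->ti before queuing the kthread work, and map_tio_request() then maps through it. Here is a minimal sketch of that ordering, again with simplified stand-in types rather than the kernel's request and target structures.

    /* Sketch of the request-based tio lifecycle traced above: init_tio()
     * resets state (ti == NULL), the submitter establishes tio->ti, and
     * only then is the map work run.  Types are simplified stand-ins.
     */
    #include <stdio.h>
    #include <string.h>

    struct request {
            int id;
    };

    struct dm_rq_target_io {            /* simplified stand-in */
            struct request *orig;       /* original request */
            void *ti;                   /* target, set before mapping */
            int error;
    };

    static void init_tio(struct dm_rq_target_io *tio, struct request *rq)
    {
            memset(tio, 0, sizeof(*tio));   /* leaves ti NULL, error 0 */
            tio->orig = rq;
    }

    /* Stand-in for map_tio_request(): must not run before ti is set. */
    static void map_tio_request(struct dm_rq_target_io *tio)
    {
            if (!tio->ti) {
                    fprintf(stderr, "BUG: mapped before ti was established\n");
                    return;
            }
            printf("mapping request %d\n", tio->orig->id);
    }

    int main(void)
    {
            static int dummy_target;    /* placeholder for a dm target */
            struct request rq = { .id = 42 };
            struct dm_rq_target_io tio;

            init_tio(&tio, &rq);
            tio.ti = &dummy_target;     /* establish ti before "queuing" */
            map_tio_request(&tio);      /* worker side */
            return 0;
    }

The ordering is the point: the work item runs asynchronously on md->kworker, which is why the comments at 2127 and 2685 above insist on establishing tio->ti before queue_kthread_work().
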
/linux-4.4.14/arch/ia64/sn/pci/pcibr/
pcibr_reg.c
19 struct tiocp tio; member in union:br_ptr
33 __sn_clrq_relaxed(&ptr->tio.cp_control, bits); pcireg_control_bit_clr()
53 __sn_setq_relaxed(&ptr->tio.cp_control, bits); pcireg_control_bit_set()
77 ret = __sn_readq_relaxed(&ptr->tio.cp_tflush); pcireg_tflush_get()
107 ret = __sn_readq_relaxed(&ptr->tio.cp_int_status); pcireg_intr_status_get()
131 __sn_clrq_relaxed(&ptr->tio.cp_int_enable, bits); pcireg_intr_enable_bit_clr()
151 __sn_setq_relaxed(&ptr->tio.cp_int_enable, bits); pcireg_intr_enable_bit_set()
175 __sn_clrq_relaxed(&ptr->tio.cp_int_addr[int_n], pcireg_intr_addr_addr_set()
177 __sn_setq_relaxed(&ptr->tio.cp_int_addr[int_n], pcireg_intr_addr_addr_set()
204 writeq(1, &ptr->tio.cp_force_pin[int_n]); pcireg_force_intr_set()
229 __sn_readq_relaxed(&ptr->tio.cp_wr_req_buf[device]); pcireg_wrb_flush_get()
252 writeq(val, &ptr->tio.cp_int_ate_ram[ate_index]); pcireg_int_ate_set()
273 ret = &ptr->tio.cp_int_ate_ram[ate_index]; pcireg_int_ate_addr()
pcibr_dma.c
20 #include "tio.h"
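
The pcibr_reg.c hits all follow one pattern: union br_ptr overlays the TIOCP register layout (the struct tiocp tio member matched here) on the bridge's MMIO base, and each accessor does a 64-bit read-modify-write via __sn_setq_relaxed()/__sn_clrq_relaxed(). Below is a rough userspace sketch of that overlay-plus-RMW idiom; the register layouts are invented, and setq()/clrq() are plain C stand-ins for the SN2 MMIO primitives.

    /* Rough userspace sketch of the pcibr_reg.c idiom: one union overlays
     * per-ASIC register layouts on the same base, and helpers set/clear
     * bits with 64-bit read-modify-write accesses.  Layouts are invented;
     * setq()/clrq() stand in for __sn_setq_relaxed()/__sn_clrq_relaxed().
     */
    #include <stdint.h>
    #include <stdio.h>

    struct tiocp {                      /* invented TIOCP-style layout */
            volatile uint64_t cp_control;
    };

    struct pic {                        /* invented PIC-style layout */
            volatile uint64_t p_wid_control;
    };

    union br_ptr {                      /* two layouts, one base address */
            struct tiocp tio;
            struct pic pic;
    };

    static void setq(volatile uint64_t *reg, uint64_t bits)
    {
            *reg |= bits;               /* kernel: relaxed 64-bit RMW */
    }

    static void clrq(volatile uint64_t *reg, uint64_t bits)
    {
            *reg &= ~bits;
    }

    int main(void)
    {
            union br_ptr regs = { .tio = { .cp_control = 0 } };

            setq(&regs.tio.cp_control, 0x3);    /* set two control bits */
            clrq(&regs.tio.cp_control, 0x1);    /* clear one again */
            printf("cp_control = 0x%llx\n",
                   (unsigned long long)regs.tio.cp_control);
            return 0;
    }

In the driver itself the union also carries the PIC bridge layout, and a bridge-type switch picks which member to poke, so one accessor body serves both ASICs from the same base pointer.
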
/linux-4.4.14/drivers/char/
nsc_gpio.c
28 dev_info(amp->dev, "io%02u: 0x%04x %s %s %s %s %s %s %s\tio:%d/%d\n", nsc_gpio_dump()
/linux-4.4.14/arch/ia64/sn/kernel/
huberror.c
202 * Purpose : initialize the error handling requirements for a given tio.
204 * Assumptions : Called only once per tio.
tiocx.c
26 #include "tio.h"
/linux-4.4.14/drivers/staging/comedi/drivers/
ni_660x.c
1151 /* to be safe, set counterswap bits on tio chips after all the counter ni_660x_auto_attach()
/linux-4.4.14/drivers/scsi/aic7xxx/
aic7xxx_core.c
7957 * to this accept tio. ahc_handle_target_cmd()
aic79xx_core.c
10807 * to this accept tio. ahd_handle_target_cmd()
