data_dev   54 drivers/md/dm-raid.c          struct dm_dev *data_dev;
data_dev  793 drivers/md/dm-raid.c          if (rs->dev[i].data_dev)
data_dev  794 drivers/md/dm-raid.c          dm_put_device(rs->ti, rs->dev[i].data_dev);
data_dev  833 drivers/md/dm-raid.c          rs->dev[i].data_dev = NULL;
data_dev  882 drivers/md/dm-raid.c          &rs->dev[i].data_dev);
data_dev  892 drivers/md/dm-raid.c          rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
data_dev 2115 drivers/md/dm-raid.c          if (!rs->dev[i].data_dev || test_bit(Faulty, &rs->dev[i].rdev.flags)) {
data_dev 3665 drivers/md/dm-raid.c          __get_dev_name(rs->dev[i].data_dev));
data_dev 3728 drivers/md/dm-raid.c          if (rs->dev[i].data_dev)
data_dev 3730 drivers/md/dm-raid.c          rs->dev[i].data_dev,
data_dev  234 drivers/md/dm-thin.c          struct block_device *data_dev;
data_dev  325 drivers/md/dm-thin.c          struct dm_dev *data_dev;
data_dev 2817 drivers/md/dm-thin.c          q = bdev_get_queue(pt->data_dev->bdev);
data_dev 2841 drivers/md/dm-thin.c          struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
data_dev 2858 drivers/md/dm-thin.c          struct block_device *data_bdev = pt->data_dev->bdev;
data_dev 2949 drivers/md/dm-thin.c          struct block_device *data_dev,
data_dev 3057 drivers/md/dm-thin.c          pool->data_dev = data_dev;
data_dev 3099 drivers/md/dm-thin.c          struct block_device *data_dev,
data_dev 3110 drivers/md/dm-thin.c          if (pool->data_dev != data_dev) {
data_dev 3119 drivers/md/dm-thin.c          if (pool->md_dev != metadata_dev || pool->data_dev != data_dev) {
data_dev 3126 drivers/md/dm-thin.c          pool = pool_create(pool_md, metadata_dev, data_dev, block_size, read_only, error);
data_dev 3146 drivers/md/dm-thin.c          dm_put_device(ti, pt->data_dev);
data_dev 3230 drivers/md/dm-thin.c          bio_set_dev(flush_bio, pt->data_dev->bdev);
data_dev 3307 drivers/md/dm-thin.c          struct dm_dev *data_dev;
data_dev 3352 drivers/md/dm-thin.c          r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
data_dev 3379 drivers/md/dm-thin.c          pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev, data_dev->bdev,
data_dev 3401 drivers/md/dm-thin.c          pt->data_dev = data_dev;
data_dev 3443 drivers/md/dm-thin.c          dm_put_device(ti, data_dev);
data_dev 3463 drivers/md/dm-thin.c          bio_set_dev(bio, pt->data_dev->bdev);
data_dev 4041 drivers/md/dm-thin.c          format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
data_dev 4058 drivers/md/dm-thin.c          return fn(ti, pt->data_dev, 0, ti->len, data);
data_dev   74 drivers/md/dm-verity-fec.c    v->data_dev->name, (unsigned long long)rsb,
data_dev  173 drivers/md/dm-verity-fec.c    v->data_dev->name, (unsigned long long)rsb, r);
data_dev  176 drivers/md/dm-verity-fec.c    v->data_dev->name, (unsigned long long)rsb, r);
data_dev  254 drivers/md/dm-verity-fec.c    v->data_dev->name,
data_dev  397 drivers/md/dm-verity-fec.c    v->data_dev->name, (unsigned long long)rsb, neras);
data_dev  431 drivers/md/dm-verity-fec.c    DMWARN_LIMIT("%s: FEC: recursion too deep", v->data_dev->name);
data_dev  760 drivers/md/dm-verity-fec.c    f->data_bufio = dm_bufio_client_create(v->data_dev->bdev,
data_dev  239 drivers/md/dm-verity-target.c DMERR_LIMIT("%s: %s block %llu is corrupted", v->data_dev->name,
data_dev  243 drivers/md/dm-verity-target.c DMERR("%s: reached maximum errors", v->data_dev->name);
data_dev  638 drivers/md/dm-verity-target.c bio_set_dev(bio, v->data_dev->bdev);
data_dev  693 drivers/md/dm-verity-target.c v->data_dev->name,
data_dev  751 drivers/md/dm-verity-target.c *bdev = v->data_dev->bdev;
data_dev  754 drivers/md/dm-verity-target.c ti->len != i_size_read(v->data_dev->bdev->bd_inode) >> SECTOR_SHIFT)
data_dev  764 drivers/md/dm-verity-target.c return fn(ti, v->data_dev, v->data_start, ti->len, data);
data_dev  803 drivers/md/dm-verity-target.c if (v->data_dev)
data_dev  804 drivers/md/dm-verity-target.c dm_put_device(ti, v->data_dev);
data_dev  991 drivers/md/dm-verity-target.c r = dm_get_device(ti, argv[1], FMODE_READ, &v->data_dev);
data_dev 1005 drivers/md/dm-verity-target.c num < bdev_logical_block_size(v->data_dev->bdev) ||
data_dev   34 drivers/md/dm-verity.h        struct dm_dev *data_dev;
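
The references above all follow the same device-mapper lifecycle: a target's constructor acquires the backing device with dm_get_device() and stores the resulting struct dm_dev * (dm-thin.c:3352, dm-verity-target.c:991), the I/O path remaps bios onto data_dev->bdev with bio_set_dev() (dm-thin.c:3463, dm-verity-target.c:638), and the destructor drops the reference with dm_put_device() (dm-thin.c:3146, dm-verity-target.c:804). Below is a minimal sketch of that lifecycle as a hypothetical pass-through target; the "sketch" target name and its callbacks are illustrative only, and it assumes the fmode_t-based dm_get_device() signature used in this tree.

/*
 * Hypothetical pass-through target illustrating the data_dev lifecycle
 * seen in the listing above.  Only the dm_get_device()/bio_set_dev()/
 * dm_put_device() pattern mirrors the real targets; everything else is
 * a sketch.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/device-mapper.h>

struct sketch_ctx {
	struct dm_dev *data_dev;	/* backing device, cf. dm-verity.h:34 */
};

/* ctr args: <data_dev_path> -- acquire the device, cf. dm-thin.c:3352 */
static int sketch_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct sketch_ctx *sc;
	int r;

	if (argc != 1) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	sc = kzalloc(sizeof(*sc), GFP_KERNEL);
	if (!sc) {
		ti->error = "Cannot allocate sketch context";
		return -ENOMEM;
	}

	r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE,
			  &sc->data_dev);
	if (r) {
		ti->error = "Data device lookup failed";
		kfree(sc);
		return r;
	}

	ti->private = sc;
	return 0;
}

/* dtr: drop the reference, cf. dm-thin.c:3146 and dm-verity-target.c:804 */
static void sketch_dtr(struct dm_target *ti)
{
	struct sketch_ctx *sc = ti->private;

	dm_put_device(ti, sc->data_dev);
	kfree(sc);
}

/* map: remap I/O onto the backing bdev, cf. dm-verity-target.c:638 */
static int sketch_map(struct dm_target *ti, struct bio *bio)
{
	struct sketch_ctx *sc = ti->private;

	bio_set_dev(bio, sc->data_dev->bdev);
	return DM_MAPIO_REMAPPED;
}

static struct target_type sketch_target = {
	.name	 = "sketch",
	.version = {1, 0, 0},
	.module	 = THIS_MODULE,
	.ctr	 = sketch_ctr,
	.dtr	 = sketch_dtr,
	.map	 = sketch_map,
};

static int __init sketch_init(void)
{
	return dm_register_target(&sketch_target);
}

static void __exit sketch_exit(void)
{
	dm_unregister_target(&sketch_target);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");

Note that dm_get_device() takes the reference on behalf of the target instance, which is why every target that stores a data_dev (dm-raid.c:794, dm-thin.c:3146, dm-verity-target.c:804) must pair it with exactly one dm_put_device() in its destructor or constructor error path.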