
Searched refs:bdev_get_queue (Results 1 – 55 of 55) sorted by relevance

/linux-4.4.14/block/
blk-lib.c 44 struct request_queue *q = bdev_get_queue(bdev); in blkdev_issue_discard()
149 struct request_queue *q = bdev_get_queue(bdev); in blkdev_issue_write_same()
288 struct request_queue *q = bdev_get_queue(bdev); in blkdev_issue_zeroout()
bio-integrity.c 149 bvec_gap_to_prev(bdev_get_queue(bio->bi_bdev), in bio_integrity_add_page()
281 q = bdev_get_queue(bio->bi_bdev); in bio_integrity_prep()
ioctl.c 542 queue_max_sectors(bdev_get_queue(bdev))); in blkdev_ioctl()
545 return put_ushort(arg, !blk_queue_nonrot(bdev_get_queue(bdev))); in blkdev_ioctl()
blk-flush.c 472 q = bdev_get_queue(bdev); in blkdev_issue_flush()
compat_ioctl.c 722 queue_max_sectors(bdev_get_queue(bdev))); in compat_blkdev_ioctl()
726 !blk_queue_nonrot(bdev_get_queue(bdev))); in compat_blkdev_ioctl()
blk-core.c 116 struct request_queue *q = bdev_get_queue(bdev); in blk_get_backing_dev_info()
1830 trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio, in blk_partition_remap()
1926 q = bdev_get_queue(bio->bi_bdev); in generic_make_request_checks()
2061 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in generic_make_request()
blk-settings.c 651 struct request_queue *bq = bdev_get_queue(bdev); in bdev_stack_limits()
/linux-4.4.14/drivers/md/
dm-table.c 294 q = bdev_get_queue(bdev); in device_area_is_invalid()
436 struct request_queue *q = bdev_get_queue(bdev); in dm_set_device_limits()
893 struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev); in dm_table_set_type()
908 if (!bdev_get_queue(dd->dm_dev->bdev)->mq_ops) { in dm_table_set_type()
1328 struct request_queue *q = bdev_get_queue(dev->bdev); in device_flush_capable()
1380 struct request_queue *q = bdev_get_queue(dev->bdev); in device_is_nonrot()
1388 struct request_queue *q = bdev_get_queue(dev->bdev); in device_is_not_random()
1396 struct request_queue *q = bdev_get_queue(dev->bdev); in queue_supports_sg_merge()
1421 struct request_queue *q = bdev_get_queue(dev->bdev); in device_not_write_same_capable()
1448 struct request_queue *q = bdev_get_queue(dev->bdev); in device_discard_capable()
[all …]
linear.c 63 struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev); in linear_congested()
122 if (blk_queue_discard(bdev_get_queue(rdev->bdev))) in linear_conf()
256 !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) { in linear_make_request()
raid0.c 36 struct request_queue *q = bdev_get_queue(devlist[i]->bdev); in raid0_congested()
390 if (blk_queue_discard(bdev_get_queue(rdev->bdev))) in raid0_run()
492 !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) { in raid0_make_request()
dm-mpath.c 422 clone->q = bdev_get_queue(bdev); in __multipath_map()
427 *__clone = blk_get_request(bdev_get_queue(bdev), in __multipath_map()
577 q = bdev_get_queue(p->path.dev->bdev); in parse_path()
1225 scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev), in activate_path()
1609 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev); in __pgpath_busy()
raid1.c 616 nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev)); in read_balance()
725 struct request_queue *q = bdev_get_queue(rdev->bdev); in raid1_congested()
763 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) in flush_pending_writes()
1037 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) in raid1_unplug()
1638 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) in raid1_add_disk()
2824 q = bdev_get_queue(rdev->bdev); in setup_conf()
2935 if (blk_queue_discard(bdev_get_queue(rdev->bdev))) in run()
dm-io.c 290 struct request_queue *q = bdev_get_queue(where->bdev); in do_region()
multipath.c 167 struct request_queue *q = bdev_get_queue(rdev->bdev); in multipath_congested()
dm-log-writes.c 766 struct request_queue *q = bdev_get_queue(lc->dev->bdev); in log_writes_io_hints()
raid10.c 839 struct request_queue *q = bdev_get_queue(rdev->bdev); in raid10_congested()
869 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) in flush_pending_writes()
1045 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) in raid10_unplug()
1745 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) in raid10_add_disk()
3587 q = bdev_get_queue(rdev->bdev); in run()
3602 if (blk_queue_discard(bdev_get_queue(rdev->bdev))) in run()
dm-thin.c 340 struct request_queue *q = bdev_get_queue(bdev); in __blkdev_issue_discard_async()
2636 q = bdev_get_queue(pt->data_dev->bdev); in pool_is_congested()
2660 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev); in data_dev_supports_discard()
2678 struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits; in disable_passdown_if_not_supported()
raid5.c 231 trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), in return_io()
1038 trace_block_bio_remap(bdev_get_queue(bi->bi_bdev), in ops_run_io()
1084 trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev), in ops_run_io()
4769 trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev), in raid5_align_endio()
4854 trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev), in raid5_read_one_chunk()
5324 trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), in make_request()
5732 trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev), in retry_aligned_read()
7006 if (!blk_queue_discard(bdev_get_queue(rdev->bdev)) || in run()
7007 !bdev_get_queue(rdev->bdev)-> in run()
raid5-cache.c 661 if (!blk_queue_discard(bdev_get_queue(bdev))) in r5l_write_super_and_discard_space()
dm-raid.c 1178 q = bdev_get_queue(rs->dev[i].rdev.bdev); in configure_discard_support()
dm-era-target.c 1381 struct request_queue *q = bdev_get_queue(dev->bdev); in dev_is_congested()
dm.c 1018 !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors)) in clone_endio()
1491 trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone, in __map_bio()
dm-cache-target.c 2293 struct request_queue *q = bdev_get_queue(dev->bdev); in is_congested()
/linux-4.4.14/include/linux/
blkdev.h 823 static inline struct request_queue *bdev_get_queue(struct block_device *bdev) in bdev_get_queue() function
1202 return queue_logical_block_size(bdev_get_queue(bdev)); in bdev_logical_block_size()
1212 return queue_physical_block_size(bdev_get_queue(bdev)); in bdev_physical_block_size()
1222 return queue_io_min(bdev_get_queue(bdev)); in bdev_io_min()
1232 return queue_io_opt(bdev_get_queue(bdev)); in bdev_io_opt()
1253 struct request_queue *q = bdev_get_queue(bdev); in bdev_alignment_offset()
1297 struct request_queue *q = bdev_get_queue(bdev); in bdev_discard_alignment()
1315 return queue_discard_zeroes_data(bdev_get_queue(bdev)); in bdev_discard_zeroes_data()
1320 struct request_queue *q = bdev_get_queue(bdev); in bdev_write_same()
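The blkdev.h hits above show both the definition (line 823) and the inline wrappers built on it. The pattern repeated throughout the rest of these results is the same: fetch the backing request_queue with bdev_get_queue() and then test a queue limit or flag on it. A minimal sketch of that pattern against the 4.4-era block API follows; the helper name is hypothetical and not part of the tree indexed above.

	#include <linux/blkdev.h>

	/*
	 * Hypothetical helper illustrating the common caller pattern seen in
	 * these results: look up the request_queue behind a block_device and
	 * test a queue capability on it (here, discard support).
	 */
	static bool example_bdev_supports_discard(struct block_device *bdev)
	{
		struct request_queue *q = bdev_get_queue(bdev);

		/* Queue capabilities such as discard support live on the queue. */
		return q && blk_queue_discard(q);
	}

This mirrors callers like dm-table.c device_discard_capable() and raid5-cache.c r5l_write_super_and_discard_space() in the listing above.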
/linux-4.4.14/fs/f2fs/
gc.h 107 struct request_queue *q = bdev_get_queue(bdev); in is_idle()
segment.h 686 struct request_queue *q = bdev_get_queue(bdev); in max_hw_blocks()
super.c 324 q = bdev_get_queue(sb->s_bdev); in parse_options()
file.c 1497 struct request_queue *q = bdev_get_queue(sb->s_bdev); in f2fs_ioc_fitrim()
/linux-4.4.14/drivers/target/
target_core_iblock.c 118 q = bdev_get_queue(bd); in iblock_configure_device()
651 struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd); in iblock_execute_rw()
742 struct request_queue *q = bdev_get_queue(bd); in iblock_get_blocks()
803 struct request_queue *q = bdev_get_queue(bd); in iblock_get_write_cache()
target_core_file.c 148 struct request_queue *q = bdev_get_queue(inode->i_bdev); in fd_configure_device()
/linux-4.4.14/fs/jfs/
ioctl.c 134 struct request_queue *q = bdev_get_queue(sb->s_bdev); in jfs_ioctl()
super.c 376 struct request_queue *q = bdev_get_queue(sb->s_bdev); in parse_options()
395 struct request_queue *q = bdev_get_queue(sb->s_bdev); in parse_options()
/linux-4.4.14/drivers/md/bcache/
request.c 917 blk_queue_discard(bdev_get_queue(dc->bdev))) in cached_dev_write()
996 !blk_queue_discard(bdev_get_queue(dc->bdev))) in cached_dev_make_request()
1016 struct request_queue *q = bdev_get_queue(dc->bdev); in cached_dev_congested()
1027 q = bdev_get_queue(ca->bdev); in cached_dev_congested()
1134 q = bdev_get_queue(ca->bdev); in flash_dev_congested()
sysfs.c 848 if (blk_queue_discard(bdev_get_queue(ca->bdev))) in STORE()
super.c 1096 struct request_queue *q = bdev_get_queue(dc->bdev); in cached_dev_init()
1858 if (blk_queue_discard(bdev_get_queue(ca->bdev))) in register_cache()
/linux-4.4.14/fs/xfs/
xfs_discard.c 159 struct request_queue *q = bdev_get_queue(mp->m_ddev_targp->bt_bdev); in xfs_ioc_trim()
/linux-4.4.14/drivers/block/xen-blkback/
xenbus.c 415 q = bdev_get_queue(bdev); in xen_vbd_create()
474 struct request_queue *q = bdev_get_queue(bdev); in xen_blkbk_discard()
/linux-4.4.14/fs/ext4/
ioctl.c 592 struct request_queue *q = bdev_get_queue(sb->s_bdev); in ext4_ioctl()
super.c 3940 struct request_queue *q = bdev_get_queue(sb->s_bdev); in ext4_fill_super()
/linux-4.4.14/fs/logfs/
super.c 122 sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info; in logfs_sb_set()
/linux-4.4.14/kernel/trace/
blktrace.c 645 q = bdev_get_queue(bdev); in blk_trace_ioctl()
1623 return bdev_get_queue(bdev); in blk_trace_get_queue()
/linux-4.4.14/fs/ocfs2/
ioctl.c 929 struct request_queue *q = bdev_get_queue(sb->s_bdev); in ocfs2_ioctl()
/linux-4.4.14/fs/nilfs2/
ioctl.c 1086 struct request_queue *q = bdev_get_queue(nilfs->ns_bdev); in nilfs_ioctl_trim_fs()
super.c 1082 sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info; in nilfs_fill_super()
/linux-4.4.14/fs/
direct-io.c 448 if (!blk_poll(bdev_get_queue(dio->bio_bdev), dio->bio_cookie)) in dio_await_one()
super.c 956 s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info; in set_bdev_super()
/linux-4.4.14/mm/
swapfile.c 2378 struct request_queue *q = bdev_get_queue(si->bdev); in swap_discardable()
2464 if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) { in SYSCALL_DEFINE2()
/linux-4.4.14/fs/btrfs/
volumes.c 935 q = bdev_get_queue(bdev); in __btrfs_open_devices()
943 if (!blk_queue_nonrot(bdev_get_queue(bdev))) in __btrfs_open_devices()
2306 q = bdev_get_queue(bdev); in btrfs_init_new_device()
2348 if (!blk_queue_nonrot(bdev_get_queue(bdev))) in btrfs_init_new_device()
2511 q = bdev_get_queue(bdev); in btrfs_init_dev_replace_tgtdev()
ioctl.c 392 q = bdev_get_queue(device->bdev); in btrfs_ioctl_fitrim()
/linux-4.4.14/drivers/block/
pktcdvd.c 702 struct request_queue *q = bdev_get_queue(pd->bdev); in pkt_generic_packet()
2212 q = bdev_get_queue(pd->bdev); in pkt_open_dev()
/linux-4.4.14/fs/gfs2/
ops_fstype.c 1225 s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info; in set_gfs2_super()
rgrp.c 1362 struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev); in gfs2_fitrim()
/linux-4.4.14/fs/fat/
inode.c 1746 struct request_queue *q = bdev_get_queue(sb->s_bdev); in fat_fill_super()
/linux-4.4.14/drivers/block/drbd/
drbd_main.c 2395 q = bdev_get_queue(device->ldev->backing_bdev); in drbd_congested()
drbd_receiver.c 2321 struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev); in receive_Data()