/linux-4.4.14/block/ |
D | blk-lib.c | 44 struct request_queue *q = bdev_get_queue(bdev); in blkdev_issue_discard() 149 struct request_queue *q = bdev_get_queue(bdev); in blkdev_issue_write_same() 288 struct request_queue *q = bdev_get_queue(bdev); in blkdev_issue_zeroout()
|
D | bio-integrity.c | 149 bvec_gap_to_prev(bdev_get_queue(bio->bi_bdev), in bio_integrity_add_page() 281 q = bdev_get_queue(bio->bi_bdev); in bio_integrity_prep()
|
D | ioctl.c | 542 queue_max_sectors(bdev_get_queue(bdev))); in blkdev_ioctl() 545 return put_ushort(arg, !blk_queue_nonrot(bdev_get_queue(bdev))); in blkdev_ioctl()
|
D | blk-flush.c | 472 q = bdev_get_queue(bdev); in blkdev_issue_flush()
|
D | compat_ioctl.c | 722 queue_max_sectors(bdev_get_queue(bdev))); in compat_blkdev_ioctl() 726 !blk_queue_nonrot(bdev_get_queue(bdev))); in compat_blkdev_ioctl()
|
D | blk-core.c | 116 struct request_queue *q = bdev_get_queue(bdev); in blk_get_backing_dev_info() 1830 trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio, in blk_partition_remap() 1926 q = bdev_get_queue(bio->bi_bdev); in generic_make_request_checks() 2061 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in generic_make_request()
|
D | blk-settings.c | 651 struct request_queue *bq = bdev_get_queue(bdev); in bdev_stack_limits()
|
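The block/ callers above share one pattern: resolve the request_queue behind a block_device with bdev_get_queue(), then test a queue flag or limit before building the request. A minimal sketch of that pattern follows; it mirrors the checks at the top of blkdev_issue_discard() in blk-lib.c, and the helper name is invented for illustration.

#include <linux/blkdev.h>
#include <linux/errno.h>

/* Illustrative only: mirrors the capability checks the block/ callers
 * above perform before issuing a discard. */
static int example_bdev_supports_discard(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;		/* no queue behind this bdev */
	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;	/* device does not advertise discard */
	return 0;
}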
/linux-4.4.14/drivers/md/ |
D | dm-table.c | 294 q = bdev_get_queue(bdev); in device_area_is_invalid() 436 struct request_queue *q = bdev_get_queue(bdev); in dm_set_device_limits() 893 struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev); in dm_table_set_type() 908 if (!bdev_get_queue(dd->dm_dev->bdev)->mq_ops) { in dm_table_set_type() 1328 struct request_queue *q = bdev_get_queue(dev->bdev); in device_flush_capable() 1380 struct request_queue *q = bdev_get_queue(dev->bdev); in device_is_nonrot() 1388 struct request_queue *q = bdev_get_queue(dev->bdev); in device_is_not_random() 1396 struct request_queue *q = bdev_get_queue(dev->bdev); in queue_supports_sg_merge() 1421 struct request_queue *q = bdev_get_queue(dev->bdev); in device_not_write_same_capable() 1448 struct request_queue *q = bdev_get_queue(dev->bdev); in device_discard_capable() [all …]
|
D | linear.c | 63 struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev); in linear_congested() 122 if (blk_queue_discard(bdev_get_queue(rdev->bdev))) in linear_conf() 256 !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) { in linear_make_request()
|
D | raid0.c | 36 struct request_queue *q = bdev_get_queue(devlist[i]->bdev); in raid0_congested() 390 if (blk_queue_discard(bdev_get_queue(rdev->bdev))) in raid0_run() 492 !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) { in raid0_make_request()
|
D | dm-mpath.c | 422 clone->q = bdev_get_queue(bdev); in __multipath_map() 427 *__clone = blk_get_request(bdev_get_queue(bdev), in __multipath_map() 577 q = bdev_get_queue(p->path.dev->bdev); in parse_path() 1225 scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev), in activate_path() 1609 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev); in __pgpath_busy()
|
D | raid1.c | 616 nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev)); in read_balance() 725 struct request_queue *q = bdev_get_queue(rdev->bdev); in raid1_congested() 763 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) in flush_pending_writes() 1037 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) in raid1_unplug() 1638 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) in raid1_add_disk() 2824 q = bdev_get_queue(rdev->bdev); in setup_conf() 2935 if (blk_queue_discard(bdev_get_queue(rdev->bdev))) in run()
|
D | dm-io.c | 290 struct request_queue *q = bdev_get_queue(where->bdev); in do_region()
|
D | multipath.c | 167 struct request_queue *q = bdev_get_queue(rdev->bdev); in multipath_congested()
|
D | dm-log-writes.c | 766 struct request_queue *q = bdev_get_queue(lc->dev->bdev); in log_writes_io_hints()
|
D | raid10.c | 839 struct request_queue *q = bdev_get_queue(rdev->bdev); in raid10_congested() 869 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) in flush_pending_writes() 1045 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) in raid10_unplug() 1745 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) in raid10_add_disk() 3587 q = bdev_get_queue(rdev->bdev); in run() 3602 if (blk_queue_discard(bdev_get_queue(rdev->bdev))) in run()
|
D | dm-thin.c | 340 struct request_queue *q = bdev_get_queue(bdev); in __blkdev_issue_discard_async() 2636 q = bdev_get_queue(pt->data_dev->bdev); in pool_is_congested() 2660 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev); in data_dev_supports_discard() 2678 struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits; in disable_passdown_if_not_supported()
|
D | raid5.c | 231 trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), in return_io() 1038 trace_block_bio_remap(bdev_get_queue(bi->bi_bdev), in ops_run_io() 1084 trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev), in ops_run_io() 4769 trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev), in raid5_align_endio() 4854 trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev), in raid5_read_one_chunk() 5324 trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), in make_request() 5732 trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev), in retry_aligned_read() 7006 if (!blk_queue_discard(bdev_get_queue(rdev->bdev)) || in run() 7007 !bdev_get_queue(rdev->bdev)-> in run()
|
D | raid5-cache.c | 661 if (!blk_queue_discard(bdev_get_queue(bdev))) in r5l_write_super_and_discard_space()
|
D | dm-raid.c | 1178 q = bdev_get_queue(rs->dev[i].rdev.bdev); in configure_discard_support()
|
D | dm-era-target.c | 1381 struct request_queue *q = bdev_get_queue(dev->bdev); in dev_is_congested()
|
D | dm.c | 1018 !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors)) in clone_endio() 1491 trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone, in __map_bio()
|
D | dm-cache-target.c | 2293 struct request_queue *q = bdev_get_queue(dev->bdev); in is_congested()
|
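The md/raid* and dm-* entries above repeatedly call bdev_get_queue() on each member device to decide which capabilities the stacked array queue may advertise. A hedged sketch of that per-member check follows; the function and parameter names are invented, and only the blk_queue_discard()/queue_flag_*_unlocked() helpers come from the 4.4 headers.

#include <linux/blkdev.h>

/* Illustrative only: advertise discard on a stacked queue just when
 * every member device's queue advertises it, as the raid0/raid1/raid10
 * setup paths above do. */
static void example_update_discard_flag(struct request_queue *stacked_q,
					struct block_device **members,
					int count)
{
	bool all_discard = true;
	int i;

	for (i = 0; i < count; i++)
		if (!blk_queue_discard(bdev_get_queue(members[i])))
			all_discard = false;

	if (all_discard)
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, stacked_q);
	else
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, stacked_q);
}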
/linux-4.4.14/include/linux/ |
D | blkdev.h | 823 static inline struct request_queue *bdev_get_queue(struct block_device *bdev) in bdev_get_queue() function 1202 return queue_logical_block_size(bdev_get_queue(bdev)); in bdev_logical_block_size() 1212 return queue_physical_block_size(bdev_get_queue(bdev)); in bdev_physical_block_size() 1222 return queue_io_min(bdev_get_queue(bdev)); in bdev_io_min() 1232 return queue_io_opt(bdev_get_queue(bdev)); in bdev_io_opt() 1253 struct request_queue *q = bdev_get_queue(bdev); in bdev_alignment_offset() 1297 struct request_queue *q = bdev_get_queue(bdev); in bdev_discard_alignment() 1315 return queue_discard_zeroes_data(bdev_get_queue(bdev)); in bdev_discard_zeroes_data() 1320 struct request_queue *q = bdev_get_queue(bdev); in bdev_write_same()
|
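The blkdev.h entry above is the definition: bdev_get_queue() is a static inline that, in 4.4, simply returns bdev->bd_disk->queue. The surrounding bdev_* wrappers (bdev_logical_block_size(), bdev_io_min(), bdev_write_same(), ...) each call it and forward to the matching queue_* accessor, so the two forms in the sketch below are equivalent; the example function itself is hypothetical.

#include <linux/blkdev.h>
#include <linux/bug.h>

/* Illustrative only: the bdev_* wrapper and the explicit queue lookup
 * resolve to the same value. */
static unsigned int example_logical_block_size(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int via_queue = queue_logical_block_size(q);
	unsigned int via_bdev = bdev_logical_block_size(bdev);

	WARN_ON(via_queue != via_bdev);
	return via_bdev;
}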
/linux-4.4.14/fs/f2fs/ |
D | gc.h | 107 struct request_queue *q = bdev_get_queue(bdev); in is_idle()
|
D | segment.h | 686 struct request_queue *q = bdev_get_queue(bdev); in max_hw_blocks()
|
D | super.c | 324 q = bdev_get_queue(sb->s_bdev); in parse_options()
|
D | file.c | 1497 struct request_queue *q = bdev_get_queue(sb->s_bdev); in f2fs_ioc_fitrim()
|
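The FITRIM handlers listed for f2fs (and, further down, jfs, ext4, ocfs2, nilfs2, gfs2 and btrfs) use the same bdev_get_queue() pattern: refuse the ioctl when the queue does not advertise discard, and clamp the requested minimum extent to the queue's discard granularity. A hedged sketch follows, with an invented function name and 4.4 field names.

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/fs.h>

/* Illustrative only: the common FITRIM capability/granularity checks. */
static int example_fitrim_checks(struct super_block *sb,
				 struct fstrim_range *range)
{
	struct request_queue *q = bdev_get_queue(sb->s_bdev);

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	range->minlen = max_t(u64, range->minlen,
			      q->limits.discard_granularity);
	return 0;
}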
/linux-4.4.14/drivers/target/ |
D | target_core_iblock.c | 118 q = bdev_get_queue(bd); in iblock_configure_device() 651 struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd); in iblock_execute_rw() 742 struct request_queue *q = bdev_get_queue(bd); in iblock_get_blocks() 803 struct request_queue *q = bdev_get_queue(bd); in iblock_get_write_cache()
|
D | target_core_file.c | 148 struct request_queue *q = bdev_get_queue(inode->i_bdev); in fd_configure_device()
|
/linux-4.4.14/fs/jfs/ |
D | ioctl.c | 134 struct request_queue *q = bdev_get_queue(sb->s_bdev); in jfs_ioctl()
|
D | super.c | 376 struct request_queue *q = bdev_get_queue(sb->s_bdev); in parse_options() 395 struct request_queue *q = bdev_get_queue(sb->s_bdev); in parse_options()
|
/linux-4.4.14/drivers/md/bcache/ |
D | request.c | 917 blk_queue_discard(bdev_get_queue(dc->bdev))) in cached_dev_write() 996 !blk_queue_discard(bdev_get_queue(dc->bdev))) in cached_dev_make_request() 1016 struct request_queue *q = bdev_get_queue(dc->bdev); in cached_dev_congested() 1027 q = bdev_get_queue(ca->bdev); in cached_dev_congested() 1134 q = bdev_get_queue(ca->bdev); in flash_dev_congested()
|
D | sysfs.c | 848 if (blk_queue_discard(bdev_get_queue(ca->bdev))) in STORE()
|
D | super.c | 1096 struct request_queue *q = bdev_get_queue(dc->bdev); in cached_dev_init() 1858 if (blk_queue_discard(bdev_get_queue(ca->bdev))) in register_cache()
|
/linux-4.4.14/fs/xfs/ |
D | xfs_discard.c | 159 struct request_queue *q = bdev_get_queue(mp->m_ddev_targp->bt_bdev); in xfs_ioc_trim()
|
/linux-4.4.14/drivers/block/xen-blkback/ |
D | xenbus.c | 415 q = bdev_get_queue(bdev); in xen_vbd_create() 474 struct request_queue *q = bdev_get_queue(bdev); in xen_blkbk_discard()
|
/linux-4.4.14/fs/ext4/ |
D | ioctl.c | 592 struct request_queue *q = bdev_get_queue(sb->s_bdev); in ext4_ioctl()
|
D | super.c | 3940 struct request_queue *q = bdev_get_queue(sb->s_bdev); in ext4_fill_super()
|
/linux-4.4.14/fs/logfs/ |
D | super.c | 122 sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info; in logfs_sb_set()
|
/linux-4.4.14/kernel/trace/ |
D | blktrace.c | 645 q = bdev_get_queue(bdev); in blk_trace_ioctl() 1623 return bdev_get_queue(bdev); in blk_trace_get_queue()
|
/linux-4.4.14/fs/ocfs2/ |
D | ioctl.c | 929 struct request_queue *q = bdev_get_queue(sb->s_bdev); in ocfs2_ioctl()
|
/linux-4.4.14/fs/nilfs2/ |
D | ioctl.c | 1086 struct request_queue *q = bdev_get_queue(nilfs->ns_bdev); in nilfs_ioctl_trim_fs()
|
D | super.c | 1082 sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info; in nilfs_fill_super()
|
/linux-4.4.14/fs/ |
D | direct-io.c | 448 if (!blk_poll(bdev_get_queue(dio->bio_bdev), dio->bio_cookie)) in dio_await_one()
|
D | super.c | 956 s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info; in set_bdev_super()
|
/linux-4.4.14/mm/ |
D | swapfile.c | 2378 struct request_queue *q = bdev_get_queue(si->bdev); in swap_discardable() 2464 if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) { in SYSCALL_DEFINE2()
|
/linux-4.4.14/fs/btrfs/ |
D | volumes.c | 935 q = bdev_get_queue(bdev); in __btrfs_open_devices() 943 if (!blk_queue_nonrot(bdev_get_queue(bdev))) in __btrfs_open_devices() 2306 q = bdev_get_queue(bdev); in btrfs_init_new_device() 2348 if (!blk_queue_nonrot(bdev_get_queue(bdev))) in btrfs_init_new_device() 2511 q = bdev_get_queue(bdev); in btrfs_init_dev_replace_tgtdev()
|
D | ioctl.c | 392 q = bdev_get_queue(device->bdev); in btrfs_ioctl_fitrim()
|
/linux-4.4.14/drivers/block/ |
D | pktcdvd.c | 702 struct request_queue *q = bdev_get_queue(pd->bdev); in pkt_generic_packet() 2212 q = bdev_get_queue(pd->bdev); in pkt_open_dev()
|
/linux-4.4.14/fs/gfs2/ |
D | ops_fstype.c | 1225 s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info; in set_gfs2_super()
|
D | rgrp.c | 1362 struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev); in gfs2_fitrim()
|
/linux-4.4.14/fs/fat/ |
D | inode.c | 1746 struct request_queue *q = bdev_get_queue(sb->s_bdev); in fat_fill_super()
|
/linux-4.4.14/drivers/block/drbd/ |
D | drbd_main.c | 2395 q = bdev_get_queue(device->ldev->backing_bdev); in drbd_congested()
|
D | drbd_receiver.c | 2321 struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev); in receive_Data()
|