Searched refs:queue_max_hw_sectors (Results 1 – 16 of 16) sorted by relevance
127 if (queue_max_hw_sectors(sdev->request_queue) > max_sectors) in slave_configure()
512 return sprintf(buf, "%u\n", queue_max_hw_sectors(sdev->request_queue)); in max_sectors_show()
212 if (len > (queue_max_hw_sectors(q) << 9)) in blk_rq_map_kern()
201 max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1, in queue_max_sectors_store()
223 int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1; in queue_max_hw_sectors_show()
300 if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9)) in sg_io()
723 if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q)) in bio_add_pc_page()
317 u32 len = queue_max_hw_sectors(dev->admin_q) << 9; in nvme_nvm_get_l2p_tbl()
738 nvme_block_nr(ns, queue_max_hw_sectors(ns->queue))); in nvme_trans_bdev_limits_page()
1661 u32 max_blocks = queue_max_hw_sectors(ns->queue); in nvme_trans_do_nvme_io()
121 dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q); in iblock_configure_device()
319 min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q)); in pscsi_add_device_to_list()
1136 max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9); in drbd_setup_queue_param()
1186 now = queue_max_hw_sectors(device->rq_queue) << 9; in drbd_reconsider_max_bio_size()
1191 local = queue_max_hw_sectors(bdev->backing_bdev->bd_disk->queue) << 9; in drbd_reconsider_max_bio_size()
594 max_bio_size = queue_max_hw_sectors(device->rq_queue) << 9; in make_resync_request()
938 max_bio_size = queue_max_hw_sectors(device->ldev->backing_bdev->bd_disk->queue) << 9; in drbd_send_sizes()
1175 static inline unsigned int queue_max_hw_sectors(struct request_queue *q) in queue_max_hw_sectors() function
1332 blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue)); in rrpc_init()
2889 q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q)); in sd_revalidate_disk()
3769 q->limits.max_sectors = queue_max_hw_sectors(q); in rbd_init_disk()
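
A recurring pattern in the hits above: queue_max_hw_sectors() reports the hardware limit in 512-byte sectors, so callers shift left by 9 to compare it against a byte count (blk_rq_map_kern(), sg_io(), the drbd sites) or shift right by 1 to report it in kilobytes (queue_max_hw_sectors_show()). A minimal sketch of that check, assuming kernel context; the helper name fits_hw_limit() is hypothetical and not from the tree:

    #include <linux/blkdev.h>

    /* Hypothetical helper, not from the tree: mirrors the length checks in
     * blk_rq_map_kern() and sg_io() above, rejecting a transfer whose byte
     * length exceeds the queue's hardware limit. */
    static bool fits_hw_limit(struct request_queue *q, unsigned int len)
    {
            /* queue_max_hw_sectors() is in 512-byte sectors; << 9 converts to bytes */
            return len <= (queue_max_hw_sectors(q) << 9);
    }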