Home
last modified time | relevance | path

Searched refs:nr_phys_segments (Results 1 – 15 of 15) sorted by relevance

/linux-4.1.27/block/
blk-merge.c:94 rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio, in blk_recalc_rq_segments()
292 if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q)) in ll_new_hw_segment()
302 req->nr_phys_segments += nr_phys_segs; in ll_new_hw_segment()
392 total_phys_segments = req->nr_phys_segments + next->nr_phys_segments; in ll_merge_requests_fn()
394 if (req->nr_phys_segments == 1) in ll_merge_requests_fn()
396 if (next->nr_phys_segments == 1) in ll_merge_requests_fn()
408 req->nr_phys_segments = total_phys_segments; in ll_merge_requests_fn()
bsg-lib.c:92 size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments); in bsg_map_buffer()
94 BUG_ON(!req->nr_phys_segments); in bsg_map_buffer()
99 sg_init_table(buf->sg_list, req->nr_phys_segments); in bsg_map_buffer()
blk-core.c:1458 rq->nr_phys_segments = 1; in blk_add_request_payload()
2041 if (rq->nr_phys_segments > queue_max_segments(q)) { in blk_rq_check_limits()
2300 rq->nr_phys_segments++; in blk_peek_request()
2322 --rq->nr_phys_segments; in blk_peek_request()
2851 rq->nr_phys_segments = bio_phys_segments(q, bio); in blk_rq_bio_prep()
2937 dst->nr_phys_segments = src->nr_phys_segments; in __blk_rq_prep_clone()
blk-mq.c:205 rq->nr_phys_segments = 0; in blk_mq_rq_ctx_init()
452 rq->nr_phys_segments++; in blk_mq_start_request()
465 rq->nr_phys_segments--; in __blk_mq_requeue_request()
/linux-4.1.27/Documentation/block/
request.txt:61 unsigned short nr_phys_segments DB Number of physical scatter gather
biodoc.txt:529 unsigned short nr_phys_segments;
/linux-4.1.27/drivers/block/
xen-blkfront.c:411 max_grefs = req->nr_phys_segments; in blkif_queue_request()
417 max_grefs += INDIRECT_GREFS(req->nr_phys_segments); in blkif_queue_request()
451 req->nr_phys_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); in blkif_queue_request()
453 req->nr_phys_segments > info->max_indirect_segments); in blkif_queue_request()
1557 BUG_ON(req->nr_phys_segments > segs); in blkif_recover()
nvme-core.c:434 if (rq->nr_phys_segments <= NVME_INT_PAGES && in nvme_alloc_iod()
439 iod_init(iod, size, rq->nr_phys_segments, in nvme_alloc_iod()
444 return __nvme_alloc_iod(rq->nr_phys_segments, size, dev, in nvme_alloc_iod()
854 } else if (req->nr_phys_segments) { in nvme_queue_rq()
857 sg_init_table(iod->sg, req->nr_phys_segments); in nvme_queue_rq()
virtio_blk.c:172 BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); in virtio_queue_rq()
cpqarray.c:917 BUG_ON(creq->nr_phys_segments > SG_MAX); in do_ida_request()
cciss.c:3305 BUG_ON(creq->nr_phys_segments > h->maxsgentries); in do_cciss_request()
/linux-4.1.27/drivers/scsi/
scsi_lib.c:1089 if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments, in scsi_init_sgtable()
1122 BUG_ON(!rq->nr_phys_segments); in scsi_init_io()
scsi_transport_fc.c:3675 size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments); in fc_bsg_map_buffer()
3677 BUG_ON(!req->nr_phys_segments); in fc_bsg_map_buffer()
3682 sg_init_table(buf->sg_list, req->nr_phys_segments); in fc_bsg_map_buffer()
/linux-4.1.27/drivers/mmc/card/
block.c:1578 phys_segments += cur->nr_phys_segments; in mmc_blk_prep_packed_list()
1618 phys_segments += next->nr_phys_segments; in mmc_blk_prep_packed_list()
/linux-4.1.27/include/linux/
blkdev.h:175 unsigned short nr_phys_segments; member