Searched refs: BLKIF_MAX_SEGMENTS_PER_REQUEST (Results 1 – 4 of 4) sorted by relevance
Result 1 of 4:
    134  #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11    [macro]
    153  struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
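Result 1 is the definition itself: the blkif ring protocol allows at most 11 scatter-gather segments inline in a single request, and the request structure sizes its segment array with the same constant. Below is a minimal, self-contained sketch of that relationship; the struct is simplified (the real request carries more fields such as operation, handle, id and sector number), so treat everything other than the constant and the seg array bound as illustrative.

    /* Sketch only: simplified from the request layout implied by the hits
     * above; not the kernel's actual struct definitions. */
    #include <stdint.h>

    #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11   /* the macro from line 134 */

    typedef uint32_t grant_ref_t;

    struct blkif_request_segment {
        grant_ref_t gref;                  /* grant for the I/O buffer page */
        uint8_t     first_sect, last_sect; /* sector range within that page */
    };

    struct blkif_request_rw_sketch {
        uint8_t nr_segments;               /* <= BLKIF_MAX_SEGMENTS_PER_REQUEST */
        /* ... other request fields elided ... */
        struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    };

With 4 KiB pages, 11 segments cap a single non-indirect request at 44 KiB of data, which is why the frontend hits in Result 4 fall back to this value when indirect descriptors are not offered.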
Result 2 of 4:
    271  struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];    in free_persistent_gnts()
    272  struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];    in free_persistent_gnts()
    293  if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||    in free_persistent_gnts()
    312  struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];    in xen_blkbk_unmap_purged_grants()
    313  struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];    in xen_blkbk_unmap_purged_grants()
    336  if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {    in xen_blkbk_unmap_purged_grants()
    757  struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];    in xen_blkbk_unmap()
    758  struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];    in xen_blkbk_unmap()
    763  unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);    in xen_blkbk_unmap()
    781  struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];    in xen_blkbk_map()
    [all …]
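Result 2 is the backend side (the xen_blkbk_* and persistent-grant functions): grant map/unmap operations are staged in fixed-size on-stack arrays of BLKIF_MAX_SEGMENTS_PER_REQUEST entries, and the batch is flushed whenever the counter reaches that limit (lines 293 and 336) or the work runs out. The sketch below shows only that batching idiom in standalone userspace C; flush_unmap_batch() and struct unmap_op are hypothetical stand-ins for the real grant-table API and types.

    /* Batching sketch, assuming a hypothetical flush_unmap_batch() helper. */
    #include <stdio.h>
    #include <stddef.h>

    #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11

    struct unmap_op { unsigned long host_addr; unsigned int gref; };

    /* Hypothetical stand-in for the real batched grant-table unmap call. */
    static void flush_unmap_batch(struct unmap_op *ops, unsigned int count)
    {
        printf("unmapping a batch of %u grants (first gref %u)\n",
               count, ops[0].gref);
    }

    static void unmap_grants(const unsigned int *grefs, size_t nr)
    {
        struct unmap_op unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; /* on-stack batch */
        unsigned int segs_to_unmap = 0;

        for (size_t i = 0; i < nr; i++) {
            unmap[segs_to_unmap].gref = grefs[i];
            unmap[segs_to_unmap].host_addr = 0; /* real code records the mapping */

            /* Flush when the batch is full or the last grant is queued. */
            if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
                i == nr - 1) {
                flush_unmap_batch(unmap, segs_to_unmap);
                segs_to_unmap = 0;
            }
        }
    }

    int main(void)
    {
        unsigned int grefs[25];
        for (unsigned int i = 0; i < 25; i++)
            grefs[i] = 100 + i;
        unmap_grants(grefs, 25);  /* flushes batches of 11, 11 and 3 */
        return 0;
    }

Keeping the batch bounded by the per-request maximum keeps the stack arrays small and matches the natural unit of work, one ring request's worth of segments.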
Result 3 of 4:
    76   struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    137  struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    399  int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;    in blkif_get_x86_32_req()
    447  int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;    in blkif_get_x86_64_req()
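Result 3 covers the 32-bit and 64-bit guest request converters. Starting the loop bound n at BLKIF_MAX_SEGMENTS_PER_REQUEST means the segment copy can never exceed the protocol maximum, even if the guest supplies a larger segment count. A hedged sketch of that clamping pattern, with simplified stand-in types rather than the real request layouts:

    /* Clamped-copy sketch; types and names are illustrative only. */
    #include <stdint.h>

    #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11

    struct seg { uint32_t gref; uint8_t first_sect, last_sect; };

    struct guest_req  { uint8_t nr_segments; struct seg seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; };
    struct native_req { uint8_t nr_segments; struct seg seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; };

    void get_req(struct native_req *dst, const struct guest_req *src)
    {
        int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;

        dst->nr_segments = src->nr_segments;  /* raw count; validated elsewhere */
        if (n > dst->nr_segments)
            n = dst->nr_segments;             /* copy only what the guest filled in */
        for (i = 0; i < n; i++)
            dst->seg[i] = src->seg[i];        /* never more than the protocol max */
    }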
Result 4 of 4:
    143   (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
    412   if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST)    in blkif_queue_request()
    451   req->nr_phys_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);    in blkif_queue_request()
    456   if (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) {    in blkif_queue_request()
    861   BLKIF_MAX_SEGMENTS_PER_REQUEST)) {    in xlvbd_alloc_gendisk()
    1492  segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;    in blkif_recover()
    1695  segs = BLKIF_MAX_SEGMENTS_PER_REQUEST;    in blkfront_setup_indirect()
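Result 4 is the frontend: the constant feeds the queue limits set up in xlvbd_alloc_gendisk() (line 861), is used to sanity-check request segment counts in blkif_queue_request() (lines 451 and 456), sizes a per-ring grant figure (line 143), and serves as the fallback when the backend advertises no indirect-segment support (lines 1492 and 1695, using GCC's a ?: b form). The sketch below mimics that fallback in standalone C; BLK_RING_SIZE, the info struct and the printed numbers are assumptions for illustration only.

    /* Fallback sketch, assuming a ring depth and a simplified info struct. */
    #include <stdio.h>

    #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
    #define BLK_RING_SIZE 32   /* assumed ring depth for this sketch */

    struct blkfront_info_sketch {
        unsigned int max_indirect_segments;  /* 0 if the backend lacks the feature */
    };

    int main(void)
    {
        struct blkfront_info_sketch info = { .max_indirect_segments = 0 };

        /* Equivalent of GCC's "a ?: b": use a if non-zero, else b. */
        unsigned int segs = info.max_indirect_segments ?
                            info.max_indirect_segments :
                            BLKIF_MAX_SEGMENTS_PER_REQUEST;

        /* Segments times ring slots, as in the expression at line 143 above. */
        unsigned int per_ring = BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE;

        printf("segments per request: %u, per-ring total: %u\n",
               segs, per_ring);
        return 0;
    }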