Lines Matching refs:bio (cross-reference listing for block/blk-merge.c: each entry shows the source line number, the matched line, and its enclosing function)
13 struct bio *bio, in __blk_recalc_rq_segments() argument
19 struct bio *fbio, *bbio; in __blk_recalc_rq_segments()
22 if (!bio) in __blk_recalc_rq_segments()
29 if (bio->bi_rw & REQ_DISCARD) in __blk_recalc_rq_segments()
32 if (bio->bi_rw & REQ_WRITE_SAME) in __blk_recalc_rq_segments()
35 fbio = bio; in __blk_recalc_rq_segments()
40 for_each_bio(bio) { in __blk_recalc_rq_segments()
41 bio_for_each_segment(bv, bio, iter) { in __blk_recalc_rq_segments()
78 bbio = bio; in __blk_recalc_rq_segments()
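
The matched lines above trace the segment-counting walk in __blk_recalc_rq_segments(): discard and write-same bios short-circuit to at most one segment, and everything else iterates every bvec of every chained bio. The sketch below is a condensed reconstruction from these matched lines, not the verbatim kernel function; the real code also splits segments that exceed queue_max_segment_size() and tracks bi_seg_front_size/back_size, which is elided here.

    #include <linux/blkdev.h>
    #include <linux/bio.h>

    /*
     * Hedged reconstruction of the counting loop suggested by the
     * matched lines. Segment-size limits and front/back size tracking
     * are elided; only the contiguity test remains.
     */
    static unsigned int count_phys_segments(struct request_queue *q,
                                            struct bio *bio)
    {
            struct bio_vec bv, bvprv = { NULL };
            struct bvec_iter iter;
            unsigned int nr_segs = 0;
            bool first = true;

            if (!bio)
                    return 0;

            /* A discard carries a payload only when bi_vcnt says so. */
            if (bio->bi_rw & REQ_DISCARD)
                    return bio->bi_vcnt ? 1 : 0;
            if (bio->bi_rw & REQ_WRITE_SAME)
                    return 1;

            for_each_bio(bio)
                    bio_for_each_segment(bv, bio, iter) {
                            /* New segment unless physically contiguous
                             * with the previous bvec. */
                            if (first || !BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
                                    nr_segs++;
                            bvprv = bv;
                            first = false;
                    }

            return nr_segs;
    }
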
94 rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio, in blk_recalc_rq_segments()
98 void blk_recount_segments(struct request_queue *q, struct bio *bio) in blk_recount_segments() argument
103 if (bio_flagged(bio, BIO_CLONED)) in blk_recount_segments()
104 seg_cnt = bio_segments(bio); in blk_recount_segments()
106 seg_cnt = bio->bi_vcnt; in blk_recount_segments()
110 bio->bi_phys_segments = seg_cnt; in blk_recount_segments()
112 struct bio *nxt = bio->bi_next; in blk_recount_segments()
114 bio->bi_next = NULL; in blk_recount_segments()
115 bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false); in blk_recount_segments()
116 bio->bi_next = nxt; in blk_recount_segments()
119 bio->bi_flags |= (1 << BIO_SEG_VALID); in blk_recount_segments()
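
The blk_recount_segments() lines above show a neat trick for recounting a single bio that may sit mid-chain: hide the chain link, recount, restore it. A hedged sketch follows, reusing count_phys_segments() from above in place of __blk_recalc_rq_segments(); the fast path also visible in the listing (bio_segments() for clones, bi_vcnt otherwise) is elided.

    #include <linux/blkdev.h>

    /*
     * Single-bio recount pattern from blk_recount_segments(): detach the
     * bio so the recalculation sees only this bio, then relink and mark
     * the count valid.
     */
    static void recount_one_bio(struct request_queue *q, struct bio *bio)
    {
            struct bio *nxt = bio->bi_next;

            bio->bi_next = NULL;                    /* hide the chain tail */
            bio->bi_phys_segments = count_phys_segments(q, bio);
            bio->bi_next = nxt;                     /* relink */

            bio->bi_flags |= (1 << BIO_SEG_VALID);  /* count is now trusted */
    }
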
123 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, in blk_phys_contig_segment() argument
124 struct bio *nxt) in blk_phys_contig_segment()
132 if (bio->bi_seg_back_size + nxt->bi_seg_front_size > in blk_phys_contig_segment()
136 if (!bio_has_data(bio)) in blk_phys_contig_segment()
139 bio_for_each_segment(end_bv, bio, iter) in blk_phys_contig_segment()
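
blk_phys_contig_segment() decides whether the last segment of one bio and the first segment of the next can be fused. The matched lines show two of its vetoes; the sketch below paraphrases them and only gestures at the final bvec-level test, which the listing does not include.

    #include <linux/blkdev.h>

    /*
     * Hedged paraphrase of blk_phys_contig_segment(). The real function
     * finishes by walking to prev's last bvec and next's first bvec and
     * applying BIOVEC_PHYS_MERGEABLE(); that walk is elided here and
     * assumed to pass.
     */
    static bool bios_can_share_segment(struct request_queue *q,
                                       struct bio *prev, struct bio *next)
    {
            /* A fused segment must still fit the queue's segment limit. */
            if (prev->bi_seg_back_size + next->bi_seg_front_size >
                queue_max_segment_size(q))
                    return false;

            /* No payload means nothing can conflict physically. */
            if (!bio_has_data(prev))
                    return true;

            return true;    /* bvec-level contiguity test elided */
    }
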
201 static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio, in __blk_bios_map_sg() argument
212 if (bio->bi_rw & REQ_DISCARD) { in __blk_bios_map_sg()
221 if (bio->bi_vcnt) in __blk_bios_map_sg()
227 if (bio->bi_rw & REQ_WRITE_SAME) { in __blk_bios_map_sg()
230 bvec = bio_iovec(bio); in __blk_bios_map_sg()
235 for_each_bio(bio) in __blk_bios_map_sg()
236 bio_for_each_segment(bvec, bio, iter) in __blk_bios_map_sg()
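
__blk_bios_map_sg() mirrors the counting logic when it fills a scatterlist: discard and write-same contribute at most one entry built from the single payload bvec, while regular I/O walks every bvec. The sketch below is simplified: unlike the kernel's version it starts a new scatterlist entry per bvec instead of coalescing adjacent ones, and it assumes sglist is large enough for the request.

    #include <linux/blkdev.h>
    #include <linux/scatterlist.h>

    /*
     * Simplified (non-coalescing) sketch of __blk_bios_map_sg(). The real
     * code extends the previous sg entry when bvecs are contiguous; this
     * version maps one entry per bvec, which is correct but pessimal.
     */
    static int map_bios_to_sg(struct request_queue *q, struct bio *bio,
                              struct scatterlist *sglist)
    {
            struct scatterlist *sg = NULL;
            struct bio_vec bvec;
            struct bvec_iter iter;
            int nsegs = 0;

            /* Discard (with payload) and write-same map to one entry. */
            if (bio->bi_rw & (REQ_DISCARD | REQ_WRITE_SAME)) {
                    if ((bio->bi_rw & REQ_DISCARD) && !bio->bi_vcnt)
                            return 0;       /* payload-less discard */
                    bvec = bio_iovec(bio);
                    sg_set_page(sglist, bvec.bv_page, bvec.bv_len,
                                bvec.bv_offset);
                    sg_mark_end(sglist);
                    return 1;
            }

            for_each_bio(bio)
                    bio_for_each_segment(bvec, bio, iter) {
                            sg = sg ? sg_next(sg) : sglist;
                            sg_set_page(sg, bvec.bv_page, bvec.bv_len,
                                        bvec.bv_offset);
                            nsegs++;
                    }

            if (sg)
                    sg_mark_end(sg);
            return nsegs;
    }
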
253 if (rq->bio) in blk_rq_map_sg()
254 nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg); in blk_rq_map_sg()
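
Drivers consume this through blk_rq_map_sg(), visible at the two matched lines above. An illustrative call site follows; the table size is an assumption, and the queue's limits guarantee a request never produces more entries than queue_max_segments().

    #include <linux/blkdev.h>
    #include <linux/scatterlist.h>

    /* NR_SG is an illustrative bound, not a kernel constant. */
    #define NR_SG 128

    static int prepare_sg_for_dma(struct request_queue *q,
                                  struct request *rq,
                                  struct scatterlist *sgl)
    {
            int nents;

            sg_init_table(sgl, NR_SG);
            nents = blk_rq_map_sg(q, rq, sgl);  /* entries actually used */

            /* sgl[0..nents) is now ready for dma_map_sg() / the HBA. */
            return nents;
    }
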
288 struct bio *bio) in ll_new_hw_segment() argument
290 int nr_phys_segs = bio_phys_segments(q, bio); in ll_new_hw_segment()
295 if (blk_integrity_merge_bio(q, req, bio) == false) in ll_new_hw_segment()
313 struct bio *bio) in ll_back_merge_fn() argument
315 if (blk_rq_sectors(req) + bio_sectors(bio) > in ll_back_merge_fn()
324 if (!bio_flagged(bio, BIO_SEG_VALID)) in ll_back_merge_fn()
325 blk_recount_segments(q, bio); in ll_back_merge_fn()
327 return ll_new_hw_segment(q, req, bio); in ll_back_merge_fn()
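
ll_back_merge_fn() together with ll_new_hw_segment() is the admission check for appending a bio to an existing request. A condensed sketch of the two, based on the matched lines; the EIO bookkeeping on oversized requests and the blk_integrity_merge_bio() veto are folded into plain returns.

    #include <linux/blkdev.h>

    /*
     * Back-merge admission: size limit first, then a trusted segment
     * count, then the hardware segment budget (the core of
     * ll_new_hw_segment()). Integrity veto elided.
     */
    static int back_merge_ok(struct request_queue *q, struct request *req,
                             struct bio *bio)
    {
            if (blk_rq_sectors(req) + bio_sectors(bio) >
                blk_rq_get_max_sectors(req))
                    return 0;

            if (!bio_flagged(bio, BIO_SEG_VALID))
                    blk_recount_segments(q, bio);

            return req->nr_phys_segments + bio_phys_segments(q, bio) <=
                   queue_max_segments(q);
    }
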
331 struct bio *bio) in ll_front_merge_fn() argument
333 if (blk_rq_sectors(req) + bio_sectors(bio) > in ll_front_merge_fn()
340 if (!bio_flagged(bio, BIO_SEG_VALID)) in ll_front_merge_fn()
341 blk_recount_segments(q, bio); in ll_front_merge_fn()
342 if (!bio_flagged(req->bio, BIO_SEG_VALID)) in ll_front_merge_fn()
343 blk_recount_segments(q, req->bio); in ll_front_merge_fn()
345 return ll_new_hw_segment(q, req, bio); in ll_front_merge_fn()
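
The front-merge variant mirrors it, with one extra step visible in the listing: req->bio's segment count must be valid too, since the incoming bio becomes the new head of the request. Hedged sketch under the same simplifications as above.

    #include <linux/blkdev.h>

    static int front_merge_ok(struct request_queue *q, struct request *req,
                              struct bio *bio)
    {
            if (blk_rq_sectors(req) + bio_sectors(bio) >
                blk_rq_get_max_sectors(req))
                    return 0;

            /* Both boundary bios need trusted segment counts. */
            if (!bio_flagged(bio, BIO_SEG_VALID))
                    blk_recount_segments(q, bio);
            if (!bio_flagged(req->bio, BIO_SEG_VALID))
                    blk_recount_segments(q, req->bio);

            return req->nr_phys_segments + bio_phys_segments(q, bio) <=
                   queue_max_segments(q);
    }
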
361 struct bio *prev = req->biotail; in req_gap_to_prev()
364 next->bio->bi_io_vec[0].bv_offset); in req_gap_to_prev()
372 req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size; in ll_merge_requests_fn()
393 if (blk_phys_contig_segment(q, req->biotail, next->bio)) { in ll_merge_requests_fn()
395 req->bio->bi_seg_front_size = seg_size; in ll_merge_requests_fn()
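
When two whole requests merge, req_gap_to_prev() vetoes the merge if the tail bvec of the first and the head bvec of the second would leave a gap the controller's SG engine cannot express, and ll_merge_requests_fn() then fuses the boundary segment sizes (the bi_seg_back_size + bi_seg_front_size arithmetic above). The gap check reconstructs almost verbatim from the matched lines; note the two-argument bvec_gap_to_prev() form is specific to kernels of this vintage.

    #include <linux/blkdev.h>

    /*
     * Gap check between the last bvec of req and the first bvec of next.
     * In this era the check is gated on a queue SG-gap flag; that gate
     * is elided here.
     */
    static int requests_have_gap(struct request *req, struct request *next)
    {
            struct bio *prev = req->biotail;

            return bvec_gap_to_prev(&prev->bi_io_vec[prev->bi_vcnt - 1],
                                    next->bio->bi_io_vec[0].bv_offset);
    }
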
424 struct bio *bio; in blk_rq_set_mixed_merge() local
434 for (bio = rq->bio; bio; bio = bio->bi_next) { in blk_rq_set_mixed_merge()
435 WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) && in blk_rq_set_mixed_merge()
436 (bio->bi_rw & REQ_FAILFAST_MASK) != ff); in blk_rq_set_mixed_merge()
437 bio->bi_rw |= ff; in blk_rq_set_mixed_merge()
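
blk_rq_set_mixed_merge() handles requests whose bios disagree on REQ_FAILFAST_* settings: every bio is stamped with the request's effective failfast bits so that partial completions report errors consistently. This reconstructs almost directly from the matched lines; only the early return is supplied from context.

    #include <linux/blkdev.h>

    static void set_mixed_merge(struct request *rq)
    {
            unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
            struct bio *bio;

            if (rq->cmd_flags & REQ_MIXED_MERGE)
                    return;         /* already stamped */

            /* Distribute the request's failfast bits over every bio. */
            for (bio = rq->bio; bio; bio = bio->bi_next) {
                    WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
                                 (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
                    bio->bi_rw |= ff;
            }
            rq->cmd_flags |= REQ_MIXED_MERGE;
    }
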
483 !blk_write_same_mergeable(req->bio, next->bio)) in attempt_merge()
517 req->biotail->bi_next = next->bio; in attempt_merge()
534 next->bio = NULL; in attempt_merge()
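
The splice itself in attempt_merge() is a plain list operation: chain next's bios onto req's tail, then strip next of its bios so freeing it releases nothing that req now owns. A minimal sketch; elevator callbacks, I/O accounting and timestamp updates are elided.

    #include <linux/blkdev.h>

    static void splice_requests(struct request *req, struct request *next)
    {
            req->biotail->bi_next = next->bio;  /* append next's bio chain */
            req->biotail = next->biotail;
            req->__data_len += blk_rq_bytes(next);

            next->bio = NULL;   /* next owns no bios now; safe to free */
    }
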
565 bool blk_rq_merge_ok(struct request *rq, struct bio *bio) in blk_rq_merge_ok() argument
569 if (!rq_mergeable(rq) || !bio_mergeable(bio)) in blk_rq_merge_ok()
572 if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw)) in blk_rq_merge_ok()
576 if (bio_data_dir(bio) != rq_data_dir(rq)) in blk_rq_merge_ok()
580 if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq)) in blk_rq_merge_ok()
584 if (blk_integrity_merge_bio(rq->q, rq, bio) == false) in blk_rq_merge_ok()
589 !blk_write_same_mergeable(rq->bio, bio)) in blk_rq_merge_ok()
596 if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset)) in blk_rq_merge_ok()
603 int blk_try_merge(struct request *rq, struct bio *bio) in blk_try_merge() argument
605 if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector) in blk_try_merge()
607 else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector) in blk_try_merge()
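
blk_rq_merge_ok() is a pure predicate (every matched line above is a veto: mergeability, flags, data direction, disk, integrity, write-same payload, SG gap), while blk_try_merge() classifies the geometry: a bio starting exactly where the request ends is a back merge, one ending exactly where the request starts is a front merge. Callers combine them roughly as below; this is a hedged sketch modeled on elevator-side usage, though the ELEVATOR_*_MERGE constants are real.

    #include <linux/blkdev.h>
    #include <linux/elevator.h>

    static int classify_merge(struct request *rq, struct bio *bio)
    {
            if (!blk_rq_merge_ok(rq, bio))
                    return ELEVATOR_NO_MERGE;

            /* ELEVATOR_BACK_MERGE, ELEVATOR_FRONT_MERGE or NO_MERGE. */
            return blk_try_merge(rq, bio);
    }
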