Lines matching refs:bio (all matched functions live in block/blk-merge.c)
12 static struct bio *blk_bio_discard_split(struct request_queue *q, in blk_bio_discard_split()
13 struct bio *bio, in blk_bio_discard_split() argument
35 if (bio_sectors(bio) <= max_discard_sectors) in blk_bio_discard_split()
46 tmp = bio->bi_iter.bi_sector + split_sectors - alignment; in blk_bio_discard_split()
52 return bio_split(bio, split_sectors, GFP_NOIO, bs); in blk_bio_discard_split()
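The matches above come from blk_bio_discard_split(), which caps an oversized discard at max_discard_sectors and hands the front piece back via bio_split(). Below is a minimal sketch of that pattern, assuming the 4.x-era bio API shown in these lines; my_split_discard() is a hypothetical name, and unlike the real function (see the alignment math at line 46) it ignores discard granularity alignment.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical, simplified discard capping; granularity alignment omitted. */
static struct bio *my_split_discard(struct request_queue *q,
                                    struct bio *bio, struct bio_set *bs)
{
        unsigned int max = q->limits.max_discard_sectors;

        if (!max || bio_sectors(bio) <= max)
                return NULL;    /* already small enough, nothing to split */

        /* Front part becomes a new bio of 'max' sectors; 'bio' keeps the rest. */
        return bio_split(bio, max, GFP_NOIO, bs);
}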
55 static struct bio *blk_bio_write_same_split(struct request_queue *q, in blk_bio_write_same_split()
56 struct bio *bio, in blk_bio_write_same_split() argument
65 if (bio_sectors(bio) <= q->limits.max_write_same_sectors) in blk_bio_write_same_split()
68 return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs); in blk_bio_write_same_split()
72 struct bio *bio) in get_max_io_size() argument
74 unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector); in get_max_io_size()
83 static struct bio *blk_bio_segment_split(struct request_queue *q, in blk_bio_segment_split()
84 struct bio *bio, in blk_bio_segment_split() argument
91 unsigned front_seg_size = bio->bi_seg_front_size; in blk_bio_segment_split()
93 struct bio *new = NULL; in blk_bio_segment_split()
94 const unsigned max_sectors = get_max_io_size(q, bio); in blk_bio_segment_split()
96 bio_for_each_segment(bv, bio, iter) { in blk_bio_segment_split()
155 new = bio_split(bio, sectors, GFP_NOIO, bs); in blk_bio_segment_split()
157 bio = new; in blk_bio_segment_split()
160 bio->bi_seg_front_size = front_seg_size; in blk_bio_segment_split()
161 if (seg_size > bio->bi_seg_back_size) in blk_bio_segment_split()
162 bio->bi_seg_back_size = seg_size; in blk_bio_segment_split()
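blk_bio_segment_split() walks the bio one segment at a time with bio_for_each_segment() before deciding where to cut. Here is a minimal sketch of that iterator, assuming the post-3.14 immutable-biovec API used above (a struct bio_vec by value plus a struct bvec_iter); my_count_bio_segments() is a hypothetical helper.

#include <linux/bio.h>

/* Hypothetical helper: count the segments and payload bytes of one bio. */
static unsigned int my_count_bio_segments(struct bio *bio, unsigned int *bytes)
{
        struct bio_vec bv;
        struct bvec_iter iter;
        unsigned int nsegs = 0;

        *bytes = 0;
        bio_for_each_segment(bv, bio, iter) {
                nsegs++;
                *bytes += bv.bv_len;
        }
        return nsegs;
}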
167 void blk_queue_split(struct request_queue *q, struct bio **bio, in blk_queue_split() argument
170 struct bio *split, *res; in blk_queue_split()
173 if ((*bio)->bi_rw & REQ_DISCARD) in blk_queue_split()
174 split = blk_bio_discard_split(q, *bio, bs, &nsegs); in blk_queue_split()
175 else if ((*bio)->bi_rw & REQ_WRITE_SAME) in blk_queue_split()
176 split = blk_bio_write_same_split(q, *bio, bs, &nsegs); in blk_queue_split()
178 split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs); in blk_queue_split()
181 res = split ? split : *bio; in blk_queue_split()
189 bio_chain(split, *bio); in blk_queue_split()
190 generic_make_request(*bio); in blk_queue_split()
191 *bio = split; in blk_queue_split()
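blk_queue_split() ties the split helpers together: the front part returned by bio_split() is chained to the remainder with bio_chain(), the remainder is resubmitted with generic_make_request(), and the caller carries on with the front part. A minimal sketch of that split-and-resubmit pattern under the same 4.x-era API follows; my_cap_bio() is hypothetical and assumes the resubmitted remainder passes through the same capping path again.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical: cap a bio at max_sectors, resubmitting the remainder. */
static struct bio *my_cap_bio(struct bio *bio, unsigned int max_sectors,
                              struct bio_set *bs)
{
        struct bio *split;

        if (bio_sectors(bio) <= max_sectors)
                return bio;

        split = bio_split(bio, max_sectors, GFP_NOIO, bs);
        /* The remainder ('bio') becomes the parent of the front piece. */
        bio_chain(split, bio);
        /* Resubmit the remainder; it is capped again when it re-enters. */
        generic_make_request(bio);
        return split;           /* caller continues with the front piece */
}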
197 struct bio *bio, in __blk_recalc_rq_segments() argument
203 struct bio *fbio, *bbio; in __blk_recalc_rq_segments()
206 if (!bio) in __blk_recalc_rq_segments()
213 if (bio->bi_rw & REQ_DISCARD) in __blk_recalc_rq_segments()
216 if (bio->bi_rw & REQ_WRITE_SAME) in __blk_recalc_rq_segments()
219 fbio = bio; in __blk_recalc_rq_segments()
223 for_each_bio(bio) { in __blk_recalc_rq_segments()
224 bio_for_each_segment(bv, bio, iter) { in __blk_recalc_rq_segments()
255 bbio = bio; in __blk_recalc_rq_segments()
271 rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio, in blk_recalc_rq_segments()
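__blk_recalc_rq_segments() walks every bio hanging off a request with for_each_bio() and then every segment with bio_for_each_segment(). Below is a minimal sketch of that double walk using the same iterators; my_rq_payload_bytes() is a hypothetical helper that simply totals bytes rather than counting physical segments.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical: total payload bytes across every bio chained into a request. */
static unsigned int my_rq_payload_bytes(struct request *rq)
{
        struct bio *bio = rq->bio;
        struct bio_vec bv;
        struct bvec_iter iter;
        unsigned int bytes = 0;

        for_each_bio(bio)
                bio_for_each_segment(bv, bio, iter)
                        bytes += bv.bv_len;

        return bytes;
}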
275 void blk_recount_segments(struct request_queue *q, struct bio *bio) in blk_recount_segments() argument
280 if (bio_flagged(bio, BIO_CLONED)) in blk_recount_segments()
281 seg_cnt = bio_segments(bio); in blk_recount_segments()
283 seg_cnt = bio->bi_vcnt; in blk_recount_segments()
287 bio->bi_phys_segments = seg_cnt; in blk_recount_segments()
289 struct bio *nxt = bio->bi_next; in blk_recount_segments()
291 bio->bi_next = NULL; in blk_recount_segments()
292 bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false); in blk_recount_segments()
293 bio->bi_next = nxt; in blk_recount_segments()
296 bio_set_flag(bio, BIO_SEG_VALID); in blk_recount_segments()
300 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, in blk_phys_contig_segment() argument
301 struct bio *nxt) in blk_phys_contig_segment()
309 if (bio->bi_seg_back_size + nxt->bi_seg_front_size > in blk_phys_contig_segment()
313 if (!bio_has_data(bio)) in blk_phys_contig_segment()
316 bio_for_each_segment(end_bv, bio, iter) in blk_phys_contig_segment()
378 static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio, in __blk_bios_map_sg() argument
389 if (bio->bi_rw & REQ_DISCARD) { in __blk_bios_map_sg()
398 if (bio->bi_vcnt) in __blk_bios_map_sg()
404 if (bio->bi_rw & REQ_WRITE_SAME) { in __blk_bios_map_sg()
407 bvec = bio_iovec(bio); in __blk_bios_map_sg()
412 for_each_bio(bio) in __blk_bios_map_sg()
413 bio_for_each_segment(bvec, bio, iter) in __blk_bios_map_sg()
430 if (rq->bio) in blk_rq_map_sg()
431 nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg); in blk_rq_map_sg()
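blk_rq_map_sg() is the driver-facing entry point here: it flattens a request's bios into a scatterlist via __blk_bios_map_sg(). A minimal sketch of how a block driver might call it, assuming a caller-provided scatterlist; my_prep_dma() and max_segs are illustrative names.

#include <linux/blkdev.h>
#include <linux/scatterlist.h>

/* Hypothetical driver-side use: build a scatterlist for DMA from a request. */
static int my_prep_dma(struct request *rq, struct scatterlist *sg,
                       unsigned int max_segs)
{
        int nsegs;

        /* Clear the table, then let the block layer fill it from rq's bios. */
        sg_init_table(sg, max_segs);
        nsegs = blk_rq_map_sg(rq->q, rq, sg);

        /* 'nsegs' entries of 'sg' are now ready for dma_map_sg(). */
        return nsegs;
}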
471 struct bio *bio) in ll_new_hw_segment() argument
473 int nr_phys_segs = bio_phys_segments(q, bio); in ll_new_hw_segment()
478 if (blk_integrity_merge_bio(q, req, bio) == false) in ll_new_hw_segment()
496 struct bio *bio) in ll_back_merge_fn() argument
498 if (req_gap_back_merge(req, bio)) in ll_back_merge_fn()
501 integrity_req_gap_back_merge(req, bio)) in ll_back_merge_fn()
503 if (blk_rq_sectors(req) + bio_sectors(bio) > in ll_back_merge_fn()
512 if (!bio_flagged(bio, BIO_SEG_VALID)) in ll_back_merge_fn()
513 blk_recount_segments(q, bio); in ll_back_merge_fn()
515 return ll_new_hw_segment(q, req, bio); in ll_back_merge_fn()
519 struct bio *bio) in ll_front_merge_fn() argument
522 if (req_gap_front_merge(req, bio)) in ll_front_merge_fn()
525 integrity_req_gap_front_merge(req, bio)) in ll_front_merge_fn()
527 if (blk_rq_sectors(req) + bio_sectors(bio) > in ll_front_merge_fn()
534 if (!bio_flagged(bio, BIO_SEG_VALID)) in ll_front_merge_fn()
535 blk_recount_segments(q, bio); in ll_front_merge_fn()
536 if (!bio_flagged(req->bio, BIO_SEG_VALID)) in ll_front_merge_fn()
537 blk_recount_segments(q, req->bio); in ll_front_merge_fn()
539 return ll_new_hw_segment(q, req, bio); in ll_front_merge_fn()
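ll_back_merge_fn() and ll_front_merge_fn() both gate a merge on combined size (lines 503 and 527) before recounting segments. Here is a reduced sketch of that size test, assuming the plain queue_max_sectors() limit; the real code uses a per-request limit that also covers discard and write-same, and additionally checks gaps, integrity payloads and segment counts. my_merge_fits() is hypothetical.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical, reduced size test: would the merged request stay in bounds? */
static bool my_merge_fits(struct request *req, struct bio *bio)
{
        struct request_queue *q = req->q;

        return blk_rq_sectors(req) + bio_sectors(bio) <= queue_max_sectors(q);
}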
558 req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size; in ll_merge_requests_fn()
567 if (req_gap_back_merge(req, next->bio)) in ll_merge_requests_fn()
578 if (blk_phys_contig_segment(q, req->biotail, next->bio)) { in ll_merge_requests_fn()
580 req->bio->bi_seg_front_size = seg_size; in ll_merge_requests_fn()
609 struct bio *bio; in blk_rq_set_mixed_merge() local
619 for (bio = rq->bio; bio; bio = bio->bi_next) { in blk_rq_set_mixed_merge()
620 WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) && in blk_rq_set_mixed_merge()
621 (bio->bi_rw & REQ_FAILFAST_MASK) != ff); in blk_rq_set_mixed_merge()
622 bio->bi_rw |= ff; in blk_rq_set_mixed_merge()
668 !blk_write_same_mergeable(req->bio, next->bio)) in attempt_merge()
702 req->biotail->bi_next = next->bio; in attempt_merge()
719 next->bio = NULL; in attempt_merge()
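The attempt_merge() matches at lines 702 and 719 are the bio-list splice itself: the second request's bio chain is appended to the first through the bi_next links, then detached so the soon-to-be-freed request no longer owns any bios. A minimal sketch of just that splice; my_splice_bios() is hypothetical, and the real function also fixes up lengths, segment counts and failfast flags.

#include <linux/blkdev.h>

/* Hypothetical: append next's bio chain to req and disown it from next. */
static void my_splice_bios(struct request *req, struct request *next)
{
        req->biotail->bi_next = next->bio;      /* link the two chains */
        req->biotail = next->biotail;           /* new tail of 'req' */
        next->bio = NULL;                       /* 'next' no longer owns them */
        /* Length/segment accounting on 'req' is omitted in this sketch. */
}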
750 bool blk_rq_merge_ok(struct request *rq, struct bio *bio) in blk_rq_merge_ok() argument
752 if (!rq_mergeable(rq) || !bio_mergeable(bio)) in blk_rq_merge_ok()
755 if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw)) in blk_rq_merge_ok()
759 if (bio_data_dir(bio) != rq_data_dir(rq)) in blk_rq_merge_ok()
763 if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq)) in blk_rq_merge_ok()
767 if (blk_integrity_merge_bio(rq->q, rq, bio) == false) in blk_rq_merge_ok()
772 !blk_write_same_mergeable(rq->bio, bio)) in blk_rq_merge_ok()
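blk_rq_merge_ok() rejects a candidate bio before any positional test if the basics do not line up. A reduced sketch of two of those checks (data direction and target disk), assuming the 4.x-era bi_bdev field; the real function also checks mergeability flags, special-merge state, integrity payloads and write-same pages. my_basic_merge_ok() is hypothetical.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical, reduced eligibility test for merging 'bio' into 'rq'. */
static bool my_basic_merge_ok(struct request *rq, struct bio *bio)
{
        if (bio_data_dir(bio) != rq_data_dir(rq))
                return false;                   /* reads don't merge with writes */
        if (rq->rq_disk != bio->bi_bdev->bd_disk)
                return false;                   /* must target the same disk */
        return true;
}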
778 int blk_try_merge(struct request *rq, struct bio *bio) in blk_try_merge() argument
780 if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector) in blk_try_merge()
782 else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector) in blk_try_merge()
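blk_try_merge() then picks the merge direction purely from sector arithmetic: a bio starting where the request ends is a back merge, a bio ending where the request starts is a front merge. A minimal sketch mirroring the two comparisons matched above; my_try_merge() is a hypothetical stand-in.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>

/* Hypothetical: decide merge direction from request and bio positions. */
static int my_try_merge(struct request *rq, struct bio *bio)
{
        if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
                return ELEVATOR_BACK_MERGE;     /* bio starts right after rq */
        if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
                return ELEVATOR_FRONT_MERGE;    /* bio ends right before rq */
        return ELEVATOR_NO_MERGE;
}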