Lines Matching refs:bio

142 static void req_bio_endio(struct request *rq, struct bio *bio,  in req_bio_endio()  argument
146 bio->bi_error = error; in req_bio_endio()
149 bio_set_flag(bio, BIO_QUIET); in req_bio_endio()
151 bio_advance(bio, nbytes); in req_bio_endio()
154 if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ)) in req_bio_endio()
155 bio_endio(bio); in req_bio_endio()
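
The req_bio_endio() hits above are the per-bio completion step: the error code is recorded in bi_error, the bio's iterator is advanced past the bytes just completed, and bio_endio() fires only once nothing is left and the request is not part of a flush sequence. A minimal sketch of that logic; the struct definitions and the REQ_FLUSH_SEQ value below are simplified stand-ins, not the kernel's real ones.

    /* Simplified stand-in types; the real struct bio/struct request are far richer. */
    struct bvec_iter { unsigned int bi_size; };            /* bytes still outstanding */
    struct bio       { int bi_error; struct bvec_iter bi_iter; };
    struct request   { unsigned int cmd_flags; };

    #define REQ_FLUSH_SEQ (1u << 0)                        /* placeholder bit, not the kernel value */

    void bio_advance_stub(struct bio *bio, unsigned int nbytes)
    {
        bio->bi_iter.bi_size -= nbytes;                    /* the real bio_advance() also walks bvecs */
    }

    void bio_endio_stub(struct bio *bio)
    {
        (void)bio;                                         /* would invoke bio->bi_end_io() */
    }

    void req_bio_endio_sketch(struct request *rq, struct bio *bio,
                              unsigned int nbytes, int error)
    {
        if (error)
            bio->bi_error = error;                         /* remember the failure */

        bio_advance_stub(bio, nbytes);                     /* consume the completed bytes */

        /* finish the bio only when fully consumed and not held by a flush sequence */
        if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
            bio_endio_stub(bio);
    }
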
170 rq->bio, rq->biotail, blk_rq_bytes(rq)); in blk_dump_rq_flags()
828 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
1015 static bool blk_rq_should_init_elevator(struct bio *bio) in blk_rq_should_init_elevator() argument
1017 if (!bio) in blk_rq_should_init_elevator()
1024 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) in blk_rq_should_init_elevator()
1037 static struct io_context *rq_ioc(struct bio *bio) in rq_ioc() argument
1040 if (bio && bio->bi_ioc) in rq_ioc()
1041 return bio->bi_ioc; in rq_ioc()
1061 struct bio *bio, gfp_t gfp_mask) in __get_request() argument
1066 struct io_context *ioc = rq_ioc(bio); in __get_request()
1126 if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) { in __get_request()
1156 if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) in __get_request()
1173 trace_block_getrq(q, bio, rw_flags & 1); in __get_request()
1233 struct bio *bio, gfp_t gfp_mask) in get_request() argument
1240 rl = blk_get_rl(q, bio); /* transferred to @rq on success */ in get_request()
1242 rq = __get_request(rl, rw_flags, bio, gfp_mask); in get_request()
1255 trace_block_sleeprq(q, bio, rw_flags & 1); in get_request()
1332 struct request *blk_make_request(struct request_queue *q, struct bio *bio, in blk_make_request() argument
1335 struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask); in blk_make_request()
1342 for_each_bio(bio) { in blk_make_request()
1343 struct bio *bounce_bio = bio; in blk_make_request()
1368 rq->bio = rq->biotail = NULL; in blk_rq_set_block_pc()
1476 WARN_ON(req->bio != NULL); in __blk_put_request()
1528 struct bio *bio = rq->bio; in blk_add_request_payload() local
1530 bio->bi_io_vec->bv_page = page; in blk_add_request_payload()
1531 bio->bi_io_vec->bv_offset = 0; in blk_add_request_payload()
1532 bio->bi_io_vec->bv_len = len; in blk_add_request_payload()
1534 bio->bi_iter.bi_size = len; in blk_add_request_payload()
1535 bio->bi_vcnt = 1; in blk_add_request_payload()
1536 bio->bi_phys_segments = 1; in blk_add_request_payload()
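
blk_add_request_payload() points a request's lone bio at a single payload page by filling the first bio_vec in place, which is how discard-style commands get a data buffer attached. A stand-alone sketch of that fill; the types are trimmed-down stand-ins and bi_size stands in for bi_iter.bi_size.

    /* Stand-in types; the real struct bio_vec/struct bio carry much more. */
    struct page;                              /* opaque, as in the kernel */

    struct bio_vec {
        struct page  *bv_page;
        unsigned int  bv_len;
        unsigned int  bv_offset;
    };

    struct bio {
        struct bio_vec *bi_io_vec;            /* vector of data segments */
        unsigned int    bi_size;              /* stands in for bi_iter.bi_size */
        unsigned short  bi_vcnt;
        unsigned short  bi_phys_segments;
    };

    /* Point the bio at a single payload page of 'len' bytes. */
    void add_payload(struct bio *bio, struct page *page, unsigned int len)
    {
        bio->bi_io_vec->bv_page   = page;
        bio->bi_io_vec->bv_offset = 0;
        bio->bi_io_vec->bv_len    = len;

        bio->bi_size          = len;
        bio->bi_vcnt          = 1;
        bio->bi_phys_segments = 1;
    }
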
1544 struct bio *bio) in bio_attempt_back_merge() argument
1546 const int ff = bio->bi_rw & REQ_FAILFAST_MASK; in bio_attempt_back_merge()
1548 if (!ll_back_merge_fn(q, req, bio)) in bio_attempt_back_merge()
1551 trace_block_bio_backmerge(q, req, bio); in bio_attempt_back_merge()
1556 req->biotail->bi_next = bio; in bio_attempt_back_merge()
1557 req->biotail = bio; in bio_attempt_back_merge()
1558 req->__data_len += bio->bi_iter.bi_size; in bio_attempt_back_merge()
1559 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); in bio_attempt_back_merge()
1566 struct bio *bio) in bio_attempt_front_merge() argument
1568 const int ff = bio->bi_rw & REQ_FAILFAST_MASK; in bio_attempt_front_merge()
1570 if (!ll_front_merge_fn(q, req, bio)) in bio_attempt_front_merge()
1573 trace_block_bio_frontmerge(q, req, bio); in bio_attempt_front_merge()
1578 bio->bi_next = req->bio; in bio_attempt_front_merge()
1579 req->bio = bio; in bio_attempt_front_merge()
1581 req->__sector = bio->bi_iter.bi_sector; in bio_attempt_front_merge()
1582 req->__data_len += bio->bi_iter.bi_size; in bio_attempt_front_merge()
1583 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); in bio_attempt_front_merge()
1611 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, in blk_attempt_plug_merge() argument
1644 if (rq->q != q || !blk_rq_merge_ok(rq, bio)) in blk_attempt_plug_merge()
1647 el_ret = blk_try_merge(rq, bio); in blk_attempt_plug_merge()
1649 ret = bio_attempt_back_merge(q, rq, bio); in blk_attempt_plug_merge()
1653 ret = bio_attempt_front_merge(q, rq, bio); in blk_attempt_plug_merge()
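
bio_attempt_back_merge() and bio_attempt_front_merge() differ only in which end of the request's bio chain the incoming bio joins: a back merge appends behind req->biotail, a front merge pushes in front of req->bio and pulls req->__sector down to the new starting sector; both grow __data_len by the bio's size and also refresh the request's I/O priority (omitted below). A rough sketch of just the chaining, with simplified stand-in types (bi_sector/bi_size stand in for the bi_iter fields):

    /* Simplified stand-in types for illustration only. */
    struct bio {
        struct bio   *bi_next;
        unsigned long bi_sector;    /* stands in for bio->bi_iter.bi_sector */
        unsigned int  bi_size;      /* stands in for bio->bi_iter.bi_size   */
    };

    struct request {
        struct bio   *bio;          /* head of the bio chain      */
        struct bio   *biotail;      /* tail of the bio chain      */
        unsigned long __sector;     /* where the request starts   */
        unsigned int  __data_len;   /* total bytes in the request */
    };

    /* back merge: the new bio continues where the request currently ends */
    void back_merge(struct request *req, struct bio *bio)
    {
        req->biotail->bi_next = bio;
        req->biotail = bio;
        req->__data_len += bio->bi_size;
    }

    /* front merge: the new bio precedes the request's current first bio */
    void front_merge(struct request *req, struct bio *bio)
    {
        bio->bi_next = req->bio;
        req->bio = bio;
        req->__sector = bio->bi_sector;    /* the request now starts earlier */
        req->__data_len += bio->bi_size;
    }
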
1686 void init_request_from_bio(struct request *req, struct bio *bio) in init_request_from_bio() argument
1690 req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK; in init_request_from_bio()
1691 if (bio->bi_rw & REQ_RAHEAD) in init_request_from_bio()
1695 req->__sector = bio->bi_iter.bi_sector; in init_request_from_bio()
1696 req->ioprio = bio_prio(bio); in init_request_from_bio()
1697 blk_rq_bio_prep(req->q, req, bio); in init_request_from_bio()
1700 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio) in blk_queue_bio() argument
1702 const bool sync = !!(bio->bi_rw & REQ_SYNC); in blk_queue_bio()
1713 blk_queue_bounce(q, &bio); in blk_queue_bio()
1715 blk_queue_split(q, &bio, q->bio_split); in blk_queue_bio()
1717 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { in blk_queue_bio()
1718 bio->bi_error = -EIO; in blk_queue_bio()
1719 bio_endio(bio); in blk_queue_bio()
1723 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) { in blk_queue_bio()
1734 if (blk_attempt_plug_merge(q, bio, &request_count, NULL)) in blk_queue_bio()
1741 el_ret = elv_merge(q, &req, bio); in blk_queue_bio()
1743 if (bio_attempt_back_merge(q, req, bio)) { in blk_queue_bio()
1744 elv_bio_merged(q, req, bio); in blk_queue_bio()
1750 if (bio_attempt_front_merge(q, req, bio)) { in blk_queue_bio()
1751 elv_bio_merged(q, req, bio); in blk_queue_bio()
1764 rw_flags = bio_data_dir(bio); in blk_queue_bio()
1772 req = get_request(q, rw_flags, bio, GFP_NOIO); in blk_queue_bio()
1774 bio->bi_error = PTR_ERR(req); in blk_queue_bio()
1775 bio_endio(bio); in blk_queue_bio()
1785 init_request_from_bio(req, bio); in blk_queue_bio()
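
Taken together, the blk_queue_bio() hits trace the legacy single-queue submission path: bounce and split the bio, run integrity preparation, special-case FLUSH/FUA, then try a plug merge, then an elevator merge, and only if nothing merges allocate a fresh request and initialise it from the bio. The outline below mirrors that ordering; every helper is a stub with an invented name (the kernel function it stands for is noted in its comment), so this is a shape sketch, not the real implementation.

    #include <stdbool.h>

    struct bio { int bi_error; };
    struct request { int unused; };
    struct request_queue { int unused; };

    /* invented stub names; the kernel code each stands for is in the comment */
    static void bounce_and_split(struct request_queue *q, struct bio **bio)
    { (void)q; (void)bio; }                               /* blk_queue_bounce + blk_queue_split */
    static bool integrity_prep_fails(struct bio *bio)
    { (void)bio; return false; }                          /* bio_integrity_enabled/_prep */
    static bool is_flush_fua(struct bio *bio)
    { (void)bio; return false; }                          /* bi_rw & (REQ_FLUSH | REQ_FUA) */
    static bool merged_into_plug(struct request_queue *q, struct bio *bio)
    { (void)q; (void)bio; return false; }                 /* blk_attempt_plug_merge */
    static bool merged_by_elevator(struct request_queue *q, struct bio *bio)
    { (void)q; (void)bio; return false; }                 /* elv_merge + bio_attempt_*_merge */
    static struct request *allocate_request(struct request_queue *q, struct bio *bio)
    { (void)q; (void)bio; static struct request rq; return &rq; }   /* get_request */
    static void fill_request_from_bio(struct request *req, struct bio *bio)
    { (void)req; (void)bio; }                             /* init_request_from_bio */
    static void end_bio(struct bio *bio) { (void)bio; }   /* bio_endio */

    void queue_bio_outline(struct request_queue *q, struct bio *bio)
    {
        bounce_and_split(q, &bio);            /* bounce highmem pages, split oversized bios */

        if (integrity_prep_fails(bio)) {      /* integrity setup failure ends the bio */
            bio->bi_error = -5;               /* -EIO */
            end_bio(bio);
            return;
        }

        if (!is_flush_fua(bio)) {             /* FLUSH/FUA bypasses all merging */
            if (merged_into_plug(q, bio))     /* merged into a request on the task's plug */
                return;
            if (merged_by_elevator(q, bio))   /* back/front merged into a queued request */
                return;
        }

        /* nothing to merge with: allocate a fresh request and initialise it from the bio */
        {
            struct request *req = allocate_request(q, bio);
            if (!req) {                       /* allocation failure ends the bio with an error */
                bio->bi_error = -12;          /* e.g. -ENOMEM */
                end_bio(bio);
                return;
            }
            fill_request_from_bio(req, bio);
        }
    }
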
1820 static inline void blk_partition_remap(struct bio *bio) in blk_partition_remap() argument
1822 struct block_device *bdev = bio->bi_bdev; in blk_partition_remap()
1824 if (bio_sectors(bio) && bdev != bdev->bd_contains) { in blk_partition_remap()
1827 bio->bi_iter.bi_sector += p->start_sect; in blk_partition_remap()
1828 bio->bi_bdev = bdev->bd_contains; in blk_partition_remap()
1830 trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio, in blk_partition_remap()
1832 bio->bi_iter.bi_sector - p->start_sect); in blk_partition_remap()
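
blk_partition_remap() rewrites a partition-relative bio into whole-device terms: the partition's starting sector (p->start_sect) is added to bi_iter.bi_sector and bi_bdev is repointed at bd_contains, the whole device. A minimal sketch of the arithmetic; for brevity the start sector is folded straight into a stand-in block_device rather than kept in a separate hd_struct as the kernel does.

    typedef unsigned long long sector_t;

    /* Stand-in types; start_sect is folded into the block_device for brevity. */
    struct block_device {
        struct block_device *bd_contains;   /* whole device this partition lives on */
        sector_t             start_sect;    /* partition offset on the whole device */
    };

    struct bio {
        struct block_device *bi_bdev;
        sector_t             bi_sector;     /* stands in for bio->bi_iter.bi_sector */
    };

    void partition_remap(struct bio *bio)
    {
        struct block_device *bdev = bio->bi_bdev;

        if (bdev != bdev->bd_contains) {            /* only partitions need remapping  */
            bio->bi_sector += bdev->start_sect;     /* shift into whole-device sectors */
            bio->bi_bdev = bdev->bd_contains;       /* issue against the whole device  */
        }
    }
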
1836 static void handle_bad_sector(struct bio *bio) in handle_bad_sector() argument
1842 bdevname(bio->bi_bdev, b), in handle_bad_sector()
1843 bio->bi_rw, in handle_bad_sector()
1844 (unsigned long long)bio_end_sector(bio), in handle_bad_sector()
1845 (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9)); in handle_bad_sector()
1886 static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors) in bio_check_eod() argument
1894 maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; in bio_check_eod()
1896 sector_t sector = bio->bi_iter.bi_sector; in bio_check_eod()
1904 handle_bad_sector(bio); in bio_check_eod()
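
handle_bad_sector() and bio_check_eod() together reject I/O that runs past the end of the device: capacity in 512-byte sectors is i_size_read(bd_inode) >> 9, and a bio fails the check when its start sector or its span does not fit. A small stand-alone version of the range test (the dev_bytes parameter is a stand-in for the inode size):

    #include <stdbool.h>

    typedef unsigned long long sector_t;

    /*
     * True when [sector, sector + nr_sectors) does not fit on a device of
     * dev_bytes bytes; maxsector mirrors i_size_read(bd_inode) >> 9.
     */
    bool range_past_eod(unsigned long long dev_bytes, sector_t sector, unsigned int nr_sectors)
    {
        sector_t maxsector = dev_bytes >> 9;       /* capacity in 512-byte sectors */

        if (!maxsector || !nr_sectors)
            return false;                          /* empty device or empty bio: nothing to check */

        if (sector >= maxsector || maxsector - sector < nr_sectors)
            return true;                           /* starts past the end or runs over it */

        return false;
    }
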
1913 generic_make_request_checks(struct bio *bio) in generic_make_request_checks() argument
1916 int nr_sectors = bio_sectors(bio); in generic_make_request_checks()
1923 if (bio_check_eod(bio, nr_sectors)) in generic_make_request_checks()
1926 q = bdev_get_queue(bio->bi_bdev); in generic_make_request_checks()
1931 bdevname(bio->bi_bdev, b), in generic_make_request_checks()
1932 (long long) bio->bi_iter.bi_sector); in generic_make_request_checks()
1936 part = bio->bi_bdev->bd_part; in generic_make_request_checks()
1937 if (should_fail_request(part, bio->bi_iter.bi_size) || in generic_make_request_checks()
1939 bio->bi_iter.bi_size)) in generic_make_request_checks()
1946 blk_partition_remap(bio); in generic_make_request_checks()
1948 if (bio_check_eod(bio, nr_sectors)) in generic_make_request_checks()
1956 if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) { in generic_make_request_checks()
1957 bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA); in generic_make_request_checks()
1964 if ((bio->bi_rw & REQ_DISCARD) && in generic_make_request_checks()
1966 ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) { in generic_make_request_checks()
1971 if (bio->bi_rw & REQ_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) { in generic_make_request_checks()
1984 if (!blkcg_bio_issue_check(q, bio)) in generic_make_request_checks()
1987 trace_block_bio_queue(q, bio); in generic_make_request_checks()
1991 bio->bi_error = err; in generic_make_request_checks()
1992 bio_endio(bio); in generic_make_request_checks()
2020 blk_qc_t generic_make_request(struct bio *bio) in generic_make_request() argument
2025 if (!generic_make_request_checks(bio)) in generic_make_request()
2039 bio_list_add(current->bio_list, bio); in generic_make_request()
2057 BUG_ON(bio->bi_next); in generic_make_request()
2061 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in generic_make_request()
2065 ret = q->make_request_fn(q, bio); in generic_make_request()
2069 bio = bio_list_pop(current->bio_list); in generic_make_request()
2071 struct bio *bio_next = bio_list_pop(current->bio_list); in generic_make_request()
2073 bio_io_error(bio); in generic_make_request()
2074 bio = bio_next; in generic_make_request()
2076 } while (bio); in generic_make_request()
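
The generic_make_request() hits show the guard against unbounded recursion through stacking drivers: if a make_request_fn is already running on this task, the new bio is simply parked on current->bio_list; only the outermost caller installs the on-stack list and drains it in a do/while loop, popping and dispatching bios until it is empty. Below is a self-contained model of that on-stack queue; the types, the fake_make_request_fn() "driver" and the global active_list standing in for current->bio_list are all inventions for illustration.

    #include <stdio.h>
    #include <stddef.h>

    /* Simplified stand-ins: a bio carrying only an id and a next pointer. */
    struct bio      { int id; struct bio *next; };
    struct bio_list { struct bio *head, *tail; };

    static struct bio_list *active_list;       /* stands in for current->bio_list */

    static void list_add(struct bio_list *bl, struct bio *bio)
    {
        bio->next = NULL;
        if (bl->tail)
            bl->tail->next = bio;
        else
            bl->head = bio;
        bl->tail = bio;
    }

    static struct bio *list_pop(struct bio_list *bl)
    {
        struct bio *bio = bl->head;
        if (bio) {
            bl->head = bio->next;
            if (!bl->head)
                bl->tail = NULL;
        }
        return bio;
    }

    static void fake_make_request_fn(struct bio *bio);   /* invented "driver" */

    /* Models generic_make_request(): park if re-entered, otherwise drain. */
    static void make_request(struct bio *bio)
    {
        struct bio_list list = { NULL, NULL };

        if (active_list) {                 /* already inside make_request on this task: */
            list_add(active_list, bio);    /* park the bio instead of recursing         */
            return;
        }

        active_list = &list;               /* become the outermost, draining caller */
        do {
            fake_make_request_fn(bio);     /* may park more bios on 'list' */
            bio = list_pop(&list);
        } while (bio);
        active_list = NULL;
    }

    static void fake_make_request_fn(struct bio *bio)
    {
        printf("dispatch bio %d\n", bio->id);
        if (bio->id == 1) {                          /* pretend a stacking driver remaps bio 1 */
            static struct bio child = { 2, NULL };
            make_request(&child);                    /* parked, then drained by the outer loop */
        }
    }

    int main(void)
    {
        struct bio first = { 1, NULL };
        make_request(&first);              /* prints "dispatch bio 1" then "dispatch bio 2" */
        return 0;
    }
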
2094 blk_qc_t submit_bio(int rw, struct bio *bio) in submit_bio() argument
2096 bio->bi_rw |= rw; in submit_bio()
2102 if (bio_has_data(bio)) { in submit_bio()
2106 count = bdev_logical_block_size(bio->bi_bdev) >> 9; in submit_bio()
2108 count = bio_sectors(bio); in submit_bio()
2113 task_io_account_read(bio->bi_iter.bi_size); in submit_bio()
2122 (unsigned long long)bio->bi_iter.bi_sector, in submit_bio()
2123 bdevname(bio->bi_bdev, b), in submit_bio()
2128 return generic_make_request(bio); in submit_bio()
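
submit_bio() itself only does accounting before handing the bio to generic_make_request(): for data-carrying bios it derives a sector count, where a WRITE_SAME bio is charged as one logical block (bdev_logical_block_size() >> 9) and anything else as bio_sectors(bio), and reads are additionally charged to the task via task_io_account_read(). A rough sketch of just the sector-count rule; the flag bit and field names below are stand-ins:

    /* Stand-in flag and fields for illustration; not the kernel's definitions. */
    #define RW_WRITE_SAME  (1u << 1)

    struct bio_stub {
        unsigned int size_bytes;           /* stands in for bio->bi_iter.bi_size      */
        unsigned int logical_block_size;   /* stands in for bdev_logical_block_size() */
    };

    /* Sector count that submit_bio() would account for this bio. */
    unsigned int accounted_sectors(const struct bio_stub *bio, unsigned int rw)
    {
        if (rw & RW_WRITE_SAME)
            return bio->logical_block_size >> 9;   /* WRITE_SAME carries one block of payload */
        return bio->size_bytes >> 9;               /* otherwise the bio's full span in sectors */
    }
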
2240 struct bio *bio; in blk_rq_err_bytes() local
2252 for (bio = rq->bio; bio; bio = bio->bi_next) { in blk_rq_err_bytes()
2253 if ((bio->bi_rw & ff) != ff) in blk_rq_err_bytes()
2255 bytes += bio->bi_iter.bi_size; in blk_rq_err_bytes()
2572 if (!req->bio) in blk_update_request()
2624 while (req->bio) { in blk_update_request()
2625 struct bio *bio = req->bio; in blk_update_request() local
2626 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes); in blk_update_request()
2628 if (bio_bytes == bio->bi_iter.bi_size) in blk_update_request()
2629 req->bio = bio->bi_next; in blk_update_request()
2631 req_bio_endio(req, bio, bio_bytes, error); in blk_update_request()
2643 if (!req->bio) { in blk_update_request()
2662 req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK; in blk_update_request()
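
The blk_update_request() hits show how a partially completed request is wound forward: nr_bytes is consumed bio by bio, each bio that finishes completely is unhooked from req->bio before being completed through req_bio_endio(), and once no bios remain the request's data length drops to zero. A condensed, stand-alone version of that loop (types simplified; bio_complete() stands in for req_bio_endio()):

    #include <stdbool.h>

    /* Simplified stand-in types for illustration. */
    struct bio     { struct bio *bi_next; unsigned int bi_size; };
    struct request { struct bio *bio; unsigned int __data_len; };

    static void bio_complete(struct bio *bio, unsigned int bytes, int error)
    {
        bio->bi_size -= bytes;                     /* stands in for req_bio_endio()/bio_advance() */
        (void)error;
    }

    /* Returns true if the request still has work left after consuming nr_bytes. */
    bool update_request(struct request *req, int error, unsigned int nr_bytes)
    {
        if (!req->bio)
            return false;                          /* nothing outstanding */

        while (req->bio && nr_bytes) {
            struct bio *bio = req->bio;
            unsigned int bio_bytes = bio->bi_size < nr_bytes ? bio->bi_size : nr_bytes;

            if (bio_bytes == bio->bi_size)
                req->bio = bio->bi_next;           /* this bio is fully done: unhook it */

            bio_complete(bio, bio_bytes, error);
            nr_bytes -= bio_bytes;
        }

        if (!req->bio) {
            req->__data_len = 0;                   /* whole request completed */
            return false;
        }
        return true;                               /* partial completion: more bios remain */
    }
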
2967 struct bio *bio) in blk_rq_bio_prep() argument
2970 rq->cmd_flags |= bio->bi_rw & REQ_WRITE; in blk_rq_bio_prep()
2972 if (bio_has_data(bio)) in blk_rq_bio_prep()
2973 rq->nr_phys_segments = bio_phys_segments(q, bio); in blk_rq_bio_prep()
2975 rq->__data_len = bio->bi_iter.bi_size; in blk_rq_bio_prep()
2976 rq->bio = rq->biotail = bio; in blk_rq_bio_prep()
2978 if (bio->bi_bdev) in blk_rq_bio_prep()
2979 rq->rq_disk = bio->bi_bdev->bd_disk; in blk_rq_bio_prep()
3038 struct bio *bio; in blk_rq_unprep_clone() local
3040 while ((bio = rq->bio) != NULL) { in blk_rq_unprep_clone()
3041 rq->bio = bio->bi_next; in blk_rq_unprep_clone()
3043 bio_put(bio); in blk_rq_unprep_clone()
3085 int (*bio_ctr)(struct bio *, struct bio *, void *), in blk_rq_prep_clone() argument
3088 struct bio *bio, *bio_src; in blk_rq_prep_clone() local
3094 bio = bio_clone_fast(bio_src, gfp_mask, bs); in blk_rq_prep_clone()
3095 if (!bio) in blk_rq_prep_clone()
3098 if (bio_ctr && bio_ctr(bio, bio_src, data)) in blk_rq_prep_clone()
3101 if (rq->bio) { in blk_rq_prep_clone()
3102 rq->biotail->bi_next = bio; in blk_rq_prep_clone()
3103 rq->biotail = bio; in blk_rq_prep_clone()
3105 rq->bio = rq->biotail = bio; in blk_rq_prep_clone()
3113 if (bio) in blk_rq_prep_clone()
3114 bio_put(bio); in blk_rq_prep_clone()
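
blk_rq_prep_clone() walks the source request's bio chain, clones each bio (optionally letting a caller-supplied bio_ctr callback touch the pair), and links the clones onto the new request in order using the same bio/biotail tail-append the merge paths use; blk_rq_unprep_clone() undoes this by popping and putting each clone. A small sketch of just the chain building, with stand-in types and a hypothetical clone_one() helper in place of bio_clone_fast():

    #include <stdlib.h>

    /* Simplified stand-in types for illustration. */
    struct bio     { struct bio *bi_next; int payload; };
    struct request { struct bio *bio, *biotail; };

    /* Hypothetical helper standing in for bio_clone_fast(). */
    static struct bio *clone_one(const struct bio *src)
    {
        struct bio *c = malloc(sizeof(*c));
        if (c) {
            c->payload = src->payload;
            c->bi_next = NULL;
        }
        return c;
    }

    /* Clone every bio on src and append the clones to dst in order. */
    int prep_clone(struct request *dst, const struct request *src)
    {
        const struct bio *bio_src;

        for (bio_src = src->bio; bio_src; bio_src = bio_src->bi_next) {
            struct bio *bio = clone_one(bio_src);
            if (!bio)
                return -1;                     /* caller would unwind with unprep_clone() */

            if (dst->bio) {
                dst->biotail->bi_next = bio;   /* append behind the current tail */
                dst->biotail = bio;
            } else {
                dst->bio = dst->biotail = bio; /* first clone starts the chain */
            }
        }
        return 0;
    }

    /* Undo: pop each clone off the chain and release it, as blk_rq_unprep_clone() does. */
    void unprep_clone(struct request *rq)
    {
        struct bio *bio;

        while ((bio = rq->bio) != NULL) {
            rq->bio = bio->bi_next;
            free(bio);                         /* stands in for bio_put() */
        }
    }
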