Lines matching refs: bio — identifier cross-reference into the legacy (single-queue) block-layer core, block/blk-core.c; each entry shows the source line number, the matching line, and the function it occurs in.
117 static void req_bio_endio(struct request *rq, struct bio *bio, in req_bio_endio() argument
121 clear_bit(BIO_UPTODATE, &bio->bi_flags); in req_bio_endio()
122 else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) in req_bio_endio()
126 set_bit(BIO_QUIET, &bio->bi_flags); in req_bio_endio()
128 bio_advance(bio, nbytes); in req_bio_endio()
131 if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ)) in req_bio_endio()
132 bio_endio(bio, error); in req_bio_endio()
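req_bio_endio() above is the per-bio completion step of a request: it clears BIO_UPTODATE on error, advances the bio by the completed byte count, and calls bio_endio() once bi_iter.bi_size reaches zero (flush-sequence requests excluded). That final bio_endio(bio, error) lands in whatever bi_end_io callback the submitter installed. A minimal sketch of such a callback, using the two-argument bio_end_io signature of this kernel generation; my_ctx and my_end_io are hypothetical names:

#include <linux/bio.h>
#include <linux/completion.h>

struct my_ctx {
        struct completion done;
        int error;
};

/* installed by the submitter via bio->bi_end_io = my_end_io and
 * bio->bi_private = &ctx before the bio is submitted */
static void my_end_io(struct bio *bio, int error)
{
        struct my_ctx *ctx = bio->bi_private;

        ctx->error = error;     /* 0 on success, negative errno on failure */
        complete(&ctx->done);   /* submitter still holds its bio reference */
}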
147 rq->bio, rq->biotail, blk_rq_bytes(rq)); in blk_dump_rq_flags()
737 static void blk_queue_bio(struct request_queue *q, struct bio *bio);
929 static bool blk_rq_should_init_elevator(struct bio *bio) in blk_rq_should_init_elevator() argument
931 if (!bio) in blk_rq_should_init_elevator()
938 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) in blk_rq_should_init_elevator()
951 static struct io_context *rq_ioc(struct bio *bio) in rq_ioc() argument
954 if (bio && bio->bi_ioc) in rq_ioc()
955 return bio->bi_ioc; in rq_ioc()
975 struct bio *bio, gfp_t gfp_mask) in __get_request() argument
980 struct io_context *ioc = rq_ioc(bio); in __get_request()
1045 if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) { in __get_request()
1075 if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) in __get_request()
1092 trace_block_getrq(q, bio, rw_flags & 1); in __get_request()
1152 struct bio *bio, gfp_t gfp_mask) in get_request() argument
1159 rl = blk_get_rl(q, bio); /* transferred to @rq on success */ in get_request()
1161 rq = __get_request(rl, rw_flags, bio, gfp_mask); in get_request()
1174 trace_block_sleeprq(q, bio, rw_flags & 1); in get_request()
1251 struct request *blk_make_request(struct request_queue *q, struct bio *bio, in blk_make_request() argument
1254 struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask); in blk_make_request()
1261 for_each_bio(bio) { in blk_make_request()
1262 struct bio *bounce_bio = bio; in blk_make_request()
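blk_make_request() turns an already-built bio chain into a struct request, bouncing each bio as needed, and is mainly used for passthrough requests. A sketch of caller-side usage, assuming a prepared bio chain; my_issue_bio_chain is a hypothetical helper and any protocol-specific command setup on the request is omitted:

#include <linux/blkdev.h>

static int my_issue_bio_chain(struct request_queue *q, struct gendisk *disk,
                              struct bio *bio_chain)
{
        struct request *rq;
        int ret;

        rq = blk_make_request(q, bio_chain, GFP_KERNEL);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        /* synchronous execution; 0 == insert at the tail of the queue */
        ret = blk_execute_rq(q, disk, rq, 0);
        blk_put_request(rq);
        return ret;
}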
1287 rq->bio = rq->biotail = NULL; in blk_rq_set_block_pc()
1395 WARN_ON(req->bio != NULL); in __blk_put_request()
1447 struct bio *bio = rq->bio; in blk_add_request_payload() local
1449 bio->bi_io_vec->bv_page = page; in blk_add_request_payload()
1450 bio->bi_io_vec->bv_offset = 0; in blk_add_request_payload()
1451 bio->bi_io_vec->bv_len = len; in blk_add_request_payload()
1453 bio->bi_iter.bi_size = len; in blk_add_request_payload()
1454 bio->bi_vcnt = 1; in blk_add_request_payload()
1455 bio->bi_phys_segments = 1; in blk_add_request_payload()
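blk_add_request_payload() points the request's single bio at a driver-supplied page, which is how commands such as discard or WRITE SAME get a data payload that the original bio never carried. A sketch of how a prep-time hook might use it, loosely modelled on discard setup in SCSI-disk-style drivers; my_prep_discard is hypothetical and freeing the page in the completion path is omitted:

#include <linux/blkdev.h>
#include <linux/gfp.h>

static int my_prep_discard(struct request *rq, unsigned int payload_len)
{
        struct page *page = alloc_page(GFP_ATOMIC | __GFP_ZERO);

        if (!page)
                return BLKPREP_DEFER;   /* retry the prep later */

        blk_add_request_payload(rq, page, payload_len);
        return BLKPREP_OK;
}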
1463 struct bio *bio) in bio_attempt_back_merge() argument
1465 const int ff = bio->bi_rw & REQ_FAILFAST_MASK; in bio_attempt_back_merge()
1467 if (!ll_back_merge_fn(q, req, bio)) in bio_attempt_back_merge()
1470 trace_block_bio_backmerge(q, req, bio); in bio_attempt_back_merge()
1475 req->biotail->bi_next = bio; in bio_attempt_back_merge()
1476 req->biotail = bio; in bio_attempt_back_merge()
1477 req->__data_len += bio->bi_iter.bi_size; in bio_attempt_back_merge()
1478 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); in bio_attempt_back_merge()
1485 struct bio *bio) in bio_attempt_front_merge() argument
1487 const int ff = bio->bi_rw & REQ_FAILFAST_MASK; in bio_attempt_front_merge()
1489 if (!ll_front_merge_fn(q, req, bio)) in bio_attempt_front_merge()
1492 trace_block_bio_frontmerge(q, req, bio); in bio_attempt_front_merge()
1497 bio->bi_next = req->bio; in bio_attempt_front_merge()
1498 req->bio = bio; in bio_attempt_front_merge()
1500 req->__sector = bio->bi_iter.bi_sector; in bio_attempt_front_merge()
1501 req->__data_len += bio->bi_iter.bi_size; in bio_attempt_front_merge()
1502 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); in bio_attempt_front_merge()
1527 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, in blk_attempt_plug_merge() argument
1551 if (rq->q != q || !blk_rq_merge_ok(rq, bio)) in blk_attempt_plug_merge()
1554 el_ret = blk_try_merge(rq, bio); in blk_attempt_plug_merge()
1556 ret = bio_attempt_back_merge(q, rq, bio); in blk_attempt_plug_merge()
1560 ret = bio_attempt_front_merge(q, rq, bio); in blk_attempt_plug_merge()
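bio_attempt_back_merge()/bio_attempt_front_merge() splice a bio onto an existing request, and blk_attempt_plug_merge() tries exactly that against requests still sitting on the current task's plug list, before the queue lock is taken. From a submitter's point of view, giving those merges a chance looks like this minimal sketch; my_submit_batch is a hypothetical helper:

#include <linux/blkdev.h>
#include <linux/fs.h>

static void my_submit_batch(struct bio **bios, int nr)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);          /* start collecting on the plug list */
        for (i = 0; i < nr; i++)
                submit_bio(WRITE, bios[i]);
        blk_finish_plug(&plug);         /* flush the plug list to the queue */
}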
1569 void init_request_from_bio(struct request *req, struct bio *bio) in init_request_from_bio() argument
1573 req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK; in init_request_from_bio()
1574 if (bio->bi_rw & REQ_RAHEAD) in init_request_from_bio()
1578 req->__sector = bio->bi_iter.bi_sector; in init_request_from_bio()
1579 req->ioprio = bio_prio(bio); in init_request_from_bio()
1580 blk_rq_bio_prep(req->q, req, bio); in init_request_from_bio()
1583 static void blk_queue_bio(struct request_queue *q, struct bio *bio) in blk_queue_bio() argument
1585 const bool sync = !!(bio->bi_rw & REQ_SYNC); in blk_queue_bio()
1596 blk_queue_bounce(q, &bio); in blk_queue_bio()
1598 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { in blk_queue_bio()
1599 bio_endio(bio, -EIO); in blk_queue_bio()
1603 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) { in blk_queue_bio()
1614 blk_attempt_plug_merge(q, bio, &request_count)) in blk_queue_bio()
1619 el_ret = elv_merge(q, &req, bio); in blk_queue_bio()
1621 if (bio_attempt_back_merge(q, req, bio)) { in blk_queue_bio()
1622 elv_bio_merged(q, req, bio); in blk_queue_bio()
1628 if (bio_attempt_front_merge(q, req, bio)) { in blk_queue_bio()
1629 elv_bio_merged(q, req, bio); in blk_queue_bio()
1642 rw_flags = bio_data_dir(bio); in blk_queue_bio()
1650 req = get_request(q, rw_flags, bio, GFP_NOIO); in blk_queue_bio()
1652 bio_endio(bio, PTR_ERR(req)); /* @q is dead */ in blk_queue_bio()
1662 init_request_from_bio(req, bio); in blk_queue_bio()
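blk_queue_bio() is the default make_request_fn for request-based drivers: bounce the bio, run integrity prep, special-case REQ_FLUSH/REQ_FUA, try plug and elevator merges, and only then allocate a struct request via get_request() and initialise it with init_request_from_bio(). Bio-based drivers skip all of this by installing their own make_request_fn; a minimal sketch with the void-returning prototype this kernel generation uses (my_make_request and my_alloc_queue are hypothetical):

#include <linux/blkdev.h>

static void my_make_request(struct request_queue *q, struct bio *bio)
{
        /* handle the bio directly, then complete it */
        bio_endio(bio, 0);
}

static struct request_queue *my_alloc_queue(void)
{
        struct request_queue *q = blk_alloc_queue(GFP_KERNEL);

        if (q)
                blk_queue_make_request(q, my_make_request);
        return q;
}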
1695 static inline void blk_partition_remap(struct bio *bio) in blk_partition_remap() argument
1697 struct block_device *bdev = bio->bi_bdev; in blk_partition_remap()
1699 if (bio_sectors(bio) && bdev != bdev->bd_contains) { in blk_partition_remap()
1702 bio->bi_iter.bi_sector += p->start_sect; in blk_partition_remap()
1703 bio->bi_bdev = bdev->bd_contains; in blk_partition_remap()
1705 trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio, in blk_partition_remap()
1707 bio->bi_iter.bi_sector - p->start_sect); in blk_partition_remap()
1711 static void handle_bad_sector(struct bio *bio) in handle_bad_sector() argument
1717 bdevname(bio->bi_bdev, b), in handle_bad_sector()
1718 bio->bi_rw, in handle_bad_sector()
1719 (unsigned long long)bio_end_sector(bio), in handle_bad_sector()
1720 (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9)); in handle_bad_sector()
1722 set_bit(BIO_EOF, &bio->bi_flags); in handle_bad_sector()
1763 static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors) in bio_check_eod() argument
1771 maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; in bio_check_eod()
1773 sector_t sector = bio->bi_iter.bi_sector; in bio_check_eod()
1781 handle_bad_sector(bio); in bio_check_eod()
1790 generic_make_request_checks(struct bio *bio) in generic_make_request_checks() argument
1793 int nr_sectors = bio_sectors(bio); in generic_make_request_checks()
1800 if (bio_check_eod(bio, nr_sectors)) in generic_make_request_checks()
1803 q = bdev_get_queue(bio->bi_bdev); in generic_make_request_checks()
1808 bdevname(bio->bi_bdev, b), in generic_make_request_checks()
1809 (long long) bio->bi_iter.bi_sector); in generic_make_request_checks()
1813 if (likely(bio_is_rw(bio) && in generic_make_request_checks()
1816 bdevname(bio->bi_bdev, b), in generic_make_request_checks()
1817 bio_sectors(bio), in generic_make_request_checks()
1822 part = bio->bi_bdev->bd_part; in generic_make_request_checks()
1823 if (should_fail_request(part, bio->bi_iter.bi_size) || in generic_make_request_checks()
1825 bio->bi_iter.bi_size)) in generic_make_request_checks()
1832 blk_partition_remap(bio); in generic_make_request_checks()
1834 if (bio_check_eod(bio, nr_sectors)) in generic_make_request_checks()
1842 if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) { in generic_make_request_checks()
1843 bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA); in generic_make_request_checks()
1850 if ((bio->bi_rw & REQ_DISCARD) && in generic_make_request_checks()
1852 ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) { in generic_make_request_checks()
1857 if (bio->bi_rw & REQ_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) { in generic_make_request_checks()
1870 if (blk_throtl_bio(q, bio)) in generic_make_request_checks()
1873 trace_block_bio_queue(q, bio); in generic_make_request_checks()
1877 bio_endio(bio, err); in generic_make_request_checks()
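generic_make_request_checks() is where unsupported bits are dropped or rejected: REQ_FLUSH/REQ_FUA are stripped when the queue advertises no flush capability, and discard, secure-discard and WRITE SAME bios are ended with -EOPNOTSUPP when the queue does not support them. A sketch of the queue setup those checks consult, assuming a driver that does have a volatile write cache and discard support (my_advertise_caps is hypothetical; blk_queue_flush() is era-specific and was later replaced by blk_queue_write_cache()):

#include <linux/kernel.h>
#include <linux/blkdev.h>

static void my_advertise_caps(struct request_queue *q)
{
        /* volatile write cache + FUA: keeps REQ_FLUSH/REQ_FUA bios intact */
        blk_queue_flush(q, REQ_FLUSH | REQ_FUA);

        /* discard: without QUEUE_FLAG_DISCARD the REQ_DISCARD branch above
         * ends the bio with -EOPNOTSUPP */
        blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
}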
1905 void generic_make_request(struct bio *bio) in generic_make_request() argument
1909 if (!generic_make_request_checks(bio)) in generic_make_request()
1923 bio_list_add(current->bio_list, bio); in generic_make_request()
1941 BUG_ON(bio->bi_next); in generic_make_request()
1945 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in generic_make_request()
1947 q->make_request_fn(q, bio); in generic_make_request()
1949 bio = bio_list_pop(current->bio_list); in generic_make_request()
1950 } while (bio); in generic_make_request()
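generic_make_request() is the recursion-safe entry point: a bio submitted from inside a make_request_fn is appended to current->bio_list and processed iteratively by the outermost caller instead of recursing down the stack of stacked devices. A sketch of the kind of stacking-driver resubmission that relies on this, with hypothetical names:

#include <linux/blkdev.h>

static void my_remap_and_resubmit(struct bio *bio,
                                  struct block_device *lower_bdev,
                                  sector_t offset)
{
        /* redirect the bio to the lower device ... */
        bio->bi_bdev = lower_bdev;
        bio->bi_iter.bi_sector += offset;

        /* ... and resubmit; if this runs inside a make_request_fn it only
         * queues the bio on current->bio_list */
        generic_make_request(bio);
}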
1965 void submit_bio(int rw, struct bio *bio) in submit_bio() argument
1967 bio->bi_rw |= rw; in submit_bio()
1973 if (bio_has_data(bio)) { in submit_bio()
1977 count = bdev_logical_block_size(bio->bi_bdev) >> 9; in submit_bio()
1979 count = bio_sectors(bio); in submit_bio()
1984 task_io_account_read(bio->bi_iter.bi_size); in submit_bio()
1993 (unsigned long long)bio->bi_iter.bi_sector, in submit_bio()
1994 bdevname(bio->bi_bdev, b), in submit_bio()
1999 generic_make_request(bio); in submit_bio()
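submit_bio() ORs the caller's rw bits into bi_rw, does the per-task read accounting, and hands the bio to generic_make_request(). A sketch of a typical synchronous single-page read; my_read_page is hypothetical, and submit_bio_wait() installs its own bi_end_io and sleeps until the bio completes:

#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

static int my_read_page(struct block_device *bdev, sector_t sector,
                        struct page *page)
{
        struct bio *bio = bio_alloc(GFP_KERNEL, 1);
        int ret;

        if (!bio)
                return -ENOMEM;

        bio->bi_bdev = bdev;
        bio->bi_iter.bi_sector = sector;
        bio_add_page(bio, page, PAGE_SIZE, 0);

        ret = submit_bio_wait(READ, bio);
        bio_put(bio);
        return ret;
}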
2118 struct bio *bio; in blk_rq_err_bytes() local
2130 for (bio = rq->bio; bio; bio = bio->bi_next) { in blk_rq_err_bytes()
2131 if ((bio->bi_rw & ff) != ff) in blk_rq_err_bytes()
2133 bytes += bio->bi_iter.bi_size; in blk_rq_err_bytes()
2450 if (!req->bio) in blk_update_request()
2502 while (req->bio) { in blk_update_request()
2503 struct bio *bio = req->bio; in blk_update_request() local
2504 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes); in blk_update_request()
2506 if (bio_bytes == bio->bi_iter.bi_size) in blk_update_request()
2507 req->bio = bio->bi_next; in blk_update_request()
2509 req_bio_endio(req, bio, bio_bytes, error); in blk_update_request()
2521 if (!req->bio) { in blk_update_request()
2540 req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK; in blk_update_request()
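blk_update_request() walks req->bio, ending each fully-completed bio through req_bio_endio() and keeping the request alive while bytes remain, which is how partial completions work. Drivers normally reach it through the blk_end_request() family; a minimal sketch of a completion path, with hypothetical names (use the __blk_end_request* variants instead if the queue lock is already held):

#include <linux/blkdev.h>

static void my_complete(struct request *rq, int error, unsigned int done_bytes)
{
        /* returns true while part of the request is still outstanding */
        if (blk_end_request(rq, error, done_bytes))
                return;         /* partially completed, keep the request */

        /* all bios have been ended and the request has been finished */
}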
2845 struct bio *bio) in blk_rq_bio_prep() argument
2848 rq->cmd_flags |= bio->bi_rw & REQ_WRITE; in blk_rq_bio_prep()
2850 if (bio_has_data(bio)) in blk_rq_bio_prep()
2851 rq->nr_phys_segments = bio_phys_segments(q, bio); in blk_rq_bio_prep()
2853 rq->__data_len = bio->bi_iter.bi_size; in blk_rq_bio_prep()
2854 rq->bio = rq->biotail = bio; in blk_rq_bio_prep()
2856 if (bio->bi_bdev) in blk_rq_bio_prep()
2857 rq->rq_disk = bio->bi_bdev->bd_disk; in blk_rq_bio_prep()
2916 struct bio *bio; in blk_rq_unprep_clone() local
2918 while ((bio = rq->bio) != NULL) { in blk_rq_unprep_clone()
2919 rq->bio = bio->bi_next; in blk_rq_unprep_clone()
2921 bio_put(bio); in blk_rq_unprep_clone()
2963 int (*bio_ctr)(struct bio *, struct bio *, void *), in blk_rq_prep_clone() argument
2966 struct bio *bio, *bio_src; in blk_rq_prep_clone() local
2972 bio = bio_clone_fast(bio_src, gfp_mask, bs); in blk_rq_prep_clone()
2973 if (!bio) in blk_rq_prep_clone()
2976 if (bio_ctr && bio_ctr(bio, bio_src, data)) in blk_rq_prep_clone()
2979 if (rq->bio) { in blk_rq_prep_clone()
2980 rq->biotail->bi_next = bio; in blk_rq_prep_clone()
2981 rq->biotail = bio; in blk_rq_prep_clone()
2983 rq->bio = rq->biotail = bio; in blk_rq_prep_clone()
2991 if (bio) in blk_rq_prep_clone()
2992 bio_put(bio); in blk_rq_prep_clone()
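blk_rq_prep_clone() builds the bio side of a request clone by fast-cloning each bio of the source request onto the clone (optionally running a per-bio constructor), and blk_rq_unprep_clone() drops those clones again; request-based stacking drivers such as request-based dm are the intended users. A minimal sketch under that assumption, with hypothetical names and no per-bio constructor:

#include <linux/blkdev.h>

static int my_setup_clone(struct request *clone, struct request *rq_src,
                          struct bio_set *bs)
{
        /* chain fast-cloned copies of rq_src's bios onto 'clone' */
        return blk_rq_prep_clone(clone, rq_src, bs, GFP_ATOMIC,
                                 NULL /* no per-bio constructor */, NULL);
}

static void my_teardown_clone(struct request *clone)
{
        /* release the cloned bios again, e.g. before freeing 'clone' */
        blk_rq_unprep_clone(clone);
}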