Lines Matching refs:bio
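(Cross-reference listing: the leading number on each hit is the line number within the matched file, and "in func()" names the enclosing function. Judging by the identifiers, the hits come from bcache's bio helpers, drivers/md/bcache/io.c as it looked before Linux 4.3, when bch_generic_make_request() and the bio-splitting hooks were removed. The code blocks interleaved below reconstruct each function around its matched lines; they are era-appropriate sketches, not verbatim source, so check them against the actual tree.)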
14 static unsigned bch_bio_max_sectors(struct bio *bio) in bch_bio_max_sectors() argument
16 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in bch_bio_max_sectors()
21 if (bio->bi_rw & REQ_DISCARD) in bch_bio_max_sectors()
22 return min(bio_sectors(bio), q->limits.max_discard_sectors); in bch_bio_max_sectors()
24 bio_for_each_segment(bv, bio, iter) { in bch_bio_max_sectors()
26 .bi_bdev = bio->bi_bdev, in bch_bio_max_sectors()
27 .bi_sector = bio->bi_iter.bi_sector, in bch_bio_max_sectors()
29 .bi_rw = bio->bi_rw, in bch_bio_max_sectors()
47 ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9); in bch_bio_max_sectors()
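For context, a sketch of the whole of bch_bio_max_sectors() around the matched lines. It answers "how many sectors can one bio safely carry to this queue?": discards are capped only by max_discard_sectors, while data bios walk their segments until the queue's segment count, merge_bvec_fn (which still existed in this era), or max_sectors would be exceeded. The loop-internal names (bvm, seg) and the BIO_MAX_PAGES clamp are reconstructed and may differ from the tree:

/* Sketch, not verbatim source: reconstructed around the refs:bio hits. */
static unsigned bch_bio_max_sectors(struct bio *bio)
{
        struct request_queue *q = bdev_get_queue(bio->bi_bdev);
        struct bio_vec bv;
        struct bvec_iter iter;
        unsigned ret = 0, seg = 0;

        /* Discards carry no segments; only the discard cap applies. */
        if (bio->bi_rw & REQ_DISCARD)
                return min(bio_sectors(bio), q->limits.max_discard_sectors);

        bio_for_each_segment(bv, bio, iter) {
                struct bvec_merge_data bvm = {
                        .bi_bdev        = bio->bi_bdev,
                        .bi_sector      = bio->bi_iter.bi_sector,
                        .bi_size        = ret << 9,
                        .bi_rw          = bio->bi_rw,
                };

                /* Stop once the queue would refuse another segment. */
                if (seg == min_t(unsigned, BIO_MAX_PAGES,
                                 queue_max_segments(q)))
                        break;

                if (q->merge_bvec_fn &&
                    q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
                        break;

                seg++;
                ret += bv.bv_len >> 9;
        }

        ret = min(ret, queue_max_sectors(q));

        WARN_ON(!ret);
        /* Always make forward progress: at least the first bvec. */
        ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9);

        return ret;
}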
56 s->bio->bi_end_io = s->bi_end_io; in bch_bio_submit_split_done()
57 s->bio->bi_private = s->bi_private; in bch_bio_submit_split_done()
58 bio_endio_nodec(s->bio, 0); in bch_bio_submit_split_done()
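The three hits above are the body of bch_bio_submit_split_done(), the closure callback that runs once every split child has completed. A sketch, assuming the era's struct bio_split_hook (fields cl, p, bio, bi_end_io, bi_private):

/* Sketch: runs when the closure refcount drops to zero, i.e. after
 * every split child has completed. */
static void bch_bio_submit_split_done(struct closure *cl)
{
        struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);

        /* Restore the fields hijacked before splitting ... */
        s->bio->bi_end_io  = s->bi_end_io;
        s->bio->bi_private = s->bi_private;
        /* ... and complete the original bio without touching bi_remaining. */
        bio_endio_nodec(s->bio, 0);

        closure_debug_destroy(&s->cl);
        mempool_free(s, s->p->bio_split_hook);
}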
64 static void bch_bio_submit_split_endio(struct bio *bio, int error) in bch_bio_submit_split_endio() argument
66 struct closure *cl = bio->bi_private; in bch_bio_submit_split_endio()
70 clear_bit(BIO_UPTODATE, &s->bio->bi_flags); in bch_bio_submit_split_endio()
72 bio_put(bio); in bch_bio_submit_split_endio()
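And the per-child completion, bch_bio_submit_split_endio(), sketched around its hits: an error on any child clears BIO_UPTODATE on the parent, and the closure_put() here is what eventually triggers bch_bio_submit_split_done():

/* Sketch: per-child completion. The closure ref dropped here is the
 * one taken per child in bch_generic_make_request(). */
static void bch_bio_submit_split_endio(struct bio *bio, int error)
{
        struct closure *cl = bio->bi_private;
        struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);

        if (error)
                clear_bit(BIO_UPTODATE, &s->bio->bi_flags);

        bio_put(bio);
        closure_put(cl);
}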
76 void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p) in bch_generic_make_request() argument
79 struct bio *n; in bch_generic_make_request()
81 if (!bio_has_data(bio) && !(bio->bi_rw & REQ_DISCARD)) in bch_generic_make_request()
84 if (bio_sectors(bio) <= bch_bio_max_sectors(bio)) in bch_generic_make_request()
90 s->bio = bio; in bch_generic_make_request()
92 s->bi_end_io = bio->bi_end_io; in bch_generic_make_request()
93 s->bi_private = bio->bi_private; in bch_generic_make_request()
94 bio_get(bio); in bch_generic_make_request()
97 n = bio_next_split(bio, bch_bio_max_sectors(bio), in bch_generic_make_request()
105 } while (n != bio); in bch_generic_make_request()
109 generic_make_request(bio); in bch_generic_make_request()
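bch_generic_make_request() ties the pieces together. Sketched from the hits: bios that are empty (and not discards) or already small enough are submitted directly; anything larger hijacks bi_end_io/bi_private, takes an extra reference on the parent, and carves off children with bio_next_split() until the remainder (bio itself) goes out last. Names not in the hits (the submit label, the mempool_alloc of the hook) are reconstructed:

/* Sketch: the splitting front end. */
void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
{
        struct bio_split_hook *s;
        struct bio *n;

        /* Nothing to split for empty, non-discard bios (e.g. flushes). */
        if (!bio_has_data(bio) && !(bio->bi_rw & REQ_DISCARD))
                goto submit;

        if (bio_sectors(bio) <= bch_bio_max_sectors(bio))
                goto submit;

        s = mempool_alloc(p->bio_split_hook, GFP_NOIO);
        closure_init(&s->cl, NULL);

        s->bio          = bio;
        s->p            = p;
        s->bi_end_io    = bio->bi_end_io;
        s->bi_private   = bio->bi_private;
        bio_get(bio);

        do {
                n = bio_next_split(bio, bch_bio_max_sectors(bio),
                                   GFP_NOIO, s->p->bio_split);

                n->bi_end_io    = bch_bio_submit_split_endio;
                n->bi_private   = &s->cl;

                closure_get(&s->cl);
                generic_make_request(n);
        } while (n != bio);     /* bio_next_split() returns bio itself last */

        continue_at(&s->cl, bch_bio_submit_split_done, NULL);
        return;
submit:
        generic_make_request(bio);
}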
114 void bch_bbio_free(struct bio *bio, struct cache_set *c) in bch_bbio_free() argument
116 struct bbio *b = container_of(bio, struct bbio, bio); in bch_bbio_free()
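bch_bbio_free() is the whole story in two lines: a bbio is a bio embedded in struct bbio next to its bkey, so freeing just returns the container to the cache set's mempool. A sketch, assuming the era's c->bio_meta pool:

void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
        struct bbio *b = container_of(bio, struct bbio, bio);

        mempool_free(b, c->bio_meta);
}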
120 struct bio *bch_bbio_alloc(struct cache_set *c) in bch_bbio_alloc()
123 struct bio *bio = &b->bio; in bch_bbio_alloc() local
125 bio_init(bio); in bch_bbio_alloc()
126 bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET; in bch_bbio_alloc()
127 bio->bi_max_vecs = bucket_pages(c); in bch_bbio_alloc()
128 bio->bi_io_vec = bio->bi_inline_vecs; in bch_bbio_alloc()
130 return bio; in bch_bbio_alloc()
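Its counterpart, bch_bbio_alloc(), sketched around the hits: the bio is initialised by hand and pointed at the inline bvec array, sized to one bucket's worth of pages; since it comes from bio_init() rather than bio_alloc(), it must mark itself as belonging to no bvec pool (BIO_POOL_NONE):

struct bio *bch_bbio_alloc(struct cache_set *c)
{
        struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
        struct bio *bio = &b->bio;

        bio_init(bio);
        bio->bi_flags    |= BIO_POOL_NONE << BIO_POOL_OFFSET;
        bio->bi_max_vecs  = bucket_pages(c);
        bio->bi_io_vec    = bio->bi_inline_vecs;

        return bio;
}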
133 void __bch_submit_bbio(struct bio *bio, struct cache_set *c) in __bch_submit_bbio() argument
135 struct bbio *b = container_of(bio, struct bbio, bio); in __bch_submit_bbio()
137 bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); in __bch_submit_bbio()
138 bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev; in __bch_submit_bbio()
141 closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0)); in __bch_submit_bbio()
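__bch_submit_bbio() sketched around the hits: the device and sector come from pointer 0 of the bbio's key, and the submit timestamp feeds the congestion accounting further down. The submit_time_us field and local_clock_us() are the era's bcache helpers, reconstructed here:

void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
        struct bbio *b = container_of(bio, struct bbio, bio);

        /* Aim the bio at the device/offset named by the key. */
        bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
        bio->bi_bdev           = PTR_CACHE(c, &b->key, 0)->bdev;

        b->submit_time_us = local_clock_us();
        closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
}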
144 void bch_submit_bbio(struct bio *bio, struct cache_set *c, in bch_submit_bbio() argument
147 struct bbio *b = container_of(bio, struct bbio, bio); in bch_submit_bbio()
149 __bch_submit_bbio(bio, c); in bch_submit_bbio()
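bch_submit_bbio() is the keyed wrapper; the one body line not matched by refs:bio copies a single pointer of the caller's bkey into the bbio before submitting. A sketch:

void bch_submit_bbio(struct bio *bio, struct cache_set *c,
                     struct bkey *k, unsigned ptr)
{
        struct bbio *b = container_of(bio, struct bbio, bio);

        bch_bkey_copy_single_ptr(&b->key, k, ptr);
        __bch_submit_bbio(bio, c);
}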
206 void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio, in bch_bbio_count_io_errors() argument
209 struct bbio *b = container_of(bio, struct bbio, bio); in bch_bbio_count_io_errors()
212 unsigned threshold = bio->bi_rw & REQ_WRITE in bch_bbio_count_io_errors()
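bch_bbio_count_io_errors() is where bi_rw decides which latency threshold applies. Sketched from the hits plus the era's congestion logic, which is reconstructed from memory and worth double-checking against the tree: IOs slower than the (sysfs-tunable) read/write threshold push c->congested negative, fast ones let it drift back, and error counting is delegated to bch_count_io_errors() on the key's cache device:

void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
                              int error, const char *m)
{
        struct bbio *b = container_of(bio, struct bbio, bio);
        struct cache *ca = PTR_CACHE(c, &b->key, 0);

        unsigned threshold = bio->bi_rw & REQ_WRITE
                ? c->congested_write_threshold_us
                : c->congested_read_threshold_us;

        if (threshold) {
                unsigned t = local_clock_us();
                int us = t - b->submit_time_us;
                int congested = atomic_read(&c->congested);

                if (us > (int) threshold) {
                        int ms = us / 1024;

                        c->congested_last_us = t;
                        ms = min(ms, CONGESTED_MAX + congested);
                        atomic_sub(ms, &c->congested);
                } else if (congested < 0)
                        atomic_inc(&c->congested);
        }

        bch_count_io_errors(ca, error, m);
}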
235 void bch_bbio_endio(struct cache_set *c, struct bio *bio, in bch_bbio_endio() argument
238 struct closure *cl = bio->bi_private; in bch_bbio_endio()
240 bch_bbio_count_io_errors(c, bio, error, m); in bch_bbio_endio()
241 bio_put(bio); in bch_bbio_endio()
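Finally bch_bbio_endio(), the common completion path for keyed bios, sketched around its hits: account the IO, drop the bio reference, and release the closure that was stored in bi_private at submit time:

void bch_bbio_endio(struct cache_set *c, struct bio *bio,
                    int error, const char *m)
{
        struct closure *cl = bio->bi_private;

        bch_bbio_count_io_errors(c, bio, error, m);

        bio_put(bio);
        closure_put(cl);
}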