Lines matching refs: bio — lines in drivers/md/raid10.c that reference bio, shown as source line number, the matching text, and the enclosing function.

83 #define IO_BLOCKED ((struct bio *)1)
88 #define IO_MADE_GOOD ((struct bio *)2)
90 #define BIO_SPECIAL(bio) ((unsigned long)bio <= 2) argument
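The three entries above capture raid10's sentinel-pointer convention: a per-device bio slot may hold the small integers 1 (IO_BLOCKED) or 2 (IO_MADE_GOOD) instead of a real struct bio, and BIO_SPECIAL() screens those values out before anything like bio_put() runs (see put_all_bios further down). A minimal user-space sketch of the same pattern, with stand-in types and hypothetical names rather than the kernel code itself:

    #include <stdio.h>

    struct bio { int dummy; };                            /* stand-in for the kernel type */

    #define IO_BLOCKED   ((struct bio *)1)                /* slot deliberately blocked    */
    #define IO_MADE_GOOD ((struct bio *)2)                /* write succeeded after retry  */
    #define BIO_SPECIAL(bio) ((unsigned long)(bio) <= 2)  /* NULL or a sentinel value     */

    static void put_slot(struct bio **slot)
    {
        /* Only genuine bios may be released; sentinels are simply cleared. */
        if (!BIO_SPECIAL(*slot))
            printf("releasing real bio at %p\n", (void *)*slot);
        *slot = NULL;
    }

    int main(void)
    {
        struct bio real;
        struct bio *slots[3] = { &real, IO_BLOCKED, IO_MADE_GOOD };

        for (int i = 0; i < 3; i++)
            put_slot(&slots[i]);
        return 0;
    }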
104 static void end_reshape_write(struct bio *bio, int error);
142 struct bio *bio; in r10buf_pool_alloc() local
160 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES); in r10buf_pool_alloc()
161 if (!bio) in r10buf_pool_alloc()
163 r10_bio->devs[j].bio = bio; in r10buf_pool_alloc()
166 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES); in r10buf_pool_alloc()
167 if (!bio) in r10buf_pool_alloc()
169 r10_bio->devs[j].repl_bio = bio; in r10buf_pool_alloc()
176 struct bio *rbio = r10_bio->devs[j].repl_bio; in r10buf_pool_alloc()
177 bio = r10_bio->devs[j].bio; in r10buf_pool_alloc()
183 struct bio *rbio = r10_bio->devs[0].bio; in r10buf_pool_alloc()
191 bio->bi_io_vec[i].bv_page = page; in r10buf_pool_alloc()
201 safe_put_page(bio->bi_io_vec[i-1].bv_page); in r10buf_pool_alloc()
204 safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page); in r10buf_pool_alloc()
208 if (r10_bio->devs[j].bio) in r10buf_pool_alloc()
209 bio_put(r10_bio->devs[j].bio); in r10buf_pool_alloc()
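Lines 142–209 above are r10buf_pool_alloc(): one bio (plus an optional replacement bio) per copy, then RESYNC_PAGES pages attached through bi_io_vec, with everything already allocated unwound when a later allocation fails. A simplified user-space sketch of that allocate-then-roll-back shape, with hypothetical sizes; the rollback is flattened to "free whatever is non-NULL" rather than the kernel's staged cleanup labels:

    #include <stdlib.h>
    #include <stdio.h>

    #define NDEVS  4
    #define NPAGES 16                                   /* stands in for RESYNC_PAGES */

    int main(void)
    {
        void *pages[NDEVS][NPAGES] = { { NULL } };      /* everything starts NULL */

        for (int j = 0; j < NDEVS; j++)
            for (int i = 0; i < NPAGES; i++) {
                pages[j][i] = malloc(4096);
                if (!pages[j][i])
                    goto out_free;                      /* unwind everything allocated so far */
            }

        printf("all resync buffers allocated\n");
        /* ... the real code would attach these pages to the per-device bios ... */

    out_free:
        for (int j = 0; j < NDEVS; j++)
            for (int i = 0; i < NPAGES; i++)
                free(pages[j][i]);                      /* free(NULL) is a harmless no-op */
        return 0;
    }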
225 struct bio *bio = r10bio->devs[j].bio; in r10buf_pool_free() local
226 if (bio) { in r10buf_pool_free()
228 safe_put_page(bio->bi_io_vec[i].bv_page); in r10buf_pool_free()
229 bio->bi_io_vec[i].bv_page = NULL; in r10buf_pool_free()
231 bio_put(bio); in r10buf_pool_free()
233 bio = r10bio->devs[j].repl_bio; in r10buf_pool_free()
234 if (bio) in r10buf_pool_free()
235 bio_put(bio); in r10buf_pool_free()
245 struct bio **bio = & r10_bio->devs[i].bio; in put_all_bios() local
246 if (!BIO_SPECIAL(*bio)) in put_all_bios()
247 bio_put(*bio); in put_all_bios()
248 *bio = NULL; in put_all_bios()
249 bio = &r10_bio->devs[i].repl_bio; in put_all_bios()
250 if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio)) in put_all_bios()
251 bio_put(*bio); in put_all_bios()
252 *bio = NULL; in put_all_bios()
297 struct bio *bio = r10_bio->master_bio; in raid_end_bio_io() local
301 if (bio->bi_phys_segments) { in raid_end_bio_io()
304 bio->bi_phys_segments--; in raid_end_bio_io()
305 done = (bio->bi_phys_segments == 0); in raid_end_bio_io()
310 clear_bit(BIO_UPTODATE, &bio->bi_flags); in raid_end_bio_io()
312 bio_endio(bio, 0); in raid_end_bio_io()
337 struct bio *bio, int *slotp, int *replp) in find_bio_disk() argument
343 if (r10_bio->devs[slot].bio == bio) in find_bio_disk()
345 if (r10_bio->devs[slot].repl_bio == bio) { in find_bio_disk()
361 static void raid10_end_read_request(struct bio *bio, int error) in raid10_end_read_request() argument
363 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); in raid10_end_read_request()
364 struct r10bio *r10_bio = bio->bi_private; in raid10_end_read_request()
441 static void raid10_end_write_request(struct bio *bio, int error) in raid10_end_write_request() argument
443 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); in raid10_end_write_request()
444 struct r10bio *r10_bio = bio->bi_private; in raid10_end_write_request()
451 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in raid10_end_write_request()
507 bio_put(bio); in raid10_end_write_request()
511 r10_bio->devs[slot].bio = IO_MADE_GOOD; in raid10_end_write_request()
819 if (r10_bio->devs[slot].bio == IO_BLOCKED) in read_balance()
945 struct bio *bio; in flush_pending_writes() local
946 bio = bio_list_get(&conf->pending_bio_list); in flush_pending_writes()
954 while (bio) { /* submit pending writes */ in flush_pending_writes()
955 struct bio *next = bio->bi_next; in flush_pending_writes()
956 bio->bi_next = NULL; in flush_pending_writes()
957 if (unlikely((bio->bi_rw & REQ_DISCARD) && in flush_pending_writes()
958 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) in flush_pending_writes()
960 bio_endio(bio, 0); in flush_pending_writes()
962 generic_make_request(bio); in flush_pending_writes()
963 bio = next; in flush_pending_writes()
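Lines 945–963 are flush_pending_writes(): the whole conf->pending_bio_list is detached in one go with bio_list_get() and the resulting chain is walked through bi_next, each bio being unlinked before it is handed to generic_make_request() (discards are completed immediately when the target queue cannot handle them). A user-space sketch of that detach-and-drain pattern, using stand-in types rather than the real block-layer API:

    #include <stdio.h>
    #include <stddef.h>

    /* Minimal stand-ins for the kernel's bio and bio_list, for illustration only. */
    struct bio { int id; struct bio *bi_next; };
    struct bio_list { struct bio *head; };

    /* Rough analogue of bio_list_get(): hand back the chain and empty the list. */
    static struct bio *take_all(struct bio_list *bl)
    {
        struct bio *b = bl->head;
        bl->head = NULL;
        return b;
    }

    static void submit(struct bio *b) { printf("submit bio %d\n", b->id); }

    int main(void)
    {
        struct bio b2 = { 2, NULL }, b1 = { 1, &b2 };
        struct bio_list pending = { &b1 };

        struct bio *bio = take_all(&pending);
        while (bio) {                        /* submit pending writes */
            struct bio *next = bio->bi_next;
            bio->bi_next = NULL;             /* unlink before submitting */
            submit(bio);                     /* generic_make_request() in the kernel */
            bio = next;
        }
        return 0;
    }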
1112 struct bio *bio; in raid10_unplug() local
1126 bio = bio_list_get(&plug->pending); in raid10_unplug()
1130 while (bio) { /* submit pending writes */ in raid10_unplug()
1131 struct bio *next = bio->bi_next; in raid10_unplug()
1132 bio->bi_next = NULL; in raid10_unplug()
1133 if (unlikely((bio->bi_rw & REQ_DISCARD) && in raid10_unplug()
1134 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) in raid10_unplug()
1136 bio_endio(bio, 0); in raid10_unplug()
1138 generic_make_request(bio); in raid10_unplug()
1139 bio = next; in raid10_unplug()
1144 static void __make_request(struct mddev *mddev, struct bio *bio) in __make_request() argument
1148 struct bio *read_bio; in __make_request()
1150 const int rw = bio_data_dir(bio); in __make_request()
1151 const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); in __make_request()
1152 const unsigned long do_fua = (bio->bi_rw & REQ_FUA); in __make_request()
1153 const unsigned long do_discard = (bio->bi_rw in __make_request()
1155 const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME); in __make_request()
1171 sectors = bio_sectors(bio); in __make_request()
1173 bio->bi_iter.bi_sector < conf->reshape_progress && in __make_request()
1174 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { in __make_request()
1180 conf->reshape_progress <= bio->bi_iter.bi_sector || in __make_request()
1181 conf->reshape_progress >= bio->bi_iter.bi_sector + in __make_request()
1186 bio_data_dir(bio) == WRITE && in __make_request()
1188 ? (bio->bi_iter.bi_sector < conf->reshape_safe && in __make_request()
1189 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) in __make_request()
1190 : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe && in __make_request()
1191 bio->bi_iter.bi_sector < conf->reshape_progress))) { in __make_request()
1205 r10_bio->master_bio = bio; in __make_request()
1209 r10_bio->sector = bio->bi_iter.bi_sector; in __make_request()
1219 bio->bi_phys_segments = 0; in __make_request()
1220 clear_bit(BIO_SEG_VALID, &bio->bi_flags); in __make_request()
1237 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); in __make_request()
1238 bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector, in __make_request()
1241 r10_bio->devs[slot].bio = read_bio; in __make_request()
1256 - bio->bi_iter.bi_sector); in __make_request()
1259 if (bio->bi_phys_segments == 0) in __make_request()
1260 bio->bi_phys_segments = 2; in __make_request()
1262 bio->bi_phys_segments++; in __make_request()
1273 r10_bio->master_bio = bio; in __make_request()
1274 r10_bio->sectors = bio_sectors(bio) - sectors_handled; in __make_request()
1277 r10_bio->sector = bio->bi_iter.bi_sector + in __make_request()
1336 r10_bio->devs[i].bio = NULL; in __make_request()
1386 r10_bio->devs[i].bio = bio; in __make_request()
1390 r10_bio->devs[i].repl_bio = bio; in __make_request()
1402 if (r10_bio->devs[j].bio) { in __make_request()
1430 if (bio->bi_phys_segments == 0) in __make_request()
1431 bio->bi_phys_segments = 2; in __make_request()
1433 bio->bi_phys_segments++; in __make_request()
1437 bio->bi_iter.bi_sector; in __make_request()
1443 struct bio *mbio; in __make_request()
1445 if (r10_bio->devs[i].bio) { in __make_request()
1447 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); in __make_request()
1448 bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector, in __make_request()
1450 r10_bio->devs[i].bio = mbio; in __make_request()
1490 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); in __make_request()
1491 bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector, in __make_request()
1518 if (sectors_handled < bio_sectors(bio)) { in __make_request()
1525 r10_bio->master_bio = bio; in __make_request()
1526 r10_bio->sectors = bio_sectors(bio) - sectors_handled; in __make_request()
1529 r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled; in __make_request()
1536 static void make_request(struct mddev *mddev, struct bio *bio) in make_request() argument
1542 struct bio *split; in make_request()
1544 if (unlikely(bio->bi_rw & REQ_FLUSH)) { in make_request()
1545 md_flush_request(mddev, bio); in make_request()
1549 md_write_start(mddev, bio); in make_request()
1557 if (unlikely((bio->bi_iter.bi_sector & chunk_mask) + in make_request()
1558 bio_sectors(bio) > chunk_sects in make_request()
1562 split = bio_split(bio, chunk_sects - in make_request()
1563 (bio->bi_iter.bi_sector & in make_request()
1566 bio_chain(split, bio); in make_request()
1568 split = bio; in make_request()
1572 } while (split != bio); in make_request()
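Lines 1536–1572 are make_request(): a bio that straddles a RAID10 chunk boundary is cut at the boundary with bio_split(), chained back to its parent with bio_chain(), and the loop repeats until the remaining piece fits inside one chunk. A small user-space sketch of just the boundary arithmetic (chunk_sects is a power of two, so bi_sector & chunk_mask gives the offset into the current chunk); names here are illustrative only:

    #include <stdio.h>

    /* Sectors left before the next chunk boundary, mirroring
     * chunk_sects - (bi_sector & chunk_mask) in make_request(). */
    static unsigned sectors_to_boundary(unsigned long long sector, unsigned chunk_sects)
    {
        unsigned chunk_mask = chunk_sects - 1;          /* chunk_sects is a power of two */
        return chunk_sects - (unsigned)(sector & chunk_mask);
    }

    int main(void)
    {
        unsigned long long sector = 1000;               /* request start, in sectors  */
        unsigned len = 300;                             /* request length, in sectors */
        unsigned chunk_sects = 128;                     /* 64 KiB chunks              */

        while (len) {
            unsigned piece = sectors_to_boundary(sector, chunk_sects);
            if (piece > len)
                piece = len;                            /* last piece: no split needed */
            printf("issue %u sectors at %llu\n", piece, sector);
            sector += piece;
            len -= piece;
        }
        return 0;
    }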
1919 static void end_sync_read(struct bio *bio, int error) in end_sync_read() argument
1921 struct r10bio *r10_bio = bio->bi_private; in end_sync_read()
1925 if (bio == r10_bio->master_bio) { in end_sync_read()
1929 d = find_bio_disk(conf, r10_bio, bio, NULL, NULL); in end_sync_read()
1931 if (test_bit(BIO_UPTODATE, &bio->bi_flags)) in end_sync_read()
1980 static void end_sync_write(struct bio *bio, int error) in end_sync_write() argument
1982 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); in end_sync_write()
1983 struct r10bio *r10_bio = bio->bi_private; in end_sync_write()
1993 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in end_sync_write()
2040 struct bio *tbio, *fbio; in sync_request_write()
2047 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) in sync_request_write()
2054 fbio = r10_bio->devs[i].bio; in sync_request_write()
2061 tbio = r10_bio->devs[i].bio; in sync_request_write()
2067 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) { in sync_request_write()
2132 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write in sync_request_write()
2133 && r10_bio->devs[i].bio != fbio) in sync_request_write()
2173 struct bio *bio = r10_bio->devs[0].bio; in fix_recovery_read_error() local
2194 bio->bi_io_vec[idx].bv_page, in fix_recovery_read_error()
2202 bio->bi_io_vec[idx].bv_page, in fix_recovery_read_error()
2250 struct bio *wbio, *wbio2; in recovery_request_write()
2263 wbio = r10_bio->devs[1].bio; in recovery_request_write()
2386 r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED; in fix_read_error()
2443 r10_bio->devs[r10_bio->read_slot].bio in fix_read_error()
2551 struct bio *bio = r10_bio->master_bio; in narrow_write_error() local
2583 struct bio *wbio; in narrow_write_error()
2587 wbio = bio_clone_mddev(bio, GFP_NOIO, mddev); in narrow_write_error()
2588 bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors); in narrow_write_error()
2610 struct bio *bio; in handle_read_error() local
2625 bio = r10_bio->devs[slot].bio; in handle_read_error()
2626 bdevname(bio->bi_bdev, b); in handle_read_error()
2627 bio_put(bio); in handle_read_error()
2628 r10_bio->devs[slot].bio = NULL; in handle_read_error()
2635 r10_bio->devs[slot].bio = IO_BLOCKED; in handle_read_error()
2659 bio = bio_clone_mddev(r10_bio->master_bio, in handle_read_error()
2661 bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors); in handle_read_error()
2662 r10_bio->devs[slot].bio = bio; in handle_read_error()
2664 bio->bi_iter.bi_sector = r10_bio->devs[slot].addr in handle_read_error()
2666 bio->bi_bdev = rdev->bdev; in handle_read_error()
2667 bio->bi_rw = READ | do_sync; in handle_read_error()
2668 bio->bi_private = r10_bio; in handle_read_error()
2669 bio->bi_end_io = raid10_end_read_request; in handle_read_error()
2672 struct bio *mbio = r10_bio->master_bio; in handle_read_error()
2683 generic_make_request(bio); in handle_read_error()
2698 generic_make_request(bio); in handle_read_error()
2717 if (r10_bio->devs[m].bio == NULL) in handle_write_completed()
2720 &r10_bio->devs[m].bio->bi_flags)) { in handle_write_completed()
2753 struct bio *bio = r10_bio->devs[m].bio; in handle_write_completed() local
2755 if (bio == IO_MADE_GOOD) { in handle_write_completed()
2761 } else if (bio != NULL && in handle_write_completed()
2762 !test_bit(BIO_UPTODATE, &bio->bi_flags)) { in handle_write_completed()
2770 bio = r10_bio->devs[m].repl_bio; in handle_write_completed()
2772 if (rdev && bio == IO_MADE_GOOD) { in handle_write_completed()
2831 generic_make_request(r10_bio->devs[slot].bio); in raid10d()
2896 struct bio *biolist = NULL, *bio; in sync_request() local
3068 r10_bio->master_bio = (struct bio*)rb2; in sync_request()
3118 bio = r10_bio->devs[0].bio; in sync_request()
3119 bio_reset(bio); in sync_request()
3120 bio->bi_next = biolist; in sync_request()
3121 biolist = bio; in sync_request()
3122 bio->bi_private = r10_bio; in sync_request()
3123 bio->bi_end_io = end_sync_read; in sync_request()
3124 bio->bi_rw = READ; in sync_request()
3126 bio->bi_iter.bi_sector = from_addr + in sync_request()
3128 bio->bi_bdev = rdev->bdev; in sync_request()
3144 bio = r10_bio->devs[1].bio; in sync_request()
3145 bio_reset(bio); in sync_request()
3146 bio->bi_next = biolist; in sync_request()
3147 biolist = bio; in sync_request()
3148 bio->bi_private = r10_bio; in sync_request()
3149 bio->bi_end_io = end_sync_write; in sync_request()
3150 bio->bi_rw = WRITE; in sync_request()
3151 bio->bi_iter.bi_sector = to_addr in sync_request()
3153 bio->bi_bdev = rdev->bdev; in sync_request()
3156 r10_bio->devs[1].bio->bi_end_io = NULL; in sync_request()
3159 bio = r10_bio->devs[1].repl_bio; in sync_request()
3160 if (bio) in sync_request()
3161 bio->bi_end_io = NULL; in sync_request()
3171 if (rdev == NULL || bio == NULL || in sync_request()
3174 bio_reset(bio); in sync_request()
3175 bio->bi_next = biolist; in sync_request()
3176 biolist = bio; in sync_request()
3177 bio->bi_private = r10_bio; in sync_request()
3178 bio->bi_end_io = end_sync_write; in sync_request()
3179 bio->bi_rw = WRITE; in sync_request()
3180 bio->bi_iter.bi_sector = to_addr + in sync_request()
3182 bio->bi_bdev = rdev->bdev; in sync_request()
3274 bio = r10_bio->devs[i].bio; in sync_request()
3275 bio_reset(bio); in sync_request()
3276 clear_bit(BIO_UPTODATE, &bio->bi_flags); in sync_request()
3295 bio->bi_next = biolist; in sync_request()
3296 biolist = bio; in sync_request()
3297 bio->bi_private = r10_bio; in sync_request()
3298 bio->bi_end_io = end_sync_read; in sync_request()
3299 bio->bi_rw = READ; in sync_request()
3300 bio->bi_iter.bi_sector = sector + in sync_request()
3302 bio->bi_bdev = conf->mirrors[d].rdev->bdev; in sync_request()
3311 bio = r10_bio->devs[i].repl_bio; in sync_request()
3312 bio_reset(bio); in sync_request()
3313 clear_bit(BIO_UPTODATE, &bio->bi_flags); in sync_request()
3317 bio->bi_next = biolist; in sync_request()
3318 biolist = bio; in sync_request()
3319 bio->bi_private = r10_bio; in sync_request()
3320 bio->bi_end_io = end_sync_write; in sync_request()
3321 bio->bi_rw = WRITE; in sync_request()
3322 bio->bi_iter.bi_sector = sector + in sync_request()
3324 bio->bi_bdev = conf->mirrors[d].replacement->bdev; in sync_request()
3331 if (r10_bio->devs[i].bio->bi_end_io) in sync_request()
3356 for (bio= biolist ; bio ; bio=bio->bi_next) { in sync_request()
3357 struct bio *bio2; in sync_request()
3358 page = bio->bi_io_vec[bio->bi_vcnt].bv_page; in sync_request()
3359 if (bio_add_page(bio, page, len, 0)) in sync_request()
3363 bio->bi_io_vec[bio->bi_vcnt].bv_page = page; in sync_request()
3365 bio2 && bio2 != bio; in sync_request()
3381 bio = biolist; in sync_request()
3384 bio->bi_next = NULL; in sync_request()
3385 r10_bio = bio->bi_private; in sync_request()
3388 if (bio->bi_end_io == end_sync_read) { in sync_request()
3389 md_sync_acct(bio->bi_bdev, nr_sectors); in sync_request()
3390 set_bit(BIO_UPTODATE, &bio->bi_flags); in sync_request()
3391 generic_make_request(bio); in sync_request()
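Lines 3356–3391 show how sync_request() fills the resync bios: for every bio on biolist it feeds in the next pre-allocated page with bio_add_page(), and once the bios are built the read ones are accounted with md_sync_acct() and submitted. A user-space sketch of the "keep adding a page to every bio in the list until one of them is full" loop, with stand-in types; the kernel's back-out of a partially added page is omitted here:

    #include <stdio.h>
    #include <stdbool.h>

    #define MAX_VECS 8                                  /* capacity of one fake bio */

    struct fake_bio {
        int vcnt;                                       /* like bio->bi_vcnt */
        unsigned lens[MAX_VECS];                        /* length of each attached segment */
    };

    /* Rough analogue of bio_add_page(): returns bytes added, 0 if the bio is full. */
    static unsigned add_page(struct fake_bio *b, unsigned len)
    {
        if (b->vcnt >= MAX_VECS)
            return 0;
        b->lens[b->vcnt++] = len;
        return len;
    }

    int main(void)
    {
        struct fake_bio read_bio = { 0 }, write_bio = { 0 };
        struct fake_bio *biolist[] = { &read_bio, &write_bio };
        unsigned nr_sectors = 0, max_sectors = 64, page_sectors = 8;

        while (nr_sectors < max_sectors) {
            bool full = false;
            for (unsigned i = 0; i < sizeof(biolist) / sizeof(biolist[0]); i++)
                if (!add_page(biolist[i], page_sectors * 512))
                    full = true;                        /* one bio refused the page */
            if (full)
                break;
            nr_sectors += page_sectors;
        }
        printf("built %u sectors per bio\n", nr_sectors);
        return 0;
    }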
4269 struct bio *blist; in reshape_request()
4270 struct bio *bio, *read_bio; in reshape_request() local
4408 struct bio *b; in reshape_request()
4416 b = r10_bio->devs[s/2].bio; in reshape_request()
4436 struct page *page = r10_bio->devs[0].bio->bi_io_vec[s/(PAGE_SIZE>>9)].bv_page; in reshape_request()
4440 for (bio = blist; bio ; bio = bio->bi_next) { in reshape_request()
4441 struct bio *bio2; in reshape_request()
4442 if (bio_add_page(bio, page, len, 0)) in reshape_request()
4447 bio2 && bio2 != bio; in reshape_request()
4508 struct bio *b; in reshape_request_write()
4516 b = r10_bio->devs[s/2].bio; in reshape_request_write()
4617 static void end_reshape_write(struct bio *bio, int error) in end_reshape_write() argument
4619 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); in end_reshape_write()
4620 struct r10bio *r10_bio = bio->bi_private; in end_reshape_write()
4628 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in end_reshape_write()