Lines Matching refs:bio (drivers/md/raid10.c)

84 #define IO_BLOCKED ((struct bio *)1)
89 #define IO_MADE_GOOD ((struct bio *)2)
91 #define BIO_SPECIAL(bio) ((unsigned long)bio <= 2) argument
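The two defines above (84, 89) overload a struct bio * slot with small integer sentinels, so a per-device slot can record "skip this device" (IO_BLOCKED) or "write eventually succeeded" (IO_MADE_GOOD) without an extra flag field; BIO_SPECIAL() (91) must therefore be checked before any bio_put(), as put_all_bios() does at 247-251. A minimal sketch of the tagged-pointer pattern, assuming the ~v4.3 APIs used throughout this listing:

    #include <linux/bio.h>

    #define IO_BLOCKED   ((struct bio *)1)
    #define IO_MADE_GOOD ((struct bio *)2)
    #define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)

    /* Release one per-device bio slot that may hold a sentinel.
     * NULL (0), IO_BLOCKED (1) and IO_MADE_GOOD (2) all satisfy the
     * <= 2 test, so the guard also covers the empty-slot case. */
    static void put_slot(struct bio **slot)
    {
        if (!BIO_SPECIAL(*slot))
            bio_put(*slot);        /* only a real bio holds a reference */
        *slot = NULL;
    }

put_slot() is a hypothetical helper for illustration; the real put_all_bios() open-codes the same test for both devs[i].bio and devs[i].repl_bio.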
105 static void end_reshape_write(struct bio *bio);
143 struct bio *bio; in r10buf_pool_alloc() local
161 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES); in r10buf_pool_alloc()
162 if (!bio) in r10buf_pool_alloc()
164 r10_bio->devs[j].bio = bio; in r10buf_pool_alloc()
167 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES); in r10buf_pool_alloc()
168 if (!bio) in r10buf_pool_alloc()
170 r10_bio->devs[j].repl_bio = bio; in r10buf_pool_alloc()
177 struct bio *rbio = r10_bio->devs[j].repl_bio; in r10buf_pool_alloc()
178 bio = r10_bio->devs[j].bio; in r10buf_pool_alloc()
184 struct bio *rbio = r10_bio->devs[0].bio; in r10buf_pool_alloc()
192 bio->bi_io_vec[i].bv_page = page; in r10buf_pool_alloc()
202 safe_put_page(bio->bi_io_vec[i-1].bv_page); in r10buf_pool_alloc()
205 safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page); in r10buf_pool_alloc()
209 if (r10_bio->devs[j].bio) in r10buf_pool_alloc()
210 bio_put(r10_bio->devs[j].bio); in r10buf_pool_alloc()
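r10buf_pool_alloc() (143-210) is the mempool constructor for resync/recovery buffers: it kmallocs one bio per copy (plus a replacement bio), parks RESYNC_PAGES pages directly in bi_io_vec, and unwinds pages-then-bios on failure. A sketch of the attach step, assuming the ~v4.3 bio_kmalloc() that returns a bio with empty iovec slots; the goto labels stand in for the real unwind code at 202-210:

    struct bio *bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
    int i;

    if (!bio)
        goto out_free_bio;          /* drop bios built so far */
    for (i = 0; i < RESYNC_PAGES; i++) {
        struct page *page = alloc_page(gfp_flags);
        if (!page)
            goto out_free_pages;    /* drop pages attached so far */
        /* park the page; bi_vcnt stays 0 until the bio is used */
        bio->bi_io_vec[i].bv_page = page;
    }

r10buf_pool_free() (226-236) is the mirror image: safe_put_page() each parked page, NULL the slot, then bio_put() both bios.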
226 struct bio *bio = r10bio->devs[j].bio; in r10buf_pool_free() local
227 if (bio) { in r10buf_pool_free()
229 safe_put_page(bio->bi_io_vec[i].bv_page); in r10buf_pool_free()
230 bio->bi_io_vec[i].bv_page = NULL; in r10buf_pool_free()
232 bio_put(bio); in r10buf_pool_free()
234 bio = r10bio->devs[j].repl_bio; in r10buf_pool_free()
235 if (bio) in r10buf_pool_free()
236 bio_put(bio); in r10buf_pool_free()
246 struct bio **bio = & r10_bio->devs[i].bio; in put_all_bios() local
247 if (!BIO_SPECIAL(*bio)) in put_all_bios()
248 bio_put(*bio); in put_all_bios()
249 *bio = NULL; in put_all_bios()
250 bio = &r10_bio->devs[i].repl_bio; in put_all_bios()
251 if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio)) in put_all_bios()
252 bio_put(*bio); in put_all_bios()
253 *bio = NULL; in put_all_bios()
298 struct bio *bio = r10_bio->master_bio; in raid_end_bio_io() local
302 if (bio->bi_phys_segments) { in raid_end_bio_io()
305 bio->bi_phys_segments--; in raid_end_bio_io()
306 done = (bio->bi_phys_segments == 0); in raid_end_bio_io()
311 bio->bi_error = -EIO; in raid_end_bio_io()
313 bio_endio(bio); in raid_end_bio_io()
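raid_end_bio_io() (298-313) completes the master bio. In this era md borrowed the master bio's bi_phys_segments field as a count of outstanding sub-requests when a request had to be split (see 1170-1173 below); zero means the bio was never split. A sketch of that borrowed-refcount idiom, assuming the v4.3-v4.12 bi_error plumbing; in the real function the -EIO comes from the r10bio's uptodate state rather than a bool parameter:

    #include <linux/bio.h>
    #include <linux/spinlock.h>

    /* Hypothetical helper: complete the master bio once the last
     * sub-request finishes.  bi_phys_segments == 0 means "not split". */
    static void master_bio_done(struct bio *master, spinlock_t *lock,
                                bool failed)
    {
        unsigned long flags;
        bool done = true;

        if (master->bi_phys_segments) {
            spin_lock_irqsave(lock, flags);
            master->bi_phys_segments--;
            done = (master->bi_phys_segments == 0);
            spin_unlock_irqrestore(lock, flags);
        }
        if (failed)
            master->bi_error = -EIO;  /* sticky: one failed piece fails all */
        if (done)
            bio_endio(master);
    }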
338 struct bio *bio, int *slotp, int *replp) in find_bio_disk() argument
344 if (r10_bio->devs[slot].bio == bio) in find_bio_disk()
346 if (r10_bio->devs[slot].repl_bio == bio) { in find_bio_disk()
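find_bio_disk() (338-346) maps a completed bio back to the copy it was issued for by scanning both the normal and the replacement bio of every slot; every write/sync completion handler below starts with this lookup. A sketch reconstructed from the entries above (the devnum return and the BUG_ON are assumptions based on how callers use the result):

    #include "raid10.h"    /* md-internal r10conf/r10bio types */

    static int find_bio_disk_sketch(struct r10conf *conf,
                                    struct r10bio *r10_bio,
                                    struct bio *bio, int *slotp, int *replp)
    {
        int slot, repl = 0;

        for (slot = 0; slot < conf->copies; slot++) {
            if (r10_bio->devs[slot].bio == bio)
                break;
            if (r10_bio->devs[slot].repl_bio == bio) {
                repl = 1;             /* completed on the replacement */
                break;
            }
        }
        BUG_ON(slot == conf->copies); /* bio must belong to this r10bio */
        if (slotp)
            *slotp = slot;
        if (replp)
            *replp = repl;
        return r10_bio->devs[slot].devnum;  /* index into conf->mirrors */
    }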
362 static void raid10_end_read_request(struct bio *bio) in raid10_end_read_request() argument
364 int uptodate = !bio->bi_error; in raid10_end_read_request()
365 struct r10bio *r10_bio = bio->bi_private; in raid10_end_read_request()
442 static void raid10_end_write_request(struct bio *bio) in raid10_end_write_request() argument
444 struct r10bio *r10_bio = bio->bi_private; in raid10_end_write_request()
451 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in raid10_end_write_request()
463 if (bio->bi_error) { in raid10_end_write_request()
507 bio_put(bio); in raid10_end_write_request()
511 r10_bio->devs[slot].bio = IO_MADE_GOOD; in raid10_end_write_request()
732 if (r10_bio->devs[slot].bio == IO_BLOCKED) in read_balance()
856 struct bio *bio; in flush_pending_writes() local
857 bio = bio_list_get(&conf->pending_bio_list); in flush_pending_writes()
865 while (bio) { /* submit pending writes */ in flush_pending_writes()
866 struct bio *next = bio->bi_next; in flush_pending_writes()
867 bio->bi_next = NULL; in flush_pending_writes()
868 if (unlikely((bio->bi_rw & REQ_DISCARD) && in flush_pending_writes()
869 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) in flush_pending_writes()
871 bio_endio(bio); in flush_pending_writes()
873 generic_make_request(bio); in flush_pending_writes()
874 bio = next; in flush_pending_writes()
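flush_pending_writes() (856-874) and raid10_unplug() (1037-1050) share one drain loop: detach the whole pending list with bio_list_get(), then submit each bio, completing discards immediately when the lower queue cannot take them (an ignored discard is still a success). A sketch of the shared loop, assuming the pre-v4.8 bi_rw flag names shown above:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static void submit_pending(struct bio_list *pending)
    {
        struct bio *bio = bio_list_get(pending);   /* detaches the list */

        while (bio) {    /* submit pending writes */
            struct bio *next = bio->bi_next;

            bio->bi_next = NULL;
            if (unlikely((bio->bi_rw & REQ_DISCARD) &&
                         !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
                bio_endio(bio);    /* just ignore the discard */
            else
                generic_make_request(bio);
            bio = next;
        }
    }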
1023 struct bio *bio; in raid10_unplug() local
1037 bio = bio_list_get(&plug->pending); in raid10_unplug()
1041 while (bio) { /* submit pending writes */ in raid10_unplug()
1042 struct bio *next = bio->bi_next; in raid10_unplug()
1043 bio->bi_next = NULL; in raid10_unplug()
1044 if (unlikely((bio->bi_rw & REQ_DISCARD) && in raid10_unplug()
1045 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) in raid10_unplug()
1047 bio_endio(bio); in raid10_unplug()
1049 generic_make_request(bio); in raid10_unplug()
1050 bio = next; in raid10_unplug()
1055 static void __make_request(struct mddev *mddev, struct bio *bio) in __make_request() argument
1059 struct bio *read_bio; in __make_request()
1061 const int rw = bio_data_dir(bio); in __make_request()
1062 const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); in __make_request()
1063 const unsigned long do_fua = (bio->bi_rw & REQ_FUA); in __make_request()
1064 const unsigned long do_discard = (bio->bi_rw in __make_request()
1066 const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME); in __make_request()
1082 sectors = bio_sectors(bio); in __make_request()
1084 bio->bi_iter.bi_sector < conf->reshape_progress && in __make_request()
1085 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { in __make_request()
1091 conf->reshape_progress <= bio->bi_iter.bi_sector || in __make_request()
1092 conf->reshape_progress >= bio->bi_iter.bi_sector + in __make_request()
1097 bio_data_dir(bio) == WRITE && in __make_request()
1099 ? (bio->bi_iter.bi_sector < conf->reshape_safe && in __make_request()
1100 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) in __make_request()
1101 : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe && in __make_request()
1102 bio->bi_iter.bi_sector < conf->reshape_progress))) { in __make_request()
1116 r10_bio->master_bio = bio; in __make_request()
1120 r10_bio->sector = bio->bi_iter.bi_sector; in __make_request()
1130 bio->bi_phys_segments = 0; in __make_request()
1131 bio_clear_flag(bio, BIO_SEG_VALID); in __make_request()
1148 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); in __make_request()
1149 bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector, in __make_request()
1152 r10_bio->devs[slot].bio = read_bio; in __make_request()
1167 - bio->bi_iter.bi_sector); in __make_request()
1170 if (bio->bi_phys_segments == 0) in __make_request()
1171 bio->bi_phys_segments = 2; in __make_request()
1173 bio->bi_phys_segments++; in __make_request()
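The three entries above (1170-1173) show how a partially mapped request is accounted: because 0 is reserved for "never split", the first split primes bi_phys_segments straight to 2 (the piece just issued plus the remainder), and each further piece increments it; raid_end_bio_io() (302-306 above) decrements the same counter. In context:

    /* Fragment, inside __make_request(); conf->device_lock assumed. */
    spin_lock_irq(&conf->device_lock);
    if (bio->bi_phys_segments == 0)
        bio->bi_phys_segments = 2;    /* issued piece + remainder */
    else
        bio->bi_phys_segments++;      /* one more outstanding piece */
    spin_unlock_irq(&conf->device_lock);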
1184 r10_bio->master_bio = bio; in __make_request()
1185 r10_bio->sectors = bio_sectors(bio) - sectors_handled; in __make_request()
1188 r10_bio->sector = bio->bi_iter.bi_sector + in __make_request()
1245 r10_bio->devs[i].bio = NULL; in __make_request()
1295 r10_bio->devs[i].bio = bio; in __make_request()
1299 r10_bio->devs[i].repl_bio = bio; in __make_request()
1311 if (r10_bio->devs[j].bio) { in __make_request()
1339 if (bio->bi_phys_segments == 0) in __make_request()
1340 bio->bi_phys_segments = 2; in __make_request()
1342 bio->bi_phys_segments++; in __make_request()
1346 bio->bi_iter.bi_sector; in __make_request()
1352 struct bio *mbio; in __make_request()
1354 if (r10_bio->devs[i].bio) { in __make_request()
1356 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); in __make_request()
1357 bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector, in __make_request()
1359 r10_bio->devs[i].bio = mbio; in __make_request()
1399 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); in __make_request()
1400 bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector, in __make_request()
1427 if (sectors_handled < bio_sectors(bio)) { in __make_request()
1434 r10_bio->master_bio = bio; in __make_request()
1435 r10_bio->sectors = bio_sectors(bio) - sectors_handled; in __make_request()
1438 r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled; in __make_request()
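For writes, __make_request() fans one clone out to every chosen copy: slot selection first marks devs[i].bio (1245, 1295), then each marked slot gets its own trimmed clone (1356-1359) and, for replacement devices, devs[i].repl_bio gets a second clone (1399-1400). A condensed sketch of the fan-out; target-address setup and the plug/pending_bio_list deferral are omitted, so the direct generic_make_request() is a simplification:

    /* Fragment: one trimmed clone per mirror that has a usable rdev. */
    for (i = 0; i < conf->copies; i++) {
        struct bio *mbio;

        if (!r10_bio->devs[i].bio)   /* slot not selected for write */
            continue;
        mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
        bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
                 max_sectors);
        r10_bio->devs[i].bio = mbio; /* replace marker with real clone */
        mbio->bi_private = r10_bio;
        mbio->bi_end_io = raid10_end_write_request;
        generic_make_request(mbio);  /* real code queues via plug list */
    }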
1445 static void make_request(struct mddev *mddev, struct bio *bio) in make_request() argument
1451 struct bio *split; in make_request()
1453 if (unlikely(bio->bi_rw & REQ_FLUSH)) { in make_request()
1454 md_flush_request(mddev, bio); in make_request()
1458 md_write_start(mddev, bio); in make_request()
1466 if (unlikely((bio->bi_iter.bi_sector & chunk_mask) + in make_request()
1467 bio_sectors(bio) > chunk_sects in make_request()
1471 split = bio_split(bio, chunk_sects - in make_request()
1472 (bio->bi_iter.bi_sector & in make_request()
1475 bio_chain(split, bio); in make_request()
1477 split = bio; in make_request()
1481 } while (split != bio); in make_request()
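make_request() (1445-1481) guarantees __make_request() never sees IO that crosses a chunk boundary: flushes are diverted to md_flush_request(), everything else is carved at chunk boundaries with bio_split() and tied back to the parent with bio_chain(), looping until the tail piece fits. A sketch of that loop, using the block-core API of this era (fs_bio_set is the kernel's global bio set):

    struct bio *split;

    do {
        sector_t offset = bio->bi_iter.bi_sector & chunk_mask;

        if (offset + bio_sectors(bio) > chunk_sects) {
            /* carve off the head, up to the chunk boundary */
            split = bio_split(bio, chunk_sects - offset,
                              GFP_NOIO, fs_bio_set);
            bio_chain(split, bio);  /* parent completes after child */
        } else {
            split = bio;            /* tail piece: fits in one chunk */
        }
        __make_request(mddev, split);
    } while (split != bio);

bio_chain() makes the parent's completion wait on the child, so the caller's bio still ends exactly once.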
1814 static void end_sync_read(struct bio *bio) in end_sync_read() argument
1816 struct r10bio *r10_bio = bio->bi_private; in end_sync_read()
1820 if (bio == r10_bio->master_bio) { in end_sync_read()
1824 d = find_bio_disk(conf, r10_bio, bio, NULL, NULL); in end_sync_read()
1826 if (!bio->bi_error) in end_sync_read()
1875 static void end_sync_write(struct bio *bio) in end_sync_write() argument
1877 struct r10bio *r10_bio = bio->bi_private; in end_sync_write()
1887 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in end_sync_write()
1893 if (bio->bi_error) { in end_sync_write()
1934 struct bio *tbio, *fbio; in sync_request_write()
1941 if (!r10_bio->devs[i].bio->bi_error) in sync_request_write()
1948 fbio = r10_bio->devs[i].bio; in sync_request_write()
1957 tbio = r10_bio->devs[i].bio; in sync_request_write()
1963 if (!r10_bio->devs[i].bio->bi_error) { in sync_request_write()
2021 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write in sync_request_write()
2022 && r10_bio->devs[i].bio != fbio) in sync_request_write()
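sync_request_write() (1934-2022) picks the first copy that read back without error as the reference (fbio, 1941-1948), then walks the other copies (tbio): a copy that read successfully is compared page by page and rewritten only if it differs; a copy that failed its read is rewritten unconditionally. A hedged sketch of the compare step (the real code also honours per-vector lengths and recheck state; j and vcnt come from the enclosing function):

    /* Fragment: compare one candidate copy against the reference.
     * vcnt pages were parked in bi_io_vec by r10buf_pool_alloc(). */
    for (j = 0; j < vcnt; j++)
        if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
                   page_address(tbio->bi_io_vec[j].bv_page),
                   PAGE_SIZE))
            break;
    if (j == vcnt)
        continue;    /* copies agree: nothing to write back */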
2059 struct bio *bio = r10_bio->devs[0].bio; in fix_recovery_read_error() local
2080 bio->bi_io_vec[idx].bv_page, in fix_recovery_read_error()
2088 bio->bi_io_vec[idx].bv_page, in fix_recovery_read_error()
2136 struct bio *wbio, *wbio2; in recovery_request_write()
2149 wbio = r10_bio->devs[1].bio; in recovery_request_write()
2272 r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED; in fix_read_error()
2328 r10_bio->devs[r10_bio->read_slot].bio in fix_read_error()
2435 struct bio *bio = r10_bio->master_bio; in narrow_write_error() local
2467 struct bio *wbio; in narrow_write_error()
2471 wbio = bio_clone_mddev(bio, GFP_NOIO, mddev); in narrow_write_error()
2472 bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors); in narrow_write_error()
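narrow_write_error() (2435-2472) retries a failed write in bad-block-aligned pieces so the good sectors still reach the disk and only the genuinely bad range gets recorded; each piece is a fresh clone of the master bio trimmed with bio_trim(). A sketch of one piece with era-appropriate calls; wsector (the device-relative sector) and the alignment clamping are assumptions filled in from context:

    /* Fragment: write one piece synchronously; on failure, record it. */
    wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
    bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
    wbio->bi_iter.bi_sector = wsector + choose_data_offset(r10_bio, rdev);
    wbio->bi_bdev = rdev->bdev;
    if (submit_bio_wait(WRITE, wbio) < 0)
        /* failure: remember this range as bad on the rdev */
        ok = rdev_set_badblocks(rdev, wsector, sectors, 0) && ok;
    bio_put(wbio);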
2494 struct bio *bio; in handle_read_error() local
2509 bio = r10_bio->devs[slot].bio; in handle_read_error()
2510 bdevname(bio->bi_bdev, b); in handle_read_error()
2511 bio_put(bio); in handle_read_error()
2512 r10_bio->devs[slot].bio = NULL; in handle_read_error()
2519 r10_bio->devs[slot].bio = IO_BLOCKED; in handle_read_error()
2543 bio = bio_clone_mddev(r10_bio->master_bio, in handle_read_error()
2545 bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors); in handle_read_error()
2546 r10_bio->devs[slot].bio = bio; in handle_read_error()
2548 bio->bi_iter.bi_sector = r10_bio->devs[slot].addr in handle_read_error()
2550 bio->bi_bdev = rdev->bdev; in handle_read_error()
2551 bio->bi_rw = READ | do_sync; in handle_read_error()
2552 bio->bi_private = r10_bio; in handle_read_error()
2553 bio->bi_end_io = raid10_end_read_request; in handle_read_error()
2556 struct bio *mbio = r10_bio->master_bio; in handle_read_error()
2567 generic_make_request(bio); in handle_read_error()
2582 generic_make_request(bio); in handle_read_error()
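handle_read_error() (2494-2582) drops the failed read (2511-2512), marks the slot IO_BLOCKED so read_balance() will skip that device (732 above, 2519), then clones a fresh read from the master bio and resubmits it against the newly chosen mirror. Reassembled from the entries above, with the obvious glue filled in:

    /* Fragment: retry the read on the slot picked by read_balance(). */
    bio = bio_clone_mddev(r10_bio->master_bio, GFP_NOIO, mddev);
    bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors);
    r10_bio->devs[slot].bio = bio;
    bio->bi_iter.bi_sector = r10_bio->devs[slot].addr
        + choose_data_offset(r10_bio, rdev);
    bio->bi_bdev = rdev->bdev;
    bio->bi_rw = READ | do_sync;    /* preserve the caller's SYNC hint */
    bio->bi_private = r10_bio;
    bio->bi_end_io = raid10_end_read_request;
    generic_make_request(bio);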
2601 if (r10_bio->devs[m].bio == NULL) in handle_write_completed()
2603 if (!r10_bio->devs[m].bio->bi_error) { in handle_write_completed()
2637 struct bio *bio = r10_bio->devs[m].bio; in handle_write_completed() local
2639 if (bio == IO_MADE_GOOD) { in handle_write_completed()
2645 } else if (bio != NULL && bio->bi_error) { in handle_write_completed()
2654 bio = r10_bio->devs[m].repl_bio; in handle_write_completed()
2656 if (rdev && bio == IO_MADE_GOOD) { in handle_write_completed()
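handle_write_completed() consumes the sentinels planted by the completion handlers: IO_MADE_GOOD (stored at 511 above) means a retried write eventually landed on a known-bad region, so the bad-block record can be cleared; a real bio with bi_error set goes through narrow_write_error(), and failure to narrow fails the device. A sketch of that dispatch (rdev lookup omitted):

    /* Fragment, daemon context: settle one device slot of an r10bio. */
    if (bio == IO_MADE_GOOD) {
        rdev_clear_badblocks(rdev, r10_bio->devs[m].addr,
                             r10_bio->sectors, 0);
        rdev_dec_pending(rdev, conf->mddev);
    } else if (bio != NULL && bio->bi_error) {
        if (!narrow_write_error(r10_bio, m))
            md_error(conf->mddev, rdev);  /* cannot isolate: fail rdev */
    }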
2748 generic_make_request(r10_bio->devs[slot].bio); in raid10d()
2813 struct bio *biolist = NULL, *bio; in sync_request() local
2985 r10_bio->master_bio = (struct bio*)rb2; in sync_request()
3035 bio = r10_bio->devs[0].bio; in sync_request()
3036 bio_reset(bio); in sync_request()
3037 bio->bi_next = biolist; in sync_request()
3038 biolist = bio; in sync_request()
3039 bio->bi_private = r10_bio; in sync_request()
3040 bio->bi_end_io = end_sync_read; in sync_request()
3041 bio->bi_rw = READ; in sync_request()
3043 bio->bi_iter.bi_sector = from_addr + in sync_request()
3045 bio->bi_bdev = rdev->bdev; in sync_request()
3061 bio = r10_bio->devs[1].bio; in sync_request()
3062 bio_reset(bio); in sync_request()
3063 bio->bi_next = biolist; in sync_request()
3064 biolist = bio; in sync_request()
3065 bio->bi_private = r10_bio; in sync_request()
3066 bio->bi_end_io = end_sync_write; in sync_request()
3067 bio->bi_rw = WRITE; in sync_request()
3068 bio->bi_iter.bi_sector = to_addr in sync_request()
3070 bio->bi_bdev = rdev->bdev; in sync_request()
3073 r10_bio->devs[1].bio->bi_end_io = NULL; in sync_request()
3076 bio = r10_bio->devs[1].repl_bio; in sync_request()
3077 if (bio) in sync_request()
3078 bio->bi_end_io = NULL; in sync_request()
3088 if (rdev == NULL || bio == NULL || in sync_request()
3091 bio_reset(bio); in sync_request()
3092 bio->bi_next = biolist; in sync_request()
3093 biolist = bio; in sync_request()
3094 bio->bi_private = r10_bio; in sync_request()
3095 bio->bi_end_io = end_sync_write; in sync_request()
3096 bio->bi_rw = WRITE; in sync_request()
3097 bio->bi_iter.bi_sector = to_addr + in sync_request()
3099 bio->bi_bdev = rdev->bdev; in sync_request()
3191 bio = r10_bio->devs[i].bio; in sync_request()
3192 bio_reset(bio); in sync_request()
3193 bio->bi_error = -EIO; in sync_request()
3212 bio->bi_next = biolist; in sync_request()
3213 biolist = bio; in sync_request()
3214 bio->bi_private = r10_bio; in sync_request()
3215 bio->bi_end_io = end_sync_read; in sync_request()
3216 bio->bi_rw = READ; in sync_request()
3217 bio->bi_iter.bi_sector = sector + in sync_request()
3219 bio->bi_bdev = conf->mirrors[d].rdev->bdev; in sync_request()
3228 bio = r10_bio->devs[i].repl_bio; in sync_request()
3229 bio_reset(bio); in sync_request()
3230 bio->bi_error = -EIO; in sync_request()
3234 bio->bi_next = biolist; in sync_request()
3235 biolist = bio; in sync_request()
3236 bio->bi_private = r10_bio; in sync_request()
3237 bio->bi_end_io = end_sync_write; in sync_request()
3238 bio->bi_rw = WRITE; in sync_request()
3239 bio->bi_iter.bi_sector = sector + in sync_request()
3241 bio->bi_bdev = conf->mirrors[d].replacement->bdev; in sync_request()
3248 if (r10_bio->devs[i].bio->bi_end_io) in sync_request()
3273 for (bio= biolist ; bio ; bio=bio->bi_next) { in sync_request()
3274 struct bio *bio2; in sync_request()
3275 page = bio->bi_io_vec[bio->bi_vcnt].bv_page; in sync_request()
3276 if (bio_add_page(bio, page, len, 0)) in sync_request()
3280 bio->bi_io_vec[bio->bi_vcnt].bv_page = page; in sync_request()
3282 bio2 && bio2 != bio; in sync_request()
3298 bio = biolist; in sync_request()
3301 bio->bi_next = NULL; in sync_request()
3302 r10_bio = bio->bi_private; in sync_request()
3305 if (bio->bi_end_io == end_sync_read) { in sync_request()
3306 md_sync_acct(bio->bi_bdev, nr_sectors); in sync_request()
3307 bio->bi_error = 0; in sync_request()
3308 generic_make_request(bio); in sync_request()
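sync_request() (2813-3308) strings its resync bios into an ad-hoc chain through bi_next (biolist): each recycled bio is wiped with bio_reset(), re-aimed (end_io, direction, sector, bdev), and pushed onto the chain; one loop then adds the shared resync pages to every bio, and a final loop detaches and submits the reads (writes are issued later, once the reads complete). A sketch of the two tail loops, with the rewind-on-failure path at 3280-3282 omitted:

    /* Fragment: add the page parked at bi_io_vec[bi_vcnt] to each bio. */
    for (bio = biolist; bio; bio = bio->bi_next) {
        struct page *page = bio->bi_io_vec[bio->bi_vcnt].bv_page;

        bio_add_page(bio, page, len, 0);  /* real code rewinds on failure */
    }

    /* Fragment: detach the chain and kick off the read side. */
    while (biolist) {
        bio = biolist;
        biolist = biolist->bi_next;
        bio->bi_next = NULL;
        if (bio->bi_end_io == end_sync_read) {
            md_sync_acct(bio->bi_bdev, nr_sectors);
            bio->bi_error = 0;
            generic_make_request(bio);
        }
    }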
4197 struct bio *blist; in reshape_request()
4198 struct bio *bio, *read_bio; in reshape_request() local
4336 struct bio *b; in reshape_request()
4344 b = r10_bio->devs[s/2].bio; in reshape_request()
4364 struct page *page = r10_bio->devs[0].bio->bi_io_vec[s/(PAGE_SIZE>>9)].bv_page; in reshape_request()
4368 for (bio = blist; bio ; bio = bio->bi_next) { in reshape_request()
4369 struct bio *bio2; in reshape_request()
4370 if (bio_add_page(bio, page, len, 0)) in reshape_request()
4375 bio2 && bio2 != bio; in reshape_request()
4436 struct bio *b; in reshape_request_write()
4444 b = r10_bio->devs[s/2].bio; in reshape_request_write()
4545 static void end_reshape_write(struct bio *bio) in end_reshape_write() argument
4547 struct r10bio *r10_bio = bio->bi_private; in end_reshape_write()
4555 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in end_reshape_write()
4563 if (bio->bi_error) { in end_reshape_write()
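end_reshape_write() (4545-4563) follows the same completion shape as end_sync_write(): map the bio back to its slot with find_bio_disk(), pick the replacement rdev when repl is set, and on error fail the device (reshape has no narrow-retry path). A reconstruction under those assumptions; end_reshape_request() is assumed from context as the r10bio finisher:

    #include "raid10.h"    /* md-internal r10conf/r10bio types */

    static void end_reshape_write_sketch(struct bio *bio)
    {
        struct r10bio *r10_bio = bio->bi_private;
        struct mddev *mddev = r10_bio->mddev;
        struct r10conf *conf = mddev->private;
        int d, slot, repl;
        struct md_rdev *rdev;

        d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
        rdev = repl ? conf->mirrors[d].replacement
                    : conf->mirrors[d].rdev;

        if (bio->bi_error)
            md_error(mddev, rdev);    /* no badblock narrowing here */

        rdev_dec_pending(rdev, mddev);
        end_reshape_request(r10_bio);
    }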