Lines matching refs: r10_bio
104 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
118 static void r10bio_pool_free(void *r10_bio, void *data) in r10bio_pool_free() argument
120 kfree(r10_bio); in r10bio_pool_free()
142 struct r10bio *r10_bio; in r10buf_pool_alloc() local
147 r10_bio = r10bio_pool_alloc(gfp_flags, conf); in r10buf_pool_alloc()
148 if (!r10_bio) in r10buf_pool_alloc()
164 r10_bio->devs[j].bio = bio; in r10buf_pool_alloc()
170 r10_bio->devs[j].repl_bio = bio; in r10buf_pool_alloc()
177 struct bio *rbio = r10_bio->devs[j].repl_bio; in r10buf_pool_alloc()
178 bio = r10_bio->devs[j].bio; in r10buf_pool_alloc()
184 struct bio *rbio = r10_bio->devs[0].bio; in r10buf_pool_alloc()
198 return r10_bio; in r10buf_pool_alloc()
205 safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page); in r10buf_pool_alloc()
209 if (r10_bio->devs[j].bio) in r10buf_pool_alloc()
210 bio_put(r10_bio->devs[j].bio); in r10buf_pool_alloc()
211 if (r10_bio->devs[j].repl_bio) in r10buf_pool_alloc()
212 bio_put(r10_bio->devs[j].repl_bio); in r10buf_pool_alloc()
214 r10bio_pool_free(r10_bio, conf); in r10buf_pool_alloc()
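The r10buf_pool_alloc() lines above follow the usual allocate-then-unwind shape: a bio (and, for recovery, a repl_bio) plus pages are allocated per copy, and on any failure everything acquired so far is released before r10bio_pool_free() gives the r10bio back. Below is a minimal userspace sketch of that unwind pattern only, with invented names (buf_alloc, buf_free, NCOPIES) standing in for the driver's helpers:

#include <stdlib.h>

#define NCOPIES 4	/* illustrative copy count, not conf->copies */

/* Allocate one buffer per copy; on failure free what was already
 * allocated and return NULL, mirroring the unwind path at the end of
 * r10buf_pool_alloc(). */
static void **buf_alloc(size_t size)
{
	void **bufs = calloc(NCOPIES, sizeof(*bufs));
	if (!bufs)
		return NULL;
	for (int i = 0; i < NCOPIES; i++) {
		bufs[i] = malloc(size);
		if (!bufs[i]) {
			while (--i >= 0)	/* unwind the partial allocation */
				free(bufs[i]);
			free(bufs);
			return NULL;
		}
	}
	return bufs;
}

static void buf_free(void **bufs)
{
	for (int i = 0; i < NCOPIES; i++)
		free(bufs[i]);
	free(bufs);
}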
241 static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio) in put_all_bios() argument
246 struct bio **bio = & r10_bio->devs[i].bio; in put_all_bios()
250 bio = &r10_bio->devs[i].repl_bio; in put_all_bios()
251 if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio)) in put_all_bios()
257 static void free_r10bio(struct r10bio *r10_bio) in free_r10bio() argument
259 struct r10conf *conf = r10_bio->mddev->private; in free_r10bio()
261 put_all_bios(conf, r10_bio); in free_r10bio()
262 mempool_free(r10_bio, conf->r10bio_pool); in free_r10bio()
265 static void put_buf(struct r10bio *r10_bio) in put_buf() argument
267 struct r10conf *conf = r10_bio->mddev->private; in put_buf()
269 mempool_free(r10_bio, conf->r10buf_pool); in put_buf()
274 static void reschedule_retry(struct r10bio *r10_bio) in reschedule_retry() argument
277 struct mddev *mddev = r10_bio->mddev; in reschedule_retry()
281 list_add(&r10_bio->retry_list, &conf->retry_list); in reschedule_retry()
296 static void raid_end_bio_io(struct r10bio *r10_bio) in raid_end_bio_io() argument
298 struct bio *bio = r10_bio->master_bio; in raid_end_bio_io()
300 struct r10conf *conf = r10_bio->mddev->private; in raid_end_bio_io()
310 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) in raid_end_bio_io()
320 free_r10bio(r10_bio); in raid_end_bio_io()
326 static inline void update_head_pos(int slot, struct r10bio *r10_bio) in update_head_pos() argument
328 struct r10conf *conf = r10_bio->mddev->private; in update_head_pos()
330 conf->mirrors[r10_bio->devs[slot].devnum].head_position = in update_head_pos()
331 r10_bio->devs[slot].addr + (r10_bio->sectors); in update_head_pos()
337 static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio, in find_bio_disk() argument
344 if (r10_bio->devs[slot].bio == bio) in find_bio_disk()
346 if (r10_bio->devs[slot].repl_bio == bio) { in find_bio_disk()
353 update_head_pos(slot, r10_bio); in find_bio_disk()
359 return r10_bio->devs[slot].devnum; in find_bio_disk()
365 struct r10bio *r10_bio = bio->bi_private; in raid10_end_read_request() local
368 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_read_request()
370 slot = r10_bio->read_slot; in raid10_end_read_request()
371 dev = r10_bio->devs[slot].devnum; in raid10_end_read_request()
372 rdev = r10_bio->devs[slot].rdev; in raid10_end_read_request()
376 update_head_pos(slot, r10_bio); in raid10_end_read_request()
388 set_bit(R10BIO_Uptodate, &r10_bio->state); in raid10_end_read_request()
395 if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state), in raid10_end_read_request()
400 raid_end_bio_io(r10_bio); in raid10_end_read_request()
411 (unsigned long long)r10_bio->sector); in raid10_end_read_request()
412 set_bit(R10BIO_ReadError, &r10_bio->state); in raid10_end_read_request()
413 reschedule_retry(r10_bio); in raid10_end_read_request()
417 static void close_write(struct r10bio *r10_bio) in close_write() argument
420 bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, in close_write()
421 r10_bio->sectors, in close_write()
422 !test_bit(R10BIO_Degraded, &r10_bio->state), in close_write()
424 md_write_end(r10_bio->mddev); in close_write()
427 static void one_write_done(struct r10bio *r10_bio) in one_write_done() argument
429 if (atomic_dec_and_test(&r10_bio->remaining)) { in one_write_done()
430 if (test_bit(R10BIO_WriteError, &r10_bio->state)) in one_write_done()
431 reschedule_retry(r10_bio); in one_write_done()
433 close_write(r10_bio); in one_write_done()
434 if (test_bit(R10BIO_MadeGood, &r10_bio->state)) in one_write_done()
435 reschedule_retry(r10_bio); in one_write_done()
437 raid_end_bio_io(r10_bio); in one_write_done()
444 struct r10bio *r10_bio = bio->bi_private; in raid10_end_write_request() local
447 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_write_request()
451 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in raid10_end_write_request()
474 set_bit(R10BIO_WriteError, &r10_bio->state); in raid10_end_write_request()
500 set_bit(R10BIO_Uptodate, &r10_bio->state); in raid10_end_write_request()
504 r10_bio->devs[slot].addr, in raid10_end_write_request()
505 r10_bio->sectors, in raid10_end_write_request()
509 r10_bio->devs[slot].repl_bio = IO_MADE_GOOD; in raid10_end_write_request()
511 r10_bio->devs[slot].bio = IO_MADE_GOOD; in raid10_end_write_request()
513 set_bit(R10BIO_MadeGood, &r10_bio->state); in raid10_end_write_request()
522 one_write_done(r10_bio); in raid10_end_write_request()
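one_write_done() and raid10_end_write_request() carry the completion accounting for writes: the submitter starts r10_bio->remaining at 1, every issued mirror write increments it, every completion decrements it, and only the caller that reaches zero ends the master bio. A hedged userspace analogue of that last-one-out pattern, using C11 atomics instead of the kernel's atomic_t (struct request and finish_master() are invented for illustration):

#include <stdatomic.h>

struct request {
	atomic_int remaining;			/* outstanding sub-writes + 1 */
	void (*finish_master)(struct request *);
};

/* Called once before any sub-write is issued; the extra count keeps
 * the request alive until the submitter calls one_write_done() too. */
static void start_write(struct request *r)
{
	atomic_store(&r->remaining, 1);
}

static void issue_sub_write(struct request *r)
{
	atomic_fetch_add(&r->remaining, 1);	/* like atomic_inc(&r10_bio->remaining) */
	/* ...submit the per-device write here... */
}

/* Each completion (and the submitter, once) calls this; whoever takes
 * the count to zero finishes the master request. */
static void one_write_done(struct request *r)
{
	if (atomic_fetch_sub(&r->remaining, 1) == 1)
		r->finish_master(r);
}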
695 struct r10bio *r10_bio, in read_balance() argument
698 const sector_t this_sector = r10_bio->sector; in read_balance()
700 int sectors = r10_bio->sectors; in read_balance()
708 raid10_find_phys(conf, r10_bio); in read_balance()
711 sectors = r10_bio->sectors; in read_balance()
732 if (r10_bio->devs[slot].bio == IO_BLOCKED) in read_balance()
734 disk = r10_bio->devs[slot].devnum; in read_balance()
737 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
743 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
746 dev_sector = r10_bio->devs[slot].addr; in read_balance()
790 new_distance = r10_bio->devs[slot].addr; in read_balance()
792 new_distance = abs(r10_bio->devs[slot].addr - in read_balance()
814 r10_bio->read_slot = slot; in read_balance()
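read_balance() picks which copy serves a read: it skips blocked or not-yet-recovered slots, then prefers the usable copy whose recorded head_position (see update_head_pos() above) is nearest to r10_bio->devs[slot].addr. A simplified sketch of just that distance heuristic, with an invented struct mirror standing in for conf->mirrors:

#include <stdint.h>

#define MIRRORS 4			/* illustrative, not conf->copies */

struct mirror {
	int      in_sync;		/* 0 if this copy cannot serve reads */
	uint64_t head_position;		/* last sector the disk serviced */
};

/* Return the in-sync copy whose head is closest to the target sector,
 * or -1 if none is usable.  The real read_balance() also honours bad
 * blocks, recovery_offset and disks that are currently idle. */
static int pick_read_copy(const struct mirror *m, uint64_t target)
{
	int best = -1;
	uint64_t best_dist = UINT64_MAX;

	for (int i = 0; i < MIRRORS; i++) {
		uint64_t d;

		if (!m[i].in_sync)
			continue;
		d = m[i].head_position > target ? m[i].head_position - target
						: target - m[i].head_position;
		if (d < best_dist) {
			best_dist = d;
			best = i;
		}
	}
	return best;
}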
1001 static sector_t choose_data_offset(struct r10bio *r10_bio, in choose_data_offset() argument
1005 test_bit(R10BIO_Previous, &r10_bio->state)) in choose_data_offset()
1058 struct r10bio *r10_bio; in __make_request() local
1114 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); in __make_request()
1116 r10_bio->master_bio = bio; in __make_request()
1117 r10_bio->sectors = sectors; in __make_request()
1119 r10_bio->mddev = mddev; in __make_request()
1120 r10_bio->sector = bio->bi_iter.bi_sector; in __make_request()
1121 r10_bio->state = 0; in __make_request()
1141 rdev = read_balance(conf, r10_bio, &max_sectors); in __make_request()
1143 raid_end_bio_io(r10_bio); in __make_request()
1146 slot = r10_bio->read_slot; in __make_request()
1149 bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector, in __make_request()
1152 r10_bio->devs[slot].bio = read_bio; in __make_request()
1153 r10_bio->devs[slot].rdev = rdev; in __make_request()
1155 read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + in __make_request()
1156 choose_data_offset(r10_bio, rdev); in __make_request()
1160 read_bio->bi_private = r10_bio; in __make_request()
1162 if (max_sectors < r10_bio->sectors) { in __make_request()
1166 sectors_handled = (r10_bio->sector + max_sectors in __make_request()
1168 r10_bio->sectors = max_sectors; in __make_request()
1180 reschedule_retry(r10_bio); in __make_request()
1182 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); in __make_request()
1184 r10_bio->master_bio = bio; in __make_request()
1185 r10_bio->sectors = bio_sectors(bio) - sectors_handled; in __make_request()
1186 r10_bio->state = 0; in __make_request()
1187 r10_bio->mddev = mddev; in __make_request()
1188 r10_bio->sector = bio->bi_iter.bi_sector + in __make_request()
1216 r10_bio->read_slot = -1; /* make sure repl_bio gets freed */ in __make_request()
1217 raid10_find_phys(conf, r10_bio); in __make_request()
1221 max_sectors = r10_bio->sectors; in __make_request()
1224 int d = r10_bio->devs[i].devnum; in __make_request()
1245 r10_bio->devs[i].bio = NULL; in __make_request()
1246 r10_bio->devs[i].repl_bio = NULL; in __make_request()
1249 set_bit(R10BIO_Degraded, &r10_bio->state); in __make_request()
1254 sector_t dev_sector = r10_bio->devs[i].addr; in __make_request()
1295 r10_bio->devs[i].bio = bio; in __make_request()
1299 r10_bio->devs[i].repl_bio = bio; in __make_request()
1311 if (r10_bio->devs[j].bio) { in __make_request()
1312 d = r10_bio->devs[j].devnum; in __make_request()
1315 if (r10_bio->devs[j].repl_bio) { in __make_request()
1317 d = r10_bio->devs[j].devnum; in __make_request()
1333 if (max_sectors < r10_bio->sectors) { in __make_request()
1337 r10_bio->sectors = max_sectors; in __make_request()
1345 sectors_handled = r10_bio->sector + max_sectors - in __make_request()
1348 atomic_set(&r10_bio->remaining, 1); in __make_request()
1349 bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); in __make_request()
1353 int d = r10_bio->devs[i].devnum; in __make_request()
1354 if (r10_bio->devs[i].bio) { in __make_request()
1357 bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector, in __make_request()
1359 r10_bio->devs[i].bio = mbio; in __make_request()
1361 mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+ in __make_request()
1362 choose_data_offset(r10_bio, in __make_request()
1368 mbio->bi_private = r10_bio; in __make_request()
1370 atomic_inc(&r10_bio->remaining); in __make_request()
1392 if (r10_bio->devs[i].repl_bio) { in __make_request()
1400 bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector, in __make_request()
1402 r10_bio->devs[i].repl_bio = mbio; in __make_request()
1404 mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr + in __make_request()
1406 r10_bio, rdev)); in __make_request()
1411 mbio->bi_private = r10_bio; in __make_request()
1413 atomic_inc(&r10_bio->remaining); in __make_request()
1428 one_write_done(r10_bio); in __make_request()
1432 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); in __make_request()
1434 r10_bio->master_bio = bio; in __make_request()
1435 r10_bio->sectors = bio_sectors(bio) - sectors_handled; in __make_request()
1437 r10_bio->mddev = mddev; in __make_request()
1438 r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled; in __make_request()
1439 r10_bio->state = 0; in __make_request()
1442 one_write_done(r10_bio); in __make_request()
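The __make_request() lines trace both the read and the write path, including the split case: when read_balance() or the bad-block checks cap a request at max_sectors, the driver records sectors_handled, finishes that much, and loops with a freshly allocated r10_bio whose sector is advanced by sectors_handled. The arithmetic amounts to the chunked walk sketched below (split_request() and its printf are illustrative, not driver helpers):

#include <stdint.h>
#include <stdio.h>

/* Walk [start, start + len) in chunks of at most max_sectors, the way
 * __make_request() re-allocates an r10_bio for each remainder. */
static void split_request(uint64_t start, uint64_t len, uint64_t max_sectors)
{
	uint64_t handled = 0;

	while (handled < len) {
		uint64_t chunk = len - handled;

		if (chunk > max_sectors)
			chunk = max_sectors;
		printf("sub-request at %llu, %llu sectors\n",
		       (unsigned long long)(start + handled),
		       (unsigned long long)chunk);
		handled += chunk;	/* sectors_handled in the driver */
	}
}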
1816 struct r10bio *r10_bio = bio->bi_private; in end_sync_read() local
1817 struct r10conf *conf = r10_bio->mddev->private; in end_sync_read()
1820 if (bio == r10_bio->master_bio) { in end_sync_read()
1822 d = r10_bio->read_slot; /* really the read dev */ in end_sync_read()
1824 d = find_bio_disk(conf, r10_bio, bio, NULL, NULL); in end_sync_read()
1827 set_bit(R10BIO_Uptodate, &r10_bio->state); in end_sync_read()
1832 atomic_add(r10_bio->sectors, in end_sync_read()
1839 if (test_bit(R10BIO_IsRecover, &r10_bio->state) || in end_sync_read()
1840 atomic_dec_and_test(&r10_bio->remaining)) { in end_sync_read()
1844 reschedule_retry(r10_bio); in end_sync_read()
1848 static void end_sync_request(struct r10bio *r10_bio) in end_sync_request() argument
1850 struct mddev *mddev = r10_bio->mddev; in end_sync_request()
1852 while (atomic_dec_and_test(&r10_bio->remaining)) { in end_sync_request()
1853 if (r10_bio->master_bio == NULL) { in end_sync_request()
1855 sector_t s = r10_bio->sectors; in end_sync_request()
1856 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || in end_sync_request()
1857 test_bit(R10BIO_WriteError, &r10_bio->state)) in end_sync_request()
1858 reschedule_retry(r10_bio); in end_sync_request()
1860 put_buf(r10_bio); in end_sync_request()
1864 struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio; in end_sync_request()
1865 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || in end_sync_request()
1866 test_bit(R10BIO_WriteError, &r10_bio->state)) in end_sync_request()
1867 reschedule_retry(r10_bio); in end_sync_request()
1869 put_buf(r10_bio); in end_sync_request()
1870 r10_bio = r10_bio2; in end_sync_request()
1877 struct r10bio *r10_bio = bio->bi_private; in end_sync_write() local
1878 struct mddev *mddev = r10_bio->mddev; in end_sync_write()
1887 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in end_sync_write()
1901 set_bit(R10BIO_WriteError, &r10_bio->state); in end_sync_write()
1904 r10_bio->devs[slot].addr, in end_sync_write()
1905 r10_bio->sectors, in end_sync_write()
1907 set_bit(R10BIO_MadeGood, &r10_bio->state); in end_sync_write()
1911 end_sync_request(r10_bio); in end_sync_write()
1930 static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) in sync_request_write() argument
1937 atomic_set(&r10_bio->remaining, 1); in sync_request_write()
1941 if (!r10_bio->devs[i].bio->bi_error) in sync_request_write()
1948 fbio = r10_bio->devs[i].bio; in sync_request_write()
1949 fbio->bi_iter.bi_size = r10_bio->sectors << 9; in sync_request_write()
1952 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9); in sync_request_write()
1957 tbio = r10_bio->devs[i].bio; in sync_request_write()
1963 if (!r10_bio->devs[i].bio->bi_error) { in sync_request_write()
1968 int sectors = r10_bio->sectors; in sync_request_write()
1981 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches); in sync_request_write()
1996 tbio->bi_private = r10_bio; in sync_request_write()
1997 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; in sync_request_write()
2002 d = r10_bio->devs[i].devnum; in sync_request_write()
2004 atomic_inc(&r10_bio->remaining); in sync_request_write()
2018 tbio = r10_bio->devs[i].repl_bio; in sync_request_write()
2021 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write in sync_request_write()
2022 && r10_bio->devs[i].bio != fbio) in sync_request_write()
2024 d = r10_bio->devs[i].devnum; in sync_request_write()
2025 atomic_inc(&r10_bio->remaining); in sync_request_write()
2032 if (atomic_dec_and_test(&r10_bio->remaining)) { in sync_request_write()
2033 md_done_sync(mddev, r10_bio->sectors, 1); in sync_request_write()
2034 put_buf(r10_bio); in sync_request_write()
2048 static void fix_recovery_read_error(struct r10bio *r10_bio) in fix_recovery_read_error() argument
2057 struct mddev *mddev = r10_bio->mddev; in fix_recovery_read_error()
2059 struct bio *bio = r10_bio->devs[0].bio; in fix_recovery_read_error()
2061 int sectors = r10_bio->sectors; in fix_recovery_read_error()
2063 int dr = r10_bio->devs[0].devnum; in fix_recovery_read_error()
2064 int dw = r10_bio->devs[1].devnum; in fix_recovery_read_error()
2076 addr = r10_bio->devs[0].addr + sect, in fix_recovery_read_error()
2084 addr = r10_bio->devs[1].addr + sect; in fix_recovery_read_error()
2108 addr = r10_bio->devs[1].addr + sect; in fix_recovery_read_error()
2132 static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio) in recovery_request_write() argument
2138 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) { in recovery_request_write()
2139 fix_recovery_read_error(r10_bio); in recovery_request_write()
2140 end_sync_request(r10_bio); in recovery_request_write()
2148 d = r10_bio->devs[1].devnum; in recovery_request_write()
2149 wbio = r10_bio->devs[1].bio; in recovery_request_write()
2150 wbio2 = r10_bio->devs[1].repl_bio; in recovery_request_write()
2239 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio) in fix_read_error() argument
2242 int sectors = r10_bio->sectors; in fix_read_error()
2245 int d = r10_bio->devs[r10_bio->read_slot].devnum; in fix_read_error()
2272 r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED; in fix_read_error()
2278 int sl = r10_bio->read_slot; in fix_read_error()
2290 d = r10_bio->devs[sl].devnum; in fix_read_error()
2294 is_badblock(rdev, r10_bio->devs[sl].addr + sect, s, in fix_read_error()
2299 r10_bio->devs[sl].addr + in fix_read_error()
2311 } while (!success && sl != r10_bio->read_slot); in fix_read_error()
2319 int dn = r10_bio->devs[r10_bio->read_slot].devnum; in fix_read_error()
2324 r10_bio->devs[r10_bio->read_slot].addr in fix_read_error()
2328 r10_bio->devs[r10_bio->read_slot].bio in fix_read_error()
2337 while (sl != r10_bio->read_slot) { in fix_read_error()
2343 d = r10_bio->devs[sl].devnum; in fix_read_error()
2352 r10_bio->devs[sl].addr + in fix_read_error()
2364 choose_data_offset(r10_bio, in fix_read_error()
2376 while (sl != r10_bio->read_slot) { in fix_read_error()
2382 d = r10_bio->devs[sl].devnum; in fix_read_error()
2391 r10_bio->devs[sl].addr + in fix_read_error()
2404 choose_data_offset(r10_bio, rdev)), in fix_read_error()
2418 choose_data_offset(r10_bio, rdev)), in fix_read_error()
2433 static int narrow_write_error(struct r10bio *r10_bio, int i) in narrow_write_error() argument
2435 struct bio *bio = r10_bio->master_bio; in narrow_write_error()
2436 struct mddev *mddev = r10_bio->mddev; in narrow_write_error()
2438 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; in narrow_write_error()
2453 int sect_to_write = r10_bio->sectors; in narrow_write_error()
2461 sector = r10_bio->sector; in narrow_write_error()
2462 sectors = ((r10_bio->sector + block_sectors) in narrow_write_error()
2473 wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+ in narrow_write_error()
2474 choose_data_offset(r10_bio, rdev) + in narrow_write_error()
2475 (sector - r10_bio->sector)); in narrow_write_error()
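narrow_write_error() retries a failed write in pieces no larger than the device's bad-block granularity, recording a bad block for any piece that still fails; the first piece is shortened so later ones stay aligned to block_sectors. A self-contained sketch of that chunking, assuming invented stand-ins write_chunk() and record_badblock() for the resubmitted wbio and the bad-block bookkeeping:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for the resubmitted write and the bad-block log. */
static bool write_chunk(uint64_t sector, uint64_t sectors)
{
	printf("rewrite %llu sectors at %llu\n",
	       (unsigned long long)sectors, (unsigned long long)sector);
	return true;			/* pretend the narrower write succeeds */
}

static void record_badblock(uint64_t sector, uint64_t sectors)
{
	printf("bad block: %llu + %llu\n",
	       (unsigned long long)sector, (unsigned long long)sectors);
}

/* Retry [start, start + total) in chunks aligned to block_sectors
 * (a power of two), the way narrow_write_error() walks r10_bio->sectors. */
static bool narrow_write(uint64_t start, uint64_t total, uint64_t block_sectors)
{
	bool ok = true;
	uint64_t sector = start;
	uint64_t to_write = total;
	/* the first chunk ends at the next block_sectors boundary */
	uint64_t sectors = ((start + block_sectors) & ~(block_sectors - 1)) - start;

	while (to_write) {
		if (sectors > to_write)
			sectors = to_write;
		if (!write_chunk(sector, sectors)) {
			record_badblock(sector, sectors);
			ok = false;
		}
		to_write -= sectors;
		sector += sectors;
		sectors = block_sectors;	/* later chunks are full-sized */
	}
	return ok;
}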
2491 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio) in handle_read_error() argument
2493 int slot = r10_bio->read_slot; in handle_read_error()
2496 struct md_rdev *rdev = r10_bio->devs[slot].rdev; in handle_read_error()
2509 bio = r10_bio->devs[slot].bio; in handle_read_error()
2512 r10_bio->devs[slot].bio = NULL; in handle_read_error()
2516 fix_read_error(conf, mddev, r10_bio); in handle_read_error()
2519 r10_bio->devs[slot].bio = IO_BLOCKED; in handle_read_error()
2524 rdev = read_balance(conf, r10_bio, &max_sectors); in handle_read_error()
2529 (unsigned long long)r10_bio->sector); in handle_read_error()
2530 raid_end_bio_io(r10_bio); in handle_read_error()
2534 do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC); in handle_read_error()
2535 slot = r10_bio->read_slot; in handle_read_error()
2542 (unsigned long long)r10_bio->sector); in handle_read_error()
2543 bio = bio_clone_mddev(r10_bio->master_bio, in handle_read_error()
2545 bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors); in handle_read_error()
2546 r10_bio->devs[slot].bio = bio; in handle_read_error()
2547 r10_bio->devs[slot].rdev = rdev; in handle_read_error()
2548 bio->bi_iter.bi_sector = r10_bio->devs[slot].addr in handle_read_error()
2549 + choose_data_offset(r10_bio, rdev); in handle_read_error()
2552 bio->bi_private = r10_bio; in handle_read_error()
2554 if (max_sectors < r10_bio->sectors) { in handle_read_error()
2556 struct bio *mbio = r10_bio->master_bio; in handle_read_error()
2558 r10_bio->sector + max_sectors in handle_read_error()
2560 r10_bio->sectors = max_sectors; in handle_read_error()
2569 r10_bio = mempool_alloc(conf->r10bio_pool, in handle_read_error()
2571 r10_bio->master_bio = mbio; in handle_read_error()
2572 r10_bio->sectors = bio_sectors(mbio) - sectors_handled; in handle_read_error()
2573 r10_bio->state = 0; in handle_read_error()
2575 &r10_bio->state); in handle_read_error()
2576 r10_bio->mddev = mddev; in handle_read_error()
2577 r10_bio->sector = mbio->bi_iter.bi_sector in handle_read_error()
2585 static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) in handle_write_completed() argument
2596 if (test_bit(R10BIO_IsSync, &r10_bio->state) || in handle_write_completed()
2597 test_bit(R10BIO_IsRecover, &r10_bio->state)) { in handle_write_completed()
2599 int dev = r10_bio->devs[m].devnum; in handle_write_completed()
2601 if (r10_bio->devs[m].bio == NULL) in handle_write_completed()
2603 if (!r10_bio->devs[m].bio->bi_error) { in handle_write_completed()
2606 r10_bio->devs[m].addr, in handle_write_completed()
2607 r10_bio->sectors, 0); in handle_write_completed()
2611 r10_bio->devs[m].addr, in handle_write_completed()
2612 r10_bio->sectors, 0)) in handle_write_completed()
2616 if (r10_bio->devs[m].repl_bio == NULL) in handle_write_completed()
2619 if (!r10_bio->devs[m].repl_bio->bi_error) { in handle_write_completed()
2622 r10_bio->devs[m].addr, in handle_write_completed()
2623 r10_bio->sectors, 0); in handle_write_completed()
2627 r10_bio->devs[m].addr, in handle_write_completed()
2628 r10_bio->sectors, 0)) in handle_write_completed()
2632 put_buf(r10_bio); in handle_write_completed()
2636 int dev = r10_bio->devs[m].devnum; in handle_write_completed()
2637 struct bio *bio = r10_bio->devs[m].bio; in handle_write_completed()
2642 r10_bio->devs[m].addr, in handle_write_completed()
2643 r10_bio->sectors, 0); in handle_write_completed()
2647 if (!narrow_write_error(r10_bio, m)) { in handle_write_completed()
2650 &r10_bio->state); in handle_write_completed()
2654 bio = r10_bio->devs[m].repl_bio; in handle_write_completed()
2659 r10_bio->devs[m].addr, in handle_write_completed()
2660 r10_bio->sectors, 0); in handle_write_completed()
2666 list_add(&r10_bio->retry_list, &conf->bio_end_io_list); in handle_write_completed()
2672 &r10_bio->state)) in handle_write_completed()
2673 close_write(r10_bio); in handle_write_completed()
2674 raid_end_bio_io(r10_bio); in handle_write_completed()
2682 struct r10bio *r10_bio; in raid10d() local
2702 r10_bio = list_first_entry(&tmp, struct r10bio, in raid10d()
2704 list_del(&r10_bio->retry_list); in raid10d()
2706 set_bit(R10BIO_Degraded, &r10_bio->state); in raid10d()
2709 &r10_bio->state)) in raid10d()
2710 close_write(r10_bio); in raid10d()
2711 raid_end_bio_io(r10_bio); in raid10d()
2725 r10_bio = list_entry(head->prev, struct r10bio, retry_list); in raid10d()
2730 mddev = r10_bio->mddev; in raid10d()
2732 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || in raid10d()
2733 test_bit(R10BIO_WriteError, &r10_bio->state)) in raid10d()
2734 handle_write_completed(conf, r10_bio); in raid10d()
2735 else if (test_bit(R10BIO_IsReshape, &r10_bio->state)) in raid10d()
2736 reshape_request_write(mddev, r10_bio); in raid10d()
2737 else if (test_bit(R10BIO_IsSync, &r10_bio->state)) in raid10d()
2738 sync_request_write(mddev, r10_bio); in raid10d()
2739 else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) in raid10d()
2740 recovery_request_write(mddev, r10_bio); in raid10d()
2741 else if (test_bit(R10BIO_ReadError, &r10_bio->state)) in raid10d()
2742 handle_read_error(mddev, r10_bio); in raid10d()
2747 int slot = r10_bio->read_slot; in raid10d()
2748 generic_make_request(r10_bio->devs[slot].bio); in raid10d()
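The raid10d() lines show how retried requests are dispatched: each r10_bio taken off conf->retry_list is routed by its state bits, tested in the order MadeGood/WriteError, IsReshape, IsSync, IsRecover, ReadError, and anything else is simply a read to resubmit via generic_make_request(). A hedged sketch of that bit-flag dispatch, with invented flag values and printf stubs in place of the real handlers (the actual R10BIO_* bits live in raid10.h):

#include <stdio.h>

/* Illustrative flag bits, not the kernel's R10BIO_* values. */
enum {
	F_MADE_GOOD	= 1 << 0,
	F_WRITE_ERROR	= 1 << 1,
	F_IS_RESHAPE	= 1 << 2,
	F_IS_SYNC	= 1 << 3,
	F_IS_RECOVER	= 1 << 4,
	F_READ_ERROR	= 1 << 5,
};

/* Route one retried request to the handler its state bits select, in
 * the same priority order raid10d() tests them. */
static void dispatch(unsigned long state)
{
	if (state & (F_MADE_GOOD | F_WRITE_ERROR))
		puts("handle_write_completed");
	else if (state & F_IS_RESHAPE)
		puts("reshape_request_write");
	else if (state & F_IS_SYNC)
		puts("sync_request_write");
	else if (state & F_IS_RECOVER)
		puts("recovery_request_write");
	else if (state & F_READ_ERROR)
		puts("handle_read_error");
	else
		puts("resubmit the read");	/* the generic_make_request() fall-through */
}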
2812 struct r10bio *r10_bio; in sync_request() local
2934 r10_bio = NULL; in sync_request()
2954 rb2 = r10_bio; in sync_request()
2980 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); in sync_request()
2981 r10_bio->state = 0; in sync_request()
2983 atomic_set(&r10_bio->remaining, 0); in sync_request()
2985 r10_bio->master_bio = (struct bio*)rb2; in sync_request()
2988 r10_bio->mddev = mddev; in sync_request()
2989 set_bit(R10BIO_IsRecover, &r10_bio->state); in sync_request()
2990 r10_bio->sector = sect; in sync_request()
2992 raid10_find_phys(conf, r10_bio); in sync_request()
3010 int d = r10_bio->devs[j].devnum; in sync_request()
3021 sector = r10_bio->devs[j].addr; in sync_request()
3035 bio = r10_bio->devs[0].bio; in sync_request()
3039 bio->bi_private = r10_bio; in sync_request()
3042 from_addr = r10_bio->devs[j].addr; in sync_request()
3050 if (r10_bio->devs[k].devnum == i) in sync_request()
3053 to_addr = r10_bio->devs[k].addr; in sync_request()
3054 r10_bio->devs[0].devnum = d; in sync_request()
3055 r10_bio->devs[0].addr = from_addr; in sync_request()
3056 r10_bio->devs[1].devnum = i; in sync_request()
3057 r10_bio->devs[1].addr = to_addr; in sync_request()
3061 bio = r10_bio->devs[1].bio; in sync_request()
3065 bio->bi_private = r10_bio; in sync_request()
3071 atomic_inc(&r10_bio->remaining); in sync_request()
3073 r10_bio->devs[1].bio->bi_end_io = NULL; in sync_request()
3076 bio = r10_bio->devs[1].repl_bio; in sync_request()
3094 bio->bi_private = r10_bio; in sync_request()
3100 atomic_inc(&r10_bio->remaining); in sync_request()
3112 if (r10_bio->devs[k].devnum == i) in sync_request()
3118 r10_bio->devs[k].addr, in sync_request()
3124 r10_bio->devs[k].addr, in sync_request()
3137 put_buf(r10_bio); in sync_request()
3140 r10_bio = rb2; in sync_request()
3145 while (r10_bio) { in sync_request()
3146 struct r10bio *rb2 = r10_bio; in sync_request()
3147 r10_bio = (struct r10bio*) rb2->master_bio; in sync_request()
3169 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); in sync_request()
3170 r10_bio->state = 0; in sync_request()
3172 r10_bio->mddev = mddev; in sync_request()
3173 atomic_set(&r10_bio->remaining, 0); in sync_request()
3177 r10_bio->master_bio = NULL; in sync_request()
3178 r10_bio->sector = sector_nr; in sync_request()
3179 set_bit(R10BIO_IsSync, &r10_bio->state); in sync_request()
3180 raid10_find_phys(conf, r10_bio); in sync_request()
3181 r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1; in sync_request()
3184 int d = r10_bio->devs[i].devnum; in sync_request()
3188 if (r10_bio->devs[i].repl_bio) in sync_request()
3189 r10_bio->devs[i].repl_bio->bi_end_io = NULL; in sync_request()
3191 bio = r10_bio->devs[i].bio; in sync_request()
3197 sector = r10_bio->devs[i].addr; in sync_request()
3211 atomic_inc(&r10_bio->remaining); in sync_request()
3214 bio->bi_private = r10_bio; in sync_request()
3228 bio = r10_bio->devs[i].repl_bio; in sync_request()
3232 sector = r10_bio->devs[i].addr; in sync_request()
3236 bio->bi_private = r10_bio; in sync_request()
3247 int d = r10_bio->devs[i].devnum; in sync_request()
3248 if (r10_bio->devs[i].bio->bi_end_io) in sync_request()
3251 if (r10_bio->devs[i].repl_bio && in sync_request()
3252 r10_bio->devs[i].repl_bio->bi_end_io) in sync_request()
3257 put_buf(r10_bio); in sync_request()
3295 r10_bio->sectors = nr_sectors; in sync_request()
3302 r10_bio = bio->bi_private; in sync_request()
3303 r10_bio->sectors = nr_sectors; in sync_request()
4190 struct r10bio *r10_bio; in reshape_request() local
4293 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); in reshape_request()
4294 r10_bio->state = 0; in reshape_request()
4296 atomic_set(&r10_bio->remaining, 0); in reshape_request()
4297 r10_bio->mddev = mddev; in reshape_request()
4298 r10_bio->sector = sector_nr; in reshape_request()
4299 set_bit(R10BIO_IsReshape, &r10_bio->state); in reshape_request()
4300 r10_bio->sectors = last - sector_nr + 1; in reshape_request()
4301 rdev = read_balance(conf, r10_bio, &max_sectors); in reshape_request()
4302 BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state)); in reshape_request()
4309 mempool_free(r10_bio, conf->r10buf_pool); in reshape_request()
4317 read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr in reshape_request()
4319 read_bio->bi_private = r10_bio; in reshape_request()
4326 r10_bio->master_bio = read_bio; in reshape_request()
4327 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; in reshape_request()
4330 __raid10_find_phys(&conf->geo, r10_bio); in reshape_request()
4337 int d = r10_bio->devs[s/2].devnum; in reshape_request()
4341 b = r10_bio->devs[s/2].repl_bio; in reshape_request()
4344 b = r10_bio->devs[s/2].bio; in reshape_request()
4351 b->bi_iter.bi_sector = r10_bio->devs[s/2].addr + in reshape_request()
4353 b->bi_private = r10_bio; in reshape_request()
4364 struct page *page = r10_bio->devs[0].bio->bi_io_vec[s/(PAGE_SIZE>>9)].bv_page; in reshape_request()
4388 r10_bio->sectors = nr_sectors; in reshape_request()
4391 md_sync_acct(read_bio->bi_bdev, r10_bio->sectors); in reshape_request()
4392 atomic_inc(&r10_bio->remaining); in reshape_request()
4411 static void end_reshape_request(struct r10bio *r10_bio);
4413 struct r10bio *r10_bio);
4414 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio) in reshape_request_write() argument
4424 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) in reshape_request_write()
4425 if (handle_reshape_read_error(mddev, r10_bio) < 0) { in reshape_request_write()
4427 md_done_sync(mddev, r10_bio->sectors, 0); in reshape_request_write()
4434 atomic_set(&r10_bio->remaining, 1); in reshape_request_write()
4437 int d = r10_bio->devs[s/2].devnum; in reshape_request_write()
4441 b = r10_bio->devs[s/2].repl_bio; in reshape_request_write()
4444 b = r10_bio->devs[s/2].bio; in reshape_request_write()
4449 md_sync_acct(b->bi_bdev, r10_bio->sectors); in reshape_request_write()
4450 atomic_inc(&r10_bio->remaining); in reshape_request_write()
4454 end_reshape_request(r10_bio); in reshape_request_write()
4484 struct r10bio *r10_bio) in handle_reshape_read_error() argument
4487 int sectors = r10_bio->sectors; in handle_reshape_read_error()
4490 struct r10bio r10_bio; in handle_reshape_read_error() member
4493 struct r10bio *r10b = &on_stack.r10_bio; in handle_reshape_read_error()
4496 struct bio_vec *bvec = r10_bio->master_bio->bi_io_vec; in handle_reshape_read_error()
4498 r10b->sector = r10_bio->sector; in handle_reshape_read_error()
4547 struct r10bio *r10_bio = bio->bi_private; in end_reshape_write() local
4548 struct mddev *mddev = r10_bio->mddev; in end_reshape_write()
4555 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in end_reshape_write()
4569 end_reshape_request(r10_bio); in end_reshape_write()
4572 static void end_reshape_request(struct r10bio *r10_bio) in end_reshape_request() argument
4574 if (!atomic_dec_and_test(&r10_bio->remaining)) in end_reshape_request()
4576 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1); in end_reshape_request()
4577 bio_put(r10_bio->master_bio); in end_reshape_request()
4578 put_buf(r10_bio); in end_reshape_request()