Lines matching refs: r10_bio
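The hits below appear to come from drivers/md/raid10.c, the MD RAID10 driver (bio->bi_iter alongside bio_clone_mddev and raid10_mergeable_bvec suggests a kernel in roughly the 3.14–3.19 range); each entry shows the source line number, the matching code, the containing function, and whether r10_bio is an argument, local, or member there. For orientation, a simplified sketch of the structure being referenced, based on the raid10.h of that era (field layout approximate, in particular the repl_bio/rdev union and the trailing flexible array):

	struct r10bio {
		atomic_t	remaining;	/* outstanding per-device bios; last completion finishes the request */
		sector_t	sector;		/* virtual sector number on the array */
		int		sectors;
		unsigned long	state;		/* R10BIO_Uptodate, R10BIO_WriteError, R10BIO_MadeGood, ... */
		struct mddev	*mddev;
		struct bio	*master_bio;	/* the original bio submitted to /dev/mdX */
		int		read_slot;	/* which devs[] slot a read went to, -1 otherwise */
		struct list_head retry_list;	/* queued onto conf->retry_list by reschedule_retry() */
		struct r10dev {
			struct bio	*bio;
			union {
				struct bio	*repl_bio;	/* writes/resync: bio for the replacement device */
				struct md_rdev	*rdev;		/* reads: device actually used */
			};
			sector_t	addr;	/* physical sector on that device */
			int		devnum;
		} devs[];			/* one slot per copy, filled by raid10_find_phys() */
	};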
103 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
117 static void r10bio_pool_free(void *r10_bio, void *data) in r10bio_pool_free() argument
119 kfree(r10_bio); in r10bio_pool_free()
141 struct r10bio *r10_bio; in r10buf_pool_alloc() local
146 r10_bio = r10bio_pool_alloc(gfp_flags, conf); in r10buf_pool_alloc()
147 if (!r10_bio) in r10buf_pool_alloc()
163 r10_bio->devs[j].bio = bio; in r10buf_pool_alloc()
169 r10_bio->devs[j].repl_bio = bio; in r10buf_pool_alloc()
176 struct bio *rbio = r10_bio->devs[j].repl_bio; in r10buf_pool_alloc()
177 bio = r10_bio->devs[j].bio; in r10buf_pool_alloc()
183 struct bio *rbio = r10_bio->devs[0].bio; in r10buf_pool_alloc()
197 return r10_bio; in r10buf_pool_alloc()
204 safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page); in r10buf_pool_alloc()
208 if (r10_bio->devs[j].bio) in r10buf_pool_alloc()
209 bio_put(r10_bio->devs[j].bio); in r10buf_pool_alloc()
210 if (r10_bio->devs[j].repl_bio) in r10buf_pool_alloc()
211 bio_put(r10_bio->devs[j].repl_bio); in r10buf_pool_alloc()
213 r10bio_pool_free(r10_bio, conf); in r10buf_pool_alloc()
240 static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio) in put_all_bios() argument
245 struct bio **bio = & r10_bio->devs[i].bio; in put_all_bios()
249 bio = &r10_bio->devs[i].repl_bio; in put_all_bios()
250 if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio)) in put_all_bios()
256 static void free_r10bio(struct r10bio *r10_bio) in free_r10bio() argument
258 struct r10conf *conf = r10_bio->mddev->private; in free_r10bio()
260 put_all_bios(conf, r10_bio); in free_r10bio()
261 mempool_free(r10_bio, conf->r10bio_pool); in free_r10bio()
264 static void put_buf(struct r10bio *r10_bio) in put_buf() argument
266 struct r10conf *conf = r10_bio->mddev->private; in put_buf()
268 mempool_free(r10_bio, conf->r10buf_pool); in put_buf()
273 static void reschedule_retry(struct r10bio *r10_bio) in reschedule_retry() argument
276 struct mddev *mddev = r10_bio->mddev; in reschedule_retry()
280 list_add(&r10_bio->retry_list, &conf->retry_list); in reschedule_retry()
295 static void raid_end_bio_io(struct r10bio *r10_bio) in raid_end_bio_io() argument
297 struct bio *bio = r10_bio->master_bio; in raid_end_bio_io()
299 struct r10conf *conf = r10_bio->mddev->private; in raid_end_bio_io()
309 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) in raid_end_bio_io()
319 free_r10bio(r10_bio); in raid_end_bio_io()
325 static inline void update_head_pos(int slot, struct r10bio *r10_bio) in update_head_pos() argument
327 struct r10conf *conf = r10_bio->mddev->private; in update_head_pos()
329 conf->mirrors[r10_bio->devs[slot].devnum].head_position = in update_head_pos()
330 r10_bio->devs[slot].addr + (r10_bio->sectors); in update_head_pos()
336 static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio, in find_bio_disk() argument
343 if (r10_bio->devs[slot].bio == bio) in find_bio_disk()
345 if (r10_bio->devs[slot].repl_bio == bio) { in find_bio_disk()
352 update_head_pos(slot, r10_bio); in find_bio_disk()
358 return r10_bio->devs[slot].devnum; in find_bio_disk()
364 struct r10bio *r10_bio = bio->bi_private; in raid10_end_read_request() local
367 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_read_request()
369 slot = r10_bio->read_slot; in raid10_end_read_request()
370 dev = r10_bio->devs[slot].devnum; in raid10_end_read_request()
371 rdev = r10_bio->devs[slot].rdev; in raid10_end_read_request()
375 update_head_pos(slot, r10_bio); in raid10_end_read_request()
387 set_bit(R10BIO_Uptodate, &r10_bio->state); in raid10_end_read_request()
394 if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state), in raid10_end_read_request()
399 raid_end_bio_io(r10_bio); in raid10_end_read_request()
410 (unsigned long long)r10_bio->sector); in raid10_end_read_request()
411 set_bit(R10BIO_ReadError, &r10_bio->state); in raid10_end_read_request()
412 reschedule_retry(r10_bio); in raid10_end_read_request()
416 static void close_write(struct r10bio *r10_bio) in close_write() argument
419 bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, in close_write()
420 r10_bio->sectors, in close_write()
421 !test_bit(R10BIO_Degraded, &r10_bio->state), in close_write()
423 md_write_end(r10_bio->mddev); in close_write()
426 static void one_write_done(struct r10bio *r10_bio) in one_write_done() argument
428 if (atomic_dec_and_test(&r10_bio->remaining)) { in one_write_done()
429 if (test_bit(R10BIO_WriteError, &r10_bio->state)) in one_write_done()
430 reschedule_retry(r10_bio); in one_write_done()
432 close_write(r10_bio); in one_write_done()
433 if (test_bit(R10BIO_MadeGood, &r10_bio->state)) in one_write_done()
434 reschedule_retry(r10_bio); in one_write_done()
436 raid_end_bio_io(r10_bio); in one_write_done()
444 struct r10bio *r10_bio = bio->bi_private; in raid10_end_write_request() local
447 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_write_request()
451 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in raid10_end_write_request()
474 set_bit(R10BIO_WriteError, &r10_bio->state); in raid10_end_write_request()
500 set_bit(R10BIO_Uptodate, &r10_bio->state); in raid10_end_write_request()
504 r10_bio->devs[slot].addr, in raid10_end_write_request()
505 r10_bio->sectors, in raid10_end_write_request()
509 r10_bio->devs[slot].repl_bio = IO_MADE_GOOD; in raid10_end_write_request()
511 r10_bio->devs[slot].bio = IO_MADE_GOOD; in raid10_end_write_request()
513 set_bit(R10BIO_MadeGood, &r10_bio->state); in raid10_end_write_request()
522 one_write_done(r10_bio); in raid10_end_write_request()
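The close_write()/one_write_done()/raid10_end_write_request() hits above all serve one reference-counting pattern: every per-copy write bio holds a count on r10_bio->remaining, and whichever completion drops it to zero finishes the request. A condensed reconstruction from those fragments (not verbatim kernel code):

	static void one_write_done(struct r10bio *r10_bio)
	{
		if (atomic_dec_and_test(&r10_bio->remaining)) {
			if (test_bit(R10BIO_WriteError, &r10_bio->state))
				reschedule_retry(r10_bio);		/* raid10d() retries / narrows the error */
			else {
				close_write(r10_bio);			/* bitmap_endwrite() + md_write_end() */
				if (test_bit(R10BIO_MadeGood, &r10_bio->state))
					reschedule_retry(r10_bio);	/* raid10d() clears the bad-block entries */
				else
					raid_end_bio_io(r10_bio);	/* complete master_bio, free the r10_bio */
			}
		}
	}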
715 struct r10bio r10_bio; in raid10_mergeable_bvec() member
718 struct r10bio *r10_bio = &on_stack.r10_bio; in raid10_mergeable_bvec() local
726 r10_bio->sector = sector; in raid10_mergeable_bvec()
727 raid10_find_phys(conf, r10_bio); in raid10_mergeable_bvec()
730 int disk = r10_bio->devs[s].devnum; in raid10_mergeable_bvec()
737 bvm->bi_sector = r10_bio->devs[s].addr in raid10_mergeable_bvec()
749 bvm->bi_sector = r10_bio->devs[s].addr in raid10_mergeable_bvec()
782 struct r10bio *r10_bio, in read_balance() argument
785 const sector_t this_sector = r10_bio->sector; in read_balance()
787 int sectors = r10_bio->sectors; in read_balance()
795 raid10_find_phys(conf, r10_bio); in read_balance()
798 sectors = r10_bio->sectors; in read_balance()
819 if (r10_bio->devs[slot].bio == IO_BLOCKED) in read_balance()
821 disk = r10_bio->devs[slot].devnum; in read_balance()
825 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
832 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
835 dev_sector = r10_bio->devs[slot].addr; in read_balance()
879 new_distance = r10_bio->devs[slot].addr; in read_balance()
881 new_distance = abs(r10_bio->devs[slot].addr - in read_balance()
903 r10_bio->read_slot = slot; in read_balance()
1090 static sector_t choose_data_offset(struct r10bio *r10_bio, in choose_data_offset() argument
1094 test_bit(R10BIO_Previous, &r10_bio->state)) in choose_data_offset()
1147 struct r10bio *r10_bio; in __make_request() local
1203 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); in __make_request()
1205 r10_bio->master_bio = bio; in __make_request()
1206 r10_bio->sectors = sectors; in __make_request()
1208 r10_bio->mddev = mddev; in __make_request()
1209 r10_bio->sector = bio->bi_iter.bi_sector; in __make_request()
1210 r10_bio->state = 0; in __make_request()
1230 rdev = read_balance(conf, r10_bio, &max_sectors); in __make_request()
1232 raid_end_bio_io(r10_bio); in __make_request()
1235 slot = r10_bio->read_slot; in __make_request()
1238 bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector, in __make_request()
1241 r10_bio->devs[slot].bio = read_bio; in __make_request()
1242 r10_bio->devs[slot].rdev = rdev; in __make_request()
1244 read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + in __make_request()
1245 choose_data_offset(r10_bio, rdev); in __make_request()
1249 read_bio->bi_private = r10_bio; in __make_request()
1251 if (max_sectors < r10_bio->sectors) { in __make_request()
1255 sectors_handled = (r10_bio->sector + max_sectors in __make_request()
1257 r10_bio->sectors = max_sectors; in __make_request()
1269 reschedule_retry(r10_bio); in __make_request()
1271 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); in __make_request()
1273 r10_bio->master_bio = bio; in __make_request()
1274 r10_bio->sectors = bio_sectors(bio) - sectors_handled; in __make_request()
1275 r10_bio->state = 0; in __make_request()
1276 r10_bio->mddev = mddev; in __make_request()
1277 r10_bio->sector = bio->bi_iter.bi_sector + in __make_request()
1305 r10_bio->read_slot = -1; /* make sure repl_bio gets freed */ in __make_request()
1306 raid10_find_phys(conf, r10_bio); in __make_request()
1310 max_sectors = r10_bio->sectors; in __make_request()
1313 int d = r10_bio->devs[i].devnum; in __make_request()
1336 r10_bio->devs[i].bio = NULL; in __make_request()
1337 r10_bio->devs[i].repl_bio = NULL; in __make_request()
1340 set_bit(R10BIO_Degraded, &r10_bio->state); in __make_request()
1345 sector_t dev_sector = r10_bio->devs[i].addr; in __make_request()
1386 r10_bio->devs[i].bio = bio; in __make_request()
1390 r10_bio->devs[i].repl_bio = bio; in __make_request()
1402 if (r10_bio->devs[j].bio) { in __make_request()
1403 d = r10_bio->devs[j].devnum; in __make_request()
1406 if (r10_bio->devs[j].repl_bio) { in __make_request()
1408 d = r10_bio->devs[j].devnum; in __make_request()
1424 if (max_sectors < r10_bio->sectors) { in __make_request()
1428 r10_bio->sectors = max_sectors; in __make_request()
1436 sectors_handled = r10_bio->sector + max_sectors - in __make_request()
1439 atomic_set(&r10_bio->remaining, 1); in __make_request()
1440 bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); in __make_request()
1444 int d = r10_bio->devs[i].devnum; in __make_request()
1445 if (r10_bio->devs[i].bio) { in __make_request()
1448 bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector, in __make_request()
1450 r10_bio->devs[i].bio = mbio; in __make_request()
1452 mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+ in __make_request()
1453 choose_data_offset(r10_bio, in __make_request()
1459 mbio->bi_private = r10_bio; in __make_request()
1461 atomic_inc(&r10_bio->remaining); in __make_request()
1483 if (r10_bio->devs[i].repl_bio) { in __make_request()
1491 bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector, in __make_request()
1493 r10_bio->devs[i].repl_bio = mbio; in __make_request()
1495 mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr + in __make_request()
1497 r10_bio, rdev)); in __make_request()
1502 mbio->bi_private = r10_bio; in __make_request()
1504 atomic_inc(&r10_bio->remaining); in __make_request()
1519 one_write_done(r10_bio); in __make_request()
1523 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); in __make_request()
1525 r10_bio->master_bio = bio; in __make_request()
1526 r10_bio->sectors = bio_sectors(bio) - sectors_handled; in __make_request()
1528 r10_bio->mddev = mddev; in __make_request()
1529 r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled; in __make_request()
1530 r10_bio->state = 0; in __make_request()
1533 one_write_done(r10_bio); in __make_request()
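Taken together, the __make_request() hits above outline the write-path lifecycle of an r10_bio: allocate from the mempool, describe the request, map it to physical copies, clone one bio per copy, then let the completion counter tear it down. A condensed sketch assembled from those fragments (error handling, bad-block waits, plugging, replacement devices and the read path all omitted; not verbatim kernel code):

	struct r10bio *r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
	int i;

	r10_bio->master_bio = bio;
	r10_bio->sectors = bio_sectors(bio);
	r10_bio->mddev = mddev;
	r10_bio->sector = bio->bi_iter.bi_sector;
	r10_bio->state = 0;
	r10_bio->read_slot = -1;			/* write: make sure repl_bio gets freed */

	raid10_find_phys(conf, r10_bio);		/* fill devs[i].devnum / devs[i].addr for each copy */

	atomic_set(&r10_bio->remaining, 1);		/* submitter's own reference */
	bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);

	for (i = 0; i < conf->copies; i++) {
		int d = r10_bio->devs[i].devnum;
		struct md_rdev *rdev = conf->mirrors[d].rdev;
		struct bio *mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);

		r10_bio->devs[i].bio = mbio;
		mbio->bi_iter.bi_sector = r10_bio->devs[i].addr +
					  choose_data_offset(r10_bio, rdev);
		mbio->bi_end_io = raid10_end_write_request;
		mbio->bi_private = r10_bio;

		atomic_inc(&r10_bio->remaining);
		generic_make_request(mbio);
	}
	one_write_done(r10_bio);			/* drop the submitter's reference */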
1921 struct r10bio *r10_bio = bio->bi_private; in end_sync_read() local
1922 struct r10conf *conf = r10_bio->mddev->private; in end_sync_read()
1925 if (bio == r10_bio->master_bio) { in end_sync_read()
1927 d = r10_bio->read_slot; /* really the read dev */ in end_sync_read()
1929 d = find_bio_disk(conf, r10_bio, bio, NULL, NULL); in end_sync_read()
1932 set_bit(R10BIO_Uptodate, &r10_bio->state); in end_sync_read()
1937 atomic_add(r10_bio->sectors, in end_sync_read()
1944 if (test_bit(R10BIO_IsRecover, &r10_bio->state) || in end_sync_read()
1945 atomic_dec_and_test(&r10_bio->remaining)) { in end_sync_read()
1949 reschedule_retry(r10_bio); in end_sync_read()
1953 static void end_sync_request(struct r10bio *r10_bio) in end_sync_request() argument
1955 struct mddev *mddev = r10_bio->mddev; in end_sync_request()
1957 while (atomic_dec_and_test(&r10_bio->remaining)) { in end_sync_request()
1958 if (r10_bio->master_bio == NULL) { in end_sync_request()
1960 sector_t s = r10_bio->sectors; in end_sync_request()
1961 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || in end_sync_request()
1962 test_bit(R10BIO_WriteError, &r10_bio->state)) in end_sync_request()
1963 reschedule_retry(r10_bio); in end_sync_request()
1965 put_buf(r10_bio); in end_sync_request()
1969 struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio; in end_sync_request()
1970 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || in end_sync_request()
1971 test_bit(R10BIO_WriteError, &r10_bio->state)) in end_sync_request()
1972 reschedule_retry(r10_bio); in end_sync_request()
1974 put_buf(r10_bio); in end_sync_request()
1975 r10_bio = r10_bio2; in end_sync_request()
1983 struct r10bio *r10_bio = bio->bi_private; in end_sync_write() local
1984 struct mddev *mddev = r10_bio->mddev; in end_sync_write()
1993 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in end_sync_write()
2007 set_bit(R10BIO_WriteError, &r10_bio->state); in end_sync_write()
2010 r10_bio->devs[slot].addr, in end_sync_write()
2011 r10_bio->sectors, in end_sync_write()
2013 set_bit(R10BIO_MadeGood, &r10_bio->state); in end_sync_write()
2017 end_sync_request(r10_bio); in end_sync_write()
2036 static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) in sync_request_write() argument
2043 atomic_set(&r10_bio->remaining, 1); in sync_request_write()
2047 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) in sync_request_write()
2054 fbio = r10_bio->devs[i].bio; in sync_request_write()
2056 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9); in sync_request_write()
2061 tbio = r10_bio->devs[i].bio; in sync_request_write()
2067 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) { in sync_request_write()
2072 int sectors = r10_bio->sectors; in sync_request_write()
2085 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches); in sync_request_write()
2098 tbio->bi_iter.bi_size = r10_bio->sectors << 9; in sync_request_write()
2100 tbio->bi_private = r10_bio; in sync_request_write()
2101 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; in sync_request_write()
2113 d = r10_bio->devs[i].devnum; in sync_request_write()
2115 atomic_inc(&r10_bio->remaining); in sync_request_write()
2129 tbio = r10_bio->devs[i].repl_bio; in sync_request_write()
2132 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write in sync_request_write()
2133 && r10_bio->devs[i].bio != fbio) in sync_request_write()
2138 d = r10_bio->devs[i].devnum; in sync_request_write()
2139 atomic_inc(&r10_bio->remaining); in sync_request_write()
2146 if (atomic_dec_and_test(&r10_bio->remaining)) { in sync_request_write()
2147 md_done_sync(mddev, r10_bio->sectors, 1); in sync_request_write()
2148 put_buf(r10_bio); in sync_request_write()
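The sync_request_write() hits above follow a compare-and-rewrite pattern: the first copy that read back successfully becomes the reference, and every other copy is compared page by page and rewritten if it is missing or differs. A rough sketch of that flow (first_good, read_ok(), pages_match() and resubmit_as_write() are hypothetical names standing in for the open-coded loops; the check-only mode and barrier handling are omitted):

	struct bio *fbio = r10_bio->devs[first_good].bio;	/* reference copy */
	int vcnt = DIV_ROUND_UP(r10_bio->sectors, PAGE_SIZE >> 9);
	int i;

	atomic_set(&r10_bio->remaining, 1);
	for (i = 0; i < conf->copies; i++) {
		struct bio *tbio = r10_bio->devs[i].bio;

		if (tbio == fbio)
			continue;
		if (read_ok(tbio)) {
			if (pages_match(fbio, tbio, vcnt))
				continue;		/* this copy is already consistent */
			atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
		}
		resubmit_as_write(tbio, r10_bio, i);	/* re-issue as a write; end_io -> end_sync_write() */
		atomic_inc(&r10_bio->remaining);
	}
	if (atomic_dec_and_test(&r10_bio->remaining)) {
		md_done_sync(mddev, r10_bio->sectors, 1);
		put_buf(r10_bio);
	}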
2162 static void fix_recovery_read_error(struct r10bio *r10_bio) in fix_recovery_read_error() argument
2171 struct mddev *mddev = r10_bio->mddev; in fix_recovery_read_error()
2173 struct bio *bio = r10_bio->devs[0].bio; in fix_recovery_read_error()
2175 int sectors = r10_bio->sectors; in fix_recovery_read_error()
2177 int dr = r10_bio->devs[0].devnum; in fix_recovery_read_error()
2178 int dw = r10_bio->devs[1].devnum; in fix_recovery_read_error()
2190 addr = r10_bio->devs[0].addr + sect, in fix_recovery_read_error()
2198 addr = r10_bio->devs[1].addr + sect; in fix_recovery_read_error()
2222 addr = r10_bio->devs[1].addr + sect; in fix_recovery_read_error()
2246 static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio) in recovery_request_write() argument
2252 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) { in recovery_request_write()
2253 fix_recovery_read_error(r10_bio); in recovery_request_write()
2254 end_sync_request(r10_bio); in recovery_request_write()
2262 d = r10_bio->devs[1].devnum; in recovery_request_write()
2263 wbio = r10_bio->devs[1].bio; in recovery_request_write()
2264 wbio2 = r10_bio->devs[1].repl_bio; in recovery_request_write()
2353 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio) in fix_read_error() argument
2356 int sectors = r10_bio->sectors; in fix_read_error()
2359 int d = r10_bio->devs[r10_bio->read_slot].devnum; in fix_read_error()
2386 r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED; in fix_read_error()
2392 int sl = r10_bio->read_slot; in fix_read_error()
2404 d = r10_bio->devs[sl].devnum; in fix_read_error()
2409 is_badblock(rdev, r10_bio->devs[sl].addr + sect, s, in fix_read_error()
2414 r10_bio->devs[sl].addr + in fix_read_error()
2426 } while (!success && sl != r10_bio->read_slot); in fix_read_error()
2434 int dn = r10_bio->devs[r10_bio->read_slot].devnum; in fix_read_error()
2439 r10_bio->devs[r10_bio->read_slot].addr in fix_read_error()
2443 r10_bio->devs[r10_bio->read_slot].bio in fix_read_error()
2452 while (sl != r10_bio->read_slot) { in fix_read_error()
2458 d = r10_bio->devs[sl].devnum; in fix_read_error()
2468 r10_bio->devs[sl].addr + in fix_read_error()
2480 choose_data_offset(r10_bio, in fix_read_error()
2492 while (sl != r10_bio->read_slot) { in fix_read_error()
2498 d = r10_bio->devs[sl].devnum; in fix_read_error()
2507 r10_bio->devs[sl].addr + in fix_read_error()
2520 choose_data_offset(r10_bio, rdev)), in fix_read_error()
2534 choose_data_offset(r10_bio, rdev)), in fix_read_error()
2549 static int narrow_write_error(struct r10bio *r10_bio, int i) in narrow_write_error() argument
2551 struct bio *bio = r10_bio->master_bio; in narrow_write_error()
2552 struct mddev *mddev = r10_bio->mddev; in narrow_write_error()
2554 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; in narrow_write_error()
2569 int sect_to_write = r10_bio->sectors; in narrow_write_error()
2577 sector = r10_bio->sector; in narrow_write_error()
2578 sectors = ((r10_bio->sector + block_sectors) in narrow_write_error()
2589 wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+ in narrow_write_error()
2590 choose_data_offset(r10_bio, rdev) + in narrow_write_error()
2591 (sector - r10_bio->sector)); in narrow_write_error()
2607 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio) in handle_read_error() argument
2609 int slot = r10_bio->read_slot; in handle_read_error()
2612 struct md_rdev *rdev = r10_bio->devs[slot].rdev; in handle_read_error()
2625 bio = r10_bio->devs[slot].bio; in handle_read_error()
2628 r10_bio->devs[slot].bio = NULL; in handle_read_error()
2632 fix_read_error(conf, mddev, r10_bio); in handle_read_error()
2635 r10_bio->devs[slot].bio = IO_BLOCKED; in handle_read_error()
2640 rdev = read_balance(conf, r10_bio, &max_sectors); in handle_read_error()
2645 (unsigned long long)r10_bio->sector); in handle_read_error()
2646 raid_end_bio_io(r10_bio); in handle_read_error()
2650 do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC); in handle_read_error()
2651 slot = r10_bio->read_slot; in handle_read_error()
2658 (unsigned long long)r10_bio->sector); in handle_read_error()
2659 bio = bio_clone_mddev(r10_bio->master_bio, in handle_read_error()
2661 bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors); in handle_read_error()
2662 r10_bio->devs[slot].bio = bio; in handle_read_error()
2663 r10_bio->devs[slot].rdev = rdev; in handle_read_error()
2664 bio->bi_iter.bi_sector = r10_bio->devs[slot].addr in handle_read_error()
2665 + choose_data_offset(r10_bio, rdev); in handle_read_error()
2668 bio->bi_private = r10_bio; in handle_read_error()
2670 if (max_sectors < r10_bio->sectors) { in handle_read_error()
2672 struct bio *mbio = r10_bio->master_bio; in handle_read_error()
2674 r10_bio->sector + max_sectors in handle_read_error()
2676 r10_bio->sectors = max_sectors; in handle_read_error()
2685 r10_bio = mempool_alloc(conf->r10bio_pool, in handle_read_error()
2687 r10_bio->master_bio = mbio; in handle_read_error()
2688 r10_bio->sectors = bio_sectors(mbio) - sectors_handled; in handle_read_error()
2689 r10_bio->state = 0; in handle_read_error()
2691 &r10_bio->state); in handle_read_error()
2692 r10_bio->mddev = mddev; in handle_read_error()
2693 r10_bio->sector = mbio->bi_iter.bi_sector in handle_read_error()
2701 static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) in handle_write_completed() argument
2712 if (test_bit(R10BIO_IsSync, &r10_bio->state) || in handle_write_completed()
2713 test_bit(R10BIO_IsRecover, &r10_bio->state)) { in handle_write_completed()
2715 int dev = r10_bio->devs[m].devnum; in handle_write_completed()
2717 if (r10_bio->devs[m].bio == NULL) in handle_write_completed()
2720 &r10_bio->devs[m].bio->bi_flags)) { in handle_write_completed()
2723 r10_bio->devs[m].addr, in handle_write_completed()
2724 r10_bio->sectors, 0); in handle_write_completed()
2728 r10_bio->devs[m].addr, in handle_write_completed()
2729 r10_bio->sectors, 0)) in handle_write_completed()
2733 if (r10_bio->devs[m].repl_bio == NULL) in handle_write_completed()
2736 &r10_bio->devs[m].repl_bio->bi_flags)) { in handle_write_completed()
2739 r10_bio->devs[m].addr, in handle_write_completed()
2740 r10_bio->sectors, 0); in handle_write_completed()
2744 r10_bio->devs[m].addr, in handle_write_completed()
2745 r10_bio->sectors, 0)) in handle_write_completed()
2749 put_buf(r10_bio); in handle_write_completed()
2752 int dev = r10_bio->devs[m].devnum; in handle_write_completed()
2753 struct bio *bio = r10_bio->devs[m].bio; in handle_write_completed()
2758 r10_bio->devs[m].addr, in handle_write_completed()
2759 r10_bio->sectors, 0); in handle_write_completed()
2763 if (!narrow_write_error(r10_bio, m)) { in handle_write_completed()
2766 &r10_bio->state); in handle_write_completed()
2770 bio = r10_bio->devs[m].repl_bio; in handle_write_completed()
2775 r10_bio->devs[m].addr, in handle_write_completed()
2776 r10_bio->sectors, 0); in handle_write_completed()
2781 &r10_bio->state)) in handle_write_completed()
2782 close_write(r10_bio); in handle_write_completed()
2783 raid_end_bio_io(r10_bio); in handle_write_completed()
2790 struct r10bio *r10_bio; in raid10d() local
2808 r10_bio = list_entry(head->prev, struct r10bio, retry_list); in raid10d()
2813 mddev = r10_bio->mddev; in raid10d()
2815 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || in raid10d()
2816 test_bit(R10BIO_WriteError, &r10_bio->state)) in raid10d()
2817 handle_write_completed(conf, r10_bio); in raid10d()
2818 else if (test_bit(R10BIO_IsReshape, &r10_bio->state)) in raid10d()
2819 reshape_request_write(mddev, r10_bio); in raid10d()
2820 else if (test_bit(R10BIO_IsSync, &r10_bio->state)) in raid10d()
2821 sync_request_write(mddev, r10_bio); in raid10d()
2822 else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) in raid10d()
2823 recovery_request_write(mddev, r10_bio); in raid10d()
2824 else if (test_bit(R10BIO_ReadError, &r10_bio->state)) in raid10d()
2825 handle_read_error(mddev, r10_bio); in raid10d()
2830 int slot = r10_bio->read_slot; in raid10d()
2831 generic_make_request(r10_bio->devs[slot].bio); in raid10d()
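The raid10d() hits above are the consumer side of reschedule_retry(): completion handlers that cannot finish their work in interrupt context queue the r10_bio and wake the per-array thread, which then dispatches on the state bits as shown above. A condensed reconstruction of the producer side from the reschedule_retry() fragments earlier in this listing (nr_queued accounting and the barrier/freeze wakeup omitted):

	static void reschedule_retry(struct r10bio *r10_bio)
	{
		struct r10conf *conf = r10_bio->mddev->private;
		unsigned long flags;

		spin_lock_irqsave(&conf->device_lock, flags);
		list_add(&r10_bio->retry_list, &conf->retry_list);
		spin_unlock_irqrestore(&conf->device_lock, flags);

		md_wakeup_thread(r10_bio->mddev->thread);	/* raid10d() drains conf->retry_list */
	}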
2895 struct r10bio *r10_bio; in sync_request() local
3017 r10_bio = NULL; in sync_request()
3037 rb2 = r10_bio; in sync_request()
3063 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); in sync_request()
3064 r10_bio->state = 0; in sync_request()
3066 atomic_set(&r10_bio->remaining, 0); in sync_request()
3068 r10_bio->master_bio = (struct bio*)rb2; in sync_request()
3071 r10_bio->mddev = mddev; in sync_request()
3072 set_bit(R10BIO_IsRecover, &r10_bio->state); in sync_request()
3073 r10_bio->sector = sect; in sync_request()
3075 raid10_find_phys(conf, r10_bio); in sync_request()
3093 int d = r10_bio->devs[j].devnum; in sync_request()
3104 sector = r10_bio->devs[j].addr; in sync_request()
3118 bio = r10_bio->devs[0].bio; in sync_request()
3122 bio->bi_private = r10_bio; in sync_request()
3125 from_addr = r10_bio->devs[j].addr; in sync_request()
3133 if (r10_bio->devs[k].devnum == i) in sync_request()
3136 to_addr = r10_bio->devs[k].addr; in sync_request()
3137 r10_bio->devs[0].devnum = d; in sync_request()
3138 r10_bio->devs[0].addr = from_addr; in sync_request()
3139 r10_bio->devs[1].devnum = i; in sync_request()
3140 r10_bio->devs[1].addr = to_addr; in sync_request()
3144 bio = r10_bio->devs[1].bio; in sync_request()
3148 bio->bi_private = r10_bio; in sync_request()
3154 atomic_inc(&r10_bio->remaining); in sync_request()
3156 r10_bio->devs[1].bio->bi_end_io = NULL; in sync_request()
3159 bio = r10_bio->devs[1].repl_bio; in sync_request()
3177 bio->bi_private = r10_bio; in sync_request()
3183 atomic_inc(&r10_bio->remaining); in sync_request()
3195 if (r10_bio->devs[k].devnum == i) in sync_request()
3201 r10_bio->devs[k].addr, in sync_request()
3207 r10_bio->devs[k].addr, in sync_request()
3220 put_buf(r10_bio); in sync_request()
3223 r10_bio = rb2; in sync_request()
3228 while (r10_bio) { in sync_request()
3229 struct r10bio *rb2 = r10_bio; in sync_request()
3230 r10_bio = (struct r10bio*) rb2->master_bio; in sync_request()
3252 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); in sync_request()
3253 r10_bio->state = 0; in sync_request()
3255 r10_bio->mddev = mddev; in sync_request()
3256 atomic_set(&r10_bio->remaining, 0); in sync_request()
3260 r10_bio->master_bio = NULL; in sync_request()
3261 r10_bio->sector = sector_nr; in sync_request()
3262 set_bit(R10BIO_IsSync, &r10_bio->state); in sync_request()
3263 raid10_find_phys(conf, r10_bio); in sync_request()
3264 r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1; in sync_request()
3267 int d = r10_bio->devs[i].devnum; in sync_request()
3271 if (r10_bio->devs[i].repl_bio) in sync_request()
3272 r10_bio->devs[i].repl_bio->bi_end_io = NULL; in sync_request()
3274 bio = r10_bio->devs[i].bio; in sync_request()
3280 sector = r10_bio->devs[i].addr; in sync_request()
3294 atomic_inc(&r10_bio->remaining); in sync_request()
3297 bio->bi_private = r10_bio; in sync_request()
3311 bio = r10_bio->devs[i].repl_bio; in sync_request()
3315 sector = r10_bio->devs[i].addr; in sync_request()
3319 bio->bi_private = r10_bio; in sync_request()
3330 int d = r10_bio->devs[i].devnum; in sync_request()
3331 if (r10_bio->devs[i].bio->bi_end_io) in sync_request()
3334 if (r10_bio->devs[i].repl_bio && in sync_request()
3335 r10_bio->devs[i].repl_bio->bi_end_io) in sync_request()
3340 put_buf(r10_bio); in sync_request()
3378 r10_bio->sectors = nr_sectors; in sync_request()
3385 r10_bio = bio->bi_private; in sync_request()
3386 r10_bio->sectors = nr_sectors; in sync_request()
4262 struct r10bio *r10_bio; in reshape_request() local
4365 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); in reshape_request()
4366 r10_bio->state = 0; in reshape_request()
4368 atomic_set(&r10_bio->remaining, 0); in reshape_request()
4369 r10_bio->mddev = mddev; in reshape_request()
4370 r10_bio->sector = sector_nr; in reshape_request()
4371 set_bit(R10BIO_IsReshape, &r10_bio->state); in reshape_request()
4372 r10_bio->sectors = last - sector_nr + 1; in reshape_request()
4373 rdev = read_balance(conf, r10_bio, &max_sectors); in reshape_request()
4374 BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state)); in reshape_request()
4381 mempool_free(r10_bio, conf->r10buf_pool); in reshape_request()
4389 read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr in reshape_request()
4391 read_bio->bi_private = r10_bio; in reshape_request()
4398 r10_bio->master_bio = read_bio; in reshape_request()
4399 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; in reshape_request()
4402 __raid10_find_phys(&conf->geo, r10_bio); in reshape_request()
4409 int d = r10_bio->devs[s/2].devnum; in reshape_request()
4413 b = r10_bio->devs[s/2].repl_bio; in reshape_request()
4416 b = r10_bio->devs[s/2].bio; in reshape_request()
4423 b->bi_iter.bi_sector = r10_bio->devs[s/2].addr + in reshape_request()
4425 b->bi_private = r10_bio; in reshape_request()
4436 struct page *page = r10_bio->devs[0].bio->bi_io_vec[s/(PAGE_SIZE>>9)].bv_page; in reshape_request()
4460 r10_bio->sectors = nr_sectors; in reshape_request()
4463 md_sync_acct(read_bio->bi_bdev, r10_bio->sectors); in reshape_request()
4464 atomic_inc(&r10_bio->remaining); in reshape_request()
4483 static void end_reshape_request(struct r10bio *r10_bio);
4485 struct r10bio *r10_bio);
4486 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio) in reshape_request_write() argument
4496 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) in reshape_request_write()
4497 if (handle_reshape_read_error(mddev, r10_bio) < 0) { in reshape_request_write()
4499 md_done_sync(mddev, r10_bio->sectors, 0); in reshape_request_write()
4506 atomic_set(&r10_bio->remaining, 1); in reshape_request_write()
4509 int d = r10_bio->devs[s/2].devnum; in reshape_request_write()
4513 b = r10_bio->devs[s/2].repl_bio; in reshape_request_write()
4516 b = r10_bio->devs[s/2].bio; in reshape_request_write()
4521 md_sync_acct(b->bi_bdev, r10_bio->sectors); in reshape_request_write()
4522 atomic_inc(&r10_bio->remaining); in reshape_request_write()
4526 end_reshape_request(r10_bio); in reshape_request_write()
4556 struct r10bio *r10_bio) in handle_reshape_read_error() argument
4559 int sectors = r10_bio->sectors; in handle_reshape_read_error()
4562 struct r10bio r10_bio; in handle_reshape_read_error() member
4565 struct r10bio *r10b = &on_stack.r10_bio; in handle_reshape_read_error()
4568 struct bio_vec *bvec = r10_bio->master_bio->bi_io_vec; in handle_reshape_read_error()
4570 r10b->sector = r10_bio->sector; in handle_reshape_read_error()
4620 struct r10bio *r10_bio = bio->bi_private; in end_reshape_write() local
4621 struct mddev *mddev = r10_bio->mddev; in end_reshape_write()
4628 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in end_reshape_write()
4642 end_reshape_request(r10_bio); in end_reshape_write()
4645 static void end_reshape_request(struct r10bio *r10_bio) in end_reshape_request() argument
4647 if (!atomic_dec_and_test(&r10_bio->remaining)) in end_reshape_request()
4649 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1); in end_reshape_request()
4650 bio_put(r10_bio->master_bio); in end_reshape_request()
4651 put_buf(r10_bio); in end_reshape_request()