Lines matching refs: r1_bio (drivers/md/raid1.c)

82 static void r1bio_pool_free(void *r1_bio, void *data)  in r1bio_pool_free()  argument
84 kfree(r1_bio); in r1bio_pool_free()
100 struct r1bio *r1_bio; in r1buf_pool_alloc() local
105 r1_bio = r1bio_pool_alloc(gfp_flags, pi); in r1buf_pool_alloc()
106 if (!r1_bio) in r1buf_pool_alloc()
116 r1_bio->bios[j] = bio; in r1buf_pool_alloc()
129 bio = r1_bio->bios[j]; in r1buf_pool_alloc()
139 r1_bio->bios[j]->bi_io_vec[i].bv_page = in r1buf_pool_alloc()
140 r1_bio->bios[0]->bi_io_vec[i].bv_page; in r1buf_pool_alloc()
143 r1_bio->master_bio = NULL; in r1buf_pool_alloc()
145 return r1_bio; in r1buf_pool_alloc()
151 bio_for_each_segment_all(bv, r1_bio->bios[j], i) in r1buf_pool_alloc()
157 bio_put(r1_bio->bios[j]); in r1buf_pool_alloc()
158 r1bio_pool_free(r1_bio, data); in r1buf_pool_alloc()
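
Everything above is the resync buffer allocation: r1bio_pool_alloc() gets a bare descriptor, one bio per mirror is attached, and for a plain check/resync the pages of bios[0] are shared by all the other bios (lines 139-140). For orientation, a reconstruction of struct r1bio as implied by the accesses in this listing; field order and the comments are assumptions, the real definition lives in drivers/md/raid1.h:

  struct r1bio {
          atomic_t         remaining;          /* writes still in flight */
          atomic_t         behind_remaining;   /* write-behind writes in flight */
          sector_t         sector;             /* start sector on the array */
          sector_t         start_next_window;
          int              sectors;            /* length of the request */
          unsigned long    state;              /* R1BIO_* bit flags */
          struct mddev     *mddev;
          struct bio       *master_bio;        /* the bio we were given */
          int              read_disk;          /* mirror chosen for reading */
          struct list_head retry_list;         /* raid1d's work-queue link */
          struct bio_vec   *behind_bvecs;      /* copied pages for write-behind */
          int              behind_page_count;
          struct bio       *bios[0];           /* one per mirror, sized at alloc */
  };
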
181 static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio) in put_all_bios() argument
186 struct bio **bio = r1_bio->bios + i; in put_all_bios()
193 static void free_r1bio(struct r1bio *r1_bio) in free_r1bio() argument
195 struct r1conf *conf = r1_bio->mddev->private; in free_r1bio()
197 put_all_bios(conf, r1_bio); in free_r1bio()
198 mempool_free(r1_bio, conf->r1bio_pool); in free_r1bio()
201 static void put_buf(struct r1bio *r1_bio) in put_buf() argument
203 struct r1conf *conf = r1_bio->mddev->private; in put_buf()
207 struct bio *bio = r1_bio->bios[i]; in put_buf()
209 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev); in put_buf()
212 mempool_free(r1_bio, conf->r1buf_pool); in put_buf()
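
free_r1bio() and put_buf() are the two release paths for these descriptors: normal I/O descriptors go back to conf->r1bio_pool after put_all_bios() drops the per-mirror bios, while resync descriptors, with their attached buffers, go back to conf->r1buf_pool. A reconstruction of put_buf() from the matched lines, with the elided pieces (loop bound, barrier release) filled in as assumptions:

  static void put_buf(struct r1bio *r1_bio)
  {
          struct r1conf *conf = r1_bio->mddev->private;
          int i;

          for (i = 0; i < conf->raid_disks * 2; i++) {
                  struct bio *bio = r1_bio->bios[i];

                  /* only disks this resync actually touched hold a ref */
                  if (bio->bi_end_io)
                          rdev_dec_pending(conf->mirrors[i].rdev,
                                           r1_bio->mddev);
          }

          mempool_free(r1_bio, conf->r1buf_pool);

          lower_barrier(conf);    /* assumed: resync I/O holds the barrier */
  }
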
217 static void reschedule_retry(struct r1bio *r1_bio) in reschedule_retry() argument
220 struct mddev *mddev = r1_bio->mddev; in reschedule_retry()
224 list_add(&r1_bio->retry_list, &conf->retry_list); in reschedule_retry()
237 static void call_bio_endio(struct r1bio *r1_bio) in call_bio_endio() argument
239 struct bio *bio = r1_bio->master_bio; in call_bio_endio()
241 struct r1conf *conf = r1_bio->mddev->private; in call_bio_endio()
242 sector_t start_next_window = r1_bio->start_next_window; in call_bio_endio()
259 if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) in call_bio_endio()
272 static void raid_end_bio_io(struct r1bio *r1_bio) in raid_end_bio_io() argument
274 struct bio *bio = r1_bio->master_bio; in raid_end_bio_io()
277 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { in raid_end_bio_io()
283 call_bio_endio(r1_bio); in raid_end_bio_io()
285 free_r1bio(r1_bio); in raid_end_bio_io()
291 static inline void update_head_pos(int disk, struct r1bio *r1_bio) in update_head_pos() argument
293 struct r1conf *conf = r1_bio->mddev->private; in update_head_pos()
296 r1_bio->sector + (r1_bio->sectors); in update_head_pos()
302 static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio) in find_bio_disk() argument
305 struct r1conf *conf = r1_bio->mddev->private; in find_bio_disk()
309 if (r1_bio->bios[mirror] == bio) in find_bio_disk()
313 update_head_pos(mirror, r1_bio); in find_bio_disk()
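
find_bio_disk() maps a completed bio back to its mirror slot purely by pointer identity against r1_bio->bios[], and update_head_pos() then records where that disk's head ended up (r1_bio->sector + r1_bio->sectors) for later read balancing. Reconstructed from the matches, loop bound assumed:

  static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
  {
          int mirror;
          struct r1conf *conf = r1_bio->mddev->private;
          int raid_disks = conf->raid_disks;

          for (mirror = 0; mirror < raid_disks * 2; mirror++)
                  if (r1_bio->bios[mirror] == bio)
                          break;

          BUG_ON(mirror == raid_disks * 2);
          update_head_pos(mirror, r1_bio);

          return mirror;
  }
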
321 struct r1bio *r1_bio = bio->bi_private; in raid1_end_read_request() local
323 struct r1conf *conf = r1_bio->mddev->private; in raid1_end_read_request()
325 mirror = r1_bio->read_disk; in raid1_end_read_request()
329 update_head_pos(mirror, r1_bio); in raid1_end_read_request()
332 set_bit(R1BIO_Uptodate, &r1_bio->state); in raid1_end_read_request()
340 if (r1_bio->mddev->degraded == conf->raid_disks || in raid1_end_read_request()
341 (r1_bio->mddev->degraded == conf->raid_disks-1 && in raid1_end_read_request()
348 raid_end_bio_io(r1_bio); in raid1_end_read_request()
361 (unsigned long long)r1_bio->sector); in raid1_end_read_request()
362 set_bit(R1BIO_ReadError, &r1_bio->state); in raid1_end_read_request()
363 reschedule_retry(r1_bio); in raid1_end_read_request()
368 static void close_write(struct r1bio *r1_bio) in close_write() argument
371 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { in close_write()
373 int i = r1_bio->behind_page_count; in close_write()
375 safe_put_page(r1_bio->behind_bvecs[i].bv_page); in close_write()
376 kfree(r1_bio->behind_bvecs); in close_write()
377 r1_bio->behind_bvecs = NULL; in close_write()
380 bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector, in close_write()
381 r1_bio->sectors, in close_write()
382 !test_bit(R1BIO_Degraded, &r1_bio->state), in close_write()
383 test_bit(R1BIO_BehindIO, &r1_bio->state)); in close_write()
384 md_write_end(r1_bio->mddev); in close_write()
387 static void r1_bio_write_done(struct r1bio *r1_bio) in r1_bio_write_done() argument
389 if (!atomic_dec_and_test(&r1_bio->remaining)) in r1_bio_write_done()
392 if (test_bit(R1BIO_WriteError, &r1_bio->state)) in r1_bio_write_done()
393 reschedule_retry(r1_bio); in r1_bio_write_done()
395 close_write(r1_bio); in r1_bio_write_done()
396 if (test_bit(R1BIO_MadeGood, &r1_bio->state)) in r1_bio_write_done()
397 reschedule_retry(r1_bio); in r1_bio_write_done()
399 raid_end_bio_io(r1_bio); in r1_bio_write_done()
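
r1_bio_write_done() is the drop-to-zero pattern that recurs throughout this file: each mirror write holds one count in 'remaining', and whichever completion drops it to zero either finishes the request or defers it to raid1d. Reconstructed; the else-nesting is assumed from the line numbers:

  static void r1_bio_write_done(struct r1bio *r1_bio)
  {
          if (!atomic_dec_and_test(&r1_bio->remaining))
                  return;                         /* other mirrors still writing */

          if (test_bit(R1BIO_WriteError, &r1_bio->state))
                  reschedule_retry(r1_bio);       /* raid1d narrows the error */
          else {
                  close_write(r1_bio);            /* bitmap + write-behind cleanup */
                  if (test_bit(R1BIO_MadeGood, &r1_bio->state))
                          reschedule_retry(r1_bio); /* raid1d clears bad blocks */
                  else
                          raid_end_bio_io(r1_bio);  /* complete the master bio */
          }
  }
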
405 struct r1bio *r1_bio = bio->bi_private; in raid1_end_write_request() local
406 int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state); in raid1_end_write_request()
407 struct r1conf *conf = r1_bio->mddev->private; in raid1_end_write_request()
410 mirror = find_bio_disk(r1_bio, bio); in raid1_end_write_request()
423 set_bit(R1BIO_WriteError, &r1_bio->state); in raid1_end_write_request()
438 r1_bio->bios[mirror] = NULL; in raid1_end_write_request()
450 set_bit(R1BIO_Uptodate, &r1_bio->state); in raid1_end_write_request()
454 r1_bio->sector, r1_bio->sectors, in raid1_end_write_request()
456 r1_bio->bios[mirror] = IO_MADE_GOOD; in raid1_end_write_request()
457 set_bit(R1BIO_MadeGood, &r1_bio->state); in raid1_end_write_request()
463 atomic_dec(&r1_bio->behind_remaining); in raid1_end_write_request()
472 if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) && in raid1_end_write_request()
473 test_bit(R1BIO_Uptodate, &r1_bio->state)) { in raid1_end_write_request()
475 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { in raid1_end_write_request()
476 struct bio *mbio = r1_bio->master_bio; in raid1_end_write_request()
481 call_bio_endio(r1_bio); in raid1_end_write_request()
485 if (r1_bio->bios[mirror] == NULL) in raid1_end_write_request()
493 r1_bio_write_done(r1_bio); in raid1_end_write_request()
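
Lines 463-481 are the write-behind fast path: once the only writes still pending target WriteMostly devices and the data is already safe on a regular mirror (R1BIO_Uptodate), the master bio is completed early, with R1BIO_Returned guaranteeing that happens exactly once. A sketch of that condition, where 'rdev' stands in for conf->mirrors[mirror].rdev:

  if (behind) {
          if (test_bit(WriteMostly, &rdev->flags))
                  atomic_dec(&r1_bio->behind_remaining);

          /*
           * Every write still in flight is a write-behind one and a
           * regular mirror already holds the data: end the master bio
           * now, exactly once.
           */
          if (atomic_read(&r1_bio->behind_remaining) >=
              (atomic_read(&r1_bio->remaining) - 1) &&
              test_bit(R1BIO_Uptodate, &r1_bio->state)) {
                  if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state))
                          call_bio_endio(r1_bio);
          }
  }
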
513 static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors) in read_balance() argument
515 const sector_t this_sector = r1_bio->sector; in read_balance()
534 sectors = r1_bio->sectors; in read_balance()
560 if (r1_bio->bios[disk] == IO_BLOCKED in read_balance()
970 static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio) in alloc_behind_pages() argument
989 r1_bio->behind_bvecs = bvecs; in alloc_behind_pages()
990 r1_bio->behind_page_count = bio->bi_vcnt; in alloc_behind_pages()
991 set_bit(R1BIO_BehindIO, &r1_bio->state); in alloc_behind_pages()
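
alloc_behind_pages() is what makes that fast path safe: it copies the master bio's payload into freshly allocated pages, so the slow mirror can keep writing after the master bio has been completed and its pages reused. A reconstruction with the error handling filled in as an assumption; on allocation failure the function simply returns and the write proceeds synchronously:

  static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
  {
          int i;
          struct bio_vec *bvec;
          struct bio_vec *bvecs = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec),
                                          GFP_NOIO);
          if (unlikely(!bvecs))
                  return;

          bio_for_each_segment_all(bvec, bio, i) {
                  bvecs[i] = *bvec;
                  bvecs[i].bv_page = alloc_page(GFP_NOIO);
                  if (unlikely(!bvecs[i].bv_page))
                          goto do_sync_io;
                  /* private copy of the data, page by page */
                  memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset,
                         kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
                  kunmap(bvecs[i].bv_page);
                  kunmap(bvec->bv_page);
          }
          r1_bio->behind_bvecs = bvecs;
          r1_bio->behind_page_count = bio->bi_vcnt;
          set_bit(R1BIO_BehindIO, &r1_bio->state);
          return;

  do_sync_io:
          for (i = 0; i < bio->bi_vcnt; i++)
                  if (bvecs[i].bv_page)
                          put_page(bvecs[i].bv_page);
          kfree(bvecs);
  }
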
1051 struct r1bio *r1_bio; in make_request() local
1113 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); in make_request()
1115 r1_bio->master_bio = bio; in make_request()
1116 r1_bio->sectors = bio_sectors(bio); in make_request()
1117 r1_bio->state = 0; in make_request()
1118 r1_bio->mddev = mddev; in make_request()
1119 r1_bio->sector = bio->bi_iter.bi_sector; in make_request()
1138 rdisk = read_balance(conf, r1_bio, &max_sectors); in make_request()
1142 raid_end_bio_io(r1_bio); in make_request()
1156 r1_bio->read_disk = rdisk; in make_request()
1157 r1_bio->start_next_window = 0; in make_request()
1160 bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector, in make_request()
1163 r1_bio->bios[rdisk] = read_bio; in make_request()
1165 read_bio->bi_iter.bi_sector = r1_bio->sector + in make_request()
1170 read_bio->bi_private = r1_bio; in make_request()
1172 if (max_sectors < r1_bio->sectors) { in make_request()
1177 sectors_handled = (r1_bio->sector + max_sectors in make_request()
1179 r1_bio->sectors = max_sectors; in make_request()
1191 reschedule_retry(r1_bio); in make_request()
1193 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); in make_request()
1195 r1_bio->master_bio = bio; in make_request()
1196 r1_bio->sectors = bio_sectors(bio) - sectors_handled; in make_request()
1197 r1_bio->state = 0; in make_request()
1198 r1_bio->mddev = mddev; in make_request()
1199 r1_bio->sector = bio->bi_iter.bi_sector + in make_request()
1228 r1_bio->start_next_window = start_next_window; in make_request()
1231 max_sectors = r1_bio->sectors; in make_request()
1239 r1_bio->bios[i] = NULL; in make_request()
1242 set_bit(R1BIO_Degraded, &r1_bio->state); in make_request()
1252 is_bad = is_badblock(rdev, r1_bio->sector, in make_request()
1262 if (is_bad && first_bad <= r1_bio->sector) { in make_request()
1264 bad_sectors -= (r1_bio->sector - first_bad); in make_request()
1284 int good_sectors = first_bad - r1_bio->sector; in make_request()
1289 r1_bio->bios[i] = bio; in make_request()
1299 if (r1_bio->bios[j]) in make_request()
1301 r1_bio->state = 0; in make_request()
1317 if (max_sectors < r1_bio->sectors) { in make_request()
1321 r1_bio->sectors = max_sectors; in make_request()
1329 sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector; in make_request()
1331 atomic_set(&r1_bio->remaining, 1); in make_request()
1332 atomic_set(&r1_bio->behind_remaining, 0); in make_request()
1337 if (!r1_bio->bios[i]) in make_request()
1341 bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors); in make_request()
1352 alloc_behind_pages(mbio, r1_bio); in make_request()
1354 bitmap_startwrite(bitmap, r1_bio->sector, in make_request()
1355 r1_bio->sectors, in make_request()
1357 &r1_bio->state)); in make_request()
1360 if (r1_bio->behind_bvecs) { in make_request()
1368 bvec->bv_page = r1_bio->behind_bvecs[j].bv_page; in make_request()
1370 atomic_inc(&r1_bio->behind_remaining); in make_request()
1373 r1_bio->bios[i] = mbio; in make_request()
1375 mbio->bi_iter.bi_sector = (r1_bio->sector + in make_request()
1381 mbio->bi_private = r1_bio; in make_request()
1383 atomic_inc(&r1_bio->remaining); in make_request()
1406 r1_bio_write_done(r1_bio); in make_request()
1410 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); in make_request()
1411 r1_bio->master_bio = bio; in make_request()
1412 r1_bio->sectors = bio_sectors(bio) - sectors_handled; in make_request()
1413 r1_bio->state = 0; in make_request()
1414 r1_bio->mddev = mddev; in make_request()
1415 r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled; in make_request()
1419 r1_bio_write_done(r1_bio); in make_request()
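
The repeated mempool_alloc/master_bio/sectors_handled runs above (lines 1113-1199 and 1406-1415) are all the same splitting idiom: when read_balance() or a bad-block range caps max_sectors, the current r1bio is shrunk to cover the front part and a fresh descriptor is allocated for the tail, then the code loops. The pattern, extracted from the matches:

  if (max_sectors < r1_bio->sectors) {
          int sectors_handled = r1_bio->sector + max_sectors
                                - bio->bi_iter.bi_sector;

          r1_bio->sectors = max_sectors;  /* this r1bio covers the front */

          /* ... submit the trimmed bio(s) for the front part ... */

          r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
          r1_bio->master_bio = bio;
          r1_bio->sectors = bio_sectors(bio) - sectors_handled;
          r1_bio->state = 0;
          r1_bio->mddev = mddev;
          r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
          /* loop back and process the tail */
  }
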
1702 struct r1bio *r1_bio = bio->bi_private; in end_sync_read() local
1704 update_head_pos(r1_bio->read_disk, r1_bio); in end_sync_read()
1712 set_bit(R1BIO_Uptodate, &r1_bio->state); in end_sync_read()
1714 if (atomic_dec_and_test(&r1_bio->remaining)) in end_sync_read()
1715 reschedule_retry(r1_bio); in end_sync_read()
1721 struct r1bio *r1_bio = bio->bi_private; in end_sync_write() local
1722 struct mddev *mddev = r1_bio->mddev; in end_sync_write()
1728 mirror = find_bio_disk(r1_bio, bio); in end_sync_write()
1732 sector_t s = r1_bio->sector; in end_sync_write()
1733 long sectors_to_go = r1_bio->sectors; in end_sync_write()
1747 set_bit(R1BIO_WriteError, &r1_bio->state); in end_sync_write()
1749 r1_bio->sector, in end_sync_write()
1750 r1_bio->sectors, in end_sync_write()
1752 !is_badblock(conf->mirrors[r1_bio->read_disk].rdev, in end_sync_write()
1753 r1_bio->sector, in end_sync_write()
1754 r1_bio->sectors, in end_sync_write()
1757 set_bit(R1BIO_MadeGood, &r1_bio->state); in end_sync_write()
1759 if (atomic_dec_and_test(&r1_bio->remaining)) { in end_sync_write()
1760 int s = r1_bio->sectors; in end_sync_write()
1761 if (test_bit(R1BIO_MadeGood, &r1_bio->state) || in end_sync_write()
1762 test_bit(R1BIO_WriteError, &r1_bio->state)) in end_sync_write()
1763 reschedule_retry(r1_bio); in end_sync_write()
1765 put_buf(r1_bio); in end_sync_write()
1790 static int fix_sync_read_error(struct r1bio *r1_bio) in fix_sync_read_error() argument
1803 struct mddev *mddev = r1_bio->mddev; in fix_sync_read_error()
1805 struct bio *bio = r1_bio->bios[r1_bio->read_disk]; in fix_sync_read_error()
1806 sector_t sect = r1_bio->sector; in fix_sync_read_error()
1807 int sectors = r1_bio->sectors; in fix_sync_read_error()
1812 int d = r1_bio->read_disk; in fix_sync_read_error()
1820 if (r1_bio->bios[d]->bi_end_io == end_sync_read) { in fix_sync_read_error()
1836 } while (!success && d != r1_bio->read_disk); in fix_sync_read_error()
1850 (unsigned long long)r1_bio->sector); in fix_sync_read_error()
1862 md_done_sync(mddev, r1_bio->sectors, 0); in fix_sync_read_error()
1863 put_buf(r1_bio); in fix_sync_read_error()
1875 while (d != r1_bio->read_disk) { in fix_sync_read_error()
1879 if (r1_bio->bios[d]->bi_end_io != end_sync_read) in fix_sync_read_error()
1885 r1_bio->bios[d]->bi_end_io = NULL; in fix_sync_read_error()
1890 while (d != r1_bio->read_disk) { in fix_sync_read_error()
1894 if (r1_bio->bios[d]->bi_end_io != end_sync_read) in fix_sync_read_error()
1906 set_bit(R1BIO_Uptodate, &r1_bio->state); in fix_sync_read_error()
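
fix_sync_read_error() runs three passes over the mirrors: forward from read_disk until some disk returns the data, then backward writing the good data over every disk in between, then backward again re-reading to verify the rewrite stuck; if no disk can read at all, the whole sync fails. A sketch of the first pass, with sync_page_io()'s signature from the same kernel era assumed; sect, s, and page are the current range and buffer page from the enclosing per-page loop:

  int d = r1_bio->read_disk;
  int success = 0;

  do {    /* pass 1: find any in-sync disk that can read this range */
          if (r1_bio->bios[d]->bi_end_io == end_sync_read &&
              sync_page_io(conf->mirrors[d].rdev, sect, s << 9,
                           page, READ, false)) {
                  success = 1;
                  break;
          }
          d++;
          if (d == conf->raid_disks * 2)
                  d = 0;
  } while (d != r1_bio->read_disk);
  /* pass 2: walk back to read_disk, writing the good data out */
  /* pass 3: walk back again, re-reading to verify */
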
1911 static void process_checks(struct r1bio *r1_bio) in process_checks() argument
1920 struct mddev *mddev = r1_bio->mddev; in process_checks()
1927 vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9); in process_checks()
1932 struct bio *b = r1_bio->bios[i]; in process_checks()
1940 b->bi_iter.bi_size = r1_bio->sectors << 9; in process_checks()
1941 b->bi_iter.bi_sector = r1_bio->sector + in process_checks()
1945 b->bi_private = r1_bio; in process_checks()
1960 if (r1_bio->bios[primary]->bi_end_io == end_sync_read && in process_checks()
1961 !r1_bio->bios[primary]->bi_error) { in process_checks()
1962 r1_bio->bios[primary]->bi_end_io = NULL; in process_checks()
1966 r1_bio->read_disk = primary; in process_checks()
1969 struct bio *pbio = r1_bio->bios[primary]; in process_checks()
1970 struct bio *sbio = r1_bio->bios[i]; in process_checks()
1991 atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); in process_checks()
2004 static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) in sync_request_write() argument
2011 bio = r1_bio->bios[r1_bio->read_disk]; in sync_request_write()
2013 if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) in sync_request_write()
2015 if (!fix_sync_read_error(r1_bio)) in sync_request_write()
2019 process_checks(r1_bio); in sync_request_write()
2024 atomic_set(&r1_bio->remaining, 1); in sync_request_write()
2026 wbio = r1_bio->bios[i]; in sync_request_write()
2029 (i == r1_bio->read_disk || in sync_request_write()
2035 atomic_inc(&r1_bio->remaining); in sync_request_write()
2041 if (atomic_dec_and_test(&r1_bio->remaining)) { in sync_request_write()
2043 int s = r1_bio->sectors; in sync_request_write()
2044 if (test_bit(R1BIO_MadeGood, &r1_bio->state) || in sync_request_write()
2045 test_bit(R1BIO_WriteError, &r1_bio->state)) in sync_request_write()
2046 reschedule_retry(r1_bio); in sync_request_write()
2048 put_buf(r1_bio); in sync_request_write()
2148 static int narrow_write_error(struct r1bio *r1_bio, int i) in narrow_write_error() argument
2150 struct mddev *mddev = r1_bio->mddev; in narrow_write_error()
2168 int sect_to_write = r1_bio->sectors; in narrow_write_error()
2176 sector = r1_bio->sector; in narrow_write_error()
2187 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { in narrow_write_error()
2188 unsigned vcnt = r1_bio->behind_page_count; in narrow_write_error()
2189 struct bio_vec *vec = r1_bio->behind_bvecs; in narrow_write_error()
2201 wbio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev); in narrow_write_error()
2205 wbio->bi_iter.bi_sector = r1_bio->sector; in narrow_write_error()
2206 wbio->bi_iter.bi_size = r1_bio->sectors << 9; in narrow_write_error()
2208 bio_trim(wbio, sector - r1_bio->sector, sectors); in narrow_write_error()
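
narrow_write_error() retries a failed write in chunks aligned to the bad-block granularity, so only the sectors that genuinely fail get recorded as bad blocks and the rest of the write is salvaged. A sketch of the loop; the write-behind branch (lines 2187-2189, which builds the bio from behind_bvecs instead of cloning) is omitted, and submit_bio_wait()'s old two-argument form is assumed:

  while (sect_to_write) {
          struct bio *wbio;

          if (sectors > sect_to_write)
                  sectors = sect_to_write;

          wbio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
          wbio->bi_iter.bi_sector = r1_bio->sector;
          wbio->bi_iter.bi_size = r1_bio->sectors << 9;
          bio_trim(wbio, sector - r1_bio->sector, sectors);
          wbio->bi_iter.bi_sector += rdev->data_offset;
          wbio->bi_bdev = rdev->bdev;

          if (submit_bio_wait(WRITE, wbio) < 0)
                  /* this chunk really is bad: record it */
                  ok = rdev_set_badblocks(rdev, sector, sectors, 0)
                          && ok;

          bio_put(wbio);
          sect_to_write -= sectors;
          sector += sectors;
          sectors = block_sectors;        /* later chunks are aligned */
  }
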
2225 static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio) in handle_sync_write_finished() argument
2228 int s = r1_bio->sectors; in handle_sync_write_finished()
2231 struct bio *bio = r1_bio->bios[m]; in handle_sync_write_finished()
2235 test_bit(R1BIO_MadeGood, &r1_bio->state)) { in handle_sync_write_finished()
2236 rdev_clear_badblocks(rdev, r1_bio->sector, s, 0); in handle_sync_write_finished()
2239 test_bit(R1BIO_WriteError, &r1_bio->state)) { in handle_sync_write_finished()
2240 if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0)) in handle_sync_write_finished()
2244 put_buf(r1_bio); in handle_sync_write_finished()
2248 static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio) in handle_write_finished() argument
2253 if (r1_bio->bios[m] == IO_MADE_GOOD) { in handle_write_finished()
2256 r1_bio->sector, in handle_write_finished()
2257 r1_bio->sectors, 0); in handle_write_finished()
2259 } else if (r1_bio->bios[m] != NULL) { in handle_write_finished()
2265 if (!narrow_write_error(r1_bio, m)) { in handle_write_finished()
2269 set_bit(R1BIO_Degraded, &r1_bio->state); in handle_write_finished()
2276 list_add(&r1_bio->retry_list, &conf->bio_end_io_list); in handle_write_finished()
2281 if (test_bit(R1BIO_WriteError, &r1_bio->state)) in handle_write_finished()
2282 close_write(r1_bio); in handle_write_finished()
2283 raid_end_bio_io(r1_bio); in handle_write_finished()
2287 static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio) in handle_read_error() argument
2296 clear_bit(R1BIO_ReadError, &r1_bio->state); in handle_read_error()
2307 fix_read_error(conf, r1_bio->read_disk, in handle_read_error()
2308 r1_bio->sector, r1_bio->sectors); in handle_read_error()
2311 md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev); in handle_read_error()
2312 rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev); in handle_read_error()
2314 bio = r1_bio->bios[r1_bio->read_disk]; in handle_read_error()
2317 disk = read_balance(conf, r1_bio, &max_sectors); in handle_read_error()
2321 mdname(mddev), b, (unsigned long long)r1_bio->sector); in handle_read_error()
2322 raid_end_bio_io(r1_bio); in handle_read_error()
2325 = r1_bio->master_bio->bi_rw & REQ_SYNC; in handle_read_error()
2327 r1_bio->bios[r1_bio->read_disk] = in handle_read_error()
2331 r1_bio->read_disk = disk; in handle_read_error()
2332 bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev); in handle_read_error()
2333 bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector, in handle_read_error()
2335 r1_bio->bios[r1_bio->read_disk] = bio; in handle_read_error()
2341 (unsigned long long)r1_bio->sector, in handle_read_error()
2343 bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset; in handle_read_error()
2347 bio->bi_private = r1_bio; in handle_read_error()
2348 if (max_sectors < r1_bio->sectors) { in handle_read_error()
2350 struct bio *mbio = r1_bio->master_bio; in handle_read_error()
2351 int sectors_handled = (r1_bio->sector + max_sectors in handle_read_error()
2353 r1_bio->sectors = max_sectors; in handle_read_error()
2363 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); in handle_read_error()
2365 r1_bio->master_bio = mbio; in handle_read_error()
2366 r1_bio->sectors = bio_sectors(mbio) - sectors_handled; in handle_read_error()
2367 r1_bio->state = 0; in handle_read_error()
2368 set_bit(R1BIO_ReadError, &r1_bio->state); in handle_read_error()
2369 r1_bio->mddev = mddev; in handle_read_error()
2370 r1_bio->sector = mbio->bi_iter.bi_sector + in handle_read_error()
2382 struct r1bio *r1_bio; in raid1d() local
2402 r1_bio = list_first_entry(&tmp, struct r1bio, in raid1d()
2404 list_del(&r1_bio->retry_list); in raid1d()
2406 set_bit(R1BIO_Degraded, &r1_bio->state); in raid1d()
2407 if (test_bit(R1BIO_WriteError, &r1_bio->state)) in raid1d()
2408 close_write(r1_bio); in raid1d()
2409 raid_end_bio_io(r1_bio); in raid1d()
2423 r1_bio = list_entry(head->prev, struct r1bio, retry_list); in raid1d()
2428 mddev = r1_bio->mddev; in raid1d()
2430 if (test_bit(R1BIO_IsSync, &r1_bio->state)) { in raid1d()
2431 if (test_bit(R1BIO_MadeGood, &r1_bio->state) || in raid1d()
2432 test_bit(R1BIO_WriteError, &r1_bio->state)) in raid1d()
2433 handle_sync_write_finished(conf, r1_bio); in raid1d()
2435 sync_request_write(mddev, r1_bio); in raid1d()
2436 } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) || in raid1d()
2437 test_bit(R1BIO_WriteError, &r1_bio->state)) in raid1d()
2438 handle_write_finished(conf, r1_bio); in raid1d()
2439 else if (test_bit(R1BIO_ReadError, &r1_bio->state)) in raid1d()
2440 handle_read_error(conf, r1_bio); in raid1d()
2445 generic_make_request(r1_bio->bios[r1_bio->read_disk]); in raid1d()
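
The dispatch at the bottom of raid1d() (lines 2430-2445) classifies every retried r1bio by its state bits; assembled into one piece, it reads:

  if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
          if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
              test_bit(R1BIO_WriteError, &r1_bio->state))
                  handle_sync_write_finished(conf, r1_bio);
          else
                  sync_request_write(mddev, r1_bio);
  } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
             test_bit(R1BIO_WriteError, &r1_bio->state))
          handle_write_finished(conf, r1_bio);
  else if (test_bit(R1BIO_ReadError, &r1_bio->state))
          handle_read_error(conf, r1_bio);
  else
          /* assumed: the tail of a split read, resubmitted from here */
          generic_make_request(r1_bio->bios[r1_bio->read_disk]);
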
2481 struct r1bio *r1_bio; in sync_request() local
2543 r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO); in sync_request()
2557 r1_bio->mddev = mddev; in sync_request()
2558 r1_bio->sector = sector_nr; in sync_request()
2559 r1_bio->state = 0; in sync_request()
2560 set_bit(R1BIO_IsSync, &r1_bio->state); in sync_request()
2564 bio = r1_bio->bios[i]; in sync_request()
2621 bio->bi_private = r1_bio; in sync_request()
2627 r1_bio->read_disk = disk; in sync_request()
2635 if (r1_bio->bios[i]->bi_end_io == end_sync_write) { in sync_request()
2643 put_buf(r1_bio); in sync_request()
2677 put_buf(r1_bio); in sync_request()
2706 bio = r1_bio->bios[i]; in sync_request()
2714 bio = r1_bio->bios[i]; in sync_request()
2729 } while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES); in sync_request()
2731 r1_bio->sectors = nr_sectors; in sync_request()
2747 atomic_set(&r1_bio->remaining, read_targets); in sync_request()
2749 bio = r1_bio->bios[i]; in sync_request()
2757 atomic_set(&r1_bio->remaining, 1); in sync_request()
2758 bio = r1_bio->bios[r1_bio->read_disk]; in sync_request()
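
The two atomic_set() calls at the end of sync_request() (lines 2747 and 2757) show the two submission modes: a requested check/repair reads from every usable mirror so the copies can be compared, hence 'remaining' starts at read_targets; a normal resync reads only from read_disk and writes the others afterwards, hence it starts at 1. A sketch of the final submission, with the MD_RECOVERY_REQUESTED test assumed as the discriminator and the md_sync_acct() bookkeeping omitted:

  if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
          atomic_set(&r1_bio->remaining, read_targets);
          for (i = 0; i < conf->raid_disks * 2; i++) {
                  bio = r1_bio->bios[i];
                  if (bio->bi_end_io == end_sync_read)
                          generic_make_request(bio);
          }
  } else {
          atomic_set(&r1_bio->remaining, 1);
          bio = r1_bio->bios[r1_bio->read_disk];
          generic_make_request(bio);
  }
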