Lines matching refs:devs in drivers/md/raid10.c
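
Every hit below indexes the per-copy devs[] array that trails struct r10bio. For orientation, here is a paraphrased sketch of the relevant fields, modeled on drivers/md/raid10.h from the same era (stand-in typedefs, field selection, and comments are mine; details vary by kernel version):

typedef unsigned long long sector_t;	/* stand-in for the kernel type */
struct bio;				/* opaque in this sketch */
struct md_rdev;				/* opaque in this sketch */

struct r10bio {
	sector_t	sector;		/* virtual sector in the array */
	int		sectors;
	int		read_slot;	/* devs[] slot chosen for a READ */
	/* ... other fields omitted ... */
	struct r10dev {
		struct bio	*bio;		/* per-copy data bio */
		struct bio	*repl_bio;	/* bio aimed at a replacement device */
		struct md_rdev	*rdev;		/* member device used for reads */
		sector_t	addr;		/* physical sector on that device */
		int		devnum;		/* index into conf->mirrors[] */
	} devs[0];	/* one entry per copy; GNU zero-length array, sized at allocation */
};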

110 int size = offsetof(struct r10bio, devs[conf->copies]); in r10bio_pool_alloc()
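
This single hit is the standard sizing idiom for a struct with a trailing flexible array: offsetof(struct r10bio, devs[n]) yields the bytes needed for the header plus n per-copy slots. A minimal user-space illustration reusing the struct sketched above (r10bio_alloc is a hypothetical name; calloc stands in for the kernel's kzalloc):

#include <stdlib.h>
#include <stddef.h>

static struct r10bio *r10bio_alloc(int copies)
{
	/* Header plus 'copies' trailing r10dev slots, zeroed. */
	size_t size = offsetof(struct r10bio, devs[copies]);

	return calloc(1, size);
}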
163 r10_bio->devs[j].bio = bio; in r10buf_pool_alloc()
169 r10_bio->devs[j].repl_bio = bio; in r10buf_pool_alloc()
176 struct bio *rbio = r10_bio->devs[j].repl_bio; in r10buf_pool_alloc()
177 bio = r10_bio->devs[j].bio; in r10buf_pool_alloc()
183 struct bio *rbio = r10_bio->devs[0].bio; in r10buf_pool_alloc()
204 safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page); in r10buf_pool_alloc()
208 if (r10_bio->devs[j].bio) in r10buf_pool_alloc()
209 bio_put(r10_bio->devs[j].bio); in r10buf_pool_alloc()
210 if (r10_bio->devs[j].repl_bio) in r10buf_pool_alloc()
211 bio_put(r10_bio->devs[j].repl_bio); in r10buf_pool_alloc()
225 struct bio *bio = r10bio->devs[j].bio; in r10buf_pool_free()
233 bio = r10bio->devs[j].repl_bio; in r10buf_pool_free()
245 struct bio **bio = & r10_bio->devs[i].bio; in put_all_bios()
249 bio = &r10_bio->devs[i].repl_bio; in put_all_bios()
329 conf->mirrors[r10_bio->devs[slot].devnum].head_position = in update_head_pos()
330 r10_bio->devs[slot].addr + (r10_bio->sectors); in update_head_pos()
343 if (r10_bio->devs[slot].bio == bio) in find_bio_disk()
345 if (r10_bio->devs[slot].repl_bio == bio) { in find_bio_disk()
358 return r10_bio->devs[slot].devnum; in find_bio_disk()
370 dev = r10_bio->devs[slot].devnum; in raid10_end_read_request()
371 rdev = r10_bio->devs[slot].rdev; in raid10_end_read_request()
504 r10_bio->devs[slot].addr, in raid10_end_write_request()
509 r10_bio->devs[slot].repl_bio = IO_MADE_GOOD; in raid10_end_write_request()
511 r10_bio->devs[slot].bio = IO_MADE_GOOD; in raid10_end_write_request()
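
IO_MADE_GOOD here (and IO_BLOCKED in read_balance() and fix_read_error() below) are not real bio pointers but sentinel values parked in devs[slot].bio / repl_bio so the retry paths in raid10d() and handle_write_completed() can tell the outcomes apart. As drivers/md/raid10.h defines them (comments paraphrased):

/* Never dereferenced, only compared against devs[n].bio / repl_bio. */
#define IO_BLOCKED	((struct bio *)1)	/* slot must not be retried for now */
#define IO_MADE_GOOD	((struct bio *)2)	/* a write to a known bad block
						 * succeeded; clear the badblocks
						 * entry from process context */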
585 r10bio->devs[slot].devnum = d; in __raid10_find_phys()
586 r10bio->devs[slot].addr = s; in __raid10_find_phys()
603 r10bio->devs[slot].devnum = d; in __raid10_find_phys()
604 r10bio->devs[slot].addr = s; in __raid10_find_phys()
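
These two pairs of slot-filling stores are the heart of the geometry code: __raid10_find_phys() expands one virtual sector into a (devnum, addr) pair per copy, the first loop covering near copies and the second far copies. Below is a simplified, user-space rendering for the near-copy layout only (far/offset handling omitted; the function name and demo values are mine):

#include <stdio.h>

typedef unsigned long long sector_t;

/* Which disk and physical sector hold each near copy of virtual sector
 * 'vsect'? Chunk size is 1 << chunk_shift sectors. */
static void find_phys_near(sector_t vsect, int raid_disks, int near_copies,
			   int chunk_shift, int devnum[], sector_t addr[])
{
	sector_t chunk_mask = ((sector_t)1 << chunk_shift) - 1;
	sector_t chunk	= (vsect >> chunk_shift) * near_copies;
	sector_t stripe = chunk / raid_disks;
	int	 dev	= (int)(chunk % raid_disks);
	sector_t psect	= (vsect & chunk_mask) + (stripe << chunk_shift);

	for (int slot = 0; slot < near_copies; slot++) {
		devnum[slot] = dev;		/* plays devs[slot].devnum */
		addr[slot]   = psect;		/* plays devs[slot].addr */
		if (++dev >= raid_disks) {	/* copies wrap to the next row */
			dev = 0;
			psect += chunk_mask + 1;
		}
	}
}

int main(void)
{
	int devnum[2];
	sector_t addr[2];

	/* 4 disks, 2 near copies, 64 KiB chunks (128 sectors). */
	find_phys_near(300, 4, 2, 7, devnum, addr);
	for (int i = 0; i < 2; i++)
		printf("copy %d -> disk %d, sector %llu\n", i, devnum[i], addr[i]);
	return 0;
}

For virtual sector 300 this prints disk 0 and disk 1, both at physical sector 172: with each chunk stored near_copies times, virtual chunk 2 lands on the second stripe row.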
716 struct r10dev devs[conf->copies]; in raid10_mergeable_bvec() member
730 int disk = r10_bio->devs[s].devnum; in raid10_mergeable_bvec()
737 bvm->bi_sector = r10_bio->devs[s].addr in raid10_mergeable_bvec()
749 bvm->bi_sector = r10_bio->devs[s].addr in raid10_mergeable_bvec()
819 if (r10_bio->devs[slot].bio == IO_BLOCKED) in read_balance()
821 disk = r10_bio->devs[slot].devnum; in read_balance()
825 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
832 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
835 dev_sector = r10_bio->devs[slot].addr; in read_balance()
879 new_distance = r10_bio->devs[slot].addr; in read_balance()
881 new_distance = abs(r10_bio->devs[slot].addr - in read_balance()
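
The addr reads at lines 879-881 feed the seek-distance heuristic: read_balance() prefers the copy whose disk head (tracked by update_head_pos() above) sits closest to the target sector, except that far layouts simply prefer the lowest address. A hypothetical distillation of just that comparison (the kernel also weighs pending I/O, bad blocks, and recovery state, all skipped here):

#include <stdlib.h>	/* llabs */

typedef unsigned long long sector_t;

static int pick_read_slot(int copies, int far_copies,
			  const sector_t addr[], const sector_t head_pos[])
{
	long long best = -1;
	int best_slot = -1;

	for (int slot = 0; slot < copies; slot++) {
		long long dist;

		if (far_copies > 1)	/* far layout: lowest address wins */
			dist = (long long)addr[slot];
		else			/* near layout: shortest seek wins */
			dist = llabs((long long)addr[slot] -
				     (long long)head_pos[slot]);
		if (best_slot < 0 || dist < best) {
			best = dist;
			best_slot = slot;
		}
	}
	return best_slot;
}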
1241 r10_bio->devs[slot].bio = read_bio; in __make_request()
1242 r10_bio->devs[slot].rdev = rdev; in __make_request()
1244 read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + in __make_request()
1313 int d = r10_bio->devs[i].devnum; in __make_request()
1336 r10_bio->devs[i].bio = NULL; in __make_request()
1337 r10_bio->devs[i].repl_bio = NULL; in __make_request()
1345 sector_t dev_sector = r10_bio->devs[i].addr; in __make_request()
1386 r10_bio->devs[i].bio = bio; in __make_request()
1390 r10_bio->devs[i].repl_bio = bio; in __make_request()
1402 if (r10_bio->devs[j].bio) { in __make_request()
1403 d = r10_bio->devs[j].devnum; in __make_request()
1406 if (r10_bio->devs[j].repl_bio) { in __make_request()
1408 d = r10_bio->devs[j].devnum; in __make_request()
1444 int d = r10_bio->devs[i].devnum; in __make_request()
1445 if (r10_bio->devs[i].bio) { in __make_request()
1450 r10_bio->devs[i].bio = mbio; in __make_request()
1452 mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+ in __make_request()
1483 if (r10_bio->devs[i].repl_bio) { in __make_request()
1493 r10_bio->devs[i].repl_bio = mbio; in __make_request()
1495 mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr + in __make_request()
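
Most of the __make_request() hits are the write fan-out: the master bio is cloned once per copy (bio_clone_mddev() in this era), parked in devs[i].bio or devs[i].repl_bio, and aimed at that copy's physical address plus the member device's data offset (choose_data_offset() picks old vs. new offsets during reshape). A hypothetical, type-simplified sketch of just the addressing step:

typedef unsigned long long sector_t;

struct fake_bio {		/* stand-in for struct bio */
	sector_t bi_sector;	/* bi_iter.bi_sector in the real struct */
};

static void fan_out_writes(const struct fake_bio *master, int copies,
			   const sector_t addr[], const sector_t data_offset[],
			   struct fake_bio clones[])
{
	for (int i = 0; i < copies; i++) {
		clones[i] = *master;	/* the kernel clones the real bio */
		clones[i].bi_sector = addr[i] + data_offset[i];
	}
}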
2010 r10_bio->devs[slot].addr, in end_sync_write()
2047 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) in sync_request_write()
2054 fbio = r10_bio->devs[i].bio; in sync_request_write()
2061 tbio = r10_bio->devs[i].bio; in sync_request_write()
2067 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) { in sync_request_write()
2101 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; in sync_request_write()
2113 d = r10_bio->devs[i].devnum; in sync_request_write()
2129 tbio = r10_bio->devs[i].repl_bio; in sync_request_write()
2132 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write in sync_request_write()
2133 && r10_bio->devs[i].bio != fbio) in sync_request_write()
2138 d = r10_bio->devs[i].devnum; in sync_request_write()
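
sync_request_write() promotes the first copy whose read completed (BIO_UPTODATE) to the reference fbio, then schedules a rewrite of every copy that failed or miscompares against it. A hypothetical user-space gist of that selection, with flat buffers standing in for bio pages:

#include <string.h>
#include <stddef.h>

/* Bitmask of copies that must be rewritten from the first up-to-date
 * copy; 0 if nothing was readable (the real code then gives up). */
static unsigned int copies_to_rewrite(int copies, const int uptodate[],
				      char *const data[], size_t len)
{
	unsigned int mask = 0;
	int ref = -1;

	for (int i = 0; i < copies; i++)
		if (uptodate[i]) {
			ref = i;	/* plays the role of fbio */
			break;
		}
	if (ref < 0)
		return 0;
	for (int i = 0; i < copies; i++) {
		if (i == ref)
			continue;
		if (!uptodate[i] || memcmp(data[ref], data[i], len) != 0)
			mask |= 1u << i;
	}
	return mask;
}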
2173 struct bio *bio = r10_bio->devs[0].bio; in fix_recovery_read_error()
2177 int dr = r10_bio->devs[0].devnum; in fix_recovery_read_error()
2178 int dw = r10_bio->devs[1].devnum; in fix_recovery_read_error()
2190 addr = r10_bio->devs[0].addr + sect, in fix_recovery_read_error()
2198 addr = r10_bio->devs[1].addr + sect; in fix_recovery_read_error()
2222 addr = r10_bio->devs[1].addr + sect; in fix_recovery_read_error()
2262 d = r10_bio->devs[1].devnum; in recovery_request_write()
2263 wbio = r10_bio->devs[1].bio; in recovery_request_write()
2264 wbio2 = r10_bio->devs[1].repl_bio; in recovery_request_write()
2359 int d = r10_bio->devs[r10_bio->read_slot].devnum; in fix_read_error()
2386 r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED; in fix_read_error()
2404 d = r10_bio->devs[sl].devnum; in fix_read_error()
2409 is_badblock(rdev, r10_bio->devs[sl].addr + sect, s, in fix_read_error()
2414 r10_bio->devs[sl].addr + in fix_read_error()
2434 int dn = r10_bio->devs[r10_bio->read_slot].devnum; in fix_read_error()
2439 r10_bio->devs[r10_bio->read_slot].addr in fix_read_error()
2443 r10_bio->devs[r10_bio->read_slot].bio in fix_read_error()
2458 d = r10_bio->devs[sl].devnum; in fix_read_error()
2468 r10_bio->devs[sl].addr + in fix_read_error()
2498 d = r10_bio->devs[sl].devnum; in fix_read_error()
2507 r10_bio->devs[sl].addr + in fix_read_error()
2554 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; in narrow_write_error()
2589 wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+ in narrow_write_error()
2612 struct md_rdev *rdev = r10_bio->devs[slot].rdev; in handle_read_error()
2625 bio = r10_bio->devs[slot].bio; in handle_read_error()
2628 r10_bio->devs[slot].bio = NULL; in handle_read_error()
2635 r10_bio->devs[slot].bio = IO_BLOCKED; in handle_read_error()
2662 r10_bio->devs[slot].bio = bio; in handle_read_error()
2663 r10_bio->devs[slot].rdev = rdev; in handle_read_error()
2664 bio->bi_iter.bi_sector = r10_bio->devs[slot].addr in handle_read_error()
2715 int dev = r10_bio->devs[m].devnum; in handle_write_completed()
2717 if (r10_bio->devs[m].bio == NULL) in handle_write_completed()
2720 &r10_bio->devs[m].bio->bi_flags)) { in handle_write_completed()
2723 r10_bio->devs[m].addr, in handle_write_completed()
2728 r10_bio->devs[m].addr, in handle_write_completed()
2733 if (r10_bio->devs[m].repl_bio == NULL) in handle_write_completed()
2736 &r10_bio->devs[m].repl_bio->bi_flags)) { in handle_write_completed()
2739 r10_bio->devs[m].addr, in handle_write_completed()
2744 r10_bio->devs[m].addr, in handle_write_completed()
2752 int dev = r10_bio->devs[m].devnum; in handle_write_completed()
2753 struct bio *bio = r10_bio->devs[m].bio; in handle_write_completed()
2758 r10_bio->devs[m].addr, in handle_write_completed()
2770 bio = r10_bio->devs[m].repl_bio; in handle_write_completed()
2775 r10_bio->devs[m].addr, in handle_write_completed()
2831 generic_make_request(r10_bio->devs[slot].bio); in raid10d()
3093 int d = r10_bio->devs[j].devnum; in sync_request()
3104 sector = r10_bio->devs[j].addr; in sync_request()
3118 bio = r10_bio->devs[0].bio; in sync_request()
3125 from_addr = r10_bio->devs[j].addr; in sync_request()
3133 if (r10_bio->devs[k].devnum == i) in sync_request()
3136 to_addr = r10_bio->devs[k].addr; in sync_request()
3137 r10_bio->devs[0].devnum = d; in sync_request()
3138 r10_bio->devs[0].addr = from_addr; in sync_request()
3139 r10_bio->devs[1].devnum = i; in sync_request()
3140 r10_bio->devs[1].addr = to_addr; in sync_request()
3144 bio = r10_bio->devs[1].bio; in sync_request()
3156 r10_bio->devs[1].bio->bi_end_io = NULL; in sync_request()
3159 bio = r10_bio->devs[1].repl_bio; in sync_request()
3195 if (r10_bio->devs[k].devnum == i) in sync_request()
3201 r10_bio->devs[k].addr, in sync_request()
3207 r10_bio->devs[k].addr, in sync_request()
3267 int d = r10_bio->devs[i].devnum; in sync_request()
3271 if (r10_bio->devs[i].repl_bio) in sync_request()
3272 r10_bio->devs[i].repl_bio->bi_end_io = NULL; in sync_request()
3274 bio = r10_bio->devs[i].bio; in sync_request()
3280 sector = r10_bio->devs[i].addr; in sync_request()
3311 bio = r10_bio->devs[i].repl_bio; in sync_request()
3315 sector = r10_bio->devs[i].addr; in sync_request()
3330 int d = r10_bio->devs[i].devnum; in sync_request()
3331 if (r10_bio->devs[i].bio->bi_end_io) in sync_request()
3334 if (r10_bio->devs[i].repl_bio && in sync_request()
3335 r10_bio->devs[i].repl_bio->bi_end_io) in sync_request()
3870 static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs) in raid10_takeover_raid0() argument
3880 sector_div(size, devs); in raid10_takeover_raid0()
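
sector_div() at line 3880 is the kernel's 32-bit-safe 64-bit divide: it divides its first argument in place and returns the remainder, so the takeover path reduces the total size to per-device sectors. A user-space stand-in (the real macro takes the lvalue itself, not a pointer):

typedef unsigned long long sector_t;

static inline unsigned int sector_div_standin(sector_t *s, unsigned int div)
{
	unsigned int rem = (unsigned int)(*s % div);

	*s /= div;
	return rem;
}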
4389 read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr in reshape_request()
4399 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; in reshape_request()
4409 int d = r10_bio->devs[s/2].devnum; in reshape_request()
4413 b = r10_bio->devs[s/2].repl_bio; in reshape_request()
4416 b = r10_bio->devs[s/2].bio; in reshape_request()
4423 b->bi_iter.bi_sector = r10_bio->devs[s/2].addr + in reshape_request()
4436 struct page *page = r10_bio->devs[0].bio->bi_io_vec[s/(PAGE_SIZE>>9)].bv_page; in reshape_request()
4509 int d = r10_bio->devs[s/2].devnum; in reshape_request_write()
4513 b = r10_bio->devs[s/2].repl_bio; in reshape_request_write()
4516 b = r10_bio->devs[s/2].bio; in reshape_request_write()
4563 struct r10dev devs[conf->copies]; in handle_reshape_read_error() member
4582 int d = r10b->devs[slot].devnum; in handle_reshape_read_error()
4590 addr = r10b->devs[slot].addr + idx * PAGE_SIZE; in handle_reshape_read_error()
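
Finally, the two "member" hits (lines 716 and 4563) are the same trick: because struct r10bio ends in a zero-length devs[] array, an on-stack instance needs a wrapper struct with a variable-length member to reserve the per-copy slots. A compile-only sketch under the struct layout assumed at the top of this listing (GNU C, as in the kernel; the function name is hypothetical):

void on_stack_r10bio_example(int copies)
{
	struct {
		struct r10bio r10_bio;
		struct r10dev devs[copies];	/* backs r10_bio.devs[0..copies-1] */
	} on_stack;
	struct r10bio *r10b = &on_stack.r10_bio;

	r10b->devs[0].devnum = -1;	/* lands in on_stack.devs[] storage */
	(void)r10b;
}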