Lines matching refs:devs (cross-reference hits in drivers/md/raid10.c, shown with their source line number and enclosing function)

111 	int size = offsetof(struct r10bio, devs[conf->copies]);  in r10bio_pool_alloc()
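The single hit in r10bio_pool_alloc() is worth pausing on: devs[] is the trailing per-copy array of struct r10bio, and offsetof(struct r10bio, devs[conf->copies]) sizes one allocation holding the header plus a slot per copy. A minimal userspace sketch of the same idiom; the struct and field names here are illustrative stand-ins, not the kernel's:

```c
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

/* Stand-ins for r10bio and its per-copy slots; the real structs
 * carry much more state. */
struct dev_slot {
	int devnum;
	long addr;
};

struct r10bio_like {
	long sectors;
	struct dev_slot devs[];	/* flexible array member, one slot per copy */
};

int main(void)
{
	int copies = 2;
	/* Same sizing trick as r10bio_pool_alloc(): header plus 'copies'
	 * trailing slots in a single allocation. The runtime index inside
	 * offsetof() is a GCC/Clang extension the kernel relies on. */
	size_t size = offsetof(struct r10bio_like, devs[copies]);
	struct r10bio_like *rb = calloc(1, size);

	if (!rb)
		return 1;
	for (int i = 0; i < copies; i++)
		rb->devs[i].devnum = i;
	printf("allocated %zu bytes for %d copies\n", size, copies);
	free(rb);
	return 0;
}
```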
164 r10_bio->devs[j].bio = bio; in r10buf_pool_alloc()
170 r10_bio->devs[j].repl_bio = bio; in r10buf_pool_alloc()
177 struct bio *rbio = r10_bio->devs[j].repl_bio; in r10buf_pool_alloc()
178 bio = r10_bio->devs[j].bio; in r10buf_pool_alloc()
184 struct bio *rbio = r10_bio->devs[0].bio; in r10buf_pool_alloc()
205 safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page); in r10buf_pool_alloc()
209 if (r10_bio->devs[j].bio) in r10buf_pool_alloc()
210 bio_put(r10_bio->devs[j].bio); in r10buf_pool_alloc()
211 if (r10_bio->devs[j].repl_bio) in r10buf_pool_alloc()
212 bio_put(r10_bio->devs[j].repl_bio); in r10buf_pool_alloc()
226 struct bio *bio = r10bio->devs[j].bio; in r10buf_pool_free()
234 bio = r10bio->devs[j].repl_bio; in r10buf_pool_free()
246 struct bio **bio = & r10_bio->devs[i].bio; in put_all_bios()
250 bio = &r10_bio->devs[i].repl_bio; in put_all_bios()
330 conf->mirrors[r10_bio->devs[slot].devnum].head_position = in update_head_pos()
331 r10_bio->devs[slot].addr + (r10_bio->sectors); in update_head_pos()
344 if (r10_bio->devs[slot].bio == bio) in find_bio_disk()
346 if (r10_bio->devs[slot].repl_bio == bio) { in find_bio_disk()
359 return r10_bio->devs[slot].devnum; in find_bio_disk()
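The update_head_pos()/find_bio_disk() hits show how a completed bio is mapped back to its slot: the slots are scanned and the bio pointer is compared against both the primary bio and repl_bio. A hedged sketch of that scan with toy types, mirroring the shape of find_bio_disk() rather than its exact code:

```c
#include <stdio.h>

/* Toy slot type; the kernel's r10dev also carries devnum and addr. */
struct slot {
	void *bio;
	void *repl_bio;
};

/* Scan the slots for a finished bio, flagging whether it was the
 * write to the replacement device. */
static int find_slot(struct slot *devs, int copies,
		     const void *bio, int *replacement)
{
	for (int s = 0; s < copies; s++) {
		if (devs[s].bio == bio) {
			*replacement = 0;
			return s;
		}
		if (devs[s].repl_bio == bio) {
			*replacement = 1;
			return s;
		}
	}
	return -1;	/* not found: the caller treats this as a bug */
}

int main(void)
{
	struct slot devs[2] = {
		{ (void *)1, (void *)2 },
		{ (void *)3, (void *)4 },
	};
	int repl = 0;
	int s = find_slot(devs, 2, (void *)4, &repl);

	printf("slot=%d replacement=%d\n", s, repl);	/* slot=1 replacement=1 */
	return 0;
}
```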
371 dev = r10_bio->devs[slot].devnum; in raid10_end_read_request()
372 rdev = r10_bio->devs[slot].rdev; in raid10_end_read_request()
504 r10_bio->devs[slot].addr, in raid10_end_write_request()
509 r10_bio->devs[slot].repl_bio = IO_MADE_GOOD; in raid10_end_write_request()
511 r10_bio->devs[slot].bio = IO_MADE_GOOD; in raid10_end_write_request()
585 r10bio->devs[slot].devnum = d; in __raid10_find_phys()
586 r10bio->devs[slot].addr = s; in __raid10_find_phys()
603 r10bio->devs[slot].devnum = d; in __raid10_find_phys()
604 r10bio->devs[slot].addr = s; in __raid10_find_phys()
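The paired stores of devnum and addr are __raid10_find_phys() laying out one (device, device-offset) pair per copy. For the common near layout the arithmetic reduces to consecutive devices holding the same device address; a hedged simplification follows, with far and offset layouts and the within-chunk offset omitted:

```c
#include <stdio.h>

struct phys {
	int devnum;
	long addr;	/* sector offset on that device */
};

/* Near-layout mapping: physical chunk spread = logical chunk * copies,
 * copy 0 lands on device (spread % raid_disks), later copies on the
 * next devices around the ring, all at the same device offset. */
static void find_phys_near(long chunk, long chunk_sectors,
			   int raid_disks, int near_copies, struct phys *out)
{
	long spread = chunk * near_copies;
	long stripe = spread / raid_disks;

	for (int slot = 0; slot < near_copies; slot++) {
		out[slot].devnum = (spread + slot) % raid_disks;
		out[slot].addr = stripe * chunk_sectors;
	}
}

int main(void)
{
	struct phys copies[2];

	find_phys_near(5, 1024, 4, 2, copies);	/* chunk 5, 4 disks, 2 copies */
	for (int i = 0; i < 2; i++)
		printf("copy %d: dev %d addr %ld\n",
		       i, copies[i].devnum, copies[i].addr);
	return 0;
}
```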
732 if (r10_bio->devs[slot].bio == IO_BLOCKED) in read_balance()
734 disk = r10_bio->devs[slot].devnum; in read_balance()
737 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
743 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
746 dev_sector = r10_bio->devs[slot].addr; in read_balance()
790 new_distance = r10_bio->devs[slot].addr; in read_balance()
792 new_distance = abs(r10_bio->devs[slot].addr - in read_balance()
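The two new_distance hits are the tail of the seek heuristic in read_balance(): in some layouts the raw address stands in for distance, otherwise it is the gap from the device's last recorded head position. A toy version of the closest-head selection; the real function also filters out failed, rebuilding and bad-block-covered copies first:

```c
#include <stdio.h>
#include <stdlib.h>

struct phys {
	int devnum;
	long addr;
};

/* Choose the copy whose device head is closest to the target sector,
 * in the spirit of read_balance(). head_position[] plays the role of
 * conf->mirrors[].head_position. */
static int pick_read_slot(const struct phys *devs, int copies,
			  const long *head_position)
{
	long best = -1;
	int best_slot = -1;

	for (int slot = 0; slot < copies; slot++) {
		long dist = labs(devs[slot].addr -
				 head_position[devs[slot].devnum]);
		if (best_slot < 0 || dist < best) {
			best = dist;
			best_slot = slot;
		}
	}
	return best_slot;
}

int main(void)
{
	struct phys devs[2] = { { 0, 5000 }, { 1, 5000 } };
	long head[2] = { 100, 4900 };	/* device 1 is already nearby */

	printf("read from slot %d\n", pick_read_slot(devs, 2, head));
	return 0;
}
```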
1152 r10_bio->devs[slot].bio = read_bio; in __make_request()
1153 r10_bio->devs[slot].rdev = rdev; in __make_request()
1155 read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + in __make_request()
1224 int d = r10_bio->devs[i].devnum; in __make_request()
1245 r10_bio->devs[i].bio = NULL; in __make_request()
1246 r10_bio->devs[i].repl_bio = NULL; in __make_request()
1254 sector_t dev_sector = r10_bio->devs[i].addr; in __make_request()
1295 r10_bio->devs[i].bio = bio; in __make_request()
1299 r10_bio->devs[i].repl_bio = bio; in __make_request()
1311 if (r10_bio->devs[j].bio) { in __make_request()
1312 d = r10_bio->devs[j].devnum; in __make_request()
1315 if (r10_bio->devs[j].repl_bio) { in __make_request()
1317 d = r10_bio->devs[j].devnum; in __make_request()
1353 int d = r10_bio->devs[i].devnum; in __make_request()
1354 if (r10_bio->devs[i].bio) { in __make_request()
1359 r10_bio->devs[i].bio = mbio; in __make_request()
1361 mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+ in __make_request()
1392 if (r10_bio->devs[i].repl_bio) { in __make_request()
1402 r10_bio->devs[i].repl_bio = mbio; in __make_request()
1404 mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr + in __make_request()
1904 r10_bio->devs[slot].addr, in end_sync_write()
1941 if (!r10_bio->devs[i].bio->bi_error) in sync_request_write()
1948 fbio = r10_bio->devs[i].bio; in sync_request_write()
1957 tbio = r10_bio->devs[i].bio; in sync_request_write()
1963 if (!r10_bio->devs[i].bio->bi_error) { in sync_request_write()
1997 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; in sync_request_write()
2002 d = r10_bio->devs[i].devnum; in sync_request_write()
2018 tbio = r10_bio->devs[i].repl_bio; in sync_request_write()
2021 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write in sync_request_write()
2022 && r10_bio->devs[i].bio != fbio) in sync_request_write()
2024 d = r10_bio->devs[i].devnum; in sync_request_write()
2059 struct bio *bio = r10_bio->devs[0].bio; in fix_recovery_read_error()
2063 int dr = r10_bio->devs[0].devnum; in fix_recovery_read_error()
2064 int dw = r10_bio->devs[1].devnum; in fix_recovery_read_error()
2076 addr = r10_bio->devs[0].addr + sect, in fix_recovery_read_error()
2084 addr = r10_bio->devs[1].addr + sect; in fix_recovery_read_error()
2108 addr = r10_bio->devs[1].addr + sect; in fix_recovery_read_error()
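Throughout fix_recovery_read_error(), devs[0] is the copy being read and devs[1] the device being rebuilt, with the same sect offset applied to both base addresses. A toy model of that copy loop over in-memory "devices"; the kernel does the transfers with sync_page_io() and records bad blocks when a write fails:

```c
#include <stdio.h>
#include <string.h>

#define SECTOR	512
#define NSECT	64

/* In-memory stand-ins for the two rdevs involved in recovery. */
static unsigned char source[NSECT][SECTOR];	/* devs[0]: good copy   */
static unsigned char target[NSECT][SECTOR];	/* devs[1]: being built */

/* Copy 'sectors' sectors, applying the same sect offset to the source
 * and target base addresses, as fix_recovery_read_error() does. */
static void recover(long from_addr, long to_addr, long sectors)
{
	for (long sect = 0; sect < sectors; sect++)
		memcpy(target[to_addr + sect], source[from_addr + sect],
		       SECTOR);
}

int main(void)
{
	memset(source, 0xab, sizeof(source));
	recover(8, 8, 16);			/* rebuild sectors 8..23 */
	printf("sector 8 byte 0: %02x\n", target[8][0]);
	return 0;
}
```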
2148 d = r10_bio->devs[1].devnum; in recovery_request_write()
2149 wbio = r10_bio->devs[1].bio; in recovery_request_write()
2150 wbio2 = r10_bio->devs[1].repl_bio; in recovery_request_write()
2245 int d = r10_bio->devs[r10_bio->read_slot].devnum; in fix_read_error()
2272 r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED; in fix_read_error()
2290 d = r10_bio->devs[sl].devnum; in fix_read_error()
2294 is_badblock(rdev, r10_bio->devs[sl].addr + sect, s, in fix_read_error()
2299 r10_bio->devs[sl].addr + in fix_read_error()
2319 int dn = r10_bio->devs[r10_bio->read_slot].devnum; in fix_read_error()
2324 r10_bio->devs[r10_bio->read_slot].addr in fix_read_error()
2328 r10_bio->devs[r10_bio->read_slot].bio in fix_read_error()
2343 d = r10_bio->devs[sl].devnum; in fix_read_error()
2352 r10_bio->devs[sl].addr + in fix_read_error()
2382 d = r10_bio->devs[sl].devnum; in fix_read_error()
2391 r10_bio->devs[sl].addr + in fix_read_error()
2438 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; in narrow_write_error()
2473 wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+ in narrow_write_error()
2496 struct md_rdev *rdev = r10_bio->devs[slot].rdev; in handle_read_error()
2509 bio = r10_bio->devs[slot].bio; in handle_read_error()
2512 r10_bio->devs[slot].bio = NULL; in handle_read_error()
2519 r10_bio->devs[slot].bio = IO_BLOCKED; in handle_read_error()
2546 r10_bio->devs[slot].bio = bio; in handle_read_error()
2547 r10_bio->devs[slot].rdev = rdev; in handle_read_error()
2548 bio->bi_iter.bi_sector = r10_bio->devs[slot].addr in handle_read_error()
2599 int dev = r10_bio->devs[m].devnum; in handle_write_completed()
2601 if (r10_bio->devs[m].bio == NULL) in handle_write_completed()
2603 if (!r10_bio->devs[m].bio->bi_error) { in handle_write_completed()
2606 r10_bio->devs[m].addr, in handle_write_completed()
2611 r10_bio->devs[m].addr, in handle_write_completed()
2616 if (r10_bio->devs[m].repl_bio == NULL) in handle_write_completed()
2619 if (!r10_bio->devs[m].repl_bio->bi_error) { in handle_write_completed()
2622 r10_bio->devs[m].addr, in handle_write_completed()
2627 r10_bio->devs[m].addr, in handle_write_completed()
2636 int dev = r10_bio->devs[m].devnum; in handle_write_completed()
2637 struct bio *bio = r10_bio->devs[m].bio; in handle_write_completed()
2642 r10_bio->devs[m].addr, in handle_write_completed()
2654 bio = r10_bio->devs[m].repl_bio; in handle_write_completed()
2659 r10_bio->devs[m].addr, in handle_write_completed()
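handle_write_completed() reads as two passes over the slots: a write that succeeded clears any bad-block record at devs[m].addr, a failed one sets a record there instead (or fails the device if it cannot). A toy bad-block table showing that clear-on-success, set-on-failure shape; the table itself is a hypothetical stand-in for the kernel's badblocks machinery:

```c
#include <stdbool.h>
#include <stdio.h>

#define MAX_BAD	32

struct bad_range { long addr; long len; };
static struct bad_range bad[MAX_BAD];
static int nbad;

static void set_badblocks(long addr, long len)
{
	if (nbad < MAX_BAD)
		bad[nbad++] = (struct bad_range){ addr, len };
}

static void clear_badblocks(long addr, long len)
{
	for (int i = 0; i < nbad; i++)
		if (bad[i].addr == addr && bad[i].len == len) {
			bad[i] = bad[--nbad];	/* swap-remove */
			i--;			/* recheck the swapped-in slot */
		}
}

/* Completion bookkeeping in the spirit of handle_write_completed():
 * a clean rewrite repairs the region, a failed one marks it. */
static void write_completed(bool error, long addr, long sectors)
{
	if (!error)
		clear_badblocks(addr, sectors);
	else
		set_badblocks(addr, sectors);
}

int main(void)
{
	write_completed(true, 100, 8);		/* write failed: remember range */
	write_completed(false, 100, 8);		/* rewrite worked: forget it    */
	printf("%d bad ranges\n", nbad);	/* 0 */
	return 0;
}
```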
2748 generic_make_request(r10_bio->devs[slot].bio); in raid10d()
3010 int d = r10_bio->devs[j].devnum; in sync_request()
3021 sector = r10_bio->devs[j].addr; in sync_request()
3035 bio = r10_bio->devs[0].bio; in sync_request()
3042 from_addr = r10_bio->devs[j].addr; in sync_request()
3050 if (r10_bio->devs[k].devnum == i) in sync_request()
3053 to_addr = r10_bio->devs[k].addr; in sync_request()
3054 r10_bio->devs[0].devnum = d; in sync_request()
3055 r10_bio->devs[0].addr = from_addr; in sync_request()
3056 r10_bio->devs[1].devnum = i; in sync_request()
3057 r10_bio->devs[1].addr = to_addr; in sync_request()
3061 bio = r10_bio->devs[1].bio; in sync_request()
3073 r10_bio->devs[1].bio->bi_end_io = NULL; in sync_request()
3076 bio = r10_bio->devs[1].repl_bio; in sync_request()
3112 if (r10_bio->devs[k].devnum == i) in sync_request()
3118 r10_bio->devs[k].addr, in sync_request()
3124 r10_bio->devs[k].addr, in sync_request()
3184 int d = r10_bio->devs[i].devnum; in sync_request()
3188 if (r10_bio->devs[i].repl_bio) in sync_request()
3189 r10_bio->devs[i].repl_bio->bi_end_io = NULL; in sync_request()
3191 bio = r10_bio->devs[i].bio; in sync_request()
3197 sector = r10_bio->devs[i].addr; in sync_request()
3228 bio = r10_bio->devs[i].repl_bio; in sync_request()
3232 sector = r10_bio->devs[i].addr; in sync_request()
3247 int d = r10_bio->devs[i].devnum; in sync_request()
3248 if (r10_bio->devs[i].bio->bi_end_io) in sync_request()
3251 if (r10_bio->devs[i].repl_bio && in sync_request()
3252 r10_bio->devs[i].repl_bio->bi_end_io) in sync_request()
3798 static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs) in raid10_takeover_raid0() argument
3808 sector_div(size, devs); in raid10_takeover_raid0()
4317 read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr in reshape_request()
4327 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; in reshape_request()
4337 int d = r10_bio->devs[s/2].devnum; in reshape_request()
4341 b = r10_bio->devs[s/2].repl_bio; in reshape_request()
4344 b = r10_bio->devs[s/2].bio; in reshape_request()
4351 b->bi_iter.bi_sector = r10_bio->devs[s/2].addr + in reshape_request()
4364 struct page *page = r10_bio->devs[0].bio->bi_io_vec[s/(PAGE_SIZE>>9)].bv_page; in reshape_request()
4437 int d = r10_bio->devs[s/2].devnum; in reshape_request_write()
4441 b = r10_bio->devs[s/2].repl_bio; in reshape_request_write()
4444 b = r10_bio->devs[s/2].bio; in reshape_request_write()
4491 struct r10dev devs[conf->copies]; in handle_reshape_read_error() member
4510 int d = r10b->devs[slot].devnum; in handle_reshape_read_error()
4518 addr = r10b->devs[slot].addr + idx * PAGE_SIZE; in handle_reshape_read_error()
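The final group is the one structural oddity in the listing: handle_reshape_read_error() needs a throwaway r10bio, so it builds one on the stack with a run-time-sized devs[] tail. That relies on GCC's variable-length-array-in-struct extension, which clang rejects; later kernels moved this off the stack partly for that reason. A hedged sketch of the on-stack idiom, GCC-only:

```c
#include <stdio.h>

struct r10dev_like {
	int devnum;
	long addr;
};

static long sum_addrs(int copies)
{
	/* Locally defined struct whose tail array is sized at run time,
	 * mirroring the on-stack r10bio in handle_reshape_read_error(). */
	struct {
		long sector;
		struct r10dev_like devs[copies];
	} r10b;
	long sum = 0;

	for (int slot = 0; slot < copies; slot++) {
		r10b.devs[slot].devnum = slot;
		r10b.devs[slot].addr = slot * 8;
		sum += r10b.devs[slot].addr;
	}
	return sum;
}

int main(void)
{
	printf("%ld\n", sum_addrs(4));	/* 0+8+16+24 = 48 */
	return 0;
}
```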