Lines Matching refs:sector

411 (unsigned long long)r10_bio->sector); in raid10_end_read_request()
420 bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, in close_write()
555 sector_t sector; in __raid10_find_phys() local
569 chunk = r10bio->sector >> geo->chunk_shift; in __raid10_find_phys()
570 sector = r10bio->sector & geo->chunk_mask; in __raid10_find_phys()
578 sector += stripe << geo->chunk_shift; in __raid10_find_phys()
584 sector_t s = sector; in __raid10_find_phys()
610 sector += (geo->chunk_mask + 1); in __raid10_find_phys()
620 ((r10bio->sector >= conf->reshape_progress) != in raid10_find_phys()
630 static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev) in raid10_find_virt() argument
652 offset = sector & geo->chunk_mask; in raid10_find_virt()
655 chunk = sector >> geo->chunk_shift; in raid10_find_virt()
661 while (sector >= geo->stride) { in raid10_find_virt()
662 sector -= geo->stride; in raid10_find_virt()
668 chunk = sector >> geo->chunk_shift; in raid10_find_virt()
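The address arithmetic referenced above in __raid10_find_phys() and raid10_find_virt() leans on a power-of-two chunk size: a sector splits into a chunk number (sector >> geo->chunk_shift) and an offset inside that chunk (sector & geo->chunk_mask), and the reverse mapping rebuilds the sector from those parts. A minimal standalone sketch of that decomposition; the geometry values below are chosen purely for illustration and are not taken from the listing:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Illustrative geometry: 512 KiB chunks = 1024 sectors of 512 bytes. */
	const unsigned int chunk_shift = 10;               /* log2(sectors per chunk)   */
	const uint64_t chunk_mask = (1ULL << chunk_shift) - 1;

	uint64_t sector = 123456;                          /* arbitrary virtual sector  */
	uint64_t chunk  = sector >> chunk_shift;           /* which chunk it falls in   */
	uint64_t offset = sector & chunk_mask;             /* position inside the chunk */

	/* Rebuilding the sector from its parts mirrors the reverse mapping:
	 * sector = (chunk << chunk_shift) + offset.
	 */
	printf("sector %llu -> chunk %llu, offset %llu, rebuilt %llu\n",
	       (unsigned long long)sector,
	       (unsigned long long)chunk,
	       (unsigned long long)offset,
	       (unsigned long long)((chunk << chunk_shift) + offset));
	return 0;
}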
698 const sector_t this_sector = r10_bio->sector; in read_balance()
1120 r10_bio->sector = bio->bi_iter.bi_sector; in __make_request()
1149 bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector, in __make_request()
1166 sectors_handled = (r10_bio->sector + max_sectors in __make_request()
1188 r10_bio->sector = bio->bi_iter.bi_sector + in __make_request()
1345 sectors_handled = r10_bio->sector + max_sectors - in __make_request()
1349 bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); in __make_request()
1357 bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector, in __make_request()
1400 bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector, in __make_request()
1438 r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled; in __make_request()
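The __make_request() references show how a bio too large for one r10_bio is split: each clone is trimmed by bio_trim() at offset r10_bio->sector - bio->bi_iter.bi_sector, sectors_handled advances to r10_bio->sector + max_sectors - bio->bi_iter.bi_sector, and the next r10_bio starts at bi_sector + sectors_handled (handle_read_error() below reuses the same offsets). A hedged standalone sketch of just that bookkeeping, with plain integers standing in for the bio fields:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t bi_sector   = 1000;   /* start of the original bio (illustrative) */
	uint64_t bio_sectors = 256;    /* its total length in sectors              */
	uint64_t max_sectors = 100;    /* how much one r10_bio may cover           */

	uint64_t r10_sector      = bi_sector;  /* first r10_bio starts at the bio start */
	uint64_t sectors_handled = 0;

	while (sectors_handled < bio_sectors) {
		uint64_t trim_offset = r10_sector - bi_sector;
		uint64_t len = max_sectors;
		if (trim_offset + len > bio_sectors)
			len = bio_sectors - trim_offset;

		/* bio_trim(clone, trim_offset, len) would clip the clone here. */
		printf("r10_bio at %llu: trim offset %llu, len %llu\n",
		       (unsigned long long)r10_sector,
		       (unsigned long long)trim_offset,
		       (unsigned long long)len);

		sectors_handled = r10_sector + len - bi_sector;
		r10_sector = bi_sector + sectors_handled;  /* next slice of the bio */
	}
	return 0;
}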
2207 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector, in r10_sync_page_io() argument
2213 if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors) in r10_sync_page_io()
2216 if (sync_page_io(rdev, sector, sectors << 9, page, rw, false)) in r10_sync_page_io()
2226 if (!rdev_set_badblocks(rdev, sector, sectors, 0)) in r10_sync_page_io()
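The r10_sync_page_io() references follow a check, attempt, record pattern around the bad-block list: skip the transfer if is_badblock() already covers the range, otherwise try the IO (sectors << 9 converts 512-byte sectors to bytes) and mark the range bad on failure. A simplified standalone sketch of that control flow; the *_stub-style helpers below are hypothetical stand-ins for the kernel calls and only model their outcomes:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-ins for the kernel helpers; they only model outcomes. */
static bool range_is_badblock(long long sector, int sectors) { return false; }
static bool do_page_io(long long sector, int bytes, bool write) { return true; }
static bool record_badblocks(long long sector, int sectors) { return true; }

/* Returns 1 on success, 0 on a newly recorded error, -1 if the range was skipped. */
static int sync_page_io_sketch(long long sector, int sectors, bool write)
{
	if (range_is_badblock(sector, sectors))
		return -1;                              /* don't touch known-bad ranges */
	if (do_page_io(sector, sectors << 9, write))    /* sectors << 9 == bytes        */
		return 1;
	if (!record_badblocks(sector, sectors))
		printf("would escalate the device error here\n");
	return 0;
}

int main(void)
{
	printf("result: %d\n", sync_page_io_sketch(4096, 8, true));
	return 0;
}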
2451 sector_t sector; in narrow_write_error() local
2461 sector = r10_bio->sector; in narrow_write_error()
2462 sectors = ((r10_bio->sector + block_sectors) in narrow_write_error()
2464 - sector; in narrow_write_error()
2472 bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors); in narrow_write_error()
2475 (sector - r10_bio->sector)); in narrow_write_error()
2479 ok = rdev_set_badblocks(rdev, sector, in narrow_write_error()
2485 sector += sectors; in narrow_write_error()
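narrow_write_error() retries a failed write in badblock-sized pieces: the first slice only runs up to the next block_sectors boundary so every later slice is block aligned, each slice is either rewritten or handed to rdev_set_badblocks(), and sector advances by the slice length. A standalone sketch of that slicing arithmetic with illustrative numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t block_sectors = 8;   /* illustrative badblock granularity    */
	uint64_t sector        = 1003;      /* start of the failed write (example)  */
	uint64_t sect_to_write = 21;        /* how much of it remains to retry      */

	while (sect_to_write) {
		/* First slice ends at the next block_sectors boundary, so every
		 * later slice is naturally block aligned; the last may be short. */
		uint64_t sectors = ((sector + block_sectors) & ~(block_sectors - 1))
				   - sector;
		if (sectors > sect_to_write)
			sectors = sect_to_write;

		printf("retry %llu sectors at %llu (or mark them bad on failure)\n",
		       (unsigned long long)sectors, (unsigned long long)sector);

		sect_to_write -= sectors;
		sector += sectors;
	}
	return 0;
}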
2529 (unsigned long long)r10_bio->sector); in handle_read_error()
2542 (unsigned long long)r10_bio->sector); in handle_read_error()
2545 bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors); in handle_read_error()
2558 r10_bio->sector + max_sectors in handle_read_error()
2577 r10_bio->sector = mbio->bi_iter.bi_sector in handle_read_error()
2990 r10_bio->sector = sect; in sync_request()
3013 sector_t sector, first_bad; in sync_request() local
3021 sector = r10_bio->devs[j].addr; in sync_request()
3023 if (is_badblock(rdev, sector, max_sync, in sync_request()
3025 if (first_bad > sector) in sync_request()
3026 max_sync = first_bad - sector; in sync_request()
3028 bad_sectors -= (sector in sync_request()
3178 r10_bio->sector = sector_nr; in sync_request()
3185 sector_t first_bad, sector; in sync_request() local
3197 sector = r10_bio->devs[i].addr; in sync_request()
3199 sector, max_sync, in sync_request()
3201 if (first_bad > sector) in sync_request()
3202 max_sync = first_bad - sector; in sync_request()
3204 bad_sectors -= (sector - first_bad); in sync_request()
3217 bio->bi_iter.bi_sector = sector + in sync_request()
3232 sector = r10_bio->devs[i].addr; in sync_request()
3239 bio->bi_iter.bi_sector = sector + in sync_request()
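In sync_request(), a bad-block hit clips how far one pass may go: if the recorded bad range starts beyond the current sector, max_sync shrinks to stop just before it; if it already covers the current sector, the remaining bad length bounds max_sync instead. A standalone sketch of that clipping with made-up values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t sector      = 2000;  /* start of the range being resynced (example) */
	uint64_t max_sync    = 128;   /* sectors we would like to sync in one go     */

	/* Pretend is_badblock() reported a bad range for this device. */
	uint64_t first_bad   = 2048;  /* where the recorded bad range starts */
	uint64_t bad_sectors = 16;    /* its length                          */

	if (first_bad > sector) {
		/* Bad range starts later: sync only up to its first sector. */
		max_sync = first_bad - sector;
	} else {
		/* Bad range already covers 'sector': the part of it still ahead
		 * of us bounds how far this pass may go. */
		bad_sectors -= (sector - first_bad);
		if (max_sync > bad_sectors)
			max_sync = bad_sectors;
	}

	printf("clipped max_sync = %llu sectors\n", (unsigned long long)max_sync);
	return 0;
}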
4298 r10_bio->sector = sector_nr; in reshape_request()
4498 r10b->sector = r10_bio->sector; in handle_reshape_read_error()