Lines Matching refs:sectors (drivers/md/raid10.c)
331 r10_bio->devs[slot].addr + (r10_bio->sectors); in update_head_pos()
421 r10_bio->sectors, in close_write()
505 r10_bio->sectors, in raid10_end_write_request()
700 int sectors = r10_bio->sectors; in read_balance() local
711 sectors = r10_bio->sectors; in read_balance()
724 && (this_sector + sectors >= conf->next_resync)) in read_balance()
737 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
743 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
747 if (is_badblock(rdev, dev_sector, sectors, in read_balance()
758 if (!do_balance && sectors > bad_sectors) in read_balance()
759 sectors = bad_sectors; in read_balance()
760 if (best_good_sectors > sectors) in read_balance()
761 best_good_sectors = sectors; in read_balance()
776 best_good_sectors = sectors; in read_balance()
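
The read_balance() matches above are the bad-block clamp: when is_badblock() reports a bad range overlapping the candidate read, the usable length is trimmed so best_good_sectors reflects only what can safely be read from that device. A minimal user-space sketch of the arithmetic, with simplified standalone types and an illustrative helper name; the real code additionally applies the in-bad-range clamp only when !do_balance and tracks best_good_sectors across candidate devices:

#include <stdio.h>

typedef unsigned long long sector_t;

/* Sketch of the read_balance() bad-block clamp: given a candidate read of
 * 'sectors' at 'dev_sector' and a bad range [first_bad, first_bad+bad_sectors),
 * return how many sectors may actually be read. */
static int clamp_to_bad_range(sector_t dev_sector, int sectors,
                              sector_t first_bad, int bad_sectors)
{
    if (first_bad <= dev_sector) {
        /* Read starts inside the bad range; the kernel shrinks bad_sectors
         * by the offset already consumed and limits the read to that. */
        bad_sectors -= (int)(dev_sector - first_bad);
        if (sectors > bad_sectors)
            sectors = bad_sectors;
    } else if (sectors > (int)(first_bad - dev_sector)) {
        /* Only the good prefix before first_bad is readable here. */
        sectors = (int)(first_bad - dev_sector);
    }
    return sectors;
}

int main(void)
{
    /* 64-sector read at 100, bad range starts at 130: 30 usable sectors. */
    printf("%d\n", clamp_to_bad_range(100, 64, 130, 8));
    /* Read starts inside a bad range that began at 96 and ran 40 sectors:
     * 40 - (100 - 96) = 36 sectors remain of it. */
    printf("%d\n", clamp_to_bad_range(100, 64, 96, 40));
    return 0;
}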
1073 int sectors; in __make_request() local
1082 sectors = bio_sectors(bio); in __make_request()
1085 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { in __make_request()
1093 sectors); in __make_request()
1100 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) in __make_request()
1101 : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe && in __make_request()
1117 r10_bio->sectors = sectors; in __make_request()
1162 if (max_sectors < r10_bio->sectors) { in __make_request()
1168 r10_bio->sectors = max_sectors; in __make_request()
1185 r10_bio->sectors = bio_sectors(bio) - sectors_handled; in __make_request()
1221 max_sectors = r10_bio->sectors; in __make_request()
1333 if (max_sectors < r10_bio->sectors) { in __make_request()
1337 r10_bio->sectors = max_sectors; in __make_request()
1349 bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); in __make_request()
1435 r10_bio->sectors = bio_sectors(bio) - sectors_handled; in __make_request()
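
In __make_request(), the repeated pattern r10_bio->sectors = bio_sectors(bio) - sectors_handled is the split path: when the chosen device can serve only max_sectors of the request, the current r10_bio is trimmed and the remainder goes to a follow-up r10_bio. A standalone sketch of just that accounting, with the mempool and bio plumbing elided and the variable names mirroring the kernel:

#include <stdio.h>

typedef unsigned long long sector_t;

int main(void)
{
    sector_t bi_sector = 1000;     /* bio->bi_iter.bi_sector */
    int bio_total = 256;           /* bio_sectors(bio) */
    sector_t r10_sector = 1000;    /* r10_bio->sector */
    int max_sectors = 96;          /* what the chosen device can serve */

    /* How much of the original bio the first r10_bio covers. */
    int sectors_handled = (int)(r10_sector + max_sectors - bi_sector);
    int first = max_sectors;                    /* trimmed r10_bio->sectors */
    int second = bio_total - sectors_handled;   /* follow-up r10_bio->sectors */

    printf("first r10_bio: %d sectors, second: %d sectors\n", first, second);
    return 0;
}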
1832 atomic_add(r10_bio->sectors, in end_sync_read()
1855 sector_t s = r10_bio->sectors; in end_sync_request()
1905 r10_bio->sectors, in end_sync_write()
1949 fbio->bi_iter.bi_size = r10_bio->sectors << 9; in sync_request_write()
1952 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9); in sync_request_write()
1968 int sectors = r10_bio->sectors; in sync_request_write() local
1971 if (sectors < (len / 512)) in sync_request_write()
1972 len = sectors * 512; in sync_request_write()
1977 sectors -= len/512; in sync_request_write()
1981 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches); in sync_request_write()
2033 md_done_sync(mddev, r10_bio->sectors, 1); in sync_request_write()
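
Two computations in the sync_request_write() matches are worth spelling out: fbio->bi_iter.bi_size = r10_bio->sectors << 9 converts 512-byte sectors to bytes, and the vcnt line is a round-up division from sectors to pages. A tiny sketch of the latter, assuming 4K pages:

#include <stdio.h>

#define PAGE_SIZE  4096
#define PAGE_SHIFT 12

int main(void)
{
    /* Round sectors (512-byte units) up to whole pages, as in
     * sync_request_write(): 20 sectors -> 3 pages on 4K pages. */
    int sectors = 20;
    int vcnt = (sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
    printf("%d sectors -> %d pages\n", sectors, vcnt);
    return 0;
}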
2061 int sectors = r10_bio->sectors; in fix_recovery_read_error() local
2066 while (sectors) { in fix_recovery_read_error()
2067 int s = sectors; in fix_recovery_read_error()
2126 sectors -= s; in fix_recovery_read_error()
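
The while (sectors) / int s = sectors / sectors -= s shape seen here recurs in fix_read_error() and handle_reshape_read_error() further down: the r10_bio span is walked one page (PAGE_SIZE >> 9 sectors) at a time so each retry operates on a single page. A standalone sketch of the loop skeleton, with a printf standing in for the actual sync_page_io() work and an illustrative helper name:

#include <stdio.h>

#define PAGE_SIZE 4096ULL
typedef unsigned long long sector_t;

static void for_each_page_chunk(sector_t sect, int sectors)
{
    int idx = 0;
    while (sectors) {
        int s = sectors;
        if (s > (int)(PAGE_SIZE >> 9))
            s = PAGE_SIZE >> 9;            /* at most one page per pass */
        printf("page %d: sector %llu, %d sectors\n", idx, sect, s);
        sectors -= s;
        sect += s;
        idx++;
    }
}

int main(void)
{
    for_each_page_chunk(1000, 20);  /* 20 sectors -> chunks of 8 + 8 + 4 */
    return 0;
}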
2208 int sectors, struct page *page, int rw) in r10_sync_page_io() argument
2213 if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors) in r10_sync_page_io()
2216 if (sync_page_io(rdev, sector, sectors << 9, page, rw, false)) in r10_sync_page_io()
2226 if (!rdev_set_badblocks(rdev, sector, sectors, 0)) in r10_sync_page_io()
2242 int sectors = r10_bio->sectors; in fix_read_error() local
2276 while(sectors) { in fix_read_error()
2277 int s = sectors; in fix_read_error()
2428 sectors -= s; in fix_read_error()
2452 int sectors; in narrow_write_error() local
2453 int sect_to_write = r10_bio->sectors; in narrow_write_error()
2462 sectors = ((r10_bio->sector + block_sectors) in narrow_write_error()
2468 if (sectors > sect_to_write) in narrow_write_error()
2469 sectors = sect_to_write; in narrow_write_error()
2472 bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors); in narrow_write_error()
2480 sectors, 0) in narrow_write_error()
2484 sect_to_write -= sectors; in narrow_write_error()
2485 sector += sectors; in narrow_write_error()
2486 sectors = block_sectors; in narrow_write_error()
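
narrow_write_error() retries a failed write in badblock-sized pieces: the first chunk runs only to the next block_sectors-aligned boundary (the masking expression above), every later chunk is a full block, and the last is truncated to what remains. A user-space sketch of that splitting arithmetic, with bio_trim() and the write itself replaced by a printf; block_sectors must be a power of two for the mask to work:

#include <stdio.h>

typedef unsigned long long sector_t;

static void split_aligned(sector_t sector, int sect_to_write, int block_sectors)
{
    /* First chunk: up to the next block_sectors-aligned boundary. */
    int sectors = (int)(((sector + block_sectors)
                         & ~(sector_t)(block_sectors - 1)) - sector);
    while (sect_to_write) {
        if (sectors > sect_to_write)
            sectors = sect_to_write;       /* final chunk may be short */
        printf("write %d sectors at %llu\n", sectors, sector);
        sect_to_write -= sectors;
        sector += sectors;
        sectors = block_sectors;           /* subsequent chunks: whole blocks */
    }
}

int main(void)
{
    split_aligned(1003, 20, 8);  /* splits as 5 + 8 + 7 sectors */
    return 0;
}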
2554 if (max_sectors < r10_bio->sectors) { in handle_read_error()
2560 r10_bio->sectors = max_sectors; in handle_read_error()
2572 r10_bio->sectors = bio_sectors(mbio) - sectors_handled; in handle_read_error()
2607 r10_bio->sectors, 0); in handle_write_completed()
2612 r10_bio->sectors, 0)) in handle_write_completed()
2623 r10_bio->sectors, 0); in handle_write_completed()
2628 r10_bio->sectors, 0)) in handle_write_completed()
2643 r10_bio->sectors, 0); in handle_write_completed()
2660 r10_bio->sectors, 0); in handle_write_completed()
3181 r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1; in sync_request()
3295 r10_bio->sectors = nr_sectors; in sync_request()
3303 r10_bio->sectors = nr_sectors; in sync_request()
3334 raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks) in raid10_size() argument
3342 if (!sectors) in raid10_size()
3343 sectors = conf->dev_sectors; in raid10_size()
3345 size = sectors >> conf->geo.chunk_shift; in raid10_size()
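
Only two lines of raid10_size() match, but the surrounding computation is short: per-device sectors are reduced to whole chunks, divided across far_copies, scaled by the disk count, divided by near_copies, then converted back to sectors. A sketch reconstructed from the full function in raid10.c, with sector_div() replaced by plain division and an illustrative helper name:

#include <stdio.h>

typedef unsigned long long sector_t;

static sector_t raid10_size_sketch(sector_t sectors, int raid_disks,
                                   int chunk_shift, int near_copies,
                                   int far_copies)
{
    sector_t size = sectors >> chunk_shift;  /* per-device whole chunks */
    size /= far_copies;
    size *= raid_disks;
    size /= near_copies;
    return size << chunk_shift;              /* back to sectors */
}

int main(void)
{
    /* 4 disks of 2097152 sectors (1 GiB), 512K chunks (shift 10),
     * near_copies=2, far_copies=1: usable size is half the raw capacity. */
    printf("%llu sectors\n", raid10_size_sketch(2097152, 4, 10, 2, 1));
    return 0;
}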
3751 static int raid10_resize(struct mddev *mddev, sector_t sectors) in raid10_resize() argument
3775 size = raid10_size(mddev, sectors, 0); in raid10_resize()
3787 if (sectors > mddev->dev_sectors && in raid10_resize()
3792 calc_sectors(conf, sectors); in raid10_resize()
3826 rdev->sectors = size; in raid10_takeover_raid0()
4300 r10_bio->sectors = last - sector_nr + 1; in reshape_request()
4388 r10_bio->sectors = nr_sectors; in reshape_request()
4391 md_sync_acct(read_bio->bi_bdev, r10_bio->sectors); in reshape_request()
4427 md_done_sync(mddev, r10_bio->sectors, 0); in reshape_request_write()
4449 md_sync_acct(b->bi_bdev, r10_bio->sectors); in reshape_request_write()
4487 int sectors = r10_bio->sectors; in handle_reshape_read_error() local
4501 while (sectors) { in handle_reshape_read_error()
4502 int s = sectors; in handle_reshape_read_error()
4539 sectors -= s; in handle_reshape_read_error()
4576 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1); in end_reshape_request()