Lines matching refs: sectors
Identifier cross-reference for "sectors" in drivers/md/raid10.c (the Linux md RAID10 driver). The leading number on each entry is the source line number; the trailing "in fn()" names the enclosing function.

330 r10_bio->devs[slot].addr + (r10_bio->sectors); in update_head_pos()
420 r10_bio->sectors, in close_write()
505 r10_bio->sectors, in raid10_end_write_request()
787 int sectors = r10_bio->sectors; in read_balance() local
798 sectors = r10_bio->sectors; in read_balance()
811 && (this_sector + sectors >= conf->next_resync)) in read_balance()
825 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
832 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
836 if (is_badblock(rdev, dev_sector, sectors, in read_balance()
847 if (!do_balance && sectors > bad_sectors) in read_balance()
848 sectors = bad_sectors; in read_balance()
849 if (best_good_sectors > sectors) in read_balance()
850 best_good_sectors = sectors; in read_balance()
865 best_good_sectors = sectors; in read_balance()
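read_balance() clips a read that would run into a known bad-block range: is_badblock() reports where the bad range starts (first_bad) and how long it is (bad_sectors), and the request is shortened to the clean prefix, tracked across candidate devices as best_good_sectors. A minimal userspace sketch of that clipping arithmetic; overlaps_bad_range() is a hypothetical stand-in for the kernel's is_badblock(), reusing its out-parameter names:

#include <stdio.h>

typedef unsigned long long sector_t;

/* Hypothetical stand-in for the kernel's is_badblock(): reports one
 * known bad range and returns nonzero when it overlaps
 * [sector, sector + len). */
static int overlaps_bad_range(sector_t sector, int len,
                              sector_t bad_start, int bad_len,
                              sector_t *first_bad, int *bad_sectors)
{
    if (sector < bad_start + bad_len && bad_start < sector + len) {
        *first_bad = bad_start;
        *bad_sectors = bad_len;
        return 1;
    }
    return 0;
}

int main(void)
{
    sector_t dev_sector = 1000;  /* start of the read on this device */
    int sectors = 256;           /* requested length in sectors */
    sector_t first_bad;
    int bad_sectors;

    if (overlaps_bad_range(dev_sector, sectors, 1100, 8,
                           &first_bad, &bad_sectors) &&
        first_bad > dev_sector) {
        /* Keep only the clean prefix before the bad range, mirroring
         * the best_good_sectors bookkeeping in read_balance(). */
        int good = (int)(first_bad - dev_sector);
        if (good < sectors)
            sectors = good;
    }
    printf("read clipped to %d sectors\n", sectors);  /* prints 100 */
    return 0;
}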
1162 int sectors; in __make_request() local
1171 sectors = bio_sectors(bio); in __make_request()
1174 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { in __make_request()
1182 sectors); in __make_request()
1189 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) in __make_request()
1190 : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe && in __make_request()
1206 r10_bio->sectors = sectors; in __make_request()
1251 if (max_sectors < r10_bio->sectors) { in __make_request()
1257 r10_bio->sectors = max_sectors; in __make_request()
1274 r10_bio->sectors = bio_sectors(bio) - sectors_handled; in __make_request()
1310 max_sectors = r10_bio->sectors; in __make_request()
1424 if (max_sectors < r10_bio->sectors) { in __make_request()
1428 r10_bio->sectors = max_sectors; in __make_request()
1440 bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); in __make_request()
1526 r10_bio->sectors = bio_sectors(bio) - sectors_handled; in __make_request()
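__make_request() handles a request too large for one pass by clipping r10_bio->sectors to max_sectors and resubmitting the tail, which is why r10_bio->sectors is reloaded as bio_sectors(bio) - sectors_handled at lines 1274 and 1526. A plain-C sketch of that split loop; the names are illustrative, not the kernel's bio machinery:

#include <stdio.h>

int main(void)
{
    int bio_sectors = 1000;   /* total size of the incoming request */
    int max_sectors = 256;    /* largest piece serviceable in one pass */
    int sectors_handled = 0;  /* progress through the request */

    while (sectors_handled < bio_sectors) {
        /* Remaining tail, clipped the way r10_bio->sectors is clipped
         * to max_sectors before each submission. */
        int sectors = bio_sectors - sectors_handled;
        if (sectors > max_sectors)
            sectors = max_sectors;
        printf("submit %d sectors at offset %d\n",
               sectors, sectors_handled);
        sectors_handled += sectors;
    }
    return 0;  /* pieces of 256, 256, 256, 232 */
}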
1937 atomic_add(r10_bio->sectors, in end_sync_read()
1960 sector_t s = r10_bio->sectors; in end_sync_request()
2011 r10_bio->sectors, in end_sync_write()
2056 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9); in sync_request_write()
2072 int sectors = r10_bio->sectors; in sync_request_write() local
2075 if (sectors < (len / 512)) in sync_request_write()
2076 len = sectors * 512; in sync_request_write()
2081 sectors -= len/512; in sync_request_write()
2085 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches); in sync_request_write()
2098 tbio->bi_iter.bi_size = r10_bio->sectors << 9; in sync_request_write()
2147 md_done_sync(mddev, r10_bio->sectors, 1); in sync_request_write()
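The vcnt expression at line 2056 is a round-up division: PAGE_SIZE >> 9 is the number of 512-byte sectors per page, so vcnt is the page count needed to cover r10_bio->sectors. Worked standalone, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12                  /* 4 KiB pages assumed */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
    int sectors = 1000;  /* sectors to compare during resync */

    /* Same rounding as sync_request_write(): equivalent to
     * DIV_ROUND_UP(sectors, PAGE_SIZE >> 9). */
    int vcnt = (sectors + (int)(PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);

    printf("%d sectors -> %d pages\n", sectors, vcnt);  /* 1000 -> 125 */
    return 0;
}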
2175 int sectors = r10_bio->sectors; in fix_recovery_read_error() local
2180 while (sectors) { in fix_recovery_read_error()
2181 int s = sectors; in fix_recovery_read_error()
2240 sectors -= s; in fix_recovery_read_error()
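fix_recovery_read_error() walks the failed range in capped pieces, and fix_read_error() and handle_reshape_read_error() below use the same pattern: take s = sectors, cap s for this pass (here at one page), process, then subtract. A minimal sketch of the walk, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
    int sectors = 20;  /* failed range to re-read and re-write */
    int sect = 0;      /* offset into the range */

    while (sectors) {
        int s = sectors;
        /* Cap each pass at one page, as fix_recovery_read_error()
         * caps s to PAGE_SIZE >> 9. */
        if (s > (int)(PAGE_SIZE >> 9))
            s = PAGE_SIZE >> 9;
        printf("process %d sectors at offset %d\n", s, sect);
        sectors -= s;
        sect += s;
    }
    return 0;  /* passes of 8, 8, 4 */
}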
2322 int sectors, struct page *page, int rw) in r10_sync_page_io() argument
2327 if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors) in r10_sync_page_io()
2330 if (sync_page_io(rdev, sector, sectors << 9, page, rw, false)) in r10_sync_page_io()
2340 if (!rdev_set_badblocks(rdev, sector, sectors, 0)) in r10_sync_page_io()
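r10_sync_page_io() converts its length with sectors << 9 because a sector is 512 = 2^9 bytes, and on a failed write it records the range with rdev_set_badblocks(). A sketch of that flow; both helpers here are hypothetical stand-ins for the kernel functions of similar intent:

#include <stdio.h>

typedef unsigned long long sector_t;

/* Stand-in for sync_page_io(): returns nonzero on success. Hard-wired
 * to fail so the bad-block path below is exercised. */
static int fake_sync_io(sector_t sector, int bytes)
{
    (void)sector; (void)bytes;
    return 0;
}

/* Stand-in for rdev_set_badblocks(): returns nonzero on success. */
static int fake_set_badblocks(sector_t sector, int sectors)
{
    printf("marking %d sectors bad at %llu\n", sectors, sector);
    return 1;
}

int main(void)
{
    sector_t sector = 2048;
    int sectors = 8;

    /* sectors << 9 converts 512-byte sectors to a byte count. */
    if (!fake_sync_io(sector, sectors << 9)) {
        if (!fake_set_badblocks(sector, sectors))
            printf("could not record bad blocks\n");
    }
    return 0;
}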
2356 int sectors = r10_bio->sectors; in fix_read_error() local
2390 while(sectors) { in fix_read_error()
2391 int s = sectors; in fix_read_error()
2544 sectors -= s; in fix_read_error()
2568 int sectors; in narrow_write_error() local
2569 int sect_to_write = r10_bio->sectors; in narrow_write_error()
2578 sectors = ((r10_bio->sector + block_sectors) in narrow_write_error()
2584 if (sectors > sect_to_write) in narrow_write_error()
2585 sectors = sect_to_write; in narrow_write_error()
2588 bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors); in narrow_write_error()
2596 sectors, 0) in narrow_write_error()
2600 sect_to_write -= sectors; in narrow_write_error()
2601 sector += sectors; in narrow_write_error()
2602 sectors = block_sectors; in narrow_write_error()
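narrow_write_error() retries a failed write in block_sectors-sized pieces aligned to bad-block boundaries, so only blocks that really fail get recorded. The first piece runs from sector up to the next multiple of block_sectors (the expression starting at line 2578; the mask form requires block_sectors to be a power of two); later pieces are whole blocks. Worked standalone:

#include <stdio.h>

typedef unsigned long long sector_t;

int main(void)
{
    sector_t sector = 1003;   /* start of the failed write */
    int sect_to_write = 100;  /* total sectors to retry */
    int block_sectors = 8;    /* bad-block granularity, power of two */

    /* First piece ends at the next block boundary: round
     * (sector + block_sectors) down to a multiple of block_sectors,
     * then subtract the start, as in narrow_write_error(). */
    int sectors = (int)(((sector + block_sectors) &
                         ~(sector_t)(block_sectors - 1)) - sector);

    while (sect_to_write) {
        if (sectors > sect_to_write)
            sectors = sect_to_write;
        printf("write %d sectors at %llu\n", sectors, sector);
        sect_to_write -= sectors;
        sector += sectors;
        sectors = block_sectors;  /* subsequent pieces: whole blocks */
    }
    return 0;  /* 5 at 1003, then aligned pieces of 8, ending with 7 */
}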
2670 if (max_sectors < r10_bio->sectors) { in handle_read_error()
2676 r10_bio->sectors = max_sectors; in handle_read_error()
2688 r10_bio->sectors = bio_sectors(mbio) - sectors_handled; in handle_read_error()
2724 r10_bio->sectors, 0); in handle_write_completed()
2729 r10_bio->sectors, 0)) in handle_write_completed()
2740 r10_bio->sectors, 0); in handle_write_completed()
2745 r10_bio->sectors, 0)) in handle_write_completed()
2759 r10_bio->sectors, 0); in handle_write_completed()
2776 r10_bio->sectors, 0); in handle_write_completed()
3264 r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1; in sync_request()
3378 r10_bio->sectors = nr_sectors; in sync_request()
3386 r10_bio->sectors = nr_sectors; in sync_request()
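At line 3264, sync_request() limits one resync pass to the end of the current chunk. With chunk_mask = chunk_sectors - 1, (sector_nr | chunk_mask) is the last sector of the chunk containing sector_nr, so the full expression counts the sectors from sector_nr through the chunk boundary. Worked standalone:

#include <stdio.h>

typedef unsigned long long sector_t;

int main(void)
{
    int chunk_sectors = 128;    /* 64 KiB chunks */
    sector_t chunk_mask = (sector_t)chunk_sectors - 1;
    sector_t sector_nr = 1000;  /* current resync position */

    /* (sector_nr | chunk_mask) is the last sector of this chunk, so
     * this is the count of sectors left in it, as at line 3264. */
    sector_t remaining = (sector_nr | chunk_mask) - sector_nr + 1;

    printf("%llu sectors to end of chunk\n", remaining);  /* 24 */
    return 0;
}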
3417 raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks) in raid10_size() argument
3425 if (!sectors) in raid10_size()
3426 sectors = conf->dev_sectors; in raid10_size()
3428 size = sectors >> conf->geo.chunk_shift; in raid10_size()
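raid10_size() computes usable array capacity in whole chunks: shift per-device sectors down by chunk_shift, divide by far_copies, multiply by raid_disks, divide by near_copies, then shift back. A simplified standalone model of that sequence; raid10_capacity() is an illustrative name, and the kernel does the divisions with sector_div():

#include <stdio.h>

typedef unsigned long long sector_t;

/* Simplified model of raid10_size(): usable capacity from per-device
 * capacity, computed in whole chunks. */
static sector_t raid10_capacity(sector_t dev_sectors, int raid_disks,
                                int chunk_shift, int near_copies,
                                int far_copies)
{
    sector_t size = dev_sectors >> chunk_shift;  /* chunks per device */
    size /= far_copies;
    size *= raid_disks;
    size /= near_copies;
    return size << chunk_shift;                  /* back to sectors */
}

int main(void)
{
    /* 4 devices of 1 GiB (2097152 sectors), 64 KiB chunks
     * (chunk_shift = 7), "n2" layout: 2 near copies, 1 far copy. */
    sector_t cap = raid10_capacity(2097152, 4, 7, 2, 1);
    printf("array capacity: %llu sectors\n", cap);  /* 4194304 = 2 GiB */
    return 0;
}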
3823 static int raid10_resize(struct mddev *mddev, sector_t sectors) in raid10_resize() argument
3847 size = raid10_size(mddev, sectors, 0); in raid10_resize()
3859 if (sectors > mddev->dev_sectors && in raid10_resize()
3864 calc_sectors(conf, sectors); in raid10_resize()
3898 rdev->sectors = size; in raid10_takeover_raid0()
4372 r10_bio->sectors = last - sector_nr + 1; in reshape_request()
4460 r10_bio->sectors = nr_sectors; in reshape_request()
4463 md_sync_acct(read_bio->bi_bdev, r10_bio->sectors); in reshape_request()
4499 md_done_sync(mddev, r10_bio->sectors, 0); in reshape_request_write()
4521 md_sync_acct(b->bi_bdev, r10_bio->sectors); in reshape_request_write()
4559 int sectors = r10_bio->sectors; in handle_reshape_read_error() local
4573 while (sectors) { in handle_reshape_read_error()
4574 int s = sectors; in handle_reshape_read_error()
4611 sectors -= s; in handle_reshape_read_error()
4649 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1); in end_reshape_request()