sector_nr         570 drivers/md/md.h 	sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped);
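
The md.h hook above is the contract every personality's resync path implements: the sync thread calls it repeatedly, it returns how many sectors were handled starting at sector_nr, and it sets *skipped when that range was skipped (e.g. already in sync) rather than actually written. A minimal userspace sketch of that contract follows; toy_mddev, toy_personality, and toy_sync_request are illustrative stand-ins, not the real md.h definitions.

	#include <stdio.h>

	typedef unsigned long long sector_t;	/* userspace stand-in */

	struct toy_mddev {
		sector_t dev_sectors;	/* component device size, in sectors */
		int	 in_sync;	/* pretend the bitmap says "clean" */
	};

	struct toy_personality {
		/* Same shape as the hook at drivers/md/md.h:570. */
		sector_t (*sync_request)(struct toy_mddev *mddev,
					 sector_t sector_nr, int *skipped);
	};

	static sector_t toy_sync_request(struct toy_mddev *mddev,
					 sector_t sector_nr, int *skipped)
	{
		if (sector_nr >= mddev->dev_sectors)
			return 0;		/* resync complete */
		if (mddev->in_sync) {
			*skipped = 1;		/* skip the rest in one go */
			return mddev->dev_sectors - sector_nr;
		}
		return 128;			/* "synced" one window */
	}

	int main(void)
	{
		struct toy_mddev mddev = { .dev_sectors = 1024, .in_sync = 0 };
		struct toy_personality pers = { .sync_request = toy_sync_request };
		sector_t pos = 0, done;
		int skipped;

		/* Mimic the sync thread's outer loop: call the hook and
		 * advance by whatever it reports as handled. */
		for (;;) {
			skipped = 0;
			done = pers.sync_request(&mddev, pos, &skipped);
			if (!done)
				break;
			pos += done;
		}
		printf("resync reached sector %llu\n", pos);
		return 0;
	}
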
sector_nr          45 drivers/md/raid1.c static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
sector_nr          46 drivers/md/raid1.c static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
sector_nr         881 drivers/md/raid1.c static int raise_barrier(struct r1conf *conf, sector_t sector_nr)
sector_nr         883 drivers/md/raid1.c 	int idx = sector_to_idx(sector_nr);
sector_nr         931 drivers/md/raid1.c static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
sector_nr         933 drivers/md/raid1.c 	int idx = sector_to_idx(sector_nr);
sector_nr        1001 drivers/md/raid1.c static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
sector_nr        1003 drivers/md/raid1.c 	int idx = sector_to_idx(sector_nr);
sector_nr        1034 drivers/md/raid1.c static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
sector_nr        1036 drivers/md/raid1.c 	int idx = sector_to_idx(sector_nr);
sector_nr        1047 drivers/md/raid1.c static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
sector_nr        1049 drivers/md/raid1.c 	int idx = sector_to_idx(sector_nr);
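
The raid1.c entries above (raise_barrier/lower_barrier around resync, wait_barrier/allow_barrier around normal I/O, all keyed on sector_to_idx(sector_nr)) implement a per-bucket I/O barrier: resync of one region only blocks normal I/O that hashes to the same bucket. A simplified single-threaded model is sketched below. BARRIER_UNIT_SECTOR_BITS mirrors raid1.h; the bucket count and the plain-modulo hash are stand-ins (the kernel sizes the array from PAGE_SHIFT and hashes with hash_long()), and plain ints replace the kernel's atomics and waitqueues.

	#include <stdio.h>

	typedef unsigned long long sector_t;

	#define BARRIER_UNIT_SECTOR_BITS 17	/* 64MB units, as in raid1.h */
	#define BARRIER_BUCKETS_NR	 1024	/* assumed bucket count */

	static int sector_to_idx(sector_t sector)
	{
		/* The kernel hashes the unit number with hash_long();
		 * plain modulo is enough for this illustration. */
		return (int)((sector >> BARRIER_UNIT_SECTOR_BITS) %
			     BARRIER_BUCKETS_NR);
	}

	struct toy_r1conf {
		int barrier[BARRIER_BUCKETS_NR];	/* resync holds bucket */
		int nr_pending[BARRIER_BUCKETS_NR];	/* in-flight normal I/O */
	};

	/* wait_barrier()/allow_barrier() pair, wrapped around normal I/O. */
	static void wait_barrier(struct toy_r1conf *conf, sector_t sector_nr)
	{
		int idx = sector_to_idx(sector_nr);

		while (conf->barrier[idx])
			;	/* the kernel sleeps on a waitqueue here */
		conf->nr_pending[idx]++;
	}

	static void allow_barrier(struct toy_r1conf *conf, sector_t sector_nr)
	{
		conf->nr_pending[sector_to_idx(sector_nr)]--;
	}

	/* raise_barrier()/lower_barrier() pair, wrapped around resync I/O. */
	static void raise_barrier(struct toy_r1conf *conf, sector_t sector_nr)
	{
		int idx = sector_to_idx(sector_nr);

		conf->barrier[idx]++;
		while (conf->nr_pending[idx])
			;	/* drain in-flight I/O in this bucket only */
	}

	static void lower_barrier(struct toy_r1conf *conf, sector_t sector_nr)
	{
		conf->barrier[sector_to_idx(sector_nr)]--;
	}

	int main(void)
	{
		static struct toy_r1conf conf;	/* zero-initialized */

		raise_barrier(&conf, 0);			/* bucket 0 */
		wait_barrier(&conf, 5ULL << BARRIER_UNIT_SECTOR_BITS); /* bucket 5 */
		allow_barrier(&conf, 5ULL << BARRIER_UNIT_SECTOR_BITS);
		lower_barrier(&conf, 0);
		printf("I/O in a different bucket was not blocked\n");
		return 0;
	}
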
sector_nr        2631 drivers/md/raid1.c static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
sector_nr        2646 drivers/md/raid1.c 	int idx = sector_to_idx(sector_nr);
sector_nr        2654 drivers/md/raid1.c 	if (sector_nr >= max_sector) {
sector_nr        2681 drivers/md/raid1.c 		return max_sector - sector_nr;
sector_nr        2686 drivers/md/raid1.c 	if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
sector_nr        2704 drivers/md/raid1.c 	md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
sector_nr        2705 drivers/md/raid1.c 		mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
sector_nr        2708 drivers/md/raid1.c 	if (raise_barrier(conf, sector_nr))
sector_nr        2724 drivers/md/raid1.c 	r1_bio->sector = sector_nr;
sector_nr        2728 drivers/md/raid1.c 	good_sectors = align_to_barrier_unit_end(sector_nr, good_sectors);
sector_nr        2748 drivers/md/raid1.c 			if (is_badblock(rdev, sector_nr, good_sectors,
sector_nr        2750 drivers/md/raid1.c 				if (first_bad > sector_nr)
sector_nr        2751 drivers/md/raid1.c 					good_sectors = first_bad - sector_nr;
sector_nr        2753 drivers/md/raid1.c 					bad_sectors -= (sector_nr - first_bad);
sector_nr        2759 drivers/md/raid1.c 			if (sector_nr < first_bad) {
sector_nr        2786 drivers/md/raid1.c 			bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
sector_nr        2805 drivers/md/raid1.c 				ok = rdev_set_badblocks(rdev, sector_nr,
sector_nr        2842 drivers/md/raid1.c 			max_sector = sector_nr + min_bad;
sector_nr        2843 drivers/md/raid1.c 		rv = max_sector - sector_nr;
sector_nr        2851 drivers/md/raid1.c 	if (max_sector > sector_nr + good_sectors)
sector_nr        2852 drivers/md/raid1.c 		max_sector = sector_nr + good_sectors;
sector_nr        2858 drivers/md/raid1.c 		if (sector_nr + (len>>9) > max_sector)
sector_nr        2859 drivers/md/raid1.c 			len = (max_sector - sector_nr) << 9;
sector_nr        2863 drivers/md/raid1.c 			if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
sector_nr        2888 drivers/md/raid1.c 		sector_nr += len>>9;
sector_nr        2895 drivers/md/raid1.c 			conf->cluster_sync_high < sector_nr + nr_sectors) {
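
Inside raid1_sync_request() (raid1.c:2631 onwards) each pass is clamped so it never crosses a barrier unit (align_to_barrier_unit_end, raid1.c:2728) and is then filled one page at a time, advancing sector_nr by len>>9 per page (raid1.c:2858-2888). Below is a runnable re-derivation of that arithmetic; align_to_barrier_unit_end is rewritten from its round-up definition rather than copied, and the constants are assumed to match raid1.h and the md resync code.

	#include <stdio.h>

	typedef unsigned long long sector_t;

	#define BARRIER_UNIT_SECTOR_SIZE (1ULL << 17)	/* as in raid1.h */
	#define PAGE_SIZE 4096ULL			/* assumed 4K pages */

	static sector_t align_to_barrier_unit_end(sector_t start, sector_t sectors)
	{
		/* sectors from start to the end of its barrier unit */
		sector_t len = ((start / BARRIER_UNIT_SECTOR_SIZE) + 1) *
			       BARRIER_UNIT_SECTOR_SIZE - start;

		return len < sectors ? len : sectors;
	}

	int main(void)
	{
		sector_t sector_nr = (1ULL << 17) - 24;	/* 24 sectors before a unit end */
		sector_t max_sector = sector_nr + 1000;
		sector_t good_sectors = max_sector - sector_nr;
		sector_t nr_sectors = 0;

		good_sectors = align_to_barrier_unit_end(sector_nr, good_sectors);
		max_sector = sector_nr + good_sectors;	/* pass ends at unit boundary */

		do {	/* mirror of the one-page-at-a-time fill loop */
			sector_t len = PAGE_SIZE;	/* bytes */

			if (sector_nr + (len >> 9) > max_sector)
				len = (max_sector - sector_nr) << 9;
			if (len == 0)
				break;
			nr_sectors += len >> 9;
			sector_nr += len >> 9;
		} while (sector_nr < max_sector);

		printf("synced %llu sectors, stopped at %llu (unit boundary)\n",
		       nr_sectors, sector_nr);	/* 24 sectors, 131072 */
		return 0;
	}
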
sector_nr          71 drivers/md/raid10.c static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
sector_nr        2901 drivers/md/raid10.c static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
sector_nr        2932 drivers/md/raid10.c 		return mddev->dev_sectors - sector_nr;
sector_nr        2940 drivers/md/raid10.c 	if (sector_nr >= max_sector) {
sector_nr        2995 drivers/md/raid10.c 		return reshape_request(mddev, sector_nr, skipped);
sector_nr        3002 drivers/md/raid10.c 		return (max_sector - sector_nr) + sectors_skipped;
sector_nr        3012 drivers/md/raid10.c 	    max_sector > (sector_nr | chunk_mask))
sector_nr        3013 drivers/md/raid10.c 		max_sector = (sector_nr | chunk_mask) + 1;
sector_nr        3074 drivers/md/raid10.c 			sect = raid10_find_virt(conf, sector_nr, i);
sector_nr        3306 drivers/md/raid10.c 		md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
sector_nr        3308 drivers/md/raid10.c 					(sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
sector_nr        3310 drivers/md/raid10.c 		if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
sector_nr        3326 drivers/md/raid10.c 		conf->next_resync = sector_nr;
sector_nr        3329 drivers/md/raid10.c 		r10_bio->sector = sector_nr;
sector_nr        3332 drivers/md/raid10.c 		r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
sector_nr        3419 drivers/md/raid10.c 	if (sector_nr + max_sync < max_sector)
sector_nr        3420 drivers/md/raid10.c 		max_sector = sector_nr + max_sync;
sector_nr        3424 drivers/md/raid10.c 		if (sector_nr + (len>>9) > max_sector)
sector_nr        3425 drivers/md/raid10.c 			len = (max_sector - sector_nr) << 9;
sector_nr        3438 drivers/md/raid10.c 		sector_nr += len>>9;
sector_nr        3445 drivers/md/raid10.c 		if (conf->cluster_sync_high < sector_nr + nr_sectors) {
sector_nr        3464 drivers/md/raid10.c 			sect_va1 = raid10_find_virt(conf, sector_nr, i);
sector_nr        3515 drivers/md/raid10.c 	if (sector_nr + max_sync < max_sector)
sector_nr        3516 drivers/md/raid10.c 		max_sector = sector_nr + max_sync;
sector_nr        3518 drivers/md/raid10.c 	sectors_skipped += (max_sector - sector_nr);
sector_nr        3520 drivers/md/raid10.c 	sector_nr = max_sector;
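
raid10_sync_request() bounds each recovery pass with the (sector_nr | chunk_mask) idiom seen at raid10.c:3012-3013 and 3332: with chunk_mask = chunk_sectors - 1 and power-of-two chunks, OR-ing in the mask and adding 1 rounds sector_nr up to the next chunk boundary, so a pass never crosses a chunk. A small demonstration with arbitrary values:

	#include <stdio.h>

	typedef unsigned long long sector_t;

	int main(void)
	{
		sector_t chunk_sectors = 1024;		/* 512KB chunks */
		sector_t chunk_mask = chunk_sectors - 1;
		sector_t sector_nr = 3000;		/* mid-chunk position */
		sector_t max_sector = 100000;

		/* raid10.c:3012-3013: clamp the pass to the current chunk */
		if (max_sector > (sector_nr | chunk_mask))
			max_sector = (sector_nr | chunk_mask) + 1;

		printf("pass covers [%llu, %llu): %llu sectors\n",
		       sector_nr, max_sector, max_sector - sector_nr);
		/* -> pass covers [3000, 3072): 72 sectors */

		/* raid10.c:3332 computes the same span inclusively: */
		printf("r10_bio->sectors = %llu\n",
		       (sector_nr | chunk_mask) - sector_nr + 1);
		return 0;
	}
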
sector_nr        4393 drivers/md/raid10.c static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
sector_nr        4446 drivers/md/raid10.c 	if (sector_nr == 0) {
sector_nr        4450 drivers/md/raid10.c 			sector_nr = (raid10_size(mddev, 0, 0)
sector_nr        4454 drivers/md/raid10.c 			sector_nr = conf->reshape_progress;
sector_nr        4455 drivers/md/raid10.c 		if (sector_nr) {
sector_nr        4456 drivers/md/raid10.c 			mddev->curr_resync_completed = sector_nr;
sector_nr        4459 drivers/md/raid10.c 			return sector_nr;
sector_nr        4484 drivers/md/raid10.c 		sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
sector_nr        4486 drivers/md/raid10.c 		if (sector_nr + RESYNC_BLOCK_SIZE/512 < last)
sector_nr        4487 drivers/md/raid10.c 			sector_nr = last + 1 - RESYNC_BLOCK_SIZE/512;
sector_nr        4505 drivers/md/raid10.c 		sector_nr = conf->reshape_progress;
sector_nr        4506 drivers/md/raid10.c 		last  = sector_nr | (conf->geo.chunk_mask
sector_nr        4509 drivers/md/raid10.c 		if (sector_nr + RESYNC_BLOCK_SIZE/512 <= last)
sector_nr        4510 drivers/md/raid10.c 			last = sector_nr + RESYNC_BLOCK_SIZE/512 - 1;
sector_nr        4544 drivers/md/raid10.c 	r10_bio->sector = sector_nr;
sector_nr        4546 drivers/md/raid10.c 	r10_bio->sectors = last - sector_nr + 1;
sector_nr        4579 drivers/md/raid10.c 	if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) {
sector_nr        4583 drivers/md/raid10.c 		conf->cluster_sync_low = sector_nr;
sector_nr        4584 drivers/md/raid10.c 		conf->cluster_sync_high = sector_nr + CLUSTER_RESYNC_WINDOW_SECTORS;
sector_nr        4647 drivers/md/raid10.c 		sector_nr += len >> 9;
sector_nr        4659 drivers/md/raid10.c 	if (sector_nr <= last)
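
raid10's reshape_request() derives its copy window differently per direction (raid10.c:4484-4487 backwards, 4505-4510 forwards), capping it at one RESYNC_BLOCK_SIZE worth of sectors. The sketch below assumes RESYNC_BLOCK_SIZE is 64K as elsewhere in the md resync code, and uses a single chunk_mask where the kernel combines the old and new geometry masks.

	#include <stdio.h>

	typedef unsigned long long sector_t;

	#define RESYNC_BLOCK_SIZE (64 * 1024)	/* assumed, as in md resync code */

	int main(void)
	{
		sector_t chunk_mask = 1023;	/* 1024-sector chunks */
		sector_t sector_nr, last;

		/* Forwards (raid10.c:4505-4510): window is the rest of the
		 * current chunk, capped at one RESYNC_BLOCK_SIZE. */
		sector_nr = 2048;		/* conf->reshape_progress */
		last = sector_nr | chunk_mask;
		if (sector_nr + RESYNC_BLOCK_SIZE / 512 <= last)
			last = sector_nr + RESYNC_BLOCK_SIZE / 512 - 1;
		printf("forward window:  [%llu, %llu], %llu sectors\n",
		       sector_nr, last, last - sector_nr + 1);	/* 128 sectors */

		/* Backwards (raid10.c:4484-4487): same idea anchored at last. */
		last = 5000;
		sector_nr = last & ~chunk_mask;
		if (sector_nr + RESYNC_BLOCK_SIZE / 512 < last)
			sector_nr = last + 1 - RESYNC_BLOCK_SIZE / 512;
		printf("backward window: [%llu, %llu], %llu sectors\n",
		       sector_nr, last, last - sector_nr + 1);	/* 128 sectors */
		return 0;
	}
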
sector_nr        5753 drivers/md/raid5.c static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
sector_nr        5779 drivers/md/raid5.c 	if (sector_nr == 0) {
sector_nr        5783 drivers/md/raid5.c 			sector_nr = raid5_size(mddev, 0, 0)
sector_nr        5788 drivers/md/raid5.c 			sector_nr = MaxSector;
sector_nr        5791 drivers/md/raid5.c 			sector_nr = conf->reshape_progress;
sector_nr        5792 drivers/md/raid5.c 		sector_div(sector_nr, new_data_disks);
sector_nr        5793 drivers/md/raid5.c 		if (sector_nr) {
sector_nr        5794 drivers/md/raid5.c 			mddev->curr_resync_completed = sector_nr;
sector_nr        5797 drivers/md/raid5.c 			retn = sector_nr;
sector_nr        5845 drivers/md/raid5.c 		       != sector_nr);
sector_nr        5847 drivers/md/raid5.c 		BUG_ON(writepos != sector_nr + reshape_sectors);
sector_nr        5848 drivers/md/raid5.c 		stripe_addr = sector_nr;
sector_nr        5888 drivers/md/raid5.c 		mddev->curr_resync_completed = sector_nr;
sector_nr        5895 drivers/md/raid5.c 				    rdev->recovery_offset < sector_nr)
sector_nr        5896 drivers/md/raid5.c 					rdev->recovery_offset = sector_nr;
sector_nr        5982 drivers/md/raid5.c 	sector_nr += reshape_sectors;
sector_nr        5986 drivers/md/raid5.c 	    (sector_nr - mddev->curr_resync_completed) * 2
sector_nr        5995 drivers/md/raid5.c 		mddev->curr_resync_completed = sector_nr;
sector_nr        6002 drivers/md/raid5.c 				    rdev->recovery_offset < sector_nr)
sector_nr        6003 drivers/md/raid5.c 					rdev->recovery_offset = sector_nr;
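
raid5's reshape_request() keeps conf->reshape_progress in array sectors and converts it with sector_div(sector_nr, new_data_disks) (raid5.c:5792) before reporting it as a per-device position in mddev->curr_resync_completed, since each stripe spreads new_data_disks data chunks across the member disks. A sketch of that conversion, with sector_div() modeled as plain division (the kernel macro divides in place and also yields the remainder):

	#include <stdio.h>

	typedef unsigned long long sector_t;

	int main(void)
	{
		sector_t reshape_progress = 655360;	/* array sectors completed */
		int new_data_disks = 4;		/* e.g. 6-disk RAID6: 4 data disks */
		sector_t sector_nr = reshape_progress;

		sector_nr /= new_data_disks;	/* sector_div() equivalent */
		printf("array sector %llu -> per-device sector %llu\n",
		       reshape_progress, sector_nr);

		/* Each pass then advances by reshape_sectors and, once far
		 * enough ahead, checkpoints the new position and bumps every
		 * rdev->recovery_offset (raid5.c:5982-6003). */
		return 0;
	}
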
sector_nr        6022 drivers/md/raid5.c static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr,
sector_nr        6032 drivers/md/raid5.c 	if (sector_nr >= max_sector) {
sector_nr        6054 drivers/md/raid5.c 		return reshape_request(mddev, sector_nr, skipped);
sector_nr        6068 drivers/md/raid5.c 		sector_t rv = mddev->dev_sectors - sector_nr;
sector_nr        6074 drivers/md/raid5.c 	    !md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
sector_nr        6082 drivers/md/raid5.c 	md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, false);
sector_nr        6084 drivers/md/raid5.c 	sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0);
sector_nr        6086 drivers/md/raid5.c 		sh = raid5_get_active_stripe(conf, sector_nr, 0, 0, 0);
sector_nr        6105 drivers/md/raid5.c 	md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
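
Finally, raid5_sync_request() resyncs one stripe per call: it grabs the stripe at sector_nr (raid5.c:6084-6086), marks it for sync handling, and returns the stripe size so the sync thread advances. The toy below assumes a stripe unit of 8 sectors (one 4K page per device), the classic STRIPE_SECTORS value; newer kernels make this configurable via RAID5_STRIPE_SECTORS().

	#include <stdio.h>

	typedef unsigned long long sector_t;

	#define STRIPE_SECTORS 8ULL	/* 4K stripe unit, assumed */

	static sector_t toy_raid5_sync_request(sector_t sector_nr,
					       sector_t max_sector, int *skipped)
	{
		if (sector_nr >= max_sector)
			return 0;	/* resync finished */
		/* Real code: raid5_get_active_stripe(conf, sector_nr, ...),
		 * set STRIPE_SYNC_REQUESTED, handle the stripe, release it. */
		return STRIPE_SECTORS;	/* one stripe per call */
	}

	int main(void)
	{
		sector_t pos = 0, max = 64, done;
		int skipped = 0, calls = 0;

		while ((done = toy_raid5_sync_request(pos, max, &skipped)) != 0) {
			pos += done;
			calls++;
		}
		printf("%d calls to cover %llu sectors\n", calls, pos); /* 8, 64 */
		return 0;
	}
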