Lines Matching refs:mddev
101 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
103 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
150 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) || in r10buf_pool_alloc()
151 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery)) in r10buf_pool_alloc()
180 &conf->mddev->recovery)) { in r10buf_pool_alloc()
258 struct r10conf *conf = r10_bio->mddev->private; in free_r10bio()
266 struct r10conf *conf = r10_bio->mddev->private; in put_buf()
276 struct mddev *mddev = r10_bio->mddev; in reschedule_retry() local
277 struct r10conf *conf = mddev->private; in reschedule_retry()
287 md_wakeup_thread(mddev->thread); in reschedule_retry()
299 struct r10conf *conf = r10_bio->mddev->private; in raid_end_bio_io()
327 struct r10conf *conf = r10_bio->mddev->private; in update_head_pos()
367 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_read_request()
400 rdev_dec_pending(rdev, conf->mddev); in raid10_end_read_request()
408 mdname(conf->mddev), in raid10_end_read_request()
419 bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, in close_write()
423 md_write_end(r10_bio->mddev); in close_write()
447 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_write_request()
468 md_error(rdev->mddev, rdev); in raid10_end_write_request()
473 &rdev->mddev->recovery); in raid10_end_write_request()
524 rdev_dec_pending(rdev, conf->mddev); in raid10_end_write_request()
621 conf->mddev->reshape_backwards)) { in raid10_find_phys()
685 static int raid10_mergeable_bvec(struct mddev *mddev, in raid10_mergeable_bvec() argument
689 struct r10conf *conf = mddev->private; in raid10_mergeable_bvec()
699 conf->mddev->reshape_backwards)) in raid10_mergeable_bvec()
713 if (mddev->merge_check_needed) { in raid10_mergeable_bvec()
810 if (conf->mddev->recovery_cp < MaxSector in read_balance()
900 rdev_dec_pending(rdev, conf->mddev); in read_balance()
912 static int raid10_congested(struct mddev *mddev, int bits) in raid10_congested() argument
914 struct r10conf *conf = mddev->private; in raid10_congested()
951 bitmap_unplug(conf->mddev->bitmap); in flush_pending_writes()
1093 if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) || in choose_data_offset()
1110 struct mddev *mddev = plug->cb.data; in raid10_unplug() local
1111 struct r10conf *conf = mddev->private; in raid10_unplug()
1120 md_wakeup_thread(mddev->thread); in raid10_unplug()
1127 bitmap_unplug(mddev->bitmap); in raid10_unplug()
1144 static void __make_request(struct mddev *mddev, struct bio *bio) in __make_request() argument
1146 struct r10conf *conf = mddev->private; in __make_request()
1172 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in __make_request()
1185 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in __make_request()
1187 (mddev->reshape_backwards in __make_request()
1193 mddev->reshape_position = conf->reshape_progress; in __make_request()
1194 set_bit(MD_CHANGE_DEVS, &mddev->flags); in __make_request()
1195 set_bit(MD_CHANGE_PENDING, &mddev->flags); in __make_request()
1196 md_wakeup_thread(mddev->thread); in __make_request()
1197 wait_event(mddev->sb_wait, in __make_request()
1198 !test_bit(MD_CHANGE_PENDING, &mddev->flags)); in __make_request()
1200 conf->reshape_safe = mddev->reshape_position; in __make_request()
1208 r10_bio->mddev = mddev; in __make_request()
1237 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); in __make_request()
1276 r10_bio->mddev = mddev; in __make_request()
1289 md_wakeup_thread(mddev->thread); in __make_request()
1404 rdev_dec_pending(conf->mirrors[d].rdev, mddev); in __make_request()
1415 rdev_dec_pending(rdev, mddev); in __make_request()
1419 md_wait_for_blocked_rdev(blocked_rdev, mddev); in __make_request()
1440 bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); in __make_request()
1447 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); in __make_request()
1463 cb = blk_check_plugged(raid10_unplug, mddev, in __make_request()
1480 md_wakeup_thread(mddev->thread); in __make_request()
1490 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); in __make_request()
1509 if (!mddev_check_plugged(mddev)) in __make_request()
1510 md_wakeup_thread(mddev->thread); in __make_request()
1528 r10_bio->mddev = mddev; in __make_request()
1536 static void make_request(struct mddev *mddev, struct bio *bio) in make_request() argument
1538 struct r10conf *conf = mddev->private; in make_request()
1545 md_flush_request(mddev, bio); in make_request()
1549 md_write_start(mddev, bio); in make_request()
1571 __make_request(mddev, split); in make_request()
1578 static void status(struct seq_file *seq, struct mddev *mddev) in status() argument
1580 struct r10conf *conf = mddev->private; in status()
1584 seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2); in status()
1594 conf->geo.raid_disks - mddev->degraded); in status()
1654 static void error(struct mddev *mddev, struct md_rdev *rdev) in error() argument
1657 struct r10conf *conf = mddev->private; in error()
1676 mddev->degraded++; in error()
1680 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in error()
1683 set_bit(MD_CHANGE_DEVS, &mddev->flags); in error()
1688 mdname(mddev), bdevname(rdev->bdev, b), in error()
1689 mdname(mddev), conf->geo.raid_disks - mddev->degraded); in error()
1702 printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded, in print_conf()
1725 static int raid10_spare_active(struct mddev *mddev) in raid10_spare_active() argument
1728 struct r10conf *conf = mddev->private; in raid10_spare_active()
1766 mddev->degraded -= count; in raid10_spare_active()
1773 static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) in raid10_add_disk() argument
1775 struct r10conf *conf = mddev->private; in raid10_add_disk()
1782 if (mddev->recovery_cp < MaxSector) in raid10_add_disk()
1795 mddev->merge_check_needed = 1; in raid10_add_disk()
1805 if (p->recovery_disabled == mddev->recovery_disabled) in raid10_add_disk()
1815 if (mddev->gendisk) in raid10_add_disk()
1816 disk_stack_limits(mddev->gendisk, rdev->bdev, in raid10_add_disk()
1823 if (mddev->gendisk) in raid10_add_disk()
1824 disk_stack_limits(mddev->gendisk, rdev->bdev, in raid10_add_disk()
1828 p->recovery_disabled = mddev->recovery_disabled - 1; in raid10_add_disk()
1849 md_integrity_add_rdev(rdev, mddev); in raid10_add_disk()
1850 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) in raid10_add_disk()
1851 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); in raid10_add_disk()
1857 static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev) in raid10_remove_disk() argument
1859 struct r10conf *conf = mddev->private; in raid10_remove_disk()
1882 mddev->recovery_disabled != p->recovery_disabled && in raid10_remove_disk()
1911 err = md_integrity_register(mddev); in raid10_remove_disk()
1922 struct r10conf *conf = r10_bio->mddev->private; in end_sync_read()
1943 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev); in end_sync_read()
1955 struct mddev *mddev = r10_bio->mddev; in end_sync_request() local
1966 md_done_sync(mddev, s, 1); in end_sync_request()
1984 struct mddev *mddev = r10_bio->mddev; in end_sync_write() local
1985 struct r10conf *conf = mddev->private; in end_sync_write()
2001 md_error(mddev, rdev); in end_sync_write()
2006 &rdev->mddev->recovery); in end_sync_write()
2015 rdev_dec_pending(rdev, mddev); in end_sync_write()
2036 static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) in sync_request_write() argument
2038 struct r10conf *conf = mddev->private; in sync_request_write()
2085 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches); in sync_request_write()
2086 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) in sync_request_write()
2147 md_done_sync(mddev, r10_bio->sectors, 1); in sync_request_write()
2171 struct mddev *mddev = r10_bio->mddev; in fix_recovery_read_error() local
2172 struct r10conf *conf = mddev->private; in fix_recovery_read_error()
2209 &rdev->mddev->recovery); in fix_recovery_read_error()
2229 mdname(mddev)); in fix_recovery_read_error()
2232 = mddev->recovery_disabled; in fix_recovery_read_error()
2234 &mddev->recovery); in fix_recovery_read_error()
2246 static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio) in recovery_request_write() argument
2248 struct r10conf *conf = mddev->private; in recovery_request_write()
2290 static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev) in check_decay_read_errors() argument
2337 &rdev->mddev->recovery); in r10_sync_page_io()
2341 md_error(rdev->mddev, rdev); in r10_sync_page_io()
2353 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio) in fix_read_error() argument
2358 int max_read_errors = atomic_read(&mddev->max_corr_read_errors); in fix_read_error()
2371 check_decay_read_errors(mddev, rdev); in fix_read_error()
2380 mdname(mddev), b, in fix_read_error()
2384 mdname(mddev), b); in fix_read_error()
2385 md_error(mddev, conf->mirrors[d].rdev); in fix_read_error()
2418 rdev_dec_pending(rdev, mddev); in fix_read_error()
2442 md_error(mddev, rdev); in fix_read_error()
2477 mdname(mddev), s, in fix_read_error()
2485 mdname(mddev), in fix_read_error()
2488 rdev_dec_pending(rdev, mddev); in fix_read_error()
2517 mdname(mddev), s, in fix_read_error()
2524 mdname(mddev), in fix_read_error()
2531 mdname(mddev), s, in fix_read_error()
2539 rdev_dec_pending(rdev, mddev); in fix_read_error()
2552 struct mddev *mddev = r10_bio->mddev; in narrow_write_error() local
2553 struct r10conf *conf = mddev->private; in narrow_write_error()
2587 wbio = bio_clone_mddev(bio, GFP_NOIO, mddev); in narrow_write_error()
2607 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio) in handle_read_error() argument
2611 struct r10conf *conf = mddev->private; in handle_read_error()
2630 if (mddev->ro == 0) { in handle_read_error()
2632 fix_read_error(conf, mddev, r10_bio); in handle_read_error()
2637 rdev_dec_pending(rdev, mddev); in handle_read_error()
2644 mdname(mddev), b, in handle_read_error()
2656 mdname(mddev), in handle_read_error()
2660 GFP_NOIO, mddev); in handle_read_error()
2692 r10_bio->mddev = mddev; in handle_read_error()
2730 md_error(conf->mddev, rdev); in handle_write_completed()
2746 md_error(conf->mddev, rdev); in handle_write_completed()
2760 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
2764 md_error(conf->mddev, rdev); in handle_write_completed()
2768 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
2777 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
2789 struct mddev *mddev = thread->mddev; in raid10d() local
2792 struct r10conf *conf = mddev->private; in raid10d()
2796 md_check_recovery(mddev); in raid10d()
2813 mddev = r10_bio->mddev; in raid10d()
2814 conf = mddev->private; in raid10d()
2819 reshape_request_write(mddev, r10_bio); in raid10d()
2821 sync_request_write(mddev, r10_bio); in raid10d()
2823 recovery_request_write(mddev, r10_bio); in raid10d()
2825 handle_read_error(mddev, r10_bio); in raid10d()
2835 if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) in raid10d()
2836 md_check_recovery(mddev); in raid10d()
2891 static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, in sync_request() argument
2894 struct r10conf *conf = mddev->private; in sync_request()
2913 if (mddev->bitmap == NULL && in sync_request()
2914 mddev->recovery_cp == MaxSector && in sync_request()
2915 mddev->reshape_position == MaxSector && in sync_request()
2916 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && in sync_request()
2917 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && in sync_request()
2918 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in sync_request()
2921 return mddev->dev_sectors - sector_nr; in sync_request()
2925 max_sector = mddev->dev_sectors; in sync_request()
2926 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || in sync_request()
2927 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in sync_request()
2928 max_sector = mddev->resync_max_sectors; in sync_request()
2939 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { in sync_request()
2945 if (mddev->curr_resync < max_sector) { /* aborted */ in sync_request()
2946 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) in sync_request()
2947 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, in sync_request()
2951 raid10_find_virt(conf, mddev->curr_resync, i); in sync_request()
2952 bitmap_end_sync(mddev->bitmap, sect, in sync_request()
2957 if ((!mddev->bitmap || conf->fullsync) in sync_request()
2959 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in sync_request()
2971 bitmap_close_sync(mddev->bitmap); in sync_request()
2977 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in sync_request()
2978 return reshape_request(mddev, sector_nr, skipped); in sync_request()
2988 if (max_sector > mddev->resync_max) in sync_request()
2989 max_sector = mddev->resync_max; /* Don't do IO beyond here */ in sync_request()
3014 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in sync_request()
3039 if (sect >= mddev->resync_max_sectors) { in sync_request()
3049 must_sync = bitmap_start_sync(mddev->bitmap, sect, in sync_request()
3071 r10_bio->mddev = mddev; in sync_request()
3087 must_sync = bitmap_start_sync(mddev->bitmap, sect, in sync_request()
3213 &mddev->recovery)) in sync_request()
3216 mdname(mddev)); in sync_request()
3218 = mddev->recovery_disabled; in sync_request()
3240 bitmap_cond_end_sync(mddev->bitmap, sector_nr); in sync_request()
3242 if (!bitmap_start_sync(mddev->bitmap, sector_nr, in sync_request()
3243 &sync_blocks, mddev->degraded) && in sync_request()
3245 &mddev->recovery)) { in sync_request()
3255 r10_bio->mddev = mddev; in sync_request()
3333 mddev); in sync_request()
3338 mddev); in sync_request()
3399 md_done_sync(mddev, sectors_skipped, 1); in sync_request()
3417 raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks) in raid10_size() argument
3420 struct r10conf *conf = mddev->private; in raid10_size()
3467 static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new) in setup_geo() argument
3473 layout = mddev->layout; in setup_geo()
3474 chunk = mddev->chunk_sectors; in setup_geo()
3475 disks = mddev->raid_disks - mddev->delta_disks; in setup_geo()
3478 layout = mddev->new_layout; in setup_geo()
3479 chunk = mddev->new_chunk_sectors; in setup_geo()
3480 disks = mddev->raid_disks; in setup_geo()
3485 layout = mddev->new_layout; in setup_geo()
3486 chunk = mddev->new_chunk_sectors; in setup_geo()
3487 disks = mddev->raid_disks + mddev->delta_disks; in setup_geo()
3508 static struct r10conf *setup_conf(struct mddev *mddev) in setup_conf() argument
3515 copies = setup_geo(&geo, mddev, geo_new); in setup_conf()
3520 mdname(mddev), PAGE_SIZE); in setup_conf()
3524 if (copies < 2 || copies > mddev->raid_disks) { in setup_conf()
3526 mdname(mddev), mddev->new_layout); in setup_conf()
3536 conf->mirrors = kzalloc(sizeof(struct raid10_info)*(mddev->raid_disks + in setup_conf()
3537 max(0,-mddev->delta_disks)), in setup_conf()
3553 calc_sectors(conf, mddev->dev_sectors); in setup_conf()
3554 if (mddev->reshape_position == MaxSector) { in setup_conf()
3558 if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) { in setup_conf()
3562 conf->reshape_progress = mddev->reshape_position; in setup_conf()
3576 conf->thread = md_register_thread(raid10d, mddev, "raid10"); in setup_conf()
3580 conf->mddev = mddev; in setup_conf()
3586 mdname(mddev)); in setup_conf()
3597 static int run(struct mddev *mddev) in run() argument
3608 if (mddev->private == NULL) { in run()
3609 conf = setup_conf(mddev); in run()
3612 mddev->private = conf; in run()
3614 conf = mddev->private; in run()
3618 mddev->thread = conf->thread; in run()
3621 chunk_size = mddev->chunk_sectors << 9; in run()
3622 if (mddev->queue) { in run()
3623 blk_queue_max_discard_sectors(mddev->queue, in run()
3624 mddev->chunk_sectors); in run()
3625 blk_queue_max_write_same_sectors(mddev->queue, 0); in run()
3626 blk_queue_io_min(mddev->queue, chunk_size); in run()
3628 blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); in run()
3630 blk_queue_io_opt(mddev->queue, chunk_size * in run()
3634 rdev_for_each(rdev, mddev) { in run()
3657 mddev->merge_check_needed = 1; in run()
3659 if (!mddev->reshape_backwards) in run()
3666 if (mddev->gendisk) in run()
3667 disk_stack_limits(mddev->gendisk, rdev->bdev, in run()
3676 if (mddev->queue) { in run()
3679 mddev->queue); in run()
3682 mddev->queue); in run()
3687 mdname(mddev)); in run()
3701 mddev->degraded = 0; in run()
3719 mddev->degraded++; in run()
3724 disk->recovery_disabled = mddev->recovery_disabled - 1; in run()
3727 if (mddev->recovery_cp != MaxSector) in run()
3730 mdname(mddev)); in run()
3733 mdname(mddev), conf->geo.raid_disks - mddev->degraded, in run()
3738 mddev->dev_sectors = conf->dev_sectors; in run()
3739 size = raid10_size(mddev, 0, 0); in run()
3740 md_set_array_sectors(mddev, size); in run()
3741 mddev->resync_max_sectors = size; in run()
3743 if (mddev->queue) { in run()
3745 ((mddev->chunk_sectors << 9) / PAGE_SIZE); in run()
3752 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) in run()
3753 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; in run()
3756 if (md_integrity_register(mddev)) in run()
3774 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in run()
3775 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in run()
3776 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in run()
3777 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in run()
3778 mddev->sync_thread = md_register_thread(md_do_sync, mddev, in run()
3785 md_unregister_thread(&mddev->thread); in run()
3791 mddev->private = NULL; in run()
3796 static void raid10_free(struct mddev *mddev, void *priv) in raid10_free() argument
3809 static void raid10_quiesce(struct mddev *mddev, int state) in raid10_quiesce() argument
3811 struct r10conf *conf = mddev->private; in raid10_quiesce()
3823 static int raid10_resize(struct mddev *mddev, sector_t sectors) in raid10_resize() argument
3837 struct r10conf *conf = mddev->private; in raid10_resize()
3840 if (mddev->reshape_position != MaxSector) in raid10_resize()
3846 oldsize = raid10_size(mddev, 0, 0); in raid10_resize()
3847 size = raid10_size(mddev, sectors, 0); in raid10_resize()
3848 if (mddev->external_size && in raid10_resize()
3849 mddev->array_sectors > size) in raid10_resize()
3851 if (mddev->bitmap) { in raid10_resize()
3852 int ret = bitmap_resize(mddev->bitmap, size, 0, 0); in raid10_resize()
3856 md_set_array_sectors(mddev, size); in raid10_resize()
3857 set_capacity(mddev->gendisk, mddev->array_sectors); in raid10_resize()
3858 revalidate_disk(mddev->gendisk); in raid10_resize()
3859 if (sectors > mddev->dev_sectors && in raid10_resize()
3860 mddev->recovery_cp > oldsize) { in raid10_resize()
3861 mddev->recovery_cp = oldsize; in raid10_resize()
3862 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in raid10_resize()
3865 mddev->dev_sectors = conf->dev_sectors; in raid10_resize()
3866 mddev->resync_max_sectors = size; in raid10_resize()
3870 static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs) in raid10_takeover_raid0() argument
3875 if (mddev->degraded > 0) { in raid10_takeover_raid0()
3877 mdname(mddev)); in raid10_takeover_raid0()
3883 mddev->new_level = 10; in raid10_takeover_raid0()
3885 mddev->new_layout = (1<<8) + 2; in raid10_takeover_raid0()
3886 mddev->new_chunk_sectors = mddev->chunk_sectors; in raid10_takeover_raid0()
3887 mddev->delta_disks = mddev->raid_disks; in raid10_takeover_raid0()
3888 mddev->raid_disks *= 2; in raid10_takeover_raid0()
3890 mddev->recovery_cp = MaxSector; in raid10_takeover_raid0()
3891 mddev->dev_sectors = size; in raid10_takeover_raid0()
3893 conf = setup_conf(mddev); in raid10_takeover_raid0()
3895 rdev_for_each(rdev, mddev) in raid10_takeover_raid0()
3906 static void *raid10_takeover(struct mddev *mddev) in raid10_takeover() argument
3913 if (mddev->level == 0) { in raid10_takeover()
3915 raid0_conf = mddev->private; in raid10_takeover()
3919 mdname(mddev)); in raid10_takeover()
3922 return raid10_takeover_raid0(mddev, in raid10_takeover()
3929 static int raid10_check_reshape(struct mddev *mddev) in raid10_check_reshape() argument
3945 struct r10conf *conf = mddev->private; in raid10_check_reshape()
3951 if (setup_geo(&geo, mddev, geo_start) != conf->copies) in raid10_check_reshape()
3958 if (mddev->array_sectors & geo.chunk_mask) in raid10_check_reshape()
3967 if (mddev->delta_disks > 0) { in raid10_check_reshape()
3971 *(mddev->raid_disks + in raid10_check_reshape()
3972 mddev->delta_disks), in raid10_check_reshape()
4037 static int raid10_start_reshape(struct mddev *mddev) in raid10_start_reshape() argument
4053 struct r10conf *conf = mddev->private; in raid10_start_reshape()
4058 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in raid10_start_reshape()
4061 if (setup_geo(&new, mddev, geo_start) != conf->copies) in raid10_start_reshape()
4069 rdev_for_each(rdev, mddev) { in raid10_start_reshape()
4076 if (!mddev->reshape_backwards) in raid10_start_reshape()
4088 if (spares < mddev->delta_disks) in raid10_start_reshape()
4102 setup_geo(&conf->geo, mddev, geo_start); in raid10_start_reshape()
4104 if (mddev->reshape_backwards) { in raid10_start_reshape()
4105 sector_t size = raid10_size(mddev, 0, 0); in raid10_start_reshape()
4106 if (size < mddev->array_sectors) { in raid10_start_reshape()
4109 mdname(mddev)); in raid10_start_reshape()
4112 mddev->resync_max_sectors = size; in raid10_start_reshape()
4119 if (mddev->delta_disks && mddev->bitmap) { in raid10_start_reshape()
4120 ret = bitmap_resize(mddev->bitmap, in raid10_start_reshape()
4121 raid10_size(mddev, 0, in raid10_start_reshape()
4127 if (mddev->delta_disks > 0) { in raid10_start_reshape()
4128 rdev_for_each(rdev, mddev) in raid10_start_reshape()
4131 if (raid10_add_disk(mddev, rdev) == 0) { in raid10_start_reshape()
4138 if (sysfs_link_rdev(mddev, rdev)) in raid10_start_reshape()
4152 mddev->degraded = calc_degraded(conf); in raid10_start_reshape()
4154 mddev->raid_disks = conf->geo.raid_disks; in raid10_start_reshape()
4155 mddev->reshape_position = conf->reshape_progress; in raid10_start_reshape()
4156 set_bit(MD_CHANGE_DEVS, &mddev->flags); in raid10_start_reshape()
4158 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in raid10_start_reshape()
4159 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in raid10_start_reshape()
4160 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); in raid10_start_reshape()
4161 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in raid10_start_reshape()
4162 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in raid10_start_reshape()
4164 mddev->sync_thread = md_register_thread(md_do_sync, mddev, in raid10_start_reshape()
4166 if (!mddev->sync_thread) { in raid10_start_reshape()
4171 md_wakeup_thread(mddev->sync_thread); in raid10_start_reshape()
4172 md_new_event(mddev); in raid10_start_reshape()
4176 mddev->recovery = 0; in raid10_start_reshape()
4179 mddev->raid_disks = conf->geo.raid_disks; in raid10_start_reshape()
4180 rdev_for_each(rdev, mddev) in raid10_start_reshape()
4185 mddev->reshape_position = MaxSector; in raid10_start_reshape()
4221 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, in reshape_request() argument
4261 struct r10conf *conf = mddev->private; in reshape_request()
4275 if (mddev->reshape_backwards && in reshape_request()
4276 conf->reshape_progress < raid10_size(mddev, 0, 0)) { in reshape_request()
4277 sector_nr = (raid10_size(mddev, 0, 0) in reshape_request()
4279 } else if (!mddev->reshape_backwards && in reshape_request()
4283 mddev->curr_resync_completed = sector_nr; in reshape_request()
4284 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); in reshape_request()
4294 if (mddev->reshape_backwards) { in reshape_request()
4344 mddev->reshape_position = conf->reshape_progress; in reshape_request()
4345 if (mddev->reshape_backwards) in reshape_request()
4346 mddev->curr_resync_completed = raid10_size(mddev, 0, 0) in reshape_request()
4349 mddev->curr_resync_completed = conf->reshape_progress; in reshape_request()
4351 set_bit(MD_CHANGE_DEVS, &mddev->flags); in reshape_request()
4352 md_wakeup_thread(mddev->thread); in reshape_request()
4353 wait_event(mddev->sb_wait, mddev->flags == 0 || in reshape_request()
4354 test_bit(MD_RECOVERY_INTR, &mddev->recovery)); in reshape_request()
4355 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in reshape_request()
4359 conf->reshape_safe = mddev->reshape_position; in reshape_request()
4369 r10_bio->mddev = mddev; in reshape_request()
4382 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in reshape_request()
4386 read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev); in reshape_request()
4475 if (mddev->reshape_backwards) in reshape_request()
4484 static int handle_reshape_read_error(struct mddev *mddev,
4486 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio) in reshape_request_write() argument
4493 struct r10conf *conf = mddev->private; in reshape_request_write()
4497 if (handle_reshape_read_error(mddev, r10_bio) < 0) { in reshape_request_write()
4499 md_done_sync(mddev, r10_bio->sectors, 0); in reshape_request_write()
4531 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) in end_reshape()
4536 md_finish_reshape(conf->mddev); in end_reshape()
4545 if (conf->mddev->queue) { in end_reshape()
4547 ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE); in end_reshape()
4549 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) in end_reshape()
4550 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; in end_reshape()
4555 static int handle_reshape_read_error(struct mddev *mddev, in handle_reshape_read_error() argument
4560 struct r10conf *conf = mddev->private; in handle_reshape_read_error()
4608 &mddev->recovery); in handle_reshape_read_error()
4621 struct mddev *mddev = r10_bio->mddev; in end_reshape_write() local
4622 struct r10conf *conf = mddev->private; in end_reshape_write()
4638 md_error(mddev, rdev); in end_reshape_write()
4641 rdev_dec_pending(rdev, mddev); in end_reshape_write()
4649 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1); in end_reshape_request()
4654 static void raid10_finish_reshape(struct mddev *mddev) in raid10_finish_reshape() argument
4656 struct r10conf *conf = mddev->private; in raid10_finish_reshape()
4658 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in raid10_finish_reshape()
4661 if (mddev->delta_disks > 0) { in raid10_finish_reshape()
4662 sector_t size = raid10_size(mddev, 0, 0); in raid10_finish_reshape()
4663 md_set_array_sectors(mddev, size); in raid10_finish_reshape()
4664 if (mddev->recovery_cp > mddev->resync_max_sectors) { in raid10_finish_reshape()
4665 mddev->recovery_cp = mddev->resync_max_sectors; in raid10_finish_reshape()
4666 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in raid10_finish_reshape()
4668 mddev->resync_max_sectors = size; in raid10_finish_reshape()
4669 set_capacity(mddev->gendisk, mddev->array_sectors); in raid10_finish_reshape()
4670 revalidate_disk(mddev->gendisk); in raid10_finish_reshape()
4674 d < conf->geo.raid_disks - mddev->delta_disks; in raid10_finish_reshape()
4684 mddev->layout = mddev->new_layout; in raid10_finish_reshape()
4685 mddev->chunk_sectors = 1 << conf->geo.chunk_shift; in raid10_finish_reshape()
4686 mddev->reshape_position = MaxSector; in raid10_finish_reshape()
4687 mddev->delta_disks = 0; in raid10_finish_reshape()
4688 mddev->reshape_backwards = 0; in raid10_finish_reshape()
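
The pattern that recurs throughout these matches is the three-way linkage between the array, the personality data and the in-flight request: setup_conf() and run() store the struct r10conf in mddev->private and point conf->mddev back at the array, and every struct r10bio records r10_bio->mddev so that completion paths such as free_r10bio() and raid10_end_read_request() can recover the configuration. Below is a minimal userspace sketch of that linkage only; the struct layouts are simplified stand-ins and handle_r10bio() is a hypothetical helper, not the real kernel definitions.

/*
 * Sketch of the ownership pattern visible in the matches above:
 *   mddev->private  -> the personality's r10conf
 *   conf->mddev     -> back-pointer to the owning array
 *   r10_bio->mddev  -> array the request belongs to
 * Struct fields here are illustrative stand-ins, not the kernel layouts.
 */
#include <stdio.h>

struct mddev {
	void *private;          /* personality-specific data (struct r10conf) */
	int degraded;
};

struct r10conf {
	struct mddev *mddev;    /* back-pointer to the owning array */
	int raid_disks;
};

struct r10bio {
	struct mddev *mddev;    /* array this request belongs to */
	unsigned long sector;
};

/* Mirrors the common opening line of the raid10 helpers listed above. */
static void handle_r10bio(struct r10bio *r10_bio)
{
	struct r10conf *conf = r10_bio->mddev->private;

	printf("request at sector %lu on a %d-disk array (%d degraded)\n",
	       r10_bio->sector, conf->raid_disks, r10_bio->mddev->degraded);
}

int main(void)
{
	struct mddev md = { .degraded = 0 };
	struct r10conf conf = { .mddev = &md, .raid_disks = 4 };
	struct r10bio bio = { .mddev = &md, .sector = 2048 };

	md.private = &conf;     /* the link that setup_conf()/run() establish */
	handle_r10bio(&bio);
	return 0;
}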