Lines matching refs: mddev (drivers/md/raid10.c)

Each entry shows the source line number, the matching line, and the enclosing function; the trailing "argument" and "local" tags mark hits where mddev appears as a function parameter or a local variable.

102 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
104 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
151 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) || in r10buf_pool_alloc()
152 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery)) in r10buf_pool_alloc()
181 &conf->mddev->recovery)) { in r10buf_pool_alloc()
259 struct r10conf *conf = r10_bio->mddev->private; in free_r10bio()
267 struct r10conf *conf = r10_bio->mddev->private; in put_buf()
277 struct mddev *mddev = r10_bio->mddev; in reschedule_retry() local
278 struct r10conf *conf = mddev->private; in reschedule_retry()
288 md_wakeup_thread(mddev->thread); in reschedule_retry()
300 struct r10conf *conf = r10_bio->mddev->private; in raid_end_bio_io()
328 struct r10conf *conf = r10_bio->mddev->private; in update_head_pos()
368 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_read_request()
401 rdev_dec_pending(rdev, conf->mddev); in raid10_end_read_request()
409 mdname(conf->mddev), in raid10_end_read_request()
420 bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, in close_write()
424 md_write_end(r10_bio->mddev); in close_write()
447 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_write_request()
468 md_error(rdev->mddev, rdev); in raid10_end_write_request()
473 &rdev->mddev->recovery); in raid10_end_write_request()
524 rdev_dec_pending(rdev, conf->mddev); in raid10_end_write_request()
621 conf->mddev->reshape_backwards)) { in raid10_find_phys()
723 if (conf->mddev->recovery_cp < MaxSector in read_balance()
811 rdev_dec_pending(rdev, conf->mddev); in read_balance()
823 static int raid10_congested(struct mddev *mddev, int bits) in raid10_congested() argument
825 struct r10conf *conf = mddev->private; in raid10_congested()
862 bitmap_unplug(conf->mddev->bitmap); in flush_pending_writes()
1004 if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) || in choose_data_offset()
1021 struct mddev *mddev = plug->cb.data; in raid10_unplug() local
1022 struct r10conf *conf = mddev->private; in raid10_unplug()
1031 md_wakeup_thread(mddev->thread); in raid10_unplug()
1038 bitmap_unplug(mddev->bitmap); in raid10_unplug()
1055 static void __make_request(struct mddev *mddev, struct bio *bio) in __make_request() argument
1057 struct r10conf *conf = mddev->private; in __make_request()
1083 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in __make_request()
1096 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in __make_request()
1098 (mddev->reshape_backwards in __make_request()
1104 mddev->reshape_position = conf->reshape_progress; in __make_request()
1105 set_bit(MD_CHANGE_DEVS, &mddev->flags); in __make_request()
1106 set_bit(MD_CHANGE_PENDING, &mddev->flags); in __make_request()
1107 md_wakeup_thread(mddev->thread); in __make_request()
1108 wait_event(mddev->sb_wait, in __make_request()
1109 !test_bit(MD_CHANGE_PENDING, &mddev->flags)); in __make_request()
1111 conf->reshape_safe = mddev->reshape_position; in __make_request()
1119 r10_bio->mddev = mddev; in __make_request()
1148 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); in __make_request()
1187 r10_bio->mddev = mddev; in __make_request()
1200 md_wakeup_thread(mddev->thread); in __make_request()
1313 rdev_dec_pending(conf->mirrors[d].rdev, mddev); in __make_request()
1324 rdev_dec_pending(rdev, mddev); in __make_request()
1328 md_wait_for_blocked_rdev(blocked_rdev, mddev); in __make_request()
1349 bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); in __make_request()
1356 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); in __make_request()
1372 cb = blk_check_plugged(raid10_unplug, mddev, in __make_request()
1389 md_wakeup_thread(mddev->thread); in __make_request()
1399 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); in __make_request()
1418 if (!mddev_check_plugged(mddev)) in __make_request()
1419 md_wakeup_thread(mddev->thread); in __make_request()
1437 r10_bio->mddev = mddev; in __make_request()
1445 static void make_request(struct mddev *mddev, struct bio *bio) in make_request() argument
1447 struct r10conf *conf = mddev->private; in make_request()
1454 md_flush_request(mddev, bio); in make_request()
1458 md_write_start(mddev, bio); in make_request()
1480 __make_request(mddev, split); in make_request()
1487 static void status(struct seq_file *seq, struct mddev *mddev) in status() argument
1489 struct r10conf *conf = mddev->private; in status()
1493 seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2); in status()
1505 conf->geo.raid_disks - mddev->degraded); in status()
1565 static void error(struct mddev *mddev, struct md_rdev *rdev) in error() argument
1568 struct r10conf *conf = mddev->private; in error()
1587 mddev->degraded++; in error()
1591 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in error()
1594 set_bit(MD_CHANGE_DEVS, &mddev->flags); in error()
1595 set_bit(MD_CHANGE_PENDING, &mddev->flags); in error()
1600 mdname(mddev), bdevname(rdev->bdev, b), in error()
1601 mdname(mddev), conf->geo.raid_disks - mddev->degraded); in error()
1614 printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded, in print_conf()
1637 static int raid10_spare_active(struct mddev *mddev) in raid10_spare_active() argument
1640 struct r10conf *conf = mddev->private; in raid10_spare_active()
1678 mddev->degraded -= count; in raid10_spare_active()
1685 static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) in raid10_add_disk() argument
1687 struct r10conf *conf = mddev->private; in raid10_add_disk()
1693 if (mddev->recovery_cp < MaxSector) in raid10_add_disk()
1701 if (md_integrity_add_rdev(rdev, mddev)) in raid10_add_disk()
1714 if (p->recovery_disabled == mddev->recovery_disabled) in raid10_add_disk()
1724 if (mddev->gendisk) in raid10_add_disk()
1725 disk_stack_limits(mddev->gendisk, rdev->bdev, in raid10_add_disk()
1732 if (mddev->gendisk) in raid10_add_disk()
1733 disk_stack_limits(mddev->gendisk, rdev->bdev, in raid10_add_disk()
1737 p->recovery_disabled = mddev->recovery_disabled - 1; in raid10_add_disk()
1745 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) in raid10_add_disk()
1746 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); in raid10_add_disk()
1752 static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev) in raid10_remove_disk() argument
1754 struct r10conf *conf = mddev->private; in raid10_remove_disk()
1777 mddev->recovery_disabled != p->recovery_disabled && in raid10_remove_disk()
1806 err = md_integrity_register(mddev); in raid10_remove_disk()
1817 struct r10conf *conf = r10_bio->mddev->private; in end_sync_read()
1838 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev); in end_sync_read()
1850 struct mddev *mddev = r10_bio->mddev; in end_sync_request() local
1861 md_done_sync(mddev, s, 1); in end_sync_request()
1878 struct mddev *mddev = r10_bio->mddev; in end_sync_write() local
1879 struct r10conf *conf = mddev->private; in end_sync_write()
1895 md_error(mddev, rdev); in end_sync_write()
1900 &rdev->mddev->recovery); in end_sync_write()
1909 rdev_dec_pending(rdev, mddev); in end_sync_write()
1930 static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) in sync_request_write() argument
1932 struct r10conf *conf = mddev->private; in sync_request_write()
1981 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches); in sync_request_write()
1982 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) in sync_request_write()
2033 md_done_sync(mddev, r10_bio->sectors, 1); in sync_request_write()
2057 struct mddev *mddev = r10_bio->mddev; in fix_recovery_read_error() local
2058 struct r10conf *conf = mddev->private; in fix_recovery_read_error()
2095 &rdev->mddev->recovery); in fix_recovery_read_error()
2115 mdname(mddev)); in fix_recovery_read_error()
2118 = mddev->recovery_disabled; in fix_recovery_read_error()
2120 &mddev->recovery); in fix_recovery_read_error()
2132 static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio) in recovery_request_write() argument
2134 struct r10conf *conf = mddev->private; in recovery_request_write()
2176 static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev) in check_decay_read_errors() argument
2223 &rdev->mddev->recovery); in r10_sync_page_io()
2227 md_error(rdev->mddev, rdev); in r10_sync_page_io()
2239 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio) in fix_read_error() argument
2244 int max_read_errors = atomic_read(&mddev->max_corr_read_errors); in fix_read_error()
2257 check_decay_read_errors(mddev, rdev); in fix_read_error()
2266 mdname(mddev), b, in fix_read_error()
2270 mdname(mddev), b); in fix_read_error()
2271 md_error(mddev, conf->mirrors[d].rdev); in fix_read_error()
2303 rdev_dec_pending(rdev, mddev); in fix_read_error()
2327 md_error(mddev, rdev); in fix_read_error()
2361 mdname(mddev), s, in fix_read_error()
2369 mdname(mddev), in fix_read_error()
2372 rdev_dec_pending(rdev, mddev); in fix_read_error()
2401 mdname(mddev), s, in fix_read_error()
2408 mdname(mddev), in fix_read_error()
2415 mdname(mddev), s, in fix_read_error()
2423 rdev_dec_pending(rdev, mddev); in fix_read_error()
2436 struct mddev *mddev = r10_bio->mddev; in narrow_write_error() local
2437 struct r10conf *conf = mddev->private; in narrow_write_error()
2471 wbio = bio_clone_mddev(bio, GFP_NOIO, mddev); in narrow_write_error()
2491 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio) in handle_read_error() argument
2495 struct r10conf *conf = mddev->private; in handle_read_error()
2514 if (mddev->ro == 0) { in handle_read_error()
2516 fix_read_error(conf, mddev, r10_bio); in handle_read_error()
2521 rdev_dec_pending(rdev, mddev); in handle_read_error()
2528 mdname(mddev), b, in handle_read_error()
2540 mdname(mddev), in handle_read_error()
2544 GFP_NOIO, mddev); in handle_read_error()
2576 r10_bio->mddev = mddev; in handle_read_error()
2613 md_error(conf->mddev, rdev); in handle_write_completed()
2629 md_error(conf->mddev, rdev); in handle_write_completed()
2644 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
2648 md_error(conf->mddev, rdev); in handle_write_completed()
2652 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
2661 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
2669 md_wakeup_thread(conf->mddev->thread); in handle_write_completed()
2681 struct mddev *mddev = thread->mddev; in raid10d() local
2684 struct r10conf *conf = mddev->private; in raid10d()
2688 md_check_recovery(mddev); in raid10d()
2691 !test_bit(MD_CHANGE_PENDING, &mddev->flags)) { in raid10d()
2694 if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) { in raid10d()
2705 if (mddev->degraded) in raid10d()
2730 mddev = r10_bio->mddev; in raid10d()
2731 conf = mddev->private; in raid10d()
2736 reshape_request_write(mddev, r10_bio); in raid10d()
2738 sync_request_write(mddev, r10_bio); in raid10d()
2740 recovery_request_write(mddev, r10_bio); in raid10d()
2742 handle_read_error(mddev, r10_bio); in raid10d()
2752 if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) in raid10d()
2753 md_check_recovery(mddev); in raid10d()
2808 static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, in sync_request() argument
2811 struct r10conf *conf = mddev->private; in sync_request()
2830 if (mddev->bitmap == NULL && in sync_request()
2831 mddev->recovery_cp == MaxSector && in sync_request()
2832 mddev->reshape_position == MaxSector && in sync_request()
2833 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && in sync_request()
2834 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && in sync_request()
2835 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in sync_request()
2838 return mddev->dev_sectors - sector_nr; in sync_request()
2842 max_sector = mddev->dev_sectors; in sync_request()
2843 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || in sync_request()
2844 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in sync_request()
2845 max_sector = mddev->resync_max_sectors; in sync_request()
2856 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { in sync_request()
2862 if (mddev->curr_resync < max_sector) { /* aborted */ in sync_request()
2863 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) in sync_request()
2864 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, in sync_request()
2868 raid10_find_virt(conf, mddev->curr_resync, i); in sync_request()
2869 bitmap_end_sync(mddev->bitmap, sect, in sync_request()
2874 if ((!mddev->bitmap || conf->fullsync) in sync_request()
2876 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in sync_request()
2888 bitmap_close_sync(mddev->bitmap); in sync_request()
2894 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in sync_request()
2895 return reshape_request(mddev, sector_nr, skipped); in sync_request()
2905 if (max_sector > mddev->resync_max) in sync_request()
2906 max_sector = mddev->resync_max; /* Don't do IO beyond here */ in sync_request()
2931 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in sync_request()
2956 if (sect >= mddev->resync_max_sectors) { in sync_request()
2966 must_sync = bitmap_start_sync(mddev->bitmap, sect, in sync_request()
2988 r10_bio->mddev = mddev; in sync_request()
3004 must_sync = bitmap_start_sync(mddev->bitmap, sect, in sync_request()
3130 &mddev->recovery)) in sync_request()
3133 mdname(mddev)); in sync_request()
3135 = mddev->recovery_disabled; in sync_request()
3157 bitmap_cond_end_sync(mddev->bitmap, sector_nr, 0); in sync_request()
3159 if (!bitmap_start_sync(mddev->bitmap, sector_nr, in sync_request()
3160 &sync_blocks, mddev->degraded) && in sync_request()
3162 &mddev->recovery)) { in sync_request()
3172 r10_bio->mddev = mddev; in sync_request()
3250 mddev); in sync_request()
3255 mddev); in sync_request()
3316 md_done_sync(mddev, sectors_skipped, 1); in sync_request()
3334 raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks) in raid10_size() argument
3337 struct r10conf *conf = mddev->private; in raid10_size()
3384 static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new) in setup_geo() argument
3390 layout = mddev->layout; in setup_geo()
3391 chunk = mddev->chunk_sectors; in setup_geo()
3392 disks = mddev->raid_disks - mddev->delta_disks; in setup_geo()
3395 layout = mddev->new_layout; in setup_geo()
3396 chunk = mddev->new_chunk_sectors; in setup_geo()
3397 disks = mddev->raid_disks; in setup_geo()
3402 layout = mddev->new_layout; in setup_geo()
3403 chunk = mddev->new_chunk_sectors; in setup_geo()
3404 disks = mddev->raid_disks + mddev->delta_disks; in setup_geo()
3440 static struct r10conf *setup_conf(struct mddev *mddev) in setup_conf() argument
3447 copies = setup_geo(&geo, mddev, geo_new); in setup_conf()
3452 mdname(mddev), PAGE_SIZE); in setup_conf()
3456 if (copies < 2 || copies > mddev->raid_disks) { in setup_conf()
3458 mdname(mddev), mddev->new_layout); in setup_conf()
3468 conf->mirrors = kzalloc(sizeof(struct raid10_info)*(mddev->raid_disks + in setup_conf()
3469 max(0,-mddev->delta_disks)), in setup_conf()
3485 calc_sectors(conf, mddev->dev_sectors); in setup_conf()
3486 if (mddev->reshape_position == MaxSector) { in setup_conf()
3490 if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) { in setup_conf()
3494 conf->reshape_progress = mddev->reshape_position; in setup_conf()
3509 conf->thread = md_register_thread(raid10d, mddev, "raid10"); in setup_conf()
3513 conf->mddev = mddev; in setup_conf()
3519 mdname(mddev)); in setup_conf()
3529 static int run(struct mddev *mddev) in run() argument
3540 if (mddev->private == NULL) { in run()
3541 conf = setup_conf(mddev); in run()
3544 mddev->private = conf; in run()
3546 conf = mddev->private; in run()
3550 mddev->thread = conf->thread; in run()
3553 chunk_size = mddev->chunk_sectors << 9; in run()
3554 if (mddev->queue) { in run()
3555 blk_queue_max_discard_sectors(mddev->queue, in run()
3556 mddev->chunk_sectors); in run()
3557 blk_queue_max_write_same_sectors(mddev->queue, 0); in run()
3558 blk_queue_io_min(mddev->queue, chunk_size); in run()
3560 blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); in run()
3562 blk_queue_io_opt(mddev->queue, chunk_size * in run()
3566 rdev_for_each(rdev, mddev) { in run()
3589 if (!mddev->reshape_backwards) in run()
3596 if (mddev->gendisk) in run()
3597 disk_stack_limits(mddev->gendisk, rdev->bdev, in run()
3606 if (mddev->queue) { in run()
3609 mddev->queue); in run()
3612 mddev->queue); in run()
3617 mdname(mddev)); in run()
3631 mddev->degraded = 0; in run()
3649 mddev->degraded++; in run()
3654 disk->recovery_disabled = mddev->recovery_disabled - 1; in run()
3657 if (mddev->recovery_cp != MaxSector) in run()
3660 mdname(mddev)); in run()
3663 mdname(mddev), conf->geo.raid_disks - mddev->degraded, in run()
3668 mddev->dev_sectors = conf->dev_sectors; in run()
3669 size = raid10_size(mddev, 0, 0); in run()
3670 md_set_array_sectors(mddev, size); in run()
3671 mddev->resync_max_sectors = size; in run()
3673 if (mddev->queue) { in run()
3675 ((mddev->chunk_sectors << 9) / PAGE_SIZE); in run()
3682 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) in run()
3683 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; in run()
3686 if (md_integrity_register(mddev)) in run()
3704 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in run()
3705 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in run()
3706 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in run()
3707 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in run()
3708 mddev->sync_thread = md_register_thread(md_do_sync, mddev, in run()
3715 md_unregister_thread(&mddev->thread); in run()
3720 mddev->private = NULL; in run()
3725 static void raid10_free(struct mddev *mddev, void *priv) in raid10_free() argument
3737 static void raid10_quiesce(struct mddev *mddev, int state) in raid10_quiesce() argument
3739 struct r10conf *conf = mddev->private; in raid10_quiesce()
3751 static int raid10_resize(struct mddev *mddev, sector_t sectors) in raid10_resize() argument
3765 struct r10conf *conf = mddev->private; in raid10_resize()
3768 if (mddev->reshape_position != MaxSector) in raid10_resize()
3774 oldsize = raid10_size(mddev, 0, 0); in raid10_resize()
3775 size = raid10_size(mddev, sectors, 0); in raid10_resize()
3776 if (mddev->external_size && in raid10_resize()
3777 mddev->array_sectors > size) in raid10_resize()
3779 if (mddev->bitmap) { in raid10_resize()
3780 int ret = bitmap_resize(mddev->bitmap, size, 0, 0); in raid10_resize()
3784 md_set_array_sectors(mddev, size); in raid10_resize()
3785 set_capacity(mddev->gendisk, mddev->array_sectors); in raid10_resize()
3786 revalidate_disk(mddev->gendisk); in raid10_resize()
3787 if (sectors > mddev->dev_sectors && in raid10_resize()
3788 mddev->recovery_cp > oldsize) { in raid10_resize()
3789 mddev->recovery_cp = oldsize; in raid10_resize()
3790 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in raid10_resize()
3793 mddev->dev_sectors = conf->dev_sectors; in raid10_resize()
3794 mddev->resync_max_sectors = size; in raid10_resize()
3798 static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs) in raid10_takeover_raid0() argument
3803 if (mddev->degraded > 0) { in raid10_takeover_raid0()
3805 mdname(mddev)); in raid10_takeover_raid0()
3811 mddev->new_level = 10; in raid10_takeover_raid0()
3813 mddev->new_layout = (1<<8) + 2; in raid10_takeover_raid0()
3814 mddev->new_chunk_sectors = mddev->chunk_sectors; in raid10_takeover_raid0()
3815 mddev->delta_disks = mddev->raid_disks; in raid10_takeover_raid0()
3816 mddev->raid_disks *= 2; in raid10_takeover_raid0()
3818 mddev->recovery_cp = MaxSector; in raid10_takeover_raid0()
3819 mddev->dev_sectors = size; in raid10_takeover_raid0()
3821 conf = setup_conf(mddev); in raid10_takeover_raid0()
3823 rdev_for_each(rdev, mddev) in raid10_takeover_raid0()
3834 static void *raid10_takeover(struct mddev *mddev) in raid10_takeover() argument
3841 if (mddev->level == 0) { in raid10_takeover()
3843 raid0_conf = mddev->private; in raid10_takeover()
3847 mdname(mddev)); in raid10_takeover()
3850 return raid10_takeover_raid0(mddev, in raid10_takeover()
3857 static int raid10_check_reshape(struct mddev *mddev) in raid10_check_reshape() argument
3873 struct r10conf *conf = mddev->private; in raid10_check_reshape()
3879 if (setup_geo(&geo, mddev, geo_start) != conf->copies) in raid10_check_reshape()
3886 if (mddev->array_sectors & geo.chunk_mask) in raid10_check_reshape()
3895 if (mddev->delta_disks > 0) { in raid10_check_reshape()
3899 *(mddev->raid_disks + in raid10_check_reshape()
3900 mddev->delta_disks), in raid10_check_reshape()
3965 static int raid10_start_reshape(struct mddev *mddev) in raid10_start_reshape() argument
3981 struct r10conf *conf = mddev->private; in raid10_start_reshape()
3986 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in raid10_start_reshape()
3989 if (setup_geo(&new, mddev, geo_start) != conf->copies) in raid10_start_reshape()
3997 rdev_for_each(rdev, mddev) { in raid10_start_reshape()
4004 if (!mddev->reshape_backwards) in raid10_start_reshape()
4016 if (spares < mddev->delta_disks) in raid10_start_reshape()
4030 setup_geo(&conf->geo, mddev, geo_start); in raid10_start_reshape()
4032 if (mddev->reshape_backwards) { in raid10_start_reshape()
4033 sector_t size = raid10_size(mddev, 0, 0); in raid10_start_reshape()
4034 if (size < mddev->array_sectors) { in raid10_start_reshape()
4037 mdname(mddev)); in raid10_start_reshape()
4040 mddev->resync_max_sectors = size; in raid10_start_reshape()
4047 if (mddev->delta_disks && mddev->bitmap) { in raid10_start_reshape()
4048 ret = bitmap_resize(mddev->bitmap, in raid10_start_reshape()
4049 raid10_size(mddev, 0, in raid10_start_reshape()
4055 if (mddev->delta_disks > 0) { in raid10_start_reshape()
4056 rdev_for_each(rdev, mddev) in raid10_start_reshape()
4059 if (raid10_add_disk(mddev, rdev) == 0) { in raid10_start_reshape()
4066 if (sysfs_link_rdev(mddev, rdev)) in raid10_start_reshape()
4080 mddev->degraded = calc_degraded(conf); in raid10_start_reshape()
4082 mddev->raid_disks = conf->geo.raid_disks; in raid10_start_reshape()
4083 mddev->reshape_position = conf->reshape_progress; in raid10_start_reshape()
4084 set_bit(MD_CHANGE_DEVS, &mddev->flags); in raid10_start_reshape()
4086 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in raid10_start_reshape()
4087 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in raid10_start_reshape()
4088 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); in raid10_start_reshape()
4089 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in raid10_start_reshape()
4090 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in raid10_start_reshape()
4092 mddev->sync_thread = md_register_thread(md_do_sync, mddev, in raid10_start_reshape()
4094 if (!mddev->sync_thread) { in raid10_start_reshape()
4099 md_wakeup_thread(mddev->sync_thread); in raid10_start_reshape()
4100 md_new_event(mddev); in raid10_start_reshape()
4104 mddev->recovery = 0; in raid10_start_reshape()
4107 mddev->raid_disks = conf->geo.raid_disks; in raid10_start_reshape()
4108 rdev_for_each(rdev, mddev) in raid10_start_reshape()
4113 mddev->reshape_position = MaxSector; in raid10_start_reshape()
4149 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, in reshape_request() argument
4189 struct r10conf *conf = mddev->private; in reshape_request()
4203 if (mddev->reshape_backwards && in reshape_request()
4204 conf->reshape_progress < raid10_size(mddev, 0, 0)) { in reshape_request()
4205 sector_nr = (raid10_size(mddev, 0, 0) in reshape_request()
4207 } else if (!mddev->reshape_backwards && in reshape_request()
4211 mddev->curr_resync_completed = sector_nr; in reshape_request()
4212 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); in reshape_request()
4222 if (mddev->reshape_backwards) { in reshape_request()
4272 mddev->reshape_position = conf->reshape_progress; in reshape_request()
4273 if (mddev->reshape_backwards) in reshape_request()
4274 mddev->curr_resync_completed = raid10_size(mddev, 0, 0) in reshape_request()
4277 mddev->curr_resync_completed = conf->reshape_progress; in reshape_request()
4279 set_bit(MD_CHANGE_DEVS, &mddev->flags); in reshape_request()
4280 md_wakeup_thread(mddev->thread); in reshape_request()
4281 wait_event(mddev->sb_wait, mddev->flags == 0 || in reshape_request()
4282 test_bit(MD_RECOVERY_INTR, &mddev->recovery)); in reshape_request()
4283 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in reshape_request()
4287 conf->reshape_safe = mddev->reshape_position; in reshape_request()
4297 r10_bio->mddev = mddev; in reshape_request()
4310 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in reshape_request()
4314 read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev); in reshape_request()
4403 if (mddev->reshape_backwards) in reshape_request()
4412 static int handle_reshape_read_error(struct mddev *mddev,
4414 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio) in reshape_request_write() argument
4421 struct r10conf *conf = mddev->private; in reshape_request_write()
4425 if (handle_reshape_read_error(mddev, r10_bio) < 0) { in reshape_request_write()
4427 md_done_sync(mddev, r10_bio->sectors, 0); in reshape_request_write()
4459 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) in end_reshape()
4464 md_finish_reshape(conf->mddev); in end_reshape()
4473 if (conf->mddev->queue) { in end_reshape()
4475 ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE); in end_reshape()
4477 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) in end_reshape()
4478 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; in end_reshape()
4483 static int handle_reshape_read_error(struct mddev *mddev, in handle_reshape_read_error() argument
4488 struct r10conf *conf = mddev->private; in handle_reshape_read_error()
4536 &mddev->recovery); in handle_reshape_read_error()
4548 struct mddev *mddev = r10_bio->mddev; in end_reshape_write() local
4549 struct r10conf *conf = mddev->private; in end_reshape_write()
4565 md_error(mddev, rdev); in end_reshape_write()
4568 rdev_dec_pending(rdev, mddev); in end_reshape_write()
4576 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1); in end_reshape_request()
4581 static void raid10_finish_reshape(struct mddev *mddev) in raid10_finish_reshape() argument
4583 struct r10conf *conf = mddev->private; in raid10_finish_reshape()
4585 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in raid10_finish_reshape()
4588 if (mddev->delta_disks > 0) { in raid10_finish_reshape()
4589 sector_t size = raid10_size(mddev, 0, 0); in raid10_finish_reshape()
4590 md_set_array_sectors(mddev, size); in raid10_finish_reshape()
4591 if (mddev->recovery_cp > mddev->resync_max_sectors) { in raid10_finish_reshape()
4592 mddev->recovery_cp = mddev->resync_max_sectors; in raid10_finish_reshape()
4593 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in raid10_finish_reshape()
4595 mddev->resync_max_sectors = size; in raid10_finish_reshape()
4596 set_capacity(mddev->gendisk, mddev->array_sectors); in raid10_finish_reshape()
4597 revalidate_disk(mddev->gendisk); in raid10_finish_reshape()
4601 d < conf->geo.raid_disks - mddev->delta_disks; in raid10_finish_reshape()
4611 mddev->layout = mddev->new_layout; in raid10_finish_reshape()
4612 mddev->chunk_sectors = 1 << conf->geo.chunk_shift; in raid10_finish_reshape()
4613 mddev->reshape_position = MaxSector; in raid10_finish_reshape()
4614 mddev->delta_disks = 0; in raid10_finish_reshape()
4615 mddev->reshape_backwards = 0; in raid10_finish_reshape()