Lines Matching refs:mddev

267 md_wakeup_thread(conf->mddev->thread); in raid5_wakeup_stripe_thread()
311 md_wakeup_thread(conf->mddev->thread); in do_release_stripe()
317 md_wakeup_thread(conf->mddev->thread); in do_release_stripe()
376 md_wakeup_thread(conf->mddev->thread); in release_inactive_stripe_list()
424 if (unlikely(!conf->mddev->thread) || in raid5_release_stripe()
429 md_wakeup_thread(conf->mddev->thread); in raid5_release_stripe()
647 if (conf->mddev->reshape_position == MaxSector) in has_failed()
648 return conf->mddev->degraded > conf->max_degraded; in has_failed()
841 md_wakeup_thread(conf->mddev->thread); in stripe_add_to_batch_list()
969 if (!conf->mddev->external && in ops_run_io()
970 conf->mddev->flags) { in ops_run_io()
975 md_check_recovery(conf->mddev); in ops_run_io()
983 md_wait_for_blocked_rdev(rdev, conf->mddev); in ops_run_io()
986 rdev_dec_pending(rdev, conf->mddev); in ops_run_io()
1037 if (conf->mddev->gendisk) in ops_run_io()
1039 bi, disk_devt(conf->mddev->gendisk), in ops_run_io()
1083 if (conf->mddev->gendisk) in ops_run_io()
1085 rbi, disk_devt(conf->mddev->gendisk), in ops_run_io()
2027 if (conf->mddev->gendisk) in grow_stripes()
2029 "raid%d-%s", conf->level, mdname(conf->mddev)); in grow_stripes()
2032 "raid%d-%p", conf->level, conf->mddev); in grow_stripes()
2093 mddev_suspend(conf->mddev); in resize_chunks()
2113 mddev_resume(conf->mddev); in resize_chunks()
2157 err = md_allow_write(conf->mddev); in resize_stripes()
2332 mdname(conf->mddev), STRIPE_SECTORS, in raid5_end_read_request()
2355 mdname(conf->mddev), in raid5_end_read_request()
2358 else if (conf->mddev->degraded >= conf->max_degraded) { in raid5_end_read_request()
2364 mdname(conf->mddev), in raid5_end_read_request()
2374 mdname(conf->mddev), in raid5_end_read_request()
2381 mdname(conf->mddev), bdn); in raid5_end_read_request()
2400 md_error(conf->mddev, rdev); in raid5_end_read_request()
2403 rdev_dec_pending(rdev, conf->mddev); in raid5_end_read_request()
2447 md_error(conf->mddev, rdev); in raid5_end_write_request()
2459 &rdev->mddev->recovery); in raid5_end_write_request()
2472 rdev_dec_pending(rdev, conf->mddev); in raid5_end_write_request()
2504 static void error(struct mddev *mddev, struct md_rdev *rdev) in error() argument
2507 struct r5conf *conf = mddev->private; in error()
2513 mddev->degraded = calc_degraded(conf); in error()
2515 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in error()
2519 set_bit(MD_CHANGE_DEVS, &mddev->flags); in error()
2520 set_bit(MD_CHANGE_PENDING, &mddev->flags); in error()
2524 mdname(mddev), in error()
2526 mdname(mddev), in error()
2527 conf->raid_disks - mddev->degraded); in error()
2850 mdname(conf->mddev)); in raid5_compute_blocknr()
3016 if (conf->mddev->bitmap && firstwrite) { in add_stripe_bio()
3031 bitmap_startwrite(conf->mddev->bitmap, sh->sector, in add_stripe_bio()
3095 md_error(conf->mddev, rdev); in handle_failed_stripe()
3096 rdev_dec_pending(rdev, conf->mddev); in handle_failed_stripe()
3119 md_write_end(conf->mddev); in handle_failed_stripe()
3125 bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
3143 md_write_end(conf->mddev); in handle_failed_stripe()
3176 bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
3188 md_wakeup_thread(conf->mddev->thread); in handle_failed_stripe()
3211 if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) { in handle_failed_sync()
3233 conf->mddev->recovery_disabled; in handle_failed_sync()
3235 md_done_sync(conf->mddev, STRIPE_SECTORS, !abort); in handle_failed_sync()
3248 || rdev->mddev->recovery_cp <= sh->sector)) in want_replace()
3337 sh->sector < sh->raid_conf->mddev->recovery_cp) in need_this_block()
3485 md_write_end(conf->mddev); in handle_stripe_clean_event()
3490 bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_stripe_clean_event()
3549 md_wakeup_thread(conf->mddev->thread); in handle_stripe_clean_event()
3561 sector_t recovery_cp = conf->mddev->recovery_cp; in handle_stripe_dirtying()
3609 if (conf->mddev->queue) in handle_stripe_dirtying()
3610 blk_add_trace_msg(conf->mddev->queue, in handle_stripe_dirtying()
3661 if (rcw && conf->mddev->queue) in handle_stripe_dirtying()
3662 blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d", in handle_stripe_dirtying()
3749 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); in handle_parity_checks5()
3750 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) in handle_parity_checks5()
3901 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); in handle_parity_checks6()
3902 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) in handle_parity_checks6()
4185 sh->sector >= conf->mddev->recovery_cp || in analyse_stripe()
4186 test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery))) in analyse_stripe()
4352 rdev_dec_pending(s.blocked_rdev, conf->mddev); in handle_stripe()
4493 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); in handle_stripe()
4502 if (s.failed <= conf->max_degraded && !conf->mddev->ro) in handle_stripe()
4561 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); in handle_stripe()
4571 if (conf->mddev->external) in handle_stripe()
4573 conf->mddev); in handle_stripe()
4580 conf->mddev); in handle_stripe()
4592 md_error(conf->mddev, rdev); in handle_stripe()
4593 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
4599 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
4608 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
4625 md_wakeup_thread(conf->mddev->thread); in handle_stripe()
4629 if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags)) { in handle_stripe()
4633 md_wakeup_thread(conf->mddev->thread); in handle_stripe()
4675 static int raid5_congested(struct mddev *mddev, int bits) in raid5_congested() argument
4677 struct r5conf *conf = mddev->private; in raid5_congested()
4693 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) in in_chunk_boundary() argument
4695 struct r5conf *conf = mddev->private; in in_chunk_boundary()
4719 md_wakeup_thread(conf->mddev->thread); in add_bio_to_retry()
4754 struct mddev *mddev; in raid5_align_endio() local
4763 mddev = rdev->mddev; in raid5_align_endio()
4764 conf = mddev->private; in raid5_align_endio()
4766 rdev_dec_pending(rdev, conf->mddev); in raid5_align_endio()
4782 static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio) in raid5_read_one_chunk() argument
4784 struct r5conf *conf = mddev->private; in raid5_read_one_chunk()
4790 if (!in_chunk_boundary(mddev, raid_bio)) { in raid5_read_one_chunk()
4797 align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev); in raid5_read_one_chunk()
4839 rdev_dec_pending(rdev, mddev); in raid5_read_one_chunk()
4853 if (mddev->gendisk) in raid5_read_one_chunk()
4855 align_bi, disk_devt(mddev->gendisk), in raid5_read_one_chunk()
4866 static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio) in chunk_aligned_read() argument
4872 unsigned chunk_sects = mddev->chunk_sectors; in chunk_aligned_read()
4881 if (!raid5_read_one_chunk(mddev, split)) { in chunk_aligned_read()
4989 struct mddev *mddev = cb->cb.data; in raid5_unplug() local
4990 struct r5conf *conf = mddev->private; in raid5_unplug()
5018 if (mddev->queue) in raid5_unplug()
5019 trace_block_unplug(mddev->queue, cnt, !from_schedule); in raid5_unplug()
5023 static void release_stripe_plug(struct mddev *mddev, in release_stripe_plug() argument
5027 raid5_unplug, mddev, in release_stripe_plug()
5051 static void make_discard_request(struct mddev *mddev, struct bio *bi) in make_discard_request() argument
5053 struct r5conf *conf = mddev->private; in make_discard_request()
5059 if (mddev->reshape_position != MaxSector) in make_discard_request()
5117 if (conf->mddev->bitmap) { in make_discard_request()
5121 bitmap_startwrite(mddev->bitmap, in make_discard_request()
5133 release_stripe_plug(mddev, sh); in make_discard_request()
5138 md_write_end(mddev); in make_discard_request()
5143 static void make_request(struct mddev *mddev, struct bio * bi) in make_request() argument
5145 struct r5conf *conf = mddev->private; in make_request()
5161 md_flush_request(mddev, bi); in make_request()
5167 md_write_start(mddev, bi); in make_request()
5174 if (rw == READ && mddev->degraded == 0 && in make_request()
5175 mddev->reshape_position == MaxSector) { in make_request()
5176 bi = chunk_aligned_read(mddev, bi); in make_request()
5182 make_discard_request(mddev, bi); in make_request()
5213 if (mddev->reshape_backwards in make_request()
5218 if (mddev->reshape_backwards in make_request()
5251 if (mddev->reshape_backwards in make_request()
5273 logical_sector >= mddev->suspend_lo && in make_request()
5274 logical_sector < mddev->suspend_hi) { in make_request()
5283 if (logical_sector >= mddev->suspend_lo && in make_request()
5284 logical_sector < mddev->suspend_hi) { in make_request()
5297 md_wakeup_thread(mddev->thread); in make_request()
5309 release_stripe_plug(mddev, sh); in make_request()
5322 md_write_end(mddev); in make_request()
5330 static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);
5332 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped) in reshape_request() argument
5343 struct r5conf *conf = mddev->private; in reshape_request()
5359 if (mddev->reshape_backwards && in reshape_request()
5360 conf->reshape_progress < raid5_size(mddev, 0, 0)) { in reshape_request()
5361 sector_nr = raid5_size(mddev, 0, 0) in reshape_request()
5363 } else if (mddev->reshape_backwards && in reshape_request()
5367 } else if (!mddev->reshape_backwards && in reshape_request()
5372 mddev->curr_resync_completed = sector_nr; in reshape_request()
5373 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); in reshape_request()
5399 if (mddev->reshape_backwards) { in reshape_request()
5417 if (mddev->reshape_backwards) { in reshape_request()
5420 BUG_ON((mddev->dev_sectors & in reshape_request()
5455 if ((mddev->reshape_backwards in reshape_request()
5462 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); in reshape_request()
5465 mddev->reshape_position = conf->reshape_progress; in reshape_request()
5466 mddev->curr_resync_completed = sector_nr; in reshape_request()
5468 set_bit(MD_CHANGE_DEVS, &mddev->flags); in reshape_request()
5469 md_wakeup_thread(mddev->thread); in reshape_request()
5470 wait_event(mddev->sb_wait, mddev->flags == 0 || in reshape_request()
5471 test_bit(MD_RECOVERY_INTR, &mddev->recovery)); in reshape_request()
5472 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in reshape_request()
5475 conf->reshape_safe = mddev->reshape_position; in reshape_request()
5478 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); in reshape_request()
5499 if (s < raid5_size(mddev, 0, 0)) { in reshape_request()
5514 if (mddev->reshape_backwards) in reshape_request()
5531 if (last_sector >= mddev->dev_sectors) in reshape_request()
5532 last_sector = mddev->dev_sectors - 1; in reshape_request()
5554 if (mddev->curr_resync_completed > mddev->resync_max || in reshape_request()
5555 (sector_nr - mddev->curr_resync_completed) * 2 in reshape_request()
5556 >= mddev->resync_max - mddev->curr_resync_completed) { in reshape_request()
5560 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); in reshape_request()
5563 mddev->reshape_position = conf->reshape_progress; in reshape_request()
5564 mddev->curr_resync_completed = sector_nr; in reshape_request()
5566 set_bit(MD_CHANGE_DEVS, &mddev->flags); in reshape_request()
5567 md_wakeup_thread(mddev->thread); in reshape_request()
5568 wait_event(mddev->sb_wait, in reshape_request()
5569 !test_bit(MD_CHANGE_DEVS, &mddev->flags) in reshape_request()
5570 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); in reshape_request()
5571 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in reshape_request()
5574 conf->reshape_safe = mddev->reshape_position; in reshape_request()
5577 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); in reshape_request()
5583 static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped) in sync_request() argument
5585 struct r5conf *conf = mddev->private; in sync_request()
5587 sector_t max_sector = mddev->dev_sectors; in sync_request()
5595 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { in sync_request()
5600 if (mddev->curr_resync < max_sector) /* aborted */ in sync_request()
5601 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, in sync_request()
5605 bitmap_close_sync(mddev->bitmap); in sync_request()
5613 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in sync_request()
5614 return reshape_request(mddev, sector_nr, skipped); in sync_request()
5626 if (mddev->degraded >= conf->max_degraded && in sync_request()
5627 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in sync_request()
5628 sector_t rv = mddev->dev_sectors - sector_nr; in sync_request()
5632 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && in sync_request()
5634 !bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && in sync_request()
5642 bitmap_cond_end_sync(mddev->bitmap, sector_nr, false); in sync_request()
5665 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); in sync_request()
5833 struct mddev *mddev = thread->mddev; in raid5d() local
5834 struct r5conf *conf = mddev->private; in raid5d()
5840 md_check_recovery(mddev); in raid5d()
5843 !test_bit(MD_CHANGE_PENDING, &mddev->flags)) { in raid5d()
5846 if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) { in raid5d()
5870 bitmap_unplug(mddev->bitmap); in raid5d()
5893 if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) { in raid5d()
5895 md_check_recovery(mddev); in raid5d()
5921 raid5_show_stripe_cache_size(struct mddev *mddev, char *page) in raid5_show_stripe_cache_size() argument
5925 spin_lock(&mddev->lock); in raid5_show_stripe_cache_size()
5926 conf = mddev->private; in raid5_show_stripe_cache_size()
5929 spin_unlock(&mddev->lock); in raid5_show_stripe_cache_size()
5934 raid5_set_cache_size(struct mddev *mddev, int size) in raid5_set_cache_size() argument
5936 struct r5conf *conf = mddev->private; in raid5_set_cache_size()
5950 err = md_allow_write(mddev); in raid5_set_cache_size()
5965 raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len) in raid5_store_stripe_cache_size() argument
5975 err = mddev_lock(mddev); in raid5_store_stripe_cache_size()
5978 conf = mddev->private; in raid5_store_stripe_cache_size()
5982 err = raid5_set_cache_size(mddev, new); in raid5_store_stripe_cache_size()
5983 mddev_unlock(mddev); in raid5_store_stripe_cache_size()
5994 raid5_show_rmw_level(struct mddev *mddev, char *page) in raid5_show_rmw_level() argument
5996 struct r5conf *conf = mddev->private; in raid5_show_rmw_level()
6004 raid5_store_rmw_level(struct mddev *mddev, const char *page, size_t len) in raid5_store_rmw_level() argument
6006 struct r5conf *conf = mddev->private; in raid5_store_rmw_level()
6037 raid5_show_preread_threshold(struct mddev *mddev, char *page) in raid5_show_preread_threshold() argument
6041 spin_lock(&mddev->lock); in raid5_show_preread_threshold()
6042 conf = mddev->private; in raid5_show_preread_threshold()
6045 spin_unlock(&mddev->lock); in raid5_show_preread_threshold()
6050 raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len) in raid5_store_preread_threshold() argument
6061 err = mddev_lock(mddev); in raid5_store_preread_threshold()
6064 conf = mddev->private; in raid5_store_preread_threshold()
6071 mddev_unlock(mddev); in raid5_store_preread_threshold()
6082 raid5_show_skip_copy(struct mddev *mddev, char *page) in raid5_show_skip_copy() argument
6086 spin_lock(&mddev->lock); in raid5_show_skip_copy()
6087 conf = mddev->private; in raid5_show_skip_copy()
6090 spin_unlock(&mddev->lock); in raid5_show_skip_copy()
6095 raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len) in raid5_store_skip_copy() argument
6107 err = mddev_lock(mddev); in raid5_store_skip_copy()
6110 conf = mddev->private; in raid5_store_skip_copy()
6114 mddev_suspend(mddev); in raid5_store_skip_copy()
6117 mddev->queue->backing_dev_info.capabilities |= in raid5_store_skip_copy()
6120 mddev->queue->backing_dev_info.capabilities &= in raid5_store_skip_copy()
6122 mddev_resume(mddev); in raid5_store_skip_copy()
6124 mddev_unlock(mddev); in raid5_store_skip_copy()
6134 stripe_cache_active_show(struct mddev *mddev, char *page) in stripe_cache_active_show() argument
6136 struct r5conf *conf = mddev->private; in stripe_cache_active_show()
6147 raid5_show_group_thread_cnt(struct mddev *mddev, char *page) in raid5_show_group_thread_cnt() argument
6151 spin_lock(&mddev->lock); in raid5_show_group_thread_cnt()
6152 conf = mddev->private; in raid5_show_group_thread_cnt()
6155 spin_unlock(&mddev->lock); in raid5_show_group_thread_cnt()
6164 raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) in raid5_store_group_thread_cnt() argument
6177 err = mddev_lock(mddev); in raid5_store_group_thread_cnt()
6180 conf = mddev->private; in raid5_store_group_thread_cnt()
6184 mddev_suspend(mddev); in raid5_store_group_thread_cnt()
6204 mddev_resume(mddev); in raid5_store_group_thread_cnt()
6206 mddev_unlock(mddev); in raid5_store_group_thread_cnt()
6286 raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks) in raid5_size() argument
6288 struct r5conf *conf = mddev->private; in raid5_size()
6291 sectors = mddev->dev_sectors; in raid5_size()
6461 static struct r5conf *setup_conf(struct mddev *mddev) in setup_conf() argument
6472 if (mddev->new_level != 5 in setup_conf()
6473 && mddev->new_level != 4 in setup_conf()
6474 && mddev->new_level != 6) { in setup_conf()
6476 mdname(mddev), mddev->new_level); in setup_conf()
6479 if ((mddev->new_level == 5 in setup_conf()
6480 && !algorithm_valid_raid5(mddev->new_layout)) || in setup_conf()
6481 (mddev->new_level == 6 in setup_conf()
6482 && !algorithm_valid_raid6(mddev->new_layout))) { in setup_conf()
6484 mdname(mddev), mddev->new_layout); in setup_conf()
6487 if (mddev->new_level == 6 && mddev->raid_disks < 4) { in setup_conf()
6489 mdname(mddev), mddev->raid_disks); in setup_conf()
6493 if (!mddev->new_chunk_sectors || in setup_conf()
6494 (mddev->new_chunk_sectors << 9) % PAGE_SIZE || in setup_conf()
6495 !is_power_of_2(mddev->new_chunk_sectors)) { in setup_conf()
6497 mdname(mddev), mddev->new_chunk_sectors << 9); in setup_conf()
6528 conf->recovery_disabled = mddev->recovery_disabled - 1; in setup_conf()
6530 conf->raid_disks = mddev->raid_disks; in setup_conf()
6531 if (mddev->reshape_position == MaxSector) in setup_conf()
6532 conf->previous_raid_disks = mddev->raid_disks; in setup_conf()
6534 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; in setup_conf()
6542 conf->mddev = mddev; in setup_conf()
6562 conf->level = mddev->new_level; in setup_conf()
6563 conf->chunk_sectors = mddev->new_chunk_sectors; in setup_conf()
6567 pr_debug("raid456: run(%s) called.\n", mdname(mddev)); in setup_conf()
6569 rdev_for_each(rdev, mddev) { in setup_conf()
6590 mdname(mddev), bdevname(rdev->bdev, b), raid_disk); in setup_conf()
6596 conf->level = mddev->new_level; in setup_conf()
6607 conf->algorithm = mddev->new_layout; in setup_conf()
6608 conf->reshape_progress = mddev->reshape_position; in setup_conf()
6610 conf->prev_chunk_sectors = mddev->chunk_sectors; in setup_conf()
6611 conf->prev_algo = mddev->layout; in setup_conf()
6624 mdname(mddev), memory); in setup_conf()
6628 mdname(mddev), memory); in setup_conf()
6641 sprintf(pers_name, "raid%d", mddev->new_level); in setup_conf()
6642 conf->thread = md_register_thread(raid5d, mddev, pers_name); in setup_conf()
6646 mdname(mddev)); in setup_conf()
6686 static int run(struct mddev *mddev) in run() argument
6698 if (mddev->recovery_cp != MaxSector) in run()
6701 mdname(mddev)); in run()
6703 rdev_for_each(rdev, mddev) { in run()
6716 } else if (mddev->reshape_backwards && in run()
6719 else if (!mddev->reshape_backwards && in run()
6724 if (mddev->reshape_position != MaxSector) { in run()
6739 int max_degraded = (mddev->level == 6 ? 2 : 1); in run()
6745 mdname(mddev)); in run()
6749 if (mddev->new_level != mddev->level) { in run()
6752 mdname(mddev)); in run()
6755 old_disks = mddev->raid_disks - mddev->delta_disks; in run()
6763 here_new = mddev->reshape_position; in run()
6764 chunk_sectors = max(mddev->chunk_sectors, mddev->new_chunk_sectors); in run()
6765 new_data_disks = mddev->raid_disks - max_degraded; in run()
6768 "on a stripe boundary\n", mdname(mddev)); in run()
6773 here_old = mddev->reshape_position; in run()
6777 if (mddev->delta_disks == 0) { in run()
6785 if (abs(min_offset_diff) >= mddev->chunk_sectors && in run()
6786 abs(min_offset_diff) >= mddev->new_chunk_sectors) in run()
6788 else if (mddev->ro == 0) { in run()
6792 mdname(mddev)); in run()
6795 } else if (mddev->reshape_backwards in run()
6803 mdname(mddev)); in run()
6807 mdname(mddev)); in run()
6810 BUG_ON(mddev->level != mddev->new_level); in run()
6811 BUG_ON(mddev->layout != mddev->new_layout); in run()
6812 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors); in run()
6813 BUG_ON(mddev->delta_disks != 0); in run()
6816 if (mddev->private == NULL) in run()
6817 conf = setup_conf(mddev); in run()
6819 conf = mddev->private; in run()
6824 if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !journal_dev) { in run()
6826 mdname(mddev)); in run()
6827 mddev->ro = 1; in run()
6828 set_disk_ro(mddev->gendisk, 1); in run()
6832 mddev->thread = conf->thread; in run()
6834 mddev->private = conf; in run()
6868 if (mddev->major_version == 0 && in run()
6869 mddev->minor_version > 90) in run()
6891 mddev->degraded = calc_degraded(conf); in run()
6896 mdname(mddev), mddev->degraded, conf->raid_disks); in run()
6901 mddev->dev_sectors &= ~(mddev->chunk_sectors - 1); in run()
6902 mddev->resync_max_sectors = mddev->dev_sectors; in run()
6904 if (mddev->degraded > dirty_parity_disks && in run()
6905 mddev->recovery_cp != MaxSector) { in run()
6906 if (mddev->ok_start_degraded) in run()
6910 mdname(mddev)); in run()
6914 mdname(mddev)); in run()
6919 if (mddev->degraded == 0) in run()
6921 " devices, algorithm %d\n", mdname(mddev), conf->level, in run()
6922 mddev->raid_disks-mddev->degraded, mddev->raid_disks, in run()
6923 mddev->new_layout); in run()
6927 mdname(mddev), conf->level, in run()
6928 mddev->raid_disks - mddev->degraded, in run()
6929 mddev->raid_disks, mddev->new_layout); in run()
6936 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in run()
6937 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in run()
6938 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in run()
6939 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in run()
6940 mddev->sync_thread = md_register_thread(md_do_sync, mddev, in run()
6945 if (mddev->to_remove == &raid5_attrs_group) in run()
6946 mddev->to_remove = NULL; in run()
6947 else if (mddev->kobj.sd && in run()
6948 sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) in run()
6951 mdname(mddev)); in run()
6952 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); in run()
6954 if (mddev->queue) { in run()
6963 ((mddev->chunk_sectors << 9) / PAGE_SIZE); in run()
6964 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) in run()
6965 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; in run()
6967 chunk_size = mddev->chunk_sectors << 9; in run()
6968 blk_queue_io_min(mddev->queue, chunk_size); in run()
6969 blk_queue_io_opt(mddev->queue, chunk_size * in run()
6971 mddev->queue->limits.raid_partial_stripes_expensive = 1; in run()
6981 mddev->queue->limits.discard_alignment = stripe; in run()
6982 mddev->queue->limits.discard_granularity = stripe; in run()
6987 mddev->queue->limits.discard_zeroes_data = 0; in run()
6989 blk_queue_max_write_same_sectors(mddev->queue, 0); in run()
6991 rdev_for_each(rdev, mddev) { in run()
6992 disk_stack_limits(mddev->gendisk, rdev->bdev, in run()
6994 disk_stack_limits(mddev->gendisk, rdev->bdev, in run()
7025 mddev->queue->limits.max_discard_sectors >= (stripe >> 9) && in run()
7026 mddev->queue->limits.discard_granularity >= stripe) in run()
7028 mddev->queue); in run()
7031 mddev->queue); in run()
7038 mdname(mddev), bdevname(journal_dev->bdev, b)); in run()
7044 md_unregister_thread(&mddev->thread); in run()
7047 mddev->private = NULL; in run()
7048 printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev)); in run()
7052 static void raid5_free(struct mddev *mddev, void *priv) in raid5_free() argument
7057 mddev->to_remove = &raid5_attrs_group; in raid5_free()
7060 static void status(struct seq_file *seq, struct mddev *mddev) in status() argument
7062 struct r5conf *conf = mddev->private; in status()
7065 seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level, in status()
7066 conf->chunk_sectors / 2, mddev->layout); in status()
7067 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); in status()
7087 conf->raid_disks - conf->mddev->degraded); in print_raid5_conf()
7099 static int raid5_spare_active(struct mddev *mddev) in raid5_spare_active() argument
7102 struct r5conf *conf = mddev->private; in raid5_spare_active()
7136 mddev->degraded = calc_degraded(conf); in raid5_spare_active()
7142 static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev) in raid5_remove_disk() argument
7144 struct r5conf *conf = mddev->private; in raid5_remove_disk()
7157 set_bit(MD_CHANGE_DEVS, &mddev->flags); in raid5_remove_disk()
7180 mddev->recovery_disabled != conf->recovery_disabled && in raid5_remove_disk()
7213 static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) in raid5_add_disk() argument
7215 struct r5conf *conf = mddev->private; in raid5_add_disk()
7224 if (mddev->recovery_disabled == conf->recovery_disabled) in raid5_add_disk()
7273 static int raid5_resize(struct mddev *mddev, sector_t sectors) in raid5_resize() argument
7283 struct r5conf *conf = mddev->private; in raid5_resize()
7288 newsize = raid5_size(mddev, sectors, mddev->raid_disks); in raid5_resize()
7289 if (mddev->external_size && in raid5_resize()
7290 mddev->array_sectors > newsize) in raid5_resize()
7292 if (mddev->bitmap) { in raid5_resize()
7293 int ret = bitmap_resize(mddev->bitmap, sectors, 0, 0); in raid5_resize()
7297 md_set_array_sectors(mddev, newsize); in raid5_resize()
7298 set_capacity(mddev->gendisk, mddev->array_sectors); in raid5_resize()
7299 revalidate_disk(mddev->gendisk); in raid5_resize()
7300 if (sectors > mddev->dev_sectors && in raid5_resize()
7301 mddev->recovery_cp > mddev->dev_sectors) { in raid5_resize()
7302 mddev->recovery_cp = mddev->dev_sectors; in raid5_resize()
7303 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in raid5_resize()
7305 mddev->dev_sectors = sectors; in raid5_resize()
7306 mddev->resync_max_sectors = sectors; in raid5_resize()
7310 static int check_stripe_cache(struct mddev *mddev) in check_stripe_cache() argument
7320 struct r5conf *conf = mddev->private; in check_stripe_cache()
7321 if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4 in check_stripe_cache()
7323 ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4 in check_stripe_cache()
7326 mdname(mddev), in check_stripe_cache()
7327 ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9) in check_stripe_cache()
7334 static int check_reshape(struct mddev *mddev) in check_reshape() argument
7336 struct r5conf *conf = mddev->private; in check_reshape()
7340 if (mddev->delta_disks == 0 && in check_reshape()
7341 mddev->new_layout == mddev->layout && in check_reshape()
7342 mddev->new_chunk_sectors == mddev->chunk_sectors) in check_reshape()
7346 if (mddev->delta_disks < 0 && mddev->reshape_position == MaxSector) { in check_reshape()
7353 if (mddev->level == 6) in check_reshape()
7355 if (mddev->raid_disks + mddev->delta_disks < min) in check_reshape()
7359 if (!check_stripe_cache(mddev)) in check_reshape()
7362 if (mddev->new_chunk_sectors > mddev->chunk_sectors || in check_reshape()
7363 mddev->delta_disks > 0) in check_reshape()
7366 + max(0, mddev->delta_disks), in check_reshape()
7367 max(mddev->new_chunk_sectors, in check_reshape()
7368 mddev->chunk_sectors) in check_reshape()
7372 + mddev->delta_disks)); in check_reshape()
7375 static int raid5_start_reshape(struct mddev *mddev) in raid5_start_reshape() argument
7377 struct r5conf *conf = mddev->private; in raid5_start_reshape()
7382 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in raid5_start_reshape()
7385 if (!check_stripe_cache(mddev)) in raid5_start_reshape()
7391 rdev_for_each(rdev, mddev) { in raid5_start_reshape()
7397 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) in raid5_start_reshape()
7407 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) in raid5_start_reshape()
7408 < mddev->array_sectors) { in raid5_start_reshape()
7410 "before number of disks\n", mdname(mddev)); in raid5_start_reshape()
7418 conf->raid_disks += mddev->delta_disks; in raid5_start_reshape()
7420 conf->chunk_sectors = mddev->new_chunk_sectors; in raid5_start_reshape()
7422 conf->algorithm = mddev->new_layout; in raid5_start_reshape()
7428 if (mddev->reshape_backwards) in raid5_start_reshape()
7429 conf->reshape_progress = raid5_size(mddev, 0, 0); in raid5_start_reshape()
7440 mddev_suspend(mddev); in raid5_start_reshape()
7441 mddev_resume(mddev); in raid5_start_reshape()
7450 if (mddev->delta_disks >= 0) { in raid5_start_reshape()
7451 rdev_for_each(rdev, mddev) in raid5_start_reshape()
7454 if (raid5_add_disk(mddev, rdev) == 0) { in raid5_start_reshape()
7461 if (sysfs_link_rdev(mddev, rdev)) in raid5_start_reshape()
7475 mddev->degraded = calc_degraded(conf); in raid5_start_reshape()
7478 mddev->raid_disks = conf->raid_disks; in raid5_start_reshape()
7479 mddev->reshape_position = conf->reshape_progress; in raid5_start_reshape()
7480 set_bit(MD_CHANGE_DEVS, &mddev->flags); in raid5_start_reshape()
7482 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in raid5_start_reshape()
7483 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in raid5_start_reshape()
7484 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); in raid5_start_reshape()
7485 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in raid5_start_reshape()
7486 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in raid5_start_reshape()
7487 mddev->sync_thread = md_register_thread(md_do_sync, mddev, in raid5_start_reshape()
7489 if (!mddev->sync_thread) { in raid5_start_reshape()
7490 mddev->recovery = 0; in raid5_start_reshape()
7493 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; in raid5_start_reshape()
7494 mddev->new_chunk_sectors = in raid5_start_reshape()
7496 mddev->new_layout = conf->algorithm = conf->prev_algo; in raid5_start_reshape()
7497 rdev_for_each(rdev, mddev) in raid5_start_reshape()
7502 mddev->reshape_position = MaxSector; in raid5_start_reshape()
7508 md_wakeup_thread(mddev->sync_thread); in raid5_start_reshape()
7509 md_new_event(mddev); in raid5_start_reshape()
7519 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { in end_reshape()
7524 rdev_for_each(rdev, conf->mddev) in end_reshape()
7528 conf->mddev->reshape_position = MaxSector; in end_reshape()
7535 if (conf->mddev->queue) { in end_reshape()
7539 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) in end_reshape()
7540 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; in end_reshape()
7548 static void raid5_finish_reshape(struct mddev *mddev) in raid5_finish_reshape() argument
7550 struct r5conf *conf = mddev->private; in raid5_finish_reshape()
7552 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in raid5_finish_reshape()
7554 if (mddev->delta_disks > 0) { in raid5_finish_reshape()
7555 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); in raid5_finish_reshape()
7556 set_capacity(mddev->gendisk, mddev->array_sectors); in raid5_finish_reshape()
7557 revalidate_disk(mddev->gendisk); in raid5_finish_reshape()
7561 mddev->degraded = calc_degraded(conf); in raid5_finish_reshape()
7564 d < conf->raid_disks - mddev->delta_disks; in raid5_finish_reshape()
7574 mddev->layout = conf->algorithm; in raid5_finish_reshape()
7575 mddev->chunk_sectors = conf->chunk_sectors; in raid5_finish_reshape()
7576 mddev->reshape_position = MaxSector; in raid5_finish_reshape()
7577 mddev->delta_disks = 0; in raid5_finish_reshape()
7578 mddev->reshape_backwards = 0; in raid5_finish_reshape()
7582 static void raid5_quiesce(struct mddev *mddev, int state) in raid5_quiesce() argument
7584 struct r5conf *conf = mddev->private; in raid5_quiesce()
7619 static void *raid45_takeover_raid0(struct mddev *mddev, int level) in raid45_takeover_raid0() argument
7621 struct r0conf *raid0_conf = mddev->private; in raid45_takeover_raid0()
7627 mdname(mddev)); in raid45_takeover_raid0()
7633 mddev->dev_sectors = sectors; in raid45_takeover_raid0()
7634 mddev->new_level = level; in raid45_takeover_raid0()
7635 mddev->new_layout = ALGORITHM_PARITY_N; in raid45_takeover_raid0()
7636 mddev->new_chunk_sectors = mddev->chunk_sectors; in raid45_takeover_raid0()
7637 mddev->raid_disks += 1; in raid45_takeover_raid0()
7638 mddev->delta_disks = 1; in raid45_takeover_raid0()
7640 mddev->recovery_cp = MaxSector; in raid45_takeover_raid0()
7642 return setup_conf(mddev); in raid45_takeover_raid0()
7645 static void *raid5_takeover_raid1(struct mddev *mddev) in raid5_takeover_raid1() argument
7649 if (mddev->raid_disks != 2 || in raid5_takeover_raid1()
7650 mddev->degraded > 1) in raid5_takeover_raid1()
7658 while (chunksect && (mddev->array_sectors & (chunksect-1))) in raid5_takeover_raid1()
7665 mddev->new_level = 5; in raid5_takeover_raid1()
7666 mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC; in raid5_takeover_raid1()
7667 mddev->new_chunk_sectors = chunksect; in raid5_takeover_raid1()
7669 return setup_conf(mddev); in raid5_takeover_raid1()
7672 static void *raid5_takeover_raid6(struct mddev *mddev) in raid5_takeover_raid6() argument
7676 switch (mddev->layout) { in raid5_takeover_raid6()
7698 mddev->new_level = 5; in raid5_takeover_raid6()
7699 mddev->new_layout = new_layout; in raid5_takeover_raid6()
7700 mddev->delta_disks = -1; in raid5_takeover_raid6()
7701 mddev->raid_disks -= 1; in raid5_takeover_raid6()
7702 return setup_conf(mddev); in raid5_takeover_raid6()
7705 static int raid5_check_reshape(struct mddev *mddev) in raid5_check_reshape() argument
7712 struct r5conf *conf = mddev->private; in raid5_check_reshape()
7713 int new_chunk = mddev->new_chunk_sectors; in raid5_check_reshape()
7715 if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout)) in raid5_check_reshape()
7722 if (mddev->array_sectors & (new_chunk-1)) in raid5_check_reshape()
7729 if (mddev->raid_disks == 2) { in raid5_check_reshape()
7731 if (mddev->new_layout >= 0) { in raid5_check_reshape()
7732 conf->algorithm = mddev->new_layout; in raid5_check_reshape()
7733 mddev->layout = mddev->new_layout; in raid5_check_reshape()
7737 mddev->chunk_sectors = new_chunk; in raid5_check_reshape()
7739 set_bit(MD_CHANGE_DEVS, &mddev->flags); in raid5_check_reshape()
7740 md_wakeup_thread(mddev->thread); in raid5_check_reshape()
7742 return check_reshape(mddev); in raid5_check_reshape()
7745 static int raid6_check_reshape(struct mddev *mddev) in raid6_check_reshape() argument
7747 int new_chunk = mddev->new_chunk_sectors; in raid6_check_reshape()
7749 if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout)) in raid6_check_reshape()
7756 if (mddev->array_sectors & (new_chunk-1)) in raid6_check_reshape()
7762 return check_reshape(mddev); in raid6_check_reshape()
7765 static void *raid5_takeover(struct mddev *mddev) in raid5_takeover() argument
7773 if (mddev->level == 0) in raid5_takeover()
7774 return raid45_takeover_raid0(mddev, 5); in raid5_takeover()
7775 if (mddev->level == 1) in raid5_takeover()
7776 return raid5_takeover_raid1(mddev); in raid5_takeover()
7777 if (mddev->level == 4) { in raid5_takeover()
7778 mddev->new_layout = ALGORITHM_PARITY_N; in raid5_takeover()
7779 mddev->new_level = 5; in raid5_takeover()
7780 return setup_conf(mddev); in raid5_takeover()
7782 if (mddev->level == 6) in raid5_takeover()
7783 return raid5_takeover_raid6(mddev); in raid5_takeover()
7788 static void *raid4_takeover(struct mddev *mddev) in raid4_takeover() argument
7794 if (mddev->level == 0) in raid4_takeover()
7795 return raid45_takeover_raid0(mddev, 4); in raid4_takeover()
7796 if (mddev->level == 5 && in raid4_takeover()
7797 mddev->layout == ALGORITHM_PARITY_N) { in raid4_takeover()
7798 mddev->new_layout = 0; in raid4_takeover()
7799 mddev->new_level = 4; in raid4_takeover()
7800 return setup_conf(mddev); in raid4_takeover()
7807 static void *raid6_takeover(struct mddev *mddev) in raid6_takeover() argument
7815 if (mddev->pers != &raid5_personality) in raid6_takeover()
7817 if (mddev->degraded > 1) in raid6_takeover()
7819 if (mddev->raid_disks > 253) in raid6_takeover()
7821 if (mddev->raid_disks < 3) in raid6_takeover()
7824 switch (mddev->layout) { in raid6_takeover()
7846 mddev->new_level = 6; in raid6_takeover()
7847 mddev->new_layout = new_layout; in raid6_takeover()
7848 mddev->delta_disks = 1; in raid6_takeover()
7849 mddev->raid_disks += 1; in raid6_takeover()
7850 return setup_conf(mddev); in raid6_takeover()