Lines matching refs:mddev
Each entry gives the source line number, the matching source line, and the enclosing function. All of the functions named below live in the md/raid5 driver (drivers/md/raid5.c); the raid5 private state (struct r5conf) keeps a back-pointer to its owning struct mddev, which is why most references appear as conf->mddev.
271 md_wakeup_thread(conf->mddev->thread); in raid5_wakeup_stripe_thread()
315 md_wakeup_thread(conf->mddev->thread); in do_release_stripe()
321 md_wakeup_thread(conf->mddev->thread); in do_release_stripe()
378 md_wakeup_thread(conf->mddev->thread); in release_inactive_stripe_list()
426 if (unlikely(!conf->mddev->thread) || in release_stripe()
431 md_wakeup_thread(conf->mddev->thread); in release_stripe()
649 if (conf->mddev->reshape_position == MaxSector) in has_failed()
650 return conf->mddev->degraded > conf->max_degraded; in has_failed()
839 md_wakeup_thread(conf->mddev->thread); in stripe_add_to_batch_list()
965 if (!conf->mddev->external && in ops_run_io()
966 conf->mddev->flags) { in ops_run_io()
971 md_check_recovery(conf->mddev); in ops_run_io()
979 md_wait_for_blocked_rdev(rdev, conf->mddev); in ops_run_io()
982 rdev_dec_pending(rdev, conf->mddev); in ops_run_io()
1033 if (conf->mddev->gendisk) in ops_run_io()
1035 bi, disk_devt(conf->mddev->gendisk), in ops_run_io()
1079 if (conf->mddev->gendisk) in ops_run_io()
1081 rbi, disk_devt(conf->mddev->gendisk), in ops_run_io()
2025 if (conf->mddev->gendisk) in grow_stripes()
2027 "raid%d-%s", conf->level, mdname(conf->mddev)); in grow_stripes()
2030 "raid%d-%p", conf->level, conf->mddev); in grow_stripes()
2091 mddev_suspend(conf->mddev); in resize_chunks()
2111 mddev_resume(conf->mddev); in resize_chunks()
2155 err = md_allow_write(conf->mddev); in resize_stripes()
2332 mdname(conf->mddev), STRIPE_SECTORS, in raid5_end_read_request()
2355 mdname(conf->mddev), in raid5_end_read_request()
2358 else if (conf->mddev->degraded >= conf->max_degraded) { in raid5_end_read_request()
2364 mdname(conf->mddev), in raid5_end_read_request()
2374 mdname(conf->mddev), in raid5_end_read_request()
2381 mdname(conf->mddev), bdn); in raid5_end_read_request()
2400 md_error(conf->mddev, rdev); in raid5_end_read_request()
2403 rdev_dec_pending(rdev, conf->mddev); in raid5_end_read_request()
2448 md_error(conf->mddev, rdev); in raid5_end_write_request()
2460 &rdev->mddev->recovery); in raid5_end_write_request()
2473 rdev_dec_pending(rdev, conf->mddev); in raid5_end_write_request()
2507 static void error(struct mddev *mddev, struct md_rdev *rdev) in error() argument
2510 struct r5conf *conf = mddev->private; in error()
2516 mddev->degraded = calc_degraded(conf); in error()
2518 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in error()
2522 set_bit(MD_CHANGE_DEVS, &mddev->flags); in error()
2526 mdname(mddev), in error()
2528 mdname(mddev), in error()
2529 conf->raid_disks - mddev->degraded); in error()
2852 mdname(conf->mddev)); in compute_blocknr()
3018 if (conf->mddev->bitmap && firstwrite) { in add_stripe_bio()
3033 bitmap_startwrite(conf->mddev->bitmap, sh->sector, in add_stripe_bio()
3097 md_error(conf->mddev, rdev); in handle_failed_stripe()
3098 rdev_dec_pending(rdev, conf->mddev); in handle_failed_stripe()
3118 md_write_end(conf->mddev); in handle_failed_stripe()
3125 bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
3142 md_write_end(conf->mddev); in handle_failed_stripe()
3174 bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
3184 md_wakeup_thread(conf->mddev->thread); in handle_failed_stripe()
3207 if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) { in handle_failed_sync()
3229 conf->mddev->recovery_disabled; in handle_failed_sync()
3231 md_done_sync(conf->mddev, STRIPE_SECTORS, !abort); in handle_failed_sync()
3244 || rdev->mddev->recovery_cp <= sh->sector)) in want_replace()
3333 sh->sector < sh->raid_conf->mddev->recovery_cp) in need_this_block()
3481 md_write_end(conf->mddev); in handle_stripe_clean_event()
3487 bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_stripe_clean_event()
3543 md_wakeup_thread(conf->mddev->thread); in handle_stripe_clean_event()
3555 sector_t recovery_cp = conf->mddev->recovery_cp; in handle_stripe_dirtying()
3603 if (conf->mddev->queue) in handle_stripe_dirtying()
3604 blk_add_trace_msg(conf->mddev->queue, in handle_stripe_dirtying()
3655 if (rcw && conf->mddev->queue) in handle_stripe_dirtying()
3656 blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d", in handle_stripe_dirtying()
3743 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); in handle_parity_checks5()
3744 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) in handle_parity_checks5()
3895 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); in handle_parity_checks6()
3896 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) in handle_parity_checks6()
4176 sh->sector >= conf->mddev->recovery_cp || in analyse_stripe()
4177 test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery))) in analyse_stripe()
4340 rdev_dec_pending(s.blocked_rdev, conf->mddev); in handle_stripe()
4481 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); in handle_stripe()
4490 if (s.failed <= conf->max_degraded && !conf->mddev->ro) in handle_stripe()
4549 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); in handle_stripe()
4559 if (conf->mddev->external) in handle_stripe()
4561 conf->mddev); in handle_stripe()
4568 conf->mddev); in handle_stripe()
4580 md_error(conf->mddev, rdev); in handle_stripe()
4581 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
4587 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
4596 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
4613 md_wakeup_thread(conf->mddev->thread); in handle_stripe()
4655 static int raid5_congested(struct mddev *mddev, int bits) in raid5_congested() argument
4657 struct r5conf *conf = mddev->private; in raid5_congested()
4676 static int raid5_mergeable_bvec(struct mddev *mddev, in raid5_mergeable_bvec() argument
4682 unsigned int chunk_sectors = mddev->chunk_sectors; in raid5_mergeable_bvec()
4689 if ((bvm->bi_rw & 1) == WRITE || mddev->degraded) in raid5_mergeable_bvec()
4692 if (mddev->new_chunk_sectors < mddev->chunk_sectors) in raid5_mergeable_bvec()
4693 chunk_sectors = mddev->new_chunk_sectors; in raid5_mergeable_bvec()
4702 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) in in_chunk_boundary() argument
4705 unsigned int chunk_sectors = mddev->chunk_sectors; in in_chunk_boundary()
4708 if (mddev->new_chunk_sectors < mddev->chunk_sectors) in in_chunk_boundary()
4709 chunk_sectors = mddev->new_chunk_sectors; in in_chunk_boundary()
4728 md_wakeup_thread(conf->mddev->thread); in add_bio_to_retry()
4763 struct mddev *mddev; in raid5_align_endio() local
4772 mddev = rdev->mddev; in raid5_align_endio()
4773 conf = mddev->private; in raid5_align_endio()
4775 rdev_dec_pending(rdev, conf->mddev); in raid5_align_endio()
4810 static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio) in chunk_aligned_read() argument
4812 struct r5conf *conf = mddev->private; in chunk_aligned_read()
4818 if (!in_chunk_boundary(mddev, raid_bio)) { in chunk_aligned_read()
4825 align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev); in chunk_aligned_read()
4869 rdev_dec_pending(rdev, mddev); in chunk_aligned_read()
4883 if (mddev->gendisk) in chunk_aligned_read()
4885 align_bi, disk_devt(mddev->gendisk), in chunk_aligned_read()
4994 struct mddev *mddev = cb->cb.data; in raid5_unplug() local
4995 struct r5conf *conf = mddev->private; in raid5_unplug()
5023 if (mddev->queue) in raid5_unplug()
5024 trace_block_unplug(mddev->queue, cnt, !from_schedule); in raid5_unplug()
5028 static void release_stripe_plug(struct mddev *mddev, in release_stripe_plug() argument
5032 raid5_unplug, mddev, in release_stripe_plug()
5056 static void make_discard_request(struct mddev *mddev, struct bio *bi) in make_discard_request() argument
5058 struct r5conf *conf = mddev->private; in make_discard_request()
5064 if (mddev->reshape_position != MaxSector) in make_discard_request()
5122 if (conf->mddev->bitmap) { in make_discard_request()
5126 bitmap_startwrite(mddev->bitmap, in make_discard_request()
5138 release_stripe_plug(mddev, sh); in make_discard_request()
5143 md_write_end(mddev); in make_discard_request()
5148 static void make_request(struct mddev *mddev, struct bio * bi) in make_request() argument
5150 struct r5conf *conf = mddev->private; in make_request()
5161 md_flush_request(mddev, bi); in make_request()
5165 md_write_start(mddev, bi); in make_request()
5172 if (rw == READ && mddev->degraded == 0 && in make_request()
5173 mddev->reshape_position == MaxSector && in make_request()
5174 chunk_aligned_read(mddev,bi)) in make_request()
5178 make_discard_request(mddev, bi); in make_request()
5209 if (mddev->reshape_backwards in make_request()
5214 if (mddev->reshape_backwards in make_request()
5247 if (mddev->reshape_backwards in make_request()
5269 logical_sector >= mddev->suspend_lo && in make_request()
5270 logical_sector < mddev->suspend_hi) { in make_request()
5279 if (logical_sector >= mddev->suspend_lo && in make_request()
5280 logical_sector < mddev->suspend_hi) { in make_request()
5293 md_wakeup_thread(mddev->thread); in make_request()
5305 release_stripe_plug(mddev, sh); in make_request()
5318 md_write_end(mddev); in make_request()
5326 static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);
5328 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped) in reshape_request() argument
5339 struct r5conf *conf = mddev->private; in reshape_request()
5354 if (mddev->reshape_backwards && in reshape_request()
5355 conf->reshape_progress < raid5_size(mddev, 0, 0)) { in reshape_request()
5356 sector_nr = raid5_size(mddev, 0, 0) in reshape_request()
5358 } else if (!mddev->reshape_backwards && in reshape_request()
5363 mddev->curr_resync_completed = sector_nr; in reshape_request()
5364 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); in reshape_request()
5374 if (mddev->new_chunk_sectors > mddev->chunk_sectors) in reshape_request()
5375 reshape_sectors = mddev->new_chunk_sectors; in reshape_request()
5377 reshape_sectors = mddev->chunk_sectors; in reshape_request()
5391 if (mddev->reshape_backwards) { in reshape_request()
5404 if (mddev->reshape_backwards) { in reshape_request()
5407 BUG_ON((mddev->dev_sectors & in reshape_request()
5442 if ((mddev->reshape_backwards in reshape_request()
5449 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); in reshape_request()
5452 mddev->reshape_position = conf->reshape_progress; in reshape_request()
5453 mddev->curr_resync_completed = sector_nr; in reshape_request()
5455 set_bit(MD_CHANGE_DEVS, &mddev->flags); in reshape_request()
5456 md_wakeup_thread(mddev->thread); in reshape_request()
5457 wait_event(mddev->sb_wait, mddev->flags == 0 || in reshape_request()
5458 test_bit(MD_RECOVERY_INTR, &mddev->recovery)); in reshape_request()
5459 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in reshape_request()
5462 conf->reshape_safe = mddev->reshape_position; in reshape_request()
5465 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); in reshape_request()
5486 if (s < raid5_size(mddev, 0, 0)) { in reshape_request()
5501 if (mddev->reshape_backwards) in reshape_request()
5518 if (last_sector >= mddev->dev_sectors) in reshape_request()
5519 last_sector = mddev->dev_sectors - 1; in reshape_request()
5539 if ((sector_nr - mddev->curr_resync_completed) * 2 in reshape_request()
5540 >= mddev->resync_max - mddev->curr_resync_completed) { in reshape_request()
5544 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); in reshape_request()
5547 mddev->reshape_position = conf->reshape_progress; in reshape_request()
5548 mddev->curr_resync_completed = sector_nr; in reshape_request()
5550 set_bit(MD_CHANGE_DEVS, &mddev->flags); in reshape_request()
5551 md_wakeup_thread(mddev->thread); in reshape_request()
5552 wait_event(mddev->sb_wait, in reshape_request()
5553 !test_bit(MD_CHANGE_DEVS, &mddev->flags) in reshape_request()
5554 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); in reshape_request()
5555 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in reshape_request()
5558 conf->reshape_safe = mddev->reshape_position; in reshape_request()
5561 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); in reshape_request()
5567 static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped) in sync_request() argument
5569 struct r5conf *conf = mddev->private; in sync_request()
5571 sector_t max_sector = mddev->dev_sectors; in sync_request()
5579 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { in sync_request()
5584 if (mddev->curr_resync < max_sector) /* aborted */ in sync_request()
5585 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, in sync_request()
5589 bitmap_close_sync(mddev->bitmap); in sync_request()
5597 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in sync_request()
5598 return reshape_request(mddev, sector_nr, skipped); in sync_request()
5610 if (mddev->degraded >= conf->max_degraded && in sync_request()
5611 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in sync_request()
5612 sector_t rv = mddev->dev_sectors - sector_nr; in sync_request()
5616 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && in sync_request()
5618 !bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && in sync_request()
5626 bitmap_cond_end_sync(mddev->bitmap, sector_nr); in sync_request()
5649 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); in sync_request()
5811 struct mddev *mddev = thread->mddev; in raid5d() local
5812 struct r5conf *conf = mddev->private; in raid5d()
5818 md_check_recovery(mddev); in raid5d()
5836 bitmap_unplug(mddev->bitmap); in raid5d()
5859 if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) { in raid5d()
5861 md_check_recovery(mddev); in raid5d()
5885 raid5_show_stripe_cache_size(struct mddev *mddev, char *page) in raid5_show_stripe_cache_size() argument
5889 spin_lock(&mddev->lock); in raid5_show_stripe_cache_size()
5890 conf = mddev->private; in raid5_show_stripe_cache_size()
5893 spin_unlock(&mddev->lock); in raid5_show_stripe_cache_size()
5898 raid5_set_cache_size(struct mddev *mddev, int size) in raid5_set_cache_size() argument
5900 struct r5conf *conf = mddev->private; in raid5_set_cache_size()
5914 err = md_allow_write(mddev); in raid5_set_cache_size()
5929 raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len) in raid5_store_stripe_cache_size() argument
5939 err = mddev_lock(mddev); in raid5_store_stripe_cache_size()
5942 conf = mddev->private; in raid5_store_stripe_cache_size()
5946 err = raid5_set_cache_size(mddev, new); in raid5_store_stripe_cache_size()
5947 mddev_unlock(mddev); in raid5_store_stripe_cache_size()
5958 raid5_show_rmw_level(struct mddev *mddev, char *page) in raid5_show_rmw_level() argument
5960 struct r5conf *conf = mddev->private; in raid5_show_rmw_level()
5968 raid5_store_rmw_level(struct mddev *mddev, const char *page, size_t len) in raid5_store_rmw_level() argument
5970 struct r5conf *conf = mddev->private; in raid5_store_rmw_level()
6001 raid5_show_preread_threshold(struct mddev *mddev, char *page) in raid5_show_preread_threshold() argument
6005 spin_lock(&mddev->lock); in raid5_show_preread_threshold()
6006 conf = mddev->private; in raid5_show_preread_threshold()
6009 spin_unlock(&mddev->lock); in raid5_show_preread_threshold()
6014 raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len) in raid5_store_preread_threshold() argument
6025 err = mddev_lock(mddev); in raid5_store_preread_threshold()
6028 conf = mddev->private; in raid5_store_preread_threshold()
6035 mddev_unlock(mddev); in raid5_store_preread_threshold()
6046 raid5_show_skip_copy(struct mddev *mddev, char *page) in raid5_show_skip_copy() argument
6050 spin_lock(&mddev->lock); in raid5_show_skip_copy()
6051 conf = mddev->private; in raid5_show_skip_copy()
6054 spin_unlock(&mddev->lock); in raid5_show_skip_copy()
6059 raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len) in raid5_store_skip_copy() argument
6071 err = mddev_lock(mddev); in raid5_store_skip_copy()
6074 conf = mddev->private; in raid5_store_skip_copy()
6078 mddev_suspend(mddev); in raid5_store_skip_copy()
6081 mddev->queue->backing_dev_info.capabilities |= in raid5_store_skip_copy()
6084 mddev->queue->backing_dev_info.capabilities &= in raid5_store_skip_copy()
6086 mddev_resume(mddev); in raid5_store_skip_copy()
6088 mddev_unlock(mddev); in raid5_store_skip_copy()
6098 stripe_cache_active_show(struct mddev *mddev, char *page) in stripe_cache_active_show() argument
6100 struct r5conf *conf = mddev->private; in stripe_cache_active_show()
6111 raid5_show_group_thread_cnt(struct mddev *mddev, char *page) in raid5_show_group_thread_cnt() argument
6115 spin_lock(&mddev->lock); in raid5_show_group_thread_cnt()
6116 conf = mddev->private; in raid5_show_group_thread_cnt()
6119 spin_unlock(&mddev->lock); in raid5_show_group_thread_cnt()
6128 raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) in raid5_store_group_thread_cnt() argument
6141 err = mddev_lock(mddev); in raid5_store_group_thread_cnt()
6144 conf = mddev->private; in raid5_store_group_thread_cnt()
6148 mddev_suspend(mddev); in raid5_store_group_thread_cnt()
6168 mddev_resume(mddev); in raid5_store_group_thread_cnt()
6170 mddev_unlock(mddev); in raid5_store_group_thread_cnt()
6250 raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks) in raid5_size() argument
6252 struct r5conf *conf = mddev->private; in raid5_size()
6255 sectors = mddev->dev_sectors; in raid5_size()
6260 sectors &= ~((sector_t)mddev->chunk_sectors - 1); in raid5_size()
6261 sectors &= ~((sector_t)mddev->new_chunk_sectors - 1); in raid5_size()
6422 static struct r5conf *setup_conf(struct mddev *mddev) in setup_conf() argument
6433 if (mddev->new_level != 5 in setup_conf()
6434 && mddev->new_level != 4 in setup_conf()
6435 && mddev->new_level != 6) { in setup_conf()
6437 mdname(mddev), mddev->new_level); in setup_conf()
6440 if ((mddev->new_level == 5 in setup_conf()
6441 && !algorithm_valid_raid5(mddev->new_layout)) || in setup_conf()
6442 (mddev->new_level == 6 in setup_conf()
6443 && !algorithm_valid_raid6(mddev->new_layout))) { in setup_conf()
6445 mdname(mddev), mddev->new_layout); in setup_conf()
6448 if (mddev->new_level == 6 && mddev->raid_disks < 4) { in setup_conf()
6450 mdname(mddev), mddev->raid_disks); in setup_conf()
6454 if (!mddev->new_chunk_sectors || in setup_conf()
6455 (mddev->new_chunk_sectors << 9) % PAGE_SIZE || in setup_conf()
6456 !is_power_of_2(mddev->new_chunk_sectors)) { in setup_conf()
6458 mdname(mddev), mddev->new_chunk_sectors << 9); in setup_conf()
6487 conf->recovery_disabled = mddev->recovery_disabled - 1; in setup_conf()
6489 conf->raid_disks = mddev->raid_disks; in setup_conf()
6490 if (mddev->reshape_position == MaxSector) in setup_conf()
6491 conf->previous_raid_disks = mddev->raid_disks; in setup_conf()
6493 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; in setup_conf()
6501 conf->mddev = mddev; in setup_conf()
6521 conf->level = mddev->new_level; in setup_conf()
6522 conf->chunk_sectors = mddev->new_chunk_sectors; in setup_conf()
6526 pr_debug("raid456: run(%s) called.\n", mdname(mddev)); in setup_conf()
6528 rdev_for_each(rdev, mddev) { in setup_conf()
6549 mdname(mddev), bdevname(rdev->bdev, b), raid_disk); in setup_conf()
6555 conf->level = mddev->new_level; in setup_conf()
6566 conf->algorithm = mddev->new_layout; in setup_conf()
6567 conf->reshape_progress = mddev->reshape_position; in setup_conf()
6569 conf->prev_chunk_sectors = mddev->chunk_sectors; in setup_conf()
6570 conf->prev_algo = mddev->layout; in setup_conf()
6580 mdname(mddev), memory); in setup_conf()
6584 mdname(mddev), memory); in setup_conf()
6597 sprintf(pers_name, "raid%d", mddev->new_level); in setup_conf()
6598 conf->thread = md_register_thread(raid5d, mddev, pers_name); in setup_conf()
6602 mdname(mddev)); in setup_conf()
6642 static int run(struct mddev *mddev) in run() argument
6653 if (mddev->recovery_cp != MaxSector) in run()
6656 mdname(mddev)); in run()
6658 rdev_for_each(rdev, mddev) { in run()
6666 } else if (mddev->reshape_backwards && in run()
6669 else if (!mddev->reshape_backwards && in run()
6674 if (mddev->reshape_position != MaxSector) { in run()
6689 int max_degraded = (mddev->level == 6 ? 2 : 1); in run()
6691 if (mddev->new_level != mddev->level) { in run()
6694 mdname(mddev)); in run()
6697 old_disks = mddev->raid_disks - mddev->delta_disks; in run()
6702 here_new = mddev->reshape_position; in run()
6703 if (sector_div(here_new, mddev->new_chunk_sectors * in run()
6704 (mddev->raid_disks - max_degraded))) { in run()
6706 "on a stripe boundary\n", mdname(mddev)); in run()
6709 reshape_offset = here_new * mddev->new_chunk_sectors; in run()
6711 here_old = mddev->reshape_position; in run()
6712 sector_div(here_old, mddev->chunk_sectors * in run()
6716 if (mddev->delta_disks == 0) { in run()
6717 if ((here_new * mddev->new_chunk_sectors != in run()
6718 here_old * mddev->chunk_sectors)) { in run()
6720 " confused - aborting\n", mdname(mddev)); in run()
6730 if (abs(min_offset_diff) >= mddev->chunk_sectors && in run()
6731 abs(min_offset_diff) >= mddev->new_chunk_sectors) in run()
6733 else if (mddev->ro == 0) { in run()
6737 mdname(mddev)); in run()
6740 } else if (mddev->reshape_backwards in run()
6741 ? (here_new * mddev->new_chunk_sectors + min_offset_diff <= in run()
6742 here_old * mddev->chunk_sectors) in run()
6743 : (here_new * mddev->new_chunk_sectors >= in run()
6744 here_old * mddev->chunk_sectors + (-min_offset_diff))) { in run()
6748 mdname(mddev)); in run()
6752 mdname(mddev)); in run()
6755 BUG_ON(mddev->level != mddev->new_level); in run()
6756 BUG_ON(mddev->layout != mddev->new_layout); in run()
6757 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors); in run()
6758 BUG_ON(mddev->delta_disks != 0); in run()
6761 if (mddev->private == NULL) in run()
6762 conf = setup_conf(mddev); in run()
6764 conf = mddev->private; in run()
6770 mddev->thread = conf->thread; in run()
6772 mddev->private = conf; in run()
6806 if (mddev->major_version == 0 && in run()
6807 mddev->minor_version > 90) in run()
6829 mddev->degraded = calc_degraded(conf); in run()
6834 mdname(mddev), mddev->degraded, conf->raid_disks); in run()
6839 mddev->dev_sectors &= ~(mddev->chunk_sectors - 1); in run()
6840 mddev->resync_max_sectors = mddev->dev_sectors; in run()
6842 if (mddev->degraded > dirty_parity_disks && in run()
6843 mddev->recovery_cp != MaxSector) { in run()
6844 if (mddev->ok_start_degraded) in run()
6848 mdname(mddev)); in run()
6852 mdname(mddev)); in run()
6857 if (mddev->degraded == 0) in run()
6859 " devices, algorithm %d\n", mdname(mddev), conf->level, in run()
6860 mddev->raid_disks-mddev->degraded, mddev->raid_disks, in run()
6861 mddev->new_layout); in run()
6865 mdname(mddev), conf->level, in run()
6866 mddev->raid_disks - mddev->degraded, in run()
6867 mddev->raid_disks, mddev->new_layout); in run()
6874 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in run()
6875 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in run()
6876 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in run()
6877 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in run()
6878 mddev->sync_thread = md_register_thread(md_do_sync, mddev, in run()
6883 if (mddev->to_remove == &raid5_attrs_group) in run()
6884 mddev->to_remove = NULL; in run()
6885 else if (mddev->kobj.sd && in run()
6886 sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) in run()
6889 mdname(mddev)); in run()
6890 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); in run()
6892 if (mddev->queue) { in run()
6901 ((mddev->chunk_sectors << 9) / PAGE_SIZE); in run()
6902 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) in run()
6903 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; in run()
6905 chunk_size = mddev->chunk_sectors << 9; in run()
6906 blk_queue_io_min(mddev->queue, chunk_size); in run()
6907 blk_queue_io_opt(mddev->queue, chunk_size * in run()
6909 mddev->queue->limits.raid_partial_stripes_expensive = 1; in run()
6919 mddev->queue->limits.discard_alignment = stripe; in run()
6920 mddev->queue->limits.discard_granularity = stripe; in run()
6925 mddev->queue->limits.discard_zeroes_data = 0; in run()
6927 blk_queue_max_write_same_sectors(mddev->queue, 0); in run()
6929 rdev_for_each(rdev, mddev) { in run()
6930 disk_stack_limits(mddev->gendisk, rdev->bdev, in run()
6932 disk_stack_limits(mddev->gendisk, rdev->bdev, in run()
6963 mddev->queue->limits.max_discard_sectors >= (stripe >> 9) && in run()
6964 mddev->queue->limits.discard_granularity >= stripe) in run()
6966 mddev->queue); in run()
6969 mddev->queue); in run()
6974 md_unregister_thread(&mddev->thread); in run()
6977 mddev->private = NULL; in run()
6978 printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev)); in run()
6982 static void raid5_free(struct mddev *mddev, void *priv) in raid5_free() argument
6987 mddev->to_remove = &raid5_attrs_group; in raid5_free()
6990 static void status(struct seq_file *seq, struct mddev *mddev) in status() argument
6992 struct r5conf *conf = mddev->private; in status()
6995 seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level, in status()
6996 mddev->chunk_sectors / 2, mddev->layout); in status()
6997 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); in status()
7017 conf->raid_disks - conf->mddev->degraded); in print_raid5_conf()
7029 static int raid5_spare_active(struct mddev *mddev) in raid5_spare_active() argument
7032 struct r5conf *conf = mddev->private; in raid5_spare_active()
7066 mddev->degraded = calc_degraded(conf); in raid5_spare_active()
7072 static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev) in raid5_remove_disk() argument
7074 struct r5conf *conf = mddev->private; in raid5_remove_disk()
7101 mddev->recovery_disabled != conf->recovery_disabled && in raid5_remove_disk()
7134 static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) in raid5_add_disk() argument
7136 struct r5conf *conf = mddev->private; in raid5_add_disk()
7143 if (mddev->recovery_disabled == conf->recovery_disabled) in raid5_add_disk()
7192 static int raid5_resize(struct mddev *mddev, sector_t sectors) in raid5_resize() argument
7202 sectors &= ~((sector_t)mddev->chunk_sectors - 1); in raid5_resize()
7203 newsize = raid5_size(mddev, sectors, mddev->raid_disks); in raid5_resize()
7204 if (mddev->external_size && in raid5_resize()
7205 mddev->array_sectors > newsize) in raid5_resize()
7207 if (mddev->bitmap) { in raid5_resize()
7208 int ret = bitmap_resize(mddev->bitmap, sectors, 0, 0); in raid5_resize()
7212 md_set_array_sectors(mddev, newsize); in raid5_resize()
7213 set_capacity(mddev->gendisk, mddev->array_sectors); in raid5_resize()
7214 revalidate_disk(mddev->gendisk); in raid5_resize()
7215 if (sectors > mddev->dev_sectors && in raid5_resize()
7216 mddev->recovery_cp > mddev->dev_sectors) { in raid5_resize()
7217 mddev->recovery_cp = mddev->dev_sectors; in raid5_resize()
7218 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in raid5_resize()
7220 mddev->dev_sectors = sectors; in raid5_resize()
7221 mddev->resync_max_sectors = sectors; in raid5_resize()
7225 static int check_stripe_cache(struct mddev *mddev) in check_stripe_cache() argument
7235 struct r5conf *conf = mddev->private; in check_stripe_cache()
7236 if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4 in check_stripe_cache()
7238 ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4 in check_stripe_cache()
7241 mdname(mddev), in check_stripe_cache()
7242 ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9) in check_stripe_cache()
7249 static int check_reshape(struct mddev *mddev) in check_reshape() argument
7251 struct r5conf *conf = mddev->private; in check_reshape()
7253 if (mddev->delta_disks == 0 && in check_reshape()
7254 mddev->new_layout == mddev->layout && in check_reshape()
7255 mddev->new_chunk_sectors == mddev->chunk_sectors) in check_reshape()
7259 if (mddev->delta_disks < 0 && mddev->reshape_position == MaxSector) { in check_reshape()
7266 if (mddev->level == 6) in check_reshape()
7268 if (mddev->raid_disks + mddev->delta_disks < min) in check_reshape()
7272 if (!check_stripe_cache(mddev)) in check_reshape()
7275 if (mddev->new_chunk_sectors > mddev->chunk_sectors || in check_reshape()
7276 mddev->delta_disks > 0) in check_reshape()
7279 + max(0, mddev->delta_disks), in check_reshape()
7280 max(mddev->new_chunk_sectors, in check_reshape()
7281 mddev->chunk_sectors) in check_reshape()
7285 + mddev->delta_disks)); in check_reshape()
7288 static int raid5_start_reshape(struct mddev *mddev) in raid5_start_reshape() argument
7290 struct r5conf *conf = mddev->private; in raid5_start_reshape()
7295 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in raid5_start_reshape()
7298 if (!check_stripe_cache(mddev)) in raid5_start_reshape()
7304 rdev_for_each(rdev, mddev) { in raid5_start_reshape()
7310 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) in raid5_start_reshape()
7320 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) in raid5_start_reshape()
7321 < mddev->array_sectors) { in raid5_start_reshape()
7323 "before number of disks\n", mdname(mddev)); in raid5_start_reshape()
7331 conf->raid_disks += mddev->delta_disks; in raid5_start_reshape()
7333 conf->chunk_sectors = mddev->new_chunk_sectors; in raid5_start_reshape()
7335 conf->algorithm = mddev->new_layout; in raid5_start_reshape()
7341 if (mddev->reshape_backwards) in raid5_start_reshape()
7342 conf->reshape_progress = raid5_size(mddev, 0, 0); in raid5_start_reshape()
7353 mddev_suspend(mddev); in raid5_start_reshape()
7354 mddev_resume(mddev); in raid5_start_reshape()
7363 if (mddev->delta_disks >= 0) { in raid5_start_reshape()
7364 rdev_for_each(rdev, mddev) in raid5_start_reshape()
7367 if (raid5_add_disk(mddev, rdev) == 0) { in raid5_start_reshape()
7374 if (sysfs_link_rdev(mddev, rdev)) in raid5_start_reshape()
7388 mddev->degraded = calc_degraded(conf); in raid5_start_reshape()
7391 mddev->raid_disks = conf->raid_disks; in raid5_start_reshape()
7392 mddev->reshape_position = conf->reshape_progress; in raid5_start_reshape()
7393 set_bit(MD_CHANGE_DEVS, &mddev->flags); in raid5_start_reshape()
7395 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in raid5_start_reshape()
7396 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in raid5_start_reshape()
7397 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); in raid5_start_reshape()
7398 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in raid5_start_reshape()
7399 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in raid5_start_reshape()
7400 mddev->sync_thread = md_register_thread(md_do_sync, mddev, in raid5_start_reshape()
7402 if (!mddev->sync_thread) { in raid5_start_reshape()
7403 mddev->recovery = 0; in raid5_start_reshape()
7406 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; in raid5_start_reshape()
7407 mddev->new_chunk_sectors = in raid5_start_reshape()
7409 mddev->new_layout = conf->algorithm = conf->prev_algo; in raid5_start_reshape()
7410 rdev_for_each(rdev, mddev) in raid5_start_reshape()
7415 mddev->reshape_position = MaxSector; in raid5_start_reshape()
7421 md_wakeup_thread(mddev->sync_thread); in raid5_start_reshape()
7422 md_new_event(mddev); in raid5_start_reshape()
7432 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { in end_reshape()
7437 rdev_for_each(rdev, conf->mddev) in end_reshape()
7447 if (conf->mddev->queue) { in end_reshape()
7451 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) in end_reshape()
7452 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; in end_reshape()
7460 static void raid5_finish_reshape(struct mddev *mddev) in raid5_finish_reshape() argument
7462 struct r5conf *conf = mddev->private; in raid5_finish_reshape()
7464 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in raid5_finish_reshape()
7466 if (mddev->delta_disks > 0) { in raid5_finish_reshape()
7467 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); in raid5_finish_reshape()
7468 set_capacity(mddev->gendisk, mddev->array_sectors); in raid5_finish_reshape()
7469 revalidate_disk(mddev->gendisk); in raid5_finish_reshape()
7473 mddev->degraded = calc_degraded(conf); in raid5_finish_reshape()
7476 d < conf->raid_disks - mddev->delta_disks; in raid5_finish_reshape()
7486 mddev->layout = conf->algorithm; in raid5_finish_reshape()
7487 mddev->chunk_sectors = conf->chunk_sectors; in raid5_finish_reshape()
7488 mddev->reshape_position = MaxSector; in raid5_finish_reshape()
7489 mddev->delta_disks = 0; in raid5_finish_reshape()
7490 mddev->reshape_backwards = 0; in raid5_finish_reshape()
7494 static void raid5_quiesce(struct mddev *mddev, int state) in raid5_quiesce() argument
7496 struct r5conf *conf = mddev->private; in raid5_quiesce()
7530 static void *raid45_takeover_raid0(struct mddev *mddev, int level) in raid45_takeover_raid0() argument
7532 struct r0conf *raid0_conf = mddev->private; in raid45_takeover_raid0()
7538 mdname(mddev)); in raid45_takeover_raid0()
7544 mddev->dev_sectors = sectors; in raid45_takeover_raid0()
7545 mddev->new_level = level; in raid45_takeover_raid0()
7546 mddev->new_layout = ALGORITHM_PARITY_N; in raid45_takeover_raid0()
7547 mddev->new_chunk_sectors = mddev->chunk_sectors; in raid45_takeover_raid0()
7548 mddev->raid_disks += 1; in raid45_takeover_raid0()
7549 mddev->delta_disks = 1; in raid45_takeover_raid0()
7551 mddev->recovery_cp = MaxSector; in raid45_takeover_raid0()
7553 return setup_conf(mddev); in raid45_takeover_raid0()
7556 static void *raid5_takeover_raid1(struct mddev *mddev) in raid5_takeover_raid1() argument
7560 if (mddev->raid_disks != 2 || in raid5_takeover_raid1()
7561 mddev->degraded > 1) in raid5_takeover_raid1()
7569 while (chunksect && (mddev->array_sectors & (chunksect-1))) in raid5_takeover_raid1()
7576 mddev->new_level = 5; in raid5_takeover_raid1()
7577 mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC; in raid5_takeover_raid1()
7578 mddev->new_chunk_sectors = chunksect; in raid5_takeover_raid1()
7580 return setup_conf(mddev); in raid5_takeover_raid1()
7583 static void *raid5_takeover_raid6(struct mddev *mddev) in raid5_takeover_raid6() argument
7587 switch (mddev->layout) { in raid5_takeover_raid6()
7609 mddev->new_level = 5; in raid5_takeover_raid6()
7610 mddev->new_layout = new_layout; in raid5_takeover_raid6()
7611 mddev->delta_disks = -1; in raid5_takeover_raid6()
7612 mddev->raid_disks -= 1; in raid5_takeover_raid6()
7613 return setup_conf(mddev); in raid5_takeover_raid6()
7616 static int raid5_check_reshape(struct mddev *mddev) in raid5_check_reshape() argument
7623 struct r5conf *conf = mddev->private; in raid5_check_reshape()
7624 int new_chunk = mddev->new_chunk_sectors; in raid5_check_reshape()
7626 if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout)) in raid5_check_reshape()
7633 if (mddev->array_sectors & (new_chunk-1)) in raid5_check_reshape()
7640 if (mddev->raid_disks == 2) { in raid5_check_reshape()
7642 if (mddev->new_layout >= 0) { in raid5_check_reshape()
7643 conf->algorithm = mddev->new_layout; in raid5_check_reshape()
7644 mddev->layout = mddev->new_layout; in raid5_check_reshape()
7648 mddev->chunk_sectors = new_chunk; in raid5_check_reshape()
7650 set_bit(MD_CHANGE_DEVS, &mddev->flags); in raid5_check_reshape()
7651 md_wakeup_thread(mddev->thread); in raid5_check_reshape()
7653 return check_reshape(mddev); in raid5_check_reshape()
7656 static int raid6_check_reshape(struct mddev *mddev) in raid6_check_reshape() argument
7658 int new_chunk = mddev->new_chunk_sectors; in raid6_check_reshape()
7660 if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout)) in raid6_check_reshape()
7667 if (mddev->array_sectors & (new_chunk-1)) in raid6_check_reshape()
7673 return check_reshape(mddev); in raid6_check_reshape()
7676 static void *raid5_takeover(struct mddev *mddev) in raid5_takeover() argument
7684 if (mddev->level == 0) in raid5_takeover()
7685 return raid45_takeover_raid0(mddev, 5); in raid5_takeover()
7686 if (mddev->level == 1) in raid5_takeover()
7687 return raid5_takeover_raid1(mddev); in raid5_takeover()
7688 if (mddev->level == 4) { in raid5_takeover()
7689 mddev->new_layout = ALGORITHM_PARITY_N; in raid5_takeover()
7690 mddev->new_level = 5; in raid5_takeover()
7691 return setup_conf(mddev); in raid5_takeover()
7693 if (mddev->level == 6) in raid5_takeover()
7694 return raid5_takeover_raid6(mddev); in raid5_takeover()
7699 static void *raid4_takeover(struct mddev *mddev) in raid4_takeover() argument
7705 if (mddev->level == 0) in raid4_takeover()
7706 return raid45_takeover_raid0(mddev, 4); in raid4_takeover()
7707 if (mddev->level == 5 && in raid4_takeover()
7708 mddev->layout == ALGORITHM_PARITY_N) { in raid4_takeover()
7709 mddev->new_layout = 0; in raid4_takeover()
7710 mddev->new_level = 4; in raid4_takeover()
7711 return setup_conf(mddev); in raid4_takeover()
7718 static void *raid6_takeover(struct mddev *mddev) in raid6_takeover() argument
7726 if (mddev->pers != &raid5_personality) in raid6_takeover()
7728 if (mddev->degraded > 1) in raid6_takeover()
7730 if (mddev->raid_disks > 253) in raid6_takeover()
7732 if (mddev->raid_disks < 3) in raid6_takeover()
7735 switch (mddev->layout) { in raid6_takeover()
7757 mddev->new_level = 6; in raid6_takeover()
7758 mddev->new_layout = new_layout; in raid6_takeover()
7759 mddev->delta_disks = 1; in raid6_takeover()
7760 mddev->raid_disks += 1; in raid6_takeover()
7761 return setup_conf(mddev); in raid6_takeover()
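
The entries above repeat one idiom: raid5 code that only holds its per-array conf reaches the generic md layer through the conf->mddev back-pointer set in setup_conf() (line 6501), e.g. md_wakeup_thread(conf->mddev->thread) and rdev_dec_pending(rdev, conf->mddev). The fragment below is a minimal, self-contained userspace sketch of that back-pointer pattern only; the struct members and helper names (wake_md_thread, release_stripe_sketch, thread_wakeups) are illustrative stand-ins, not the kernel's definitions.

	/* Hypothetical sketch of the conf->mddev back-pointer idiom seen
	 * throughout the listing.  Types and helpers are stand-ins. */
	#include <stdio.h>

	struct mddev {                      /* stand-in for the generic md device   */
		const char *name;
		int thread_wakeups;         /* counts "md_wakeup_thread" calls      */
	};

	struct r5conf {                     /* stand-in for the raid5 private state */
		struct mddev *mddev;        /* back-pointer, as set in setup_conf() */
		int active_stripes;
	};

	/* stand-in for md_wakeup_thread(conf->mddev->thread) */
	static void wake_md_thread(struct mddev *mddev)
	{
		mddev->thread_wakeups++;
		printf("%s: md thread woken (%d times)\n",
		       mddev->name, mddev->thread_wakeups);
	}

	/* a raid5-side helper gets only the conf and reaches md through it */
	static void release_stripe_sketch(struct r5conf *conf)
	{
		if (--conf->active_stripes == 0)
			wake_md_thread(conf->mddev);
	}

	int main(void)
	{
		struct mddev md = { .name = "md0", .thread_wakeups = 0 };
		struct r5conf conf = { .mddev = &md, .active_stripes = 1 };

		release_stripe_sketch(&conf); /* wakes md via the back-pointer */
		return 0;
	}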