Lines Matching refs:conf

87 static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)  in stripe_hash()  argument
90 return &conf->stripe_hashtbl[hash]; in stripe_hash()
98 static inline void lock_device_hash_lock(struct r5conf *conf, int hash) in lock_device_hash_lock() argument
100 spin_lock_irq(conf->hash_locks + hash); in lock_device_hash_lock()
101 spin_lock(&conf->device_lock); in lock_device_hash_lock()
104 static inline void unlock_device_hash_lock(struct r5conf *conf, int hash) in unlock_device_hash_lock() argument
106 spin_unlock(&conf->device_lock); in unlock_device_hash_lock()
107 spin_unlock_irq(conf->hash_locks + hash); in unlock_device_hash_lock()
110 static inline void lock_all_device_hash_locks_irq(struct r5conf *conf) in lock_all_device_hash_locks_irq() argument
114 spin_lock(conf->hash_locks); in lock_all_device_hash_locks_irq()
116 spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks); in lock_all_device_hash_locks_irq()
117 spin_lock(&conf->device_lock); in lock_all_device_hash_locks_irq()
120 static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf) in unlock_all_device_hash_locks_irq() argument
123 spin_unlock(&conf->device_lock); in unlock_all_device_hash_locks_irq()
125 spin_unlock(conf->hash_locks + i - 1); in unlock_all_device_hash_locks_irq()
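The hash-lock helpers listed above (stripe_hash through unlock_all_device_hash_locks_irq) encode a fixed ordering: a per-bucket hash lock is always taken before conf->device_lock, and the "lock all" variant nests every hash lock in index order before device_lock, then releases in reverse. Below is a minimal userspace sketch of that ordering using pthread mutexes in place of the kernel spinlocks; NR_STRIPE_HASH_LOCKS and struct fake_conf here are illustrative stand-ins, not the kernel definitions.

#include <pthread.h>

#define NR_STRIPE_HASH_LOCKS 8   /* illustrative; the kernel has its own value */

struct fake_conf {
	pthread_mutex_t hash_locks[NR_STRIPE_HASH_LOCKS];
	pthread_mutex_t device_lock;
};

/* Single-bucket path: hash lock first, then device_lock
 * (mirrors lock_device_hash_lock/unlock_device_hash_lock). */
static void lock_one(struct fake_conf *conf, int hash)
{
	pthread_mutex_lock(&conf->hash_locks[hash]);
	pthread_mutex_lock(&conf->device_lock);
}

static void unlock_one(struct fake_conf *conf, int hash)
{
	pthread_mutex_unlock(&conf->device_lock);
	pthread_mutex_unlock(&conf->hash_locks[hash]);
}

/* "Lock everything" path: every hash lock in index order, then device_lock;
 * release strictly in reverse, as the _all_ helpers above do. */
static void lock_all(struct fake_conf *conf)
{
	for (int i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
		pthread_mutex_lock(&conf->hash_locks[i]);
	pthread_mutex_lock(&conf->device_lock);
}

static void unlock_all(struct fake_conf *conf)
{
	pthread_mutex_unlock(&conf->device_lock);
	for (int i = NR_STRIPE_HASH_LOCKS; i > 0; i--)
		pthread_mutex_unlock(&conf->hash_locks[i - 1]);
}

Using the same inner/outer order in both the single-bucket and all-bucket paths is what keeps the two kinds of callers deadlock-free against each other.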
237 static void print_raid5_conf (struct r5conf *conf);
248 struct r5conf *conf = sh->raid_conf; in raid5_wakeup_stripe_thread() local
260 group = conf->worker_groups + cpu_to_group(cpu); in raid5_wakeup_stripe_thread()
266 if (conf->worker_cnt_per_group == 0) { in raid5_wakeup_stripe_thread()
267 md_wakeup_thread(conf->mddev->thread); in raid5_wakeup_stripe_thread()
271 group = conf->worker_groups + cpu_to_group(sh->cpu); in raid5_wakeup_stripe_thread()
279 for (i = 1; i < conf->worker_cnt_per_group && thread_cnt > 0; i++) { in raid5_wakeup_stripe_thread()
289 static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, in do_release_stripe() argument
293 BUG_ON(atomic_read(&conf->active_stripes)==0); in do_release_stripe()
297 list_add_tail(&sh->lru, &conf->delayed_list); in do_release_stripe()
299 sh->bm_seq - conf->seq_write > 0) in do_release_stripe()
300 list_add_tail(&sh->lru, &conf->bitmap_list); in do_release_stripe()
304 if (conf->worker_cnt_per_group == 0) { in do_release_stripe()
305 list_add_tail(&sh->lru, &conf->handle_list); in do_release_stripe()
311 md_wakeup_thread(conf->mddev->thread); in do_release_stripe()
315 if (atomic_dec_return(&conf->preread_active_stripes) in do_release_stripe()
317 md_wakeup_thread(conf->mddev->thread); in do_release_stripe()
318 atomic_dec(&conf->active_stripes); in do_release_stripe()
324 static void __release_stripe(struct r5conf *conf, struct stripe_head *sh, in __release_stripe() argument
328 do_release_stripe(conf, sh, temp_inactive_list); in __release_stripe()
338 static void release_inactive_stripe_list(struct r5conf *conf, in release_inactive_stripe_list() argument
359 spin_lock_irqsave(conf->hash_locks + hash, flags); in release_inactive_stripe_list()
360 if (list_empty(conf->inactive_list + hash) && in release_inactive_stripe_list()
362 atomic_dec(&conf->empty_inactive_list_nr); in release_inactive_stripe_list()
363 list_splice_tail_init(list, conf->inactive_list + hash); in release_inactive_stripe_list()
365 spin_unlock_irqrestore(conf->hash_locks + hash, flags); in release_inactive_stripe_list()
372 wake_up(&conf->wait_for_stripe); in release_inactive_stripe_list()
373 if (atomic_read(&conf->active_stripes) == 0) in release_inactive_stripe_list()
374 wake_up(&conf->wait_for_quiescent); in release_inactive_stripe_list()
375 if (conf->retry_read_aligned) in release_inactive_stripe_list()
376 md_wakeup_thread(conf->mddev->thread); in release_inactive_stripe_list()
381 static int release_stripe_list(struct r5conf *conf, in release_stripe_list() argument
388 head = llist_del_all(&conf->released_stripes); in release_stripe_list()
404 __release_stripe(conf, sh, &temp_inactive_list[hash]); in release_stripe_list()
413 struct r5conf *conf = sh->raid_conf; in raid5_release_stripe() local
424 if (unlikely(!conf->mddev->thread) || in raid5_release_stripe()
427 wakeup = llist_add(&sh->release_list, &conf->released_stripes); in raid5_release_stripe()
429 md_wakeup_thread(conf->mddev->thread); in raid5_release_stripe()
434 if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) { in raid5_release_stripe()
437 do_release_stripe(conf, sh, &list); in raid5_release_stripe()
438 spin_unlock(&conf->device_lock); in raid5_release_stripe()
439 release_inactive_stripe_list(conf, &list, hash); in raid5_release_stripe()
452 static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh) in insert_hash() argument
454 struct hlist_head *hp = stripe_hash(conf, sh->sector); in insert_hash()
463 static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash) in get_free_stripe() argument
468 if (list_empty(conf->inactive_list + hash)) in get_free_stripe()
470 first = (conf->inactive_list + hash)->next; in get_free_stripe()
474 atomic_inc(&conf->active_stripes); in get_free_stripe()
476 if (list_empty(conf->inactive_list + hash)) in get_free_stripe()
477 atomic_inc(&conf->empty_inactive_list_nr); in get_free_stripe()
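get_free_stripe() (lines 463-477 above) pops the first stripe off the per-hash inactive list, marks it active, and records when a bucket runs dry so that release_inactive_stripe_list() and the raid5_congested() test further down can cheaply tell whether any bucket is empty. The following is a hedged userspace model of just that accounting, with a plain singly linked list standing in for list_head and the caller assumed to already hold the relevant hash lock.

#include <stddef.h>

struct fake_stripe {
	struct fake_stripe *next;
	int hash;
};

struct fake_cache {
	struct fake_stripe *inactive_list[8];   /* one list per hash bucket */
	int active_stripes;
	int empty_inactive_list_nr;
};

static struct fake_stripe *get_free_stripe_sketch(struct fake_cache *c, int hash)
{
	struct fake_stripe *sh = c->inactive_list[hash];

	if (!sh)
		return NULL;                      /* nothing inactive in this bucket */
	c->inactive_list[hash] = sh->next;        /* detach the first entry */
	sh->next = NULL;
	c->active_stripes++;
	if (!c->inactive_list[hash])
		c->empty_inactive_list_nr++;      /* bucket just went empty */
	return sh;
}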
516 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
521 struct r5conf *conf = sh->raid_conf; in init_stripe() local
532 seq = read_seqcount_begin(&conf->gen_lock); in init_stripe()
533 sh->generation = conf->generation - previous; in init_stripe()
534 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; in init_stripe()
536 stripe_set_idx(sector, conf, previous, sh); in init_stripe()
553 if (read_seqcount_retry(&conf->gen_lock, seq)) in init_stripe()
556 insert_hash(conf, sh); in init_stripe()
561 static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector, in __find_stripe() argument
567 hlist_for_each_entry(sh, stripe_hash(conf, sector), hash) in __find_stripe()
587 static int calc_degraded(struct r5conf *conf) in calc_degraded() argument
594 for (i = 0; i < conf->previous_raid_disks; i++) { in calc_degraded()
595 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); in calc_degraded()
597 rdev = rcu_dereference(conf->disks[i].replacement); in calc_degraded()
612 if (conf->raid_disks >= conf->previous_raid_disks) in calc_degraded()
616 if (conf->raid_disks == conf->previous_raid_disks) in calc_degraded()
620 for (i = 0; i < conf->raid_disks; i++) { in calc_degraded()
621 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); in calc_degraded()
623 rdev = rcu_dereference(conf->disks[i].replacement); in calc_degraded()
634 if (conf->raid_disks <= conf->previous_raid_disks) in calc_degraded()
643 static int has_failed(struct r5conf *conf) in has_failed() argument
647 if (conf->mddev->reshape_position == MaxSector) in has_failed()
648 return conf->mddev->degraded > conf->max_degraded; in has_failed()
650 degraded = calc_degraded(conf); in has_failed()
651 if (degraded > conf->max_degraded) in has_failed()
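calc_degraded() and has_failed() (lines 587-651) reduce to one comparison once the missing or faulty devices are counted: the array has failed when more devices are degraded than the parity scheme can absorb. A worked restatement of the threshold only (the real function also re-counts across both geometries while a reshape is in flight):

/* max_degraded is 1 for raid4/raid5 and 2 for raid6, so losing two
 * devices fails a raid5 array but not a raid6 one. */
static int has_failed_sketch(int degraded, int max_degraded)
{
	return degraded > max_degraded;
}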
657 raid5_get_active_stripe(struct r5conf *conf, sector_t sector, in raid5_get_active_stripe() argument
665 spin_lock_irq(conf->hash_locks + hash); in raid5_get_active_stripe()
668 wait_event_lock_irq(conf->wait_for_quiescent, in raid5_get_active_stripe()
669 conf->quiesce == 0 || noquiesce, in raid5_get_active_stripe()
670 *(conf->hash_locks + hash)); in raid5_get_active_stripe()
671 sh = __find_stripe(conf, sector, conf->generation - previous); in raid5_get_active_stripe()
673 if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) { in raid5_get_active_stripe()
674 sh = get_free_stripe(conf, hash); in raid5_get_active_stripe()
676 &conf->cache_state)) in raid5_get_active_stripe()
678 &conf->cache_state); in raid5_get_active_stripe()
684 &conf->cache_state); in raid5_get_active_stripe()
686 conf->wait_for_stripe, in raid5_get_active_stripe()
687 !list_empty(conf->inactive_list + hash) && in raid5_get_active_stripe()
688 (atomic_read(&conf->active_stripes) in raid5_get_active_stripe()
689 < (conf->max_nr_stripes * 3 / 4) in raid5_get_active_stripe()
691 &conf->cache_state)), in raid5_get_active_stripe()
692 *(conf->hash_locks + hash)); in raid5_get_active_stripe()
694 &conf->cache_state); in raid5_get_active_stripe()
700 spin_lock(&conf->device_lock); in raid5_get_active_stripe()
703 atomic_inc(&conf->active_stripes); in raid5_get_active_stripe()
713 spin_unlock(&conf->device_lock); in raid5_get_active_stripe()
717 spin_unlock_irq(conf->hash_locks + hash); in raid5_get_active_stripe()
749 struct r5conf *conf = sh->raid_conf; in stripe_can_batch() local
751 if (conf->log) in stripe_can_batch()
759 static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh) in stripe_add_to_batch_list() argument
770 if (!sector_div(tmp_sec, conf->chunk_sectors)) in stripe_add_to_batch_list()
775 spin_lock_irq(conf->hash_locks + hash); in stripe_add_to_batch_list()
776 head = __find_stripe(conf, head_sector, conf->generation); in stripe_add_to_batch_list()
778 spin_lock(&conf->device_lock); in stripe_add_to_batch_list()
781 atomic_inc(&conf->active_stripes); in stripe_add_to_batch_list()
791 spin_unlock(&conf->device_lock); in stripe_add_to_batch_list()
793 spin_unlock_irq(conf->hash_locks + hash); in stripe_add_to_batch_list()
839 if (atomic_dec_return(&conf->preread_active_stripes) in stripe_add_to_batch_list()
841 md_wakeup_thread(conf->mddev->thread); in stripe_add_to_batch_list()
862 static int use_new_offset(struct r5conf *conf, struct stripe_head *sh) in use_new_offset() argument
864 sector_t progress = conf->reshape_progress; in use_new_offset()
872 if (sh->generation == conf->generation - 1) in use_new_offset()
887 struct r5conf *conf = sh->raid_conf; in ops_run_io() local
893 if (r5l_write_stripe(conf->log, sh) == 0) in ops_run_io()
925 rrdev = rcu_dereference(conf->disks[i].replacement); in ops_run_io()
927 rdev = rcu_dereference(conf->disks[i].rdev); in ops_run_io()
969 if (!conf->mddev->external && in ops_run_io()
970 conf->mddev->flags) { in ops_run_io()
975 md_check_recovery(conf->mddev); in ops_run_io()
983 md_wait_for_blocked_rdev(rdev, conf->mddev); in ops_run_io()
986 rdev_dec_pending(rdev, conf->mddev); in ops_run_io()
1012 if (use_new_offset(conf, sh)) in ops_run_io()
1037 if (conf->mddev->gendisk) in ops_run_io()
1039 bi, disk_devt(conf->mddev->gendisk), in ops_run_io()
1064 if (use_new_offset(conf, sh)) in ops_run_io()
1083 if (conf->mddev->gendisk) in ops_run_io()
1085 rbi, disk_devt(conf->mddev->gendisk), in ops_run_io()
1918 struct r5conf *conf = sh->raid_conf; in raid_run_ops() local
1919 int level = conf->level; in raid_run_ops()
1924 percpu = per_cpu_ptr(conf->percpu, cpu); in raid_run_ops()
1997 static int grow_one_stripe(struct r5conf *conf, gfp_t gfp) in grow_one_stripe() argument
2001 sh = alloc_stripe(conf->slab_cache, gfp); in grow_one_stripe()
2005 sh->raid_conf = conf; in grow_one_stripe()
2009 kmem_cache_free(conf->slab_cache, sh); in grow_one_stripe()
2013 conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS; in grow_one_stripe()
2015 atomic_inc(&conf->active_stripes); in grow_one_stripe()
2018 conf->max_nr_stripes++; in grow_one_stripe()
2022 static int grow_stripes(struct r5conf *conf, int num) in grow_stripes() argument
2025 int devs = max(conf->raid_disks, conf->previous_raid_disks); in grow_stripes()
2027 if (conf->mddev->gendisk) in grow_stripes()
2028 sprintf(conf->cache_name[0], in grow_stripes()
2029 "raid%d-%s", conf->level, mdname(conf->mddev)); in grow_stripes()
2031 sprintf(conf->cache_name[0], in grow_stripes()
2032 "raid%d-%p", conf->level, conf->mddev); in grow_stripes()
2033 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]); in grow_stripes()
2035 conf->active_name = 0; in grow_stripes()
2036 sc = kmem_cache_create(conf->cache_name[conf->active_name], in grow_stripes()
2041 conf->slab_cache = sc; in grow_stripes()
2042 conf->pool_size = devs; in grow_stripes()
2044 if (!grow_one_stripe(conf, GFP_KERNEL)) in grow_stripes()
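grow_one_stripe() (lines 1997-2018) allocates a stripe_head from conf->slab_cache and, per line 2013 in this listing, files it under hash bucket conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS before the count is bumped, so successive allocations land round-robin across the hash-locked buckets. A small standalone illustration of that distribution; the bucket count is illustrative only.

#include <stdio.h>

#define NR_STRIPE_HASH_LOCKS 8   /* illustrative bucket count */

int main(void)
{
	int max_nr_stripes = 0;

	/* Each new stripe takes the bucket selected by the pre-increment
	 * count, the same expression as on line 2013 above. */
	for (int i = 0; i < 20; i++) {
		int hash = max_nr_stripes % NR_STRIPE_HASH_LOCKS;
		printf("stripe %2d -> hash bucket %d\n", i, hash);
		max_nr_stripes++;
	}
	return 0;
}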
2080 static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors) in resize_chunks() argument
2090 if (conf->scribble_disks >= new_disks && in resize_chunks()
2091 conf->scribble_sectors >= new_sectors) in resize_chunks()
2093 mddev_suspend(conf->mddev); in resize_chunks()
2099 percpu = per_cpu_ptr(conf->percpu, cpu); in resize_chunks()
2113 mddev_resume(conf->mddev); in resize_chunks()
2115 conf->scribble_disks = new_disks; in resize_chunks()
2116 conf->scribble_sectors = new_sectors; in resize_chunks()
2121 static int resize_stripes(struct r5conf *conf, int newsize) in resize_stripes() argument
2154 if (newsize <= conf->pool_size) in resize_stripes()
2157 err = md_allow_write(conf->mddev); in resize_stripes()
2162 sc = kmem_cache_create(conf->cache_name[1-conf->active_name], in resize_stripes()
2169 mutex_lock(&conf->cache_size_mutex); in resize_stripes()
2171 for (i = conf->max_nr_stripes; i; i--) { in resize_stripes()
2176 nsh->raid_conf = conf; in resize_stripes()
2187 mutex_unlock(&conf->cache_size_mutex); in resize_stripes()
2197 lock_device_hash_lock(conf, hash); in resize_stripes()
2198 wait_event_cmd(conf->wait_for_stripe, in resize_stripes()
2199 !list_empty(conf->inactive_list + hash), in resize_stripes()
2200 unlock_device_hash_lock(conf, hash), in resize_stripes()
2201 lock_device_hash_lock(conf, hash)); in resize_stripes()
2202 osh = get_free_stripe(conf, hash); in resize_stripes()
2203 unlock_device_hash_lock(conf, hash); in resize_stripes()
2205 for(i=0; i<conf->pool_size; i++) { in resize_stripes()
2210 kmem_cache_free(conf->slab_cache, osh); in resize_stripes()
2212 if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS + in resize_stripes()
2213 !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) { in resize_stripes()
2218 kmem_cache_destroy(conf->slab_cache); in resize_stripes()
2227 for (i=0; i<conf->raid_disks; i++) in resize_stripes()
2228 ndisks[i] = conf->disks[i]; in resize_stripes()
2229 kfree(conf->disks); in resize_stripes()
2230 conf->disks = ndisks; in resize_stripes()
2234 mutex_unlock(&conf->cache_size_mutex); in resize_stripes()
2240 for (i=conf->raid_disks; i < newsize; i++) in resize_stripes()
2252 conf->slab_cache = sc; in resize_stripes()
2253 conf->active_name = 1-conf->active_name; in resize_stripes()
2255 conf->pool_size = newsize; in resize_stripes()
2259 static int drop_one_stripe(struct r5conf *conf) in drop_one_stripe() argument
2262 int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK; in drop_one_stripe()
2264 spin_lock_irq(conf->hash_locks + hash); in drop_one_stripe()
2265 sh = get_free_stripe(conf, hash); in drop_one_stripe()
2266 spin_unlock_irq(conf->hash_locks + hash); in drop_one_stripe()
2271 kmem_cache_free(conf->slab_cache, sh); in drop_one_stripe()
2272 atomic_dec(&conf->active_stripes); in drop_one_stripe()
2273 conf->max_nr_stripes--; in drop_one_stripe()
2277 static void shrink_stripes(struct r5conf *conf) in shrink_stripes() argument
2279 while (conf->max_nr_stripes && in shrink_stripes()
2280 drop_one_stripe(conf)) in shrink_stripes()
2283 kmem_cache_destroy(conf->slab_cache); in shrink_stripes()
2284 conf->slab_cache = NULL; in shrink_stripes()
2290 struct r5conf *conf = sh->raid_conf; in raid5_end_read_request() local
2313 rdev = conf->disks[i].replacement; in raid5_end_read_request()
2315 rdev = conf->disks[i].rdev; in raid5_end_read_request()
2317 if (use_new_offset(conf, sh)) in raid5_end_read_request()
2332 mdname(conf->mddev), STRIPE_SECTORS, in raid5_end_read_request()
2355 mdname(conf->mddev), in raid5_end_read_request()
2358 else if (conf->mddev->degraded >= conf->max_degraded) { in raid5_end_read_request()
2364 mdname(conf->mddev), in raid5_end_read_request()
2374 mdname(conf->mddev), in raid5_end_read_request()
2378 > conf->max_nr_stripes) in raid5_end_read_request()
2381 mdname(conf->mddev), bdn); in raid5_end_read_request()
2400 md_error(conf->mddev, rdev); in raid5_end_read_request()
2403 rdev_dec_pending(rdev, conf->mddev); in raid5_end_read_request()
2412 struct r5conf *conf = sh->raid_conf; in raid5_end_write_request() local
2421 rdev = conf->disks[i].rdev; in raid5_end_write_request()
2425 rdev = conf->disks[i].replacement; in raid5_end_write_request()
2433 rdev = conf->disks[i].rdev; in raid5_end_write_request()
2447 md_error(conf->mddev, rdev); in raid5_end_write_request()
2472 rdev_dec_pending(rdev, conf->mddev); in raid5_end_write_request()
2507 struct r5conf *conf = mddev->private; in error() local
2511 spin_lock_irqsave(&conf->device_lock, flags); in error()
2513 mddev->degraded = calc_degraded(conf); in error()
2514 spin_unlock_irqrestore(&conf->device_lock, flags); in error()
2527 conf->raid_disks - mddev->degraded); in error()
2534 sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, in raid5_compute_sector() argument
2544 int algorithm = previous ? conf->prev_algo in raid5_compute_sector()
2545 : conf->algorithm; in raid5_compute_sector()
2546 int sectors_per_chunk = previous ? conf->prev_chunk_sectors in raid5_compute_sector()
2547 : conf->chunk_sectors; in raid5_compute_sector()
2548 int raid_disks = previous ? conf->previous_raid_disks in raid5_compute_sector()
2549 : conf->raid_disks; in raid5_compute_sector()
2550 int data_disks = raid_disks - conf->max_degraded; in raid5_compute_sector()
2570 switch(conf->level) { in raid5_compute_sector()
2738 struct r5conf *conf = sh->raid_conf; in raid5_compute_blocknr() local
2740 int data_disks = raid_disks - conf->max_degraded; in raid5_compute_blocknr()
2742 int sectors_per_chunk = previous ? conf->prev_chunk_sectors in raid5_compute_blocknr()
2743 : conf->chunk_sectors; in raid5_compute_blocknr()
2744 int algorithm = previous ? conf->prev_algo in raid5_compute_blocknr()
2745 : conf->algorithm; in raid5_compute_blocknr()
2758 switch(conf->level) { in raid5_compute_blocknr()
2845 check = raid5_compute_sector(conf, r_sector, in raid5_compute_blocknr()
2850 mdname(conf->mddev)); in raid5_compute_blocknr()
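raid5_compute_sector() and raid5_compute_blocknr() (lines 2534-2850) are inverses: one maps an array-relative sector to a device index and device-relative sector under the active geometry, the other maps back and cross-checks itself. Below is a hedged sketch of the core chunk arithmetic only; parity rotation is omitted, whereas the real code also derives pd_idx/qd_idx from conf->algorithm and shifts dd_idx around them.

#include <stdint.h>
#include <stdio.h>

/* Map an array-relative sector to (data-disk index, device sector),
 * ignoring where the parity device(s) sit in the stripe. */
static uint64_t compute_sector_sketch(uint64_t r_sector,
				      unsigned int sectors_per_chunk,
				      unsigned int data_disks,
				      unsigned int *dd_idx)
{
	uint64_t chunk_number = r_sector / sectors_per_chunk;
	unsigned int chunk_offset = r_sector % sectors_per_chunk;
	uint64_t stripe = chunk_number / data_disks;

	*dd_idx = chunk_number % data_disks;              /* which data device */
	return stripe * sectors_per_chunk + chunk_offset; /* sector on that device */
}

int main(void)
{
	unsigned int dd_idx;
	/* e.g. a 4-disk RAID-5 (3 data disks) with 128-sector (64 KiB) chunks */
	uint64_t dev_sector = compute_sector_sketch(1000, 128, 3, &dd_idx);

	printf("data disk %u, device sector %llu\n",
	       dd_idx, (unsigned long long)dev_sector);
	return 0;
}

With those parameters, array sector 1000 falls in chunk 7 at offset 104, so it lives on data disk 1 at device sector 2*128 + 104 = 360.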
2861 struct r5conf *conf = sh->raid_conf; in schedule_reconstruction() local
2862 int level = conf->level; in schedule_reconstruction()
2892 if (s->locked + conf->max_degraded == disks) in schedule_reconstruction()
2894 atomic_inc(&conf->pending_full_writes); in schedule_reconstruction()
2955 struct r5conf *conf = sh->raid_conf; in add_stripe_bio() local
3016 if (conf->mddev->bitmap && firstwrite) { in add_stripe_bio()
3031 bitmap_startwrite(conf->mddev->bitmap, sh->sector, in add_stripe_bio()
3036 sh->bm_seq = conf->seq_flush+1; in add_stripe_bio()
3043 stripe_add_to_batch_list(conf, sh); in add_stripe_bio()
3052 static void end_reshape(struct r5conf *conf);
3054 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, in stripe_set_idx() argument
3058 previous ? conf->prev_chunk_sectors : conf->chunk_sectors; in stripe_set_idx()
3061 int disks = previous ? conf->previous_raid_disks : conf->raid_disks; in stripe_set_idx()
3063 raid5_compute_sector(conf, in stripe_set_idx()
3064 stripe * (disks - conf->max_degraded) in stripe_set_idx()
3071 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, in handle_failed_stripe() argument
3084 rdev = rcu_dereference(conf->disks[i].rdev); in handle_failed_stripe()
3095 md_error(conf->mddev, rdev); in handle_failed_stripe()
3096 rdev_dec_pending(rdev, conf->mddev); in handle_failed_stripe()
3111 wake_up(&conf->wait_for_overlap); in handle_failed_stripe()
3119 md_write_end(conf->mddev); in handle_failed_stripe()
3125 bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
3143 md_write_end(conf->mddev); in handle_failed_stripe()
3153 s->failed > conf->max_degraded && in handle_failed_stripe()
3161 wake_up(&conf->wait_for_overlap); in handle_failed_stripe()
3176 bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
3187 if (atomic_dec_and_test(&conf->pending_full_writes)) in handle_failed_stripe()
3188 md_wakeup_thread(conf->mddev->thread); in handle_failed_stripe()
3192 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, in handle_failed_sync() argument
3201 wake_up(&conf->wait_for_overlap); in handle_failed_sync()
3211 if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) { in handle_failed_sync()
3215 for (i = 0; i < conf->raid_disks; i++) { in handle_failed_sync()
3216 struct md_rdev *rdev = conf->disks[i].rdev; in handle_failed_sync()
3223 rdev = conf->disks[i].replacement; in handle_failed_sync()
3232 conf->recovery_disabled = in handle_failed_sync()
3233 conf->mddev->recovery_disabled; in handle_failed_sync()
3235 md_done_sync(conf->mddev, STRIPE_SECTORS, !abort); in handle_failed_sync()
3451 static void handle_stripe_clean_event(struct r5conf *conf, in handle_stripe_clean_event() argument
3485 md_write_end(conf->mddev); in handle_stripe_clean_event()
3490 bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_stripe_clean_event()
3531 spin_lock_irq(conf->hash_locks + hash); in handle_stripe_clean_event()
3533 spin_unlock_irq(conf->hash_locks + hash); in handle_stripe_clean_event()
3548 if (atomic_dec_and_test(&conf->pending_full_writes)) in handle_stripe_clean_event()
3549 md_wakeup_thread(conf->mddev->thread); in handle_stripe_clean_event()
3555 static void handle_stripe_dirtying(struct r5conf *conf, in handle_stripe_dirtying() argument
3561 sector_t recovery_cp = conf->mddev->recovery_cp; in handle_stripe_dirtying()
3570 if (conf->rmw_level == PARITY_DISABLE_RMW || in handle_stripe_dirtying()
3578 conf->rmw_level, (unsigned long long)recovery_cp, in handle_stripe_dirtying()
3607 if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_ENABLE_RMW)) && rmw > 0) { in handle_stripe_dirtying()
3609 if (conf->mddev->queue) in handle_stripe_dirtying()
3610 blk_add_trace_msg(conf->mddev->queue, in handle_stripe_dirtying()
3634 if ((rcw < rmw || (rcw == rmw && conf->rmw_level != PARITY_ENABLE_RMW)) && rcw > 0) { in handle_stripe_dirtying()
3661 if (rcw && conf->mddev->queue) in handle_stripe_dirtying()
3662 blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d", in handle_stripe_dirtying()
3687 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, in handle_parity_checks5() argument
3749 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); in handle_parity_checks5()
3750 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) in handle_parity_checks5()
3775 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, in handle_parity_checks6() argument
3901 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); in handle_parity_checks6()
3902 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) in handle_parity_checks6()
3939 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) in handle_stripe_expansion() argument
3956 sector_t s = raid5_compute_sector(conf, bn, 0, in handle_stripe_expansion()
3958 sh2 = raid5_get_active_stripe(conf, s, 0, 1, 1); in handle_stripe_expansion()
3980 for (j = 0; j < conf->raid_disks; j++) in handle_stripe_expansion()
3985 if (j == conf->raid_disks) { in handle_stripe_expansion()
4012 struct r5conf *conf = sh->raid_conf; in analyse_stripe() local
4024 s->log_failed = r5l_log_disk_error(conf); in analyse_stripe()
4072 rdev = rcu_dereference(conf->disks[i].replacement); in analyse_stripe()
4083 rdev = rcu_dereference(conf->disks[i].rdev); in analyse_stripe()
4131 conf->disks[i].rdev); in analyse_stripe()
4144 conf->disks[i].rdev); in analyse_stripe()
4153 conf->disks[i].replacement); in analyse_stripe()
4185 sh->sector >= conf->mddev->recovery_cp || in analyse_stripe()
4186 test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery))) in analyse_stripe()
4294 struct r5conf *conf = sh->raid_conf; in handle_stripe() local
4352 rdev_dec_pending(s.blocked_rdev, conf->mddev); in handle_stripe()
4368 if (s.failed > conf->max_degraded || s.log_failed) { in handle_stripe()
4373 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); in handle_stripe()
4375 handle_failed_sync(conf, sh, &s); in handle_stripe()
4427 || conf->level < 6; in handle_stripe()
4438 handle_stripe_clean_event(conf, sh, disks, &s.return_bi); in handle_stripe()
4445 || (conf->level == 6 && s.to_write && s.failed) in handle_stripe()
4458 handle_stripe_dirtying(conf, sh, &s, disks); in handle_stripe()
4469 if (conf->level == 6) in handle_stripe()
4470 handle_parity_checks6(conf, sh, &s, disks); in handle_stripe()
4472 handle_parity_checks5(conf, sh, &s, disks); in handle_stripe()
4479 for (i = 0; i < conf->raid_disks; i++) in handle_stripe()
4493 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); in handle_stripe()
4496 wake_up(&conf->wait_for_overlap); in handle_stripe()
4502 if (s.failed <= conf->max_degraded && !conf->mddev->ro) in handle_stripe()
4526 = raid5_get_active_stripe(conf, sh->sector, 1, 1, 1); in handle_stripe()
4535 atomic_inc(&conf->preread_active_stripes); in handle_stripe()
4544 for (i = conf->raid_disks; i--; ) { in handle_stripe()
4554 sh->disks = conf->raid_disks; in handle_stripe()
4555 stripe_set_idx(sh->sector, conf, 0, sh); in handle_stripe()
4559 atomic_dec(&conf->reshape_stripes); in handle_stripe()
4560 wake_up(&conf->wait_for_overlap); in handle_stripe()
4561 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); in handle_stripe()
4566 handle_stripe_expansion(conf, sh); in handle_stripe()
4571 if (conf->mddev->external) in handle_stripe()
4573 conf->mddev); in handle_stripe()
4580 conf->mddev); in handle_stripe()
4589 rdev = conf->disks[i].rdev; in handle_stripe()
4592 md_error(conf->mddev, rdev); in handle_stripe()
4593 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
4596 rdev = conf->disks[i].rdev; in handle_stripe()
4599 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
4602 rdev = conf->disks[i].replacement; in handle_stripe()
4605 rdev = conf->disks[i].rdev; in handle_stripe()
4608 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
4622 atomic_dec(&conf->preread_active_stripes); in handle_stripe()
4623 if (atomic_read(&conf->preread_active_stripes) < in handle_stripe()
4625 md_wakeup_thread(conf->mddev->thread); in handle_stripe()
4629 if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags)) { in handle_stripe()
4630 spin_lock_irq(&conf->device_lock); in handle_stripe()
4631 bio_list_merge(&conf->return_bi, &s.return_bi); in handle_stripe()
4632 spin_unlock_irq(&conf->device_lock); in handle_stripe()
4633 md_wakeup_thread(conf->mddev->thread); in handle_stripe()
4641 static void raid5_activate_delayed(struct r5conf *conf) in raid5_activate_delayed() argument
4643 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { in raid5_activate_delayed()
4644 while (!list_empty(&conf->delayed_list)) { in raid5_activate_delayed()
4645 struct list_head *l = conf->delayed_list.next; in raid5_activate_delayed()
4651 atomic_inc(&conf->preread_active_stripes); in raid5_activate_delayed()
4652 list_add_tail(&sh->lru, &conf->hold_list); in raid5_activate_delayed()
4658 static void activate_bit_delay(struct r5conf *conf, in activate_bit_delay() argument
4663 list_add(&head, &conf->bitmap_list); in activate_bit_delay()
4664 list_del_init(&conf->bitmap_list); in activate_bit_delay()
4671 __release_stripe(conf, sh, &temp_inactive_list[hash]); in activate_bit_delay()
4677 struct r5conf *conf = mddev->private; in raid5_congested() local
4683 if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) in raid5_congested()
4685 if (conf->quiesce) in raid5_congested()
4687 if (atomic_read(&conf->empty_inactive_list_nr)) in raid5_congested()
4695 struct r5conf *conf = mddev->private; in in_chunk_boundary() local
4700 chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors); in in_chunk_boundary()
4709 static void add_bio_to_retry(struct bio *bi,struct r5conf *conf) in add_bio_to_retry() argument
4713 spin_lock_irqsave(&conf->device_lock, flags); in add_bio_to_retry()
4715 bi->bi_next = conf->retry_read_aligned_list; in add_bio_to_retry()
4716 conf->retry_read_aligned_list = bi; in add_bio_to_retry()
4718 spin_unlock_irqrestore(&conf->device_lock, flags); in add_bio_to_retry()
4719 md_wakeup_thread(conf->mddev->thread); in add_bio_to_retry()
4722 static struct bio *remove_bio_from_retry(struct r5conf *conf) in remove_bio_from_retry() argument
4726 bi = conf->retry_read_aligned; in remove_bio_from_retry()
4728 conf->retry_read_aligned = NULL; in remove_bio_from_retry()
4731 bi = conf->retry_read_aligned_list; in remove_bio_from_retry()
4733 conf->retry_read_aligned_list = bi->bi_next; in remove_bio_from_retry()
4755 struct r5conf *conf; in raid5_align_endio() local
4764 conf = mddev->private; in raid5_align_endio()
4766 rdev_dec_pending(rdev, conf->mddev); in raid5_align_endio()
4772 if (atomic_dec_and_test(&conf->active_aligned_reads)) in raid5_align_endio()
4773 wake_up(&conf->wait_for_quiescent); in raid5_align_endio()
4779 add_bio_to_retry(raid_bi, conf); in raid5_align_endio()
4784 struct r5conf *conf = mddev->private; in raid5_read_one_chunk() local
4810 raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, in raid5_read_one_chunk()
4815 rdev = rcu_dereference(conf->disks[dd_idx].replacement); in raid5_read_one_chunk()
4818 rdev = rcu_dereference(conf->disks[dd_idx].rdev); in raid5_read_one_chunk()
4846 spin_lock_irq(&conf->device_lock); in raid5_read_one_chunk()
4847 wait_event_lock_irq(conf->wait_for_quiescent, in raid5_read_one_chunk()
4848 conf->quiesce == 0, in raid5_read_one_chunk()
4849 conf->device_lock); in raid5_read_one_chunk()
4850 atomic_inc(&conf->active_aligned_reads); in raid5_read_one_chunk()
4851 spin_unlock_irq(&conf->device_lock); in raid5_read_one_chunk()
4901 static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group) in __get_priority_stripe() argument
4907 if (conf->worker_cnt_per_group == 0) { in __get_priority_stripe()
4908 handle_list = &conf->handle_list; in __get_priority_stripe()
4910 handle_list = &conf->worker_groups[group].handle_list; in __get_priority_stripe()
4911 wg = &conf->worker_groups[group]; in __get_priority_stripe()
4914 for (i = 0; i < conf->group_cnt; i++) { in __get_priority_stripe()
4915 handle_list = &conf->worker_groups[i].handle_list; in __get_priority_stripe()
4916 wg = &conf->worker_groups[i]; in __get_priority_stripe()
4925 list_empty(&conf->hold_list) ? "empty" : "busy", in __get_priority_stripe()
4926 atomic_read(&conf->pending_full_writes), conf->bypass_count); in __get_priority_stripe()
4931 if (list_empty(&conf->hold_list)) in __get_priority_stripe()
4932 conf->bypass_count = 0; in __get_priority_stripe()
4934 if (conf->hold_list.next == conf->last_hold) in __get_priority_stripe()
4935 conf->bypass_count++; in __get_priority_stripe()
4937 conf->last_hold = conf->hold_list.next; in __get_priority_stripe()
4938 conf->bypass_count -= conf->bypass_threshold; in __get_priority_stripe()
4939 if (conf->bypass_count < 0) in __get_priority_stripe()
4940 conf->bypass_count = 0; in __get_priority_stripe()
4943 } else if (!list_empty(&conf->hold_list) && in __get_priority_stripe()
4944 ((conf->bypass_threshold && in __get_priority_stripe()
4945 conf->bypass_count > conf->bypass_threshold) || in __get_priority_stripe()
4946 atomic_read(&conf->pending_full_writes) == 0)) { in __get_priority_stripe()
4948 list_for_each_entry(tmp, &conf->hold_list, lru) { in __get_priority_stripe()
4949 if (conf->worker_cnt_per_group == 0 || in __get_priority_stripe()
4959 conf->bypass_count -= conf->bypass_threshold; in __get_priority_stripe()
4960 if (conf->bypass_count < 0) in __get_priority_stripe()
4961 conf->bypass_count = 0; in __get_priority_stripe()
4990 struct r5conf *conf = mddev->private; in raid5_unplug() local
4995 spin_lock_irq(&conf->device_lock); in raid5_unplug()
5011 __release_stripe(conf, sh, &cb->temp_inactive_list[hash]); in raid5_unplug()
5014 spin_unlock_irq(&conf->device_lock); in raid5_unplug()
5016 release_inactive_stripe_list(conf, cb->temp_inactive_list, in raid5_unplug()
5053 struct r5conf *conf = mddev->private; in make_discard_request() local
5069 stripe_sectors = conf->chunk_sectors * in make_discard_request()
5070 (conf->raid_disks - conf->max_degraded); in make_discard_request()
5075 logical_sector *= conf->chunk_sectors; in make_discard_request()
5076 last_sector *= conf->chunk_sectors; in make_discard_request()
5083 sh = raid5_get_active_stripe(conf, logical_sector, 0, 0, 0); in make_discard_request()
5084 prepare_to_wait(&conf->wait_for_overlap, &w, in make_discard_request()
5094 for (d = 0; d < conf->raid_disks; d++) { in make_discard_request()
5106 finish_wait(&conf->wait_for_overlap, &w); in make_discard_request()
5108 for (d = 0; d < conf->raid_disks; d++) { in make_discard_request()
5117 if (conf->mddev->bitmap) { in make_discard_request()
5119 d < conf->raid_disks - conf->max_degraded; in make_discard_request()
5125 sh->bm_seq = conf->seq_flush + 1; in make_discard_request()
5132 atomic_inc(&conf->preread_active_stripes); in make_discard_request()
5145 struct r5conf *conf = mddev->private; in make_request() local
5156 int ret = r5l_handle_flush_request(conf->log, bi); in make_request()
5191 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); in make_request()
5198 seq = read_seqcount_begin(&conf->gen_lock); in make_request()
5201 prepare_to_wait(&conf->wait_for_overlap, &w, in make_request()
5203 if (unlikely(conf->reshape_progress != MaxSector)) { in make_request()
5212 spin_lock_irq(&conf->device_lock); in make_request()
5214 ? logical_sector < conf->reshape_progress in make_request()
5215 : logical_sector >= conf->reshape_progress) { in make_request()
5219 ? logical_sector < conf->reshape_safe in make_request()
5220 : logical_sector >= conf->reshape_safe) { in make_request()
5221 spin_unlock_irq(&conf->device_lock); in make_request()
5227 spin_unlock_irq(&conf->device_lock); in make_request()
5230 new_sector = raid5_compute_sector(conf, logical_sector, in make_request()
5237 sh = raid5_get_active_stripe(conf, new_sector, previous, in make_request()
5250 spin_lock_irq(&conf->device_lock); in make_request()
5252 ? logical_sector >= conf->reshape_progress in make_request()
5253 : logical_sector < conf->reshape_progress) in make_request()
5256 spin_unlock_irq(&conf->device_lock); in make_request()
5264 if (read_seqcount_retry(&conf->gen_lock, seq)) { in make_request()
5281 prepare_to_wait(&conf->wait_for_overlap, in make_request()
5308 atomic_inc(&conf->preread_active_stripes); in make_request()
5316 finish_wait(&conf->wait_for_overlap, &w); in make_request()
5343 struct r5conf *conf = mddev->private; in reshape_request() local
5346 int raid_disks = conf->previous_raid_disks; in reshape_request()
5347 int data_disks = raid_disks - conf->max_degraded; in reshape_request()
5348 int new_data_disks = conf->raid_disks - conf->max_degraded; in reshape_request()
5360 conf->reshape_progress < raid5_size(mddev, 0, 0)) { in reshape_request()
5362 - conf->reshape_progress; in reshape_request()
5364 conf->reshape_progress == MaxSector) { in reshape_request()
5368 conf->reshape_progress > 0) in reshape_request()
5369 sector_nr = conf->reshape_progress; in reshape_request()
5385 reshape_sectors = max(conf->chunk_sectors, conf->prev_chunk_sectors); in reshape_request()
5393 writepos = conf->reshape_progress; in reshape_request()
5395 readpos = conf->reshape_progress; in reshape_request()
5397 safepos = conf->reshape_safe; in reshape_request()
5418 BUG_ON(conf->reshape_progress == 0); in reshape_request()
5449 if (conf->min_offset_diff < 0) { in reshape_request()
5450 safepos += -conf->min_offset_diff; in reshape_request()
5451 readpos += -conf->min_offset_diff; in reshape_request()
5453 writepos += conf->min_offset_diff; in reshape_request()
5458 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { in reshape_request()
5460 wait_event(conf->wait_for_overlap, in reshape_request()
5461 atomic_read(&conf->reshape_stripes)==0 in reshape_request()
5463 if (atomic_read(&conf->reshape_stripes) != 0) in reshape_request()
5465 mddev->reshape_position = conf->reshape_progress; in reshape_request()
5467 conf->reshape_checkpoint = jiffies; in reshape_request()
5474 spin_lock_irq(&conf->device_lock); in reshape_request()
5475 conf->reshape_safe = mddev->reshape_position; in reshape_request()
5476 spin_unlock_irq(&conf->device_lock); in reshape_request()
5477 wake_up(&conf->wait_for_overlap); in reshape_request()
5485 sh = raid5_get_active_stripe(conf, stripe_addr+i, 0, 0, 1); in reshape_request()
5487 atomic_inc(&conf->reshape_stripes); in reshape_request()
5495 if (conf->level == 6 && in reshape_request()
5513 spin_lock_irq(&conf->device_lock); in reshape_request()
5515 conf->reshape_progress -= reshape_sectors * new_data_disks; in reshape_request()
5517 conf->reshape_progress += reshape_sectors * new_data_disks; in reshape_request()
5518 spin_unlock_irq(&conf->device_lock); in reshape_request()
5525 raid5_compute_sector(conf, stripe_addr*(new_data_disks), in reshape_request()
5528 raid5_compute_sector(conf, ((stripe_addr+reshape_sectors) in reshape_request()
5534 sh = raid5_get_active_stripe(conf, first_sector, 1, 0, 1); in reshape_request()
5558 wait_event(conf->wait_for_overlap, in reshape_request()
5559 atomic_read(&conf->reshape_stripes) == 0 in reshape_request()
5561 if (atomic_read(&conf->reshape_stripes) != 0) in reshape_request()
5563 mddev->reshape_position = conf->reshape_progress; in reshape_request()
5565 conf->reshape_checkpoint = jiffies; in reshape_request()
5573 spin_lock_irq(&conf->device_lock); in reshape_request()
5574 conf->reshape_safe = mddev->reshape_position; in reshape_request()
5575 spin_unlock_irq(&conf->device_lock); in reshape_request()
5576 wake_up(&conf->wait_for_overlap); in reshape_request()
5585 struct r5conf *conf = mddev->private; in sync_request() local
5596 end_reshape(conf); in sync_request()
5604 conf->fullsync = 0; in sync_request()
5611 wait_event(conf->wait_for_overlap, conf->quiesce != 2); in sync_request()
5626 if (mddev->degraded >= conf->max_degraded && in sync_request()
5633 !conf->fullsync && in sync_request()
5644 sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0); in sync_request()
5646 sh = raid5_get_active_stripe(conf, sector_nr, 0, 0, 0); in sync_request()
5657 for (i = 0; i < conf->raid_disks; i++) { in sync_request()
5658 struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev); in sync_request()
5675 static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) in retry_aligned_read() argument
5696 sector = raid5_compute_sector(conf, logical_sector, in retry_aligned_read()
5709 sh = raid5_get_active_stripe(conf, sector, 0, 1, 1); in retry_aligned_read()
5714 conf->retry_read_aligned = raid_bio; in retry_aligned_read()
5721 conf->retry_read_aligned = raid_bio; in retry_aligned_read()
5736 if (atomic_dec_and_test(&conf->active_aligned_reads)) in retry_aligned_read()
5737 wake_up(&conf->wait_for_quiescent); in retry_aligned_read()
5741 static int handle_active_stripes(struct r5conf *conf, int group, in handle_active_stripes() argument
5750 (sh = __get_priority_stripe(conf, group)) != NULL) in handle_active_stripes()
5758 spin_unlock_irq(&conf->device_lock); in handle_active_stripes()
5759 r5l_flush_stripe_to_raid(conf->log); in handle_active_stripes()
5760 spin_lock_irq(&conf->device_lock); in handle_active_stripes()
5765 spin_unlock_irq(&conf->device_lock); in handle_active_stripes()
5767 release_inactive_stripe_list(conf, temp_inactive_list, in handle_active_stripes()
5770 r5l_flush_stripe_to_raid(conf->log); in handle_active_stripes()
5772 spin_lock_irq(&conf->device_lock); in handle_active_stripes()
5778 r5l_write_stripe_run(conf->log); in handle_active_stripes()
5782 spin_lock_irq(&conf->device_lock); in handle_active_stripes()
5785 __release_stripe(conf, batch[i], &temp_inactive_list[hash]); in handle_active_stripes()
5794 struct r5conf *conf = group->conf; in raid5_do_work() local
5795 int group_id = group - conf->worker_groups; in raid5_do_work()
5803 spin_lock_irq(&conf->device_lock); in raid5_do_work()
5807 released = release_stripe_list(conf, worker->temp_inactive_list); in raid5_do_work()
5809 batch_size = handle_active_stripes(conf, group_id, worker, in raid5_do_work()
5818 spin_unlock_irq(&conf->device_lock); in raid5_do_work()
5834 struct r5conf *conf = mddev->private; in raid5d() local
5842 if (!bio_list_empty(&conf->return_bi) && in raid5d()
5845 spin_lock_irq(&conf->device_lock); in raid5d()
5847 bio_list_merge(&tmp, &conf->return_bi); in raid5d()
5848 bio_list_init(&conf->return_bi); in raid5d()
5850 spin_unlock_irq(&conf->device_lock); in raid5d()
5856 spin_lock_irq(&conf->device_lock); in raid5d()
5861 released = release_stripe_list(conf, conf->temp_inactive_list); in raid5d()
5863 clear_bit(R5_DID_ALLOC, &conf->cache_state); in raid5d()
5866 !list_empty(&conf->bitmap_list)) { in raid5d()
5868 conf->seq_flush++; in raid5d()
5869 spin_unlock_irq(&conf->device_lock); in raid5d()
5871 spin_lock_irq(&conf->device_lock); in raid5d()
5872 conf->seq_write = conf->seq_flush; in raid5d()
5873 activate_bit_delay(conf, conf->temp_inactive_list); in raid5d()
5875 raid5_activate_delayed(conf); in raid5d()
5877 while ((bio = remove_bio_from_retry(conf))) { in raid5d()
5879 spin_unlock_irq(&conf->device_lock); in raid5d()
5880 ok = retry_aligned_read(conf, bio); in raid5d()
5881 spin_lock_irq(&conf->device_lock); in raid5d()
5887 batch_size = handle_active_stripes(conf, ANY_GROUP, NULL, in raid5d()
5888 conf->temp_inactive_list); in raid5d()
5894 spin_unlock_irq(&conf->device_lock); in raid5d()
5896 spin_lock_irq(&conf->device_lock); in raid5d()
5901 spin_unlock_irq(&conf->device_lock); in raid5d()
5902 if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) && in raid5d()
5903 mutex_trylock(&conf->cache_size_mutex)) { in raid5d()
5904 grow_one_stripe(conf, __GFP_NOWARN); in raid5d()
5908 set_bit(R5_DID_ALLOC, &conf->cache_state); in raid5d()
5909 mutex_unlock(&conf->cache_size_mutex); in raid5d()
5912 r5l_flush_stripe_to_raid(conf->log); in raid5d()
5923 struct r5conf *conf; in raid5_show_stripe_cache_size() local
5926 conf = mddev->private; in raid5_show_stripe_cache_size()
5927 if (conf) in raid5_show_stripe_cache_size()
5928 ret = sprintf(page, "%d\n", conf->min_nr_stripes); in raid5_show_stripe_cache_size()
5936 struct r5conf *conf = mddev->private; in raid5_set_cache_size() local
5942 conf->min_nr_stripes = size; in raid5_set_cache_size()
5943 mutex_lock(&conf->cache_size_mutex); in raid5_set_cache_size()
5944 while (size < conf->max_nr_stripes && in raid5_set_cache_size()
5945 drop_one_stripe(conf)) in raid5_set_cache_size()
5947 mutex_unlock(&conf->cache_size_mutex); in raid5_set_cache_size()
5954 mutex_lock(&conf->cache_size_mutex); in raid5_set_cache_size()
5955 while (size > conf->max_nr_stripes) in raid5_set_cache_size()
5956 if (!grow_one_stripe(conf, GFP_KERNEL)) in raid5_set_cache_size()
5958 mutex_unlock(&conf->cache_size_mutex); in raid5_set_cache_size()
5967 struct r5conf *conf; in raid5_store_stripe_cache_size() local
5978 conf = mddev->private; in raid5_store_stripe_cache_size()
5979 if (!conf) in raid5_store_stripe_cache_size()
5996 struct r5conf *conf = mddev->private; in raid5_show_rmw_level() local
5997 if (conf) in raid5_show_rmw_level()
5998 return sprintf(page, "%d\n", conf->rmw_level); in raid5_show_rmw_level()
6006 struct r5conf *conf = mddev->private; in raid5_store_rmw_level() local
6009 if (!conf) in raid5_store_rmw_level()
6026 conf->rmw_level = new; in raid5_store_rmw_level()
6039 struct r5conf *conf; in raid5_show_preread_threshold() local
6042 conf = mddev->private; in raid5_show_preread_threshold()
6043 if (conf) in raid5_show_preread_threshold()
6044 ret = sprintf(page, "%d\n", conf->bypass_threshold); in raid5_show_preread_threshold()
6052 struct r5conf *conf; in raid5_store_preread_threshold() local
6064 conf = mddev->private; in raid5_store_preread_threshold()
6065 if (!conf) in raid5_store_preread_threshold()
6067 else if (new > conf->min_nr_stripes) in raid5_store_preread_threshold()
6070 conf->bypass_threshold = new; in raid5_store_preread_threshold()
6084 struct r5conf *conf; in raid5_show_skip_copy() local
6087 conf = mddev->private; in raid5_show_skip_copy()
6088 if (conf) in raid5_show_skip_copy()
6089 ret = sprintf(page, "%d\n", conf->skip_copy); in raid5_show_skip_copy()
6097 struct r5conf *conf; in raid5_store_skip_copy() local
6110 conf = mddev->private; in raid5_store_skip_copy()
6111 if (!conf) in raid5_store_skip_copy()
6113 else if (new != conf->skip_copy) { in raid5_store_skip_copy()
6115 conf->skip_copy = new; in raid5_store_skip_copy()
6136 struct r5conf *conf = mddev->private; in stripe_cache_active_show() local
6137 if (conf) in stripe_cache_active_show()
6138 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); in stripe_cache_active_show()
6149 struct r5conf *conf; in raid5_show_group_thread_cnt() local
6152 conf = mddev->private; in raid5_show_group_thread_cnt()
6153 if (conf) in raid5_show_group_thread_cnt()
6154 ret = sprintf(page, "%d\n", conf->worker_cnt_per_group); in raid5_show_group_thread_cnt()
6159 static int alloc_thread_groups(struct r5conf *conf, int cnt,
6166 struct r5conf *conf; in raid5_store_group_thread_cnt() local
6180 conf = mddev->private; in raid5_store_group_thread_cnt()
6181 if (!conf) in raid5_store_group_thread_cnt()
6183 else if (new != conf->worker_cnt_per_group) { in raid5_store_group_thread_cnt()
6186 old_groups = conf->worker_groups; in raid5_store_group_thread_cnt()
6190 err = alloc_thread_groups(conf, new, in raid5_store_group_thread_cnt()
6194 spin_lock_irq(&conf->device_lock); in raid5_store_group_thread_cnt()
6195 conf->group_cnt = group_cnt; in raid5_store_group_thread_cnt()
6196 conf->worker_cnt_per_group = worker_cnt_per_group; in raid5_store_group_thread_cnt()
6197 conf->worker_groups = new_groups; in raid5_store_group_thread_cnt()
6198 spin_unlock_irq(&conf->device_lock); in raid5_store_group_thread_cnt()
6230 static int alloc_thread_groups(struct r5conf *conf, int cnt, in alloc_thread_groups() argument
6261 group->conf = conf; in alloc_thread_groups()
6277 static void free_thread_groups(struct r5conf *conf) in free_thread_groups() argument
6279 if (conf->worker_groups) in free_thread_groups()
6280 kfree(conf->worker_groups[0].workers); in free_thread_groups()
6281 kfree(conf->worker_groups); in free_thread_groups()
6282 conf->worker_groups = NULL; in free_thread_groups()
6288 struct r5conf *conf = mddev->private; in raid5_size() local
6294 raid_disks = min(conf->raid_disks, conf->previous_raid_disks); in raid5_size()
6296 sectors &= ~((sector_t)conf->chunk_sectors - 1); in raid5_size()
6297 sectors &= ~((sector_t)conf->prev_chunk_sectors - 1); in raid5_size()
6298 return sectors * (raid_disks - conf->max_degraded); in raid5_size()
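raid5_size() (lines 6288-6298) derives exported capacity from the per-device size: round the usable sectors down to a multiple of both the current and the previous chunk size, then multiply by the number of data devices. A hedged restatement of that math, assuming power-of-two chunk sizes (as md uses) so the mask trick is valid:

#include <stdint.h>

static uint64_t raid5_size_sketch(uint64_t per_dev_sectors,
				  uint32_t chunk_sectors,
				  uint32_t prev_chunk_sectors,
				  uint32_t raid_disks,
				  uint32_t max_degraded)
{
	/* round down to a whole number of chunks in both geometries */
	per_dev_sectors &= ~((uint64_t)chunk_sectors - 1);
	per_dev_sectors &= ~((uint64_t)prev_chunk_sectors - 1);
	/* only the data devices contribute to exported capacity */
	return per_dev_sectors * (raid_disks - max_degraded);
}

For example, five equal devices in RAID-6 (max_degraded of 2) export three devices' worth of the rounded per-device capacity.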
6301 static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) in free_scratch_buffer() argument
6310 static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) in alloc_scratch_buffer() argument
6312 if (conf->level == 6 && !percpu->spare_page) in alloc_scratch_buffer()
6315 percpu->scribble = scribble_alloc(max(conf->raid_disks, in alloc_scratch_buffer()
6316 conf->previous_raid_disks), in alloc_scratch_buffer()
6317 max(conf->chunk_sectors, in alloc_scratch_buffer()
6318 conf->prev_chunk_sectors) in alloc_scratch_buffer()
6322 if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) { in alloc_scratch_buffer()
6323 free_scratch_buffer(conf, percpu); in alloc_scratch_buffer()
6330 static void raid5_free_percpu(struct r5conf *conf) in raid5_free_percpu() argument
6334 if (!conf->percpu) in raid5_free_percpu()
6338 unregister_cpu_notifier(&conf->cpu_notify); in raid5_free_percpu()
6343 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); in raid5_free_percpu()
6346 free_percpu(conf->percpu); in raid5_free_percpu()
6349 static void free_conf(struct r5conf *conf) in free_conf() argument
6351 if (conf->log) in free_conf()
6352 r5l_exit_log(conf->log); in free_conf()
6353 if (conf->shrinker.seeks) in free_conf()
6354 unregister_shrinker(&conf->shrinker); in free_conf()
6356 free_thread_groups(conf); in free_conf()
6357 shrink_stripes(conf); in free_conf()
6358 raid5_free_percpu(conf); in free_conf()
6359 kfree(conf->disks); in free_conf()
6360 kfree(conf->stripe_hashtbl); in free_conf()
6361 kfree(conf); in free_conf()
6368 struct r5conf *conf = container_of(nfb, struct r5conf, cpu_notify); in raid456_cpu_notify() local
6370 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); in raid456_cpu_notify()
6375 if (alloc_scratch_buffer(conf, percpu)) { in raid456_cpu_notify()
6383 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); in raid456_cpu_notify()
6392 static int raid5_alloc_percpu(struct r5conf *conf) in raid5_alloc_percpu() argument
6397 conf->percpu = alloc_percpu(struct raid5_percpu); in raid5_alloc_percpu()
6398 if (!conf->percpu) in raid5_alloc_percpu()
6402 conf->cpu_notify.notifier_call = raid456_cpu_notify; in raid5_alloc_percpu()
6403 conf->cpu_notify.priority = 0; in raid5_alloc_percpu()
6404 err = register_cpu_notifier(&conf->cpu_notify); in raid5_alloc_percpu()
6411 err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); in raid5_alloc_percpu()
6421 conf->scribble_disks = max(conf->raid_disks, in raid5_alloc_percpu()
6422 conf->previous_raid_disks); in raid5_alloc_percpu()
6423 conf->scribble_sectors = max(conf->chunk_sectors, in raid5_alloc_percpu()
6424 conf->prev_chunk_sectors); in raid5_alloc_percpu()
6432 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); in raid5_cache_scan() local
6435 if (mutex_trylock(&conf->cache_size_mutex)) { in raid5_cache_scan()
6438 conf->max_nr_stripes > conf->min_nr_stripes) { in raid5_cache_scan()
6439 if (drop_one_stripe(conf) == 0) { in raid5_cache_scan()
6445 mutex_unlock(&conf->cache_size_mutex); in raid5_cache_scan()
6453 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); in raid5_cache_count() local
6455 if (conf->max_nr_stripes < conf->min_nr_stripes) in raid5_cache_count()
6458 return conf->max_nr_stripes - conf->min_nr_stripes; in raid5_cache_count()
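raid5_cache_scan() and raid5_cache_count() (lines 6432-6458) expose the stripe cache to the memory shrinker: count reports how far max_nr_stripes sits above the min_nr_stripes floor, and scan drops one stripe at a time under cache_size_mutex until the request is met or the floor is reached. A hedged model of that accounting, with drop_one_stripe_stub() standing in for the real drop_one_stripe():

/* Stand-in for drop_one_stripe(): returns 1 if a stripe could be freed. */
static int drop_one_stripe_stub(int *max_nr_stripes)
{
	if (*max_nr_stripes <= 0)
		return 0;
	(*max_nr_stripes)--;
	return 1;
}

/* Mirrors raid5_cache_count(): nothing to give back below the floor. */
static long cache_count_sketch(int max_nr_stripes, int min_nr_stripes)
{
	if (max_nr_stripes < min_nr_stripes)
		return 0;
	return max_nr_stripes - min_nr_stripes;
}

/* Mirrors the raid5_cache_scan() loop: free at most nr_to_scan stripes,
 * never shrinking the cache past min_nr_stripes. */
static long cache_scan_sketch(int *max_nr_stripes, int min_nr_stripes,
			      long nr_to_scan)
{
	long freed = 0;

	while (nr_to_scan-- > 0 && *max_nr_stripes > min_nr_stripes) {
		if (!drop_one_stripe_stub(max_nr_stripes))
			break;
		freed++;
	}
	return freed;
}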
6463 struct r5conf *conf; in setup_conf() local
6501 conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL); in setup_conf()
6502 if (conf == NULL) in setup_conf()
6505 if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group, in setup_conf()
6507 conf->group_cnt = group_cnt; in setup_conf()
6508 conf->worker_cnt_per_group = worker_cnt_per_group; in setup_conf()
6509 conf->worker_groups = new_group; in setup_conf()
6512 spin_lock_init(&conf->device_lock); in setup_conf()
6513 seqcount_init(&conf->gen_lock); in setup_conf()
6514 mutex_init(&conf->cache_size_mutex); in setup_conf()
6515 init_waitqueue_head(&conf->wait_for_quiescent); in setup_conf()
6516 init_waitqueue_head(&conf->wait_for_stripe); in setup_conf()
6517 init_waitqueue_head(&conf->wait_for_overlap); in setup_conf()
6518 INIT_LIST_HEAD(&conf->handle_list); in setup_conf()
6519 INIT_LIST_HEAD(&conf->hold_list); in setup_conf()
6520 INIT_LIST_HEAD(&conf->delayed_list); in setup_conf()
6521 INIT_LIST_HEAD(&conf->bitmap_list); in setup_conf()
6522 bio_list_init(&conf->return_bi); in setup_conf()
6523 init_llist_head(&conf->released_stripes); in setup_conf()
6524 atomic_set(&conf->active_stripes, 0); in setup_conf()
6525 atomic_set(&conf->preread_active_stripes, 0); in setup_conf()
6526 atomic_set(&conf->active_aligned_reads, 0); in setup_conf()
6527 conf->bypass_threshold = BYPASS_THRESHOLD; in setup_conf()
6528 conf->recovery_disabled = mddev->recovery_disabled - 1; in setup_conf()
6530 conf->raid_disks = mddev->raid_disks; in setup_conf()
6532 conf->previous_raid_disks = mddev->raid_disks; in setup_conf()
6534 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; in setup_conf()
6535 max_disks = max(conf->raid_disks, conf->previous_raid_disks); in setup_conf()
6537 conf->disks = kzalloc(max_disks * sizeof(struct disk_info), in setup_conf()
6539 if (!conf->disks) in setup_conf()
6542 conf->mddev = mddev; in setup_conf()
6544 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) in setup_conf()
6552 spin_lock_init(conf->hash_locks); in setup_conf()
6554 spin_lock_init(conf->hash_locks + i); in setup_conf()
6557 INIT_LIST_HEAD(conf->inactive_list + i); in setup_conf()
6560 INIT_LIST_HEAD(conf->temp_inactive_list + i); in setup_conf()
6562 conf->level = mddev->new_level; in setup_conf()
6563 conf->chunk_sectors = mddev->new_chunk_sectors; in setup_conf()
6564 if (raid5_alloc_percpu(conf) != 0) in setup_conf()
6574 disk = conf->disks + raid_disk; in setup_conf()
6593 conf->fullsync = 1; in setup_conf()
6596 conf->level = mddev->new_level; in setup_conf()
6597 if (conf->level == 6) { in setup_conf()
6598 conf->max_degraded = 2; in setup_conf()
6600 conf->rmw_level = PARITY_ENABLE_RMW; in setup_conf()
6602 conf->rmw_level = PARITY_DISABLE_RMW; in setup_conf()
6604 conf->max_degraded = 1; in setup_conf()
6605 conf->rmw_level = PARITY_ENABLE_RMW; in setup_conf()
6607 conf->algorithm = mddev->new_layout; in setup_conf()
6608 conf->reshape_progress = mddev->reshape_position; in setup_conf()
6609 if (conf->reshape_progress != MaxSector) { in setup_conf()
6610 conf->prev_chunk_sectors = mddev->chunk_sectors; in setup_conf()
6611 conf->prev_algo = mddev->layout; in setup_conf()
6613 conf->prev_chunk_sectors = conf->chunk_sectors; in setup_conf()
6614 conf->prev_algo = conf->algorithm; in setup_conf()
6617 conf->min_nr_stripes = NR_STRIPES; in setup_conf()
6618 memory = conf->min_nr_stripes * (sizeof(struct stripe_head) + in setup_conf()
6620 atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS); in setup_conf()
6621 if (grow_stripes(conf, conf->min_nr_stripes)) { in setup_conf()
6634 conf->shrinker.seeks = DEFAULT_SEEKS * conf->raid_disks * 4; in setup_conf()
6635 conf->shrinker.scan_objects = raid5_cache_scan; in setup_conf()
6636 conf->shrinker.count_objects = raid5_cache_count; in setup_conf()
6637 conf->shrinker.batch = 128; in setup_conf()
6638 conf->shrinker.flags = 0; in setup_conf()
6639 register_shrinker(&conf->shrinker); in setup_conf()
6642 conf->thread = md_register_thread(raid5d, mddev, pers_name); in setup_conf()
6643 if (!conf->thread) { in setup_conf()
6650 return conf; in setup_conf()
6653 if (conf) { in setup_conf()
6654 free_conf(conf); in setup_conf()
6688 struct r5conf *conf; in run() local
6817 conf = setup_conf(mddev); in run()
6819 conf = mddev->private; in run()
6821 if (IS_ERR(conf)) in run()
6822 return PTR_ERR(conf); in run()
6831 conf->min_offset_diff = min_offset_diff; in run()
6832 mddev->thread = conf->thread; in run()
6833 conf->thread = NULL; in run()
6834 mddev->private = conf; in run()
6836 for (i = 0; i < conf->raid_disks && conf->previous_raid_disks; in run()
6838 rdev = conf->disks[i].rdev; in run()
6839 if (!rdev && conf->disks[i].replacement) { in run()
6841 rdev = conf->disks[i].replacement; in run()
6842 conf->disks[i].replacement = NULL; in run()
6844 conf->disks[i].rdev = rdev; in run()
6848 if (conf->disks[i].replacement && in run()
6849 conf->reshape_progress != MaxSector) { in run()
6875 conf->algorithm, in run()
6876 conf->raid_disks, in run()
6877 conf->max_degraded)) in run()
6881 conf->prev_algo, in run()
6882 conf->previous_raid_disks, in run()
6883 conf->max_degraded)) in run()
6891 mddev->degraded = calc_degraded(conf); in run()
6893 if (has_failed(conf)) { in run()
6896 mdname(mddev), mddev->degraded, conf->raid_disks); in run()
6921 " devices, algorithm %d\n", mdname(mddev), conf->level, in run()
6927 mdname(mddev), conf->level, in run()
6931 print_raid5_conf(conf); in run()
6933 if (conf->reshape_progress != MaxSector) { in run()
6934 conf->reshape_safe = conf->reshape_progress; in run()
6935 atomic_set(&conf->reshape_stripes, 0); in run()
6961 int data_disks = conf->previous_raid_disks - conf->max_degraded; in run()
6970 (conf->raid_disks - conf->max_degraded)); in run()
7039 r5l_init_log(conf, journal_dev); in run()
7045 print_raid5_conf(conf); in run()
7046 free_conf(conf); in run()
7054 struct r5conf *conf = priv; in raid5_free() local
7056 free_conf(conf); in raid5_free()
7062 struct r5conf *conf = mddev->private; in status() local
7066 conf->chunk_sectors / 2, mddev->layout); in status()
7067 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); in status()
7068 for (i = 0; i < conf->raid_disks; i++) in status()
7070 conf->disks[i].rdev && in status()
7071 test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_"); in status()
7075 static void print_raid5_conf (struct r5conf *conf) in print_raid5_conf() argument
7081 if (!conf) { in print_raid5_conf()
7085 printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level, in print_raid5_conf()
7086 conf->raid_disks, in print_raid5_conf()
7087 conf->raid_disks - conf->mddev->degraded); in print_raid5_conf()
7089 for (i = 0; i < conf->raid_disks; i++) { in print_raid5_conf()
7091 tmp = conf->disks + i; in print_raid5_conf()
7102 struct r5conf *conf = mddev->private; in raid5_spare_active() local
7107 for (i = 0; i < conf->raid_disks; i++) { in raid5_spare_active()
7108 tmp = conf->disks + i; in raid5_spare_active()
7135 spin_lock_irqsave(&conf->device_lock, flags); in raid5_spare_active()
7136 mddev->degraded = calc_degraded(conf); in raid5_spare_active()
7137 spin_unlock_irqrestore(&conf->device_lock, flags); in raid5_spare_active()
7138 print_raid5_conf(conf); in raid5_spare_active()
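
raid5_spare_active() recomputes mddev->degraded under conf->device_lock using the irqsave variant, so the update is safe regardless of whether the caller already has interrupts disabled. A minimal sketch of that pattern with hypothetical names (demo_array, demo_calc_degraded()):

	/* Sketch only: recompute a derived count under an IRQ-safe spinlock.
	 * demo_array and demo_calc_degraded() are hypothetical stand-ins. */
	#include <linux/spinlock.h>

	struct demo_array {
		spinlock_t device_lock;
		int failed;	/* members known to be bad */
		int missing;	/* slots with no device at all */
		int degraded;	/* derived value published to readers */
	};

	static int demo_calc_degraded(struct demo_array *a)
	{
		return a->failed + a->missing;
	}

	static void demo_update_degraded(struct demo_array *a)
	{
		unsigned long flags;

		/* irqsave works from any context: it remembers and restores
		 * the caller's interrupt state instead of unconditionally
		 * re-enabling interrupts on unlock. */
		spin_lock_irqsave(&a->device_lock, flags);
		a->degraded = demo_calc_degraded(a);
		spin_unlock_irqrestore(&a->device_lock, flags);
	}
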
7144 struct r5conf *conf = mddev->private; in raid5_remove_disk() local
7148 struct disk_info *p = conf->disks + number; in raid5_remove_disk()
7150 print_raid5_conf(conf); in raid5_remove_disk()
7167 if (number >= conf->raid_disks && in raid5_remove_disk()
7168 conf->reshape_progress == MaxSector) in raid5_remove_disk()
7180 mddev->recovery_disabled != conf->recovery_disabled && in raid5_remove_disk()
7181 !has_failed(conf) && in raid5_remove_disk()
7183 number < conf->raid_disks) { in raid5_remove_disk()
7209 print_raid5_conf(conf); in raid5_remove_disk()
7215 struct r5conf *conf = mddev->private; in raid5_add_disk() local
7220 int last = conf->raid_disks - 1; in raid5_add_disk()
7224 if (mddev->recovery_disabled == conf->recovery_disabled) in raid5_add_disk()
7227 if (rdev->saved_raid_disk < 0 && has_failed(conf)) in raid5_add_disk()
7240 conf->disks[rdev->saved_raid_disk].rdev == NULL) in raid5_add_disk()
7244 p = conf->disks + disk; in raid5_add_disk()
7250 conf->fullsync = 1; in raid5_add_disk()
7256 p = conf->disks + disk; in raid5_add_disk()
7263 conf->fullsync = 1; in raid5_add_disk()
7269 print_raid5_conf(conf); in raid5_add_disk()
7283 struct r5conf *conf = mddev->private; in raid5_resize() local
7285 if (conf->log) in raid5_resize()
7287 sectors &= ~((sector_t)conf->chunk_sectors - 1); in raid5_resize()
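
raid5_resize() trims the requested size down to a whole number of chunks with a bit mask, which works because chunk_sectors is a power of two. A small stand-alone illustration (plain C, made-up values):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t sectors = 1000003;	/* requested size, 512-byte sectors */
		uint64_t chunk_sectors = 1024;	/* 512 KiB chunks; power of two */

		/* Same trick as raid5_resize(): clear the low bits to round
		 * down to a multiple of the chunk size. */
		sectors &= ~(chunk_sectors - 1);

		printf("%llu\n", (unsigned long long)sectors);	/* prints 999424 */
		return 0;
	}
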
7320 struct r5conf *conf = mddev->private; in check_stripe_cache() local
7322 > conf->min_nr_stripes || in check_stripe_cache()
7324 > conf->min_nr_stripes) { in check_stripe_cache()
7336 struct r5conf *conf = mddev->private; in check_reshape() local
7338 if (conf->log) in check_reshape()
7344 if (has_failed(conf)) in check_reshape()
7364 if (resize_chunks(conf, in check_reshape()
7365 conf->previous_raid_disks in check_reshape()
7371 return resize_stripes(conf, (conf->previous_raid_disks in check_reshape()
7377 struct r5conf *conf = mddev->private; in raid5_start_reshape() local
7388 if (has_failed(conf)) in raid5_start_reshape()
7397 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) in raid5_start_reshape()
7407 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) in raid5_start_reshape()
7414 atomic_set(&conf->reshape_stripes, 0); in raid5_start_reshape()
7415 spin_lock_irq(&conf->device_lock); in raid5_start_reshape()
7416 write_seqcount_begin(&conf->gen_lock); in raid5_start_reshape()
7417 conf->previous_raid_disks = conf->raid_disks; in raid5_start_reshape()
7418 conf->raid_disks += mddev->delta_disks; in raid5_start_reshape()
7419 conf->prev_chunk_sectors = conf->chunk_sectors; in raid5_start_reshape()
7420 conf->chunk_sectors = mddev->new_chunk_sectors; in raid5_start_reshape()
7421 conf->prev_algo = conf->algorithm; in raid5_start_reshape()
7422 conf->algorithm = mddev->new_layout; in raid5_start_reshape()
7423 conf->generation++; in raid5_start_reshape()
7429 conf->reshape_progress = raid5_size(mddev, 0, 0); in raid5_start_reshape()
7431 conf->reshape_progress = 0; in raid5_start_reshape()
7432 conf->reshape_safe = conf->reshape_progress; in raid5_start_reshape()
7433 write_seqcount_end(&conf->gen_lock); in raid5_start_reshape()
7434 spin_unlock_irq(&conf->device_lock); in raid5_start_reshape()
7456 >= conf->previous_raid_disks) in raid5_start_reshape()
7464 } else if (rdev->raid_disk >= conf->previous_raid_disks in raid5_start_reshape()
7474 spin_lock_irqsave(&conf->device_lock, flags); in raid5_start_reshape()
7475 mddev->degraded = calc_degraded(conf); in raid5_start_reshape()
7476 spin_unlock_irqrestore(&conf->device_lock, flags); in raid5_start_reshape()
7478 mddev->raid_disks = conf->raid_disks; in raid5_start_reshape()
7479 mddev->reshape_position = conf->reshape_progress; in raid5_start_reshape()
7491 spin_lock_irq(&conf->device_lock); in raid5_start_reshape()
7492 write_seqcount_begin(&conf->gen_lock); in raid5_start_reshape()
7493 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; in raid5_start_reshape()
7495 conf->chunk_sectors = conf->prev_chunk_sectors; in raid5_start_reshape()
7496 mddev->new_layout = conf->algorithm = conf->prev_algo; in raid5_start_reshape()
7500 conf->generation --; in raid5_start_reshape()
7501 conf->reshape_progress = MaxSector; in raid5_start_reshape()
7503 write_seqcount_end(&conf->gen_lock); in raid5_start_reshape()
7504 spin_unlock_irq(&conf->device_lock); in raid5_start_reshape()
7507 conf->reshape_checkpoint = jiffies; in raid5_start_reshape()
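
raid5_start_reshape() publishes the new geometry inside device_lock and a write_seqcount_begin()/write_seqcount_end() pair, and rolls it back the same way on failure, so lock-free readers can detect a change in progress and retry. A hedged sketch of that writer/reader pairing; struct demo_geom and its helpers are hypothetical, and only the locking shape mirrors the listing.

	/* Sketch only: seqcount-protected geometry switch. */
	#include <linux/spinlock.h>
	#include <linux/seqlock.h>

	struct demo_geom {
		spinlock_t	lock;		/* serialises writers */
		seqcount_t	gen_lock;	/* lets readers detect a change */
		int		raid_disks;
		int		chunk_sectors;
	};

	static void demo_geom_init(struct demo_geom *g)
	{
		spin_lock_init(&g->lock);
		seqcount_init(&g->gen_lock);
		g->raid_disks = 0;
		g->chunk_sectors = 0;
	}

	/* Writer: publish a new geometry atomically with respect to readers. */
	static void demo_set_geometry(struct demo_geom *g, int disks, int chunk)
	{
		spin_lock_irq(&g->lock);
		write_seqcount_begin(&g->gen_lock);
		g->raid_disks = disks;
		g->chunk_sectors = chunk;
		write_seqcount_end(&g->gen_lock);
		spin_unlock_irq(&g->lock);
	}

	/* Reader: retry until a consistent snapshot is seen. */
	static void demo_get_geometry(struct demo_geom *g, int *disks, int *chunk)
	{
		unsigned int seq;

		do {
			seq = read_seqcount_begin(&g->gen_lock);
			*disks = g->raid_disks;
			*chunk = g->chunk_sectors;
		} while (read_seqcount_retry(&g->gen_lock, seq));
	}
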
7516 static void end_reshape(struct r5conf *conf) in end_reshape() argument
7519 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { in end_reshape()
7522 spin_lock_irq(&conf->device_lock); in end_reshape()
7523 conf->previous_raid_disks = conf->raid_disks; in end_reshape()
7524 rdev_for_each(rdev, conf->mddev) in end_reshape()
7527 conf->reshape_progress = MaxSector; in end_reshape()
7528 conf->mddev->reshape_position = MaxSector; in end_reshape()
7529 spin_unlock_irq(&conf->device_lock); in end_reshape()
7530 wake_up(&conf->wait_for_overlap); in end_reshape()
7535 if (conf->mddev->queue) { in end_reshape()
7536 int data_disks = conf->raid_disks - conf->max_degraded; in end_reshape()
7537 int stripe = data_disks * ((conf->chunk_sectors << 9) in end_reshape()
7539 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) in end_reshape()
7540 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; in end_reshape()
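
For a hypothetical array with four data disks and 512 KiB chunks (chunk_sectors = 1024) on a machine with 4 KiB pages, the calculation above gives stripe = 4 * ((1024 << 9) / 4096) = 512 pages, so end_reshape() would raise ra_pages to 2 * 512 = 1024 pages — about 4 MiB of readahead, enough to cover two full stripes.
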
7550 struct r5conf *conf = mddev->private; in raid5_finish_reshape() local
7560 spin_lock_irq(&conf->device_lock); in raid5_finish_reshape()
7561 mddev->degraded = calc_degraded(conf); in raid5_finish_reshape()
7562 spin_unlock_irq(&conf->device_lock); in raid5_finish_reshape()
7563 for (d = conf->raid_disks ; in raid5_finish_reshape()
7564 d < conf->raid_disks - mddev->delta_disks; in raid5_finish_reshape()
7566 struct md_rdev *rdev = conf->disks[d].rdev; in raid5_finish_reshape()
7569 rdev = conf->disks[d].replacement; in raid5_finish_reshape()
7574 mddev->layout = conf->algorithm; in raid5_finish_reshape()
7575 mddev->chunk_sectors = conf->chunk_sectors; in raid5_finish_reshape()
7584 struct r5conf *conf = mddev->private; in raid5_quiesce() local
7588 wake_up(&conf->wait_for_overlap); in raid5_quiesce()
7592 lock_all_device_hash_locks_irq(conf); in raid5_quiesce()
7596 conf->quiesce = 2; in raid5_quiesce()
7597 wait_event_cmd(conf->wait_for_quiescent, in raid5_quiesce()
7598 atomic_read(&conf->active_stripes) == 0 && in raid5_quiesce()
7599 atomic_read(&conf->active_aligned_reads) == 0, in raid5_quiesce()
7600 unlock_all_device_hash_locks_irq(conf), in raid5_quiesce()
7601 lock_all_device_hash_locks_irq(conf)); in raid5_quiesce()
7602 conf->quiesce = 1; in raid5_quiesce()
7603 unlock_all_device_hash_locks_irq(conf); in raid5_quiesce()
7605 wake_up(&conf->wait_for_overlap); in raid5_quiesce()
7609 lock_all_device_hash_locks_irq(conf); in raid5_quiesce()
7610 conf->quiesce = 0; in raid5_quiesce()
7611 wake_up(&conf->wait_for_quiescent); in raid5_quiesce()
7612 wake_up(&conf->wait_for_overlap); in raid5_quiesce()
7613 unlock_all_device_hash_locks_irq(conf); in raid5_quiesce()
7616 r5l_quiesce(conf->log, state); in raid5_quiesce()
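
raid5_quiesce() drains in-flight activity by sleeping in wait_event_cmd(), which runs the first command (drop every hash lock) before each sleep and the second (retake them) before the condition is rechecked. A hedged sketch of that shape; demo_conf, demo_lock_all() and demo_unlock_all() are made-up names, and a single lock stands in for the per-hash lock array.

	/* Sketch only: quiesce by waiting for an activity counter to drain. */
	#include <linux/spinlock.h>
	#include <linux/wait.h>
	#include <linux/atomic.h>

	struct demo_conf {
		spinlock_t		lock;		/* stand-in for the hash locks */
		wait_queue_head_t	wait_for_quiescent;
		atomic_t		active;		/* in-flight work items */
		int			quiesce;
	};

	static void demo_conf_init(struct demo_conf *c)
	{
		spin_lock_init(&c->lock);
		init_waitqueue_head(&c->wait_for_quiescent);
		atomic_set(&c->active, 0);
		c->quiesce = 0;
	}

	static void demo_lock_all(struct demo_conf *c)   { spin_lock_irq(&c->lock); }
	static void demo_unlock_all(struct demo_conf *c) { spin_unlock_irq(&c->lock); }

	static void demo_quiesce(struct demo_conf *c)
	{
		demo_lock_all(c);
		c->quiesce = 2;			/* block new activity */
		/* Sleep until everything drains; the lock is dropped around
		 * each sleep and retaken before the condition is re-checked. */
		wait_event_cmd(c->wait_for_quiescent,
			       atomic_read(&c->active) == 0,
			       demo_unlock_all(c),
			       demo_lock_all(c));
		c->quiesce = 1;			/* fully quiesced */
		demo_unlock_all(c);
	}
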
7712 struct r5conf *conf = mddev->private; in raid5_check_reshape() local
7732 conf->algorithm = mddev->new_layout; in raid5_check_reshape()
7736 conf->chunk_sectors = new_chunk ; in raid5_check_reshape()