Lines matching refs:conf in drivers/md/raid1.c (the md RAID1 driver)
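
All of these references go through a single struct r1conf. For orientation, here is a partial sketch of that structure, reconstructed purely from the fields the matches below touch; the real definition lives in drivers/md/raid1.h, and the field order, types, and typedef stand-ins here are approximations so the sketch compiles outside the kernel tree, not the kernel's actual layout:

	/* Stand-in types so this sketch is self-contained; the real
	 * definitions come from kernel headers. */
	typedef unsigned long long sector_t;
	typedef struct { int dummy; } spinlock_t;
	typedef struct { int dummy; } wait_queue_head_t;
	typedef struct { int dummy; } mempool_t;
	struct list_head { struct list_head *next, *prev; };
	struct bio_list { void *head, *tail; };
	struct mddev;
	struct raid1_info;
	struct pool_info;
	struct page;
	struct md_thread;

	/* Partial sketch of struct r1conf, covering only the members
	 * referenced in the listing below. */
	struct r1conf {
		struct mddev		*mddev;
		struct raid1_info	*mirrors;	/* raid_disks * 2 slots:
							 * mirrors, then replacements */
		int			raid_disks;

		/* normal I/O vs. resync serialization */
		spinlock_t		resync_lock;
		int			nr_pending;
		int			nr_waiting;
		int			nr_queued;
		int			barrier;
		int			array_frozen;
		sector_t		next_resync;
		sector_t		start_next_window;
		int			current_window_requests;
		int			next_window_requests;
		wait_queue_head_t	wait_barrier;

		/* work queued for the raid1d thread */
		spinlock_t		device_lock;
		struct list_head	retry_list;
		struct list_head	bio_end_io_list;
		struct bio_list		pending_bio_list;
		int			pending_count;

		/* pools and scratch space */
		mempool_t		*r1bio_pool;
		mempool_t		*r1buf_pool;
		struct pool_info	*poolinfo;
		struct page		*tmppage;

		int			fullsync;
		int			recovery_disabled;
		sector_t		cluster_sync_low;
		sector_t		cluster_sync_high;
		struct md_thread	*thread;
	};
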
69 static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
71 static void lower_barrier(struct r1conf *conf);
181 static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio) in put_all_bios() argument
185 for (i = 0; i < conf->raid_disks * 2; i++) { in put_all_bios()
195 struct r1conf *conf = r1_bio->mddev->private; in free_r1bio() local
197 put_all_bios(conf, r1_bio); in free_r1bio()
198 mempool_free(r1_bio, conf->r1bio_pool); in free_r1bio()
203 struct r1conf *conf = r1_bio->mddev->private; in put_buf() local
206 for (i = 0; i < conf->raid_disks * 2; i++) { in put_buf()
209 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev); in put_buf()
212 mempool_free(r1_bio, conf->r1buf_pool); in put_buf()
214 lower_barrier(conf); in put_buf()
221 struct r1conf *conf = mddev->private; in reschedule_retry() local
223 spin_lock_irqsave(&conf->device_lock, flags); in reschedule_retry()
224 list_add(&r1_bio->retry_list, &conf->retry_list); in reschedule_retry()
225 conf->nr_queued ++; in reschedule_retry()
226 spin_unlock_irqrestore(&conf->device_lock, flags); in reschedule_retry()
228 wake_up(&conf->wait_barrier); in reschedule_retry()
241 struct r1conf *conf = r1_bio->mddev->private; in call_bio_endio() local
247 spin_lock_irqsave(&conf->device_lock, flags); in call_bio_endio()
250 spin_unlock_irqrestore(&conf->device_lock, flags); in call_bio_endio()
255 wake_up(&conf->wait_barrier); in call_bio_endio()
268 allow_barrier(conf, start_next_window, bi_sector); in call_bio_endio()
293 struct r1conf *conf = r1_bio->mddev->private; in update_head_pos() local
295 conf->mirrors[disk].head_position = in update_head_pos()
305 struct r1conf *conf = r1_bio->mddev->private; in find_bio_disk() local
306 int raid_disks = conf->raid_disks; in find_bio_disk()
323 struct r1conf *conf = r1_bio->mddev->private; in raid1_end_read_request() local
339 spin_lock_irqsave(&conf->device_lock, flags); in raid1_end_read_request()
340 if (r1_bio->mddev->degraded == conf->raid_disks || in raid1_end_read_request()
341 (r1_bio->mddev->degraded == conf->raid_disks-1 && in raid1_end_read_request()
342 test_bit(In_sync, &conf->mirrors[mirror].rdev->flags))) in raid1_end_read_request()
344 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_end_read_request()
349 rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev); in raid1_end_read_request()
358 mdname(conf->mddev), in raid1_end_read_request()
359 bdevname(conf->mirrors[mirror].rdev->bdev, in raid1_end_read_request()
407 struct r1conf *conf = r1_bio->mddev->private; in raid1_end_write_request() local
417 &conf->mirrors[mirror].rdev->flags); in raid1_end_write_request()
419 &conf->mirrors[mirror].rdev->flags)) in raid1_end_write_request()
421 conf->mddev->recovery); in raid1_end_write_request()
448 if (test_bit(In_sync, &conf->mirrors[mirror].rdev->flags) && in raid1_end_write_request()
449 !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)) in raid1_end_write_request()
453 if (is_badblock(conf->mirrors[mirror].rdev, in raid1_end_write_request()
462 if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags)) in raid1_end_write_request()
486 rdev_dec_pending(conf->mirrors[mirror].rdev, in raid1_end_write_request()
487 conf->mddev); in raid1_end_write_request()
513 static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors) in read_balance() argument
544 if ((conf->mddev->recovery_cp < this_sector + sectors) || in read_balance()
545 (mddev_is_clustered(conf->mddev) && in read_balance()
546 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector, in read_balance()
552 for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) { in read_balance()
559 rdev = rcu_dereference(conf->mirrors[disk].rdev); in read_balance()
619 dist = abs(this_sector - conf->mirrors[disk].head_position); in read_balance()
625 if (conf->mirrors[disk].next_seq_sect == this_sector in read_balance()
628 struct raid1_info *mirror = &conf->mirrors[disk]; in read_balance()
688 rdev = rcu_dereference(conf->mirrors[best_disk].rdev); in read_balance()
696 rdev_dec_pending(rdev, conf->mddev); in read_balance()
701 if (conf->mirrors[best_disk].next_seq_sect != this_sector) in read_balance()
702 conf->mirrors[best_disk].seq_start = this_sector; in read_balance()
704 conf->mirrors[best_disk].next_seq_sect = this_sector + sectors; in read_balance()
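
read_balance() picks which mirror services a read. The matches above show its two main heuristics: keep a sequential stream on the disk already serving it (next_seq_sect), otherwise choose the disk whose recorded head_position is closest to the target sector. A toy userspace rendering of just those two rules, ignoring the bad-block, write-mostly, and resync-window checks the real function also performs:

	#include <stdlib.h>

	struct mirror {
		long long head_position;	/* where the head last ended up */
		long long next_seq_sect;	/* next sector of a sequential stream */
	};

	/* Toy read_balance(): prefer the mirror already streaming
	 * sequentially, else the one with the shortest seek distance. */
	static int toy_read_balance(struct mirror *m, int ndisks,
				    long long this_sector, long long sectors)
	{
		long long best_dist = -1;
		int best = -1;

		for (int d = 0; d < ndisks; d++) {
			long long dist;

			if (m[d].next_seq_sect == this_sector) {
				best = d;	/* sequential: keep the streak */
				break;
			}
			dist = llabs(this_sector - m[d].head_position);
			if (best < 0 || dist < best_dist) {
				best_dist = dist;
				best = d;
			}
		}
		if (best >= 0)
			m[best].next_seq_sect = this_sector + sectors;
		return best;
	}
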
714 struct r1conf *conf = mddev->private; in raid1_congested() local
718 conf->pending_count >= max_queued_requests) in raid1_congested()
722 for (i = 0; i < conf->raid_disks * 2; i++) { in raid1_congested()
723 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in raid1_congested()
742 static void flush_pending_writes(struct r1conf *conf) in flush_pending_writes() argument
747 spin_lock_irq(&conf->device_lock); in flush_pending_writes()
749 if (conf->pending_bio_list.head) { in flush_pending_writes()
751 bio = bio_list_get(&conf->pending_bio_list); in flush_pending_writes()
752 conf->pending_count = 0; in flush_pending_writes()
753 spin_unlock_irq(&conf->device_lock); in flush_pending_writes()
756 bitmap_unplug(conf->mddev->bitmap); in flush_pending_writes()
757 wake_up(&conf->wait_barrier); in flush_pending_writes()
771 spin_unlock_irq(&conf->device_lock); in flush_pending_writes()
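
flush_pending_writes() illustrates a pattern worth noting: the whole pending list is detached in one step under device_lock, and the actual I/O is issued only after the lock is dropped, so submitters are never blocked behind disk writes. A minimal userspace sketch of that idiom, with a pthread mutex standing in for the spinlock and a toy bio type:

	#include <pthread.h>
	#include <stddef.h>

	struct bio { struct bio *next; };	/* toy stand-in */

	struct pending {
		pthread_mutex_t lock;	/* models conf->device_lock */
		struct bio *head;	/* models conf->pending_bio_list */
		int count;		/* models conf->pending_count */
	};

	/* Models flush_pending_writes(): steal the whole list while
	 * holding the lock, then issue the I/O with the lock dropped. */
	static void flush_pending(struct pending *p,
				  void (*submit)(struct bio *))
	{
		struct bio *bio;

		pthread_mutex_lock(&p->lock);
		bio = p->head;
		p->head = NULL;
		p->count = 0;
		pthread_mutex_unlock(&p->lock);
		/* the kernel also unplugs the write-intent bitmap and
		 * wakes wait_barrier at this point */
		while (bio) {
			struct bio *next = bio->next;
			submit(bio);
			bio = next;
		}
	}
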
795 static void raise_barrier(struct r1conf *conf, sector_t sector_nr) in raise_barrier() argument
797 spin_lock_irq(&conf->resync_lock); in raise_barrier()
800 wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting, in raise_barrier()
801 conf->resync_lock); in raise_barrier()
804 conf->barrier++; in raise_barrier()
805 conf->next_resync = sector_nr; in raise_barrier()
816 wait_event_lock_irq(conf->wait_barrier, in raise_barrier()
817 !conf->array_frozen && in raise_barrier()
818 conf->barrier < RESYNC_DEPTH && in raise_barrier()
819 conf->current_window_requests == 0 && in raise_barrier()
820 (conf->start_next_window >= in raise_barrier()
821 conf->next_resync + RESYNC_SECTORS), in raise_barrier()
822 conf->resync_lock); in raise_barrier()
824 conf->nr_pending++; in raise_barrier()
825 spin_unlock_irq(&conf->resync_lock); in raise_barrier()
828 static void lower_barrier(struct r1conf *conf) in lower_barrier() argument
831 BUG_ON(conf->barrier <= 0); in lower_barrier()
832 spin_lock_irqsave(&conf->resync_lock, flags); in lower_barrier()
833 conf->barrier--; in lower_barrier()
834 conf->nr_pending--; in lower_barrier()
835 spin_unlock_irqrestore(&conf->resync_lock, flags); in lower_barrier()
836 wake_up(&conf->wait_barrier); in lower_barrier()
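
raise_barrier()/lower_barrier() are the resync half of raid1's I/O barrier; wait_barrier()/allow_barrier() (matched next) are the normal-I/O half. The pthread model below is a sketch of the handshake, not the kernel code: it gates resync on nr_pending draining to zero, whereas the version referenced here instead consults the sliding-window counters (start_next_window and friends, see further down) and also counts the resync request itself in nr_pending:

	#include <pthread.h>

	#define RESYNC_DEPTH 32		/* max concurrently raised barriers */

	struct barrier_state {
		pthread_mutex_t lock;	/* models conf->resync_lock */
		pthread_cond_t  wait;	/* models conf->wait_barrier */
		int barrier;		/* resync requests holding the barrier */
		int nr_pending;		/* normal I/O in flight */
		int nr_waiting;		/* normal I/O parked behind the barrier */
		int nr_queued;		/* requests parked on retry lists; used
					 * by the freeze_array() sketch below */
		int array_frozen;	/* set by freeze_array() */
	};

	static void raise_barrier(struct barrier_state *b)
	{
		pthread_mutex_lock(&b->lock);
		/* let normal I/O that is already waiting go first */
		while (b->nr_waiting)
			pthread_cond_wait(&b->wait, &b->lock);
		/* block new normal I/O ... */
		b->barrier++;
		/* ... and wait for in-flight normal I/O to drain */
		while (b->nr_pending || b->barrier >= RESYNC_DEPTH ||
		       b->array_frozen)
			pthread_cond_wait(&b->wait, &b->lock);
		pthread_mutex_unlock(&b->lock);
	}

	static void lower_barrier(struct barrier_state *b)
	{
		pthread_mutex_lock(&b->lock);
		b->barrier--;
		pthread_mutex_unlock(&b->lock);
		pthread_cond_broadcast(&b->wait);
	}

	static void wait_barrier(struct barrier_state *b)
	{
		pthread_mutex_lock(&b->lock);
		if (b->barrier || b->array_frozen) {
			b->nr_waiting++;
			while (b->barrier || b->array_frozen)
				pthread_cond_wait(&b->wait, &b->lock);
			b->nr_waiting--;
		}
		b->nr_pending++;
		pthread_mutex_unlock(&b->lock);
	}

	static void allow_barrier(struct barrier_state *b)
	{
		pthread_mutex_lock(&b->lock);
		b->nr_pending--;
		pthread_mutex_unlock(&b->lock);
		pthread_cond_broadcast(&b->wait);
	}
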
839 static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio) in need_to_wait_for_sync() argument
843 if (conf->array_frozen || !bio) in need_to_wait_for_sync()
845 else if (conf->barrier && bio_data_dir(bio) == WRITE) { in need_to_wait_for_sync()
846 if ((conf->mddev->curr_resync_completed in need_to_wait_for_sync()
848 (conf->next_resync + NEXT_NORMALIO_DISTANCE in need_to_wait_for_sync()
858 static sector_t wait_barrier(struct r1conf *conf, struct bio *bio) in wait_barrier() argument
862 spin_lock_irq(&conf->resync_lock); in wait_barrier()
863 if (need_to_wait_for_sync(conf, bio)) { in wait_barrier()
864 conf->nr_waiting++; in wait_barrier()
874 wait_event_lock_irq(conf->wait_barrier, in wait_barrier()
875 !conf->array_frozen && in wait_barrier()
876 (!conf->barrier || in wait_barrier()
877 ((conf->start_next_window < in wait_barrier()
878 conf->next_resync + RESYNC_SECTORS) && in wait_barrier()
881 conf->resync_lock); in wait_barrier()
882 conf->nr_waiting--; in wait_barrier()
886 if (bio->bi_iter.bi_sector >= conf->next_resync) { in wait_barrier()
887 if (conf->start_next_window == MaxSector) in wait_barrier()
888 conf->start_next_window = in wait_barrier()
889 conf->next_resync + in wait_barrier()
892 if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE) in wait_barrier()
894 conf->next_window_requests++; in wait_barrier()
896 conf->current_window_requests++; in wait_barrier()
897 sector = conf->start_next_window; in wait_barrier()
901 conf->nr_pending++; in wait_barrier()
902 spin_unlock_irq(&conf->resync_lock); in wait_barrier()
906 static void allow_barrier(struct r1conf *conf, sector_t start_next_window, in allow_barrier() argument
911 spin_lock_irqsave(&conf->resync_lock, flags); in allow_barrier()
912 conf->nr_pending--; in allow_barrier()
914 if (start_next_window == conf->start_next_window) { in allow_barrier()
915 if (conf->start_next_window + NEXT_NORMALIO_DISTANCE in allow_barrier()
917 conf->next_window_requests--; in allow_barrier()
919 conf->current_window_requests--; in allow_barrier()
921 conf->current_window_requests--; in allow_barrier()
923 if (!conf->current_window_requests) { in allow_barrier()
924 if (conf->next_window_requests) { in allow_barrier()
925 conf->current_window_requests = in allow_barrier()
926 conf->next_window_requests; in allow_barrier()
927 conf->next_window_requests = 0; in allow_barrier()
928 conf->start_next_window += in allow_barrier()
931 conf->start_next_window = MaxSector; in allow_barrier()
934 spin_unlock_irqrestore(&conf->resync_lock, flags); in allow_barrier()
935 wake_up(&conf->wait_barrier); in allow_barrier()
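
allow_barrier() also maintains the sliding window that lets normal writes and resync coexist: writes landing in the window just ahead of the resync point are counted in current_window_requests, writes one window further out in next_window_requests. When the near window drains, the far one is promoted and start_next_window advances, letting raise_barrier() move on. A self-contained restatement of that hand-off; the NEXT_NORMALIO_DISTANCE value below is a placeholder, not the kernel's:

	#include <limits.h>

	#define NEXT_NORMALIO_DISTANCE	(128ULL * 1024)	/* placeholder; raid1.h
							 * defines the real value */
	#define MaxSector		ULLONG_MAX

	struct resync_window {
		unsigned long long start_next_window;
		int current_window_requests;	/* writes nearest the resync point */
		int next_window_requests;	/* writes one window further out */
	};

	/* Models the tail of allow_barrier(): promote the far window
	 * once the near one has drained. */
	static void window_handoff(struct resync_window *w)
	{
		if (w->current_window_requests)
			return;
		if (w->next_window_requests) {
			w->current_window_requests = w->next_window_requests;
			w->next_window_requests = 0;
			w->start_next_window += NEXT_NORMALIO_DISTANCE;
		} else {
			/* no normal I/O ahead of the resync point at all */
			w->start_next_window = MaxSector;
		}
	}
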
938 static void freeze_array(struct r1conf *conf, int extra) in freeze_array() argument
951 spin_lock_irq(&conf->resync_lock); in freeze_array()
952 conf->array_frozen = 1; in freeze_array()
953 wait_event_lock_irq_cmd(conf->wait_barrier, in freeze_array()
954 conf->nr_pending == conf->nr_queued+extra, in freeze_array()
955 conf->resync_lock, in freeze_array()
956 flush_pending_writes(conf)); in freeze_array()
957 spin_unlock_irq(&conf->resync_lock); in freeze_array()
959 static void unfreeze_array(struct r1conf *conf) in unfreeze_array() argument
962 spin_lock_irq(&conf->resync_lock); in unfreeze_array()
963 conf->array_frozen = 0; in unfreeze_array()
964 wake_up(&conf->wait_barrier); in unfreeze_array()
965 spin_unlock_irq(&conf->resync_lock); in unfreeze_array()
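
freeze_array() quiesces the array: with array_frozen set, wait_barrier() admits nothing new, and the function waits until every in-flight request has either completed or been parked on a retry list (nr_pending == nr_queued + extra, where extra covers requests the caller itself still holds), running flush_pending_writes() while waiting so queued writes can drain. Extending the barrier_state sketch above, and again only as a model:

	/* Models freeze_array()/unfreeze_array(); reuses struct
	 * barrier_state from the barrier sketch. */
	static void freeze_array(struct barrier_state *b, int extra)
	{
		pthread_mutex_lock(&b->lock);
		b->array_frozen = 1;
		/* a real reschedule_retry() would bump nr_queued and
		 * broadcast; the kernel also flushes pending writes
		 * while waiting so this count can actually converge */
		while (b->nr_pending != b->nr_queued + extra)
			pthread_cond_wait(&b->wait, &b->lock);
		pthread_mutex_unlock(&b->lock);
	}

	static void unfreeze_array(struct barrier_state *b)
	{
		pthread_mutex_lock(&b->lock);
		b->array_frozen = 0;
		pthread_mutex_unlock(&b->lock);
		pthread_cond_broadcast(&b->wait);
	}
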
1014 struct r1conf *conf = mddev->private; in raid1_unplug() local
1018 spin_lock_irq(&conf->device_lock); in raid1_unplug()
1019 bio_list_merge(&conf->pending_bio_list, &plug->pending); in raid1_unplug()
1020 conf->pending_count += plug->pending_cnt; in raid1_unplug()
1021 spin_unlock_irq(&conf->device_lock); in raid1_unplug()
1022 wake_up(&conf->wait_barrier); in raid1_unplug()
1031 wake_up(&conf->wait_barrier); in raid1_unplug()
1049 struct r1conf *conf = mddev->private; in make_request() local
1091 prepare_to_wait(&conf->wait_barrier, in make_request()
1101 finish_wait(&conf->wait_barrier, &w); in make_request()
1104 start_next_window = wait_barrier(conf, bio); in make_request()
1113 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); in make_request()
1138 rdisk = read_balance(conf, r1_bio, &max_sectors); in make_request()
1145 mirror = conf->mirrors + rdisk; in make_request()
1180 spin_lock_irq(&conf->device_lock); in make_request()
1185 spin_unlock_irq(&conf->device_lock); in make_request()
1193 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); in make_request()
1210 if (conf->pending_count >= max_queued_requests) { in make_request()
1212 wait_event(conf->wait_barrier, in make_request()
1213 conf->pending_count < max_queued_requests); in make_request()
1226 disks = conf->raid_disks * 2; in make_request()
1233 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in make_request()
1241 if (i < conf->raid_disks) in make_request()
1300 rdev_dec_pending(conf->mirrors[j].rdev, mddev); in make_request()
1302 allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector); in make_request()
1304 start_next_window = wait_barrier(conf, bio); in make_request()
1312 wait_event(conf->wait_barrier, in make_request()
1322 spin_lock_irq(&conf->device_lock); in make_request()
1327 spin_unlock_irq(&conf->device_lock); in make_request()
1369 if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags)) in make_request()
1376 conf->mirrors[i].rdev->data_offset); in make_request()
1377 mbio->bi_bdev = conf->mirrors[i].rdev->bdev; in make_request()
1390 spin_lock_irqsave(&conf->device_lock, flags); in make_request()
1395 bio_list_add(&conf->pending_bio_list, mbio); in make_request()
1396 conf->pending_count++; in make_request()
1398 spin_unlock_irqrestore(&conf->device_lock, flags); in make_request()
1410 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); in make_request()
1422 wake_up(&conf->wait_barrier); in make_request()
1427 struct r1conf *conf = mddev->private; in status() local
1430 seq_printf(seq, " [%d/%d] [", conf->raid_disks, in status()
1431 conf->raid_disks - mddev->degraded); in status()
1433 for (i = 0; i < conf->raid_disks; i++) { in status()
1434 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in status()
1445 struct r1conf *conf = mddev->private; in error() local
1455 && (conf->raid_disks - mddev->degraded) == 1) { in error()
1462 conf->recovery_disabled = mddev->recovery_disabled; in error()
1466 spin_lock_irqsave(&conf->device_lock, flags); in error()
1472 spin_unlock_irqrestore(&conf->device_lock, flags); in error()
1483 mdname(mddev), conf->raid_disks - mddev->degraded); in error()
1486 static void print_conf(struct r1conf *conf) in print_conf() argument
1491 if (!conf) { in print_conf()
1495 printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded, in print_conf()
1496 conf->raid_disks); in print_conf()
1499 for (i = 0; i < conf->raid_disks; i++) { in print_conf()
1501 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in print_conf()
1511 static void close_sync(struct r1conf *conf) in close_sync() argument
1513 wait_barrier(conf, NULL); in close_sync()
1514 allow_barrier(conf, 0, 0); in close_sync()
1516 mempool_destroy(conf->r1buf_pool); in close_sync()
1517 conf->r1buf_pool = NULL; in close_sync()
1519 spin_lock_irq(&conf->resync_lock); in close_sync()
1520 conf->next_resync = MaxSector - 2 * NEXT_NORMALIO_DISTANCE; in close_sync()
1521 conf->start_next_window = MaxSector; in close_sync()
1522 conf->current_window_requests += in close_sync()
1523 conf->next_window_requests; in close_sync()
1524 conf->next_window_requests = 0; in close_sync()
1525 spin_unlock_irq(&conf->resync_lock); in close_sync()
1531 struct r1conf *conf = mddev->private; in raid1_spare_active() local
1542 spin_lock_irqsave(&conf->device_lock, flags); in raid1_spare_active()
1543 for (i = 0; i < conf->raid_disks; i++) { in raid1_spare_active()
1544 struct md_rdev *rdev = conf->mirrors[i].rdev; in raid1_spare_active()
1545 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev; in raid1_spare_active()
1574 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_spare_active()
1576 print_conf(conf); in raid1_spare_active()
1582 struct r1conf *conf = mddev->private; in raid1_add_disk() local
1587 int last = conf->raid_disks - 1; in raid1_add_disk()
1589 if (mddev->recovery_disabled == conf->recovery_disabled) in raid1_add_disk()
1604 conf->mirrors[rdev->saved_raid_disk].rdev == NULL) in raid1_add_disk()
1608 p = conf->mirrors+mirror; in raid1_add_disk()
1622 conf->fullsync = 1; in raid1_add_disk()
1627 p[conf->raid_disks].rdev == NULL) { in raid1_add_disk()
1633 conf->fullsync = 1; in raid1_add_disk()
1634 rcu_assign_pointer(p[conf->raid_disks].rdev, rdev); in raid1_add_disk()
1640 print_conf(conf); in raid1_add_disk()
1646 struct r1conf *conf = mddev->private; in raid1_remove_disk() local
1649 struct raid1_info *p = conf->mirrors + number; in raid1_remove_disk()
1652 p = conf->mirrors + conf->raid_disks + number; in raid1_remove_disk()
1654 print_conf(conf); in raid1_remove_disk()
1665 mddev->recovery_disabled != conf->recovery_disabled && in raid1_remove_disk()
1666 mddev->degraded < conf->raid_disks) { in raid1_remove_disk()
1677 } else if (conf->mirrors[conf->raid_disks + number].rdev) { in raid1_remove_disk()
1683 conf->mirrors[conf->raid_disks + number].rdev; in raid1_remove_disk()
1684 freeze_array(conf, 0); in raid1_remove_disk()
1687 conf->mirrors[conf->raid_disks + number].rdev = NULL; in raid1_remove_disk()
1688 unfreeze_array(conf); in raid1_remove_disk()
1696 print_conf(conf); in raid1_remove_disk()
1723 struct r1conf *conf = mddev->private; in end_sync_write() local
1742 &conf->mirrors[mirror].rdev->flags); in end_sync_write()
1744 &conf->mirrors[mirror].rdev->flags)) in end_sync_write()
1748 } else if (is_badblock(conf->mirrors[mirror].rdev, in end_sync_write()
1752 !is_badblock(conf->mirrors[r1_bio->read_disk].rdev, in end_sync_write()
1804 struct r1conf *conf = mddev->private; in fix_sync_read_error() local
1825 rdev = conf->mirrors[d].rdev; in fix_sync_read_error()
1834 if (d == conf->raid_disks * 2) in fix_sync_read_error()
1851 for (d = 0; d < conf->raid_disks * 2; d++) { in fix_sync_read_error()
1852 rdev = conf->mirrors[d].rdev; in fix_sync_read_error()
1859 conf->recovery_disabled = in fix_sync_read_error()
1877 d = conf->raid_disks * 2; in fix_sync_read_error()
1881 rdev = conf->mirrors[d].rdev; in fix_sync_read_error()
1892 d = conf->raid_disks * 2; in fix_sync_read_error()
1896 rdev = conf->mirrors[d].rdev; in fix_sync_read_error()
1921 struct r1conf *conf = mddev->private; in process_checks() local
1928 for (i = 0; i < conf->raid_disks * 2; i++) { in process_checks()
1942 conf->mirrors[i].rdev->data_offset; in process_checks()
1943 b->bi_bdev = conf->mirrors[i].rdev->bdev; in process_checks()
1959 for (primary = 0; primary < conf->raid_disks * 2; primary++) in process_checks()
1963 rdev_dec_pending(conf->mirrors[primary].rdev, mddev); in process_checks()
1967 for (i = 0; i < conf->raid_disks * 2; i++) { in process_checks()
1996 rdev_dec_pending(conf->mirrors[i].rdev, mddev); in process_checks()
2006 struct r1conf *conf = mddev->private; in sync_request_write() local
2008 int disks = conf->raid_disks * 2; in sync_request_write()
2036 md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio)); in sync_request_write()
2062 static void fix_read_error(struct r1conf *conf, int read_disk, in fix_read_error() argument
2065 struct mddev *mddev = conf->mddev; in fix_read_error()
2085 rdev = conf->mirrors[d].rdev; in fix_read_error()
2093 conf->tmppage, READ, false)) in fix_read_error()
2097 if (d == conf->raid_disks * 2) in fix_read_error()
2104 struct md_rdev *rdev = conf->mirrors[read_disk].rdev; in fix_read_error()
2113 d = conf->raid_disks * 2; in fix_read_error()
2115 rdev = conf->mirrors[d].rdev; in fix_read_error()
2119 conf->tmppage, WRITE); in fix_read_error()
2125 d = conf->raid_disks * 2; in fix_read_error()
2127 rdev = conf->mirrors[d].rdev; in fix_read_error()
2131 conf->tmppage, READ)) { in fix_read_error()
2151 struct r1conf *conf = mddev->private; in narrow_write_error() local
2152 struct md_rdev *rdev = conf->mirrors[i].rdev; in narrow_write_error()
2225 static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio) in handle_sync_write_finished() argument
2229 for (m = 0; m < conf->raid_disks * 2 ; m++) { in handle_sync_write_finished()
2230 struct md_rdev *rdev = conf->mirrors[m].rdev; in handle_sync_write_finished()
2241 md_error(conf->mddev, rdev); in handle_sync_write_finished()
2245 md_done_sync(conf->mddev, s, 1); in handle_sync_write_finished()
2248 static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio) in handle_write_finished() argument
2252 for (m = 0; m < conf->raid_disks * 2 ; m++) in handle_write_finished()
2254 struct md_rdev *rdev = conf->mirrors[m].rdev; in handle_write_finished()
2258 rdev_dec_pending(rdev, conf->mddev); in handle_write_finished()
2266 md_error(conf->mddev, in handle_write_finished()
2267 conf->mirrors[m].rdev); in handle_write_finished()
2271 rdev_dec_pending(conf->mirrors[m].rdev, in handle_write_finished()
2272 conf->mddev); in handle_write_finished()
2275 spin_lock_irq(&conf->device_lock); in handle_write_finished()
2276 list_add(&r1_bio->retry_list, &conf->bio_end_io_list); in handle_write_finished()
2277 conf->nr_queued++; in handle_write_finished()
2278 spin_unlock_irq(&conf->device_lock); in handle_write_finished()
2279 md_wakeup_thread(conf->mddev->thread); in handle_write_finished()
2287 static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio) in handle_read_error() argument
2291 struct mddev *mddev = conf->mddev; in handle_read_error()
2306 freeze_array(conf, 1); in handle_read_error()
2307 fix_read_error(conf, r1_bio->read_disk, in handle_read_error()
2309 unfreeze_array(conf); in handle_read_error()
2311 md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev); in handle_read_error()
2312 rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev); in handle_read_error()
2317 disk = read_balance(conf, r1_bio, &max_sectors); in handle_read_error()
2336 rdev = conf->mirrors[disk].rdev; in handle_read_error()
2354 spin_lock_irq(&conf->device_lock); in handle_read_error()
2359 spin_unlock_irq(&conf->device_lock); in handle_read_error()
2363 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); in handle_read_error()
2384 struct r1conf *conf = mddev->private; in raid1d() local
2385 struct list_head *head = &conf->retry_list; in raid1d()
2390 if (!list_empty_careful(&conf->bio_end_io_list) && in raid1d()
2393 spin_lock_irqsave(&conf->device_lock, flags); in raid1d()
2395 while (!list_empty(&conf->bio_end_io_list)) { in raid1d()
2396 list_move(conf->bio_end_io_list.prev, &tmp); in raid1d()
2397 conf->nr_queued--; in raid1d()
2400 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1d()
2416 flush_pending_writes(conf); in raid1d()
2418 spin_lock_irqsave(&conf->device_lock, flags); in raid1d()
2420 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1d()
2425 conf->nr_queued--; in raid1d()
2426 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1d()
2429 conf = mddev->private; in raid1d()
2433 handle_sync_write_finished(conf, r1_bio); in raid1d()
2438 handle_write_finished(conf, r1_bio); in raid1d()
2440 handle_read_error(conf, r1_bio); in raid1d()
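
raid1d is the per-array worker thread: it drains bio_end_io_list, flushes pending writes, then pops r1bio's off retry_list under device_lock and dispatches on their state with the lock dropped. A skeleton of one iteration, with the handlers stubbed out (the real ones are the handle_* functions matched above) and a toy r1bio type:

	#include <pthread.h>
	#include <stddef.h>

	enum r1_state { R1_SYNC_WRITE, R1_WRITE_ERR, R1_READ_ERR };

	struct r1bio_toy {
		struct r1bio_toy *next;
		enum r1_state state;
	};

	struct retry_queue {
		pthread_mutex_t lock;	/* models conf->device_lock */
		struct r1bio_toy *head;	/* models conf->retry_list */
		int nr_queued;
	};

	/* Stubs standing in for the handlers referenced above. */
	static void handle_sync_write_finished(struct r1bio_toy *r1) { (void)r1; }
	static void handle_write_finished(struct r1bio_toy *r1) { (void)r1; }
	static void handle_read_error(struct r1bio_toy *r1) { (void)r1; }

	/* One pass of the raid1d loop: dequeue under the lock, handle
	 * with the lock dropped. */
	static void raid1d_once(struct retry_queue *q)
	{
		struct r1bio_toy *r1;

		pthread_mutex_lock(&q->lock);
		r1 = q->head;
		if (r1) {
			q->head = r1->next;
			q->nr_queued--;
		}
		pthread_mutex_unlock(&q->lock);
		if (!r1)
			return;
		switch (r1->state) {
		case R1_SYNC_WRITE: handle_sync_write_finished(r1); break;
		case R1_WRITE_ERR:  handle_write_finished(r1); break;
		case R1_READ_ERR:   handle_read_error(r1); break;
		}
	}
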
2454 static int init_resync(struct r1conf *conf) in init_resync() argument
2459 BUG_ON(conf->r1buf_pool); in init_resync()
2460 conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free, in init_resync()
2461 conf->poolinfo); in init_resync()
2462 if (!conf->r1buf_pool) in init_resync()
2464 conf->next_resync = 0; in init_resync()
2480 struct r1conf *conf = mddev->private; in sync_request() local
2493 if (!conf->r1buf_pool) in sync_request()
2494 if (init_resync(conf)) in sync_request()
2508 conf->fullsync = 0; in sync_request()
2511 close_sync(conf); in sync_request()
2514 conf->cluster_sync_low = 0; in sync_request()
2515 conf->cluster_sync_high = 0; in sync_request()
2523 conf->fullsync == 0) { in sync_request()
2531 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { in sync_request()
2542 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); in sync_request()
2543 r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO); in sync_request()
2545 raise_barrier(conf, sector_nr); in sync_request()
2562 for (i = 0; i < conf->raid_disks * 2; i++) { in sync_request()
2567 rdev = rcu_dereference(conf->mirrors[i].rdev); in sync_request()
2570 if (i < conf->raid_disks) in sync_request()
2634 for (i = 0 ; i < conf->raid_disks * 2 ; i++) in sync_request()
2636 struct md_rdev *rdev = conf->mirrors[i].rdev; in sync_request()
2651 conf->recovery_disabled = mddev->recovery_disabled; in sync_request()
2697 !conf->fullsync && in sync_request()
2705 for (i = 0 ; i < conf->raid_disks * 2; i++) { in sync_request()
2734 conf->cluster_sync_high < sector_nr + nr_sectors) { in sync_request()
2735 conf->cluster_sync_low = mddev->curr_resync_completed; in sync_request()
2736 conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS; in sync_request()
2739 conf->cluster_sync_low, in sync_request()
2740 conf->cluster_sync_high); in sync_request()
2748 for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) { in sync_request()
2776 struct r1conf *conf; in setup_conf() local
2782 conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL); in setup_conf()
2783 if (!conf) in setup_conf()
2786 conf->mirrors = kzalloc(sizeof(struct raid1_info) in setup_conf()
2789 if (!conf->mirrors) in setup_conf()
2792 conf->tmppage = alloc_page(GFP_KERNEL); in setup_conf()
2793 if (!conf->tmppage) in setup_conf()
2796 conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL); in setup_conf()
2797 if (!conf->poolinfo) in setup_conf()
2799 conf->poolinfo->raid_disks = mddev->raid_disks * 2; in setup_conf()
2800 conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc, in setup_conf()
2802 conf->poolinfo); in setup_conf()
2803 if (!conf->r1bio_pool) in setup_conf()
2806 conf->poolinfo->mddev = mddev; in setup_conf()
2809 spin_lock_init(&conf->device_lock); in setup_conf()
2817 disk = conf->mirrors + mddev->raid_disks + disk_idx; in setup_conf()
2819 disk = conf->mirrors + disk_idx; in setup_conf()
2829 conf->raid_disks = mddev->raid_disks; in setup_conf()
2830 conf->mddev = mddev; in setup_conf()
2831 INIT_LIST_HEAD(&conf->retry_list); in setup_conf()
2832 INIT_LIST_HEAD(&conf->bio_end_io_list); in setup_conf()
2834 spin_lock_init(&conf->resync_lock); in setup_conf()
2835 init_waitqueue_head(&conf->wait_barrier); in setup_conf()
2837 bio_list_init(&conf->pending_bio_list); in setup_conf()
2838 conf->pending_count = 0; in setup_conf()
2839 conf->recovery_disabled = mddev->recovery_disabled - 1; in setup_conf()
2841 conf->start_next_window = MaxSector; in setup_conf()
2842 conf->current_window_requests = conf->next_window_requests = 0; in setup_conf()
2845 for (i = 0; i < conf->raid_disks * 2; i++) { in setup_conf()
2847 disk = conf->mirrors + i; in setup_conf()
2849 if (i < conf->raid_disks && in setup_conf()
2850 disk[conf->raid_disks].rdev) { in setup_conf()
2857 disk[conf->raid_disks].rdev; in setup_conf()
2858 disk[conf->raid_disks].rdev = NULL; in setup_conf()
2869 conf->fullsync = 1; in setup_conf()
2874 conf->thread = md_register_thread(raid1d, mddev, "raid1"); in setup_conf()
2875 if (!conf->thread) { in setup_conf()
2882 return conf; in setup_conf()
2885 if (conf) { in setup_conf()
2886 mempool_destroy(conf->r1bio_pool); in setup_conf()
2887 kfree(conf->mirrors); in setup_conf()
2888 safe_put_page(conf->tmppage); in setup_conf()
2889 kfree(conf->poolinfo); in setup_conf()
2890 kfree(conf); in setup_conf()
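
setup_conf() allocates the r1conf and its satellites (mirrors array, tmppage, poolinfo, r1bio_pool, worker thread) and funnels every failure to a single abort label that releases whatever was already obtained; kfree(), mempool_destroy(), and safe_put_page() all tolerate NULL, so no per-step bookkeeping is needed. The same idiom in plain userspace C, with purely illustrative names and sizes:

	#include <stdlib.h>

	struct toy_conf {
		void *mirrors;
		void *tmppage;
		void *poolinfo;
		void *r1bio_pool;
	};

	/* Models setup_conf()'s unwind: each allocation failure jumps to
	 * one label; free(NULL) is a no-op, so partial setups are safe. */
	static struct toy_conf *toy_setup_conf(void)
	{
		struct toy_conf *conf = calloc(1, sizeof(*conf));

		if (!conf)
			goto abort;
		conf->mirrors = calloc(4, sizeof(void *));
		if (!conf->mirrors)
			goto abort;
		conf->tmppage = malloc(4096);
		if (!conf->tmppage)
			goto abort;
		conf->poolinfo = calloc(1, sizeof(int));
		if (!conf->poolinfo)
			goto abort;
		return conf;

	abort:
		if (conf) {
			free(conf->poolinfo);
			free(conf->tmppage);
			free(conf->mirrors);
			free(conf);
		}
		return NULL;
	}
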
2898 struct r1conf *conf; in run() local
2920 conf = setup_conf(mddev); in run()
2922 conf = mddev->private; in run()
2924 if (IS_ERR(conf)) in run()
2925 return PTR_ERR(conf); in run()
2940 for (i=0; i < conf->raid_disks; i++) in run()
2941 if (conf->mirrors[i].rdev == NULL || in run()
2942 !test_bit(In_sync, &conf->mirrors[i].rdev->flags) || in run()
2943 test_bit(Faulty, &conf->mirrors[i].rdev->flags)) in run()
2946 if (conf->raid_disks - mddev->degraded == 1) in run()
2961 mddev->thread = conf->thread; in run()
2962 conf->thread = NULL; in run()
2963 mddev->private = conf; in run()
2979 raid1_free(mddev, conf); in run()
2986 struct r1conf *conf = priv; in raid1_free() local
2988 mempool_destroy(conf->r1bio_pool); in raid1_free()
2989 kfree(conf->mirrors); in raid1_free()
2990 safe_put_page(conf->tmppage); in raid1_free()
2991 kfree(conf->poolinfo); in raid1_free()
2992 kfree(conf); in raid1_free()
3042 struct r1conf *conf = mddev->private; in raid1_reshape() local
3065 if (raid_disks < conf->raid_disks) { in raid1_reshape()
3067 for (d= 0; d < conf->raid_disks; d++) in raid1_reshape()
3068 if (conf->mirrors[d].rdev) in raid1_reshape()
3094 freeze_array(conf, 0); in raid1_reshape()
3097 oldpool = conf->r1bio_pool; in raid1_reshape()
3098 conf->r1bio_pool = newpool; in raid1_reshape()
3100 for (d = d2 = 0; d < conf->raid_disks; d++) { in raid1_reshape()
3101 struct md_rdev *rdev = conf->mirrors[d].rdev; in raid1_reshape()
3114 kfree(conf->mirrors); in raid1_reshape()
3115 conf->mirrors = newmirrors; in raid1_reshape()
3116 kfree(conf->poolinfo); in raid1_reshape()
3117 conf->poolinfo = newpoolinfo; in raid1_reshape()
3119 spin_lock_irqsave(&conf->device_lock, flags); in raid1_reshape()
3120 mddev->degraded += (raid_disks - conf->raid_disks); in raid1_reshape()
3121 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_reshape()
3122 conf->raid_disks = mddev->raid_disks = raid_disks; in raid1_reshape()
3125 unfreeze_array(conf); in raid1_reshape()
3137 struct r1conf *conf = mddev->private; in raid1_quiesce() local
3141 wake_up(&conf->wait_barrier); in raid1_quiesce()
3144 freeze_array(conf, 0); in raid1_quiesce()
3147 unfreeze_array(conf); in raid1_quiesce()
3158 struct r1conf *conf; in raid1_takeover() local
3162 conf = setup_conf(mddev); in raid1_takeover()
3163 if (!IS_ERR(conf)) in raid1_takeover()
3165 conf->array_frozen = 1; in raid1_takeover()
3166 return conf; in raid1_takeover()