Lines Matching refs:conf
(Identifier cross-reference: every line in the md RAID10 driver, drivers/md/raid10.c, that references the per-array configuration pointer conf, a struct r10conf *. Each entry gives the source line number, the matching line, and the enclosing function; a trailing "local" or "argument" marks the line where conf is declared in that function.)

99 static void allow_barrier(struct r10conf *conf);
100 static void lower_barrier(struct r10conf *conf);
101 static int _enough(struct r10conf *conf, int previous, int ignore);
106 static void end_reshape(struct r10conf *conf);
110 struct r10conf *conf = data; in r10bio_pool_alloc() local
111 int size = offsetof(struct r10bio, devs[conf->copies]); in r10bio_pool_alloc()
140 struct r10conf *conf = data; in r10buf_pool_alloc() local
147 r10_bio = r10bio_pool_alloc(gfp_flags, conf); in r10buf_pool_alloc()
151 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) || in r10buf_pool_alloc()
152 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery)) in r10buf_pool_alloc()
153 nalloc = conf->copies; /* resync */ in r10buf_pool_alloc()
165 if (!conf->have_replacement) in r10buf_pool_alloc()
181 &conf->mddev->recovery)) { in r10buf_pool_alloc()
214 r10bio_pool_free(r10_bio, conf); in r10buf_pool_alloc()
221 struct r10conf *conf = data; in r10buf_pool_free() local
225 for (j=0; j < conf->copies; j++) { in r10buf_pool_free()
238 r10bio_pool_free(r10bio, conf); in r10buf_pool_free()
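The pool references above hand conf to the allocation callbacks as the pool's private-data pointer, so each r10bio can be sized by the geometry's copy count. A minimal userspace sketch of that callback pattern follows; the my_ names are hypothetical, plain calloc/free stand in for the kernel mempool, and the kernel expresses the size as offsetof(struct r10bio, devs[conf->copies]).

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct my_dev_slot { int devnum; long addr; };

struct my_r10bio {
        long sector;
        struct my_dev_slot devs[];      /* one slot per copy, sized at allocation time */
};

struct my_conf { int copies; };

/* models r10bio_pool_alloc(): the allocation size depends on conf->copies */
static void *my_bio_alloc(void *data)
{
        struct my_conf *conf = data;
        size_t size = sizeof(struct my_r10bio)
                    + (size_t)conf->copies * sizeof(struct my_dev_slot);

        return calloc(1, size);
}

/* models r10bio_pool_free(): conf comes back as private data but is not needed */
static void my_bio_free(void *element, void *data)
{
        (void)data;
        free(element);
}

int main(void)
{
        struct my_conf conf = { .copies = 2 };
        struct my_r10bio *bio = my_bio_alloc(&conf);

        printf("allocated a bio with room for %d copies\n", conf.copies);
        my_bio_free(bio, &conf);
        return 0;
}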
241 static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio) in put_all_bios() argument
245 for (i = 0; i < conf->copies; i++) { in put_all_bios()
259 struct r10conf *conf = r10_bio->mddev->private; in free_r10bio() local
261 put_all_bios(conf, r10_bio); in free_r10bio()
262 mempool_free(r10_bio, conf->r10bio_pool); in free_r10bio()
267 struct r10conf *conf = r10_bio->mddev->private; in put_buf() local
269 mempool_free(r10_bio, conf->r10buf_pool); in put_buf()
271 lower_barrier(conf); in put_buf()
278 struct r10conf *conf = mddev->private; in reschedule_retry() local
280 spin_lock_irqsave(&conf->device_lock, flags); in reschedule_retry()
281 list_add(&r10_bio->retry_list, &conf->retry_list); in reschedule_retry()
282 conf->nr_queued ++; in reschedule_retry()
283 spin_unlock_irqrestore(&conf->device_lock, flags); in reschedule_retry()
286 wake_up(&conf->wait_barrier); in reschedule_retry()
300 struct r10conf *conf = r10_bio->mddev->private; in raid_end_bio_io() local
304 spin_lock_irqsave(&conf->device_lock, flags); in raid_end_bio_io()
307 spin_unlock_irqrestore(&conf->device_lock, flags); in raid_end_bio_io()
318 allow_barrier(conf); in raid_end_bio_io()
328 struct r10conf *conf = r10_bio->mddev->private; in update_head_pos() local
330 conf->mirrors[r10_bio->devs[slot].devnum].head_position = in update_head_pos()
337 static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio, in find_bio_disk() argument
343 for (slot = 0; slot < conf->copies; slot++) { in find_bio_disk()
352 BUG_ON(slot == conf->copies); in find_bio_disk()
368 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_read_request() local
395 if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state), in raid10_end_read_request()
401 rdev_dec_pending(rdev, conf->mddev); in raid10_end_read_request()
409 mdname(conf->mddev), in raid10_end_read_request()
447 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_write_request() local
451 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in raid10_end_write_request()
454 rdev = conf->mirrors[dev].replacement; in raid10_end_write_request()
458 rdev = conf->mirrors[dev].rdev; in raid10_end_write_request()
524 rdev_dec_pending(rdev, conf->mddev); in raid10_end_write_request()
615 static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio) in raid10_find_phys() argument
617 struct geom *geo = &conf->geo; in raid10_find_phys()
619 if (conf->reshape_progress != MaxSector && in raid10_find_phys()
620 ((r10bio->sector >= conf->reshape_progress) != in raid10_find_phys()
621 conf->mddev->reshape_backwards)) { in raid10_find_phys()
623 geo = &conf->prev; in raid10_find_phys()
630 static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev) in raid10_find_virt() argument
636 struct geom *geo = &conf->geo; in raid10_find_virt()
694 static struct md_rdev *read_balance(struct r10conf *conf, in read_balance() argument
706 struct geom *geo = &conf->geo; in read_balance()
708 raid10_find_phys(conf, r10_bio); in read_balance()
723 if (conf->mddev->recovery_cp < MaxSector in read_balance()
724 && (this_sector + sectors >= conf->next_resync)) in read_balance()
727 for (slot = 0; slot < conf->copies ; slot++) { in read_balance()
735 rdev = rcu_dereference(conf->mirrors[disk].replacement); in read_balance()
738 rdev = rcu_dereference(conf->mirrors[disk].rdev); in read_balance()
793 conf->mirrors[disk].head_position); in read_balance()
800 if (slot >= conf->copies) { in read_balance()
811 rdev_dec_pending(rdev, conf->mddev); in read_balance()
825 struct r10conf *conf = mddev->private; in raid10_congested() local
829 conf->pending_count >= max_queued_requests) in raid10_congested()
834 (i < conf->geo.raid_disks || i < conf->prev.raid_disks) in raid10_congested()
837 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in raid10_congested()
848 static void flush_pending_writes(struct r10conf *conf) in flush_pending_writes() argument
853 spin_lock_irq(&conf->device_lock); in flush_pending_writes()
855 if (conf->pending_bio_list.head) { in flush_pending_writes()
857 bio = bio_list_get(&conf->pending_bio_list); in flush_pending_writes()
858 conf->pending_count = 0; in flush_pending_writes()
859 spin_unlock_irq(&conf->device_lock); in flush_pending_writes()
862 bitmap_unplug(conf->mddev->bitmap); in flush_pending_writes()
863 wake_up(&conf->wait_barrier); in flush_pending_writes()
877 spin_unlock_irq(&conf->device_lock); in flush_pending_writes()
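The flush_pending_writes() references above show the deferred-write hand-off: take device_lock, detach the entire pending_bio_list and clear pending_count, drop the lock, then submit each bio outside it. A userspace model of just that hand-off, with a pthread mutex standing in for the spinlock, a plain singly-linked list for the bio_list, and a printf standing in for handing the bio to the block layer (my_ names are hypothetical):

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct my_bio {
        long sector;
        struct my_bio *next;
};

struct my_conf {
        pthread_mutex_t device_lock;
        struct my_bio *pending_head;
        int pending_count;
};

static void my_flush_pending_writes(struct my_conf *conf)
{
        struct my_bio *bio;

        pthread_mutex_lock(&conf->device_lock);
        bio = conf->pending_head;               /* take the whole list at once */
        conf->pending_head = NULL;
        conf->pending_count = 0;
        pthread_mutex_unlock(&conf->device_lock);

        while (bio) {                           /* issue everything outside the lock */
                struct my_bio *next = bio->next;

                printf("submit write at sector %ld\n", bio->sector);
                bio = next;
        }
}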
902 static void raise_barrier(struct r10conf *conf, int force) in raise_barrier() argument
904 BUG_ON(force && !conf->barrier); in raise_barrier()
905 spin_lock_irq(&conf->resync_lock); in raise_barrier()
908 wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting, in raise_barrier()
909 conf->resync_lock); in raise_barrier()
912 conf->barrier++; in raise_barrier()
915 wait_event_lock_irq(conf->wait_barrier, in raise_barrier()
916 !conf->nr_pending && conf->barrier < RESYNC_DEPTH, in raise_barrier()
917 conf->resync_lock); in raise_barrier()
919 spin_unlock_irq(&conf->resync_lock); in raise_barrier()
922 static void lower_barrier(struct r10conf *conf) in lower_barrier() argument
925 spin_lock_irqsave(&conf->resync_lock, flags); in lower_barrier()
926 conf->barrier--; in lower_barrier()
927 spin_unlock_irqrestore(&conf->resync_lock, flags); in lower_barrier()
928 wake_up(&conf->wait_barrier); in lower_barrier()
931 static void wait_barrier(struct r10conf *conf) in wait_barrier() argument
933 spin_lock_irq(&conf->resync_lock); in wait_barrier()
934 if (conf->barrier) { in wait_barrier()
935 conf->nr_waiting++; in wait_barrier()
945 wait_event_lock_irq(conf->wait_barrier, in wait_barrier()
946 !conf->barrier || in wait_barrier()
947 (conf->nr_pending && in wait_barrier()
950 conf->resync_lock); in wait_barrier()
951 conf->nr_waiting--; in wait_barrier()
953 conf->nr_pending++; in wait_barrier()
954 spin_unlock_irq(&conf->resync_lock); in wait_barrier()
957 static void allow_barrier(struct r10conf *conf) in allow_barrier() argument
960 spin_lock_irqsave(&conf->resync_lock, flags); in allow_barrier()
961 conf->nr_pending--; in allow_barrier()
962 spin_unlock_irqrestore(&conf->resync_lock, flags); in allow_barrier()
963 wake_up(&conf->wait_barrier); in allow_barrier()
966 static void freeze_array(struct r10conf *conf, int extra) in freeze_array() argument
980 spin_lock_irq(&conf->resync_lock); in freeze_array()
981 conf->barrier++; in freeze_array()
982 conf->nr_waiting++; in freeze_array()
983 wait_event_lock_irq_cmd(conf->wait_barrier, in freeze_array()
984 conf->nr_pending == conf->nr_queued+extra, in freeze_array()
985 conf->resync_lock, in freeze_array()
986 flush_pending_writes(conf)); in freeze_array()
988 spin_unlock_irq(&conf->resync_lock); in freeze_array()
991 static void unfreeze_array(struct r10conf *conf) in unfreeze_array() argument
994 spin_lock_irq(&conf->resync_lock); in unfreeze_array()
995 conf->barrier--; in unfreeze_array()
996 conf->nr_waiting--; in unfreeze_array()
997 wake_up(&conf->wait_barrier); in unfreeze_array()
998 spin_unlock_irq(&conf->resync_lock); in unfreeze_array()
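The references above from raise_barrier() through unfreeze_array() all manipulate three counters under resync_lock: barrier (active resync regions), nr_pending (normal I/O in flight) and nr_waiting (normal I/O blocked on the barrier). A simplified userspace pthread model of that protocol is sketched below; it omits the force flag, the RESYNC_DEPTH limit and the nr_waiting fast path of the real code, and freeze_array()/unfreeze_array() can be read as raising the barrier while also counting the caller in nr_waiting until nr_pending drains down to nr_queued.

#include <pthread.h>

struct barrier_conf {
        pthread_mutex_t resync_lock;    /* stands in for conf->resync_lock */
        pthread_cond_t  wait_barrier;   /* stands in for conf->wait_barrier */
        int barrier;                    /* active resync regions */
        int nr_pending;                 /* normal I/O currently in flight */
        int nr_waiting;                 /* normal I/O blocked on the barrier */
};

static void raise_barrier(struct barrier_conf *conf)
{
        pthread_mutex_lock(&conf->resync_lock);
        conf->barrier++;
        while (conf->nr_pending > 0)            /* wait for in-flight I/O to drain */
                pthread_cond_wait(&conf->wait_barrier, &conf->resync_lock);
        pthread_mutex_unlock(&conf->resync_lock);
}

static void lower_barrier(struct barrier_conf *conf)
{
        pthread_mutex_lock(&conf->resync_lock);
        conf->barrier--;
        pthread_mutex_unlock(&conf->resync_lock);
        pthread_cond_broadcast(&conf->wait_barrier);
}

static void wait_barrier(struct barrier_conf *conf)
{
        pthread_mutex_lock(&conf->resync_lock);
        if (conf->barrier) {
                conf->nr_waiting++;
                while (conf->barrier)           /* block until resync lowers the barrier */
                        pthread_cond_wait(&conf->wait_barrier, &conf->resync_lock);
                conf->nr_waiting--;
        }
        conf->nr_pending++;                     /* count this request as in flight */
        pthread_mutex_unlock(&conf->resync_lock);
}

static void allow_barrier(struct barrier_conf *conf)
{
        pthread_mutex_lock(&conf->resync_lock);
        conf->nr_pending--;
        pthread_mutex_unlock(&conf->resync_lock);
        pthread_cond_broadcast(&conf->wait_barrier);
}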
1022 struct r10conf *conf = mddev->private; in raid10_unplug() local
1026 spin_lock_irq(&conf->device_lock); in raid10_unplug()
1027 bio_list_merge(&conf->pending_bio_list, &plug->pending); in raid10_unplug()
1028 conf->pending_count += plug->pending_cnt; in raid10_unplug()
1029 spin_unlock_irq(&conf->device_lock); in raid10_unplug()
1030 wake_up(&conf->wait_barrier); in raid10_unplug()
1039 wake_up(&conf->wait_barrier); in raid10_unplug()
1057 struct r10conf *conf = mddev->private; in __make_request() local
1080 wait_barrier(conf); in __make_request()
1084 bio->bi_iter.bi_sector < conf->reshape_progress && in __make_request()
1085 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { in __make_request()
1089 allow_barrier(conf); in __make_request()
1090 wait_event(conf->wait_barrier, in __make_request()
1091 conf->reshape_progress <= bio->bi_iter.bi_sector || in __make_request()
1092 conf->reshape_progress >= bio->bi_iter.bi_sector + in __make_request()
1094 wait_barrier(conf); in __make_request()
1099 ? (bio->bi_iter.bi_sector < conf->reshape_safe && in __make_request()
1100 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) in __make_request()
1101 : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe && in __make_request()
1102 bio->bi_iter.bi_sector < conf->reshape_progress))) { in __make_request()
1104 mddev->reshape_position = conf->reshape_progress; in __make_request()
1111 conf->reshape_safe = mddev->reshape_position; in __make_request()
1114 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); in __make_request()
1141 rdev = read_balance(conf, r10_bio, &max_sectors); in __make_request()
1169 spin_lock_irq(&conf->device_lock); in __make_request()
1174 spin_unlock_irq(&conf->device_lock); in __make_request()
1182 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); in __make_request()
1199 if (conf->pending_count >= max_queued_requests) { in __make_request()
1201 wait_event(conf->wait_barrier, in __make_request()
1202 conf->pending_count < max_queued_requests); in __make_request()
1217 raid10_find_phys(conf, r10_bio); in __make_request()
1223 for (i = 0; i < conf->copies; i++) { in __make_request()
1225 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev); in __make_request()
1227 conf->mirrors[d].replacement); in __make_request()
1313 rdev_dec_pending(conf->mirrors[d].rdev, mddev); in __make_request()
1318 rdev = conf->mirrors[d].replacement; in __make_request()
1322 rdev = conf->mirrors[d].rdev; in __make_request()
1327 allow_barrier(conf); in __make_request()
1329 wait_barrier(conf); in __make_request()
1338 spin_lock_irq(&conf->device_lock); in __make_request()
1343 spin_unlock_irq(&conf->device_lock); in __make_request()
1351 for (i = 0; i < conf->copies; i++) { in __make_request()
1355 struct md_rdev *rdev = conf->mirrors[d].rdev; in __make_request()
1379 spin_lock_irqsave(&conf->device_lock, flags); in __make_request()
1384 bio_list_add(&conf->pending_bio_list, mbio); in __make_request()
1385 conf->pending_count++; in __make_request()
1387 spin_unlock_irqrestore(&conf->device_lock, flags); in __make_request()
1393 struct md_rdev *rdev = conf->mirrors[d].replacement; in __make_request()
1397 rdev = conf->mirrors[d].rdev; in __make_request()
1414 spin_lock_irqsave(&conf->device_lock, flags); in __make_request()
1415 bio_list_add(&conf->pending_bio_list, mbio); in __make_request()
1416 conf->pending_count++; in __make_request()
1417 spin_unlock_irqrestore(&conf->device_lock, flags); in __make_request()
1432 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); in __make_request()
1447 struct r10conf *conf = mddev->private; in make_request() local
1448 sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask); in make_request()
1468 && (conf->geo.near_copies < conf->geo.raid_disks in make_request()
1469 || conf->prev.near_copies < in make_request()
1470 conf->prev.raid_disks))) { in make_request()
1484 wake_up(&conf->wait_barrier); in make_request()
1489 struct r10conf *conf = mddev->private; in status() local
1492 if (conf->geo.near_copies < conf->geo.raid_disks) in status()
1494 if (conf->geo.near_copies > 1) in status()
1495 seq_printf(seq, " %d near-copies", conf->geo.near_copies); in status()
1496 if (conf->geo.far_copies > 1) { in status()
1497 if (conf->geo.far_offset) in status()
1498 seq_printf(seq, " %d offset-copies", conf->geo.far_copies); in status()
1500 seq_printf(seq, " %d far-copies", conf->geo.far_copies); in status()
1501 if (conf->geo.far_set_size != conf->geo.raid_disks) in status()
1502 seq_printf(seq, " %d devices per set", conf->geo.far_set_size); in status()
1504 seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks, in status()
1505 conf->geo.raid_disks - mddev->degraded); in status()
1506 for (i = 0; i < conf->geo.raid_disks; i++) in status()
1508 conf->mirrors[i].rdev && in status()
1509 test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_"); in status()
1518 static int _enough(struct r10conf *conf, int previous, int ignore) in _enough() argument
1524 disks = conf->prev.raid_disks; in _enough()
1525 ncopies = conf->prev.near_copies; in _enough()
1527 disks = conf->geo.raid_disks; in _enough()
1528 ncopies = conf->geo.near_copies; in _enough()
1533 int n = conf->copies; in _enough()
1539 (rdev = rcu_dereference(conf->mirrors[this].rdev)) && in _enough()
1554 static int enough(struct r10conf *conf, int ignore) in enough() argument
1561 return _enough(conf, 0, ignore) && in enough()
1562 _enough(conf, 1, ignore); in enough()
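The _enough()/enough() references above decide whether the array still has at least one usable device in every copy set, optionally treating one device ("ignore") as already failed. A userspace sketch of that walk, simplified to the far_copies == 1 case (so the set size equals near_copies) and using a plain in_sync[] array in place of the RCU-protected rdev state:

#include <stdbool.h>

static bool my_enough(const bool *in_sync, int raid_disks, int ncopies, int ignore)
{
        int first = 0;

        do {
                int n = ncopies;
                int this = first;
                int cnt = 0;

                while (n--) {
                        if (this != ignore && in_sync[this])
                                cnt++;
                        this = (this + 1) % raid_disks;
                }
                if (cnt == 0)
                        return false;   /* an entire copy set has no working device */
                first = (first + ncopies) % raid_disks;
        } while (first != 0);

        return true;
}

As the listing shows, enough() simply requires this to hold for both the previous and the current geometry, which matters while a reshape is in progress.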
1568 struct r10conf *conf = mddev->private; in error() local
1577 spin_lock_irqsave(&conf->device_lock, flags); in error()
1579 && !enough(conf, rdev->raid_disk)) { in error()
1583 spin_unlock_irqrestore(&conf->device_lock, flags); in error()
1596 spin_unlock_irqrestore(&conf->device_lock, flags); in error()
1601 mdname(mddev), conf->geo.raid_disks - mddev->degraded); in error()
1604 static void print_conf(struct r10conf *conf) in print_conf() argument
1610 if (!conf) { in print_conf()
1614 printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded, in print_conf()
1615 conf->geo.raid_disks); in print_conf()
1617 for (i = 0; i < conf->geo.raid_disks; i++) { in print_conf()
1619 tmp = conf->mirrors + i; in print_conf()
1628 static void close_sync(struct r10conf *conf) in close_sync() argument
1630 wait_barrier(conf); in close_sync()
1631 allow_barrier(conf); in close_sync()
1633 mempool_destroy(conf->r10buf_pool); in close_sync()
1634 conf->r10buf_pool = NULL; in close_sync()
1640 struct r10conf *conf = mddev->private; in raid10_spare_active() local
1649 for (i = 0; i < conf->geo.raid_disks; i++) { in raid10_spare_active()
1650 tmp = conf->mirrors + i; in raid10_spare_active()
1677 spin_lock_irqsave(&conf->device_lock, flags); in raid10_spare_active()
1679 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10_spare_active()
1681 print_conf(conf); in raid10_spare_active()
1687 struct r10conf *conf = mddev->private; in raid10_add_disk() local
1691 int last = conf->geo.raid_disks - 1; in raid10_add_disk()
1698 if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1)) in raid10_add_disk()
1708 conf->mirrors[rdev->saved_raid_disk].rdev == NULL) in raid10_add_disk()
1713 struct raid10_info *p = &conf->mirrors[mirror]; in raid10_add_disk()
1727 conf->fullsync = 1; in raid10_add_disk()
1741 conf->fullsync = 1; in raid10_add_disk()
1748 print_conf(conf); in raid10_add_disk()
1754 struct r10conf *conf = mddev->private; in raid10_remove_disk() local
1758 struct raid10_info *p = conf->mirrors + number; in raid10_remove_disk()
1760 print_conf(conf); in raid10_remove_disk()
1779 number < conf->geo.raid_disks && in raid10_remove_disk()
1780 enough(conf, -1)) { in raid10_remove_disk()
1810 print_conf(conf); in raid10_remove_disk()
1817 struct r10conf *conf = r10_bio->mddev->private; in end_sync_read() local
1824 d = find_bio_disk(conf, r10_bio, bio, NULL, NULL); in end_sync_read()
1833 &conf->mirrors[d].rdev->corrected_errors); in end_sync_read()
1838 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev); in end_sync_read()
1879 struct r10conf *conf = mddev->private; in end_sync_write() local
1887 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in end_sync_write()
1889 rdev = conf->mirrors[d].replacement; in end_sync_write()
1891 rdev = conf->mirrors[d].rdev; in end_sync_write()
1932 struct r10conf *conf = mddev->private; in sync_request_write() local
1940 for (i=0; i<conf->copies; i++) in sync_request_write()
1944 if (i == conf->copies) in sync_request_write()
1954 for (i=0 ; i < conf->copies ; i++) { in sync_request_write()
2003 atomic_inc(&conf->mirrors[d].rdev->nr_pending); in sync_request_write()
2005 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio)); in sync_request_write()
2007 tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset; in sync_request_write()
2008 tbio->bi_bdev = conf->mirrors[d].rdev->bdev; in sync_request_write()
2015 for (i = 0; i < conf->copies; i++) { in sync_request_write()
2026 md_sync_acct(conf->mirrors[d].replacement->bdev, in sync_request_write()
2058 struct r10conf *conf = mddev->private; in fix_recovery_read_error() local
2075 rdev = conf->mirrors[dr].rdev; in fix_recovery_read_error()
2083 rdev = conf->mirrors[dw].rdev; in fix_recovery_read_error()
2105 if (rdev != conf->mirrors[dw].rdev) { in fix_recovery_read_error()
2107 struct md_rdev *rdev2 = conf->mirrors[dw].rdev; in fix_recovery_read_error()
2117 conf->mirrors[dw].recovery_disabled in fix_recovery_read_error()
2134 struct r10conf *conf = mddev->private; in recovery_request_write() local
2158 atomic_inc(&conf->mirrors[d].rdev->nr_pending); in recovery_request_write()
2159 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio)); in recovery_request_write()
2163 atomic_inc(&conf->mirrors[d].replacement->nr_pending); in recovery_request_write()
2164 md_sync_acct(conf->mirrors[d].replacement->bdev, in recovery_request_write()
2239 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio) in fix_read_error() argument
2250 rdev = conf->mirrors[d].rdev; in fix_read_error()
2271 md_error(mddev, conf->mirrors[d].rdev); in fix_read_error()
2291 rdev = rcu_dereference(conf->mirrors[d].rdev); in fix_read_error()
2302 conf->tmppage, READ, false); in fix_read_error()
2309 if (sl == conf->copies) in fix_read_error()
2320 rdev = conf->mirrors[dn].rdev; in fix_read_error()
2341 sl = conf->copies; in fix_read_error()
2344 rdev = rcu_dereference(conf->mirrors[d].rdev); in fix_read_error()
2354 s, conf->tmppage, WRITE) in fix_read_error()
2380 sl = conf->copies; in fix_read_error()
2383 rdev = rcu_dereference(conf->mirrors[d].rdev); in fix_read_error()
2393 s, conf->tmppage, in fix_read_error()
2437 struct r10conf *conf = mddev->private; in narrow_write_error() local
2438 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; in narrow_write_error()
2495 struct r10conf *conf = mddev->private; in handle_read_error() local
2515 freeze_array(conf, 1); in handle_read_error()
2516 fix_read_error(conf, mddev, r10_bio); in handle_read_error()
2517 unfreeze_array(conf); in handle_read_error()
2524 rdev = read_balance(conf, r10_bio, &max_sectors); in handle_read_error()
2561 spin_lock_irq(&conf->device_lock); in handle_read_error()
2566 spin_unlock_irq(&conf->device_lock); in handle_read_error()
2569 r10_bio = mempool_alloc(conf->r10bio_pool, in handle_read_error()
2585 static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) in handle_write_completed() argument
2598 for (m = 0; m < conf->copies; m++) { in handle_write_completed()
2600 rdev = conf->mirrors[dev].rdev; in handle_write_completed()
2613 md_error(conf->mddev, rdev); in handle_write_completed()
2615 rdev = conf->mirrors[dev].replacement; in handle_write_completed()
2629 md_error(conf->mddev, rdev); in handle_write_completed()
2635 for (m = 0; m < conf->copies; m++) { in handle_write_completed()
2638 rdev = conf->mirrors[dev].rdev; in handle_write_completed()
2644 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
2648 md_error(conf->mddev, rdev); in handle_write_completed()
2652 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
2655 rdev = conf->mirrors[dev].replacement; in handle_write_completed()
2661 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
2665 spin_lock_irq(&conf->device_lock); in handle_write_completed()
2666 list_add(&r10_bio->retry_list, &conf->bio_end_io_list); in handle_write_completed()
2667 conf->nr_queued++; in handle_write_completed()
2668 spin_unlock_irq(&conf->device_lock); in handle_write_completed()
2669 md_wakeup_thread(conf->mddev->thread); in handle_write_completed()
2684 struct r10conf *conf = mddev->private; in raid10d() local
2685 struct list_head *head = &conf->retry_list; in raid10d()
2690 if (!list_empty_careful(&conf->bio_end_io_list) && in raid10d()
2693 spin_lock_irqsave(&conf->device_lock, flags); in raid10d()
2695 while (!list_empty(&conf->bio_end_io_list)) { in raid10d()
2696 list_move(conf->bio_end_io_list.prev, &tmp); in raid10d()
2697 conf->nr_queued--; in raid10d()
2700 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10d()
2718 flush_pending_writes(conf); in raid10d()
2720 spin_lock_irqsave(&conf->device_lock, flags); in raid10d()
2722 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10d()
2727 conf->nr_queued--; in raid10d()
2728 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10d()
2731 conf = mddev->private; in raid10d()
2734 handle_write_completed(conf, r10_bio); in raid10d()
2758 static int init_resync(struct r10conf *conf) in init_resync() argument
2764 BUG_ON(conf->r10buf_pool); in init_resync()
2765 conf->have_replacement = 0; in init_resync()
2766 for (i = 0; i < conf->geo.raid_disks; i++) in init_resync()
2767 if (conf->mirrors[i].replacement) in init_resync()
2768 conf->have_replacement = 1; in init_resync()
2769 conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf); in init_resync()
2770 if (!conf->r10buf_pool) in init_resync()
2772 conf->next_resync = 0; in init_resync()
2811 struct r10conf *conf = mddev->private; in sync_request() local
2820 sector_t chunk_mask = conf->geo.chunk_mask; in sync_request()
2822 if (!conf->r10buf_pool) in sync_request()
2823 if (init_resync(conf)) in sync_request()
2836 conf->fullsync == 0) { in sync_request()
2857 end_reshape(conf); in sync_request()
2858 close_sync(conf); in sync_request()
2866 else for (i = 0; i < conf->geo.raid_disks; i++) { in sync_request()
2868 raid10_find_virt(conf, mddev->curr_resync, i); in sync_request()
2874 if ((!mddev->bitmap || conf->fullsync) in sync_request()
2875 && conf->have_replacement in sync_request()
2880 for (i = 0; i < conf->geo.raid_disks; i++) in sync_request()
2881 if (conf->mirrors[i].replacement) in sync_request()
2882 conf->mirrors[i].replacement in sync_request()
2886 conf->fullsync = 0; in sync_request()
2889 close_sync(conf); in sync_request()
2897 if (chunks_skipped >= conf->geo.raid_disks) { in sync_request()
2911 if (conf->geo.near_copies < conf->geo.raid_disks && in sync_request()
2936 for (i = 0 ; i < conf->geo.raid_disks; i++) { in sync_request()
2942 struct raid10_info *mirror = &conf->mirrors[i]; in sync_request()
2955 sect = raid10_find_virt(conf, sector_nr, i); in sync_request()
2972 !conf->fullsync) { in sync_request()
2980 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); in sync_request()
2982 raise_barrier(conf, rb2 != NULL); in sync_request()
2992 raid10_find_phys(conf, r10_bio); in sync_request()
2997 for (j = 0; j < conf->geo.raid_disks; j++) in sync_request()
2998 if (conf->mirrors[j].rdev == NULL || in sync_request()
2999 test_bit(Faulty, &conf->mirrors[j].rdev->flags)) { in sync_request()
3008 for (j=0; j<conf->copies;j++) { in sync_request()
3015 if (!conf->mirrors[d].rdev || in sync_request()
3016 !test_bit(In_sync, &conf->mirrors[d].rdev->flags)) in sync_request()
3020 rdev = conf->mirrors[d].rdev; in sync_request()
3049 for (k=0; k<conf->copies; k++) in sync_request()
3052 BUG_ON(k == conf->copies); in sync_request()
3103 if (j == conf->copies) { in sync_request()
3111 for (k = 0; k < conf->copies; k++) in sync_request()
3161 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, in sync_request()
3169 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); in sync_request()
3174 raise_barrier(conf, 0); in sync_request()
3175 conf->next_resync = sector_nr; in sync_request()
3180 raid10_find_phys(conf, r10_bio); in sync_request()
3183 for (i = 0; i < conf->copies; i++) { in sync_request()
3194 if (conf->mirrors[d].rdev == NULL || in sync_request()
3195 test_bit(Faulty, &conf->mirrors[d].rdev->flags)) in sync_request()
3198 if (is_badblock(conf->mirrors[d].rdev, in sync_request()
3210 atomic_inc(&conf->mirrors[d].rdev->nr_pending); in sync_request()
3218 conf->mirrors[d].rdev->data_offset; in sync_request()
3219 bio->bi_bdev = conf->mirrors[d].rdev->bdev; in sync_request()
3222 if (conf->mirrors[d].replacement == NULL || in sync_request()
3224 &conf->mirrors[d].replacement->flags)) in sync_request()
3233 atomic_inc(&conf->mirrors[d].rdev->nr_pending); in sync_request()
3240 conf->mirrors[d].replacement->data_offset; in sync_request()
3241 bio->bi_bdev = conf->mirrors[d].replacement->bdev; in sync_request()
3246 for (i=0; i<conf->copies; i++) { in sync_request()
3249 rdev_dec_pending(conf->mirrors[d].rdev, in sync_request()
3254 conf->mirrors[d].replacement, in sync_request()
3337 struct r10conf *conf = mddev->private; in raid10_size() local
3340 raid_disks = min(conf->geo.raid_disks, in raid10_size()
3341 conf->prev.raid_disks); in raid10_size()
3343 sectors = conf->dev_sectors; in raid10_size()
3345 size = sectors >> conf->geo.chunk_shift; in raid10_size()
3346 sector_div(size, conf->geo.far_copies); in raid10_size()
3348 sector_div(size, conf->geo.near_copies); in raid10_size()
3350 return size << conf->geo.chunk_shift; in raid10_size()
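raid10_size() turns per-device capacity into usable array capacity: truncate to whole chunks, divide by far_copies, scale by the device count, divide by near_copies; calc_sectors(), listed next, does the inverse to derive dev_sectors from a requested array size. A worked example with hypothetical numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t dev_sectors = 1953525168ULL;   /* assumed ~1 TB member device */
        int raid_disks = 4, near_copies = 2, far_copies = 1;
        int chunk_shift = 10;                   /* 1024-sector (512 KiB) chunks */

        uint64_t size = dev_sectors >> chunk_shift;     /* whole chunks per device */
        size /= far_copies;                             /* far copies cost capacity */
        size *= raid_disks;                             /* scale across all members */
        size /= near_copies;                            /* near copies cost capacity */

        printf("usable array size: %llu sectors\n",
               (unsigned long long)(size << chunk_shift));
        return 0;
}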
3353 static void calc_sectors(struct r10conf *conf, sector_t size) in calc_sectors() argument
3360 size = size >> conf->geo.chunk_shift; in calc_sectors()
3361 sector_div(size, conf->geo.far_copies); in calc_sectors()
3362 size = size * conf->geo.raid_disks; in calc_sectors()
3363 sector_div(size, conf->geo.near_copies); in calc_sectors()
3366 size = size * conf->copies; in calc_sectors()
3371 size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks); in calc_sectors()
3373 conf->dev_sectors = size << conf->geo.chunk_shift; in calc_sectors()
3375 if (conf->geo.far_offset) in calc_sectors()
3376 conf->geo.stride = 1 << conf->geo.chunk_shift; in calc_sectors()
3378 sector_div(size, conf->geo.far_copies); in calc_sectors()
3379 conf->geo.stride = size << conf->geo.chunk_shift; in calc_sectors()
3442 struct r10conf *conf = NULL; in setup_conf() local
3463 conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL); in setup_conf()
3464 if (!conf) in setup_conf()
3468 conf->mirrors = kzalloc(sizeof(struct raid10_info)*(mddev->raid_disks + in setup_conf()
3471 if (!conf->mirrors) in setup_conf()
3474 conf->tmppage = alloc_page(GFP_KERNEL); in setup_conf()
3475 if (!conf->tmppage) in setup_conf()
3478 conf->geo = geo; in setup_conf()
3479 conf->copies = copies; in setup_conf()
3480 conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc, in setup_conf()
3481 r10bio_pool_free, conf); in setup_conf()
3482 if (!conf->r10bio_pool) in setup_conf()
3485 calc_sectors(conf, mddev->dev_sectors); in setup_conf()
3487 conf->prev = conf->geo; in setup_conf()
3488 conf->reshape_progress = MaxSector; in setup_conf()
3490 if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) { in setup_conf()
3494 conf->reshape_progress = mddev->reshape_position; in setup_conf()
3495 if (conf->prev.far_offset) in setup_conf()
3496 conf->prev.stride = 1 << conf->prev.chunk_shift; in setup_conf()
3499 conf->prev.stride = conf->dev_sectors; in setup_conf()
3501 conf->reshape_safe = conf->reshape_progress; in setup_conf()
3502 spin_lock_init(&conf->device_lock); in setup_conf()
3503 INIT_LIST_HEAD(&conf->retry_list); in setup_conf()
3504 INIT_LIST_HEAD(&conf->bio_end_io_list); in setup_conf()
3506 spin_lock_init(&conf->resync_lock); in setup_conf()
3507 init_waitqueue_head(&conf->wait_barrier); in setup_conf()
3509 conf->thread = md_register_thread(raid10d, mddev, "raid10"); in setup_conf()
3510 if (!conf->thread) in setup_conf()
3513 conf->mddev = mddev; in setup_conf()
3514 return conf; in setup_conf()
3520 if (conf) { in setup_conf()
3521 mempool_destroy(conf->r10bio_pool); in setup_conf()
3522 kfree(conf->mirrors); in setup_conf()
3523 safe_put_page(conf->tmppage); in setup_conf()
3524 kfree(conf); in setup_conf()
3531 struct r10conf *conf; in run() local
3541 conf = setup_conf(mddev); in run()
3542 if (IS_ERR(conf)) in run()
3543 return PTR_ERR(conf); in run()
3544 mddev->private = conf; in run()
3546 conf = mddev->private; in run()
3547 if (!conf) in run()
3550 mddev->thread = conf->thread; in run()
3551 conf->thread = NULL; in run()
3559 if (conf->geo.raid_disks % conf->geo.near_copies) in run()
3560 blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); in run()
3563 (conf->geo.raid_disks / conf->geo.near_copies)); in run()
3573 if (disk_idx >= conf->geo.raid_disks && in run()
3574 disk_idx >= conf->prev.raid_disks) in run()
3576 disk = conf->mirrors + disk_idx; in run()
3615 if (!enough(conf, -1)) { in run()
3621 if (conf->reshape_progress != MaxSector) { in run()
3623 if (conf->geo.far_copies != 1 && in run()
3624 conf->geo.far_offset == 0) in run()
3626 if (conf->prev.far_copies != 1 && in run()
3627 conf->prev.far_offset == 0) in run()
3633 i < conf->geo.raid_disks in run()
3634 || i < conf->prev.raid_disks; in run()
3637 disk = conf->mirrors + i; in run()
3652 conf->fullsync = 1; in run()
3663 mdname(mddev), conf->geo.raid_disks - mddev->degraded, in run()
3664 conf->geo.raid_disks); in run()
3668 mddev->dev_sectors = conf->dev_sectors; in run()
3674 int stripe = conf->geo.raid_disks * in run()
3681 stripe /= conf->geo.near_copies; in run()
3689 if (conf->reshape_progress != MaxSector) { in run()
3692 before_length = ((1 << conf->prev.chunk_shift) * in run()
3693 conf->prev.far_copies); in run()
3694 after_length = ((1 << conf->geo.chunk_shift) * in run()
3695 conf->geo.far_copies); in run()
3702 conf->offset_diff = min_offset_diff; in run()
3716 mempool_destroy(conf->r10bio_pool); in run()
3717 safe_put_page(conf->tmppage); in run()
3718 kfree(conf->mirrors); in run()
3719 kfree(conf); in run()
3727 struct r10conf *conf = priv; in raid10_free() local
3729 mempool_destroy(conf->r10bio_pool); in raid10_free()
3730 safe_put_page(conf->tmppage); in raid10_free()
3731 kfree(conf->mirrors); in raid10_free()
3732 kfree(conf->mirrors_old); in raid10_free()
3733 kfree(conf->mirrors_new); in raid10_free()
3734 kfree(conf); in raid10_free()
3739 struct r10conf *conf = mddev->private; in raid10_quiesce() local
3743 raise_barrier(conf, 0); in raid10_quiesce()
3746 lower_barrier(conf); in raid10_quiesce()
3765 struct r10conf *conf = mddev->private; in raid10_resize() local
3771 if (conf->geo.far_copies > 1 && !conf->geo.far_offset) in raid10_resize()
3792 calc_sectors(conf, sectors); in raid10_resize()
3793 mddev->dev_sectors = conf->dev_sectors; in raid10_resize()
3801 struct r10conf *conf; in raid10_takeover_raid0() local
3821 conf = setup_conf(mddev); in raid10_takeover_raid0()
3822 if (!IS_ERR(conf)) { in raid10_takeover_raid0()
3828 conf->barrier = 1; in raid10_takeover_raid0()
3831 return conf; in raid10_takeover_raid0()
3873 struct r10conf *conf = mddev->private; in raid10_check_reshape() local
3876 if (conf->geo.far_copies != 1 && !conf->geo.far_offset) in raid10_check_reshape()
3879 if (setup_geo(&geo, mddev, geo_start) != conf->copies) in raid10_check_reshape()
3890 if (!enough(conf, -1)) in raid10_check_reshape()
3893 kfree(conf->mirrors_new); in raid10_check_reshape()
3894 conf->mirrors_new = NULL; in raid10_check_reshape()
3897 conf->mirrors_new = kzalloc( in raid10_check_reshape()
3902 if (!conf->mirrors_new) in raid10_check_reshape()
3921 static int calc_degraded(struct r10conf *conf) in calc_degraded() argument
3929 for (i = 0; i < conf->prev.raid_disks; i++) { in calc_degraded()
3930 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in calc_degraded()
3941 if (conf->geo.raid_disks == conf->prev.raid_disks) in calc_degraded()
3945 for (i = 0; i < conf->geo.raid_disks; i++) { in calc_degraded()
3946 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in calc_degraded()
3955 if (conf->geo.raid_disks <= conf->prev.raid_disks) in calc_degraded()
3981 struct r10conf *conf = mddev->private; in raid10_start_reshape() local
3989 if (setup_geo(&new, mddev, geo_start) != conf->copies) in raid10_start_reshape()
3992 before_length = ((1 << conf->prev.chunk_shift) * in raid10_start_reshape()
3993 conf->prev.far_copies); in raid10_start_reshape()
3994 after_length = ((1 << conf->geo.chunk_shift) * in raid10_start_reshape()
3995 conf->geo.far_copies); in raid10_start_reshape()
4019 conf->offset_diff = min_offset_diff; in raid10_start_reshape()
4020 spin_lock_irq(&conf->device_lock); in raid10_start_reshape()
4021 if (conf->mirrors_new) { in raid10_start_reshape()
4022 memcpy(conf->mirrors_new, conf->mirrors, in raid10_start_reshape()
4023 sizeof(struct raid10_info)*conf->prev.raid_disks); in raid10_start_reshape()
4025 kfree(conf->mirrors_old); in raid10_start_reshape()
4026 conf->mirrors_old = conf->mirrors; in raid10_start_reshape()
4027 conf->mirrors = conf->mirrors_new; in raid10_start_reshape()
4028 conf->mirrors_new = NULL; in raid10_start_reshape()
4030 setup_geo(&conf->geo, mddev, geo_start); in raid10_start_reshape()
4035 spin_unlock_irq(&conf->device_lock); in raid10_start_reshape()
4041 conf->reshape_progress = size; in raid10_start_reshape()
4043 conf->reshape_progress = 0; in raid10_start_reshape()
4044 conf->reshape_safe = conf->reshape_progress; in raid10_start_reshape()
4045 spin_unlock_irq(&conf->device_lock); in raid10_start_reshape()
4050 conf->geo.raid_disks), in raid10_start_reshape()
4061 conf->prev.raid_disks) in raid10_start_reshape()
4069 } else if (rdev->raid_disk >= conf->prev.raid_disks in raid10_start_reshape()
4079 spin_lock_irq(&conf->device_lock); in raid10_start_reshape()
4080 mddev->degraded = calc_degraded(conf); in raid10_start_reshape()
4081 spin_unlock_irq(&conf->device_lock); in raid10_start_reshape()
4082 mddev->raid_disks = conf->geo.raid_disks; in raid10_start_reshape()
4083 mddev->reshape_position = conf->reshape_progress; in raid10_start_reshape()
4098 conf->reshape_checkpoint = jiffies; in raid10_start_reshape()
4105 spin_lock_irq(&conf->device_lock); in raid10_start_reshape()
4106 conf->geo = conf->prev; in raid10_start_reshape()
4107 mddev->raid_disks = conf->geo.raid_disks; in raid10_start_reshape()
4111 conf->reshape_progress = MaxSector; in raid10_start_reshape()
4112 conf->reshape_safe = MaxSector; in raid10_start_reshape()
4114 spin_unlock_irq(&conf->device_lock); in raid10_start_reshape()
4189 struct r10conf *conf = mddev->private; in reshape_request() local
4204 conf->reshape_progress < raid10_size(mddev, 0, 0)) { in reshape_request()
4206 - conf->reshape_progress); in reshape_request()
4208 conf->reshape_progress > 0) in reshape_request()
4209 sector_nr = conf->reshape_progress; in reshape_request()
4226 next = first_dev_address(conf->reshape_progress - 1, in reshape_request()
4227 &conf->geo); in reshape_request()
4232 safe = last_dev_address(conf->reshape_safe - 1, in reshape_request()
4233 &conf->prev); in reshape_request()
4235 if (next + conf->offset_diff < safe) in reshape_request()
4238 last = conf->reshape_progress - 1; in reshape_request()
4239 sector_nr = last & ~(sector_t)(conf->geo.chunk_mask in reshape_request()
4240 & conf->prev.chunk_mask); in reshape_request()
4247 next = last_dev_address(conf->reshape_progress, &conf->geo); in reshape_request()
4252 safe = first_dev_address(conf->reshape_safe, &conf->prev); in reshape_request()
4257 if (next > safe + conf->offset_diff) in reshape_request()
4260 sector_nr = conf->reshape_progress; in reshape_request()
4261 last = sector_nr | (conf->geo.chunk_mask in reshape_request()
4262 & conf->prev.chunk_mask); in reshape_request()
4269 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { in reshape_request()
4271 wait_barrier(conf); in reshape_request()
4272 mddev->reshape_position = conf->reshape_progress; in reshape_request()
4275 - conf->reshape_progress; in reshape_request()
4277 mddev->curr_resync_completed = conf->reshape_progress; in reshape_request()
4278 conf->reshape_checkpoint = jiffies; in reshape_request()
4284 allow_barrier(conf); in reshape_request()
4287 conf->reshape_safe = mddev->reshape_position; in reshape_request()
4288 allow_barrier(conf); in reshape_request()
4293 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); in reshape_request()
4295 raise_barrier(conf, sectors_done != 0); in reshape_request()
4301 rdev = read_balance(conf, r10_bio, &max_sectors); in reshape_request()
4309 mempool_free(r10_bio, conf->r10buf_pool); in reshape_request()
4330 __raid10_find_phys(&conf->geo, r10_bio); in reshape_request()
4335 for (s = 0; s < conf->copies*2; s++) { in reshape_request()
4340 rdev2 = conf->mirrors[d].replacement; in reshape_request()
4343 rdev2 = conf->mirrors[d].rdev; in reshape_request()
4404 conf->reshape_progress -= sectors_done; in reshape_request()
4406 conf->reshape_progress += sectors_done; in reshape_request()
4421 struct r10conf *conf = mddev->private; in reshape_request_write() local
4435 for (s = 0; s < conf->copies*2; s++) { in reshape_request_write()
4440 rdev = conf->mirrors[d].replacement; in reshape_request_write()
4443 rdev = conf->mirrors[d].rdev; in reshape_request_write()
4457 static void end_reshape(struct r10conf *conf) in end_reshape() argument
4459 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) in end_reshape()
4462 spin_lock_irq(&conf->device_lock); in end_reshape()
4463 conf->prev = conf->geo; in end_reshape()
4464 md_finish_reshape(conf->mddev); in end_reshape()
4466 conf->reshape_progress = MaxSector; in end_reshape()
4467 conf->reshape_safe = MaxSector; in end_reshape()
4468 spin_unlock_irq(&conf->device_lock); in end_reshape()
4473 if (conf->mddev->queue) { in end_reshape()
4474 int stripe = conf->geo.raid_disks * in end_reshape()
4475 ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE); in end_reshape()
4476 stripe /= conf->geo.near_copies; in end_reshape()
4477 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) in end_reshape()
4478 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; in end_reshape()
4480 conf->fullsync = 0; in end_reshape()
4488 struct r10conf *conf = mddev->private; in handle_reshape_read_error() local
4491 struct r10dev devs[conf->copies]; in handle_reshape_read_error()
4499 __raid10_find_phys(&conf->prev, r10b); in handle_reshape_read_error()
4511 struct md_rdev *rdev = conf->mirrors[d].rdev; in handle_reshape_read_error()
4528 if (slot >= conf->copies) in handle_reshape_read_error()
4549 struct r10conf *conf = mddev->private; in end_reshape_write() local
4555 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in end_reshape_write()
4557 rdev = conf->mirrors[d].replacement; in end_reshape_write()
4560 rdev = conf->mirrors[d].rdev; in end_reshape_write()
4583 struct r10conf *conf = mddev->private; in raid10_finish_reshape() local
4600 for (d = conf->geo.raid_disks ; in raid10_finish_reshape()
4601 d < conf->geo.raid_disks - mddev->delta_disks; in raid10_finish_reshape()
4603 struct md_rdev *rdev = conf->mirrors[d].rdev; in raid10_finish_reshape()
4606 rdev = conf->mirrors[d].replacement; in raid10_finish_reshape()
4612 mddev->chunk_sectors = 1 << conf->geo.chunk_shift; in raid10_finish_reshape()