Lines matching refs:mddev: references to struct mddev in the md driver (drivers/md/md.c). Each entry gives the source line number, the matching line of code, and the enclosing function; "argument" marks a parameter declaration and "local" a local-variable declaration.
79 static int remove_and_add_spares(struct mddev *mddev,
81 static void mddev_detach(struct mddev *mddev);
104 static inline int speed_min(struct mddev *mddev) in speed_min() argument
106 return mddev->sync_speed_min ? in speed_min()
107 mddev->sync_speed_min : sysctl_speed_limit_min; in speed_min()
110 static inline int speed_max(struct mddev *mddev) in speed_max() argument
112 return mddev->sync_speed_max ? in speed_max()
113 mddev->sync_speed_max : sysctl_speed_limit_max; in speed_max()
165 struct mddev *mddev) in bio_alloc_mddev() argument
169 if (!mddev || !mddev->bio_set) in bio_alloc_mddev()
172 b = bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set); in bio_alloc_mddev()
180 struct mddev *mddev) in bio_clone_mddev() argument
182 if (!mddev || !mddev->bio_set) in bio_clone_mddev()
185 return bio_clone_bioset(bio, gfp_mask, mddev->bio_set); in bio_clone_mddev()
201 void md_new_event(struct mddev *mddev) in md_new_event() argument
211 static void md_new_event_inintr(struct mddev *mddev) in md_new_event_inintr() argument
237 mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
240 _mddev = list_entry(_tmp, struct mddev, all_mddevs); \
256 struct mddev *mddev = q->queuedata; in md_make_request() local
262 if (mddev == NULL || mddev->pers == NULL in md_make_request()
263 || !mddev->ready) { in md_make_request()
267 if (mddev->ro == 1 && unlikely(rw == WRITE)) { in md_make_request()
275 if (mddev->suspended) { in md_make_request()
278 prepare_to_wait(&mddev->sb_wait, &__wait, in md_make_request()
280 if (!mddev->suspended) in md_make_request()
286 finish_wait(&mddev->sb_wait, &__wait); in md_make_request()
288 atomic_inc(&mddev->active_io); in md_make_request()
298 mddev->pers->make_request(mddev, bio); in md_make_request()
301 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); in md_make_request()
302 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors); in md_make_request()
305 if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) in md_make_request()
306 wake_up(&mddev->sb_wait); in md_make_request()
317 void mddev_suspend(struct mddev *mddev) in mddev_suspend() argument
319 if (mddev->suspended++) in mddev_suspend()
322 wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0); in mddev_suspend()
323 mddev->pers->quiesce(mddev, 1); in mddev_suspend()
325 del_timer_sync(&mddev->safemode_timer); in mddev_suspend()
329 void mddev_resume(struct mddev *mddev) in mddev_resume() argument
331 if (--mddev->suspended) in mddev_resume()
333 wake_up(&mddev->sb_wait); in mddev_resume()
334 mddev->pers->quiesce(mddev, 0); in mddev_resume()
336 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in mddev_resume()
337 md_wakeup_thread(mddev->thread); in mddev_resume()
338 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ in mddev_resume()
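
Together, the md_make_request() and mddev_suspend()/mddev_resume() lines above form a quiesce protocol: each request counts itself into mddev->active_io before calling into the personality and back out afterwards, while a suspender increments mddev->suspended, waits on mddev->sb_wait until active_io drains to zero, and only then calls the personality's quiesce hook. Below is a minimal generic sketch of that drain pattern; the names (active_io, suspended, drain_wq and the helpers) are illustrative, not the md symbols, and the sketch glosses over the window between the suspended check and the increment, which the real code has to reason about separately.

	#include <linux/atomic.h>
	#include <linux/wait.h>

	static atomic_t active_io = ATOMIC_INIT(0);
	static int suspended;
	static DECLARE_WAIT_QUEUE_HEAD(drain_wq);

	/* I/O path: block while suspended, count in, do the work, count out. */
	static void handle_one_request(void)
	{
		wait_event(drain_wq, !READ_ONCE(suspended));
		atomic_inc(&active_io);
		/* ... hand the request to the personality ... */
		if (atomic_dec_and_test(&active_io) && READ_ONCE(suspended))
			wake_up(&drain_wq);
	}

	/* Control path: flag the suspension, then wait for in-flight I/O to drain. */
	static void quiesce_io(void)
	{
		WRITE_ONCE(suspended, 1);
		wait_event(drain_wq, atomic_read(&active_io) == 0);
	}

	static void resume_io(void)
	{
		WRITE_ONCE(suspended, 0);
		wake_up(&drain_wq);
	}
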
342 int mddev_congested(struct mddev *mddev, int bits) in mddev_congested() argument
344 struct md_personality *pers = mddev->pers; in mddev_congested()
348 if (mddev->suspended) in mddev_congested()
351 ret = pers->congested(mddev, bits); in mddev_congested()
358 struct mddev *mddev = data; in md_congested() local
359 return mddev_congested(mddev, bits); in md_congested()
369 struct mddev *mddev = rdev->mddev; in md_end_flush() local
371 rdev_dec_pending(rdev, mddev); in md_end_flush()
373 if (atomic_dec_and_test(&mddev->flush_pending)) { in md_end_flush()
375 queue_work(md_wq, &mddev->flush_work); in md_end_flush()
384 struct mddev *mddev = container_of(ws, struct mddev, flush_work); in submit_flushes() local
387 INIT_WORK(&mddev->flush_work, md_submit_flush_data); in submit_flushes()
388 atomic_set(&mddev->flush_pending, 1); in submit_flushes()
390 rdev_for_each_rcu(rdev, mddev) in submit_flushes()
401 bi = bio_alloc_mddev(GFP_NOIO, 0, mddev); in submit_flushes()
405 atomic_inc(&mddev->flush_pending); in submit_flushes()
408 rdev_dec_pending(rdev, mddev); in submit_flushes()
411 if (atomic_dec_and_test(&mddev->flush_pending)) in submit_flushes()
412 queue_work(md_wq, &mddev->flush_work); in submit_flushes()
417 struct mddev *mddev = container_of(ws, struct mddev, flush_work); in md_submit_flush_data() local
418 struct bio *bio = mddev->flush_bio; in md_submit_flush_data()
425 mddev->pers->make_request(mddev, bio); in md_submit_flush_data()
428 mddev->flush_bio = NULL; in md_submit_flush_data()
429 wake_up(&mddev->sb_wait); in md_submit_flush_data()
432 void md_flush_request(struct mddev *mddev, struct bio *bio) in md_flush_request() argument
434 spin_lock_irq(&mddev->lock); in md_flush_request()
435 wait_event_lock_irq(mddev->sb_wait, in md_flush_request()
436 !mddev->flush_bio, in md_flush_request()
437 mddev->lock); in md_flush_request()
438 mddev->flush_bio = bio; in md_flush_request()
439 spin_unlock_irq(&mddev->lock); in md_flush_request()
441 INIT_WORK(&mddev->flush_work, submit_flushes); in md_flush_request()
442 queue_work(md_wq, &mddev->flush_work); in md_flush_request()
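
The flush machinery excerpted above (md_flush_request() -> submit_flushes() -> md_end_flush() -> md_submit_flush_data()) is a "biased counter" completion scheme: mddev->flush_pending is seeded with 1, each empty FLUSH bio sent to a member device adds one, every completion subtracts one, and whoever brings the count to zero queues the work item that resubmits the original bio. A generic sketch of the idiom follows; flush_ctx, sub_io_done() and issue_all() are illustrative names, not md symbols.

	#include <linux/atomic.h>
	#include <linux/workqueue.h>

	struct flush_ctx {
		atomic_t pending;
		struct work_struct done_work;	/* set up elsewhere with INIT_WORK() */
	};

	/* Called from each sub-I/O's completion handler. */
	static void sub_io_done(struct flush_ctx *ctx)
	{
		if (atomic_dec_and_test(&ctx->pending))
			schedule_work(&ctx->done_work);
	}

	static void issue_all(struct flush_ctx *ctx, int ndevs)
	{
		int i;

		/* The bias of 1 keeps the count from reaching zero while we are
		 * still submitting; dropping it at the end hands completion off
		 * to the last sub-I/O, or queues done_work right here if none
		 * were actually issued. */
		atomic_set(&ctx->pending, 1);
		for (i = 0; i < ndevs; i++) {
			atomic_inc(&ctx->pending);
			/* ... submit an empty FLUSH bio whose end_io calls sub_io_done(ctx) ... */
		}
		sub_io_done(ctx);
	}
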
448 struct mddev *mddev = cb->data; in md_unplug() local
449 md_wakeup_thread(mddev->thread); in md_unplug()
454 static inline struct mddev *mddev_get(struct mddev *mddev) in mddev_get() argument
456 atomic_inc(&mddev->active); in mddev_get()
457 return mddev; in mddev_get()
462 static void mddev_put(struct mddev *mddev) in mddev_put() argument
466 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) in mddev_put()
468 if (!mddev->raid_disks && list_empty(&mddev->disks) && in mddev_put()
469 mddev->ctime == 0 && !mddev->hold_active) { in mddev_put()
472 list_del_init(&mddev->all_mddevs); in mddev_put()
473 bs = mddev->bio_set; in mddev_put()
474 mddev->bio_set = NULL; in mddev_put()
475 if (mddev->gendisk) { in mddev_put()
481 INIT_WORK(&mddev->del_work, mddev_delayed_delete); in mddev_put()
482 queue_work(md_misc_wq, &mddev->del_work); in mddev_put()
484 kfree(mddev); in mddev_put()
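
mddev_get()/mddev_put() above rely on the atomic_dec_and_lock() idiom: only the caller that drops the last reference takes all_mddevs_lock, and it already holds that lock when it unlinks the object and chooses between freeing it and deferring deletion to a workqueue. A generic sketch under illustrative names (struct obj, obj_list, obj_list_lock), not the md symbols:

	#include <linux/atomic.h>
	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct obj {
		atomic_t refcount;
		struct list_head node;
		dev_t unit;
	};

	static LIST_HEAD(obj_list);
	static DEFINE_SPINLOCK(obj_list_lock);

	static struct obj *obj_get(struct obj *o)
	{
		atomic_inc(&o->refcount);
		return o;
	}

	static void obj_put(struct obj *o)
	{
		/* True only when the count hits zero; returns with the lock held. */
		if (!atomic_dec_and_lock(&o->refcount, &obj_list_lock))
			return;
		list_del_init(&o->node);
		spin_unlock(&obj_list_lock);
		kfree(o);	/* mddev_put() may instead queue delayed deletion */
	}
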
493 void mddev_init(struct mddev *mddev) in mddev_init() argument
495 mutex_init(&mddev->open_mutex); in mddev_init()
496 mutex_init(&mddev->reconfig_mutex); in mddev_init()
497 mutex_init(&mddev->bitmap_info.mutex); in mddev_init()
498 INIT_LIST_HEAD(&mddev->disks); in mddev_init()
499 INIT_LIST_HEAD(&mddev->all_mddevs); in mddev_init()
500 setup_timer(&mddev->safemode_timer, md_safemode_timeout, in mddev_init()
501 (unsigned long) mddev); in mddev_init()
502 atomic_set(&mddev->active, 1); in mddev_init()
503 atomic_set(&mddev->openers, 0); in mddev_init()
504 atomic_set(&mddev->active_io, 0); in mddev_init()
505 spin_lock_init(&mddev->lock); in mddev_init()
506 atomic_set(&mddev->flush_pending, 0); in mddev_init()
507 init_waitqueue_head(&mddev->sb_wait); in mddev_init()
508 init_waitqueue_head(&mddev->recovery_wait); in mddev_init()
509 mddev->reshape_position = MaxSector; in mddev_init()
510 mddev->reshape_backwards = 0; in mddev_init()
511 mddev->last_sync_action = "none"; in mddev_init()
512 mddev->resync_min = 0; in mddev_init()
513 mddev->resync_max = MaxSector; in mddev_init()
514 mddev->level = LEVEL_NONE; in mddev_init()
518 static struct mddev *mddev_find(dev_t unit) in mddev_find()
520 struct mddev *mddev, *new = NULL; in mddev_find() local
529 list_for_each_entry(mddev, &all_mddevs, all_mddevs) in mddev_find()
530 if (mddev->unit == unit) { in mddev_find()
531 mddev_get(mddev); in mddev_find()
534 return mddev; in mddev_find()
562 list_for_each_entry(mddev, &all_mddevs, all_mddevs) in mddev_find()
563 if (mddev->unit == dev) { in mddev_find()
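
The two list walks excerpted from mddev_find() show the usual find-or-create shape: search all_mddevs under all_mddevs_lock, and on a miss drop the lock, allocate a candidate (the "new" pointer in the locals above), retake the lock and search again before inserting, discarding the candidate if another caller raced in first. A sketch reusing struct obj/obj_list from the previous sketch; the allocation and retry details are illustrative, not a transcription of mddev_find().

	static struct obj *obj_find_or_create(dev_t unit)
	{
		struct obj *o, *new = NULL;

	retry:
		spin_lock(&obj_list_lock);
		list_for_each_entry(o, &obj_list, node) {
			if (o->unit == unit) {
				obj_get(o);		/* take the reference under the lock */
				spin_unlock(&obj_list_lock);
				kfree(new);		/* never allocated, or we lost the race */
				return o;
			}
		}
		if (new) {
			new->unit = unit;
			atomic_set(&new->refcount, 1);
			list_add(&new->node, &obj_list);
			spin_unlock(&obj_list_lock);
			return new;
		}
		spin_unlock(&obj_list_lock);

		new = kzalloc(sizeof(*new), GFP_KERNEL);	/* may sleep, so done unlocked */
		if (!new)
			return NULL;
		goto retry;
	}
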
594 void mddev_unlock(struct mddev *mddev) in mddev_unlock() argument
596 if (mddev->to_remove) { in mddev_unlock()
609 struct attribute_group *to_remove = mddev->to_remove; in mddev_unlock()
610 mddev->to_remove = NULL; in mddev_unlock()
611 mddev->sysfs_active = 1; in mddev_unlock()
612 mutex_unlock(&mddev->reconfig_mutex); in mddev_unlock()
614 if (mddev->kobj.sd) { in mddev_unlock()
616 sysfs_remove_group(&mddev->kobj, to_remove); in mddev_unlock()
617 if (mddev->pers == NULL || in mddev_unlock()
618 mddev->pers->sync_request == NULL) { in mddev_unlock()
619 sysfs_remove_group(&mddev->kobj, &md_redundancy_group); in mddev_unlock()
620 if (mddev->sysfs_action) in mddev_unlock()
621 sysfs_put(mddev->sysfs_action); in mddev_unlock()
622 mddev->sysfs_action = NULL; in mddev_unlock()
625 mddev->sysfs_active = 0; in mddev_unlock()
627 mutex_unlock(&mddev->reconfig_mutex); in mddev_unlock()
633 md_wakeup_thread(mddev->thread); in mddev_unlock()
638 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr) in md_find_rdev_nr_rcu() argument
642 rdev_for_each_rcu(rdev, mddev) in md_find_rdev_nr_rcu()
650 static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev) in find_rdev() argument
654 rdev_for_each(rdev, mddev) in find_rdev()
661 static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev) in find_rdev_rcu() argument
665 rdev_for_each_rcu(rdev, mddev) in find_rdev_rcu()
723 struct mddev *mddev = rdev->mddev; in super_written() local
727 md_error(mddev, rdev); in super_written()
730 if (atomic_dec_and_test(&mddev->pending_writes)) in super_written()
731 wake_up(&mddev->sb_wait); in super_written()
735 void md_super_write(struct mddev *mddev, struct md_rdev *rdev, in md_super_write() argument
744 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev); in md_super_write()
752 atomic_inc(&mddev->pending_writes); in md_super_write()
756 void md_super_wait(struct mddev *mddev) in md_super_wait() argument
759 wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0); in md_super_wait()
765 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev); in sync_page_io()
772 else if (rdev->mddev->reshape_position != MaxSector && in sync_page_io()
773 (rdev->mddev->reshape_backwards == in sync_page_io()
774 (sector >= rdev->mddev->reshape_position))) in sync_page_io()
915 int (*validate_super)(struct mddev *mddev,
917 void (*sync_super)(struct mddev *mddev,
933 int md_check_no_bitmap(struct mddev *mddev) in md_check_no_bitmap() argument
935 if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset) in md_check_no_bitmap()
938 mdname(mddev), mddev->pers->name); in md_check_no_bitmap()
1045 static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev) in super_90_validate() argument
1057 if (mddev->raid_disks == 0) { in super_90_validate()
1058 mddev->major_version = 0; in super_90_validate()
1059 mddev->minor_version = sb->minor_version; in super_90_validate()
1060 mddev->patch_version = sb->patch_version; in super_90_validate()
1061 mddev->external = 0; in super_90_validate()
1062 mddev->chunk_sectors = sb->chunk_size >> 9; in super_90_validate()
1063 mddev->ctime = sb->ctime; in super_90_validate()
1064 mddev->utime = sb->utime; in super_90_validate()
1065 mddev->level = sb->level; in super_90_validate()
1066 mddev->clevel[0] = 0; in super_90_validate()
1067 mddev->layout = sb->layout; in super_90_validate()
1068 mddev->raid_disks = sb->raid_disks; in super_90_validate()
1069 mddev->dev_sectors = ((sector_t)sb->size) * 2; in super_90_validate()
1070 mddev->events = ev1; in super_90_validate()
1071 mddev->bitmap_info.offset = 0; in super_90_validate()
1072 mddev->bitmap_info.space = 0; in super_90_validate()
1074 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; in super_90_validate()
1075 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); in super_90_validate()
1076 mddev->reshape_backwards = 0; in super_90_validate()
1078 if (mddev->minor_version >= 91) { in super_90_validate()
1079 mddev->reshape_position = sb->reshape_position; in super_90_validate()
1080 mddev->delta_disks = sb->delta_disks; in super_90_validate()
1081 mddev->new_level = sb->new_level; in super_90_validate()
1082 mddev->new_layout = sb->new_layout; in super_90_validate()
1083 mddev->new_chunk_sectors = sb->new_chunk >> 9; in super_90_validate()
1084 if (mddev->delta_disks < 0) in super_90_validate()
1085 mddev->reshape_backwards = 1; in super_90_validate()
1087 mddev->reshape_position = MaxSector; in super_90_validate()
1088 mddev->delta_disks = 0; in super_90_validate()
1089 mddev->new_level = mddev->level; in super_90_validate()
1090 mddev->new_layout = mddev->layout; in super_90_validate()
1091 mddev->new_chunk_sectors = mddev->chunk_sectors; in super_90_validate()
1095 mddev->recovery_cp = MaxSector; in super_90_validate()
1099 mddev->recovery_cp = sb->recovery_cp; in super_90_validate()
1101 mddev->recovery_cp = 0; in super_90_validate()
1104 memcpy(mddev->uuid+0, &sb->set_uuid0, 4); in super_90_validate()
1105 memcpy(mddev->uuid+4, &sb->set_uuid1, 4); in super_90_validate()
1106 memcpy(mddev->uuid+8, &sb->set_uuid2, 4); in super_90_validate()
1107 memcpy(mddev->uuid+12,&sb->set_uuid3, 4); in super_90_validate()
1109 mddev->max_disks = MD_SB_DISKS; in super_90_validate()
1112 mddev->bitmap_info.file == NULL) { in super_90_validate()
1113 mddev->bitmap_info.offset = in super_90_validate()
1114 mddev->bitmap_info.default_offset; in super_90_validate()
1115 mddev->bitmap_info.space = in super_90_validate()
1116 mddev->bitmap_info.default_space; in super_90_validate()
1119 } else if (mddev->pers == NULL) { in super_90_validate()
1125 if (ev1 < mddev->events) in super_90_validate()
1127 } else if (mddev->bitmap) { in super_90_validate()
1131 if (ev1 < mddev->bitmap->events_cleared) in super_90_validate()
1133 if (ev1 < mddev->events) in super_90_validate()
1136 if (ev1 < mddev->events) in super_90_validate()
1141 if (mddev->level != LEVEL_MULTIPATH) { in super_90_validate()
1155 if (mddev->minor_version >= 91) { in super_90_validate()
1170 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev) in super_90_sync() argument
1174 int next_spare = mddev->raid_disks; in super_90_sync()
1196 sb->major_version = mddev->major_version; in super_90_sync()
1197 sb->patch_version = mddev->patch_version; in super_90_sync()
1199 memcpy(&sb->set_uuid0, mddev->uuid+0, 4); in super_90_sync()
1200 memcpy(&sb->set_uuid1, mddev->uuid+4, 4); in super_90_sync()
1201 memcpy(&sb->set_uuid2, mddev->uuid+8, 4); in super_90_sync()
1202 memcpy(&sb->set_uuid3, mddev->uuid+12,4); in super_90_sync()
1204 sb->ctime = mddev->ctime; in super_90_sync()
1205 sb->level = mddev->level; in super_90_sync()
1206 sb->size = mddev->dev_sectors / 2; in super_90_sync()
1207 sb->raid_disks = mddev->raid_disks; in super_90_sync()
1208 sb->md_minor = mddev->md_minor; in super_90_sync()
1210 sb->utime = mddev->utime; in super_90_sync()
1212 sb->events_hi = (mddev->events>>32); in super_90_sync()
1213 sb->events_lo = (u32)mddev->events; in super_90_sync()
1215 if (mddev->reshape_position == MaxSector) in super_90_sync()
1219 sb->reshape_position = mddev->reshape_position; in super_90_sync()
1220 sb->new_level = mddev->new_level; in super_90_sync()
1221 sb->delta_disks = mddev->delta_disks; in super_90_sync()
1222 sb->new_layout = mddev->new_layout; in super_90_sync()
1223 sb->new_chunk = mddev->new_chunk_sectors << 9; in super_90_sync()
1225 mddev->minor_version = sb->minor_version; in super_90_sync()
1226 if (mddev->in_sync) in super_90_sync()
1228 sb->recovery_cp = mddev->recovery_cp; in super_90_sync()
1229 sb->cp_events_hi = (mddev->events>>32); in super_90_sync()
1230 sb->cp_events_lo = (u32)mddev->events; in super_90_sync()
1231 if (mddev->recovery_cp == MaxSector) in super_90_sync()
1236 sb->layout = mddev->layout; in super_90_sync()
1237 sb->chunk_size = mddev->chunk_sectors << 9; in super_90_sync()
1239 if (mddev->bitmap && mddev->bitmap_info.file == NULL) in super_90_sync()
1243 rdev_for_each(rdev2, mddev) { in super_90_sync()
1289 for (i=0 ; i < mddev->raid_disks ; i++) { in super_90_sync()
1315 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) in super_90_rdev_size_change()
1317 if (rdev->mddev->bitmap_info.offset) in super_90_rdev_size_change()
1325 if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1) in super_90_rdev_size_change()
1327 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, in super_90_rdev_size_change()
1329 md_super_wait(rdev->mddev); in super_90_rdev_size_change()
1534 static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) in super_1_validate() argument
1545 if (mddev->raid_disks == 0) { in super_1_validate()
1546 mddev->major_version = 1; in super_1_validate()
1547 mddev->patch_version = 0; in super_1_validate()
1548 mddev->external = 0; in super_1_validate()
1549 mddev->chunk_sectors = le32_to_cpu(sb->chunksize); in super_1_validate()
1550 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1); in super_1_validate()
1551 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1); in super_1_validate()
1552 mddev->level = le32_to_cpu(sb->level); in super_1_validate()
1553 mddev->clevel[0] = 0; in super_1_validate()
1554 mddev->layout = le32_to_cpu(sb->layout); in super_1_validate()
1555 mddev->raid_disks = le32_to_cpu(sb->raid_disks); in super_1_validate()
1556 mddev->dev_sectors = le64_to_cpu(sb->size); in super_1_validate()
1557 mddev->events = ev1; in super_1_validate()
1558 mddev->bitmap_info.offset = 0; in super_1_validate()
1559 mddev->bitmap_info.space = 0; in super_1_validate()
1563 mddev->bitmap_info.default_offset = 1024 >> 9; in super_1_validate()
1564 mddev->bitmap_info.default_space = (4096-1024) >> 9; in super_1_validate()
1565 mddev->reshape_backwards = 0; in super_1_validate()
1567 mddev->recovery_cp = le64_to_cpu(sb->resync_offset); in super_1_validate()
1568 memcpy(mddev->uuid, sb->set_uuid, 16); in super_1_validate()
1570 mddev->max_disks = (4096-256)/2; in super_1_validate()
1573 mddev->bitmap_info.file == NULL) { in super_1_validate()
1574 mddev->bitmap_info.offset = in super_1_validate()
1581 if (mddev->minor_version > 0) in super_1_validate()
1582 mddev->bitmap_info.space = 0; in super_1_validate()
1583 else if (mddev->bitmap_info.offset > 0) in super_1_validate()
1584 mddev->bitmap_info.space = in super_1_validate()
1585 8 - mddev->bitmap_info.offset; in super_1_validate()
1587 mddev->bitmap_info.space = in super_1_validate()
1588 -mddev->bitmap_info.offset; in super_1_validate()
1592 mddev->reshape_position = le64_to_cpu(sb->reshape_position); in super_1_validate()
1593 mddev->delta_disks = le32_to_cpu(sb->delta_disks); in super_1_validate()
1594 mddev->new_level = le32_to_cpu(sb->new_level); in super_1_validate()
1595 mddev->new_layout = le32_to_cpu(sb->new_layout); in super_1_validate()
1596 mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk); in super_1_validate()
1597 if (mddev->delta_disks < 0 || in super_1_validate()
1598 (mddev->delta_disks == 0 && in super_1_validate()
1601 mddev->reshape_backwards = 1; in super_1_validate()
1603 mddev->reshape_position = MaxSector; in super_1_validate()
1604 mddev->delta_disks = 0; in super_1_validate()
1605 mddev->new_level = mddev->level; in super_1_validate()
1606 mddev->new_layout = mddev->layout; in super_1_validate()
1607 mddev->new_chunk_sectors = mddev->chunk_sectors; in super_1_validate()
1610 } else if (mddev->pers == NULL) { in super_1_validate()
1618 if (ev1 < mddev->events) in super_1_validate()
1620 } else if (mddev->bitmap) { in super_1_validate()
1624 if (ev1 < mddev->bitmap->events_cleared) in super_1_validate()
1626 if (ev1 < mddev->events) in super_1_validate()
1629 if (ev1 < mddev->events) in super_1_validate()
1633 if (mddev->level != LEVEL_MULTIPATH) { in super_1_validate()
1656 if (mddev->recovery_cp == MaxSector) in super_1_validate()
1657 set_bit(MD_JOURNAL_CLEAN, &mddev->flags); in super_1_validate()
1678 set_bit(MD_HAS_JOURNAL, &mddev->flags); in super_1_validate()
1685 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) in super_1_sync() argument
1699 sb->utime = cpu_to_le64((__u64)mddev->utime); in super_1_sync()
1700 sb->events = cpu_to_le64(mddev->events); in super_1_sync()
1701 if (mddev->in_sync) in super_1_sync()
1702 sb->resync_offset = cpu_to_le64(mddev->recovery_cp); in super_1_sync()
1703 else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags)) in super_1_sync()
1710 sb->raid_disks = cpu_to_le32(mddev->raid_disks); in super_1_sync()
1711 sb->size = cpu_to_le64(mddev->dev_sectors); in super_1_sync()
1712 sb->chunksize = cpu_to_le32(mddev->chunk_sectors); in super_1_sync()
1713 sb->level = cpu_to_le32(mddev->level); in super_1_sync()
1714 sb->layout = cpu_to_le32(mddev->layout); in super_1_sync()
1723 if (mddev->bitmap && mddev->bitmap_info.file == NULL) { in super_1_sync()
1724 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset); in super_1_sync()
1734 if (rdev->saved_raid_disk >= 0 && mddev->bitmap) in super_1_sync()
1745 if (mddev->reshape_position != MaxSector) { in super_1_sync()
1747 sb->reshape_position = cpu_to_le64(mddev->reshape_position); in super_1_sync()
1748 sb->new_layout = cpu_to_le32(mddev->new_layout); in super_1_sync()
1749 sb->delta_disks = cpu_to_le32(mddev->delta_disks); in super_1_sync()
1750 sb->new_level = cpu_to_le32(mddev->new_level); in super_1_sync()
1751 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors); in super_1_sync()
1752 if (mddev->delta_disks == 0 && in super_1_sync()
1753 mddev->reshape_backwards) in super_1_sync()
1764 if (mddev_is_clustered(mddev)) in super_1_sync()
1771 md_error(mddev, rdev); in super_1_sync()
1802 rdev_for_each(rdev2, mddev) in super_1_sync()
1819 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) in super_1_sync()
1822 rdev_for_each(rdev2, mddev) { in super_1_sync()
1844 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) in super_1_rdev_size_change()
1854 } else if (rdev->mddev->bitmap_info.offset) { in super_1_rdev_size_change()
1871 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, in super_1_rdev_size_change()
1873 md_super_wait(rdev->mddev); in super_1_rdev_size_change()
1889 if (rdev->mddev->minor_version == 0) in super_1_allow_new_offset()
1900 bitmap = rdev->mddev->bitmap; in super_1_allow_new_offset()
1901 if (bitmap && !rdev->mddev->bitmap_info.file && in super_1_allow_new_offset()
1902 rdev->sb_start + rdev->mddev->bitmap_info.offset + in super_1_allow_new_offset()
1932 static void sync_super(struct mddev *mddev, struct md_rdev *rdev) in sync_super() argument
1934 if (mddev->sync_super) { in sync_super()
1935 mddev->sync_super(mddev, rdev); in sync_super()
1939 BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types)); in sync_super()
1941 super_types[mddev->major_version].sync_super(mddev, rdev); in sync_super()
1944 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2) in match_mddev_units()
1979 int md_integrity_register(struct mddev *mddev) in md_integrity_register() argument
1983 if (list_empty(&mddev->disks)) in md_integrity_register()
1985 if (!mddev->gendisk || blk_get_integrity(mddev->gendisk)) in md_integrity_register()
1987 rdev_for_each(rdev, mddev) { in md_integrity_register()
2009 blk_integrity_register(mddev->gendisk, in md_integrity_register()
2012 printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev)); in md_integrity_register()
2013 if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) { in md_integrity_register()
2015 mdname(mddev)); in md_integrity_register()
2026 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) in md_integrity_add_rdev() argument
2032 if (!mddev->gendisk) in md_integrity_add_rdev()
2036 bi_mddev = blk_get_integrity(mddev->gendisk); in md_integrity_add_rdev()
2041 if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) { in md_integrity_add_rdev()
2043 mdname(mddev), bdevname(rdev->bdev, name)); in md_integrity_add_rdev()
2051 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) in bind_rdev_to_array() argument
2058 if (find_rdev(mddev, rdev->bdev->bd_dev)) in bind_rdev_to_array()
2062 if (rdev->sectors && (mddev->dev_sectors == 0 || in bind_rdev_to_array()
2063 rdev->sectors < mddev->dev_sectors)) { in bind_rdev_to_array()
2064 if (mddev->pers) { in bind_rdev_to_array()
2069 if (mddev->level > 0) in bind_rdev_to_array()
2072 mddev->dev_sectors = rdev->sectors; in bind_rdev_to_array()
2082 if (mddev->pers) in bind_rdev_to_array()
2083 choice = mddev->raid_disks; in bind_rdev_to_array()
2084 while (md_find_rdev_nr_rcu(mddev, choice)) in bind_rdev_to_array()
2088 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) { in bind_rdev_to_array()
2094 if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) { in bind_rdev_to_array()
2096 mdname(mddev), mddev->max_disks); in bind_rdev_to_array()
2102 rdev->mddev = mddev; in bind_rdev_to_array()
2105 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) in bind_rdev_to_array()
2113 list_add_rcu(&rdev->same_set, &mddev->disks); in bind_rdev_to_array()
2114 bd_link_disk_holder(rdev->bdev, mddev->gendisk); in bind_rdev_to_array()
2117 mddev->recovery_disabled++; in bind_rdev_to_array()
2123 b, mdname(mddev)); in bind_rdev_to_array()
2138 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk); in unbind_rdev_from_array()
2141 rdev->mddev = NULL; in unbind_rdev_from_array()
2209 static void export_array(struct mddev *mddev) in export_array() argument
2213 while (!list_empty(&mddev->disks)) { in export_array()
2214 rdev = list_first_entry(&mddev->disks, struct md_rdev, in export_array()
2218 mddev->raid_disks = 0; in export_array()
2219 mddev->major_version = 0; in export_array()
2222 static void sync_sbs(struct mddev *mddev, int nospares) in sync_sbs() argument
2231 rdev_for_each(rdev, mddev) { in sync_sbs()
2232 if (rdev->sb_events == mddev->events || in sync_sbs()
2235 rdev->sb_events+1 == mddev->events)) { in sync_sbs()
2239 sync_super(mddev, rdev); in sync_sbs()
2245 static bool does_sb_need_changing(struct mddev *mddev) in does_sb_need_changing() argument
2252 rdev_for_each(rdev, mddev) in does_sb_need_changing()
2262 rdev_for_each(rdev, mddev) { in does_sb_need_changing()
2274 if ((mddev->dev_sectors != le64_to_cpu(sb->size)) || in does_sb_need_changing()
2275 (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) || in does_sb_need_changing()
2276 (mddev->layout != le64_to_cpu(sb->layout)) || in does_sb_need_changing()
2277 (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) || in does_sb_need_changing()
2278 (mddev->chunk_sectors != le32_to_cpu(sb->chunksize))) in does_sb_need_changing()
2284 void md_update_sb(struct mddev *mddev, int force_change) in md_update_sb() argument
2292 if (mddev->ro) { in md_update_sb()
2294 set_bit(MD_CHANGE_DEVS, &mddev->flags); in md_update_sb()
2298 if (mddev_is_clustered(mddev)) { in md_update_sb()
2299 if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags)) in md_update_sb()
2301 ret = md_cluster_ops->metadata_update_start(mddev); in md_update_sb()
2303 if (!does_sb_need_changing(mddev)) { in md_update_sb()
2305 md_cluster_ops->metadata_update_cancel(mddev); in md_update_sb()
2306 clear_bit(MD_CHANGE_PENDING, &mddev->flags); in md_update_sb()
2312 rdev_for_each(rdev, mddev) { in md_update_sb()
2314 mddev->delta_disks >= 0 && in md_update_sb()
2317 mddev->curr_resync_completed > rdev->recovery_offset) in md_update_sb()
2318 rdev->recovery_offset = mddev->curr_resync_completed; in md_update_sb()
2321 if (!mddev->persistent) { in md_update_sb()
2322 clear_bit(MD_CHANGE_CLEAN, &mddev->flags); in md_update_sb()
2323 clear_bit(MD_CHANGE_DEVS, &mddev->flags); in md_update_sb()
2324 if (!mddev->external) { in md_update_sb()
2325 clear_bit(MD_CHANGE_PENDING, &mddev->flags); in md_update_sb()
2326 rdev_for_each(rdev, mddev) { in md_update_sb()
2330 md_error(mddev, rdev); in md_update_sb()
2337 wake_up(&mddev->sb_wait); in md_update_sb()
2341 spin_lock(&mddev->lock); in md_update_sb()
2343 mddev->utime = get_seconds(); in md_update_sb()
2345 if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags)) in md_update_sb()
2347 if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags)) in md_update_sb()
2355 if (mddev->degraded) in md_update_sb()
2367 sync_req = mddev->in_sync; in md_update_sb()
2372 && (mddev->in_sync && mddev->recovery_cp == MaxSector) in md_update_sb()
2373 && mddev->can_decrease_events in md_update_sb()
2374 && mddev->events != 1) { in md_update_sb()
2375 mddev->events--; in md_update_sb()
2376 mddev->can_decrease_events = 0; in md_update_sb()
2379 mddev->events ++; in md_update_sb()
2380 mddev->can_decrease_events = nospares; in md_update_sb()
2388 WARN_ON(mddev->events == 0); in md_update_sb()
2390 rdev_for_each(rdev, mddev) { in md_update_sb()
2397 sync_sbs(mddev, nospares); in md_update_sb()
2398 spin_unlock(&mddev->lock); in md_update_sb()
2401 mdname(mddev), mddev->in_sync); in md_update_sb()
2403 bitmap_update_sb(mddev->bitmap); in md_update_sb()
2404 rdev_for_each(rdev, mddev) { in md_update_sb()
2411 md_super_write(mddev,rdev, in md_update_sb()
2417 rdev->sb_events = mddev->events; in md_update_sb()
2419 md_super_write(mddev, rdev, in md_update_sb()
2430 if (mddev->level == LEVEL_MULTIPATH) in md_update_sb()
2434 md_super_wait(mddev); in md_update_sb()
2437 spin_lock(&mddev->lock); in md_update_sb()
2438 if (mddev->in_sync != sync_req || in md_update_sb()
2439 test_bit(MD_CHANGE_DEVS, &mddev->flags)) { in md_update_sb()
2441 spin_unlock(&mddev->lock); in md_update_sb()
2444 clear_bit(MD_CHANGE_PENDING, &mddev->flags); in md_update_sb()
2445 spin_unlock(&mddev->lock); in md_update_sb()
2446 wake_up(&mddev->sb_wait); in md_update_sb()
2447 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in md_update_sb()
2448 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); in md_update_sb()
2450 rdev_for_each(rdev, mddev) { in md_update_sb()
2460 if (mddev_is_clustered(mddev) && ret == 0) in md_update_sb()
2461 md_cluster_ops->metadata_update_finish(mddev); in md_update_sb()
2467 struct mddev *mddev = rdev->mddev; in add_bound_rdev() local
2470 if (!mddev->pers->hot_remove_disk) { in add_bound_rdev()
2475 super_types[mddev->major_version]. in add_bound_rdev()
2476 validate_super(mddev, rdev); in add_bound_rdev()
2477 err = mddev->pers->hot_add_disk(mddev, rdev); in add_bound_rdev()
2486 set_bit(MD_CHANGE_DEVS, &mddev->flags); in add_bound_rdev()
2487 if (mddev->degraded) in add_bound_rdev()
2488 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in add_bound_rdev()
2489 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in add_bound_rdev()
2490 md_new_event(mddev); in add_bound_rdev()
2491 md_wakeup_thread(mddev->thread); in add_bound_rdev()
2590 if (cmd_match(buf, "faulty") && rdev->mddev->pers) { in state_store()
2591 md_error(rdev->mddev, rdev); in state_store()
2600 struct mddev *mddev = rdev->mddev; in state_store() local
2602 if (mddev_is_clustered(mddev)) in state_store()
2603 err = md_cluster_ops->remove_disk(mddev, rdev); in state_store()
2607 if (mddev->pers) in state_store()
2608 md_update_sb(mddev, 1); in state_store()
2609 md_new_event(mddev); in state_store()
2627 md_error(rdev->mddev, rdev); in state_store()
2632 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); in state_store()
2633 md_wakeup_thread(rdev->mddev->thread); in state_store()
2641 if (rdev->mddev->pers == NULL) { in state_store()
2662 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); in state_store()
2663 md_wakeup_thread(rdev->mddev->thread); in state_store()
2676 if (rdev->mddev->pers) in state_store()
2684 if (rdev->mddev->pers) in state_store()
2698 if (!mddev_is_clustered(rdev->mddev) || in state_store()
2760 if (rdev->mddev->pers && slot == -1) { in slot_store()
2771 if (rdev->mddev->pers->hot_remove_disk == NULL) in slot_store()
2774 remove_and_add_spares(rdev->mddev, rdev); in slot_store()
2777 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); in slot_store()
2778 md_wakeup_thread(rdev->mddev->thread); in slot_store()
2779 } else if (rdev->mddev->pers) { in slot_store()
2788 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery)) in slot_store()
2791 if (rdev->mddev->pers->hot_add_disk == NULL) in slot_store()
2794 if (slot >= rdev->mddev->raid_disks && in slot_store()
2795 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) in slot_store()
2805 err = rdev->mddev->pers-> in slot_store()
2806 hot_add_disk(rdev->mddev, rdev); in slot_store()
2812 if (sysfs_link_rdev(rdev->mddev, rdev)) in slot_store()
2816 if (slot >= rdev->mddev->raid_disks && in slot_store()
2817 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) in slot_store()
2844 if (rdev->mddev->pers && rdev->raid_disk >= 0) in offset_store()
2846 if (rdev->sectors && rdev->mddev->external) in offset_store()
2868 struct mddev *mddev = rdev->mddev; in new_offset_store() local
2873 if (mddev->sync_thread || in new_offset_store()
2874 test_bit(MD_RECOVERY_RUNNING,&mddev->recovery)) in new_offset_store()
2882 + mddev->dev_sectors > rdev->sectors) in new_offset_store()
2891 mddev->reshape_backwards) in new_offset_store()
2898 !mddev->reshape_backwards) in new_offset_store()
2901 if (mddev->pers && mddev->persistent && in new_offset_store()
2902 !super_types[mddev->major_version] in new_offset_store()
2907 mddev->reshape_backwards = 1; in new_offset_store()
2909 mddev->reshape_backwards = 0; in new_offset_store()
2954 struct mddev *my_mddev = rdev->mddev; in rdev_size_store()
2988 struct mddev *mddev; in rdev_size_store() local
2993 for_each_mddev(mddev, tmp) { in rdev_size_store()
2996 rdev_for_each(rdev2, mddev) in rdev_size_store()
3006 mddev_put(mddev); in rdev_size_store()
3048 if (rdev->mddev->pers && in recovery_start_store()
3114 if (!rdev->mddev) in rdev_attr_show()
3126 struct mddev *mddev = rdev->mddev; in rdev_attr_store() local
3132 rv = mddev ? mddev_lock(mddev): -EBUSY; in rdev_attr_store()
3134 if (rdev->mddev == NULL) in rdev_attr_store()
3138 mddev_unlock(mddev); in rdev_attr_store()
3270 static void analyze_sbs(struct mddev *mddev) in analyze_sbs() argument
3277 rdev_for_each_safe(rdev, tmp, mddev) in analyze_sbs()
3278 switch (super_types[mddev->major_version]. in analyze_sbs()
3279 load_super(rdev, freshest, mddev->minor_version)) { in analyze_sbs()
3293 super_types[mddev->major_version]. in analyze_sbs()
3294 validate_super(mddev, freshest); in analyze_sbs()
3297 rdev_for_each_safe(rdev, tmp, mddev) { in analyze_sbs()
3298 if (mddev->max_disks && in analyze_sbs()
3299 (rdev->desc_nr >= mddev->max_disks || in analyze_sbs()
3300 i > mddev->max_disks)) { in analyze_sbs()
3303 mdname(mddev), bdevname(rdev->bdev, b), in analyze_sbs()
3304 mddev->max_disks); in analyze_sbs()
3309 if (super_types[mddev->major_version]. in analyze_sbs()
3310 validate_super(mddev, rdev)) { in analyze_sbs()
3318 if (mddev->level == LEVEL_MULTIPATH) { in analyze_sbs()
3323 (mddev->raid_disks - min(0, mddev->delta_disks)) && in analyze_sbs()
3372 safe_delay_show(struct mddev *mddev, char *page) in safe_delay_show() argument
3374 int msec = (mddev->safemode_delay*1000)/HZ; in safe_delay_show()
3378 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len) in safe_delay_store() argument
3382 if (mddev_is_clustered(mddev)) { in safe_delay_store()
3390 mddev->safemode_delay = 0; in safe_delay_store()
3392 unsigned long old_delay = mddev->safemode_delay; in safe_delay_store()
3397 mddev->safemode_delay = new_delay; in safe_delay_store()
3399 mod_timer(&mddev->safemode_timer, jiffies+1); in safe_delay_store()
3407 level_show(struct mddev *mddev, char *page) in level_show() argument
3411 spin_lock(&mddev->lock); in level_show()
3412 p = mddev->pers; in level_show()
3415 else if (mddev->clevel[0]) in level_show()
3416 ret = sprintf(page, "%s\n", mddev->clevel); in level_show()
3417 else if (mddev->level != LEVEL_NONE) in level_show()
3418 ret = sprintf(page, "%d\n", mddev->level); in level_show()
3421 spin_unlock(&mddev->lock); in level_show()
3426 level_store(struct mddev *mddev, const char *buf, size_t len) in level_store() argument
3439 rv = mddev_lock(mddev); in level_store()
3443 if (mddev->pers == NULL) { in level_store()
3444 strncpy(mddev->clevel, buf, slen); in level_store()
3445 if (mddev->clevel[slen-1] == '\n') in level_store()
3447 mddev->clevel[slen] = 0; in level_store()
3448 mddev->level = LEVEL_NONE; in level_store()
3453 if (mddev->ro) in level_store()
3463 if (mddev->sync_thread || in level_store()
3464 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || in level_store()
3465 mddev->reshape_position != MaxSector || in level_store()
3466 mddev->sysfs_active) in level_store()
3470 if (!mddev->pers->quiesce) { in level_store()
3472 mdname(mddev), mddev->pers->name); in level_store()
3496 if (pers == mddev->pers) { in level_store()
3505 mdname(mddev), clevel); in level_store()
3510 rdev_for_each(rdev, mddev) in level_store()
3516 priv = pers->takeover(mddev); in level_store()
3518 mddev->new_level = mddev->level; in level_store()
3519 mddev->new_layout = mddev->layout; in level_store()
3520 mddev->new_chunk_sectors = mddev->chunk_sectors; in level_store()
3521 mddev->raid_disks -= mddev->delta_disks; in level_store()
3522 mddev->delta_disks = 0; in level_store()
3523 mddev->reshape_backwards = 0; in level_store()
3526 mdname(mddev), clevel); in level_store()
3532 mddev_suspend(mddev); in level_store()
3533 mddev_detach(mddev); in level_store()
3535 spin_lock(&mddev->lock); in level_store()
3536 oldpers = mddev->pers; in level_store()
3537 oldpriv = mddev->private; in level_store()
3538 mddev->pers = pers; in level_store()
3539 mddev->private = priv; in level_store()
3540 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); in level_store()
3541 mddev->level = mddev->new_level; in level_store()
3542 mddev->layout = mddev->new_layout; in level_store()
3543 mddev->chunk_sectors = mddev->new_chunk_sectors; in level_store()
3544 mddev->delta_disks = 0; in level_store()
3545 mddev->reshape_backwards = 0; in level_store()
3546 mddev->degraded = 0; in level_store()
3547 spin_unlock(&mddev->lock); in level_store()
3550 mddev->external) { in level_store()
3558 mddev->in_sync = 0; in level_store()
3559 mddev->safemode_delay = 0; in level_store()
3560 mddev->safemode = 0; in level_store()
3563 oldpers->free(mddev, oldpriv); in level_store()
3568 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group)) in level_store()
3571 mdname(mddev)); in level_store()
3572 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action"); in level_store()
3577 if (mddev->to_remove == NULL) in level_store()
3578 mddev->to_remove = &md_redundancy_group; in level_store()
3581 rdev_for_each(rdev, mddev) { in level_store()
3584 if (rdev->new_raid_disk >= mddev->raid_disks) in level_store()
3588 sysfs_unlink_rdev(mddev, rdev); in level_store()
3590 rdev_for_each(rdev, mddev) { in level_store()
3599 if (sysfs_link_rdev(mddev, rdev)) in level_store()
3602 rdev->raid_disk, mdname(mddev)); in level_store()
3610 mddev->in_sync = 1; in level_store()
3611 del_timer_sync(&mddev->safemode_timer); in level_store()
3613 blk_set_stacking_limits(&mddev->queue->limits); in level_store()
3614 pers->run(mddev); in level_store()
3615 set_bit(MD_CHANGE_DEVS, &mddev->flags); in level_store()
3616 mddev_resume(mddev); in level_store()
3617 if (!mddev->thread) in level_store()
3618 md_update_sb(mddev, 1); in level_store()
3619 sysfs_notify(&mddev->kobj, NULL, "level"); in level_store()
3620 md_new_event(mddev); in level_store()
3623 mddev_unlock(mddev); in level_store()
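
Stitched together, the level_store() lines above give the runtime level-change sequence: build the new state with the target personality's takeover(), quiesce with mddev_suspend() and mddev_detach(), swap pers/private and commit the new geometry under mddev->lock, free the old private data, then run() the new personality and mddev_resume(). The sketch below condenses that ordering from the excerpts; error paths, module refcounting, safemode handling and the sysfs relinking of member rdevs are omitted, so treat it as an outline rather than a drop-in replacement.

	static int change_level_sketch(struct mddev *mddev, struct md_personality *pers)
	{
		struct md_personality *oldpers;
		void *priv, *oldpriv;

		priv = pers->takeover(mddev);		/* new level's private data */
		if (IS_ERR(priv))
			return PTR_ERR(priv);

		mddev_suspend(mddev);			/* drain I/O, quiesce the old personality */
		mddev_detach(mddev);			/* stop its helper threads */

		spin_lock(&mddev->lock);
		oldpers = mddev->pers;
		oldpriv = mddev->private;
		mddev->pers = pers;
		mddev->private = priv;
		strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
		mddev->level = mddev->new_level;	/* commit geometry chosen by takeover() */
		mddev->layout = mddev->new_layout;
		mddev->chunk_sectors = mddev->new_chunk_sectors;
		mddev->delta_disks = 0;
		spin_unlock(&mddev->lock);

		oldpers->free(mddev, oldpriv);		/* old personality's teardown */
		pers->run(mddev);			/* start the new one */
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		mddev_resume(mddev);
		return 0;
	}
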
3631 layout_show(struct mddev *mddev, char *page) in layout_show() argument
3634 if (mddev->reshape_position != MaxSector && in layout_show()
3635 mddev->layout != mddev->new_layout) in layout_show()
3637 mddev->new_layout, mddev->layout); in layout_show()
3638 return sprintf(page, "%d\n", mddev->layout); in layout_show()
3642 layout_store(struct mddev *mddev, const char *buf, size_t len) in layout_store() argument
3650 err = mddev_lock(mddev); in layout_store()
3654 if (mddev->pers) { in layout_store()
3655 if (mddev->pers->check_reshape == NULL) in layout_store()
3657 else if (mddev->ro) in layout_store()
3660 mddev->new_layout = n; in layout_store()
3661 err = mddev->pers->check_reshape(mddev); in layout_store()
3663 mddev->new_layout = mddev->layout; in layout_store()
3666 mddev->new_layout = n; in layout_store()
3667 if (mddev->reshape_position == MaxSector) in layout_store()
3668 mddev->layout = n; in layout_store()
3670 mddev_unlock(mddev); in layout_store()
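
layout_store() above also illustrates the store-handler pattern that chunk_size_store(), raid_disks_store() and most of the later sysfs attributes repeat: parse the value first, take the reconfig lock with mddev_lock(), route the change through the personality's check_reshape() when the array is live (rolling back on failure) or merely stage it for assembly when it is not, then mddev_unlock(). A sketch of that shape; the parsing via kstrtouint() and the exact error codes are shown for illustration, not quoted from the excerpts.

	static ssize_t
	example_attr_store(struct mddev *mddev, const char *buf, size_t len)
	{
		unsigned int n;
		int err;

		err = kstrtouint(buf, 10, &n);
		if (err < 0)
			return err;

		err = mddev_lock(mddev);
		if (err)
			return err;
		if (mddev->pers) {
			if (mddev->pers->check_reshape == NULL)
				err = -EBUSY;			/* this level cannot reshape */
			else if (mddev->ro)
				err = -EROFS;
			else {
				mddev->new_layout = n;		/* stage the change */
				err = mddev->pers->check_reshape(mddev);
				if (err)
					mddev->new_layout = mddev->layout;	/* roll back */
			}
		} else {
			mddev->new_layout = n;
			if (mddev->reshape_position == MaxSector)
				mddev->layout = n;		/* no reshape pending: apply directly */
		}
		mddev_unlock(mddev);
		return err ?: len;
	}
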
3677 raid_disks_show(struct mddev *mddev, char *page) in raid_disks_show() argument
3679 if (mddev->raid_disks == 0) in raid_disks_show()
3681 if (mddev->reshape_position != MaxSector && in raid_disks_show()
3682 mddev->delta_disks != 0) in raid_disks_show()
3683 return sprintf(page, "%d (%d)\n", mddev->raid_disks, in raid_disks_show()
3684 mddev->raid_disks - mddev->delta_disks); in raid_disks_show()
3685 return sprintf(page, "%d\n", mddev->raid_disks); in raid_disks_show()
3688 static int update_raid_disks(struct mddev *mddev, int raid_disks);
3691 raid_disks_store(struct mddev *mddev, const char *buf, size_t len) in raid_disks_store() argument
3700 err = mddev_lock(mddev); in raid_disks_store()
3703 if (mddev->pers) in raid_disks_store()
3704 err = update_raid_disks(mddev, n); in raid_disks_store()
3705 else if (mddev->reshape_position != MaxSector) { in raid_disks_store()
3707 int olddisks = mddev->raid_disks - mddev->delta_disks; in raid_disks_store()
3710 rdev_for_each(rdev, mddev) { in raid_disks_store()
3719 mddev->delta_disks = n - olddisks; in raid_disks_store()
3720 mddev->raid_disks = n; in raid_disks_store()
3721 mddev->reshape_backwards = (mddev->delta_disks < 0); in raid_disks_store()
3723 mddev->raid_disks = n; in raid_disks_store()
3725 mddev_unlock(mddev); in raid_disks_store()
3732 chunk_size_show(struct mddev *mddev, char *page) in chunk_size_show() argument
3734 if (mddev->reshape_position != MaxSector && in chunk_size_show()
3735 mddev->chunk_sectors != mddev->new_chunk_sectors) in chunk_size_show()
3737 mddev->new_chunk_sectors << 9, in chunk_size_show()
3738 mddev->chunk_sectors << 9); in chunk_size_show()
3739 return sprintf(page, "%d\n", mddev->chunk_sectors << 9); in chunk_size_show()
3743 chunk_size_store(struct mddev *mddev, const char *buf, size_t len) in chunk_size_store() argument
3752 err = mddev_lock(mddev); in chunk_size_store()
3755 if (mddev->pers) { in chunk_size_store()
3756 if (mddev->pers->check_reshape == NULL) in chunk_size_store()
3758 else if (mddev->ro) in chunk_size_store()
3761 mddev->new_chunk_sectors = n >> 9; in chunk_size_store()
3762 err = mddev->pers->check_reshape(mddev); in chunk_size_store()
3764 mddev->new_chunk_sectors = mddev->chunk_sectors; in chunk_size_store()
3767 mddev->new_chunk_sectors = n >> 9; in chunk_size_store()
3768 if (mddev->reshape_position == MaxSector) in chunk_size_store()
3769 mddev->chunk_sectors = n >> 9; in chunk_size_store()
3771 mddev_unlock(mddev); in chunk_size_store()
3778 resync_start_show(struct mddev *mddev, char *page) in resync_start_show() argument
3780 if (mddev->recovery_cp == MaxSector) in resync_start_show()
3782 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp); in resync_start_show()
3786 resync_start_store(struct mddev *mddev, const char *buf, size_t len) in resync_start_store() argument
3801 err = mddev_lock(mddev); in resync_start_store()
3804 if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) in resync_start_store()
3808 mddev->recovery_cp = n; in resync_start_store()
3809 if (mddev->pers) in resync_start_store()
3810 set_bit(MD_CHANGE_CLEAN, &mddev->flags); in resync_start_store()
3812 mddev_unlock(mddev); in resync_start_store()
3871 array_state_show(struct mddev *mddev, char *page) in array_state_show() argument
3875 if (mddev->pers) in array_state_show()
3876 switch(mddev->ro) { in array_state_show()
3884 if (mddev->in_sync) in array_state_show()
3886 else if (test_bit(MD_CHANGE_PENDING, &mddev->flags)) in array_state_show()
3888 else if (mddev->safemode) in array_state_show()
3894 if (list_empty(&mddev->disks) && in array_state_show()
3895 mddev->raid_disks == 0 && in array_state_show()
3896 mddev->dev_sectors == 0) in array_state_show()
3904 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
3905 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
3906 static int do_md_run(struct mddev *mddev);
3907 static int restart_array(struct mddev *mddev);
3910 array_state_store(struct mddev *mddev, const char *buf, size_t len) in array_state_store() argument
3915 if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) { in array_state_store()
3919 spin_lock(&mddev->lock); in array_state_store()
3921 restart_array(mddev); in array_state_store()
3922 clear_bit(MD_CHANGE_PENDING, &mddev->flags); in array_state_store()
3923 wake_up(&mddev->sb_wait); in array_state_store()
3926 restart_array(mddev); in array_state_store()
3927 if (atomic_read(&mddev->writes_pending) == 0) { in array_state_store()
3928 if (mddev->in_sync == 0) { in array_state_store()
3929 mddev->in_sync = 1; in array_state_store()
3930 if (mddev->safemode == 1) in array_state_store()
3931 mddev->safemode = 0; in array_state_store()
3932 set_bit(MD_CHANGE_CLEAN, &mddev->flags); in array_state_store()
3938 spin_unlock(&mddev->lock); in array_state_store()
3941 err = mddev_lock(mddev); in array_state_store()
3950 err = do_md_stop(mddev, 0, NULL); in array_state_store()
3954 if (mddev->pers) in array_state_store()
3955 err = do_md_stop(mddev, 2, NULL); in array_state_store()
3962 if (mddev->pers) in array_state_store()
3963 err = md_set_readonly(mddev, NULL); in array_state_store()
3965 mddev->ro = 1; in array_state_store()
3966 set_disk_ro(mddev->gendisk, 1); in array_state_store()
3967 err = do_md_run(mddev); in array_state_store()
3971 if (mddev->pers) { in array_state_store()
3972 if (mddev->ro == 0) in array_state_store()
3973 err = md_set_readonly(mddev, NULL); in array_state_store()
3974 else if (mddev->ro == 1) in array_state_store()
3975 err = restart_array(mddev); in array_state_store()
3977 mddev->ro = 2; in array_state_store()
3978 set_disk_ro(mddev->gendisk, 0); in array_state_store()
3981 mddev->ro = 2; in array_state_store()
3982 err = do_md_run(mddev); in array_state_store()
3986 if (mddev->pers) { in array_state_store()
3987 err = restart_array(mddev); in array_state_store()
3990 spin_lock(&mddev->lock); in array_state_store()
3991 if (atomic_read(&mddev->writes_pending) == 0) { in array_state_store()
3992 if (mddev->in_sync == 0) { in array_state_store()
3993 mddev->in_sync = 1; in array_state_store()
3994 if (mddev->safemode == 1) in array_state_store()
3995 mddev->safemode = 0; in array_state_store()
3996 set_bit(MD_CHANGE_CLEAN, &mddev->flags); in array_state_store()
4001 spin_unlock(&mddev->lock); in array_state_store()
4006 if (mddev->pers) { in array_state_store()
4007 err = restart_array(mddev); in array_state_store()
4010 clear_bit(MD_CHANGE_PENDING, &mddev->flags); in array_state_store()
4011 wake_up(&mddev->sb_wait); in array_state_store()
4014 mddev->ro = 0; in array_state_store()
4015 set_disk_ro(mddev->gendisk, 0); in array_state_store()
4016 err = do_md_run(mddev); in array_state_store()
4026 if (mddev->hold_active == UNTIL_IOCTL) in array_state_store()
4027 mddev->hold_active = 0; in array_state_store()
4028 sysfs_notify_dirent_safe(mddev->sysfs_state); in array_state_store()
4030 mddev_unlock(mddev); in array_state_store()
4037 max_corrected_read_errors_show(struct mddev *mddev, char *page) { in max_corrected_read_errors_show() argument
4039 atomic_read(&mddev->max_corr_read_errors)); in max_corrected_read_errors_show()
4043 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len) in max_corrected_read_errors_store() argument
4051 atomic_set(&mddev->max_corr_read_errors, n); in max_corrected_read_errors_store()
4060 null_show(struct mddev *mddev, char *page) in null_show() argument
4066 new_dev_store(struct mddev *mddev, const char *buf, size_t len) in new_dev_store() argument
4094 err = mddev_lock(mddev); in new_dev_store()
4097 if (mddev->persistent) { in new_dev_store()
4098 rdev = md_import_device(dev, mddev->major_version, in new_dev_store()
4099 mddev->minor_version); in new_dev_store()
4100 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { in new_dev_store()
4102 = list_entry(mddev->disks.next, in new_dev_store()
4104 err = super_types[mddev->major_version] in new_dev_store()
4105 .load_super(rdev, rdev0, mddev->minor_version); in new_dev_store()
4109 } else if (mddev->external) in new_dev_store()
4115 mddev_unlock(mddev); in new_dev_store()
4118 err = bind_rdev_to_array(rdev, mddev); in new_dev_store()
4122 mddev_unlock(mddev); in new_dev_store()
4130 bitmap_store(struct mddev *mddev, const char *buf, size_t len) in bitmap_store() argument
4136 err = mddev_lock(mddev); in bitmap_store()
4139 if (!mddev->bitmap) in bitmap_store()
4151 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk); in bitmap_store()
4154 bitmap_unplug(mddev->bitmap); /* flush the bits to disk */ in bitmap_store()
4156 mddev_unlock(mddev); in bitmap_store()
4164 size_show(struct mddev *mddev, char *page) in size_show() argument
4167 (unsigned long long)mddev->dev_sectors / 2); in size_show()
4170 static int update_size(struct mddev *mddev, sector_t num_sectors);
4173 size_store(struct mddev *mddev, const char *buf, size_t len) in size_store() argument
4184 err = mddev_lock(mddev); in size_store()
4187 if (mddev->pers) { in size_store()
4188 err = update_size(mddev, sectors); in size_store()
4189 md_update_sb(mddev, 1); in size_store()
4191 if (mddev->dev_sectors == 0 || in size_store()
4192 mddev->dev_sectors > sectors) in size_store()
4193 mddev->dev_sectors = sectors; in size_store()
4197 mddev_unlock(mddev); in size_store()
4211 metadata_show(struct mddev *mddev, char *page) in metadata_show() argument
4213 if (mddev->persistent) in metadata_show()
4215 mddev->major_version, mddev->minor_version); in metadata_show()
4216 else if (mddev->external) in metadata_show()
4217 return sprintf(page, "external:%s\n", mddev->metadata_type); in metadata_show()
4223 metadata_store(struct mddev *mddev, const char *buf, size_t len) in metadata_store() argument
4233 err = mddev_lock(mddev); in metadata_store()
4237 if (mddev->external && strncmp(buf, "external:", 9) == 0) in metadata_store()
4239 else if (!list_empty(&mddev->disks)) in metadata_store()
4244 mddev->persistent = 0; in metadata_store()
4245 mddev->external = 0; in metadata_store()
4246 mddev->major_version = 0; in metadata_store()
4247 mddev->minor_version = 90; in metadata_store()
4252 if (namelen >= sizeof(mddev->metadata_type)) in metadata_store()
4253 namelen = sizeof(mddev->metadata_type)-1; in metadata_store()
4254 strncpy(mddev->metadata_type, buf+9, namelen); in metadata_store()
4255 mddev->metadata_type[namelen] = 0; in metadata_store()
4256 if (namelen && mddev->metadata_type[namelen-1] == '\n') in metadata_store()
4257 mddev->metadata_type[--namelen] = 0; in metadata_store()
4258 mddev->persistent = 0; in metadata_store()
4259 mddev->external = 1; in metadata_store()
4260 mddev->major_version = 0; in metadata_store()
4261 mddev->minor_version = 90; in metadata_store()
4275 mddev->major_version = major; in metadata_store()
4276 mddev->minor_version = minor; in metadata_store()
4277 mddev->persistent = 1; in metadata_store()
4278 mddev->external = 0; in metadata_store()
4281 mddev_unlock(mddev); in metadata_store()
4289 action_show(struct mddev *mddev, char *page) in action_show() argument
4292 unsigned long recovery = mddev->recovery; in action_show()
4296 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) { in action_show()
4308 else if (mddev->reshape_position != MaxSector) in action_show()
4315 action_store(struct mddev *mddev, const char *page, size_t len) in action_store() argument
4317 if (!mddev->pers || !mddev->pers->sync_request) in action_store()
4323 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4325 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4326 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && in action_store()
4327 mddev_lock(mddev) == 0) { in action_store()
4329 if (mddev->sync_thread) { in action_store()
4330 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in action_store()
4331 md_reap_sync_thread(mddev); in action_store()
4333 mddev_unlock(mddev); in action_store()
4335 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in action_store()
4338 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4340 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4341 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in action_store()
4344 if (mddev->pers->start_reshape == NULL) in action_store()
4346 err = mddev_lock(mddev); in action_store()
4348 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in action_store()
4351 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4352 err = mddev->pers->start_reshape(mddev); in action_store()
4354 mddev_unlock(mddev); in action_store()
4358 sysfs_notify(&mddev->kobj, NULL, "degraded"); in action_store()
4361 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); in action_store()
4364 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4365 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in action_store()
4366 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); in action_store()
4368 if (mddev->ro == 2) { in action_store()
4372 mddev->ro = 0; in action_store()
4373 md_wakeup_thread(mddev->sync_thread); in action_store()
4375 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in action_store()
4376 md_wakeup_thread(mddev->thread); in action_store()
4377 sysfs_notify_dirent_safe(mddev->sysfs_action); in action_store()
4385 last_sync_action_show(struct mddev *mddev, char *page) in last_sync_action_show() argument
4387 return sprintf(page, "%s\n", mddev->last_sync_action); in last_sync_action_show()
4393 mismatch_cnt_show(struct mddev *mddev, char *page) in mismatch_cnt_show() argument
4397 atomic64_read(&mddev->resync_mismatches)); in mismatch_cnt_show()
4403 sync_min_show(struct mddev *mddev, char *page) in sync_min_show() argument
4405 return sprintf(page, "%d (%s)\n", speed_min(mddev), in sync_min_show()
4406 mddev->sync_speed_min ? "local": "system"); in sync_min_show()
4410 sync_min_store(struct mddev *mddev, const char *buf, size_t len) in sync_min_store() argument
4424 mddev->sync_speed_min = min; in sync_min_store()
4432 sync_max_show(struct mddev *mddev, char *page) in sync_max_show() argument
4434 return sprintf(page, "%d (%s)\n", speed_max(mddev), in sync_max_show()
4435 mddev->sync_speed_max ? "local": "system"); in sync_max_show()
4439 sync_max_store(struct mddev *mddev, const char *buf, size_t len) in sync_max_store() argument
4453 mddev->sync_speed_max = max; in sync_max_store()
4461 degraded_show(struct mddev *mddev, char *page) in degraded_show() argument
4463 return sprintf(page, "%d\n", mddev->degraded); in degraded_show()
4468 sync_force_parallel_show(struct mddev *mddev, char *page) in sync_force_parallel_show() argument
4470 return sprintf(page, "%d\n", mddev->parallel_resync); in sync_force_parallel_show()
4474 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len) in sync_force_parallel_store() argument
4484 mddev->parallel_resync = n; in sync_force_parallel_store()
4486 if (mddev->sync_thread) in sync_force_parallel_store()
4498 sync_speed_show(struct mddev *mddev, char *page) in sync_speed_show() argument
4501 if (mddev->curr_resync == 0) in sync_speed_show()
4503 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active); in sync_speed_show()
4504 dt = (jiffies - mddev->resync_mark) / HZ; in sync_speed_show()
4506 db = resync - mddev->resync_mark_cnt; in sync_speed_show()
4513 sync_completed_show(struct mddev *mddev, char *page) in sync_completed_show() argument
4517 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in sync_completed_show()
4520 if (mddev->curr_resync == 1 || in sync_completed_show()
4521 mddev->curr_resync == 2) in sync_completed_show()
4524 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || in sync_completed_show()
4525 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in sync_completed_show()
4526 max_sectors = mddev->resync_max_sectors; in sync_completed_show()
4528 max_sectors = mddev->dev_sectors; in sync_completed_show()
4530 resync = mddev->curr_resync_completed; in sync_completed_show()
4538 min_sync_show(struct mddev *mddev, char *page) in min_sync_show() argument
4541 (unsigned long long)mddev->resync_min); in min_sync_show()
4544 min_sync_store(struct mddev *mddev, const char *buf, size_t len) in min_sync_store() argument
4552 spin_lock(&mddev->lock); in min_sync_store()
4554 if (min > mddev->resync_max) in min_sync_store()
4558 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in min_sync_store()
4562 mddev->resync_min = round_down(min, 8); in min_sync_store()
4566 spin_unlock(&mddev->lock); in min_sync_store()
4574 max_sync_show(struct mddev *mddev, char *page) in max_sync_show() argument
4576 if (mddev->resync_max == MaxSector) in max_sync_show()
4580 (unsigned long long)mddev->resync_max); in max_sync_show()
4583 max_sync_store(struct mddev *mddev, const char *buf, size_t len) in max_sync_store() argument
4586 spin_lock(&mddev->lock); in max_sync_store()
4588 mddev->resync_max = MaxSector; in max_sync_store()
4596 if (max < mddev->resync_min) in max_sync_store()
4600 if (max < mddev->resync_max && in max_sync_store()
4601 mddev->ro == 0 && in max_sync_store()
4602 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in max_sync_store()
4606 chunk = mddev->chunk_sectors; in max_sync_store()
4614 mddev->resync_max = max; in max_sync_store()
4616 wake_up(&mddev->recovery_wait); in max_sync_store()
4619 spin_unlock(&mddev->lock); in max_sync_store()
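min_sync_store()/max_sync_store() above bound the region a requested resync may touch (sysfs names sync_min and sync_max); max_sync_store() also requires the upper bound to be a multiple of chunk_sectors. A speculative example of scrubbing only the start of an array, where the device name, the 4 GiB figure and its assumed chunk alignment are all illustrative (the "check" write is handled by action_store(), whose tail appears at the top of this listing):

/* Illustrative sketch only: limit a scrub to the first 4 GiB
 * (8388608 sectors) of md0 via sync_min/sync_max, then start a
 * "check" pass through the sync_action attribute. */
#include <stdio.h>

static int write_attr(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f)
                return -1;
        fprintf(f, "%s\n", val);
        return fclose(f);
}

int main(void)
{
        write_attr("/sys/block/md0/md/sync_min", "0");
        write_attr("/sys/block/md0/md/sync_max", "8388608"); /* sectors */
        write_attr("/sys/block/md0/md/sync_action", "check");
        return 0;
}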
4627 suspend_lo_show(struct mddev *mddev, char *page) in suspend_lo_show() argument
4629 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); in suspend_lo_show()
4633 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len) in suspend_lo_store() argument
4644 err = mddev_lock(mddev); in suspend_lo_store()
4648 if (mddev->pers == NULL || in suspend_lo_store()
4649 mddev->pers->quiesce == NULL) in suspend_lo_store()
4651 old = mddev->suspend_lo; in suspend_lo_store()
4652 mddev->suspend_lo = new; in suspend_lo_store()
4655 mddev->pers->quiesce(mddev, 2); in suspend_lo_store()
4658 mddev->pers->quiesce(mddev, 1); in suspend_lo_store()
4659 mddev->pers->quiesce(mddev, 0); in suspend_lo_store()
4663 mddev_unlock(mddev); in suspend_lo_store()
4670 suspend_hi_show(struct mddev *mddev, char *page) in suspend_hi_show() argument
4672 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi); in suspend_hi_show()
4676 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len) in suspend_hi_store() argument
4687 err = mddev_lock(mddev); in suspend_hi_store()
4691 if (mddev->pers == NULL || in suspend_hi_store()
4692 mddev->pers->quiesce == NULL) in suspend_hi_store()
4694 old = mddev->suspend_hi; in suspend_hi_store()
4695 mddev->suspend_hi = new; in suspend_hi_store()
4698 mddev->pers->quiesce(mddev, 2); in suspend_hi_store()
4701 mddev->pers->quiesce(mddev, 1); in suspend_hi_store()
4702 mddev->pers->quiesce(mddev, 0); in suspend_hi_store()
4706 mddev_unlock(mddev); in suspend_hi_store()
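The suspend_lo_store()/suspend_hi_store() pair above lets an outside agent quiesce I/O to a sector range: each takes mddev_lock(), updates the bound, and uses pers->quiesce() either to wake waiters or to wait out in-flight I/O, depending on whether the suspended window shrank or grew. A hypothetical use from userspace, with the device name and sector counts assumed for illustration:

/* Illustrative sketch only: block writes to the first 1 GiB of md0
 * (2097152 sectors) via the suspend_lo/suspend_hi attributes backed by
 * suspend_lo_store()/suspend_hi_store(), then lift the suspension. */
#include <stdio.h>

static int set_attr(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f)
                return -1;
        fprintf(f, "%s\n", val);
        return fclose(f);
}

int main(void)
{
        set_attr("/sys/block/md0/md/suspend_lo", "0");
        set_attr("/sys/block/md0/md/suspend_hi", "2097152");
        /* ... do the maintenance that needs this region quiet ... */
        set_attr("/sys/block/md0/md/suspend_hi", "0");
        return 0;
}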
4713 reshape_position_show(struct mddev *mddev, char *page) in reshape_position_show() argument
4715 if (mddev->reshape_position != MaxSector) in reshape_position_show()
4717 (unsigned long long)mddev->reshape_position); in reshape_position_show()
4723 reshape_position_store(struct mddev *mddev, const char *buf, size_t len) in reshape_position_store() argument
4734 err = mddev_lock(mddev); in reshape_position_store()
4738 if (mddev->pers) in reshape_position_store()
4740 mddev->reshape_position = new; in reshape_position_store()
4741 mddev->delta_disks = 0; in reshape_position_store()
4742 mddev->reshape_backwards = 0; in reshape_position_store()
4743 mddev->new_level = mddev->level; in reshape_position_store()
4744 mddev->new_layout = mddev->layout; in reshape_position_store()
4745 mddev->new_chunk_sectors = mddev->chunk_sectors; in reshape_position_store()
4746 rdev_for_each(rdev, mddev) in reshape_position_store()
4750 mddev_unlock(mddev); in reshape_position_store()
4759 reshape_direction_show(struct mddev *mddev, char *page) in reshape_direction_show() argument
4762 mddev->reshape_backwards ? "backwards" : "forwards"); in reshape_direction_show()
4766 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len) in reshape_direction_store() argument
4777 if (mddev->reshape_backwards == backwards) in reshape_direction_store()
4780 err = mddev_lock(mddev); in reshape_direction_store()
4784 if (mddev->delta_disks) in reshape_direction_store()
4786 else if (mddev->persistent && in reshape_direction_store()
4787 mddev->major_version == 0) in reshape_direction_store()
4790 mddev->reshape_backwards = backwards; in reshape_direction_store()
4791 mddev_unlock(mddev); in reshape_direction_store()
4800 array_size_show(struct mddev *mddev, char *page) in array_size_show() argument
4802 if (mddev->external_size) in array_size_show()
4804 (unsigned long long)mddev->array_sectors/2); in array_size_show()
4810 array_size_store(struct mddev *mddev, const char *buf, size_t len) in array_size_store() argument
4815 err = mddev_lock(mddev); in array_size_store()
4820 if (mddev->pers) in array_size_store()
4821 sectors = mddev->pers->size(mddev, 0, 0); in array_size_store()
4823 sectors = mddev->array_sectors; in array_size_store()
4825 mddev->external_size = 0; in array_size_store()
4829 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors) in array_size_store()
4832 mddev->external_size = 1; in array_size_store()
4836 mddev->array_sectors = sectors; in array_size_store()
4837 if (mddev->pers) { in array_size_store()
4838 set_capacity(mddev->gendisk, mddev->array_sectors); in array_size_store()
4839 revalidate_disk(mddev->gendisk); in array_size_store()
4842 mddev_unlock(mddev); in array_size_store()
4894 struct mddev *mddev = container_of(kobj, struct mddev, kobj); in md_attr_show() local
4900 if (list_empty(&mddev->all_mddevs)) { in md_attr_show()
4904 mddev_get(mddev); in md_attr_show()
4907 rv = entry->show(mddev, page); in md_attr_show()
4908 mddev_put(mddev); in md_attr_show()
4917 struct mddev *mddev = container_of(kobj, struct mddev, kobj); in md_attr_store() local
4925 if (list_empty(&mddev->all_mddevs)) { in md_attr_store()
4929 mddev_get(mddev); in md_attr_store()
4931 rv = entry->store(mddev, page, length); in md_attr_store()
4932 mddev_put(mddev); in md_attr_store()
4938 struct mddev *mddev = container_of(ko, struct mddev, kobj); in md_free() local
4940 if (mddev->sysfs_state) in md_free()
4941 sysfs_put(mddev->sysfs_state); in md_free()
4943 if (mddev->queue) in md_free()
4944 blk_cleanup_queue(mddev->queue); in md_free()
4945 if (mddev->gendisk) { in md_free()
4946 del_gendisk(mddev->gendisk); in md_free()
4947 put_disk(mddev->gendisk); in md_free()
4950 kfree(mddev); in md_free()
4967 struct mddev *mddev = container_of(ws, struct mddev, del_work); in mddev_delayed_delete() local
4969 sysfs_remove_group(&mddev->kobj, &md_bitmap_group); in mddev_delayed_delete()
4970 kobject_del(&mddev->kobj); in mddev_delayed_delete()
4971 kobject_put(&mddev->kobj); in mddev_delayed_delete()
4977 struct mddev *mddev = mddev_find(dev); in md_alloc() local
4984 if (!mddev) in md_alloc()
4987 partitioned = (MAJOR(mddev->unit) != MD_MAJOR); in md_alloc()
4989 unit = MINOR(mddev->unit) >> shift; in md_alloc()
4998 if (mddev->gendisk) in md_alloc()
5004 struct mddev *mddev2; in md_alloc()
5017 mddev->queue = blk_alloc_queue(GFP_KERNEL); in md_alloc()
5018 if (!mddev->queue) in md_alloc()
5020 mddev->queue->queuedata = mddev; in md_alloc()
5022 blk_queue_make_request(mddev->queue, md_make_request); in md_alloc()
5023 blk_set_stacking_limits(&mddev->queue->limits); in md_alloc()
5027 blk_cleanup_queue(mddev->queue); in md_alloc()
5028 mddev->queue = NULL; in md_alloc()
5031 disk->major = MAJOR(mddev->unit); in md_alloc()
5040 disk->private_data = mddev; in md_alloc()
5041 disk->queue = mddev->queue; in md_alloc()
5042 blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA); in md_alloc()
5048 mddev->gendisk = disk; in md_alloc()
5052 mutex_lock(&mddev->open_mutex); in md_alloc()
5055 error = kobject_init_and_add(&mddev->kobj, &md_ktype, in md_alloc()
5065 if (mddev->kobj.sd && in md_alloc()
5066 sysfs_create_group(&mddev->kobj, &md_bitmap_group)) in md_alloc()
5068 mutex_unlock(&mddev->open_mutex); in md_alloc()
5071 if (!error && mddev->kobj.sd) { in md_alloc()
5072 kobject_uevent(&mddev->kobj, KOBJ_ADD); in md_alloc()
5073 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state"); in md_alloc()
5075 mddev_put(mddev); in md_alloc()
5106 struct mddev *mddev = (struct mddev *) data; in md_safemode_timeout() local
5108 if (!atomic_read(&mddev->writes_pending)) { in md_safemode_timeout()
5109 mddev->safemode = 1; in md_safemode_timeout()
5110 if (mddev->external) in md_safemode_timeout()
5111 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_safemode_timeout()
5113 md_wakeup_thread(mddev->thread); in md_safemode_timeout()
5118 int md_run(struct mddev *mddev) in md_run() argument
5124 if (list_empty(&mddev->disks)) in md_run()
5128 if (mddev->pers) in md_run()
5131 if (mddev->sysfs_active) in md_run()
5137 if (!mddev->raid_disks) { in md_run()
5138 if (!mddev->persistent) in md_run()
5140 analyze_sbs(mddev); in md_run()
5143 if (mddev->level != LEVEL_NONE) in md_run()
5144 request_module("md-level-%d", mddev->level); in md_run()
5145 else if (mddev->clevel[0]) in md_run()
5146 request_module("md-%s", mddev->clevel); in md_run()
5153 rdev_for_each(rdev, mddev) { in md_run()
5166 if (mddev->dev_sectors && in md_run()
5167 rdev->data_offset + mddev->dev_sectors in md_run()
5170 mdname(mddev)); in md_run()
5177 mdname(mddev)); in md_run()
5184 if (mddev->bio_set == NULL) in md_run()
5185 mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0); in md_run()
5188 pers = find_pers(mddev->level, mddev->clevel); in md_run()
5191 if (mddev->level != LEVEL_NONE) in md_run()
5193 mddev->level); in md_run()
5196 mddev->clevel); in md_run()
5200 if (mddev->level != pers->level) { in md_run()
5201 mddev->level = pers->level; in md_run()
5202 mddev->new_level = pers->level; in md_run()
5204 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); in md_run()
5206 if (mddev->reshape_position != MaxSector && in md_run()
5221 rdev_for_each(rdev, mddev) in md_run()
5222 rdev_for_each(rdev2, mddev) { in md_run()
5230 mdname(mddev), in md_run()
5243 mddev->recovery = 0; in md_run()
5245 mddev->resync_max_sectors = mddev->dev_sectors; in md_run()
5247 mddev->ok_start_degraded = start_dirty_degraded; in md_run()
5249 if (start_readonly && mddev->ro == 0) in md_run()
5250 mddev->ro = 2; /* read-only, but switch on first write */ in md_run()
5252 err = pers->run(mddev); in md_run()
5255 else if (pers->size(mddev, 0, 0) < mddev->array_sectors) { in md_run()
5256 WARN_ONCE(!mddev->external_size, "%s: default size too small," in md_run()
5260 (unsigned long long)mddev->array_sectors / 2, in md_run()
5261 (unsigned long long)pers->size(mddev, 0, 0) / 2); in md_run()
5265 (mddev->bitmap_info.file || mddev->bitmap_info.offset)) { in md_run()
5268 bitmap = bitmap_create(mddev, -1); in md_run()
5272 mdname(mddev), err); in md_run()
5274 mddev->bitmap = bitmap; in md_run()
5278 mddev_detach(mddev); in md_run()
5279 if (mddev->private) in md_run()
5280 pers->free(mddev, mddev->private); in md_run()
5281 mddev->private = NULL; in md_run()
5283 bitmap_destroy(mddev); in md_run()
5286 if (mddev->queue) { in md_run()
5287 mddev->queue->backing_dev_info.congested_data = mddev; in md_run()
5288 mddev->queue->backing_dev_info.congested_fn = md_congested; in md_run()
5291 if (mddev->kobj.sd && in md_run()
5292 sysfs_create_group(&mddev->kobj, &md_redundancy_group)) in md_run()
5295 mdname(mddev)); in md_run()
5296 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action"); in md_run()
5297 } else if (mddev->ro == 2) /* auto-readonly not meaningful */ in md_run()
5298 mddev->ro = 0; in md_run()
5300 atomic_set(&mddev->writes_pending,0); in md_run()
5301 atomic_set(&mddev->max_corr_read_errors, in md_run()
5303 mddev->safemode = 0; in md_run()
5304 if (mddev_is_clustered(mddev)) in md_run()
5305 mddev->safemode_delay = 0; in md_run()
5307 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */ in md_run()
5308 mddev->in_sync = 1; in md_run()
5310 spin_lock(&mddev->lock); in md_run()
5311 mddev->pers = pers; in md_run()
5312 mddev->ready = 1; in md_run()
5313 spin_unlock(&mddev->lock); in md_run()
5314 rdev_for_each(rdev, mddev) in md_run()
5316 if (sysfs_link_rdev(mddev, rdev)) in md_run()
5319 if (mddev->degraded && !mddev->ro) in md_run()
5323 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_run()
5324 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_run()
5326 if (mddev->flags & MD_UPDATE_SB_FLAGS) in md_run()
5327 md_update_sb(mddev, 0); in md_run()
5329 md_new_event(mddev); in md_run()
5330 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_run()
5331 sysfs_notify_dirent_safe(mddev->sysfs_action); in md_run()
5332 sysfs_notify(&mddev->kobj, NULL, "degraded"); in md_run()
5337 static int do_md_run(struct mddev *mddev) in do_md_run() argument
5341 err = md_run(mddev); in do_md_run()
5344 err = bitmap_load(mddev); in do_md_run()
5346 bitmap_destroy(mddev); in do_md_run()
5350 if (mddev_is_clustered(mddev)) in do_md_run()
5351 md_allow_write(mddev); in do_md_run()
5353 md_wakeup_thread(mddev->thread); in do_md_run()
5354 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ in do_md_run()
5356 set_capacity(mddev->gendisk, mddev->array_sectors); in do_md_run()
5357 revalidate_disk(mddev->gendisk); in do_md_run()
5358 mddev->changed = 1; in do_md_run()
5359 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); in do_md_run()
5364 static int restart_array(struct mddev *mddev) in restart_array() argument
5366 struct gendisk *disk = mddev->gendisk; in restart_array()
5369 if (list_empty(&mddev->disks)) in restart_array()
5371 if (!mddev->pers) in restart_array()
5373 if (!mddev->ro) in restart_array()
5375 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { in restart_array()
5380 rdev_for_each_rcu(rdev, mddev) { in restart_array()
5394 mddev->safemode = 0; in restart_array()
5395 mddev->ro = 0; in restart_array()
5398 mdname(mddev)); in restart_array()
5400 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in restart_array()
5401 md_wakeup_thread(mddev->thread); in restart_array()
5402 md_wakeup_thread(mddev->sync_thread); in restart_array()
5403 sysfs_notify_dirent_safe(mddev->sysfs_state); in restart_array()
5407 static void md_clean(struct mddev *mddev) in md_clean() argument
5409 mddev->array_sectors = 0; in md_clean()
5410 mddev->external_size = 0; in md_clean()
5411 mddev->dev_sectors = 0; in md_clean()
5412 mddev->raid_disks = 0; in md_clean()
5413 mddev->recovery_cp = 0; in md_clean()
5414 mddev->resync_min = 0; in md_clean()
5415 mddev->resync_max = MaxSector; in md_clean()
5416 mddev->reshape_position = MaxSector; in md_clean()
5417 mddev->external = 0; in md_clean()
5418 mddev->persistent = 0; in md_clean()
5419 mddev->level = LEVEL_NONE; in md_clean()
5420 mddev->clevel[0] = 0; in md_clean()
5421 mddev->flags = 0; in md_clean()
5422 mddev->ro = 0; in md_clean()
5423 mddev->metadata_type[0] = 0; in md_clean()
5424 mddev->chunk_sectors = 0; in md_clean()
5425 mddev->ctime = mddev->utime = 0; in md_clean()
5426 mddev->layout = 0; in md_clean()
5427 mddev->max_disks = 0; in md_clean()
5428 mddev->events = 0; in md_clean()
5429 mddev->can_decrease_events = 0; in md_clean()
5430 mddev->delta_disks = 0; in md_clean()
5431 mddev->reshape_backwards = 0; in md_clean()
5432 mddev->new_level = LEVEL_NONE; in md_clean()
5433 mddev->new_layout = 0; in md_clean()
5434 mddev->new_chunk_sectors = 0; in md_clean()
5435 mddev->curr_resync = 0; in md_clean()
5436 atomic64_set(&mddev->resync_mismatches, 0); in md_clean()
5437 mddev->suspend_lo = mddev->suspend_hi = 0; in md_clean()
5438 mddev->sync_speed_min = mddev->sync_speed_max = 0; in md_clean()
5439 mddev->recovery = 0; in md_clean()
5440 mddev->in_sync = 0; in md_clean()
5441 mddev->changed = 0; in md_clean()
5442 mddev->degraded = 0; in md_clean()
5443 mddev->safemode = 0; in md_clean()
5444 mddev->private = NULL; in md_clean()
5445 mddev->bitmap_info.offset = 0; in md_clean()
5446 mddev->bitmap_info.default_offset = 0; in md_clean()
5447 mddev->bitmap_info.default_space = 0; in md_clean()
5448 mddev->bitmap_info.chunksize = 0; in md_clean()
5449 mddev->bitmap_info.daemon_sleep = 0; in md_clean()
5450 mddev->bitmap_info.max_write_behind = 0; in md_clean()
5453 static void __md_stop_writes(struct mddev *mddev) in __md_stop_writes() argument
5455 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in __md_stop_writes()
5457 if (mddev->sync_thread) { in __md_stop_writes()
5458 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in __md_stop_writes()
5459 md_reap_sync_thread(mddev); in __md_stop_writes()
5462 del_timer_sync(&mddev->safemode_timer); in __md_stop_writes()
5464 bitmap_flush(mddev); in __md_stop_writes()
5465 md_super_wait(mddev); in __md_stop_writes()
5467 if (mddev->ro == 0 && in __md_stop_writes()
5468 ((!mddev->in_sync && !mddev_is_clustered(mddev)) || in __md_stop_writes()
5469 (mddev->flags & MD_UPDATE_SB_FLAGS))) { in __md_stop_writes()
5471 if (!mddev_is_clustered(mddev)) in __md_stop_writes()
5472 mddev->in_sync = 1; in __md_stop_writes()
5473 md_update_sb(mddev, 1); in __md_stop_writes()
5477 void md_stop_writes(struct mddev *mddev) in md_stop_writes() argument
5479 mddev_lock_nointr(mddev); in md_stop_writes()
5480 __md_stop_writes(mddev); in md_stop_writes()
5481 mddev_unlock(mddev); in md_stop_writes()
5485 static void mddev_detach(struct mddev *mddev) in mddev_detach() argument
5487 struct bitmap *bitmap = mddev->bitmap; in mddev_detach()
5491 mdname(mddev)); in mddev_detach()
5496 if (mddev->pers && mddev->pers->quiesce) { in mddev_detach()
5497 mddev->pers->quiesce(mddev, 1); in mddev_detach()
5498 mddev->pers->quiesce(mddev, 0); in mddev_detach()
5500 md_unregister_thread(&mddev->thread); in mddev_detach()
5501 if (mddev->queue) in mddev_detach()
5502 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ in mddev_detach()
5505 static void __md_stop(struct mddev *mddev) in __md_stop() argument
5507 struct md_personality *pers = mddev->pers; in __md_stop()
5508 mddev_detach(mddev); in __md_stop()
5511 spin_lock(&mddev->lock); in __md_stop()
5512 mddev->ready = 0; in __md_stop()
5513 mddev->pers = NULL; in __md_stop()
5514 spin_unlock(&mddev->lock); in __md_stop()
5515 pers->free(mddev, mddev->private); in __md_stop()
5516 mddev->private = NULL; in __md_stop()
5517 if (pers->sync_request && mddev->to_remove == NULL) in __md_stop()
5518 mddev->to_remove = &md_redundancy_group; in __md_stop()
5520 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in __md_stop()
5523 void md_stop(struct mddev *mddev) in md_stop() argument
5528 __md_stop(mddev); in md_stop()
5529 bitmap_destroy(mddev); in md_stop()
5530 if (mddev->bio_set) in md_stop()
5531 bioset_free(mddev->bio_set); in md_stop()
5536 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) in md_set_readonly() argument
5541 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { in md_set_readonly()
5543 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in md_set_readonly()
5544 md_wakeup_thread(mddev->thread); in md_set_readonly()
5546 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in md_set_readonly()
5547 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_set_readonly()
5548 if (mddev->sync_thread) in md_set_readonly()
5551 wake_up_process(mddev->sync_thread->tsk); in md_set_readonly()
5553 if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags)) in md_set_readonly()
5555 mddev_unlock(mddev); in md_set_readonly()
5557 &mddev->recovery)); in md_set_readonly()
5558 wait_event(mddev->sb_wait, in md_set_readonly()
5559 !test_bit(MD_CHANGE_PENDING, &mddev->flags)); in md_set_readonly()
5560 mddev_lock_nointr(mddev); in md_set_readonly()
5562 mutex_lock(&mddev->open_mutex); in md_set_readonly()
5563 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || in md_set_readonly()
5564 mddev->sync_thread || in md_set_readonly()
5565 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || in md_set_readonly()
5566 (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) { in md_set_readonly()
5567 printk("md: %s still in use.\n",mdname(mddev)); in md_set_readonly()
5569 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in md_set_readonly()
5570 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_set_readonly()
5571 md_wakeup_thread(mddev->thread); in md_set_readonly()
5576 if (mddev->pers) { in md_set_readonly()
5577 __md_stop_writes(mddev); in md_set_readonly()
5580 if (mddev->ro==1) in md_set_readonly()
5582 mddev->ro = 1; in md_set_readonly()
5583 set_disk_ro(mddev->gendisk, 1); in md_set_readonly()
5584 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in md_set_readonly()
5585 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_set_readonly()
5586 md_wakeup_thread(mddev->thread); in md_set_readonly()
5587 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_set_readonly()
5591 mutex_unlock(&mddev->open_mutex); in md_set_readonly()
5599 static int do_md_stop(struct mddev *mddev, int mode, in do_md_stop() argument
5602 struct gendisk *disk = mddev->gendisk; in do_md_stop()
5606 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { in do_md_stop()
5608 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in do_md_stop()
5609 md_wakeup_thread(mddev->thread); in do_md_stop()
5611 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in do_md_stop()
5612 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in do_md_stop()
5613 if (mddev->sync_thread) in do_md_stop()
5616 wake_up_process(mddev->sync_thread->tsk); in do_md_stop()
5618 mddev_unlock(mddev); in do_md_stop()
5619 wait_event(resync_wait, (mddev->sync_thread == NULL && in do_md_stop()
5621 &mddev->recovery))); in do_md_stop()
5622 mddev_lock_nointr(mddev); in do_md_stop()
5624 mutex_lock(&mddev->open_mutex); in do_md_stop()
5625 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || in do_md_stop()
5626 mddev->sysfs_active || in do_md_stop()
5627 mddev->sync_thread || in do_md_stop()
5628 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || in do_md_stop()
5629 (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) { in do_md_stop()
5630 printk("md: %s still in use.\n",mdname(mddev)); in do_md_stop()
5631 mutex_unlock(&mddev->open_mutex); in do_md_stop()
5633 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in do_md_stop()
5634 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in do_md_stop()
5635 md_wakeup_thread(mddev->thread); in do_md_stop()
5639 if (mddev->pers) { in do_md_stop()
5640 if (mddev->ro) in do_md_stop()
5643 __md_stop_writes(mddev); in do_md_stop()
5644 __md_stop(mddev); in do_md_stop()
5645 mddev->queue->backing_dev_info.congested_fn = NULL; in do_md_stop()
5648 sysfs_notify_dirent_safe(mddev->sysfs_state); in do_md_stop()
5650 rdev_for_each(rdev, mddev) in do_md_stop()
5652 sysfs_unlink_rdev(mddev, rdev); in do_md_stop()
5655 mutex_unlock(&mddev->open_mutex); in do_md_stop()
5656 mddev->changed = 1; in do_md_stop()
5659 if (mddev->ro) in do_md_stop()
5660 mddev->ro = 0; in do_md_stop()
5662 mutex_unlock(&mddev->open_mutex); in do_md_stop()
5667 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); in do_md_stop()
5669 bitmap_destroy(mddev); in do_md_stop()
5670 if (mddev->bitmap_info.file) { in do_md_stop()
5671 struct file *f = mddev->bitmap_info.file; in do_md_stop()
5672 spin_lock(&mddev->lock); in do_md_stop()
5673 mddev->bitmap_info.file = NULL; in do_md_stop()
5674 spin_unlock(&mddev->lock); in do_md_stop()
5677 mddev->bitmap_info.offset = 0; in do_md_stop()
5679 export_array(mddev); in do_md_stop()
5681 md_clean(mddev); in do_md_stop()
5682 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); in do_md_stop()
5683 if (mddev->hold_active == UNTIL_STOP) in do_md_stop()
5684 mddev->hold_active = 0; in do_md_stop()
5686 md_new_event(mddev); in do_md_stop()
5687 sysfs_notify_dirent_safe(mddev->sysfs_state); in do_md_stop()
5692 static void autorun_array(struct mddev *mddev) in autorun_array() argument
5697 if (list_empty(&mddev->disks)) in autorun_array()
5702 rdev_for_each(rdev, mddev) { in autorun_array()
5708 err = do_md_run(mddev); in autorun_array()
5711 do_md_stop(mddev, 0, NULL); in autorun_array()
5730 struct mddev *mddev; in autorun_devices() local
5770 mddev = mddev_find(dev); in autorun_devices()
5771 if (!mddev || !mddev->gendisk) { in autorun_devices()
5772 if (mddev) in autorun_devices()
5773 mddev_put(mddev); in autorun_devices()
5778 if (mddev_lock(mddev)) in autorun_devices()
5780 mdname(mddev)); in autorun_devices()
5781 else if (mddev->raid_disks || mddev->major_version in autorun_devices()
5782 || !list_empty(&mddev->disks)) { in autorun_devices()
5785 mdname(mddev), bdevname(rdev0->bdev,b)); in autorun_devices()
5786 mddev_unlock(mddev); in autorun_devices()
5788 printk(KERN_INFO "md: created %s\n", mdname(mddev)); in autorun_devices()
5789 mddev->persistent = 1; in autorun_devices()
5792 if (bind_rdev_to_array(rdev, mddev)) in autorun_devices()
5795 autorun_array(mddev); in autorun_devices()
5796 mddev_unlock(mddev); in autorun_devices()
5805 mddev_put(mddev); in autorun_devices()
5825 static int get_array_info(struct mddev *mddev, void __user *arg) in get_array_info() argument
5833 rdev_for_each_rcu(rdev, mddev) { in get_array_info()
5847 info.major_version = mddev->major_version; in get_array_info()
5848 info.minor_version = mddev->minor_version; in get_array_info()
5850 info.ctime = mddev->ctime; in get_array_info()
5851 info.level = mddev->level; in get_array_info()
5852 info.size = mddev->dev_sectors / 2; in get_array_info()
5853 if (info.size != mddev->dev_sectors / 2) /* overflow */ in get_array_info()
5856 info.raid_disks = mddev->raid_disks; in get_array_info()
5857 info.md_minor = mddev->md_minor; in get_array_info()
5858 info.not_persistent= !mddev->persistent; in get_array_info()
5860 info.utime = mddev->utime; in get_array_info()
5862 if (mddev->in_sync) in get_array_info()
5864 if (mddev->bitmap && mddev->bitmap_info.offset) in get_array_info()
5866 if (mddev_is_clustered(mddev)) in get_array_info()
5873 info.layout = mddev->layout; in get_array_info()
5874 info.chunk_size = mddev->chunk_sectors << 9; in get_array_info()
5882 static int get_bitmap_file(struct mddev *mddev, void __user * arg) in get_bitmap_file() argument
5893 spin_lock(&mddev->lock); in get_bitmap_file()
5895 if (mddev->bitmap_info.file) { in get_bitmap_file()
5896 ptr = file_path(mddev->bitmap_info.file, file->pathname, in get_bitmap_file()
5904 spin_unlock(&mddev->lock); in get_bitmap_file()
5914 static int get_disk_info(struct mddev *mddev, void __user * arg) in get_disk_info() argument
5923 rdev = md_find_rdev_nr_rcu(mddev, info.number); in get_disk_info()
5952 static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info) in add_new_disk() argument
5958 if (mddev_is_clustered(mddev) && in add_new_disk()
5961 mdname(mddev)); in add_new_disk()
5968 if (!mddev->raid_disks) { in add_new_disk()
5971 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); in add_new_disk()
5978 if (!list_empty(&mddev->disks)) { in add_new_disk()
5980 = list_entry(mddev->disks.next, in add_new_disk()
5982 err = super_types[mddev->major_version] in add_new_disk()
5983 .load_super(rdev, rdev0, mddev->minor_version); in add_new_disk()
5993 err = bind_rdev_to_array(rdev, mddev); in add_new_disk()
6004 if (mddev->pers) { in add_new_disk()
6006 if (!mddev->pers->hot_add_disk) { in add_new_disk()
6009 mdname(mddev)); in add_new_disk()
6012 if (mddev->persistent) in add_new_disk()
6013 rdev = md_import_device(dev, mddev->major_version, in add_new_disk()
6014 mddev->minor_version); in add_new_disk()
6024 if (!mddev->persistent) { in add_new_disk()
6026 info->raid_disk < mddev->raid_disks) { in add_new_disk()
6034 super_types[mddev->major_version]. in add_new_disk()
6035 validate_super(mddev, rdev); in add_new_disk()
6056 if (mddev_is_clustered(mddev)) { in add_new_disk()
6061 err = md_cluster_ops->add_new_disk(mddev, rdev); in add_new_disk()
6070 err = bind_rdev_to_array(rdev, mddev); in add_new_disk()
6075 if (mddev_is_clustered(mddev)) { in add_new_disk()
6077 md_cluster_ops->new_disk_ack(mddev, (err == 0)); in add_new_disk()
6080 md_cluster_ops->add_new_disk_cancel(mddev); in add_new_disk()
6094 if (mddev->major_version != 0) { in add_new_disk()
6096 mdname(mddev)); in add_new_disk()
6110 if (info->raid_disk < mddev->raid_disks) in add_new_disk()
6115 if (rdev->raid_disk < mddev->raid_disks) in add_new_disk()
6122 if (!mddev->persistent) { in add_new_disk()
6129 err = bind_rdev_to_array(rdev, mddev); in add_new_disk()
6139 static int hot_remove_disk(struct mddev *mddev, dev_t dev) in hot_remove_disk() argument
6145 rdev = find_rdev(mddev, dev); in hot_remove_disk()
6149 if (mddev_is_clustered(mddev)) in hot_remove_disk()
6150 ret = md_cluster_ops->metadata_update_start(mddev); in hot_remove_disk()
6156 remove_and_add_spares(mddev, rdev); in hot_remove_disk()
6162 if (mddev_is_clustered(mddev) && ret == 0) in hot_remove_disk()
6163 md_cluster_ops->remove_disk(mddev, rdev); in hot_remove_disk()
6166 md_update_sb(mddev, 1); in hot_remove_disk()
6167 md_new_event(mddev); in hot_remove_disk()
6171 if (mddev_is_clustered(mddev) && ret == 0) in hot_remove_disk()
6172 md_cluster_ops->metadata_update_cancel(mddev); in hot_remove_disk()
6175 bdevname(rdev->bdev,b), mdname(mddev)); in hot_remove_disk()
6179 static int hot_add_disk(struct mddev *mddev, dev_t dev) in hot_add_disk() argument
6185 if (!mddev->pers) in hot_add_disk()
6188 if (mddev->major_version != 0) { in hot_add_disk()
6191 mdname(mddev)); in hot_add_disk()
6194 if (!mddev->pers->hot_add_disk) { in hot_add_disk()
6197 mdname(mddev)); in hot_add_disk()
6209 if (mddev->persistent) in hot_add_disk()
6219 bdevname(rdev->bdev,b), mdname(mddev)); in hot_add_disk()
6227 err = bind_rdev_to_array(rdev, mddev); in hot_add_disk()
6238 md_update_sb(mddev, 1); in hot_add_disk()
6243 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in hot_add_disk()
6244 md_wakeup_thread(mddev->thread); in hot_add_disk()
6245 md_new_event(mddev); in hot_add_disk()
6253 static int set_bitmap_file(struct mddev *mddev, int fd) in set_bitmap_file() argument
6257 if (mddev->pers) { in set_bitmap_file()
6258 if (!mddev->pers->quiesce || !mddev->thread) in set_bitmap_file()
6260 if (mddev->recovery || mddev->sync_thread) in set_bitmap_file()
6269 if (mddev->bitmap || mddev->bitmap_info.file) in set_bitmap_file()
6275 mdname(mddev)); in set_bitmap_file()
6282 mdname(mddev)); in set_bitmap_file()
6286 mdname(mddev)); in set_bitmap_file()
6290 mdname(mddev)); in set_bitmap_file()
6297 mddev->bitmap_info.file = f; in set_bitmap_file()
6298 mddev->bitmap_info.offset = 0; /* file overrides offset */ in set_bitmap_file()
6299 } else if (mddev->bitmap == NULL) in set_bitmap_file()
6302 if (mddev->pers) { in set_bitmap_file()
6303 mddev->pers->quiesce(mddev, 1); in set_bitmap_file()
6307 bitmap = bitmap_create(mddev, -1); in set_bitmap_file()
6309 mddev->bitmap = bitmap; in set_bitmap_file()
6310 err = bitmap_load(mddev); in set_bitmap_file()
6315 bitmap_destroy(mddev); in set_bitmap_file()
6318 mddev->pers->quiesce(mddev, 0); in set_bitmap_file()
6321 struct file *f = mddev->bitmap_info.file; in set_bitmap_file()
6323 spin_lock(&mddev->lock); in set_bitmap_file()
6324 mddev->bitmap_info.file = NULL; in set_bitmap_file()
6325 spin_unlock(&mddev->lock); in set_bitmap_file()
6346 static int set_array_info(struct mddev *mddev, mdu_array_info_t *info) in set_array_info() argument
6360 mddev->major_version = info->major_version; in set_array_info()
6361 mddev->minor_version = info->minor_version; in set_array_info()
6362 mddev->patch_version = info->patch_version; in set_array_info()
6363 mddev->persistent = !info->not_persistent; in set_array_info()
6367 mddev->ctime = get_seconds(); in set_array_info()
6370 mddev->major_version = MD_MAJOR_VERSION; in set_array_info()
6371 mddev->minor_version = MD_MINOR_VERSION; in set_array_info()
6372 mddev->patch_version = MD_PATCHLEVEL_VERSION; in set_array_info()
6373 mddev->ctime = get_seconds(); in set_array_info()
6375 mddev->level = info->level; in set_array_info()
6376 mddev->clevel[0] = 0; in set_array_info()
6377 mddev->dev_sectors = 2 * (sector_t)info->size; in set_array_info()
6378 mddev->raid_disks = info->raid_disks; in set_array_info()
6383 mddev->recovery_cp = MaxSector; in set_array_info()
6385 mddev->recovery_cp = 0; in set_array_info()
6386 mddev->persistent = ! info->not_persistent; in set_array_info()
6387 mddev->external = 0; in set_array_info()
6389 mddev->layout = info->layout; in set_array_info()
6390 mddev->chunk_sectors = info->chunk_size >> 9; in set_array_info()
6392 mddev->max_disks = MD_SB_DISKS; in set_array_info()
6394 if (mddev->persistent) in set_array_info()
6395 mddev->flags = 0; in set_array_info()
6396 set_bit(MD_CHANGE_DEVS, &mddev->flags); in set_array_info()
6398 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; in set_array_info()
6399 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); in set_array_info()
6400 mddev->bitmap_info.offset = 0; in set_array_info()
6402 mddev->reshape_position = MaxSector; in set_array_info()
6407 get_random_bytes(mddev->uuid, 16); in set_array_info()
6409 mddev->new_level = mddev->level; in set_array_info()
6410 mddev->new_chunk_sectors = mddev->chunk_sectors; in set_array_info()
6411 mddev->new_layout = mddev->layout; in set_array_info()
6412 mddev->delta_disks = 0; in set_array_info()
6413 mddev->reshape_backwards = 0; in set_array_info()
6418 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors) in md_set_array_sectors() argument
6420 WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__); in md_set_array_sectors()
6422 if (mddev->external_size) in md_set_array_sectors()
6425 mddev->array_sectors = array_sectors; in md_set_array_sectors()
6429 static int update_size(struct mddev *mddev, sector_t num_sectors) in update_size() argument
6435 if (mddev->pers->resize == NULL) in update_size()
6446 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || in update_size()
6447 mddev->sync_thread) in update_size()
6449 if (mddev->ro) in update_size()
6452 rdev_for_each(rdev, mddev) { in update_size()
6460 rv = mddev->pers->resize(mddev, num_sectors); in update_size()
6462 revalidate_disk(mddev->gendisk); in update_size()
6466 static int update_raid_disks(struct mddev *mddev, int raid_disks) in update_raid_disks() argument
6471 if (mddev->pers->check_reshape == NULL) in update_raid_disks()
6473 if (mddev->ro) in update_raid_disks()
6476 (mddev->max_disks && raid_disks >= mddev->max_disks)) in update_raid_disks()
6478 if (mddev->sync_thread || in update_raid_disks()
6479 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || in update_raid_disks()
6480 mddev->reshape_position != MaxSector) in update_raid_disks()
6483 rdev_for_each(rdev, mddev) { in update_raid_disks()
6484 if (mddev->raid_disks < raid_disks && in update_raid_disks()
6487 if (mddev->raid_disks > raid_disks && in update_raid_disks()
6492 mddev->delta_disks = raid_disks - mddev->raid_disks; in update_raid_disks()
6493 if (mddev->delta_disks < 0) in update_raid_disks()
6494 mddev->reshape_backwards = 1; in update_raid_disks()
6495 else if (mddev->delta_disks > 0) in update_raid_disks()
6496 mddev->reshape_backwards = 0; in update_raid_disks()
6498 rv = mddev->pers->check_reshape(mddev); in update_raid_disks()
6500 mddev->delta_disks = 0; in update_raid_disks()
6501 mddev->reshape_backwards = 0; in update_raid_disks()
6514 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) in update_array_info() argument
6521 if (mddev->bitmap && mddev->bitmap_info.offset) in update_array_info()
6524 if (mddev->major_version != info->major_version || in update_array_info()
6525 mddev->minor_version != info->minor_version || in update_array_info()
6527 mddev->ctime != info->ctime || in update_array_info()
6528 mddev->level != info->level || in update_array_info()
6530 mddev->persistent != !info->not_persistent || in update_array_info()
6531 mddev->chunk_sectors != info->chunk_size >> 9 || in update_array_info()
6537 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) in update_array_info()
6539 if (mddev->raid_disks != info->raid_disks) in update_array_info()
6541 if (mddev->layout != info->layout) in update_array_info()
6550 if (mddev->layout != info->layout) { in update_array_info()
6555 if (mddev->pers->check_reshape == NULL) in update_array_info()
6558 mddev->new_layout = info->layout; in update_array_info()
6559 rv = mddev->pers->check_reshape(mddev); in update_array_info()
6561 mddev->new_layout = mddev->layout; in update_array_info()
6565 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) in update_array_info()
6566 rv = update_size(mddev, (sector_t)info->size * 2); in update_array_info()
6568 if (mddev->raid_disks != info->raid_disks) in update_array_info()
6569 rv = update_raid_disks(mddev, info->raid_disks); in update_array_info()
6572 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) { in update_array_info()
6576 if (mddev->recovery || mddev->sync_thread) { in update_array_info()
6583 if (mddev->bitmap) { in update_array_info()
6587 if (mddev->bitmap_info.default_offset == 0) { in update_array_info()
6591 mddev->bitmap_info.offset = in update_array_info()
6592 mddev->bitmap_info.default_offset; in update_array_info()
6593 mddev->bitmap_info.space = in update_array_info()
6594 mddev->bitmap_info.default_space; in update_array_info()
6595 mddev->pers->quiesce(mddev, 1); in update_array_info()
6596 bitmap = bitmap_create(mddev, -1); in update_array_info()
6598 mddev->bitmap = bitmap; in update_array_info()
6599 rv = bitmap_load(mddev); in update_array_info()
6603 bitmap_destroy(mddev); in update_array_info()
6604 mddev->pers->quiesce(mddev, 0); in update_array_info()
6607 if (!mddev->bitmap) { in update_array_info()
6611 if (mddev->bitmap->storage.file) { in update_array_info()
6615 mddev->pers->quiesce(mddev, 1); in update_array_info()
6616 bitmap_destroy(mddev); in update_array_info()
6617 mddev->pers->quiesce(mddev, 0); in update_array_info()
6618 mddev->bitmap_info.offset = 0; in update_array_info()
6621 md_update_sb(mddev, 1); in update_array_info()
6627 static int set_disk_faulty(struct mddev *mddev, dev_t dev) in set_disk_faulty() argument
6632 if (mddev->pers == NULL) in set_disk_faulty()
6636 rdev = find_rdev_rcu(mddev, dev); in set_disk_faulty()
6640 md_error(mddev, rdev); in set_disk_faulty()
6656 struct mddev *mddev = bdev->bd_disk->private_data; in md_getgeo() local
6660 geo->cylinders = mddev->array_sectors / 8; in md_getgeo()
6695 struct mddev *mddev = NULL; in md_ioctl() local
6733 mddev = bdev->bd_disk->private_data; in md_ioctl()
6735 if (!mddev) { in md_ioctl()
6743 if (!mddev->raid_disks && !mddev->external) in md_ioctl()
6746 err = get_array_info(mddev, argp); in md_ioctl()
6750 if (!mddev->raid_disks && !mddev->external) in md_ioctl()
6753 err = get_disk_info(mddev, argp); in md_ioctl()
6757 err = set_disk_faulty(mddev, new_decode_dev(arg)); in md_ioctl()
6761 err = get_bitmap_file(mddev, argp); in md_ioctl()
6772 wait_event_interruptible_timeout(mddev->sb_wait, in md_ioctl()
6774 &mddev->flags), in md_ioctl()
6780 mutex_lock(&mddev->open_mutex); in md_ioctl()
6781 if (mddev->pers && atomic_read(&mddev->openers) > 1) { in md_ioctl()
6782 mutex_unlock(&mddev->open_mutex); in md_ioctl()
6786 set_bit(MD_STILL_CLOSED, &mddev->flags); in md_ioctl()
6787 mutex_unlock(&mddev->open_mutex); in md_ioctl()
6790 err = mddev_lock(mddev); in md_ioctl()
6806 if (mddev->pers) { in md_ioctl()
6807 err = update_array_info(mddev, &info); in md_ioctl()
6815 if (!list_empty(&mddev->disks)) { in md_ioctl()
6818 mdname(mddev)); in md_ioctl()
6822 if (mddev->raid_disks) { in md_ioctl()
6825 mdname(mddev)); in md_ioctl()
6829 err = set_array_info(mddev, &info); in md_ioctl()
6843 if ((!mddev->raid_disks && !mddev->external) in md_ioctl()
6856 err = restart_array(mddev); in md_ioctl()
6860 err = do_md_stop(mddev, 0, bdev); in md_ioctl()
6864 err = md_set_readonly(mddev, bdev); in md_ioctl()
6868 err = hot_remove_disk(mddev, new_decode_dev(arg)); in md_ioctl()
6876 if (mddev->pers) { in md_ioctl()
6884 err = add_new_disk(mddev, &info); in md_ioctl()
6903 if (mddev->ro != 1) in md_ioctl()
6909 if (mddev->pers) { in md_ioctl()
6910 err = restart_array(mddev); in md_ioctl()
6912 mddev->ro = 2; in md_ioctl()
6913 set_disk_ro(mddev->gendisk, 0); in md_ioctl()
6923 if (mddev->ro && mddev->pers) { in md_ioctl()
6924 if (mddev->ro == 2) { in md_ioctl()
6925 mddev->ro = 0; in md_ioctl()
6926 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_ioctl()
6927 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_ioctl()
6932 if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) { in md_ioctl()
6933 mddev_unlock(mddev); in md_ioctl()
6934 wait_event(mddev->sb_wait, in md_ioctl()
6935 !test_bit(MD_CHANGE_DEVS, &mddev->flags) && in md_ioctl()
6936 !test_bit(MD_CHANGE_PENDING, &mddev->flags)); in md_ioctl()
6937 mddev_lock_nointr(mddev); in md_ioctl()
6952 err = add_new_disk(mddev, &info); in md_ioctl()
6957 if (mddev_is_clustered(mddev)) in md_ioctl()
6958 md_cluster_ops->new_disk_ack(mddev, false); in md_ioctl()
6964 err = hot_add_disk(mddev, new_decode_dev(arg)); in md_ioctl()
6968 err = do_md_run(mddev); in md_ioctl()
6972 err = set_bitmap_file(mddev, (int)arg); in md_ioctl()
6981 if (mddev->hold_active == UNTIL_IOCTL && in md_ioctl()
6983 mddev->hold_active = 0; in md_ioctl()
6984 mddev_unlock(mddev); in md_ioctl()
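md_ioctl() above is the legacy control path used by tools such as mdadm; its switch dispatches to the helpers listed earlier (get_array_info(), get_disk_info(), add_new_disk(), hot_add_disk(), do_md_run(), do_md_stop() and friends). A hedged userspace sketch of the read-only query branch, assuming an already assembled /dev/md0:

/* Illustrative sketch only: query an assembled array through the
 * GET_ARRAY_INFO branch of md_ioctl(), which lands in get_array_info()
 * shown earlier in this listing. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>    /* mdu_array_info_t, GET_ARRAY_INFO */

int main(void)
{
        mdu_array_info_t info;
        int fd = open("/dev/md0", O_RDONLY);

        if (fd < 0)
                return 1;
        if (ioctl(fd, GET_ARRAY_INFO, &info) == 0)
                printf("level %d, %d raid disks, chunk %d bytes\n",
                       info.level, info.raid_disks, info.chunk_size);
        close(fd);
        return 0;
}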
7014 struct mddev *mddev = mddev_find(bdev->bd_dev); in md_open() local
7017 if (!mddev) in md_open()
7020 if (mddev->gendisk != bdev->bd_disk) { in md_open()
7024 mddev_put(mddev); in md_open()
7030 BUG_ON(mddev != bdev->bd_disk->private_data); in md_open()
7032 if ((err = mutex_lock_interruptible(&mddev->open_mutex))) in md_open()
7036 atomic_inc(&mddev->openers); in md_open()
7037 clear_bit(MD_STILL_CLOSED, &mddev->flags); in md_open()
7038 mutex_unlock(&mddev->open_mutex); in md_open()
7047 struct mddev *mddev = disk->private_data; in md_release() local
7049 BUG_ON(!mddev); in md_release()
7050 atomic_dec(&mddev->openers); in md_release()
7051 mddev_put(mddev); in md_release()
7056 struct mddev *mddev = disk->private_data; in md_media_changed() local
7058 return mddev->changed; in md_media_changed()
7063 struct mddev *mddev = disk->private_data; in md_revalidate() local
7065 mddev->changed = 0; in md_revalidate()
7134 struct mddev *mddev, const char *name) in md_register_thread() argument
7145 thread->mddev = mddev; in md_register_thread()
7149 mdname(thread->mddev), in md_register_thread()
7177 void md_error(struct mddev *mddev, struct md_rdev *rdev) in md_error() argument
7182 if (!mddev->pers || !mddev->pers->error_handler) in md_error()
7184 mddev->pers->error_handler(mddev,rdev); in md_error()
7185 if (mddev->degraded) in md_error()
7186 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_error()
7188 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_error()
7189 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_error()
7190 md_wakeup_thread(mddev->thread); in md_error()
7191 if (mddev->event_work.func) in md_error()
7192 queue_work(md_misc_wq, &mddev->event_work); in md_error()
7193 md_new_event_inintr(mddev); in md_error()
7218 static int status_resync(struct seq_file *seq, struct mddev *mddev) in status_resync() argument
7226 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || in status_resync()
7227 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in status_resync()
7228 max_sectors = mddev->resync_max_sectors; in status_resync()
7230 max_sectors = mddev->dev_sectors; in status_resync()
7232 resync = mddev->curr_resync; in status_resync()
7234 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) in status_resync()
7238 resync -= atomic_read(&mddev->recovery_active); in status_resync()
7241 if (mddev->recovery_cp < MaxSector) { in status_resync()
7278 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? in status_resync()
7280 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)? in status_resync()
7282 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? in status_resync()
7302 dt = ((jiffies - mddev->resync_mark) / HZ); in status_resync()
7304 db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active)) in status_resync()
7305 - mddev->resync_mark_cnt; in status_resync()
7323 struct mddev *mddev; in md_seq_start() local
7334 mddev = list_entry(tmp, struct mddev, all_mddevs); in md_seq_start()
7335 mddev_get(mddev); in md_seq_start()
7337 return mddev; in md_seq_start()
7348 struct mddev *next_mddev, *mddev = v; in md_seq_next() local
7358 tmp = mddev->all_mddevs.next; in md_seq_next()
7360 next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs)); in md_seq_next()
7368 mddev_put(mddev); in md_seq_next()
7375 struct mddev *mddev = v; in md_seq_stop() local
7377 if (mddev && v != (void*)1 && v != (void*)2) in md_seq_stop()
7378 mddev_put(mddev); in md_seq_stop()
7383 struct mddev *mddev = v; in md_seq_show() local
7404 spin_lock(&mddev->lock); in md_seq_show()
7405 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { in md_seq_show()
7406 seq_printf(seq, "%s : %sactive", mdname(mddev), in md_seq_show()
7407 mddev->pers ? "" : "in"); in md_seq_show()
7408 if (mddev->pers) { in md_seq_show()
7409 if (mddev->ro==1) in md_seq_show()
7411 if (mddev->ro==2) in md_seq_show()
7413 seq_printf(seq, " %s", mddev->pers->name); in md_seq_show()
7418 rdev_for_each_rcu(rdev, mddev) { in md_seq_show()
7438 if (!list_empty(&mddev->disks)) { in md_seq_show()
7439 if (mddev->pers) in md_seq_show()
7442 mddev->array_sectors / 2); in md_seq_show()
7447 if (mddev->persistent) { in md_seq_show()
7448 if (mddev->major_version != 0 || in md_seq_show()
7449 mddev->minor_version != 90) { in md_seq_show()
7451 mddev->major_version, in md_seq_show()
7452 mddev->minor_version); in md_seq_show()
7454 } else if (mddev->external) in md_seq_show()
7456 mddev->metadata_type); in md_seq_show()
7460 if (mddev->pers) { in md_seq_show()
7461 mddev->pers->status(seq, mddev); in md_seq_show()
7463 if (mddev->pers->sync_request) { in md_seq_show()
7464 if (status_resync(seq, mddev)) in md_seq_show()
7470 bitmap_status(seq, mddev->bitmap); in md_seq_show()
7474 spin_unlock(&mddev->lock); in md_seq_show()
7573 int md_setup_cluster(struct mddev *mddev, int nodes) in md_setup_cluster() argument
7590 return md_cluster_ops->join(mddev, nodes); in md_setup_cluster()
7593 void md_cluster_stop(struct mddev *mddev) in md_cluster_stop() argument
7597 md_cluster_ops->leave(mddev); in md_cluster_stop()
7601 static int is_mddev_idle(struct mddev *mddev, int init) in is_mddev_idle() argument
7609 rdev_for_each_rcu(rdev, mddev) { in is_mddev_idle()
7645 void md_done_sync(struct mddev *mddev, int blocks, int ok) in md_done_sync() argument
7648 atomic_sub(blocks, &mddev->recovery_active); in md_done_sync()
7649 wake_up(&mddev->recovery_wait); in md_done_sync()
7651 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_done_sync()
7652 set_bit(MD_RECOVERY_ERROR, &mddev->recovery); in md_done_sync()
7653 md_wakeup_thread(mddev->thread); in md_done_sync()
7664 void md_write_start(struct mddev *mddev, struct bio *bi) in md_write_start() argument
7670 BUG_ON(mddev->ro == 1); in md_write_start()
7671 if (mddev->ro == 2) { in md_write_start()
7673 mddev->ro = 0; in md_write_start()
7674 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_write_start()
7675 md_wakeup_thread(mddev->thread); in md_write_start()
7676 md_wakeup_thread(mddev->sync_thread); in md_write_start()
7679 atomic_inc(&mddev->writes_pending); in md_write_start()
7680 if (mddev->safemode == 1) in md_write_start()
7681 mddev->safemode = 0; in md_write_start()
7682 if (mddev->in_sync) { in md_write_start()
7683 spin_lock(&mddev->lock); in md_write_start()
7684 if (mddev->in_sync) { in md_write_start()
7685 mddev->in_sync = 0; in md_write_start()
7686 set_bit(MD_CHANGE_CLEAN, &mddev->flags); in md_write_start()
7687 set_bit(MD_CHANGE_PENDING, &mddev->flags); in md_write_start()
7688 md_wakeup_thread(mddev->thread); in md_write_start()
7691 spin_unlock(&mddev->lock); in md_write_start()
7694 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_write_start()
7695 wait_event(mddev->sb_wait, in md_write_start()
7696 !test_bit(MD_CHANGE_PENDING, &mddev->flags)); in md_write_start()
7700 void md_write_end(struct mddev *mddev) in md_write_end() argument
7702 if (atomic_dec_and_test(&mddev->writes_pending)) { in md_write_end()
7703 if (mddev->safemode == 2) in md_write_end()
7704 md_wakeup_thread(mddev->thread); in md_write_end()
7705 else if (mddev->safemode_delay) in md_write_end()
7706 mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay); in md_write_end()
7720 int md_allow_write(struct mddev *mddev) in md_allow_write() argument
7722 if (!mddev->pers) in md_allow_write()
7724 if (mddev->ro) in md_allow_write()
7726 if (!mddev->pers->sync_request) in md_allow_write()
7729 spin_lock(&mddev->lock); in md_allow_write()
7730 if (mddev->in_sync) { in md_allow_write()
7731 mddev->in_sync = 0; in md_allow_write()
7732 set_bit(MD_CHANGE_CLEAN, &mddev->flags); in md_allow_write()
7733 set_bit(MD_CHANGE_PENDING, &mddev->flags); in md_allow_write()
7734 if (mddev->safemode_delay && in md_allow_write()
7735 mddev->safemode == 0) in md_allow_write()
7736 mddev->safemode = 1; in md_allow_write()
7737 spin_unlock(&mddev->lock); in md_allow_write()
7738 md_update_sb(mddev, 0); in md_allow_write()
7739 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_allow_write()
7741 spin_unlock(&mddev->lock); in md_allow_write()
7743 if (test_bit(MD_CHANGE_PENDING, &mddev->flags)) in md_allow_write()
7755 struct mddev *mddev = thread->mddev; in md_do_sync() local
7756 struct mddev *mddev2; in md_do_sync()
7773 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) in md_do_sync()
7775 if (mddev->ro) {/* never try to sync a read-only array */ in md_do_sync()
7776 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_do_sync()
7780 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in md_do_sync()
7781 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { in md_do_sync()
7784 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { in md_do_sync()
7789 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in md_do_sync()
7794 mddev->last_sync_action = action ?: desc; in md_do_sync()
7813 mddev->curr_resync = 2; in md_do_sync()
7816 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
7819 if (mddev2 == mddev) in md_do_sync()
7821 if (!mddev->parallel_resync in md_do_sync()
7823 && match_mddev_units(mddev, mddev2)) { in md_do_sync()
7825 if (mddev < mddev2 && mddev->curr_resync == 2) { in md_do_sync()
7827 mddev->curr_resync = 1; in md_do_sync()
7830 if (mddev > mddev2 && mddev->curr_resync == 1) in md_do_sync()
7840 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && in md_do_sync()
7841 mddev2->curr_resync >= mddev->curr_resync) { in md_do_sync()
7845 desc, mdname(mddev), mdname(mddev2)); in md_do_sync()
7856 } while (mddev->curr_resync < 2); in md_do_sync()
7859 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in md_do_sync()
7863 max_sectors = mddev->resync_max_sectors; in md_do_sync()
7864 atomic64_set(&mddev->resync_mismatches, 0); in md_do_sync()
7866 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) in md_do_sync()
7867 j = mddev->resync_min; in md_do_sync()
7868 else if (!mddev->bitmap) in md_do_sync()
7869 j = mddev->recovery_cp; in md_do_sync()
7871 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in md_do_sync()
7872 max_sectors = mddev->resync_max_sectors; in md_do_sync()
7875 max_sectors = mddev->dev_sectors; in md_do_sync()
7878 rdev_for_each_rcu(rdev, mddev) in md_do_sync()
7895 if (mddev->bitmap) { in md_do_sync()
7896 mddev->pers->quiesce(mddev, 1); in md_do_sync()
7897 mddev->pers->quiesce(mddev, 0); in md_do_sync()
7901 printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev)); in md_do_sync()
7903 " %d KB/sec/disk.\n", speed_min(mddev)); in md_do_sync()
7906 speed_max(mddev), desc); in md_do_sync()
7908 is_mddev_idle(mddev, 1); /* this initializes IO event counters */ in md_do_sync()
7916 mddev->resync_mark = mark[last_mark]; in md_do_sync()
7917 mddev->resync_mark_cnt = mark_cnt[last_mark]; in md_do_sync()
7926 atomic_set(&mddev->recovery_active, 0); in md_do_sync()
7932 desc, mdname(mddev)); in md_do_sync()
7933 mddev->curr_resync = j; in md_do_sync()
7935 mddev->curr_resync = 3; /* no longer delayed */ in md_do_sync()
7936 mddev->curr_resync_completed = j; in md_do_sync()
7937 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); in md_do_sync()
7938 md_new_event(mddev); in md_do_sync()
7947 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_do_sync()
7948 ((mddev->curr_resync > mddev->curr_resync_completed && in md_do_sync()
7949 (mddev->curr_resync - mddev->curr_resync_completed) in md_do_sync()
7952 (j - mddev->curr_resync_completed)*2 in md_do_sync()
7953 >= mddev->resync_max - mddev->curr_resync_completed || in md_do_sync()
7954 mddev->curr_resync_completed > mddev->resync_max in md_do_sync()
7957 wait_event(mddev->recovery_wait, in md_do_sync()
7958 atomic_read(&mddev->recovery_active) == 0); in md_do_sync()
7959 mddev->curr_resync_completed = j; in md_do_sync()
7960 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && in md_do_sync()
7961 j > mddev->recovery_cp) in md_do_sync()
7962 mddev->recovery_cp = j; in md_do_sync()
7964 set_bit(MD_CHANGE_CLEAN, &mddev->flags); in md_do_sync()
7965 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); in md_do_sync()
7968 while (j >= mddev->resync_max && in md_do_sync()
7969 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in md_do_sync()
7975 wait_event_interruptible(mddev->recovery_wait, in md_do_sync()
7976 mddev->resync_max > j in md_do_sync()
7978 &mddev->recovery)); in md_do_sync()
7981 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
7984 sectors = mddev->pers->sync_request(mddev, j, &skipped); in md_do_sync()
7986 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_do_sync()
7992 atomic_add(sectors, &mddev->recovery_active); in md_do_sync()
7995 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
8003 mddev->curr_resync = j; in md_do_sync()
8004 mddev->curr_mark_cnt = io_sectors; in md_do_sync()
8009 md_new_event(mddev); in md_do_sync()
8020 mddev->resync_mark = mark[next]; in md_do_sync()
8021 mddev->resync_mark_cnt = mark_cnt[next]; in md_do_sync()
8023 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active); in md_do_sync()
8027 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
8040 recovery_done = io_sectors - atomic_read(&mddev->recovery_active); in md_do_sync()
8041 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2 in md_do_sync()
8042 /((jiffies-mddev->resync_mark)/HZ +1) +1; in md_do_sync()
8044 if (currspeed > speed_min(mddev)) { in md_do_sync()
8045 if (currspeed > speed_max(mddev)) { in md_do_sync()
8049 if (!is_mddev_idle(mddev, 0)) { in md_do_sync()
8054 wait_event(mddev->recovery_wait, in md_do_sync()
8055 !atomic_read(&mddev->recovery_active)); in md_do_sync()
8059 printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc, in md_do_sync()
8060 test_bit(MD_RECOVERY_INTR, &mddev->recovery) in md_do_sync()
8066 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); in md_do_sync()
8068 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_do_sync()
8069 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && in md_do_sync()
8070 mddev->curr_resync > 2) { in md_do_sync()
8071 mddev->curr_resync_completed = mddev->curr_resync; in md_do_sync()
8072 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); in md_do_sync()
8075 if (mddev_is_clustered(mddev)) { in md_do_sync()
8076 md_cluster_ops->resync_finish(mddev); in md_do_sync()
8079 mddev->pers->sync_request(mddev, max_sectors, &skipped); in md_do_sync()
8081 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && in md_do_sync()
8082 mddev->curr_resync > 2) { in md_do_sync()
8083 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in md_do_sync()
8084 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in md_do_sync()
8085 if (mddev->curr_resync >= mddev->recovery_cp) { in md_do_sync()
8088 desc, mdname(mddev)); in md_do_sync()
8090 &mddev->recovery)) in md_do_sync()
8091 mddev->recovery_cp = in md_do_sync()
8092 mddev->curr_resync_completed; in md_do_sync()
8094 mddev->recovery_cp = in md_do_sync()
8095 mddev->curr_resync; in md_do_sync()
8098 mddev->recovery_cp = MaxSector; in md_do_sync()
8100 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
8101 mddev->curr_resync = MaxSector; in md_do_sync()
8103 rdev_for_each_rcu(rdev, mddev) in md_do_sync()
8105 mddev->delta_disks >= 0 && in md_do_sync()
8109 rdev->recovery_offset < mddev->curr_resync) in md_do_sync()
8110 rdev->recovery_offset = mddev->curr_resync; in md_do_sync()
8115 set_bit(MD_CHANGE_DEVS, &mddev->flags); in md_do_sync()
8117 if (mddev_is_clustered(mddev) && in md_do_sync()
8118 test_bit(MD_RECOVERY_INTR, &mddev->recovery) && in md_do_sync()
8120 md_cluster_ops->resync_finish(mddev); in md_do_sync()
8122 spin_lock(&mddev->lock); in md_do_sync()
8123 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in md_do_sync()
8125 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) in md_do_sync()
8126 mddev->resync_min = 0; in md_do_sync()
8127 mddev->resync_max = MaxSector; in md_do_sync()
8128 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) in md_do_sync()
8129 mddev->resync_min = mddev->curr_resync_completed; in md_do_sync()
8130 set_bit(MD_RECOVERY_DONE, &mddev->recovery); in md_do_sync()
8131 mddev->curr_resync = 0; in md_do_sync()
8132 spin_unlock(&mddev->lock); in md_do_sync()
8135 md_wakeup_thread(mddev->thread); in md_do_sync()
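The lines around 8040-8055 above show md's resync throttle: sectors completed since the last rate mark are converted to KiB/s, and the result is compared against the array's speed_min/speed_max limits; above speed_min the thread keeps full pace only while the member devices are otherwise idle, and above speed_max it waits for in-flight resync I/O to drain. A minimal user-space sketch of that rate computation follows; the struct, HZ value, and sample numbers are invented for illustration and are not kernel code.

#include <stdio.h>

#define HZ 1000                        /* assumed tick rate for this sketch */

struct rate_mark {
        unsigned long jiffies;         /* when the mark was taken */
        unsigned long sectors_done;    /* completed sectors at the mark */
};

/* KiB/s since the mark: /2 converts 512-byte sectors to KiB, and the
 * "+1" terms avoid a zero divisor and a zero result, as in the excerpt. */
static unsigned long currspeed(const struct rate_mark *m,
                               unsigned long now, unsigned long done)
{
        return ((done - m->sectors_done) / 2)
               / ((now - m->jiffies) / HZ + 1) + 1;
}

int main(void)
{
        struct rate_mark m = { .jiffies = 0, .sectors_done = 0 };

        /* 2,000,000 sectors (~1 GB) completed over 10 seconds of ticks */
        printf("%lu KiB/s\n", currspeed(&m, 10 * HZ, 2000000));
        return 0;
}

With the usual sysctl defaults (roughly 1000 and 200000 KiB/s for the min and max limits), a result like the ~90 MB/s printed here sits between the two, so per the logic at 8044-8055 it is throttled only when the member disks are seeing other I/O.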
8140 static int remove_and_add_spares(struct mddev *mddev, in remove_and_add_spares() argument
8147 rdev_for_each(rdev, mddev) in remove_and_add_spares()
8155 if (mddev->pers->hot_remove_disk( in remove_and_add_spares()
8156 mddev, rdev) == 0) { in remove_and_add_spares()
8157 sysfs_unlink_rdev(mddev, rdev); in remove_and_add_spares()
8162 if (removed && mddev->kobj.sd) in remove_and_add_spares()
8163 sysfs_notify(&mddev->kobj, NULL, "degraded"); in remove_and_add_spares()
8168 rdev_for_each(rdev, mddev) { in remove_and_add_spares()
8184 if (mddev->ro && in remove_and_add_spares()
8190 if (mddev->pers-> in remove_and_add_spares()
8191 hot_add_disk(mddev, rdev) == 0) { in remove_and_add_spares()
8192 if (sysfs_link_rdev(mddev, rdev)) in remove_and_add_spares()
8195 md_new_event(mddev); in remove_and_add_spares()
8196 set_bit(MD_CHANGE_DEVS, &mddev->flags); in remove_and_add_spares()
8201 set_bit(MD_CHANGE_DEVS, &mddev->flags); in remove_and_add_spares()
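remove_and_add_spares() is a two-pass scan: the first loop detaches failed members through the personality's hot_remove_disk() and drops their sysfs links (notifying "degraded" if anything went), and the second offers unattached devices back via hot_add_disk(), links them into sysfs, and marks MD_CHANGE_DEVS so the superblocks are rewritten; the return value is the number of spares put to work. A deliberately simplified user-space sketch of that shape, with invented slot states standing in for rdev flags:

#include <stdio.h>

enum slot_state { SLOT_ACTIVE, SLOT_FAULTY, SLOT_DETACHED,
                  SLOT_SPARE, SLOT_REBUILDING };

struct slot { enum slot_state state; };

/* Pass 1 strips failed members, pass 2 activates spares; the count of
 * activated spares is what tells the caller to kick off a recovery. */
static int remove_and_add(struct slot *s, int n)
{
        int removed = 0, spares = 0;

        for (int i = 0; i < n; i++)
                if (s[i].state == SLOT_FAULTY) {
                        s[i].state = SLOT_DETACHED;
                        removed++;
                }
        if (removed)
                printf("array degraded: %d device(s) removed\n", removed);

        for (int i = 0; i < n; i++)
                if (s[i].state == SLOT_SPARE) {
                        s[i].state = SLOT_REBUILDING;
                        spares++;
                }
        return spares;
}

int main(void)
{
        struct slot disks[4] = { {SLOT_ACTIVE}, {SLOT_FAULTY},
                                 {SLOT_ACTIVE}, {SLOT_SPARE} };
        printf("spares activated: %d\n", remove_and_add(disks, 4));
        return 0;
}

A non-zero return feeds the decision ladder in md_check_recovery() below (line 8388), which then sets MD_RECOVERY_RECOVER.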
8207 struct mddev *mddev = container_of(ws, struct mddev, del_work); in md_start_sync() local
8210 if (mddev_is_clustered(mddev)) { in md_start_sync()
8211 ret = md_cluster_ops->resync_start(mddev); in md_start_sync()
8213 mddev->sync_thread = NULL; in md_start_sync()
8218 mddev->sync_thread = md_register_thread(md_do_sync, in md_start_sync()
8219 mddev, in md_start_sync()
8222 if (!mddev->sync_thread) { in md_start_sync()
8223 if (!(mddev_is_clustered(mddev) && ret == -EAGAIN)) in md_start_sync()
8226 mdname(mddev)); in md_start_sync()
8228 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in md_start_sync()
8229 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in md_start_sync()
8230 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in md_start_sync()
8231 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in md_start_sync()
8232 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in md_start_sync()
8235 &mddev->recovery)) in md_start_sync()
8236 if (mddev->sysfs_action) in md_start_sync()
8237 sysfs_notify_dirent_safe(mddev->sysfs_action); in md_start_sync()
8239 md_wakeup_thread(mddev->sync_thread); in md_start_sync()
8240 sysfs_notify_dirent_safe(mddev->sysfs_action); in md_start_sync()
8241 md_new_event(mddev); in md_start_sync()
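md_start_sync() runs from a workqueue: it registers the "resync" kernel thread (after md_cluster_ops->resync_start() on clustered arrays), and if registration fails it clears every MD_RECOVERY_* intent bit so the array no longer advertises a pending sync, reshape, or check before notifying sysfs_action; on success it simply wakes the new thread. A loose user-space analogue using pthreads, with the flag word and message invented for illustration:

#include <pthread.h>
#include <stdio.h>

static void *sync_worker(void *arg)
{
        (void)arg;                 /* stand-in for md_do_sync() */
        return NULL;
}

/* Try to start the resync worker; on failure, drop every intent flag
 * and report, mirroring the error path around lines 8222-8232. */
static int start_sync(unsigned long *recovery_flags)
{
        pthread_t tid;

        if (pthread_create(&tid, NULL, sync_worker, NULL) != 0) {
                *recovery_flags = 0;
                fprintf(stderr, "md: could not start resync thread\n");
                return -1;
        }
        pthread_join(tid, NULL);   /* the kernel wakes the thread instead */
        return 0;
}

int main(void)
{
        unsigned long recovery = 1UL << 1;   /* pretend a sync was requested */
        return start_sync(&recovery) != 0;
}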
8266 void md_check_recovery(struct mddev *mddev) in md_check_recovery() argument
8268 if (mddev->suspended) in md_check_recovery()
8271 if (mddev->bitmap) in md_check_recovery()
8272 bitmap_daemon_work(mddev); in md_check_recovery()
8275 if (mddev->pers->sync_request && !mddev->external) { in md_check_recovery()
8277 mdname(mddev)); in md_check_recovery()
8278 mddev->safemode = 2; in md_check_recovery()
8283 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) in md_check_recovery()
8286 (mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) || in md_check_recovery()
8287 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || in md_check_recovery()
8288 test_bit(MD_RECOVERY_DONE, &mddev->recovery) || in md_check_recovery()
8289 (mddev->external == 0 && mddev->safemode == 1) || in md_check_recovery()
8290 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending) in md_check_recovery()
8291 && !mddev->in_sync && mddev->recovery_cp == MaxSector) in md_check_recovery()
8295 if (mddev_trylock(mddev)) { in md_check_recovery()
8298 if (mddev->ro) { in md_check_recovery()
8300 if (!mddev->external && mddev->in_sync) in md_check_recovery()
8306 rdev_for_each(rdev, mddev) in md_check_recovery()
8315 remove_and_add_spares(mddev, NULL); in md_check_recovery()
8319 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_check_recovery()
8320 md_reap_sync_thread(mddev); in md_check_recovery()
8321 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_check_recovery()
8322 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_check_recovery()
8323 clear_bit(MD_CHANGE_PENDING, &mddev->flags); in md_check_recovery()
8327 if (!mddev->external) { in md_check_recovery()
8329 spin_lock(&mddev->lock); in md_check_recovery()
8330 if (mddev->safemode && in md_check_recovery()
8331 !atomic_read(&mddev->writes_pending) && in md_check_recovery()
8332 !mddev->in_sync && in md_check_recovery()
8333 mddev->recovery_cp == MaxSector) { in md_check_recovery()
8334 mddev->in_sync = 1; in md_check_recovery()
8336 set_bit(MD_CHANGE_CLEAN, &mddev->flags); in md_check_recovery()
8338 if (mddev->safemode == 1) in md_check_recovery()
8339 mddev->safemode = 0; in md_check_recovery()
8340 spin_unlock(&mddev->lock); in md_check_recovery()
8342 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_check_recovery()
8345 if (mddev->flags & MD_UPDATE_SB_FLAGS) in md_check_recovery()
8346 md_update_sb(mddev, 0); in md_check_recovery()
8348 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && in md_check_recovery()
8349 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) { in md_check_recovery()
8351 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_check_recovery()
8354 if (mddev->sync_thread) { in md_check_recovery()
8355 md_reap_sync_thread(mddev); in md_check_recovery()
8361 mddev->curr_resync_completed = 0; in md_check_recovery()
8362 spin_lock(&mddev->lock); in md_check_recovery()
8363 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in md_check_recovery()
8364 spin_unlock(&mddev->lock); in md_check_recovery()
8368 clear_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_check_recovery()
8369 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); in md_check_recovery()
8371 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || in md_check_recovery()
8372 test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) in md_check_recovery()
8381 if (mddev->reshape_position != MaxSector) { in md_check_recovery()
8382 if (mddev->pers->check_reshape == NULL || in md_check_recovery()
8383 mddev->pers->check_reshape(mddev) != 0) in md_check_recovery()
8386 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in md_check_recovery()
8387 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_check_recovery()
8388 } else if ((spares = remove_and_add_spares(mddev, NULL))) { in md_check_recovery()
8389 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in md_check_recovery()
8390 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in md_check_recovery()
8391 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in md_check_recovery()
8392 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_check_recovery()
8393 } else if (mddev->recovery_cp < MaxSector) { in md_check_recovery()
8394 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); in md_check_recovery()
8395 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_check_recovery()
8396 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) in md_check_recovery()
8400 if (mddev->pers->sync_request) { in md_check_recovery()
8406 bitmap_write_all(mddev->bitmap); in md_check_recovery()
8408 INIT_WORK(&mddev->del_work, md_start_sync); in md_check_recovery()
8409 queue_work(md_misc_wq, &mddev->del_work); in md_check_recovery()
8413 if (!mddev->sync_thread) { in md_check_recovery()
8414 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in md_check_recovery()
8417 &mddev->recovery)) in md_check_recovery()
8418 if (mddev->sysfs_action) in md_check_recovery()
8419 sysfs_notify_dirent_safe(mddev->sysfs_action); in md_check_recovery()
8422 wake_up(&mddev->sb_wait); in md_check_recovery()
8423 mddev_unlock(mddev); in md_check_recovery()
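Once md_check_recovery() has done its housekeeping (superblock writes, safemode transitions, reaping a finished sync thread), the else-if ladder at 8381-8396 picks exactly one next action: continue a reshape if reshape_position is set and the personality's check_reshape() agrees, otherwise recover if remove_and_add_spares() activated any spares, otherwise resync if recovery_cp shows the array is not clean, otherwise do nothing unless a check/repair was explicitly requested (MD_RECOVERY_SYNC already set). A small sketch of that priority order; the struct fields are an invented summary of the inputs, not the real mddev layout:

#include <stdio.h>
#include <stdbool.h>

struct array_state {
        bool reshape_pending;     /* reshape_position != MaxSector */
        int  spares_available;    /* result of remove_and_add_spares() */
        bool resync_needed;       /* recovery_cp < MaxSector */
        bool check_requested;     /* user-requested check/repair */
};

static const char *pick_action(const struct array_state *s)
{
        if (s->reshape_pending)
                return "reshape";
        if (s->spares_available)
                return "recover";          /* rebuild onto the new spares */
        if (s->resync_needed)
                return "resync";
        if (s->check_requested)
                return "check";
        return "idle";
}

int main(void)
{
        struct array_state s = { .reshape_pending = false,
                                 .spares_available = 1,
                                 .resync_needed = true,
                                 .check_requested = false };
        /* spares win over resync: the array rebuilds before scrubbing */
        printf("next action: %s\n", pick_action(&s));
        return 0;
}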
8428 void md_reap_sync_thread(struct mddev *mddev) in md_reap_sync_thread() argument
8433 md_unregister_thread(&mddev->sync_thread); in md_reap_sync_thread()
8434 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && in md_reap_sync_thread()
8435 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { in md_reap_sync_thread()
8438 if (mddev->pers->spare_active(mddev)) { in md_reap_sync_thread()
8439 sysfs_notify(&mddev->kobj, NULL, in md_reap_sync_thread()
8441 set_bit(MD_CHANGE_DEVS, &mddev->flags); in md_reap_sync_thread()
8444 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_reap_sync_thread()
8445 mddev->pers->finish_reshape) in md_reap_sync_thread()
8446 mddev->pers->finish_reshape(mddev); in md_reap_sync_thread()
8451 if (!mddev->degraded) in md_reap_sync_thread()
8452 rdev_for_each(rdev, mddev) in md_reap_sync_thread()
8455 md_update_sb(mddev, 1); in md_reap_sync_thread()
8456 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in md_reap_sync_thread()
8457 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); in md_reap_sync_thread()
8458 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in md_reap_sync_thread()
8459 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in md_reap_sync_thread()
8460 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in md_reap_sync_thread()
8461 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in md_reap_sync_thread()
8464 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_reap_sync_thread()
8465 sysfs_notify_dirent_safe(mddev->sysfs_action); in md_reap_sync_thread()
8466 md_new_event(mddev); in md_reap_sync_thread()
8467 if (mddev->event_work.func) in md_reap_sync_thread()
8468 queue_work(md_misc_wq, &mddev->event_work); in md_reap_sync_thread()
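When the sync thread finishes, md_reap_sync_thread() unregisters it, lets the personality promote now-in-sync devices via spare_active() (unless the run was interrupted or was only a requested check), finishes any reshape, writes the superblocks, clears every MD_RECOVERY_* bit describing the completed run, and then re-arms MD_RECOVERY_NEEDED so md_check_recovery() immediately reconsiders the array. A sketch of that "clear everything, re-arm NEEDED" step, using invented bit names over a plain unsigned long rather than the kernel's atomic bit ops:

#include <stdio.h>

enum { R_RUNNING, R_SYNC, R_RESHAPE, R_REQUESTED, R_CHECK, R_DONE,
       R_INTR, R_NEEDED };

#define BIT(b)      (1UL << (b))
#define CLEARB(f,b) ((f) &= ~BIT(b))
#define SETB(f,b)   ((f) |=  BIT(b))
#define TESTB(f,b)  (((f) & BIT(b)) != 0)

/* Forget what the finished thread was doing, but flag that the daemon
 * should look at the array again right away. */
static unsigned long reap(unsigned long flags)
{
        CLEARB(flags, R_RUNNING);
        CLEARB(flags, R_DONE);
        CLEARB(flags, R_SYNC);
        CLEARB(flags, R_RESHAPE);
        CLEARB(flags, R_REQUESTED);
        CLEARB(flags, R_CHECK);
        SETB(flags, R_NEEDED);
        return flags;
}

int main(void)
{
        unsigned long flags = BIT(R_RUNNING) | BIT(R_SYNC) | BIT(R_DONE);

        flags = reap(flags);
        printf("needed=%d running=%d\n",
               TESTB(flags, R_NEEDED), TESTB(flags, R_RUNNING));
        return 0;
}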
8472 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev) in md_wait_for_blocked_rdev() argument
8479 rdev_dec_pending(rdev, mddev); in md_wait_for_blocked_rdev()
8483 void md_finish_reshape(struct mddev *mddev) in md_finish_reshape() argument
8488 rdev_for_each(rdev, mddev) { in md_finish_reshape()
8759 set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags); in rdev_set_badblocks()
8760 set_bit(MD_CHANGE_PENDING, &rdev->mddev->flags); in rdev_set_badblocks()
8761 md_wakeup_thread(rdev->mddev->thread); in rdev_set_badblocks()
9000 struct mddev *mddev; in md_notify_reboot() local
9003 for_each_mddev(mddev, tmp) { in md_notify_reboot()
9004 if (mddev_trylock(mddev)) { in md_notify_reboot()
9005 if (mddev->pers) in md_notify_reboot()
9006 __md_stop_writes(mddev); in md_notify_reboot()
9007 if (mddev->persistent) in md_notify_reboot()
9008 mddev->safemode = 2; in md_notify_reboot()
9009 mddev_unlock(mddev); in md_notify_reboot()
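The reboot notifier walks every array and, for each one whose lock it can take without sleeping, stops writes and (for persistent arrays) sets safemode to 2 so the array is marked clean as soon as pending writes drain; arrays whose lock is held are simply skipped. A user-space analogue of that try-lock-and-quiesce loop, with an invented miniature array struct in place of struct mddev:

#include <pthread.h>
#include <stdio.h>

struct array {
        pthread_mutex_t lock;
        int writes_stopped;
        int safemode;
};

/* Quiesce only the arrays we can lock without blocking, as the
 * notifier does with mddev_trylock() in the excerpt above. */
static void quiesce_for_reboot(struct array *a, int n)
{
        for (int i = 0; i < n; i++) {
                if (pthread_mutex_trylock(&a[i].lock) == 0) {
                        a[i].writes_stopped = 1;
                        a[i].safemode = 2;   /* mark clean once writes drain */
                        pthread_mutex_unlock(&a[i].lock);
                }
        }
}

int main(void)
{
        static struct array md[2] = {
                { PTHREAD_MUTEX_INITIALIZER, 0, 0 },
                { PTHREAD_MUTEX_INITIALIZER, 0, 0 },
        };

        quiesce_for_reboot(md, 2);
        printf("array0: writes_stopped=%d safemode=%d\n",
               md[0].writes_stopped, md[0].safemode);
        return 0;
}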
9078 static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) in check_sb_changes() argument
9086 rdev_for_each(rdev2, mddev) { in check_sb_changes()
9107 ret = remove_and_add_spares(mddev, rdev2); in check_sb_changes()
9118 md_error(mddev, rdev2); in check_sb_changes()
9124 if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) in check_sb_changes()
9125 update_raid_disks(mddev, le32_to_cpu(sb->raid_disks)); in check_sb_changes()
9128 mddev->events = le64_to_cpu(sb->events); in check_sb_changes()
9131 static int read_rdev(struct mddev *mddev, struct md_rdev *rdev) in read_rdev() argument
9144 err = super_types[mddev->major_version].load_super(rdev, NULL, mddev->minor_version); in read_rdev()
9168 mddev->pers->spare_active(mddev)) in read_rdev()
9169 sysfs_notify(&mddev->kobj, NULL, "degraded"); in read_rdev()
9175 void md_reload_sb(struct mddev *mddev, int nr) in md_reload_sb() argument
9181 rdev_for_each_rcu(rdev, mddev) { in md_reload_sb()
9191 err = read_rdev(mddev, rdev); in md_reload_sb()
9195 check_sb_changes(mddev, rdev); in md_reload_sb()
9198 rdev_for_each_rcu(rdev, mddev) in md_reload_sb()
9199 read_rdev(mddev, rdev); in md_reload_sb()
9271 struct mddev *mddev; in md_exit() local
9295 for_each_mddev(mddev, tmp) { in md_exit()
9296 export_array(mddev); in md_exit()
9297 mddev->hold_active = 0; in md_exit()