Lines Matching refs:rdev

368 struct md_rdev *rdev = bio->bi_private; in md_end_flush() local
369 struct mddev *mddev = rdev->mddev; in md_end_flush()
371 rdev_dec_pending(rdev, mddev); in md_end_flush()
385 struct md_rdev *rdev; in submit_flushes() local
390 rdev_for_each_rcu(rdev, mddev) in submit_flushes()
391 if (rdev->raid_disk >= 0 && in submit_flushes()
392 !test_bit(Faulty, &rdev->flags)) { in submit_flushes()
398 atomic_inc(&rdev->nr_pending); in submit_flushes()
399 atomic_inc(&rdev->nr_pending); in submit_flushes()
403 bi->bi_private = rdev; in submit_flushes()
404 bi->bi_bdev = rdev->bdev; in submit_flushes()
408 rdev_dec_pending(rdev, mddev); in submit_flushes()
640 struct md_rdev *rdev; in md_find_rdev_nr_rcu() local
642 rdev_for_each_rcu(rdev, mddev) in md_find_rdev_nr_rcu()
643 if (rdev->desc_nr == nr) in md_find_rdev_nr_rcu()
644 return rdev; in md_find_rdev_nr_rcu()
652 struct md_rdev *rdev; in find_rdev() local
654 rdev_for_each(rdev, mddev) in find_rdev()
655 if (rdev->bdev->bd_dev == dev) in find_rdev()
656 return rdev; in find_rdev()
663 struct md_rdev *rdev; in find_rdev_rcu() local
665 rdev_for_each_rcu(rdev, mddev) in find_rdev_rcu()
666 if (rdev->bdev->bd_dev == dev) in find_rdev_rcu()
667 return rdev; in find_rdev_rcu()
685 static inline sector_t calc_dev_sboffset(struct md_rdev *rdev) in calc_dev_sboffset() argument
687 sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512; in calc_dev_sboffset()
691 static int alloc_disk_sb(struct md_rdev *rdev) in alloc_disk_sb() argument
693 rdev->sb_page = alloc_page(GFP_KERNEL); in alloc_disk_sb()
694 if (!rdev->sb_page) { in alloc_disk_sb()
702 void md_rdev_clear(struct md_rdev *rdev) in md_rdev_clear() argument
704 if (rdev->sb_page) { in md_rdev_clear()
705 put_page(rdev->sb_page); in md_rdev_clear()
706 rdev->sb_loaded = 0; in md_rdev_clear()
707 rdev->sb_page = NULL; in md_rdev_clear()
708 rdev->sb_start = 0; in md_rdev_clear()
709 rdev->sectors = 0; in md_rdev_clear()
711 if (rdev->bb_page) { in md_rdev_clear()
712 put_page(rdev->bb_page); in md_rdev_clear()
713 rdev->bb_page = NULL; in md_rdev_clear()
715 kfree(rdev->badblocks.page); in md_rdev_clear()
716 rdev->badblocks.page = NULL; in md_rdev_clear()
722 struct md_rdev *rdev = bio->bi_private; in super_written() local
723 struct mddev *mddev = rdev->mddev; in super_written()
727 md_error(mddev, rdev); in super_written()
735 void md_super_write(struct mddev *mddev, struct md_rdev *rdev, in md_super_write() argument
746 bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev; in md_super_write()
749 bio->bi_private = rdev; in md_super_write()
762 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, in sync_page_io() argument
765 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev); in sync_page_io()
768 bio->bi_bdev = (metadata_op && rdev->meta_bdev) ? in sync_page_io()
769 rdev->meta_bdev : rdev->bdev; in sync_page_io()
771 bio->bi_iter.bi_sector = sector + rdev->sb_start; in sync_page_io()
772 else if (rdev->mddev->reshape_position != MaxSector && in sync_page_io()
773 (rdev->mddev->reshape_backwards == in sync_page_io()
774 (sector >= rdev->mddev->reshape_position))) in sync_page_io()
775 bio->bi_iter.bi_sector = sector + rdev->new_data_offset; in sync_page_io()
777 bio->bi_iter.bi_sector = sector + rdev->data_offset; in sync_page_io()
787 static int read_disk_sb(struct md_rdev *rdev, int size) in read_disk_sb() argument
791 if (rdev->sb_loaded) in read_disk_sb()
794 if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true)) in read_disk_sb()
796 rdev->sb_loaded = 1; in read_disk_sb()
801 bdevname(rdev->bdev,b)); in read_disk_sb()
912 int (*load_super)(struct md_rdev *rdev,
916 struct md_rdev *rdev);
918 struct md_rdev *rdev);
919 unsigned long long (*rdev_size_change)(struct md_rdev *rdev,
921 int (*allow_new_offset)(struct md_rdev *rdev,
946 static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version) in super_90_load() argument
958 rdev->sb_start = calc_dev_sboffset(rdev); in super_90_load()
960 ret = read_disk_sb(rdev, MD_SB_BYTES); in super_90_load()
965 bdevname(rdev->bdev, b); in super_90_load()
966 sb = page_address(rdev->sb_page); in super_90_load()
992 rdev->preferred_minor = sb->md_minor; in super_90_load()
993 rdev->data_offset = 0; in super_90_load()
994 rdev->new_data_offset = 0; in super_90_load()
995 rdev->sb_size = MD_SB_BYTES; in super_90_load()
996 rdev->badblocks.shift = -1; in super_90_load()
999 rdev->desc_nr = -1; in super_90_load()
1001 rdev->desc_nr = sb->this_disk.number; in super_90_load()
1026 rdev->sectors = rdev->sb_start; in super_90_load()
1031 if (rdev->sectors >= (2ULL << 32) && sb->level >= 1) in super_90_load()
1032 rdev->sectors = (2ULL << 32) - 2; in super_90_load()
1034 if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1) in super_90_load()
1045 static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev) in super_90_validate() argument
1048 mdp_super_t *sb = page_address(rdev->sb_page); in super_90_validate()
1051 rdev->raid_disk = -1; in super_90_validate()
1052 clear_bit(Faulty, &rdev->flags); in super_90_validate()
1053 clear_bit(In_sync, &rdev->flags); in super_90_validate()
1054 clear_bit(Bitmap_sync, &rdev->flags); in super_90_validate()
1055 clear_bit(WriteMostly, &rdev->flags); in super_90_validate()
1123 if (sb->disks[rdev->desc_nr].state & ( in super_90_validate()
1134 set_bit(Bitmap_sync, &rdev->flags); in super_90_validate()
1142 desc = sb->disks + rdev->desc_nr; in super_90_validate()
1145 set_bit(Faulty, &rdev->flags); in super_90_validate()
1148 set_bit(In_sync, &rdev->flags); in super_90_validate()
1149 rdev->raid_disk = desc->raid_disk; in super_90_validate()
1150 rdev->saved_raid_disk = desc->raid_disk; in super_90_validate()
1156 rdev->recovery_offset = 0; in super_90_validate()
1157 rdev->raid_disk = desc->raid_disk; in super_90_validate()
1161 set_bit(WriteMostly, &rdev->flags); in super_90_validate()
1163 set_bit(In_sync, &rdev->flags); in super_90_validate()
1170 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev) in super_90_sync() argument
1189 rdev->sb_size = MD_SB_BYTES; in super_90_sync()
1191 sb = page_address(rdev->sb_page); in super_90_sync()
1305 sb->this_disk = sb->disks[rdev->desc_nr]; in super_90_sync()
1313 super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) in super_90_rdev_size_change() argument
1315 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) in super_90_rdev_size_change()
1317 if (rdev->mddev->bitmap_info.offset) in super_90_rdev_size_change()
1319 rdev->sb_start = calc_dev_sboffset(rdev); in super_90_rdev_size_change()
1320 if (!num_sectors || num_sectors > rdev->sb_start) in super_90_rdev_size_change()
1321 num_sectors = rdev->sb_start; in super_90_rdev_size_change()
1325 if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1) in super_90_rdev_size_change()
1327 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, in super_90_rdev_size_change()
1328 rdev->sb_page); in super_90_rdev_size_change()
1329 md_super_wait(rdev->mddev); in super_90_rdev_size_change()
1334 super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset) in super_90_allow_new_offset() argument
1368 static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version) in super_1_load() argument
1387 sb_start = i_size_read(rdev->bdev->bd_inode) >> 9; in super_1_load()
1400 rdev->sb_start = sb_start; in super_1_load()
1405 ret = read_disk_sb(rdev, 4096); in super_1_load()
1408 sb = page_address(rdev->sb_page); in super_1_load()
1413 le64_to_cpu(sb->super_offset) != rdev->sb_start || in super_1_load()
1419 bdevname(rdev->bdev,b)); in super_1_load()
1424 bdevname(rdev->bdev,b)); in super_1_load()
1433 rdev->preferred_minor = 0xffff; in super_1_load()
1434 rdev->data_offset = le64_to_cpu(sb->data_offset); in super_1_load()
1435 rdev->new_data_offset = rdev->data_offset; in super_1_load()
1438 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset); in super_1_load()
1439 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); in super_1_load()
1441 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; in super_1_load()
1442 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; in super_1_load()
1443 if (rdev->sb_size & bmask) in super_1_load()
1444 rdev->sb_size = (rdev->sb_size | bmask) + 1; in super_1_load()
1447 && rdev->data_offset < sb_start + (rdev->sb_size/512)) in super_1_load()
1450 && rdev->new_data_offset < sb_start + (rdev->sb_size/512)) in super_1_load()
1454 rdev->desc_nr = -1; in super_1_load()
1456 rdev->desc_nr = le32_to_cpu(sb->dev_number); in super_1_load()
1458 if (!rdev->bb_page) { in super_1_load()
1459 rdev->bb_page = alloc_page(GFP_KERNEL); in super_1_load()
1460 if (!rdev->bb_page) in super_1_load()
1464 rdev->badblocks.count == 0) { in super_1_load()
1479 if (!sync_page_io(rdev, bb_sector, sectors << 9, in super_1_load()
1480 rdev->bb_page, READ, true)) in super_1_load()
1482 bbp = (u64 *)page_address(rdev->bb_page); in super_1_load()
1483 rdev->badblocks.shift = sb->bblog_shift; in super_1_load()
1492 if (md_set_badblocks(&rdev->badblocks, in super_1_load()
1497 rdev->badblocks.shift = 0; in super_1_load()
1511 bdevname(rdev->bdev,b), in super_1_load()
1524 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9); in super_1_load()
1525 sectors -= rdev->data_offset; in super_1_load()
1527 sectors = rdev->sb_start; in super_1_load()
1530 rdev->sectors = le64_to_cpu(sb->data_size); in super_1_load()
1534 static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) in super_1_validate() argument
1536 struct mdp_superblock_1 *sb = page_address(rdev->sb_page); in super_1_validate()
1539 rdev->raid_disk = -1; in super_1_validate()
1540 clear_bit(Faulty, &rdev->flags); in super_1_validate()
1541 clear_bit(In_sync, &rdev->flags); in super_1_validate()
1542 clear_bit(Bitmap_sync, &rdev->flags); in super_1_validate()
1543 clear_bit(WriteMostly, &rdev->flags); in super_1_validate()
1614 if (rdev->desc_nr >= 0 && in super_1_validate()
1615 rdev->desc_nr < le32_to_cpu(sb->max_dev) && in super_1_validate()
1616 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX || in super_1_validate()
1617 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)) in super_1_validate()
1627 set_bit(Bitmap_sync, &rdev->flags); in super_1_validate()
1635 if (rdev->desc_nr < 0 || in super_1_validate()
1636 rdev->desc_nr >= le32_to_cpu(sb->max_dev)) { in super_1_validate()
1638 rdev->desc_nr = -1; in super_1_validate()
1640 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); in super_1_validate()
1645 set_bit(Faulty, &rdev->flags); in super_1_validate()
1654 set_bit(Journal, &rdev->flags); in super_1_validate()
1655 rdev->journal_tail = le64_to_cpu(sb->journal_tail); in super_1_validate()
1658 rdev->raid_disk = 0; in super_1_validate()
1661 rdev->saved_raid_disk = role; in super_1_validate()
1664 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset); in super_1_validate()
1667 rdev->saved_raid_disk = -1; in super_1_validate()
1669 set_bit(In_sync, &rdev->flags); in super_1_validate()
1670 rdev->raid_disk = role; in super_1_validate()
1674 set_bit(WriteMostly, &rdev->flags); in super_1_validate()
1676 set_bit(Replacement, &rdev->flags); in super_1_validate()
1680 set_bit(In_sync, &rdev->flags); in super_1_validate()
1685 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) in super_1_sync() argument
1692 sb = page_address(rdev->sb_page); in super_1_sync()
1708 sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors)); in super_1_sync()
1716 if (test_bit(WriteMostly, &rdev->flags)) in super_1_sync()
1720 sb->data_offset = cpu_to_le64(rdev->data_offset); in super_1_sync()
1721 sb->data_size = cpu_to_le64(rdev->sectors); in super_1_sync()
1728 if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) && in super_1_sync()
1729 !test_bit(In_sync, &rdev->flags)) { in super_1_sync()
1733 cpu_to_le64(rdev->recovery_offset); in super_1_sync()
1734 if (rdev->saved_raid_disk >= 0 && mddev->bitmap) in super_1_sync()
1739 if (test_bit(Journal, &rdev->flags)) in super_1_sync()
1740 sb->journal_tail = cpu_to_le64(rdev->journal_tail); in super_1_sync()
1741 if (test_bit(Replacement, &rdev->flags)) in super_1_sync()
1756 if (rdev->new_data_offset != rdev->data_offset) { in super_1_sync()
1759 sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset in super_1_sync()
1760 - rdev->data_offset)); in super_1_sync()
1767 if (rdev->badblocks.count == 0) in super_1_sync()
1771 md_error(mddev, rdev); in super_1_sync()
1773 struct badblocks *bb = &rdev->badblocks; in super_1_sync()
1774 u64 *bbp = (u64 *)page_address(rdev->bb_page); in super_1_sync()
1795 bb->sector = (rdev->sb_start + in super_1_sync()
1809 rdev->sb_size = max_dev * 2 + 256; in super_1_sync()
1810 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; in super_1_sync()
1811 if (rdev->sb_size & bmask) in super_1_sync()
1812 rdev->sb_size = (rdev->sb_size | bmask) + 1; in super_1_sync()
1840 super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) in super_1_rdev_size_change() argument
1844 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) in super_1_rdev_size_change()
1846 if (rdev->data_offset != rdev->new_data_offset) in super_1_rdev_size_change()
1848 if (rdev->sb_start < rdev->data_offset) { in super_1_rdev_size_change()
1850 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9; in super_1_rdev_size_change()
1851 max_sectors -= rdev->data_offset; in super_1_rdev_size_change()
1854 } else if (rdev->mddev->bitmap_info.offset) { in super_1_rdev_size_change()
1860 sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2; in super_1_rdev_size_change()
1862 max_sectors = rdev->sectors + sb_start - rdev->sb_start; in super_1_rdev_size_change()
1865 rdev->sb_start = sb_start; in super_1_rdev_size_change()
1867 sb = page_address(rdev->sb_page); in super_1_rdev_size_change()
1869 sb->super_offset = rdev->sb_start; in super_1_rdev_size_change()
1871 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, in super_1_rdev_size_change()
1872 rdev->sb_page); in super_1_rdev_size_change()
1873 md_super_wait(rdev->mddev); in super_1_rdev_size_change()
1879 super_1_allow_new_offset(struct md_rdev *rdev, in super_1_allow_new_offset() argument
1884 if (new_offset >= rdev->data_offset) in super_1_allow_new_offset()
1889 if (rdev->mddev->minor_version == 0) in super_1_allow_new_offset()
1898 if (rdev->sb_start + (32+4)*2 > new_offset) in super_1_allow_new_offset()
1900 bitmap = rdev->mddev->bitmap; in super_1_allow_new_offset()
1901 if (bitmap && !rdev->mddev->bitmap_info.file && in super_1_allow_new_offset()
1902 rdev->sb_start + rdev->mddev->bitmap_info.offset + in super_1_allow_new_offset()
1905 if (rdev->badblocks.sector + rdev->badblocks.size > new_offset) in super_1_allow_new_offset()
1932 static void sync_super(struct mddev *mddev, struct md_rdev *rdev) in sync_super() argument
1935 mddev->sync_super(mddev, rdev); in sync_super()
1941 super_types[mddev->major_version].sync_super(mddev, rdev); in sync_super()
1946 struct md_rdev *rdev, *rdev2; in match_mddev_units() local
1949 rdev_for_each_rcu(rdev, mddev1) { in match_mddev_units()
1950 if (test_bit(Faulty, &rdev->flags) || in match_mddev_units()
1951 test_bit(Journal, &rdev->flags) || in match_mddev_units()
1952 rdev->raid_disk == -1) in match_mddev_units()
1959 if (rdev->bdev->bd_contains == in match_mddev_units()
1981 struct md_rdev *rdev, *reference = NULL; in md_integrity_register() local
1987 rdev_for_each(rdev, mddev) { in md_integrity_register()
1989 if (test_bit(Faulty, &rdev->flags)) in md_integrity_register()
1991 if (rdev->raid_disk < 0) in md_integrity_register()
1995 reference = rdev; in md_integrity_register()
2000 rdev->bdev->bd_disk) < 0) in md_integrity_register()
2026 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) in md_integrity_add_rdev() argument
2035 bi_rdev = bdev_get_integrity(rdev->bdev); in md_integrity_add_rdev()
2041 if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) { in md_integrity_add_rdev()
2043 mdname(mddev), bdevname(rdev->bdev, name)); in md_integrity_add_rdev()
2051 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) in bind_rdev_to_array() argument
2058 if (find_rdev(mddev, rdev->bdev->bd_dev)) in bind_rdev_to_array()
2062 if (rdev->sectors && (mddev->dev_sectors == 0 || in bind_rdev_to_array()
2063 rdev->sectors < mddev->dev_sectors)) { in bind_rdev_to_array()
2072 mddev->dev_sectors = rdev->sectors; in bind_rdev_to_array()
2080 if (rdev->desc_nr < 0) { in bind_rdev_to_array()
2086 rdev->desc_nr = choice; in bind_rdev_to_array()
2088 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) { in bind_rdev_to_array()
2094 if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) { in bind_rdev_to_array()
2099 bdevname(rdev->bdev,b); in bind_rdev_to_array()
2102 rdev->mddev = mddev; in bind_rdev_to_array()
2105 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) in bind_rdev_to_array()
2108 ko = &part_to_dev(rdev->bdev->bd_part)->kobj; in bind_rdev_to_array()
2109 if (sysfs_create_link(&rdev->kobj, ko, "block")) in bind_rdev_to_array()
2111 rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state"); in bind_rdev_to_array()
2113 list_add_rcu(&rdev->same_set, &mddev->disks); in bind_rdev_to_array()
2114 bd_link_disk_holder(rdev->bdev, mddev->gendisk); in bind_rdev_to_array()
2129 struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work); in md_delayed_delete() local
2130 kobject_del(&rdev->kobj); in md_delayed_delete()
2131 kobject_put(&rdev->kobj); in md_delayed_delete()
2134 static void unbind_rdev_from_array(struct md_rdev *rdev) in unbind_rdev_from_array() argument
2138 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk); in unbind_rdev_from_array()
2139 list_del_rcu(&rdev->same_set); in unbind_rdev_from_array()
2140 printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b)); in unbind_rdev_from_array()
2141 rdev->mddev = NULL; in unbind_rdev_from_array()
2142 sysfs_remove_link(&rdev->kobj, "block"); in unbind_rdev_from_array()
2143 sysfs_put(rdev->sysfs_state); in unbind_rdev_from_array()
2144 rdev->sysfs_state = NULL; in unbind_rdev_from_array()
2145 rdev->badblocks.count = 0; in unbind_rdev_from_array()
2151 INIT_WORK(&rdev->del_work, md_delayed_delete); in unbind_rdev_from_array()
2152 kobject_get(&rdev->kobj); in unbind_rdev_from_array()
2153 queue_work(md_misc_wq, &rdev->del_work); in unbind_rdev_from_array()
2161 static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared) in lock_rdev() argument
2168 shared ? (struct md_rdev *)lock_rdev : rdev); in lock_rdev()
2174 rdev->bdev = bdev; in lock_rdev()
2178 static void unlock_rdev(struct md_rdev *rdev) in unlock_rdev() argument
2180 struct block_device *bdev = rdev->bdev; in unlock_rdev()
2181 rdev->bdev = NULL; in unlock_rdev()
2187 static void export_rdev(struct md_rdev *rdev) in export_rdev() argument
2192 bdevname(rdev->bdev,b)); in export_rdev()
2193 md_rdev_clear(rdev); in export_rdev()
2195 if (test_bit(AutoDetected, &rdev->flags)) in export_rdev()
2196 md_autodetect_dev(rdev->bdev->bd_dev); in export_rdev()
2198 unlock_rdev(rdev); in export_rdev()
2199 kobject_put(&rdev->kobj); in export_rdev()
2202 void md_kick_rdev_from_array(struct md_rdev *rdev) in md_kick_rdev_from_array() argument
2204 unbind_rdev_from_array(rdev); in md_kick_rdev_from_array()
2205 export_rdev(rdev); in md_kick_rdev_from_array()
2211 struct md_rdev *rdev; in export_array() local
2214 rdev = list_first_entry(&mddev->disks, struct md_rdev, in export_array()
2216 md_kick_rdev_from_array(rdev); in export_array()
2230 struct md_rdev *rdev; in sync_sbs() local
2231 rdev_for_each(rdev, mddev) { in sync_sbs()
2232 if (rdev->sb_events == mddev->events || in sync_sbs()
2234 rdev->raid_disk < 0 && in sync_sbs()
2235 rdev->sb_events+1 == mddev->events)) { in sync_sbs()
2237 rdev->sb_loaded = 2; in sync_sbs()
2239 sync_super(mddev, rdev); in sync_sbs()
2240 rdev->sb_loaded = 1; in sync_sbs()
2247 struct md_rdev *rdev; in does_sb_need_changing() local
2252 rdev_for_each(rdev, mddev) in does_sb_need_changing()
2253 if ((rdev->raid_disk >= 0) && !test_bit(Faulty, &rdev->flags)) in does_sb_need_changing()
2257 if (!rdev) in does_sb_need_changing()
2260 sb = page_address(rdev->sb_page); in does_sb_need_changing()
2262 rdev_for_each(rdev, mddev) { in does_sb_need_changing()
2263 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); in does_sb_need_changing()
2265 if (role == 0xffff && rdev->raid_disk >=0 && in does_sb_need_changing()
2266 !test_bit(Faulty, &rdev->flags)) in does_sb_need_changing()
2269 if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd)) in does_sb_need_changing()
2286 struct md_rdev *rdev; in md_update_sb() local
2312 rdev_for_each(rdev, mddev) { in md_update_sb()
2313 if (rdev->raid_disk >= 0 && in md_update_sb()
2315 !test_bit(Journal, &rdev->flags) && in md_update_sb()
2316 !test_bit(In_sync, &rdev->flags) && in md_update_sb()
2317 mddev->curr_resync_completed > rdev->recovery_offset) in md_update_sb()
2318 rdev->recovery_offset = mddev->curr_resync_completed; in md_update_sb()
2326 rdev_for_each(rdev, mddev) { in md_update_sb()
2327 if (rdev->badblocks.changed) { in md_update_sb()
2328 rdev->badblocks.changed = 0; in md_update_sb()
2329 md_ack_all_badblocks(&rdev->badblocks); in md_update_sb()
2330 md_error(mddev, rdev); in md_update_sb()
2332 clear_bit(Blocked, &rdev->flags); in md_update_sb()
2333 clear_bit(BlockedBadBlocks, &rdev->flags); in md_update_sb()
2334 wake_up(&rdev->blocked_wait); in md_update_sb()
2390 rdev_for_each(rdev, mddev) { in md_update_sb()
2391 if (rdev->badblocks.changed) in md_update_sb()
2393 if (test_bit(Faulty, &rdev->flags)) in md_update_sb()
2394 set_bit(FaultRecorded, &rdev->flags); in md_update_sb()
2404 rdev_for_each(rdev, mddev) { in md_update_sb()
2407 if (rdev->sb_loaded != 1) in md_update_sb()
2410 if (!test_bit(Faulty, &rdev->flags)) { in md_update_sb()
2411 md_super_write(mddev,rdev, in md_update_sb()
2412 rdev->sb_start, rdev->sb_size, in md_update_sb()
2413 rdev->sb_page); in md_update_sb()
2415 bdevname(rdev->bdev, b), in md_update_sb()
2416 (unsigned long long)rdev->sb_start); in md_update_sb()
2417 rdev->sb_events = mddev->events; in md_update_sb()
2418 if (rdev->badblocks.size) { in md_update_sb()
2419 md_super_write(mddev, rdev, in md_update_sb()
2420 rdev->badblocks.sector, in md_update_sb()
2421 rdev->badblocks.size << 9, in md_update_sb()
2422 rdev->bb_page); in md_update_sb()
2423 rdev->badblocks.size = 0; in md_update_sb()
2428 bdevname(rdev->bdev, b)); in md_update_sb()
2450 rdev_for_each(rdev, mddev) { in md_update_sb()
2451 if (test_and_clear_bit(FaultRecorded, &rdev->flags)) in md_update_sb()
2452 clear_bit(Blocked, &rdev->flags); in md_update_sb()
2455 md_ack_all_badblocks(&rdev->badblocks); in md_update_sb()
2456 clear_bit(BlockedBadBlocks, &rdev->flags); in md_update_sb()
2457 wake_up(&rdev->blocked_wait); in md_update_sb()
2465 static int add_bound_rdev(struct md_rdev *rdev) in add_bound_rdev() argument
2467 struct mddev *mddev = rdev->mddev; in add_bound_rdev()
2476 validate_super(mddev, rdev); in add_bound_rdev()
2477 err = mddev->pers->hot_add_disk(mddev, rdev); in add_bound_rdev()
2479 unbind_rdev_from_array(rdev); in add_bound_rdev()
2480 export_rdev(rdev); in add_bound_rdev()
2484 sysfs_notify_dirent_safe(rdev->sysfs_state); in add_bound_rdev()
2522 state_show(struct md_rdev *rdev, char *page) in state_show() argument
2526 unsigned long flags = ACCESS_ONCE(rdev->flags); in state_show()
2529 rdev->badblocks.unacked_exist) { in state_show()
2546 (rdev->badblocks.unacked_exist in state_show()
2574 state_store(struct md_rdev *rdev, const char *buf, size_t len) in state_store() argument
2590 if (cmd_match(buf, "faulty") && rdev->mddev->pers) { in state_store()
2591 md_error(rdev->mddev, rdev); in state_store()
2592 if (test_bit(Faulty, &rdev->flags)) in state_store()
2597 if (rdev->raid_disk >= 0) in state_store()
2600 struct mddev *mddev = rdev->mddev; in state_store()
2603 err = md_cluster_ops->remove_disk(mddev, rdev); in state_store()
2606 md_kick_rdev_from_array(rdev); in state_store()
2613 set_bit(WriteMostly, &rdev->flags); in state_store()
2616 clear_bit(WriteMostly, &rdev->flags); in state_store()
2619 set_bit(Blocked, &rdev->flags); in state_store()
2622 if (!test_bit(Faulty, &rdev->flags) && in state_store()
2623 rdev->badblocks.unacked_exist) { in state_store()
2627 md_error(rdev->mddev, rdev); in state_store()
2629 clear_bit(Blocked, &rdev->flags); in state_store()
2630 clear_bit(BlockedBadBlocks, &rdev->flags); in state_store()
2631 wake_up(&rdev->blocked_wait); in state_store()
2632 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); in state_store()
2633 md_wakeup_thread(rdev->mddev->thread); in state_store()
2636 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) { in state_store()
2637 set_bit(In_sync, &rdev->flags); in state_store()
2639 } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 && in state_store()
2640 !test_bit(Journal, &rdev->flags)) { in state_store()
2641 if (rdev->mddev->pers == NULL) { in state_store()
2642 clear_bit(In_sync, &rdev->flags); in state_store()
2643 rdev->saved_raid_disk = rdev->raid_disk; in state_store()
2644 rdev->raid_disk = -1; in state_store()
2648 set_bit(WriteErrorSeen, &rdev->flags); in state_store()
2651 clear_bit(WriteErrorSeen, &rdev->flags); in state_store()
2658 if (rdev->raid_disk >= 0 && in state_store()
2659 !test_bit(Journal, &rdev->flags) && in state_store()
2660 !test_bit(Replacement, &rdev->flags)) in state_store()
2661 set_bit(WantReplacement, &rdev->flags); in state_store()
2662 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); in state_store()
2663 md_wakeup_thread(rdev->mddev->thread); in state_store()
2670 clear_bit(WantReplacement, &rdev->flags); in state_store()
2676 if (rdev->mddev->pers) in state_store()
2679 set_bit(Replacement, &rdev->flags); in state_store()
2684 if (rdev->mddev->pers) in state_store()
2687 clear_bit(Replacement, &rdev->flags); in state_store()
2691 if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1)) { in state_store()
2698 if (!mddev_is_clustered(rdev->mddev) || in state_store()
2699 (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) { in state_store()
2700 clear_bit(Faulty, &rdev->flags); in state_store()
2701 err = add_bound_rdev(rdev); in state_store()
2707 sysfs_notify_dirent_safe(rdev->sysfs_state); in state_store()
2714 errors_show(struct md_rdev *rdev, char *page) in errors_show() argument
2716 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); in errors_show()
2720 errors_store(struct md_rdev *rdev, const char *buf, size_t len) in errors_store() argument
2728 atomic_set(&rdev->corrected_errors, n); in errors_store()
2735 slot_show(struct md_rdev *rdev, char *page) in slot_show() argument
2737 if (test_bit(Journal, &rdev->flags)) in slot_show()
2739 else if (rdev->raid_disk < 0) in slot_show()
2742 return sprintf(page, "%d\n", rdev->raid_disk); in slot_show()
2746 slot_store(struct md_rdev *rdev, const char *buf, size_t len) in slot_store() argument
2751 if (test_bit(Journal, &rdev->flags)) in slot_store()
2760 if (rdev->mddev->pers && slot == -1) { in slot_store()
2768 if (rdev->raid_disk == -1) in slot_store()
2771 if (rdev->mddev->pers->hot_remove_disk == NULL) in slot_store()
2773 clear_bit(Blocked, &rdev->flags); in slot_store()
2774 remove_and_add_spares(rdev->mddev, rdev); in slot_store()
2775 if (rdev->raid_disk >= 0) in slot_store()
2777 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); in slot_store()
2778 md_wakeup_thread(rdev->mddev->thread); in slot_store()
2779 } else if (rdev->mddev->pers) { in slot_store()
2785 if (rdev->raid_disk != -1) in slot_store()
2788 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery)) in slot_store()
2791 if (rdev->mddev->pers->hot_add_disk == NULL) in slot_store()
2794 if (slot >= rdev->mddev->raid_disks && in slot_store()
2795 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) in slot_store()
2798 rdev->raid_disk = slot; in slot_store()
2799 if (test_bit(In_sync, &rdev->flags)) in slot_store()
2800 rdev->saved_raid_disk = slot; in slot_store()
2802 rdev->saved_raid_disk = -1; in slot_store()
2803 clear_bit(In_sync, &rdev->flags); in slot_store()
2804 clear_bit(Bitmap_sync, &rdev->flags); in slot_store()
2805 err = rdev->mddev->pers-> in slot_store()
2806 hot_add_disk(rdev->mddev, rdev); in slot_store()
2808 rdev->raid_disk = -1; in slot_store()
2811 sysfs_notify_dirent_safe(rdev->sysfs_state); in slot_store()
2812 if (sysfs_link_rdev(rdev->mddev, rdev)) in slot_store()
2816 if (slot >= rdev->mddev->raid_disks && in slot_store()
2817 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) in slot_store()
2819 rdev->raid_disk = slot; in slot_store()
2821 clear_bit(Faulty, &rdev->flags); in slot_store()
2822 clear_bit(WriteMostly, &rdev->flags); in slot_store()
2823 set_bit(In_sync, &rdev->flags); in slot_store()
2824 sysfs_notify_dirent_safe(rdev->sysfs_state); in slot_store()
2833 offset_show(struct md_rdev *rdev, char *page) in offset_show() argument
2835 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); in offset_show()
2839 offset_store(struct md_rdev *rdev, const char *buf, size_t len) in offset_store() argument
2844 if (rdev->mddev->pers && rdev->raid_disk >= 0) in offset_store()
2846 if (rdev->sectors && rdev->mddev->external) in offset_store()
2850 rdev->data_offset = offset; in offset_store()
2851 rdev->new_data_offset = offset; in offset_store()
2858 static ssize_t new_offset_show(struct md_rdev *rdev, char *page) in new_offset_show() argument
2861 (unsigned long long)rdev->new_data_offset); in new_offset_show()
2864 static ssize_t new_offset_store(struct md_rdev *rdev, in new_offset_store() argument
2868 struct mddev *mddev = rdev->mddev; in new_offset_store()
2876 if (new_offset == rdev->data_offset) in new_offset_store()
2879 else if (new_offset > rdev->data_offset) { in new_offset_store()
2881 if (new_offset - rdev->data_offset in new_offset_store()
2882 + mddev->dev_sectors > rdev->sectors) in new_offset_store()
2890 if (new_offset < rdev->data_offset && in new_offset_store()
2897 if (new_offset > rdev->data_offset && in new_offset_store()
2903 .allow_new_offset(rdev, new_offset)) in new_offset_store()
2905 rdev->new_data_offset = new_offset; in new_offset_store()
2906 if (new_offset > rdev->data_offset) in new_offset_store()
2908 else if (new_offset < rdev->data_offset) in new_offset_store()
2917 rdev_size_show(struct md_rdev *rdev, char *page) in rdev_size_show() argument
2919 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2); in rdev_size_show()
2952 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len) in rdev_size_store() argument
2954 struct mddev *my_mddev = rdev->mddev; in rdev_size_store()
2955 sector_t oldsectors = rdev->sectors; in rdev_size_store()
2958 if (test_bit(Journal, &rdev->flags)) in rdev_size_store()
2962 if (rdev->data_offset != rdev->new_data_offset) in rdev_size_store()
2964 if (my_mddev->pers && rdev->raid_disk >= 0) { in rdev_size_store()
2967 rdev_size_change(rdev, sectors); in rdev_size_store()
2971 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) - in rdev_size_store()
2972 rdev->data_offset; in rdev_size_store()
2980 rdev->sectors = sectors; in rdev_size_store()
2997 if (rdev->bdev == rdev2->bdev && in rdev_size_store()
2998 rdev != rdev2 && in rdev_size_store()
2999 overlaps(rdev->data_offset, rdev->sectors, in rdev_size_store()
3018 rdev->sectors = oldsectors; in rdev_size_store()
3028 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page) in recovery_start_show() argument
3030 unsigned long long recovery_start = rdev->recovery_offset; in recovery_start_show()
3032 if (test_bit(In_sync, &rdev->flags) || in recovery_start_show()
3039 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len) in recovery_start_store() argument
3048 if (rdev->mddev->pers && in recovery_start_store()
3049 rdev->raid_disk >= 0) in recovery_start_store()
3052 rdev->recovery_offset = recovery_start; in recovery_start_store()
3054 set_bit(In_sync, &rdev->flags); in recovery_start_store()
3056 clear_bit(In_sync, &rdev->flags); in recovery_start_store()
3068 static ssize_t bb_show(struct md_rdev *rdev, char *page) in bb_show() argument
3070 return badblocks_show(&rdev->badblocks, page, 0); in bb_show()
3072 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len) in bb_store() argument
3074 int rv = badblocks_store(&rdev->badblocks, page, len, 0); in bb_store()
3076 if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags)) in bb_store()
3077 wake_up(&rdev->blocked_wait); in bb_store()
3083 static ssize_t ubb_show(struct md_rdev *rdev, char *page) in ubb_show() argument
3085 return badblocks_show(&rdev->badblocks, page, 1); in ubb_show()
3087 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len) in ubb_store() argument
3089 return badblocks_store(&rdev->badblocks, page, len, 1); in ubb_store()
3110 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); in rdev_attr_show() local
3114 if (!rdev->mddev) in rdev_attr_show()
3116 return entry->show(rdev, page); in rdev_attr_show()
3124 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); in rdev_attr_store() local
3126 struct mddev *mddev = rdev->mddev; in rdev_attr_store()
3134 if (rdev->mddev == NULL) in rdev_attr_store()
3137 rv = entry->store(rdev, page, length); in rdev_attr_store()
3145 struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj); in rdev_free() local
3146 kfree(rdev); in rdev_free()
3158 int md_rdev_init(struct md_rdev *rdev) in md_rdev_init() argument
3160 rdev->desc_nr = -1; in md_rdev_init()
3161 rdev->saved_raid_disk = -1; in md_rdev_init()
3162 rdev->raid_disk = -1; in md_rdev_init()
3163 rdev->flags = 0; in md_rdev_init()
3164 rdev->data_offset = 0; in md_rdev_init()
3165 rdev->new_data_offset = 0; in md_rdev_init()
3166 rdev->sb_events = 0; in md_rdev_init()
3167 rdev->last_read_error.tv_sec = 0; in md_rdev_init()
3168 rdev->last_read_error.tv_nsec = 0; in md_rdev_init()
3169 rdev->sb_loaded = 0; in md_rdev_init()
3170 rdev->bb_page = NULL; in md_rdev_init()
3171 atomic_set(&rdev->nr_pending, 0); in md_rdev_init()
3172 atomic_set(&rdev->read_errors, 0); in md_rdev_init()
3173 atomic_set(&rdev->corrected_errors, 0); in md_rdev_init()
3175 INIT_LIST_HEAD(&rdev->same_set); in md_rdev_init()
3176 init_waitqueue_head(&rdev->blocked_wait); in md_rdev_init()
3182 rdev->badblocks.count = 0; in md_rdev_init()
3183 rdev->badblocks.shift = -1; /* disabled until explicitly enabled */ in md_rdev_init()
3184 rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL); in md_rdev_init()
3185 seqlock_init(&rdev->badblocks.lock); in md_rdev_init()
3186 if (rdev->badblocks.page == NULL) in md_rdev_init()
3206 struct md_rdev *rdev; in md_import_device() local
3209 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); in md_import_device()
3210 if (!rdev) { in md_import_device()
3215 err = md_rdev_init(rdev); in md_import_device()
3218 err = alloc_disk_sb(rdev); in md_import_device()
3222 err = lock_rdev(rdev, newdev, super_format == -2); in md_import_device()
3226 kobject_init(&rdev->kobj, &rdev_ktype); in md_import_device()
3228 size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS; in md_import_device()
3232 bdevname(rdev->bdev,b)); in md_import_device()
3239 load_super(rdev, NULL, super_minor); in md_import_device()
3244 bdevname(rdev->bdev,b), in md_import_device()
3251 bdevname(rdev->bdev,b)); in md_import_device()
3256 return rdev; in md_import_device()
3259 if (rdev->bdev) in md_import_device()
3260 unlock_rdev(rdev); in md_import_device()
3261 md_rdev_clear(rdev); in md_import_device()
3262 kfree(rdev); in md_import_device()
3273 struct md_rdev *rdev, *freshest, *tmp; in analyze_sbs() local
3277 rdev_for_each_safe(rdev, tmp, mddev) in analyze_sbs()
3279 load_super(rdev, freshest, mddev->minor_version)) { in analyze_sbs()
3281 freshest = rdev; in analyze_sbs()
3289 bdevname(rdev->bdev,b)); in analyze_sbs()
3290 md_kick_rdev_from_array(rdev); in analyze_sbs()
3297 rdev_for_each_safe(rdev, tmp, mddev) { in analyze_sbs()
3299 (rdev->desc_nr >= mddev->max_disks || in analyze_sbs()
3303 mdname(mddev), bdevname(rdev->bdev, b), in analyze_sbs()
3305 md_kick_rdev_from_array(rdev); in analyze_sbs()
3308 if (rdev != freshest) { in analyze_sbs()
3310 validate_super(mddev, rdev)) { in analyze_sbs()
3313 bdevname(rdev->bdev,b)); in analyze_sbs()
3314 md_kick_rdev_from_array(rdev); in analyze_sbs()
3319 rdev->desc_nr = i++; in analyze_sbs()
3320 rdev->raid_disk = rdev->desc_nr; in analyze_sbs()
3321 set_bit(In_sync, &rdev->flags); in analyze_sbs()
3322 } else if (rdev->raid_disk >= in analyze_sbs()
3324 !test_bit(Journal, &rdev->flags)) { in analyze_sbs()
3325 rdev->raid_disk = -1; in analyze_sbs()
3326 clear_bit(In_sync, &rdev->flags); in analyze_sbs()
3434 struct md_rdev *rdev; in level_store() local
3510 rdev_for_each(rdev, mddev) in level_store()
3511 rdev->new_raid_disk = rdev->raid_disk; in level_store()
3581 rdev_for_each(rdev, mddev) { in level_store()
3582 if (rdev->raid_disk < 0) in level_store()
3584 if (rdev->new_raid_disk >= mddev->raid_disks) in level_store()
3585 rdev->new_raid_disk = -1; in level_store()
3586 if (rdev->new_raid_disk == rdev->raid_disk) in level_store()
3588 sysfs_unlink_rdev(mddev, rdev); in level_store()
3590 rdev_for_each(rdev, mddev) { in level_store()
3591 if (rdev->raid_disk < 0) in level_store()
3593 if (rdev->new_raid_disk == rdev->raid_disk) in level_store()
3595 rdev->raid_disk = rdev->new_raid_disk; in level_store()
3596 if (rdev->raid_disk < 0) in level_store()
3597 clear_bit(In_sync, &rdev->flags); in level_store()
3599 if (sysfs_link_rdev(mddev, rdev)) in level_store()
3602 rdev->raid_disk, mdname(mddev)); in level_store()
3706 struct md_rdev *rdev; in raid_disks_store() local
3710 rdev_for_each(rdev, mddev) { in raid_disks_store()
3712 rdev->data_offset < rdev->new_data_offset) in raid_disks_store()
3715 rdev->data_offset > rdev->new_data_offset) in raid_disks_store()
4079 struct md_rdev *rdev; in new_dev_store() local
4098 rdev = md_import_device(dev, mddev->major_version, in new_dev_store()
4100 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { in new_dev_store()
4105 .load_super(rdev, rdev0, mddev->minor_version); in new_dev_store()
4110 rdev = md_import_device(dev, -2, -1); in new_dev_store()
4112 rdev = md_import_device(dev, -1, -1); in new_dev_store()
4114 if (IS_ERR(rdev)) { in new_dev_store()
4116 return PTR_ERR(rdev); in new_dev_store()
4118 err = bind_rdev_to_array(rdev, mddev); in new_dev_store()
4121 export_rdev(rdev); in new_dev_store()
4725 struct md_rdev *rdev; in reshape_position_store() local
4746 rdev_for_each(rdev, mddev) in reshape_position_store()
4747 rdev->new_data_offset = rdev->data_offset; in reshape_position_store()
5121 struct md_rdev *rdev; in md_run() local
5153 rdev_for_each(rdev, mddev) { in md_run()
5154 if (test_bit(Faulty, &rdev->flags)) in md_run()
5156 sync_blockdev(rdev->bdev); in md_run()
5157 invalidate_bdev(rdev->bdev); in md_run()
5163 if (rdev->meta_bdev) { in md_run()
5165 } else if (rdev->data_offset < rdev->sb_start) { in md_run()
5167 rdev->data_offset + mddev->dev_sectors in md_run()
5168 > rdev->sb_start) { in md_run()
5174 if (rdev->sb_start + rdev->sb_size/512 in md_run()
5175 > rdev->data_offset) { in md_run()
5181 sysfs_notify_dirent_safe(rdev->sysfs_state); in md_run()
5221 rdev_for_each(rdev, mddev) in md_run()
5223 if (rdev < rdev2 && in md_run()
5224 rdev->bdev->bd_contains == in md_run()
5231 bdevname(rdev->bdev,b), in md_run()
5314 rdev_for_each(rdev, mddev) in md_run()
5315 if (rdev->raid_disk >= 0) in md_run()
5316 if (sysfs_link_rdev(mddev, rdev)) in md_run()
5376 struct md_rdev *rdev; in restart_array() local
5380 rdev_for_each_rcu(rdev, mddev) { in restart_array()
5381 if (test_bit(Journal, &rdev->flags) && in restart_array()
5382 !test_bit(Faulty, &rdev->flags)) { in restart_array()
5603 struct md_rdev *rdev; in do_md_stop() local
5650 rdev_for_each(rdev, mddev) in do_md_stop()
5651 if (rdev->raid_disk >= 0) in do_md_stop()
5652 sysfs_unlink_rdev(mddev, rdev); in do_md_stop()
5694 struct md_rdev *rdev; in autorun_array() local
5702 rdev_for_each(rdev, mddev) { in autorun_array()
5704 printk("<%s>", bdevname(rdev->bdev,b)); in autorun_array()
5729 struct md_rdev *rdev0, *rdev, *tmp; in autorun_devices() local
5744 rdev_for_each_list(rdev, tmp, &pending_raid_disks) in autorun_devices()
5745 if (super_90_load(rdev, rdev0, 0) >= 0) { in autorun_devices()
5747 bdevname(rdev->bdev,b)); in autorun_devices()
5748 list_move(&rdev->same_set, &candidates); in autorun_devices()
5790 rdev_for_each_list(rdev, tmp, &candidates) { in autorun_devices()
5791 list_del_init(&rdev->same_set); in autorun_devices()
5792 if (bind_rdev_to_array(rdev, mddev)) in autorun_devices()
5793 export_rdev(rdev); in autorun_devices()
5801 rdev_for_each_list(rdev, tmp, &candidates) { in autorun_devices()
5802 list_del_init(&rdev->same_set); in autorun_devices()
5803 export_rdev(rdev); in autorun_devices()
5829 struct md_rdev *rdev; in get_array_info() local
5833 rdev_for_each_rcu(rdev, mddev) { in get_array_info()
5835 if (test_bit(Faulty, &rdev->flags)) in get_array_info()
5839 if (test_bit(In_sync, &rdev->flags)) in get_array_info()
5917 struct md_rdev *rdev; in get_disk_info() local
5923 rdev = md_find_rdev_nr_rcu(mddev, info.number); in get_disk_info()
5924 if (rdev) { in get_disk_info()
5925 info.major = MAJOR(rdev->bdev->bd_dev); in get_disk_info()
5926 info.minor = MINOR(rdev->bdev->bd_dev); in get_disk_info()
5927 info.raid_disk = rdev->raid_disk; in get_disk_info()
5929 if (test_bit(Faulty, &rdev->flags)) in get_disk_info()
5931 else if (test_bit(In_sync, &rdev->flags)) { in get_disk_info()
5935 if (test_bit(Journal, &rdev->flags)) in get_disk_info()
5937 if (test_bit(WriteMostly, &rdev->flags)) in get_disk_info()
5955 struct md_rdev *rdev; in add_new_disk() local
5971 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); in add_new_disk()
5972 if (IS_ERR(rdev)) { in add_new_disk()
5975 PTR_ERR(rdev)); in add_new_disk()
5976 return PTR_ERR(rdev); in add_new_disk()
5983 .load_super(rdev, rdev0, mddev->minor_version); in add_new_disk()
5987 bdevname(rdev->bdev,b), in add_new_disk()
5989 export_rdev(rdev); in add_new_disk()
5993 err = bind_rdev_to_array(rdev, mddev); in add_new_disk()
5995 export_rdev(rdev); in add_new_disk()
6013 rdev = md_import_device(dev, mddev->major_version, in add_new_disk()
6016 rdev = md_import_device(dev, -1, -1); in add_new_disk()
6017 if (IS_ERR(rdev)) { in add_new_disk()
6020 PTR_ERR(rdev)); in add_new_disk()
6021 return PTR_ERR(rdev); in add_new_disk()
6027 rdev->raid_disk = info->raid_disk; in add_new_disk()
6028 set_bit(In_sync, &rdev->flags); in add_new_disk()
6029 clear_bit(Bitmap_sync, &rdev->flags); in add_new_disk()
6031 rdev->raid_disk = -1; in add_new_disk()
6032 rdev->saved_raid_disk = rdev->raid_disk; in add_new_disk()
6035 validate_super(mddev, rdev); in add_new_disk()
6037 rdev->raid_disk != info->raid_disk) { in add_new_disk()
6041 export_rdev(rdev); in add_new_disk()
6045 clear_bit(In_sync, &rdev->flags); /* just to be sure */ in add_new_disk()
6047 set_bit(WriteMostly, &rdev->flags); in add_new_disk()
6049 clear_bit(WriteMostly, &rdev->flags); in add_new_disk()
6052 set_bit(Journal, &rdev->flags); in add_new_disk()
6058 set_bit(Candidate, &rdev->flags); in add_new_disk()
6061 err = md_cluster_ops->add_new_disk(mddev, rdev); in add_new_disk()
6063 export_rdev(rdev); in add_new_disk()
6069 rdev->raid_disk = -1; in add_new_disk()
6070 err = bind_rdev_to_array(rdev, mddev); in add_new_disk()
6073 export_rdev(rdev); in add_new_disk()
6082 err = add_bound_rdev(rdev); in add_new_disk()
6086 err = add_bound_rdev(rdev); in add_new_disk()
6102 rdev = md_import_device(dev, -1, 0); in add_new_disk()
6103 if (IS_ERR(rdev)) { in add_new_disk()
6106 PTR_ERR(rdev)); in add_new_disk()
6107 return PTR_ERR(rdev); in add_new_disk()
6109 rdev->desc_nr = info->number; in add_new_disk()
6111 rdev->raid_disk = info->raid_disk; in add_new_disk()
6113 rdev->raid_disk = -1; in add_new_disk()
6115 if (rdev->raid_disk < mddev->raid_disks) in add_new_disk()
6117 set_bit(In_sync, &rdev->flags); in add_new_disk()
6120 set_bit(WriteMostly, &rdev->flags); in add_new_disk()
6124 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; in add_new_disk()
6126 rdev->sb_start = calc_dev_sboffset(rdev); in add_new_disk()
6127 rdev->sectors = rdev->sb_start; in add_new_disk()
6129 err = bind_rdev_to_array(rdev, mddev); in add_new_disk()
6131 export_rdev(rdev); in add_new_disk()
6142 struct md_rdev *rdev; in hot_remove_disk() local
6145 rdev = find_rdev(mddev, dev); in hot_remove_disk()
6146 if (!rdev) in hot_remove_disk()
6152 if (rdev->raid_disk < 0) in hot_remove_disk()
6155 clear_bit(Blocked, &rdev->flags); in hot_remove_disk()
6156 remove_and_add_spares(mddev, rdev); in hot_remove_disk()
6158 if (rdev->raid_disk >= 0) in hot_remove_disk()
6163 md_cluster_ops->remove_disk(mddev, rdev); in hot_remove_disk()
6165 md_kick_rdev_from_array(rdev); in hot_remove_disk()
6175 bdevname(rdev->bdev,b), mdname(mddev)); in hot_remove_disk()
6183 struct md_rdev *rdev; in hot_add_disk() local
6201 rdev = md_import_device(dev, -1, 0); in hot_add_disk()
6202 if (IS_ERR(rdev)) { in hot_add_disk()
6205 PTR_ERR(rdev)); in hot_add_disk()
6210 rdev->sb_start = calc_dev_sboffset(rdev); in hot_add_disk()
6212 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; in hot_add_disk()
6214 rdev->sectors = rdev->sb_start; in hot_add_disk()
6216 if (test_bit(Faulty, &rdev->flags)) { in hot_add_disk()
6219 bdevname(rdev->bdev,b), mdname(mddev)); in hot_add_disk()
6224 clear_bit(In_sync, &rdev->flags); in hot_add_disk()
6225 rdev->desc_nr = -1; in hot_add_disk()
6226 rdev->saved_raid_disk = -1; in hot_add_disk()
6227 err = bind_rdev_to_array(rdev, mddev); in hot_add_disk()
6236 rdev->raid_disk = -1; in hot_add_disk()
6249 export_rdev(rdev); in hot_add_disk()
6431 struct md_rdev *rdev; in update_size() local
6452 rdev_for_each(rdev, mddev) { in update_size()
6453 sector_t avail = rdev->sectors; in update_size()
6469 struct md_rdev *rdev; in update_raid_disks() local
6483 rdev_for_each(rdev, mddev) { in update_raid_disks()
6485 rdev->data_offset < rdev->new_data_offset) in update_raid_disks()
6488 rdev->data_offset > rdev->new_data_offset) in update_raid_disks()
6629 struct md_rdev *rdev; in set_disk_faulty() local
6636 rdev = find_rdev_rcu(mddev, dev); in set_disk_faulty()
6637 if (!rdev) in set_disk_faulty()
6640 md_error(mddev, rdev); in set_disk_faulty()
6641 if (!test_bit(Faulty, &rdev->flags)) in set_disk_faulty()
7177 void md_error(struct mddev *mddev, struct md_rdev *rdev) in md_error() argument
7179 if (!rdev || test_bit(Faulty, &rdev->flags)) in md_error()
7184 mddev->pers->error_handler(mddev,rdev); in md_error()
7187 sysfs_notify_dirent_safe(rdev->sysfs_state); in md_error()
7202 struct md_rdev *rdev; in status_unused() local
7206 list_for_each_entry(rdev, &pending_raid_disks, same_set) { in status_unused()
7210 bdevname(rdev->bdev,b)); in status_unused()
7385 struct md_rdev *rdev; in md_seq_show() local
7418 rdev_for_each_rcu(rdev, mddev) { in md_seq_show()
7421 bdevname(rdev->bdev,b), rdev->desc_nr); in md_seq_show()
7422 if (test_bit(WriteMostly, &rdev->flags)) in md_seq_show()
7424 if (test_bit(Journal, &rdev->flags)) in md_seq_show()
7426 if (test_bit(Faulty, &rdev->flags)) { in md_seq_show()
7430 if (rdev->raid_disk < 0) in md_seq_show()
7432 if (test_bit(Replacement, &rdev->flags)) in md_seq_show()
7434 sectors += rdev->sectors; in md_seq_show()
7603 struct md_rdev *rdev; in is_mddev_idle() local
7609 rdev_for_each_rcu(rdev, mddev) { in is_mddev_idle()
7610 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; in is_mddev_idle()
7636 if (init || curr_events - rdev->last_events > 64) { in is_mddev_idle()
7637 rdev->last_events = curr_events; in is_mddev_idle()
7767 struct md_rdev *rdev; in md_do_sync() local
7878 rdev_for_each_rcu(rdev, mddev) in md_do_sync()
7879 if (rdev->raid_disk >= 0 && in md_do_sync()
7880 !test_bit(Journal, &rdev->flags) && in md_do_sync()
7881 !test_bit(Faulty, &rdev->flags) && in md_do_sync()
7882 !test_bit(In_sync, &rdev->flags) && in md_do_sync()
7883 rdev->recovery_offset < j) in md_do_sync()
7884 j = rdev->recovery_offset; in md_do_sync()
8103 rdev_for_each_rcu(rdev, mddev) in md_do_sync()
8104 if (rdev->raid_disk >= 0 && in md_do_sync()
8106 !test_bit(Journal, &rdev->flags) && in md_do_sync()
8107 !test_bit(Faulty, &rdev->flags) && in md_do_sync()
8108 !test_bit(In_sync, &rdev->flags) && in md_do_sync()
8109 rdev->recovery_offset < mddev->curr_resync) in md_do_sync()
8110 rdev->recovery_offset = mddev->curr_resync; in md_do_sync()
8143 struct md_rdev *rdev; in remove_and_add_spares() local
8147 rdev_for_each(rdev, mddev) in remove_and_add_spares()
8148 if ((this == NULL || rdev == this) && in remove_and_add_spares()
8149 rdev->raid_disk >= 0 && in remove_and_add_spares()
8150 !test_bit(Blocked, &rdev->flags) && in remove_and_add_spares()
8151 (test_bit(Faulty, &rdev->flags) || in remove_and_add_spares()
8152 (!test_bit(In_sync, &rdev->flags) && in remove_and_add_spares()
8153 !test_bit(Journal, &rdev->flags))) && in remove_and_add_spares()
8154 atomic_read(&rdev->nr_pending)==0) { in remove_and_add_spares()
8156 mddev, rdev) == 0) { in remove_and_add_spares()
8157 sysfs_unlink_rdev(mddev, rdev); in remove_and_add_spares()
8158 rdev->raid_disk = -1; in remove_and_add_spares()
8168 rdev_for_each(rdev, mddev) { in remove_and_add_spares()
8169 if (this && this != rdev) in remove_and_add_spares()
8171 if (test_bit(Candidate, &rdev->flags)) in remove_and_add_spares()
8173 if (rdev->raid_disk >= 0 && in remove_and_add_spares()
8174 !test_bit(In_sync, &rdev->flags) && in remove_and_add_spares()
8175 !test_bit(Journal, &rdev->flags) && in remove_and_add_spares()
8176 !test_bit(Faulty, &rdev->flags)) in remove_and_add_spares()
8178 if (rdev->raid_disk >= 0) in remove_and_add_spares()
8180 if (test_bit(Faulty, &rdev->flags)) in remove_and_add_spares()
8182 if (test_bit(Journal, &rdev->flags)) in remove_and_add_spares()
8185 ! (rdev->saved_raid_disk >= 0 && in remove_and_add_spares()
8186 !test_bit(Bitmap_sync, &rdev->flags))) in remove_and_add_spares()
8189 rdev->recovery_offset = 0; in remove_and_add_spares()
8191 hot_add_disk(mddev, rdev) == 0) { in remove_and_add_spares()
8192 if (sysfs_link_rdev(mddev, rdev)) in remove_and_add_spares()
8299 struct md_rdev *rdev; in md_check_recovery() local
8306 rdev_for_each(rdev, mddev) in md_check_recovery()
8307 clear_bit(Blocked, &rdev->flags); in md_check_recovery()
8430 struct md_rdev *rdev; in md_reap_sync_thread() local
8452 rdev_for_each(rdev, mddev) in md_reap_sync_thread()
8453 rdev->saved_raid_disk = -1; in md_reap_sync_thread()
8472 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev) in md_wait_for_blocked_rdev() argument
8474 sysfs_notify_dirent_safe(rdev->sysfs_state); in md_wait_for_blocked_rdev()
8475 wait_event_timeout(rdev->blocked_wait, in md_wait_for_blocked_rdev()
8476 !test_bit(Blocked, &rdev->flags) && in md_wait_for_blocked_rdev()
8477 !test_bit(BlockedBadBlocks, &rdev->flags), in md_wait_for_blocked_rdev()
8479 rdev_dec_pending(rdev, mddev); in md_wait_for_blocked_rdev()
8486 struct md_rdev *rdev; in md_finish_reshape() local
8488 rdev_for_each(rdev, mddev) { in md_finish_reshape()
8489 if (rdev->data_offset > rdev->new_data_offset) in md_finish_reshape()
8490 rdev->sectors += rdev->data_offset - rdev->new_data_offset; in md_finish_reshape()
8492 rdev->sectors -= rdev->new_data_offset - rdev->data_offset; in md_finish_reshape()
8493 rdev->data_offset = rdev->new_data_offset; in md_finish_reshape()
8746 int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, in rdev_set_badblocks() argument
8751 s += rdev->new_data_offset; in rdev_set_badblocks()
8753 s += rdev->data_offset; in rdev_set_badblocks()
8754 rv = md_set_badblocks(&rdev->badblocks, in rdev_set_badblocks()
8758 sysfs_notify_dirent_safe(rdev->sysfs_state); in rdev_set_badblocks()
8759 set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags); in rdev_set_badblocks()
8760 set_bit(MD_CHANGE_PENDING, &rdev->mddev->flags); in rdev_set_badblocks()
8761 md_wakeup_thread(rdev->mddev->thread); in rdev_set_badblocks()
8862 int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors, in rdev_clear_badblocks() argument
8866 s += rdev->new_data_offset; in rdev_clear_badblocks()
8868 s += rdev->data_offset; in rdev_clear_badblocks()
8869 return md_clear_badblocks(&rdev->badblocks, in rdev_clear_badblocks()
9078 static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) in check_sb_changes() argument
9080 struct mdp_superblock_1 *sb = page_address(rdev->sb_page); in check_sb_changes()
9131 static int read_rdev(struct mddev *mddev, struct md_rdev *rdev) in read_rdev() argument
9134 struct page *swapout = rdev->sb_page; in read_rdev()
9140 rdev->sb_page = NULL; in read_rdev()
9141 alloc_disk_sb(rdev); in read_rdev()
9142 ClearPageUptodate(rdev->sb_page); in read_rdev()
9143 rdev->sb_loaded = 0; in read_rdev()
9144 err = super_types[mddev->major_version].load_super(rdev, NULL, mddev->minor_version); in read_rdev()
9148 __func__, __LINE__, rdev->desc_nr, err); in read_rdev()
9149 put_page(rdev->sb_page); in read_rdev()
9150 rdev->sb_page = swapout; in read_rdev()
9151 rdev->sb_loaded = 1; in read_rdev()
9155 sb = page_address(rdev->sb_page); in read_rdev()
9161 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset); in read_rdev()
9166 if (rdev->recovery_offset == MaxSector && in read_rdev()
9167 !test_bit(In_sync, &rdev->flags) && in read_rdev()
9177 struct md_rdev *rdev; in md_reload_sb() local
9181 rdev_for_each_rcu(rdev, mddev) { in md_reload_sb()
9182 if (rdev->desc_nr == nr) in md_reload_sb()
9186 if (!rdev || rdev->desc_nr != nr) { in md_reload_sb()
9191 err = read_rdev(mddev, rdev); in md_reload_sb()
9195 check_sb_changes(mddev, rdev); in md_reload_sb()
9198 rdev_for_each_rcu(rdev, mddev) in md_reload_sb()
9199 read_rdev(mddev, rdev); in md_reload_sb()
9232 struct md_rdev *rdev; in autostart_arrays() local
9249 rdev = md_import_device(dev,0, 90); in autostart_arrays()
9250 if (IS_ERR(rdev)) in autostart_arrays()
9253 if (test_bit(Faulty, &rdev->flags)) in autostart_arrays()
9256 set_bit(AutoDetected, &rdev->flags); in autostart_arrays()
9257 list_add(&rdev->same_set, &pending_raid_disks); in autostart_arrays()