Lines matching refs: rdev (drivers/md/md.c)

References to the md_rdev pointer rdev in drivers/md/md.c, one source line per
entry, annotated with the enclosing function; "local" marks a local variable
declaration and "argument" a function parameter.

384 	struct md_rdev *rdev = bio->bi_private;  in md_end_flush()  local
385 struct mddev *mddev = rdev->mddev; in md_end_flush()
387 rdev_dec_pending(rdev, mddev); in md_end_flush()
401 struct md_rdev *rdev; in submit_flushes() local
406 rdev_for_each_rcu(rdev, mddev) in submit_flushes()
407 if (rdev->raid_disk >= 0 && in submit_flushes()
408 !test_bit(Faulty, &rdev->flags)) { in submit_flushes()
414 atomic_inc(&rdev->nr_pending); in submit_flushes()
415 atomic_inc(&rdev->nr_pending); in submit_flushes()
419 bi->bi_private = rdev; in submit_flushes()
420 bi->bi_bdev = rdev->bdev; in submit_flushes()
424 rdev_dec_pending(rdev, mddev); in submit_flushes()
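
The submit_flushes() entries above (lines 406-424) show the flush fan-out pattern: the RCU walk over the member devices takes two nr_pending references per device, one owned by the flush bio and dropped later in md_end_flush(), and one held only across the submission so the rdev cannot vanish while rcu_read_lock is temporarily dropped. A minimal sketch of that pattern, reconstructed from the listed lines; the bio allocation call and the WRITE_FLUSH submission are assumptions from the same kernel era, not shown in the listing:

	/* Sketch, not verbatim md.c: the flush fan-out seen in submit_flushes(). */
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			struct bio *bi;

			/* Two references: one for md_end_flush(), one held
			 * across the submission while rcu_read_lock is dropped. */
			atomic_inc(&rdev->nr_pending);
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();

			bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);	/* assumed allocator */
			bi->bi_end_io = md_end_flush;
			bi->bi_private = rdev;
			bi->bi_bdev = rdev->bdev;
			atomic_inc(&mddev->flush_pending);
			submit_bio(WRITE_FLUSH, bi);			/* assumed flush submit */

			rcu_read_lock();
			rdev_dec_pending(rdev, mddev);			/* drop the submit-side ref */
		}
	rcu_read_unlock();
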
653 struct md_rdev *rdev; in md_find_rdev_nr_rcu() local
655 rdev_for_each_rcu(rdev, mddev) in md_find_rdev_nr_rcu()
656 if (rdev->desc_nr == nr) in md_find_rdev_nr_rcu()
657 return rdev; in md_find_rdev_nr_rcu()
665 struct md_rdev *rdev; in find_rdev() local
667 rdev_for_each(rdev, mddev) in find_rdev()
668 if (rdev->bdev->bd_dev == dev) in find_rdev()
669 return rdev; in find_rdev()
676 struct md_rdev *rdev; in find_rdev_rcu() local
678 rdev_for_each_rcu(rdev, mddev) in find_rdev_rcu()
679 if (rdev->bdev->bd_dev == dev) in find_rdev_rcu()
680 return rdev; in find_rdev_rcu()
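
The three lookup helpers above are simple linear walks over mddev->disks: md_find_rdev_nr_rcu() matches on desc_nr, the other two on the backing device number. The _rcu variants use the RCU-safe iterator and so are meant to run under rcu_read_lock(), while find_rdev() relies on the caller to stabilize the list. A reconstruction of find_rdev() from the listed lines (the NULL fall-through is assumed):

	static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
	{
		struct md_rdev *rdev;

		rdev_for_each(rdev, mddev)
			if (rdev->bdev->bd_dev == dev)
				return rdev;
		return NULL;	/* assumed: no matching member device */
	}
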
698 static inline sector_t calc_dev_sboffset(struct md_rdev *rdev) in calc_dev_sboffset() argument
700 sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512; in calc_dev_sboffset()
704 static int alloc_disk_sb(struct md_rdev *rdev) in alloc_disk_sb() argument
706 rdev->sb_page = alloc_page(GFP_KERNEL); in alloc_disk_sb()
707 if (!rdev->sb_page) { in alloc_disk_sb()
715 void md_rdev_clear(struct md_rdev *rdev) in md_rdev_clear() argument
717 if (rdev->sb_page) { in md_rdev_clear()
718 put_page(rdev->sb_page); in md_rdev_clear()
719 rdev->sb_loaded = 0; in md_rdev_clear()
720 rdev->sb_page = NULL; in md_rdev_clear()
721 rdev->sb_start = 0; in md_rdev_clear()
722 rdev->sectors = 0; in md_rdev_clear()
724 if (rdev->bb_page) { in md_rdev_clear()
725 put_page(rdev->bb_page); in md_rdev_clear()
726 rdev->bb_page = NULL; in md_rdev_clear()
728 kfree(rdev->badblocks.page); in md_rdev_clear()
729 rdev->badblocks.page = NULL; in md_rdev_clear()
735 struct md_rdev *rdev = bio->bi_private; in super_written() local
736 struct mddev *mddev = rdev->mddev; in super_written()
742 md_error(mddev, rdev); in super_written()
750 void md_super_write(struct mddev *mddev, struct md_rdev *rdev, in md_super_write() argument
761 bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev; in md_super_write()
764 bio->bi_private = rdev; in md_super_write()
777 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, in sync_page_io() argument
780 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev); in sync_page_io()
783 bio->bi_bdev = (metadata_op && rdev->meta_bdev) ? in sync_page_io()
784 rdev->meta_bdev : rdev->bdev; in sync_page_io()
786 bio->bi_iter.bi_sector = sector + rdev->sb_start; in sync_page_io()
787 else if (rdev->mddev->reshape_position != MaxSector && in sync_page_io()
788 (rdev->mddev->reshape_backwards == in sync_page_io()
789 (sector >= rdev->mddev->reshape_position))) in sync_page_io()
790 bio->bi_iter.bi_sector = sector + rdev->new_data_offset; in sync_page_io()
792 bio->bi_iter.bi_sector = sector + rdev->data_offset; in sync_page_io()
802 static int read_disk_sb(struct md_rdev *rdev, int size) in read_disk_sb() argument
806 if (rdev->sb_loaded) in read_disk_sb()
809 if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true)) in read_disk_sb()
811 rdev->sb_loaded = 1; in read_disk_sb()
816 bdevname(rdev->bdev,b)); in read_disk_sb()
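
sync_page_io() (lines 777-792) picks the target block device (meta_bdev when present) and translates the sector through sb_start, new_data_offset or data_offset depending on whether this is a metadata operation and on the reshape direction; read_disk_sb() is a thin caching wrapper around it that reads the superblock page once. A reconstruction of read_disk_sb() from the listed lines; the error path and return values are assumptions:

	static int read_disk_sb(struct md_rdev *rdev, int size)
	{
		char b[BDEVNAME_SIZE];

		if (rdev->sb_loaded)
			return 0;		/* superblock page already cached */

		if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
			goto fail;
		rdev->sb_loaded = 1;
		return 0;

	fail:
		printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
		       bdevname(rdev->bdev, b));
		return -EINVAL;
	}
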
927 int (*load_super)(struct md_rdev *rdev,
931 struct md_rdev *rdev);
933 struct md_rdev *rdev);
934 unsigned long long (*rdev_size_change)(struct md_rdev *rdev,
936 int (*allow_new_offset)(struct md_rdev *rdev,
961 static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version) in super_90_load() argument
973 rdev->sb_start = calc_dev_sboffset(rdev); in super_90_load()
975 ret = read_disk_sb(rdev, MD_SB_BYTES); in super_90_load()
980 bdevname(rdev->bdev, b); in super_90_load()
981 sb = page_address(rdev->sb_page); in super_90_load()
1007 rdev->preferred_minor = sb->md_minor; in super_90_load()
1008 rdev->data_offset = 0; in super_90_load()
1009 rdev->new_data_offset = 0; in super_90_load()
1010 rdev->sb_size = MD_SB_BYTES; in super_90_load()
1011 rdev->badblocks.shift = -1; in super_90_load()
1014 rdev->desc_nr = -1; in super_90_load()
1016 rdev->desc_nr = sb->this_disk.number; in super_90_load()
1041 rdev->sectors = rdev->sb_start; in super_90_load()
1046 if (rdev->sectors >= (2ULL << 32) && sb->level >= 1) in super_90_load()
1047 rdev->sectors = (2ULL << 32) - 2; in super_90_load()
1049 if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1) in super_90_load()
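
The clamping at lines 1046-1047 exists because the v0.90 superblock records sizes as 32-bit counts of 1 KiB blocks; the arithmetic, written out (an aside, not md.c code):

	/*
	 * 2ULL << 32 sectors           = 2^33 sectors x 512 B = 4 TiB
	 * largest v0.90-representable  = (2^32 - 1) KiB
	 *                              = 2^33 - 2 sectors  ("(2ULL << 32) - 2")
	 * Components of 4 TiB or more are therefore clamped just below that
	 * limit; linear and RAID0 are exempt (sb->level >= 1 test) because
	 * they do not record a per-device size in the superblock.
	 */
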
1060 static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev) in super_90_validate() argument
1063 mdp_super_t *sb = page_address(rdev->sb_page); in super_90_validate()
1066 rdev->raid_disk = -1; in super_90_validate()
1067 clear_bit(Faulty, &rdev->flags); in super_90_validate()
1068 clear_bit(In_sync, &rdev->flags); in super_90_validate()
1069 clear_bit(Bitmap_sync, &rdev->flags); in super_90_validate()
1070 clear_bit(WriteMostly, &rdev->flags); in super_90_validate()
1138 if (sb->disks[rdev->desc_nr].state & ( in super_90_validate()
1149 set_bit(Bitmap_sync, &rdev->flags); in super_90_validate()
1157 desc = sb->disks + rdev->desc_nr; in super_90_validate()
1160 set_bit(Faulty, &rdev->flags); in super_90_validate()
1163 set_bit(In_sync, &rdev->flags); in super_90_validate()
1164 rdev->raid_disk = desc->raid_disk; in super_90_validate()
1165 rdev->saved_raid_disk = desc->raid_disk; in super_90_validate()
1171 rdev->recovery_offset = 0; in super_90_validate()
1172 rdev->raid_disk = desc->raid_disk; in super_90_validate()
1176 set_bit(WriteMostly, &rdev->flags); in super_90_validate()
1178 set_bit(In_sync, &rdev->flags); in super_90_validate()
1185 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev) in super_90_sync() argument
1204 rdev->sb_size = MD_SB_BYTES; in super_90_sync()
1206 sb = page_address(rdev->sb_page); in super_90_sync()
1320 sb->this_disk = sb->disks[rdev->desc_nr]; in super_90_sync()
1328 super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) in super_90_rdev_size_change() argument
1330 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) in super_90_rdev_size_change()
1332 if (rdev->mddev->bitmap_info.offset) in super_90_rdev_size_change()
1334 rdev->sb_start = calc_dev_sboffset(rdev); in super_90_rdev_size_change()
1335 if (!num_sectors || num_sectors > rdev->sb_start) in super_90_rdev_size_change()
1336 num_sectors = rdev->sb_start; in super_90_rdev_size_change()
1340 if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1) in super_90_rdev_size_change()
1342 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, in super_90_rdev_size_change()
1343 rdev->sb_page); in super_90_rdev_size_change()
1344 md_super_wait(rdev->mddev); in super_90_rdev_size_change()
1349 super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset) in super_90_allow_new_offset() argument
1383 static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version) in super_1_load() argument
1402 sb_start = i_size_read(rdev->bdev->bd_inode) >> 9; in super_1_load()
1415 rdev->sb_start = sb_start; in super_1_load()
1420 ret = read_disk_sb(rdev, 4096); in super_1_load()
1423 sb = page_address(rdev->sb_page); in super_1_load()
1428 le64_to_cpu(sb->super_offset) != rdev->sb_start || in super_1_load()
1434 bdevname(rdev->bdev,b)); in super_1_load()
1439 bdevname(rdev->bdev,b)); in super_1_load()
1448 rdev->preferred_minor = 0xffff; in super_1_load()
1449 rdev->data_offset = le64_to_cpu(sb->data_offset); in super_1_load()
1450 rdev->new_data_offset = rdev->data_offset; in super_1_load()
1453 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset); in super_1_load()
1454 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); in super_1_load()
1456 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; in super_1_load()
1457 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; in super_1_load()
1458 if (rdev->sb_size & bmask) in super_1_load()
1459 rdev->sb_size = (rdev->sb_size | bmask) + 1; in super_1_load()
1462 && rdev->data_offset < sb_start + (rdev->sb_size/512)) in super_1_load()
1465 && rdev->new_data_offset < sb_start + (rdev->sb_size/512)) in super_1_load()
1469 rdev->desc_nr = -1; in super_1_load()
1471 rdev->desc_nr = le32_to_cpu(sb->dev_number); in super_1_load()
1473 if (!rdev->bb_page) { in super_1_load()
1474 rdev->bb_page = alloc_page(GFP_KERNEL); in super_1_load()
1475 if (!rdev->bb_page) in super_1_load()
1479 rdev->badblocks.count == 0) { in super_1_load()
1494 if (!sync_page_io(rdev, bb_sector, sectors << 9, in super_1_load()
1495 rdev->bb_page, READ, true)) in super_1_load()
1497 bbp = (u64 *)page_address(rdev->bb_page); in super_1_load()
1498 rdev->badblocks.shift = sb->bblog_shift; in super_1_load()
1507 if (md_set_badblocks(&rdev->badblocks, in super_1_load()
1512 rdev->badblocks.shift = 0; in super_1_load()
1526 bdevname(rdev->bdev,b), in super_1_load()
1539 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9); in super_1_load()
1540 sectors -= rdev->data_offset; in super_1_load()
1542 sectors = rdev->sb_start; in super_1_load()
1545 rdev->sectors = le64_to_cpu(sb->data_size); in super_1_load()
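
The sb_size computation at lines 1456-1459 sizes the v1.x superblock as a 256-byte header plus two bytes per dev_roles slot, then rounds it up to the device's logical block size with a power-of-two mask. A standalone illustration of the round-up idiom; the 4096-byte block size and 384-slot max_dev are example values, not taken from the listing:

	unsigned int max_dev = 384;			/* example value */
	unsigned int sb_size = max_dev * 2 + 256;	/* = 1024 bytes */
	unsigned int bmask = 4096 - 1;			/* example logical block size - 1 */

	if (sb_size & bmask)
		sb_size = (sb_size | bmask) + 1;	/* rounds 1024 up to 4096 */
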
1549 static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) in super_1_validate() argument
1551 struct mdp_superblock_1 *sb = page_address(rdev->sb_page); in super_1_validate()
1554 rdev->raid_disk = -1; in super_1_validate()
1555 clear_bit(Faulty, &rdev->flags); in super_1_validate()
1556 clear_bit(In_sync, &rdev->flags); in super_1_validate()
1557 clear_bit(Bitmap_sync, &rdev->flags); in super_1_validate()
1558 clear_bit(WriteMostly, &rdev->flags); in super_1_validate()
1629 if (rdev->desc_nr >= 0 && in super_1_validate()
1630 rdev->desc_nr < le32_to_cpu(sb->max_dev) && in super_1_validate()
1631 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe) in super_1_validate()
1641 set_bit(Bitmap_sync, &rdev->flags); in super_1_validate()
1649 if (rdev->desc_nr < 0 || in super_1_validate()
1650 rdev->desc_nr >= le32_to_cpu(sb->max_dev)) { in super_1_validate()
1652 rdev->desc_nr = -1; in super_1_validate()
1654 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); in super_1_validate()
1659 set_bit(Faulty, &rdev->flags); in super_1_validate()
1662 rdev->saved_raid_disk = role; in super_1_validate()
1665 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset); in super_1_validate()
1668 rdev->saved_raid_disk = -1; in super_1_validate()
1670 set_bit(In_sync, &rdev->flags); in super_1_validate()
1671 rdev->raid_disk = role; in super_1_validate()
1675 set_bit(WriteMostly, &rdev->flags); in super_1_validate()
1677 set_bit(Replacement, &rdev->flags); in super_1_validate()
1679 set_bit(In_sync, &rdev->flags); in super_1_validate()
1684 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) in super_1_sync() argument
1691 sb = page_address(rdev->sb_page); in super_1_sync()
1705 sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors)); in super_1_sync()
1713 if (test_bit(WriteMostly, &rdev->flags)) in super_1_sync()
1717 sb->data_offset = cpu_to_le64(rdev->data_offset); in super_1_sync()
1718 sb->data_size = cpu_to_le64(rdev->sectors); in super_1_sync()
1725 if (rdev->raid_disk >= 0 && in super_1_sync()
1726 !test_bit(In_sync, &rdev->flags)) { in super_1_sync()
1730 cpu_to_le64(rdev->recovery_offset); in super_1_sync()
1731 if (rdev->saved_raid_disk >= 0 && mddev->bitmap) in super_1_sync()
1735 if (test_bit(Replacement, &rdev->flags)) in super_1_sync()
1750 if (rdev->new_data_offset != rdev->data_offset) { in super_1_sync()
1753 sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset in super_1_sync()
1754 - rdev->data_offset)); in super_1_sync()
1758 if (rdev->badblocks.count == 0) in super_1_sync()
1762 md_error(mddev, rdev); in super_1_sync()
1764 struct badblocks *bb = &rdev->badblocks; in super_1_sync()
1765 u64 *bbp = (u64 *)page_address(rdev->bb_page); in super_1_sync()
1786 bb->sector = (rdev->sb_start + in super_1_sync()
1800 rdev->sb_size = max_dev * 2 + 256; in super_1_sync()
1801 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; in super_1_sync()
1802 if (rdev->sb_size & bmask) in super_1_sync()
1803 rdev->sb_size = (rdev->sb_size | bmask) + 1; in super_1_sync()
1826 super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) in super_1_rdev_size_change() argument
1830 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) in super_1_rdev_size_change()
1832 if (rdev->data_offset != rdev->new_data_offset) in super_1_rdev_size_change()
1834 if (rdev->sb_start < rdev->data_offset) { in super_1_rdev_size_change()
1836 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9; in super_1_rdev_size_change()
1837 max_sectors -= rdev->data_offset; in super_1_rdev_size_change()
1840 } else if (rdev->mddev->bitmap_info.offset) { in super_1_rdev_size_change()
1846 sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2; in super_1_rdev_size_change()
1848 max_sectors = rdev->sectors + sb_start - rdev->sb_start; in super_1_rdev_size_change()
1851 rdev->sb_start = sb_start; in super_1_rdev_size_change()
1853 sb = page_address(rdev->sb_page); in super_1_rdev_size_change()
1855 sb->super_offset = rdev->sb_start; in super_1_rdev_size_change()
1857 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, in super_1_rdev_size_change()
1858 rdev->sb_page); in super_1_rdev_size_change()
1859 md_super_wait(rdev->mddev); in super_1_rdev_size_change()
1865 super_1_allow_new_offset(struct md_rdev *rdev, in super_1_allow_new_offset() argument
1870 if (new_offset >= rdev->data_offset) in super_1_allow_new_offset()
1875 if (rdev->mddev->minor_version == 0) in super_1_allow_new_offset()
1884 if (rdev->sb_start + (32+4)*2 > new_offset) in super_1_allow_new_offset()
1886 bitmap = rdev->mddev->bitmap; in super_1_allow_new_offset()
1887 if (bitmap && !rdev->mddev->bitmap_info.file && in super_1_allow_new_offset()
1888 rdev->sb_start + rdev->mddev->bitmap_info.offset + in super_1_allow_new_offset()
1891 if (rdev->badblocks.sector + rdev->badblocks.size > new_offset) in super_1_allow_new_offset()
1918 static void sync_super(struct mddev *mddev, struct md_rdev *rdev) in sync_super() argument
1921 mddev->sync_super(mddev, rdev); in sync_super()
1927 super_types[mddev->major_version].sync_super(mddev, rdev); in sync_super()
1932 struct md_rdev *rdev, *rdev2; in match_mddev_units() local
1935 rdev_for_each_rcu(rdev, mddev1) in match_mddev_units()
1937 if (rdev->bdev->bd_contains == in match_mddev_units()
1957 struct md_rdev *rdev, *reference = NULL; in md_integrity_register() local
1963 rdev_for_each(rdev, mddev) { in md_integrity_register()
1965 if (test_bit(Faulty, &rdev->flags)) in md_integrity_register()
1967 if (rdev->raid_disk < 0) in md_integrity_register()
1971 reference = rdev; in md_integrity_register()
1976 rdev->bdev->bd_disk) < 0) in md_integrity_register()
2002 void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) in md_integrity_add_rdev() argument
2010 bi_rdev = bdev_get_integrity(rdev->bdev); in md_integrity_add_rdev()
2015 if (rdev->raid_disk < 0) /* skip spares */ in md_integrity_add_rdev()
2018 rdev->bdev->bd_disk) >= 0) in md_integrity_add_rdev()
2025 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) in bind_rdev_to_array() argument
2033 if (find_rdev(mddev, rdev->bdev->bd_dev)) in bind_rdev_to_array()
2037 if (rdev->sectors && (mddev->dev_sectors == 0 || in bind_rdev_to_array()
2038 rdev->sectors < mddev->dev_sectors)) { in bind_rdev_to_array()
2047 mddev->dev_sectors = rdev->sectors; in bind_rdev_to_array()
2055 if (rdev->desc_nr < 0) { in bind_rdev_to_array()
2061 rdev->desc_nr = choice; in bind_rdev_to_array()
2063 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) { in bind_rdev_to_array()
2069 if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) { in bind_rdev_to_array()
2074 bdevname(rdev->bdev,b); in bind_rdev_to_array()
2078 rdev->mddev = mddev; in bind_rdev_to_array()
2081 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) in bind_rdev_to_array()
2084 ko = &part_to_dev(rdev->bdev->bd_part)->kobj; in bind_rdev_to_array()
2085 if (sysfs_create_link(&rdev->kobj, ko, "block")) in bind_rdev_to_array()
2087 rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state"); in bind_rdev_to_array()
2089 list_add_rcu(&rdev->same_set, &mddev->disks); in bind_rdev_to_array()
2090 bd_link_disk_holder(rdev->bdev, mddev->gendisk); in bind_rdev_to_array()
2105 struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work); in md_delayed_delete() local
2106 kobject_del(&rdev->kobj); in md_delayed_delete()
2107 kobject_put(&rdev->kobj); in md_delayed_delete()
2110 static void unbind_rdev_from_array(struct md_rdev *rdev) in unbind_rdev_from_array() argument
2114 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk); in unbind_rdev_from_array()
2115 list_del_rcu(&rdev->same_set); in unbind_rdev_from_array()
2116 printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b)); in unbind_rdev_from_array()
2117 rdev->mddev = NULL; in unbind_rdev_from_array()
2118 sysfs_remove_link(&rdev->kobj, "block"); in unbind_rdev_from_array()
2119 sysfs_put(rdev->sysfs_state); in unbind_rdev_from_array()
2120 rdev->sysfs_state = NULL; in unbind_rdev_from_array()
2121 rdev->badblocks.count = 0; in unbind_rdev_from_array()
2127 INIT_WORK(&rdev->del_work, md_delayed_delete); in unbind_rdev_from_array()
2128 kobject_get(&rdev->kobj); in unbind_rdev_from_array()
2129 queue_work(md_misc_wq, &rdev->del_work); in unbind_rdev_from_array()
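
unbind_rdev_from_array() (lines 2110-2129) does not tear down the rdev kobject synchronously: it takes an extra reference and queues md_delayed_delete() on md_misc_wq, so the sysfs removal runs after the caller has finished with the rdev and any RCU readers still walking mddev->disks have completed. A sketch of the deferred half, reconstructed from lines 2105-2107; the stated rationale is an inference from the pattern, not a comment copied from the source:

	static void md_delayed_delete(struct work_struct *ws)
	{
		struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);

		kobject_del(&rdev->kobj);	/* remove from sysfs */
		kobject_put(&rdev->kobj);	/* drop the ref taken before queue_work() */
	}
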
2137 static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared) in lock_rdev() argument
2144 shared ? (struct md_rdev *)lock_rdev : rdev); in lock_rdev()
2150 rdev->bdev = bdev; in lock_rdev()
2154 static void unlock_rdev(struct md_rdev *rdev) in unlock_rdev() argument
2156 struct block_device *bdev = rdev->bdev; in unlock_rdev()
2157 rdev->bdev = NULL; in unlock_rdev()
2163 static void export_rdev(struct md_rdev *rdev) in export_rdev() argument
2168 bdevname(rdev->bdev,b)); in export_rdev()
2169 md_rdev_clear(rdev); in export_rdev()
2171 if (test_bit(AutoDetected, &rdev->flags)) in export_rdev()
2172 md_autodetect_dev(rdev->bdev->bd_dev); in export_rdev()
2174 unlock_rdev(rdev); in export_rdev()
2175 kobject_put(&rdev->kobj); in export_rdev()
2178 void md_kick_rdev_from_array(struct md_rdev *rdev) in md_kick_rdev_from_array() argument
2180 unbind_rdev_from_array(rdev); in md_kick_rdev_from_array()
2181 export_rdev(rdev); in md_kick_rdev_from_array()
2187 struct md_rdev *rdev; in export_array() local
2190 rdev = list_first_entry(&mddev->disks, struct md_rdev, in export_array()
2192 md_kick_rdev_from_array(rdev); in export_array()
2206 struct md_rdev *rdev; in sync_sbs() local
2207 rdev_for_each(rdev, mddev) { in sync_sbs()
2208 if (rdev->sb_events == mddev->events || in sync_sbs()
2210 rdev->raid_disk < 0 && in sync_sbs()
2211 rdev->sb_events+1 == mddev->events)) { in sync_sbs()
2213 rdev->sb_loaded = 2; in sync_sbs()
2215 sync_super(mddev, rdev); in sync_sbs()
2216 rdev->sb_loaded = 1; in sync_sbs()
2223 struct md_rdev *rdev; in md_update_sb() local
2235 rdev_for_each(rdev, mddev) { in md_update_sb()
2236 if (rdev->raid_disk >= 0 && in md_update_sb()
2238 !test_bit(In_sync, &rdev->flags) && in md_update_sb()
2239 mddev->curr_resync_completed > rdev->recovery_offset) in md_update_sb()
2240 rdev->recovery_offset = mddev->curr_resync_completed; in md_update_sb()
2248 rdev_for_each(rdev, mddev) { in md_update_sb()
2249 if (rdev->badblocks.changed) { in md_update_sb()
2250 rdev->badblocks.changed = 0; in md_update_sb()
2251 md_ack_all_badblocks(&rdev->badblocks); in md_update_sb()
2252 md_error(mddev, rdev); in md_update_sb()
2254 clear_bit(Blocked, &rdev->flags); in md_update_sb()
2255 clear_bit(BlockedBadBlocks, &rdev->flags); in md_update_sb()
2256 wake_up(&rdev->blocked_wait); in md_update_sb()
2312 rdev_for_each(rdev, mddev) { in md_update_sb()
2313 if (rdev->badblocks.changed) in md_update_sb()
2315 if (test_bit(Faulty, &rdev->flags)) in md_update_sb()
2316 set_bit(FaultRecorded, &rdev->flags); in md_update_sb()
2326 rdev_for_each(rdev, mddev) { in md_update_sb()
2329 if (rdev->sb_loaded != 1) in md_update_sb()
2332 if (!test_bit(Faulty, &rdev->flags)) { in md_update_sb()
2333 md_super_write(mddev,rdev, in md_update_sb()
2334 rdev->sb_start, rdev->sb_size, in md_update_sb()
2335 rdev->sb_page); in md_update_sb()
2337 bdevname(rdev->bdev, b), in md_update_sb()
2338 (unsigned long long)rdev->sb_start); in md_update_sb()
2339 rdev->sb_events = mddev->events; in md_update_sb()
2340 if (rdev->badblocks.size) { in md_update_sb()
2341 md_super_write(mddev, rdev, in md_update_sb()
2342 rdev->badblocks.sector, in md_update_sb()
2343 rdev->badblocks.size << 9, in md_update_sb()
2344 rdev->bb_page); in md_update_sb()
2345 rdev->badblocks.size = 0; in md_update_sb()
2350 bdevname(rdev->bdev, b)); in md_update_sb()
2372 rdev_for_each(rdev, mddev) { in md_update_sb()
2373 if (test_and_clear_bit(FaultRecorded, &rdev->flags)) in md_update_sb()
2374 clear_bit(Blocked, &rdev->flags); in md_update_sb()
2377 md_ack_all_badblocks(&rdev->badblocks); in md_update_sb()
2378 clear_bit(BlockedBadBlocks, &rdev->flags); in md_update_sb()
2379 wake_up(&rdev->blocked_wait); in md_update_sb()
2384 static int add_bound_rdev(struct md_rdev *rdev) in add_bound_rdev() argument
2386 struct mddev *mddev = rdev->mddev; in add_bound_rdev()
2395 validate_super(mddev, rdev); in add_bound_rdev()
2396 err = mddev->pers->hot_add_disk(mddev, rdev); in add_bound_rdev()
2398 unbind_rdev_from_array(rdev); in add_bound_rdev()
2399 export_rdev(rdev); in add_bound_rdev()
2403 sysfs_notify_dirent_safe(rdev->sysfs_state); in add_bound_rdev()
2441 state_show(struct md_rdev *rdev, char *page) in state_show() argument
2445 unsigned long flags = ACCESS_ONCE(rdev->flags); in state_show()
2448 rdev->badblocks.unacked_exist) { in state_show()
2461 (rdev->badblocks.unacked_exist in state_show()
2488 state_store(struct md_rdev *rdev, const char *buf, size_t len) in state_store() argument
2504 if (cmd_match(buf, "faulty") && rdev->mddev->pers) { in state_store()
2505 md_error(rdev->mddev, rdev); in state_store()
2506 if (test_bit(Faulty, &rdev->flags)) in state_store()
2511 if (rdev->raid_disk >= 0) in state_store()
2514 struct mddev *mddev = rdev->mddev; in state_store()
2516 md_cluster_ops->remove_disk(mddev, rdev); in state_store()
2517 md_kick_rdev_from_array(rdev); in state_store()
2528 set_bit(WriteMostly, &rdev->flags); in state_store()
2531 clear_bit(WriteMostly, &rdev->flags); in state_store()
2534 set_bit(Blocked, &rdev->flags); in state_store()
2537 if (!test_bit(Faulty, &rdev->flags) && in state_store()
2538 rdev->badblocks.unacked_exist) { in state_store()
2542 md_error(rdev->mddev, rdev); in state_store()
2544 clear_bit(Blocked, &rdev->flags); in state_store()
2545 clear_bit(BlockedBadBlocks, &rdev->flags); in state_store()
2546 wake_up(&rdev->blocked_wait); in state_store()
2547 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); in state_store()
2548 md_wakeup_thread(rdev->mddev->thread); in state_store()
2551 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) { in state_store()
2552 set_bit(In_sync, &rdev->flags); in state_store()
2554 } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0) { in state_store()
2555 if (rdev->mddev->pers == NULL) { in state_store()
2556 clear_bit(In_sync, &rdev->flags); in state_store()
2557 rdev->saved_raid_disk = rdev->raid_disk; in state_store()
2558 rdev->raid_disk = -1; in state_store()
2562 set_bit(WriteErrorSeen, &rdev->flags); in state_store()
2565 clear_bit(WriteErrorSeen, &rdev->flags); in state_store()
2572 if (rdev->raid_disk >= 0 && in state_store()
2573 !test_bit(Replacement, &rdev->flags)) in state_store()
2574 set_bit(WantReplacement, &rdev->flags); in state_store()
2575 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); in state_store()
2576 md_wakeup_thread(rdev->mddev->thread); in state_store()
2583 clear_bit(WantReplacement, &rdev->flags); in state_store()
2589 if (rdev->mddev->pers) in state_store()
2592 set_bit(Replacement, &rdev->flags); in state_store()
2597 if (rdev->mddev->pers) in state_store()
2600 clear_bit(Replacement, &rdev->flags); in state_store()
2604 if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1)) { in state_store()
2611 if (!mddev_is_clustered(rdev->mddev) || in state_store()
2612 (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) { in state_store()
2613 clear_bit(Faulty, &rdev->flags); in state_store()
2614 err = add_bound_rdev(rdev); in state_store()
2620 sysfs_notify_dirent_safe(rdev->sysfs_state); in state_store()
2627 errors_show(struct md_rdev *rdev, char *page) in errors_show() argument
2629 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); in errors_show()
2633 errors_store(struct md_rdev *rdev, const char *buf, size_t len) in errors_store() argument
2638 atomic_set(&rdev->corrected_errors, n); in errors_store()
2647 slot_show(struct md_rdev *rdev, char *page) in slot_show() argument
2649 if (rdev->raid_disk < 0) in slot_show()
2652 return sprintf(page, "%d\n", rdev->raid_disk); in slot_show()
2656 slot_store(struct md_rdev *rdev, const char *buf, size_t len) in slot_store() argument
2665 if (rdev->mddev->pers && slot == -1) { in slot_store()
2673 if (rdev->raid_disk == -1) in slot_store()
2676 if (rdev->mddev->pers->hot_remove_disk == NULL) in slot_store()
2678 clear_bit(Blocked, &rdev->flags); in slot_store()
2679 remove_and_add_spares(rdev->mddev, rdev); in slot_store()
2680 if (rdev->raid_disk >= 0) in slot_store()
2682 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); in slot_store()
2683 md_wakeup_thread(rdev->mddev->thread); in slot_store()
2684 } else if (rdev->mddev->pers) { in slot_store()
2689 if (rdev->raid_disk != -1) in slot_store()
2692 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery)) in slot_store()
2695 if (rdev->mddev->pers->hot_add_disk == NULL) in slot_store()
2698 if (slot >= rdev->mddev->raid_disks && in slot_store()
2699 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) in slot_store()
2702 rdev->raid_disk = slot; in slot_store()
2703 if (test_bit(In_sync, &rdev->flags)) in slot_store()
2704 rdev->saved_raid_disk = slot; in slot_store()
2706 rdev->saved_raid_disk = -1; in slot_store()
2707 clear_bit(In_sync, &rdev->flags); in slot_store()
2708 clear_bit(Bitmap_sync, &rdev->flags); in slot_store()
2709 err = rdev->mddev->pers-> in slot_store()
2710 hot_add_disk(rdev->mddev, rdev); in slot_store()
2712 rdev->raid_disk = -1; in slot_store()
2715 sysfs_notify_dirent_safe(rdev->sysfs_state); in slot_store()
2716 if (sysfs_link_rdev(rdev->mddev, rdev)) in slot_store()
2720 if (slot >= rdev->mddev->raid_disks && in slot_store()
2721 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) in slot_store()
2723 rdev->raid_disk = slot; in slot_store()
2725 clear_bit(Faulty, &rdev->flags); in slot_store()
2726 clear_bit(WriteMostly, &rdev->flags); in slot_store()
2727 set_bit(In_sync, &rdev->flags); in slot_store()
2728 sysfs_notify_dirent_safe(rdev->sysfs_state); in slot_store()
2737 offset_show(struct md_rdev *rdev, char *page) in offset_show() argument
2739 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); in offset_show()
2743 offset_store(struct md_rdev *rdev, const char *buf, size_t len) in offset_store() argument
2748 if (rdev->mddev->pers && rdev->raid_disk >= 0) in offset_store()
2750 if (rdev->sectors && rdev->mddev->external) in offset_store()
2754 rdev->data_offset = offset; in offset_store()
2755 rdev->new_data_offset = offset; in offset_store()
2762 static ssize_t new_offset_show(struct md_rdev *rdev, char *page) in new_offset_show() argument
2765 (unsigned long long)rdev->new_data_offset); in new_offset_show()
2768 static ssize_t new_offset_store(struct md_rdev *rdev, in new_offset_store() argument
2772 struct mddev *mddev = rdev->mddev; in new_offset_store()
2780 if (new_offset == rdev->data_offset) in new_offset_store()
2783 else if (new_offset > rdev->data_offset) { in new_offset_store()
2785 if (new_offset - rdev->data_offset in new_offset_store()
2786 + mddev->dev_sectors > rdev->sectors) in new_offset_store()
2794 if (new_offset < rdev->data_offset && in new_offset_store()
2801 if (new_offset > rdev->data_offset && in new_offset_store()
2807 .allow_new_offset(rdev, new_offset)) in new_offset_store()
2809 rdev->new_data_offset = new_offset; in new_offset_store()
2810 if (new_offset > rdev->data_offset) in new_offset_store()
2812 else if (new_offset < rdev->data_offset) in new_offset_store()
2821 rdev_size_show(struct md_rdev *rdev, char *page) in rdev_size_show() argument
2823 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2); in rdev_size_show()
2856 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len) in rdev_size_store() argument
2858 struct mddev *my_mddev = rdev->mddev; in rdev_size_store()
2859 sector_t oldsectors = rdev->sectors; in rdev_size_store()
2864 if (rdev->data_offset != rdev->new_data_offset) in rdev_size_store()
2866 if (my_mddev->pers && rdev->raid_disk >= 0) { in rdev_size_store()
2869 rdev_size_change(rdev, sectors); in rdev_size_store()
2873 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) - in rdev_size_store()
2874 rdev->data_offset; in rdev_size_store()
2882 rdev->sectors = sectors; in rdev_size_store()
2899 if (rdev->bdev == rdev2->bdev && in rdev_size_store()
2900 rdev != rdev2 && in rdev_size_store()
2901 overlaps(rdev->data_offset, rdev->sectors, in rdev_size_store()
2920 rdev->sectors = oldsectors; in rdev_size_store()
2930 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page) in recovery_start_show() argument
2932 unsigned long long recovery_start = rdev->recovery_offset; in recovery_start_show()
2934 if (test_bit(In_sync, &rdev->flags) || in recovery_start_show()
2941 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len) in recovery_start_store() argument
2950 if (rdev->mddev->pers && in recovery_start_store()
2951 rdev->raid_disk >= 0) in recovery_start_store()
2954 rdev->recovery_offset = recovery_start; in recovery_start_store()
2956 set_bit(In_sync, &rdev->flags); in recovery_start_store()
2958 clear_bit(In_sync, &rdev->flags); in recovery_start_store()
2970 static ssize_t bb_show(struct md_rdev *rdev, char *page) in bb_show() argument
2972 return badblocks_show(&rdev->badblocks, page, 0); in bb_show()
2974 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len) in bb_store() argument
2976 int rv = badblocks_store(&rdev->badblocks, page, len, 0); in bb_store()
2978 if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags)) in bb_store()
2979 wake_up(&rdev->blocked_wait); in bb_store()
2985 static ssize_t ubb_show(struct md_rdev *rdev, char *page) in ubb_show() argument
2987 return badblocks_show(&rdev->badblocks, page, 1); in ubb_show()
2989 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len) in ubb_store() argument
2991 return badblocks_store(&rdev->badblocks, page, len, 1); in ubb_store()
3012 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); in rdev_attr_show() local
3016 if (!rdev->mddev) in rdev_attr_show()
3018 return entry->show(rdev, page); in rdev_attr_show()
3026 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); in rdev_attr_store() local
3028 struct mddev *mddev = rdev->mddev; in rdev_attr_store()
3036 if (rdev->mddev == NULL) in rdev_attr_store()
3039 rv = entry->store(rdev, page, length); in rdev_attr_store()
3047 struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj); in rdev_free() local
3048 kfree(rdev); in rdev_free()
3060 int md_rdev_init(struct md_rdev *rdev) in md_rdev_init() argument
3062 rdev->desc_nr = -1; in md_rdev_init()
3063 rdev->saved_raid_disk = -1; in md_rdev_init()
3064 rdev->raid_disk = -1; in md_rdev_init()
3065 rdev->flags = 0; in md_rdev_init()
3066 rdev->data_offset = 0; in md_rdev_init()
3067 rdev->new_data_offset = 0; in md_rdev_init()
3068 rdev->sb_events = 0; in md_rdev_init()
3069 rdev->last_read_error.tv_sec = 0; in md_rdev_init()
3070 rdev->last_read_error.tv_nsec = 0; in md_rdev_init()
3071 rdev->sb_loaded = 0; in md_rdev_init()
3072 rdev->bb_page = NULL; in md_rdev_init()
3073 atomic_set(&rdev->nr_pending, 0); in md_rdev_init()
3074 atomic_set(&rdev->read_errors, 0); in md_rdev_init()
3075 atomic_set(&rdev->corrected_errors, 0); in md_rdev_init()
3077 INIT_LIST_HEAD(&rdev->same_set); in md_rdev_init()
3078 init_waitqueue_head(&rdev->blocked_wait); in md_rdev_init()
3084 rdev->badblocks.count = 0; in md_rdev_init()
3085 rdev->badblocks.shift = -1; /* disabled until explicitly enabled */ in md_rdev_init()
3086 rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL); in md_rdev_init()
3087 seqlock_init(&rdev->badblocks.lock); in md_rdev_init()
3088 if (rdev->badblocks.page == NULL) in md_rdev_init()
3108 struct md_rdev *rdev; in md_import_device() local
3111 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); in md_import_device()
3112 if (!rdev) { in md_import_device()
3117 err = md_rdev_init(rdev); in md_import_device()
3120 err = alloc_disk_sb(rdev); in md_import_device()
3124 err = lock_rdev(rdev, newdev, super_format == -2); in md_import_device()
3128 kobject_init(&rdev->kobj, &rdev_ktype); in md_import_device()
3130 size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS; in md_import_device()
3134 bdevname(rdev->bdev,b)); in md_import_device()
3141 load_super(rdev, NULL, super_minor); in md_import_device()
3146 bdevname(rdev->bdev,b), in md_import_device()
3153 bdevname(rdev->bdev,b)); in md_import_device()
3158 return rdev; in md_import_device()
3161 if (rdev->bdev) in md_import_device()
3162 unlock_rdev(rdev); in md_import_device()
3163 md_rdev_clear(rdev); in md_import_device()
3164 kfree(rdev); in md_import_device()
3175 struct md_rdev *rdev, *freshest, *tmp; in analyze_sbs() local
3179 rdev_for_each_safe(rdev, tmp, mddev) in analyze_sbs()
3181 load_super(rdev, freshest, mddev->minor_version)) { in analyze_sbs()
3183 freshest = rdev; in analyze_sbs()
3191 bdevname(rdev->bdev,b)); in analyze_sbs()
3192 md_kick_rdev_from_array(rdev); in analyze_sbs()
3199 rdev_for_each_safe(rdev, tmp, mddev) { in analyze_sbs()
3201 (rdev->desc_nr >= mddev->max_disks || in analyze_sbs()
3205 mdname(mddev), bdevname(rdev->bdev, b), in analyze_sbs()
3207 md_kick_rdev_from_array(rdev); in analyze_sbs()
3210 if (rdev != freshest) { in analyze_sbs()
3212 validate_super(mddev, rdev)) { in analyze_sbs()
3215 bdevname(rdev->bdev,b)); in analyze_sbs()
3216 md_kick_rdev_from_array(rdev); in analyze_sbs()
3222 if (test_bit(Candidate, &rdev->flags)) { in analyze_sbs()
3224 bdevname(rdev->bdev, b)); in analyze_sbs()
3225 md_kick_rdev_from_array(rdev); in analyze_sbs()
3229 rdev->desc_nr = i++; in analyze_sbs()
3230 rdev->raid_disk = rdev->desc_nr; in analyze_sbs()
3231 set_bit(In_sync, &rdev->flags); in analyze_sbs()
3232 } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) { in analyze_sbs()
3233 rdev->raid_disk = -1; in analyze_sbs()
3234 clear_bit(In_sync, &rdev->flags); in analyze_sbs()
3339 struct md_rdev *rdev; in level_store() local
3415 rdev_for_each(rdev, mddev) in level_store()
3416 rdev->new_raid_disk = rdev->raid_disk; in level_store()
3486 rdev_for_each(rdev, mddev) { in level_store()
3487 if (rdev->raid_disk < 0) in level_store()
3489 if (rdev->new_raid_disk >= mddev->raid_disks) in level_store()
3490 rdev->new_raid_disk = -1; in level_store()
3491 if (rdev->new_raid_disk == rdev->raid_disk) in level_store()
3493 sysfs_unlink_rdev(mddev, rdev); in level_store()
3495 rdev_for_each(rdev, mddev) { in level_store()
3496 if (rdev->raid_disk < 0) in level_store()
3498 if (rdev->new_raid_disk == rdev->raid_disk) in level_store()
3500 rdev->raid_disk = rdev->new_raid_disk; in level_store()
3501 if (rdev->raid_disk < 0) in level_store()
3502 clear_bit(In_sync, &rdev->flags); in level_store()
3504 if (sysfs_link_rdev(mddev, rdev)) in level_store()
3507 rdev->raid_disk, mdname(mddev)); in level_store()
3611 struct md_rdev *rdev; in raid_disks_store() local
3615 rdev_for_each(rdev, mddev) { in raid_disks_store()
3617 rdev->data_offset < rdev->new_data_offset) in raid_disks_store()
3620 rdev->data_offset > rdev->new_data_offset) in raid_disks_store()
3975 struct md_rdev *rdev; in new_dev_store() local
3994 rdev = md_import_device(dev, mddev->major_version, in new_dev_store()
3996 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { in new_dev_store()
4001 .load_super(rdev, rdev0, mddev->minor_version); in new_dev_store()
4006 rdev = md_import_device(dev, -2, -1); in new_dev_store()
4008 rdev = md_import_device(dev, -1, -1); in new_dev_store()
4010 if (IS_ERR(rdev)) { in new_dev_store()
4012 return PTR_ERR(rdev); in new_dev_store()
4014 err = bind_rdev_to_array(rdev, mddev); in new_dev_store()
4017 export_rdev(rdev); in new_dev_store()
4612 struct md_rdev *rdev; in reshape_position_store() local
4631 rdev_for_each(rdev, mddev) in reshape_position_store()
4632 rdev->new_data_offset = rdev->data_offset; in reshape_position_store()
5006 struct md_rdev *rdev; in md_run() local
5038 rdev_for_each(rdev, mddev) { in md_run()
5039 if (test_bit(Faulty, &rdev->flags)) in md_run()
5041 sync_blockdev(rdev->bdev); in md_run()
5042 invalidate_bdev(rdev->bdev); in md_run()
5048 if (rdev->meta_bdev) { in md_run()
5050 } else if (rdev->data_offset < rdev->sb_start) { in md_run()
5052 rdev->data_offset + mddev->dev_sectors in md_run()
5053 > rdev->sb_start) { in md_run()
5059 if (rdev->sb_start + rdev->sb_size/512 in md_run()
5060 > rdev->data_offset) { in md_run()
5066 sysfs_notify_dirent_safe(rdev->sysfs_state); in md_run()
5106 rdev_for_each(rdev, mddev) in md_run()
5108 if (rdev < rdev2 && in md_run()
5109 rdev->bdev->bd_contains == in md_run()
5116 bdevname(rdev->bdev,b), in md_run()
5199 rdev_for_each(rdev, mddev) in md_run()
5200 if (rdev->raid_disk >= 0) in md_run()
5201 if (sysfs_link_rdev(mddev, rdev)) in md_run()
5460 struct md_rdev *rdev; in do_md_stop() local
5508 rdev_for_each(rdev, mddev) in do_md_stop()
5509 if (rdev->raid_disk >= 0) in do_md_stop()
5510 sysfs_unlink_rdev(mddev, rdev); in do_md_stop()
5553 struct md_rdev *rdev; in autorun_array() local
5561 rdev_for_each(rdev, mddev) { in autorun_array()
5563 printk("<%s>", bdevname(rdev->bdev,b)); in autorun_array()
5588 struct md_rdev *rdev0, *rdev, *tmp; in autorun_devices() local
5603 rdev_for_each_list(rdev, tmp, &pending_raid_disks) in autorun_devices()
5604 if (super_90_load(rdev, rdev0, 0) >= 0) { in autorun_devices()
5606 bdevname(rdev->bdev,b)); in autorun_devices()
5607 list_move(&rdev->same_set, &candidates); in autorun_devices()
5649 rdev_for_each_list(rdev, tmp, &candidates) { in autorun_devices()
5650 list_del_init(&rdev->same_set); in autorun_devices()
5651 if (bind_rdev_to_array(rdev, mddev)) in autorun_devices()
5652 export_rdev(rdev); in autorun_devices()
5660 rdev_for_each_list(rdev, tmp, &candidates) { in autorun_devices()
5661 list_del_init(&rdev->same_set); in autorun_devices()
5662 export_rdev(rdev); in autorun_devices()
5688 struct md_rdev *rdev; in get_array_info() local
5692 rdev_for_each_rcu(rdev, mddev) { in get_array_info()
5694 if (test_bit(Faulty, &rdev->flags)) in get_array_info()
5698 if (test_bit(In_sync, &rdev->flags)) in get_array_info()
5776 struct md_rdev *rdev; in get_disk_info() local
5782 rdev = md_find_rdev_nr_rcu(mddev, info.number); in get_disk_info()
5783 if (rdev) { in get_disk_info()
5784 info.major = MAJOR(rdev->bdev->bd_dev); in get_disk_info()
5785 info.minor = MINOR(rdev->bdev->bd_dev); in get_disk_info()
5786 info.raid_disk = rdev->raid_disk; in get_disk_info()
5788 if (test_bit(Faulty, &rdev->flags)) in get_disk_info()
5790 else if (test_bit(In_sync, &rdev->flags)) { in get_disk_info()
5794 if (test_bit(WriteMostly, &rdev->flags)) in get_disk_info()
5812 struct md_rdev *rdev; in add_new_disk() local
5828 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); in add_new_disk()
5829 if (IS_ERR(rdev)) { in add_new_disk()
5832 PTR_ERR(rdev)); in add_new_disk()
5833 return PTR_ERR(rdev); in add_new_disk()
5840 .load_super(rdev, rdev0, mddev->minor_version); in add_new_disk()
5844 bdevname(rdev->bdev,b), in add_new_disk()
5846 export_rdev(rdev); in add_new_disk()
5850 err = bind_rdev_to_array(rdev, mddev); in add_new_disk()
5852 export_rdev(rdev); in add_new_disk()
5870 rdev = md_import_device(dev, mddev->major_version, in add_new_disk()
5873 rdev = md_import_device(dev, -1, -1); in add_new_disk()
5874 if (IS_ERR(rdev)) { in add_new_disk()
5877 PTR_ERR(rdev)); in add_new_disk()
5878 return PTR_ERR(rdev); in add_new_disk()
5884 rdev->raid_disk = info->raid_disk; in add_new_disk()
5885 set_bit(In_sync, &rdev->flags); in add_new_disk()
5886 clear_bit(Bitmap_sync, &rdev->flags); in add_new_disk()
5888 rdev->raid_disk = -1; in add_new_disk()
5889 rdev->saved_raid_disk = rdev->raid_disk; in add_new_disk()
5892 validate_super(mddev, rdev); in add_new_disk()
5894 rdev->raid_disk != info->raid_disk) { in add_new_disk()
5898 export_rdev(rdev); in add_new_disk()
5902 clear_bit(In_sync, &rdev->flags); /* just to be sure */ in add_new_disk()
5904 set_bit(WriteMostly, &rdev->flags); in add_new_disk()
5906 clear_bit(WriteMostly, &rdev->flags); in add_new_disk()
5914 set_bit(Candidate, &rdev->flags); in add_new_disk()
5917 export_rdev(rdev); in add_new_disk()
5922 err = md_cluster_ops->add_new_disk_start(mddev, rdev); in add_new_disk()
5925 export_rdev(rdev); in add_new_disk()
5931 rdev->raid_disk = -1; in add_new_disk()
5932 err = bind_rdev_to_array(rdev, mddev); in add_new_disk()
5934 export_rdev(rdev); in add_new_disk()
5936 err = add_bound_rdev(rdev); in add_new_disk()
5954 rdev = md_import_device(dev, -1, 0); in add_new_disk()
5955 if (IS_ERR(rdev)) { in add_new_disk()
5958 PTR_ERR(rdev)); in add_new_disk()
5959 return PTR_ERR(rdev); in add_new_disk()
5961 rdev->desc_nr = info->number; in add_new_disk()
5963 rdev->raid_disk = info->raid_disk; in add_new_disk()
5965 rdev->raid_disk = -1; in add_new_disk()
5967 if (rdev->raid_disk < mddev->raid_disks) in add_new_disk()
5969 set_bit(In_sync, &rdev->flags); in add_new_disk()
5972 set_bit(WriteMostly, &rdev->flags); in add_new_disk()
5976 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; in add_new_disk()
5978 rdev->sb_start = calc_dev_sboffset(rdev); in add_new_disk()
5979 rdev->sectors = rdev->sb_start; in add_new_disk()
5981 err = bind_rdev_to_array(rdev, mddev); in add_new_disk()
5983 export_rdev(rdev); in add_new_disk()
5994 struct md_rdev *rdev; in hot_remove_disk() local
5996 rdev = find_rdev(mddev, dev); in hot_remove_disk()
5997 if (!rdev) in hot_remove_disk()
6003 clear_bit(Blocked, &rdev->flags); in hot_remove_disk()
6004 remove_and_add_spares(mddev, rdev); in hot_remove_disk()
6006 if (rdev->raid_disk >= 0) in hot_remove_disk()
6010 md_cluster_ops->remove_disk(mddev, rdev); in hot_remove_disk()
6012 md_kick_rdev_from_array(rdev); in hot_remove_disk()
6024 bdevname(rdev->bdev,b), mdname(mddev)); in hot_remove_disk()
6032 struct md_rdev *rdev; in hot_add_disk() local
6050 rdev = md_import_device(dev, -1, 0); in hot_add_disk()
6051 if (IS_ERR(rdev)) { in hot_add_disk()
6054 PTR_ERR(rdev)); in hot_add_disk()
6059 rdev->sb_start = calc_dev_sboffset(rdev); in hot_add_disk()
6061 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; in hot_add_disk()
6063 rdev->sectors = rdev->sb_start; in hot_add_disk()
6065 if (test_bit(Faulty, &rdev->flags)) { in hot_add_disk()
6068 bdevname(rdev->bdev,b), mdname(mddev)); in hot_add_disk()
6075 clear_bit(In_sync, &rdev->flags); in hot_add_disk()
6076 rdev->desc_nr = -1; in hot_add_disk()
6077 rdev->saved_raid_disk = -1; in hot_add_disk()
6078 err = bind_rdev_to_array(rdev, mddev); in hot_add_disk()
6087 rdev->raid_disk = -1; in hot_add_disk()
6106 export_rdev(rdev); in hot_add_disk()
6288 struct md_rdev *rdev; in update_size() local
6309 rdev_for_each(rdev, mddev) { in update_size()
6310 sector_t avail = rdev->sectors; in update_size()
6326 struct md_rdev *rdev; in update_raid_disks() local
6340 rdev_for_each(rdev, mddev) { in update_raid_disks()
6342 rdev->data_offset < rdev->new_data_offset) in update_raid_disks()
6345 rdev->data_offset > rdev->new_data_offset) in update_raid_disks()
6492 struct md_rdev *rdev; in set_disk_faulty() local
6499 rdev = find_rdev_rcu(mddev, dev); in set_disk_faulty()
6500 if (!rdev) in set_disk_faulty()
6503 md_error(mddev, rdev); in set_disk_faulty()
6504 if (!test_bit(Faulty, &rdev->flags)) in set_disk_faulty()
7040 void md_error(struct mddev *mddev, struct md_rdev *rdev) in md_error() argument
7042 if (!rdev || test_bit(Faulty, &rdev->flags)) in md_error()
7047 mddev->pers->error_handler(mddev,rdev); in md_error()
7050 sysfs_notify_dirent_safe(rdev->sysfs_state); in md_error()
7065 struct md_rdev *rdev; in status_unused() local
7069 list_for_each_entry(rdev, &pending_raid_disks, same_set) { in status_unused()
7073 bdevname(rdev->bdev,b)); in status_unused()
7233 struct md_rdev *rdev; in md_seq_show() local
7266 rdev_for_each_rcu(rdev, mddev) { in md_seq_show()
7269 bdevname(rdev->bdev,b), rdev->desc_nr); in md_seq_show()
7270 if (test_bit(WriteMostly, &rdev->flags)) in md_seq_show()
7272 if (test_bit(Faulty, &rdev->flags)) { in md_seq_show()
7276 if (rdev->raid_disk < 0) in md_seq_show()
7278 if (test_bit(Replacement, &rdev->flags)) in md_seq_show()
7280 sectors += rdev->sectors; in md_seq_show()
7450 struct md_rdev *rdev; in is_mddev_idle() local
7456 rdev_for_each_rcu(rdev, mddev) { in is_mddev_idle()
7457 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; in is_mddev_idle()
7483 if (init || curr_events - rdev->last_events > 64) { in is_mddev_idle()
7484 rdev->last_events = curr_events; in is_mddev_idle()
7618 struct md_rdev *rdev; in md_do_sync() local
7728 rdev_for_each_rcu(rdev, mddev) in md_do_sync()
7729 if (rdev->raid_disk >= 0 && in md_do_sync()
7730 !test_bit(Faulty, &rdev->flags) && in md_do_sync()
7731 !test_bit(In_sync, &rdev->flags) && in md_do_sync()
7732 rdev->recovery_offset < j) in md_do_sync()
7733 j = rdev->recovery_offset; in md_do_sync()
7946 rdev_for_each_rcu(rdev, mddev) in md_do_sync()
7947 if (rdev->raid_disk >= 0 && in md_do_sync()
7949 !test_bit(Faulty, &rdev->flags) && in md_do_sync()
7950 !test_bit(In_sync, &rdev->flags) && in md_do_sync()
7951 rdev->recovery_offset < mddev->curr_resync) in md_do_sync()
7952 rdev->recovery_offset = mddev->curr_resync; in md_do_sync()
7980 struct md_rdev *rdev; in remove_and_add_spares() local
7984 rdev_for_each(rdev, mddev) in remove_and_add_spares()
7985 if ((this == NULL || rdev == this) && in remove_and_add_spares()
7986 rdev->raid_disk >= 0 && in remove_and_add_spares()
7987 !test_bit(Blocked, &rdev->flags) && in remove_and_add_spares()
7988 (test_bit(Faulty, &rdev->flags) || in remove_and_add_spares()
7989 ! test_bit(In_sync, &rdev->flags)) && in remove_and_add_spares()
7990 atomic_read(&rdev->nr_pending)==0) { in remove_and_add_spares()
7992 mddev, rdev) == 0) { in remove_and_add_spares()
7993 sysfs_unlink_rdev(mddev, rdev); in remove_and_add_spares()
7994 rdev->raid_disk = -1; in remove_and_add_spares()
8004 rdev_for_each(rdev, mddev) { in remove_and_add_spares()
8005 if (rdev->raid_disk >= 0 && in remove_and_add_spares()
8006 !test_bit(In_sync, &rdev->flags) && in remove_and_add_spares()
8007 !test_bit(Faulty, &rdev->flags)) in remove_and_add_spares()
8009 if (rdev->raid_disk >= 0) in remove_and_add_spares()
8011 if (test_bit(Faulty, &rdev->flags)) in remove_and_add_spares()
8014 ! (rdev->saved_raid_disk >= 0 && in remove_and_add_spares()
8015 !test_bit(Bitmap_sync, &rdev->flags))) in remove_and_add_spares()
8018 rdev->recovery_offset = 0; in remove_and_add_spares()
8020 hot_add_disk(mddev, rdev) == 0) { in remove_and_add_spares()
8021 if (sysfs_link_rdev(mddev, rdev)) in remove_and_add_spares()
8242 struct md_rdev *rdev; in md_reap_sync_thread() local
8266 rdev_for_each(rdev, mddev) in md_reap_sync_thread()
8267 rdev->saved_raid_disk = -1; in md_reap_sync_thread()
8288 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev) in md_wait_for_blocked_rdev() argument
8290 sysfs_notify_dirent_safe(rdev->sysfs_state); in md_wait_for_blocked_rdev()
8291 wait_event_timeout(rdev->blocked_wait, in md_wait_for_blocked_rdev()
8292 !test_bit(Blocked, &rdev->flags) && in md_wait_for_blocked_rdev()
8293 !test_bit(BlockedBadBlocks, &rdev->flags), in md_wait_for_blocked_rdev()
8295 rdev_dec_pending(rdev, mddev); in md_wait_for_blocked_rdev()
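
md_wait_for_blocked_rdev() (lines 8288-8295) lets a personality that already holds an nr_pending reference sleep until the Blocked and BlockedBadBlocks bits clear, then drops that reference. Reconstructed from the listed lines; the 5-second timeout value is an assumption from memory of this kernel era, not visible in the listing:

	void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
	{
		sysfs_notify_dirent_safe(rdev->sysfs_state);
		wait_event_timeout(rdev->blocked_wait,
				   !test_bit(Blocked, &rdev->flags) &&
				   !test_bit(BlockedBadBlocks, &rdev->flags),
				   msecs_to_jiffies(5000));	/* assumed timeout */
		rdev_dec_pending(rdev, mddev);
	}
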
8302 struct md_rdev *rdev; in md_finish_reshape() local
8304 rdev_for_each(rdev, mddev) { in md_finish_reshape()
8305 if (rdev->data_offset > rdev->new_data_offset) in md_finish_reshape()
8306 rdev->sectors += rdev->data_offset - rdev->new_data_offset; in md_finish_reshape()
8308 rdev->sectors -= rdev->new_data_offset - rdev->data_offset; in md_finish_reshape()
8309 rdev->data_offset = rdev->new_data_offset; in md_finish_reshape()
8562 int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, in rdev_set_badblocks() argument
8567 s += rdev->new_data_offset; in rdev_set_badblocks()
8569 s += rdev->data_offset; in rdev_set_badblocks()
8570 rv = md_set_badblocks(&rdev->badblocks, in rdev_set_badblocks()
8574 sysfs_notify_dirent_safe(rdev->sysfs_state); in rdev_set_badblocks()
8575 set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags); in rdev_set_badblocks()
8576 md_wakeup_thread(rdev->mddev->thread); in rdev_set_badblocks()
8677 int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors, in rdev_clear_badblocks() argument
8681 s += rdev->new_data_offset; in rdev_clear_badblocks()
8683 s += rdev->data_offset; in rdev_clear_badblocks()
8684 return md_clear_badblocks(&rdev->badblocks, in rdev_clear_badblocks()
8895 struct md_rdev *rdev, *tmp; in md_reload_sb() local
8897 rdev_for_each_safe(rdev, tmp, mddev) { in md_reload_sb()
8898 rdev->sb_loaded = 0; in md_reload_sb()
8899 ClearPageUptodate(rdev->sb_page); in md_reload_sb()
8903 rdev_for_each_safe(rdev, tmp, mddev) { in md_reload_sb()
8904 struct mdp_superblock_1 *sb = page_address(rdev->sb_page); in md_reload_sb()
8909 set_bit(Faulty, &rdev->flags); in md_reload_sb()
8944 struct md_rdev *rdev; in autostart_arrays() local
8961 rdev = md_import_device(dev,0, 90); in autostart_arrays()
8962 if (IS_ERR(rdev)) in autostart_arrays()
8965 if (test_bit(Faulty, &rdev->flags)) in autostart_arrays()
8968 set_bit(AutoDetected, &rdev->flags); in autostart_arrays()
8969 list_add(&rdev->same_set, &pending_raid_disks); in autostart_arrays()