Lines matching refs:rdev (all hits are in drivers/md/dm-raid.c)
45 struct md_rdev rdev; member
178 md_rdev_init(&rs->dev[i].rdev); in context_alloc()
199 md_rdev_clear(&rs->dev[i].rdev); in context_free()
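For context, the first three hits come from dm-raid's per-member wrapper around md's rdev and its symmetric setup/teardown; a condensed sketch (the two dm_dev fields are inferred from the dev_parms() hits below, comments are mine):

    struct raid_dev {
            struct dm_dev *meta_dev;        /* metadata device (may be absent) */
            struct dm_dev *data_dev;        /* data device */
            struct md_rdev rdev;            /* embedded md member (line 45) */
    };

    md_rdev_init(&rs->dev[i].rdev);         /* context_alloc(): per member */
    md_rdev_clear(&rs->dev[i].rdev);        /* context_free(): mirror image */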
231 rs->dev[i].rdev.raid_disk = i; in dev_parms()
240 rs->dev[i].rdev.data_offset = 0; in dev_parms()
241 rs->dev[i].rdev.mddev = &rs->md; in dev_parms()
251 rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL); in dev_parms()
252 if (!rs->dev[i].rdev.sb_page) in dev_parms()
257 if (!test_bit(In_sync, &rs->dev[i].rdev.flags) && in dev_parms()
258 (!rs->dev[i].rdev.recovery_offset)) { in dev_parms()
280 rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev; in dev_parms()
282 rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev; in dev_parms()
283 list_add(&rs->dev[i].rdev.same_set, &rs->md.disks); in dev_parms()
284 if (!test_bit(In_sync, &rs->dev[i].rdev.flags)) in dev_parms()
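The dev_parms() hits show how each table argument pair is wired into a live md member; per device, roughly (error handling reduced to the allocation check, the errno is an assumption):

    rs->dev[i].rdev.raid_disk = i;          /* slot in the array */
    rs->dev[i].rdev.data_offset = 0;        /* data starts at sector 0 */
    rs->dev[i].rdev.mddev = &rs->md;        /* back-pointer to the array */

    rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);   /* superblock buffer */
    if (!rs->dev[i].rdev.sb_page)
            return -ENOMEM;

    rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
    rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
    list_add(&rs->dev[i].rdev.same_set, &rs->md.disks); /* join the mddev */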
390 if (!test_bit(In_sync, &rs->dev[i].rdev.flags) || in validate_raid_redundancy()
391 !rs->dev[i].rdev.sb_page) in validate_raid_redundancy()
429 if ((!rs->dev[d].rdev.sb_page || in validate_raid_redundancy()
430 !test_bit(In_sync, &rs->dev[d].rdev.flags)) && in validate_raid_redundancy()
455 if ((!rs->dev[i].rdev.sb_page || in validate_raid_redundancy()
456 !test_bit(In_sync, &rs->dev[i].rdev.flags)) && in validate_raid_redundancy()
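All three validate_raid_redundancy() hits apply the same test: a member needs rebuilding when it is not In_sync or carries no superblock page. A minimal sketch of the counting step (the per-level limit shown for raid1 is an illustrative example):

    unsigned i, rebuild_cnt = 0;

    for (i = 0; i < rs->md.raid_disks; i++)
            if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
                !rs->dev[i].rdev.sb_page)
                    rebuild_cnt++;

    /* e.g. raid1 survives as long as one member remains in sync */
    if (rebuild_cnt >= rs->md.raid_disks)
            return -EINVAL;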
547 set_bit(In_sync, &rs->dev[i].rdev.flags); in parse_raid_params()
548 rs->dev[i].rdev.recovery_offset = MaxSector; in parse_raid_params()
602 clear_bit(In_sync, &rs->dev[value].rdev.flags); in parse_raid_params()
603 rs->dev[value].rdev.recovery_offset = 0; in parse_raid_params()
614 set_bit(WriteMostly, &rs->dev[value].rdev.flags); in parse_raid_params()
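parse_raid_params() first marks every member clean, then lets optional table parameters override the default; condensed from the hits:

    /* default: all members in sync, nothing to recover */
    set_bit(In_sync, &rs->dev[i].rdev.flags);
    rs->dev[i].rdev.recovery_offset = MaxSector;

    /* "rebuild <idx>": force a full resync of that member */
    clear_bit(In_sync, &rs->dev[value].rdev.flags);
    rs->dev[value].rdev.recovery_offset = 0;

    /* "write_mostly <idx>": steer reads away from it (raid1 only) */
    set_bit(WriteMostly, &rs->dev[value].rdev.flags);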
788 static int read_disk_sb(struct md_rdev *rdev, int size) in read_disk_sb() argument
790 BUG_ON(!rdev->sb_page); in read_disk_sb()
792 if (rdev->sb_loaded) in read_disk_sb()
795 if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, 1)) { in read_disk_sb()
797 rdev->raid_disk); in read_disk_sb()
798 md_error(rdev->mddev, rdev); in read_disk_sb()
802 rdev->sb_loaded = 1; in read_disk_sb()
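The hits above cover almost all of read_disk_sb(); a plausible reconstruction of the full function (the DMERR text and the -EINVAL return are assumptions):

    static int read_disk_sb(struct md_rdev *rdev, int size)
    {
            BUG_ON(!rdev->sb_page);

            if (rdev->sb_loaded)
                    return 0;       /* already cached */

            if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, 1)) {
                    DMERR("Failed to read superblock of device at position %d",
                          rdev->raid_disk);
                    md_error(rdev->mddev, rdev);
                    return -EINVAL;
            }

            rdev->sb_loaded = 1;

            return 0;
    }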
807 static void super_sync(struct mddev *mddev, struct md_rdev *rdev) in super_sync() argument
814 sb = page_address(rdev->sb_page); in super_sync()
819 test_bit(Faulty, &(rs->dev[i].rdev.flags))) in super_sync()
822 memset(sb + 1, 0, rdev->sb_size - sizeof(*sb)); in super_sync()
828 sb->array_position = cpu_to_le32(rdev->raid_disk); in super_sync()
833 sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset); in super_sync()
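super_sync() serialises in-core member state into the superblock page before it is written out; a condensed sketch (the failed_devices bitmap is inferred from the Faulty test at line 819):

    struct dm_raid_superblock *sb = page_address(rdev->sb_page);
    uint64_t failed_devices = 0;

    for (i = 0; i < mddev->raid_disks; i++)
            if (!rs->dev[i].data_dev ||
                test_bit(Faulty, &(rs->dev[i].rdev.flags)))
                    failed_devices |= (1ULL << i);

    memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));     /* zero the tail */

    sb->array_position = cpu_to_le32(rdev->raid_disk);
    sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
    sb->failed_devices = cpu_to_le64(failed_devices);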
849 static int super_load(struct md_rdev *rdev, struct md_rdev *refdev) in super_load() argument
856 rdev->sb_start = 0; in super_load()
857 rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev); in super_load()
858 if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) { in super_load()
863 ret = read_disk_sb(rdev, rdev->sb_size); in super_load()
867 sb = page_address(rdev->sb_page); in super_load()
875 (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) { in super_load()
876 super_sync(rdev->mddev, rdev); in super_load()
878 set_bit(FirstUse, &rdev->flags); in super_load()
881 set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags); in super_load()
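super_load() reads one member's superblock and reports whether it is fresher than the current reference; a sketch of the decision (return convention assumed: 1 = use as new reference, 0 = keep refdev):

    rdev->sb_start = 0;     /* dm-raid keeps the sb at sector 0 of the metadata dev */
    rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);

    ret = read_disk_sb(rdev, rdev->sb_size);
    if (ret)
            return ret;

    sb = page_address(rdev->sb_page);

    /* no valid sb yet, or a member that never started recovery:
     * write a fresh one and mark the device FirstUse */
    if (sb->magic != cpu_to_le32(DM_RAID_MAGIC) ||
        (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) {
            super_sync(rdev->mddev, rdev);
            set_bit(FirstUse, &rdev->flags);
            set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags); /* force sb writeout */
            return refdev ? 0 : 1;
    }

    if (!refdev)
            return 1;

    /* otherwise the higher event count wins */
    refsb = page_address(refdev->sb_page);
    return le64_to_cpu(sb->events) > le64_to_cpu(refsb->events) ? 1 : 0;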
898 static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev) in super_init_validation() argument
910 sb = page_address(rdev->sb_page); in super_init_validation()
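Only two hits land in super_init_validation(), which runs once against the freshest member; the essential step it performs on that page (flow assumed, field name from struct dm_raid_superblock):

    struct dm_raid_superblock *sb = page_address(rdev->sb_page);

    /* adopt the on-disk generation so later members can be compared */
    mddev->events = le64_to_cpu(sb->events);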
1028 static int super_validate(struct raid_set *rs, struct md_rdev *rdev) in super_validate() argument
1031 struct dm_raid_superblock *sb = page_address(rdev->sb_page); in super_validate()
1037 if (!mddev->events && super_init_validation(mddev, rdev)) in super_validate()
1042 rdev->mddev->bitmap_info.default_offset = mddev->bitmap_info.offset; in super_validate()
1044 if (!test_bit(FirstUse, &rdev->flags)) { in super_validate()
1045 rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset); in super_validate()
1046 if (rdev->recovery_offset != MaxSector) in super_validate()
1047 clear_bit(In_sync, &rdev->flags); in super_validate()
1053 if (test_bit(Faulty, &rdev->flags)) { in super_validate()
1054 clear_bit(Faulty, &rdev->flags); in super_validate()
1055 clear_bit(In_sync, &rdev->flags); in super_validate()
1056 rdev->saved_raid_disk = rdev->raid_disk; in super_validate()
1057 rdev->recovery_offset = 0; in super_validate()
1060 clear_bit(FirstUse, &rdev->flags); in super_validate()
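super_validate() then runs per member; the hits above condense to (taken directly from the listing, comments added):

    /* first call (no events adopted yet) validates the whole array
     * against the freshest superblock */
    if (!mddev->events && super_init_validation(mddev, rdev))
            return -EINVAL;

    if (!test_bit(FirstUse, &rdev->flags)) {
            rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
            if (rdev->recovery_offset != MaxSector)
                    clear_bit(In_sync, &rdev->flags);   /* mid-recovery */
    }

    /* a previously failed member that is back: restart its recovery */
    if (test_bit(Faulty, &rdev->flags)) {
            clear_bit(Faulty, &rdev->flags);
            clear_bit(In_sync, &rdev->flags);
            rdev->saved_raid_disk = rdev->raid_disk;
            rdev->recovery_offset = 0;
    }

    clear_bit(FirstUse, &rdev->flags);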
1072 struct md_rdev *rdev, *tmp, *freshest; in analyse_superblocks() local
1076 rdev_for_each_safe(rdev, tmp, mddev) { in analyse_superblocks()
1087 rdev->sectors = to_sector(i_size_read(rdev->bdev->bd_inode)); in analyse_superblocks()
1092 if (!rdev->meta_bdev) in analyse_superblocks()
1095 ret = super_load(rdev, freshest); in analyse_superblocks()
1099 freshest = rdev; in analyse_superblocks()
1104 dev = container_of(rdev, struct raid_dev, rdev); in analyse_superblocks()
1109 rdev->meta_bdev = NULL; in analyse_superblocks()
1111 if (rdev->sb_page) in analyse_superblocks()
1112 put_page(rdev->sb_page); in analyse_superblocks()
1114 rdev->sb_page = NULL; in analyse_superblocks()
1116 rdev->sb_loaded = 0; in analyse_superblocks()
1128 rdev->bdev = NULL; in analyse_superblocks()
1130 list_del(&rdev->same_set); in analyse_superblocks()
1150 rdev_for_each(rdev, mddev) in analyse_superblocks()
1151 if ((rdev != freshest) && super_validate(rs, rdev)) in analyse_superblocks()
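analyse_superblocks() ties it together: load every member's superblock, track the freshest, then validate the rest against it. Condensed sketch (failure handling reduced to the drop-the-member path visible in the hits):

    freshest = NULL;
    rdev_for_each_safe(rdev, tmp, mddev) {
            if (!rdev->meta_bdev)
                    continue;       /* no metadata device to load from */

            ret = super_load(rdev, freshest);
            switch (ret) {
            case 1:
                    freshest = rdev;        /* new reference superblock */
                    break;
            case 0:
                    break;
            default:
                    /* unreadable superblock: detach the member */
                    rdev->meta_bdev = NULL;
                    if (rdev->sb_page)
                            put_page(rdev->sb_page);
                    rdev->sb_page = NULL;
                    rdev->sb_loaded = 0;
                    rdev->bdev = NULL;
                    list_del(&rdev->same_set);
            }
    }

    rdev_for_each(rdev, mddev)
            if (rdev != freshest && super_validate(rs, rdev))
                    return -EINVAL;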
1175 if (!rs->dev[i].rdev.bdev) in configure_discard_support()
1178 q = bdev_get_queue(rs->dev[i].rdev.bdev); in configure_discard_support()
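configure_discard_support() only touches rdev to reach each data device's request queue; sketch (upstream additionally special-cases raid456, omitted here):

    for (i = 0; i < rs->md.raid_disks; i++) {
            struct request_queue *q;

            if (!rs->dev[i].rdev.bdev)
                    continue;       /* failed or absent member */

            q = bdev_get_queue(rs->dev[i].rdev.bdev);
            if (!q || !blk_queue_discard(q))
                    return;         /* every member must support discard */
    }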
1403 if (!test_bit(In_sync, &rs->dev[i].rdev.flags)) in raid_status()
1419 if (test_bit(Faulty, &rs->dev[i].rdev.flags)) in raid_status()
1422 !test_bit(In_sync, &rs->dev[i].rdev.flags)) in raid_status()
1462 !test_bit(In_sync, &rs->dev[i].rdev.flags)) in raid_status()
1465 test_bit(WriteMostly, &rs->dev[i].rdev.flags)) in raid_status()
1485 !test_bit(In_sync, &rs->dev[i].rdev.flags)) in raid_status()
1500 test_bit(WriteMostly, &rs->dev[i].rdev.flags)) in raid_status()
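The raid_status() hits implement the per-member health characters and the write_mostly reporting; the health logic condenses to (character meanings as documented for dm-raid status output):

    /* 'D' = failed, 'a' = alive but resyncing, 'A' = alive and in sync */
    if (test_bit(Faulty, &rs->dev[i].rdev.flags))
            DMEMIT("D");
    else if (!array_in_sync ||
             !test_bit(In_sync, &rs->dev[i].rdev.flags))
            DMEMIT("a");
    else
            DMEMIT("A");

    /* table output additionally emits "write_mostly <i>" for each member
     * with WriteMostly set (lines 1465/1500) */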
1647 r = &rs->dev[i].rdev; in attempt_restore_of_faulty_devices()
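The single hit in attempt_restore_of_faulty_devices() starts the loop that tries to revive transiently failed members; a sketch of the per-member test (the full flag set cleared upstream is wider than shown):

    for (i = 0; i < rs->md.raid_disks; i++) {
            r = &rs->dev[i].rdev;
            if (test_bit(Faulty, &r->flags) && r->sb_page &&
                sync_page_io(r, 0, r->sb_size, r->sb_page, READ, 1)) {
                    /* superblock readable again: clear the failure state
                     * and let md recover the member */
                    clear_bit(Faulty, &r->flags);
                    clear_bit(In_sync, &r->flags);
                    r->recovery_offset = 0;
            }
    }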