Lines matching refs: rs (references to struct raid_set *rs in drivers/md/dm-raid.c)
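All of the lines below dereference the dm-raid target's per-table context, struct raid_set, held in a local or argument conventionally named rs. For orientation, here is a rough sketch of its layout reconstructed purely from the accesses in this listing; member order, the flag-field types, and anything not referenced below are assumptions, not a copy of the real definition.

struct raid_dev {
	struct dm_dev *meta_dev;	/* optional metadata device (dev_parms) */
	struct dm_dev *data_dev;	/* data device (dev_parms) */
	struct md_rdev rdev;		/* per-member MD state */
};

struct raid_set {
	struct dm_target *ti;			/* rs->ti, ->error, ->table, ->len */
	uint32_t ctr_flags;			/* CTR_FLAG_* bits (parse_raid_params) */
	uint32_t bitmap_loaded;			/* one-shot bitmap_load() guard (raid_resume) */
	struct dm_target_callbacks callbacks;	/* congested_fn / list (raid_ctr, raid_dtr) */
	struct raid_type *raid_type;		/* name, level, algorithm, parity_devs */
	struct mddev md;			/* embedded MD array */
	struct raid_dev dev[0];			/* flexible array sized in context_alloc() */
};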

152 struct raid_set *rs; in context_alloc() local
159 rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL); in context_alloc()
160 if (!rs) { in context_alloc()
165 mddev_init(&rs->md); in context_alloc()
167 rs->ti = ti; in context_alloc()
168 rs->raid_type = raid_type; in context_alloc()
169 rs->md.raid_disks = raid_devs; in context_alloc()
170 rs->md.level = raid_type->level; in context_alloc()
171 rs->md.new_level = rs->md.level; in context_alloc()
172 rs->md.layout = raid_type->algorithm; in context_alloc()
173 rs->md.new_layout = rs->md.layout; in context_alloc()
174 rs->md.delta_disks = 0; in context_alloc()
175 rs->md.recovery_cp = 0; in context_alloc()
178 md_rdev_init(&rs->dev[i].rdev); in context_alloc()
189 return rs; in context_alloc()
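context_alloc() makes a single kzalloc() cover the raid_set and its raid_devs trailing dev[] slots, and reports failure as an ERR_PTR() value, which is why raid_ctr() checks the result with IS_ERR() at line 1262 below. A minimal sketch of that shape (error-message wording and the elided mddev/rdev setup are assumptions):

static struct raid_set *context_alloc(struct dm_target *ti,
				      struct raid_type *raid_type,
				      unsigned raid_devs)
{
	struct raid_set *rs;

	/* one allocation: the struct plus raid_devs flexible-array slots */
	rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL);
	if (!rs) {
		ti->error = "Cannot allocate raid context";	/* wording assumed */
		return ERR_PTR(-ENOMEM);
	}

	mddev_init(&rs->md);
	/* ... field setup and md_rdev_init() loop as listed above ... */
	return rs;
}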
192 static void context_free(struct raid_set *rs) in context_free() argument
196 for (i = 0; i < rs->md.raid_disks; i++) { in context_free()
197 if (rs->dev[i].meta_dev) in context_free()
198 dm_put_device(rs->ti, rs->dev[i].meta_dev); in context_free()
199 md_rdev_clear(&rs->dev[i].rdev); in context_free()
200 if (rs->dev[i].data_dev) in context_free()
201 dm_put_device(rs->ti, rs->dev[i].data_dev); in context_free()
204 kfree(rs); in context_free()
223 static int dev_parms(struct raid_set *rs, char **argv) in dev_parms() argument
230 for (i = 0; i < rs->md.raid_disks; i++, argv += 2) { in dev_parms()
231 rs->dev[i].rdev.raid_disk = i; in dev_parms()
233 rs->dev[i].meta_dev = NULL; in dev_parms()
234 rs->dev[i].data_dev = NULL; in dev_parms()
240 rs->dev[i].rdev.data_offset = 0; in dev_parms()
241 rs->dev[i].rdev.mddev = &rs->md; in dev_parms()
244 ret = dm_get_device(rs->ti, argv[0], in dev_parms()
245 dm_table_get_mode(rs->ti->table), in dev_parms()
246 &rs->dev[i].meta_dev); in dev_parms()
247 rs->ti->error = "RAID metadata device lookup failure"; in dev_parms()
251 rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL); in dev_parms()
252 if (!rs->dev[i].rdev.sb_page) in dev_parms()
257 if (!test_bit(In_sync, &rs->dev[i].rdev.flags) && in dev_parms()
258 (!rs->dev[i].rdev.recovery_offset)) { in dev_parms()
259 rs->ti->error = "Drive designated for rebuild not specified"; in dev_parms()
263 rs->ti->error = "No data device supplied with metadata device"; in dev_parms()
264 if (rs->dev[i].meta_dev) in dev_parms()
270 ret = dm_get_device(rs->ti, argv[1], in dev_parms()
271 dm_table_get_mode(rs->ti->table), in dev_parms()
272 &rs->dev[i].data_dev); in dev_parms()
274 rs->ti->error = "RAID device lookup failure"; in dev_parms()
278 if (rs->dev[i].meta_dev) { in dev_parms()
280 rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev; in dev_parms()
282 rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev; in dev_parms()
283 list_add(&rs->dev[i].rdev.same_set, &rs->md.disks); in dev_parms()
284 if (!test_bit(In_sync, &rs->dev[i].rdev.flags)) in dev_parms()
289 rs->md.external = 0; in dev_parms()
290 rs->md.persistent = 1; in dev_parms()
291 rs->md.major_version = 2; in dev_parms()
292 } else if (rebuild && !rs->md.recovery_cp) { in dev_parms()
305 rs->ti->error = "RAID device lookup failure"; in dev_parms()
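dev_parms() walks argv two entries at a time: an optional metadata device (or "-") followed by the data device, i.e. the <meta_dev> <data_dev> pairs of a dm-raid table. A trimmed sketch of that loop follows; the "-" comparisons do not reference rs and so are absent from the listing above, and are assumed from the dm-raid table format, with error handling and the superblock-page allocation elided.

static int dev_parms(struct raid_set *rs, char **argv)
{
	int i, ret = 0;

	for (i = 0; i < rs->md.raid_disks; i++, argv += 2) {
		rs->dev[i].rdev.raid_disk = i;
		rs->dev[i].meta_dev = NULL;
		rs->dev[i].data_dev = NULL;

		if (strcmp(argv[0], "-")) {		/* metadata device supplied */
			ret = dm_get_device(rs->ti, argv[0],
					    dm_table_get_mode(rs->ti->table),
					    &rs->dev[i].meta_dev);
			/* ... allocate rdev.sb_page, bail out on error ... */
		}

		if (!strcmp(argv[1], "-")) {
			/* "-" marks a failed or absent member; the sanity
			 * checks shown at lines 257-264 above are elided */
			continue;
		}

		ret = dm_get_device(rs->ti, argv[1],
				    dm_table_get_mode(rs->ti->table),
				    &rs->dev[i].data_dev);
		rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
		list_add(&rs->dev[i].rdev.same_set, &rs->md.disks);
	}

	return ret;	/* the real code returns on each failure; simplified here */
}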
322 static int validate_region_size(struct raid_set *rs, unsigned long region_size) in validate_region_size() argument
324 unsigned long min_region_size = rs->ti->len / (1 << 21); in validate_region_size()
343 if (region_size > rs->ti->len) { in validate_region_size()
344 rs->ti->error = "Supplied region size is too large"; in validate_region_size()
351 rs->ti->error = "Supplied region size is too small"; in validate_region_size()
356 rs->ti->error = "Region size is not a power of 2"; in validate_region_size()
360 if (region_size < rs->md.chunk_sectors) { in validate_region_size()
361 rs->ti->error = "Region size is smaller than the chunk size"; in validate_region_size()
369 rs->md.bitmap_info.chunksize = (region_size << 9); in validate_region_size()
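The last assignment converts region_size from 512-byte sectors to bytes for the MD bitmap: an illustrative region_size of 8192 sectors gives 8192 << 9 = 4 MiB per bitmap chunk (numbers chosen for the example, not taken from the source).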
383 static int validate_raid_redundancy(struct raid_set *rs) in validate_raid_redundancy() argument
389 for (i = 0; i < rs->md.raid_disks; i++) in validate_raid_redundancy()
390 if (!test_bit(In_sync, &rs->dev[i].rdev.flags) || in validate_raid_redundancy()
391 !rs->dev[i].rdev.sb_page) in validate_raid_redundancy()
394 switch (rs->raid_type->level) { in validate_raid_redundancy()
396 if (rebuild_cnt >= rs->md.raid_disks) in validate_raid_redundancy()
402 if (rebuild_cnt > rs->raid_type->parity_devs) in validate_raid_redundancy()
406 copies = raid10_md_layout_to_copies(rs->md.layout); in validate_raid_redundancy()
424 if (!strcmp("near", raid10_md_layout_to_format(rs->md.layout))) { in validate_raid_redundancy()
425 for (i = 0; i < rs->md.raid_disks * copies; i++) { in validate_raid_redundancy()
428 d = i % rs->md.raid_disks; in validate_raid_redundancy()
429 if ((!rs->dev[d].rdev.sb_page || in validate_raid_redundancy()
430 !test_bit(In_sync, &rs->dev[d].rdev.flags)) && in validate_raid_redundancy()
449 group_size = (rs->md.raid_disks / copies); in validate_raid_redundancy()
450 last_group_start = (rs->md.raid_disks / group_size) - 1; in validate_raid_redundancy()
452 for (i = 0; i < rs->md.raid_disks; i++) { in validate_raid_redundancy()
455 if ((!rs->dev[i].rdev.sb_page || in validate_raid_redundancy()
456 !test_bit(In_sync, &rs->dev[i].rdev.flags)) && in validate_raid_redundancy()
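Context for the raid10 branch above: with the "near" format, the copies of each chunk sit on adjacent devices, so validate_raid_redundancy() scans the members in copy-sized groups and refuses to activate if every device in some group is missing or out of sync. As an illustrative example, a 4-device raid10 with 2 near copies pairs devices 0/1 and 2/3: losing devices 0 and 2 still leaves one copy of all data, while losing 0 and 1 does not.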
495 static int parse_raid_params(struct raid_set *rs, char **argv, in parse_raid_params() argument
502 sector_t sectors_per_dev = rs->ti->len; in parse_raid_params()
511 rs->ti->error = "Bad chunk size"; in parse_raid_params()
513 } else if (rs->raid_type->level == 1) { in parse_raid_params()
518 rs->ti->error = "Chunk size must be a power of 2"; in parse_raid_params()
521 rs->ti->error = "Chunk size value is too small"; in parse_raid_params()
525 rs->md.new_chunk_sectors = rs->md.chunk_sectors = value; in parse_raid_params()
546 for (i = 0; i < rs->md.raid_disks; i++) { in parse_raid_params()
547 set_bit(In_sync, &rs->dev[i].rdev.flags); in parse_raid_params()
548 rs->dev[i].rdev.recovery_offset = MaxSector; in parse_raid_params()
556 rs->md.recovery_cp = MaxSector; in parse_raid_params()
557 rs->ctr_flags |= CTR_FLAG_NOSYNC; in parse_raid_params()
561 rs->md.recovery_cp = 0; in parse_raid_params()
562 rs->ctr_flags |= CTR_FLAG_SYNC; in parse_raid_params()
568 rs->ti->error = "Wrong number of raid parameters given"; in parse_raid_params()
576 if (rs->raid_type->level != 10) { in parse_raid_params()
577 rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type"; in parse_raid_params()
583 rs->ti->error = "Invalid 'raid10_format' value given"; in parse_raid_params()
587 rs->ctr_flags |= CTR_FLAG_RAID10_FORMAT; in parse_raid_params()
592 rs->ti->error = "Bad numerical argument given in raid params"; in parse_raid_params()
598 if (value >= rs->md.raid_disks) { in parse_raid_params()
599 rs->ti->error = "Invalid rebuild index given"; in parse_raid_params()
602 clear_bit(In_sync, &rs->dev[value].rdev.flags); in parse_raid_params()
603 rs->dev[value].rdev.recovery_offset = 0; in parse_raid_params()
604 rs->ctr_flags |= CTR_FLAG_REBUILD; in parse_raid_params()
606 if (rs->raid_type->level != 1) { in parse_raid_params()
607 rs->ti->error = "write_mostly option is only valid for RAID1"; in parse_raid_params()
610 if (value >= rs->md.raid_disks) { in parse_raid_params()
611 rs->ti->error = "Invalid write_mostly drive index given"; in parse_raid_params()
614 set_bit(WriteMostly, &rs->dev[value].rdev.flags); in parse_raid_params()
616 if (rs->raid_type->level != 1) { in parse_raid_params()
617 rs->ti->error = "max_write_behind option is only valid for RAID1"; in parse_raid_params()
620 rs->ctr_flags |= CTR_FLAG_MAX_WRITE_BEHIND; in parse_raid_params()
628 rs->ti->error = "Max write-behind limit out of range"; in parse_raid_params()
631 rs->md.bitmap_info.max_write_behind = value; in parse_raid_params()
633 rs->ctr_flags |= CTR_FLAG_DAEMON_SLEEP; in parse_raid_params()
635 rs->ti->error = "daemon sleep period out of range"; in parse_raid_params()
638 rs->md.bitmap_info.daemon_sleep = value; in parse_raid_params()
640 rs->ctr_flags |= CTR_FLAG_STRIPE_CACHE; in parse_raid_params()
648 if ((rs->raid_type->level != 5) && in parse_raid_params()
649 (rs->raid_type->level != 6)) { in parse_raid_params()
650 rs->ti->error = "Inappropriate argument: stripe_cache"; in parse_raid_params()
653 if (raid5_set_cache_size(&rs->md, (int)value)) { in parse_raid_params()
654 rs->ti->error = "Bad stripe_cache size"; in parse_raid_params()
658 rs->ctr_flags |= CTR_FLAG_MIN_RECOVERY_RATE; in parse_raid_params()
660 rs->ti->error = "min_recovery_rate out of range"; in parse_raid_params()
663 rs->md.sync_speed_min = (int)value; in parse_raid_params()
665 rs->ctr_flags |= CTR_FLAG_MAX_RECOVERY_RATE; in parse_raid_params()
667 rs->ti->error = "max_recovery_rate out of range"; in parse_raid_params()
670 rs->md.sync_speed_max = (int)value; in parse_raid_params()
672 rs->ctr_flags |= CTR_FLAG_REGION_SIZE; in parse_raid_params()
675 (rs->raid_type->level == 10)) { in parse_raid_params()
677 rs->ti->error = "Bad value for 'raid10_copies'"; in parse_raid_params()
680 rs->ctr_flags |= CTR_FLAG_RAID10_COPIES; in parse_raid_params()
684 rs->ti->error = "Unable to parse RAID parameters"; in parse_raid_params()
689 if (validate_region_size(rs, region_size)) in parse_raid_params()
692 if (rs->md.chunk_sectors) in parse_raid_params()
693 max_io_len = rs->md.chunk_sectors; in parse_raid_params()
697 if (dm_set_target_max_io_len(rs->ti, max_io_len)) in parse_raid_params()
700 if (rs->raid_type->level == 10) { in parse_raid_params()
701 if (raid10_copies > rs->md.raid_disks) { in parse_raid_params()
702 rs->ti->error = "Not enough devices to satisfy specification"; in parse_raid_params()
711 rs->ti->error = "Too many copies for given RAID10 format."; in parse_raid_params()
716 sectors_per_dev = rs->ti->len * raid10_copies; in parse_raid_params()
717 sector_div(sectors_per_dev, rs->md.raid_disks); in parse_raid_params()
719 rs->md.layout = raid10_format_to_md_layout(raid10_format, in parse_raid_params()
721 rs->md.new_layout = rs->md.layout; in parse_raid_params()
722 } else if ((!rs->raid_type->level || rs->raid_type->level > 1) && in parse_raid_params()
724 (rs->md.raid_disks - rs->raid_type->parity_devs))) { in parse_raid_params()
725 rs->ti->error = "Target length not divisible by number of data devices"; in parse_raid_params()
728 rs->md.dev_sectors = sectors_per_dev; in parse_raid_params()
731 rs->md.persistent = 0; in parse_raid_params()
732 rs->md.external = 1; in parse_raid_params()
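parse_raid_params() receives everything between the raid type name and the device count of a dm-raid table line: first the chunk size in sectors, then optional keyword arguments. Purely as an illustration, with placeholder sizes and device numbers, and with keyword spellings to be checked against the dm-raid documentation for the kernel in use, a raid10 mapping exercising a few of the options above could look like:

	0 3907028992 raid raid10 5 64 region_size 8192 raid10_copies 2 \
		4 - 8:16 - 8:32 - 8:48 - 8:64

Here "5" counts the raid parameters (chunk size 64 plus two keyword/value pairs), "4" counts the device pairs, and "-" means no metadata device for that member.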
739 struct raid_set *rs = container_of(ws, struct raid_set, md.event_work); in do_table_event() local
741 dm_table_event(rs->ti->table); in do_table_event()
746 struct raid_set *rs = container_of(cb, struct raid_set, callbacks); in raid_is_congested() local
748 return mddev_congested(&rs->md, bits); in raid_is_congested()
812 struct raid_set *rs = container_of(mddev, struct raid_set, md); in super_sync() local
818 if (!rs->dev[i].data_dev || in super_sync()
819 test_bit(Faulty, &(rs->dev[i].rdev.flags))) in super_sync()
901 struct raid_set *rs = container_of(mddev, struct raid_set, md); in super_init_validation() local
943 if ((rs->raid_type->level != 1) && in super_init_validation()
949 if (!(rs->ctr_flags & (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC))) in super_init_validation()
1007 if (rs->raid_type->level != 1) { in super_init_validation()
1008 rs->ti->error = "Cannot change device " in super_init_validation()
1028 static int super_validate(struct raid_set *rs, struct md_rdev *rdev) in super_validate() argument
1030 struct mddev *mddev = &rs->md; in super_validate()
1041 mddev->bitmap_info.offset = (rs->raid_type->level) ? to_sector(4096) : 0; in super_validate()
1068 static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) in analyse_superblocks() argument
1073 struct mddev *mddev = &rs->md; in analyse_superblocks()
1089 if (rs->ctr_flags & CTR_FLAG_SYNC) in analyse_superblocks()
1137 if (validate_raid_redundancy(rs)) { in analyse_superblocks()
1138 rs->ti->error = "Insufficient redundancy to activate array"; in analyse_superblocks()
1147 if (super_validate(rs, freshest)) in analyse_superblocks()
1151 if ((rdev != freshest) && super_validate(rs, rdev)) in analyse_superblocks()
1161 static void configure_discard_support(struct dm_target *ti, struct raid_set *rs) in configure_discard_support() argument
1170 raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6); in configure_discard_support()
1172 for (i = 0; i < rs->md.raid_disks; i++) { in configure_discard_support()
1175 if (!rs->dev[i].rdev.bdev) in configure_discard_support()
1178 q = bdev_get_queue(rs->dev[i].rdev.bdev); in configure_discard_support()
1200 ti->split_discard_bios = !!(rs->md.level == 1 || rs->md.level == 10); in configure_discard_support()
1218 struct raid_set *rs = NULL; in raid_ctr() local
1261 rs = context_alloc(ti, rt, (unsigned)num_raid_devs); in raid_ctr()
1262 if (IS_ERR(rs)) in raid_ctr()
1263 return PTR_ERR(rs); in raid_ctr()
1265 ret = parse_raid_params(rs, argv, (unsigned)num_raid_params); in raid_ctr()
1271 ret = dev_parms(rs, argv); in raid_ctr()
1275 rs->md.sync_super = super_sync; in raid_ctr()
1276 ret = analyse_superblocks(ti, rs); in raid_ctr()
1280 INIT_WORK(&rs->md.event_work, do_table_event); in raid_ctr()
1281 ti->private = rs; in raid_ctr()
1287 configure_discard_support(ti, rs); in raid_ctr()
1290 mddev_lock_nointr(&rs->md); in raid_ctr()
1291 ret = md_run(&rs->md); in raid_ctr()
1292 rs->md.in_sync = 0; /* Assume already marked dirty */ in raid_ctr()
1293 mddev_unlock(&rs->md); in raid_ctr()
1300 if (ti->len != rs->md.array_sectors) { in raid_ctr()
1305 rs->callbacks.congested_fn = raid_is_congested; in raid_ctr()
1306 dm_table_add_target_callbacks(ti->table, &rs->callbacks); in raid_ctr()
1308 mddev_suspend(&rs->md); in raid_ctr()
1312 md_stop(&rs->md); in raid_ctr()
1314 context_free(rs); in raid_ctr()
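The constructor flow visible above is: context_alloc(), parse_raid_params(), dev_parms(), analyse_superblocks(), then md_run() under the mddev lock. The trailing mddev_suspend() is intentional: the freshly assembled array stays quiesced until device-mapper resumes the target, at which point raid_resume() (lines 1696 onward) loads the bitmap once, clears MD_RECOVERY_FROZEN and calls mddev_resume().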
1321 struct raid_set *rs = ti->private; in raid_dtr() local
1323 list_del_init(&rs->callbacks.list); in raid_dtr()
1324 md_stop(&rs->md); in raid_dtr()
1325 context_free(rs); in raid_dtr()
1330 struct raid_set *rs = ti->private; in raid_map() local
1331 struct mddev *mddev = &rs->md; in raid_map()
1366 struct raid_set *rs = ti->private; in raid_status() local
1374 DMEMIT("%s %d ", rs->raid_type->name, rs->md.raid_disks); in raid_status()
1376 if (rs->raid_type->level) { in raid_status()
1377 if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery)) in raid_status()
1378 sync = rs->md.curr_resync_completed; in raid_status()
1380 sync = rs->md.recovery_cp; in raid_status()
1382 if (sync >= rs->md.resync_max_sectors) { in raid_status()
1387 sync = rs->md.resync_max_sectors; in raid_status()
1388 } else if (test_bit(MD_RECOVERY_REQUESTED, &rs->md.recovery)) { in raid_status()
1402 for (i = 0; i < rs->md.raid_disks; i++) in raid_status()
1403 if (!test_bit(In_sync, &rs->dev[i].rdev.flags)) in raid_status()
1409 sync = rs->md.resync_max_sectors; in raid_status()
1418 for (i = 0; i < rs->md.raid_disks; i++) { in raid_status()
1419 if (test_bit(Faulty, &rs->dev[i].rdev.flags)) in raid_status()
1422 !test_bit(In_sync, &rs->dev[i].rdev.flags)) in raid_status()
1438 (unsigned long long) rs->md.resync_max_sectors); in raid_status()
1445 DMEMIT(" %s", decipher_sync_action(&rs->md)); in raid_status()
1453 (strcmp(rs->md.last_sync_action, "check")) ? 0 : in raid_status()
1455 atomic64_read(&rs->md.resync_mismatches)); in raid_status()
1459 for (i = 0; i < rs->md.raid_disks; i++) { in raid_status()
1460 if ((rs->ctr_flags & CTR_FLAG_REBUILD) && in raid_status()
1461 rs->dev[i].data_dev && in raid_status()
1462 !test_bit(In_sync, &rs->dev[i].rdev.flags)) in raid_status()
1464 if (rs->dev[i].data_dev && in raid_status()
1465 test_bit(WriteMostly, &rs->dev[i].rdev.flags)) in raid_status()
1469 raid_param_cnt += (hweight32(rs->ctr_flags & ~CTR_FLAG_REBUILD) * 2); in raid_status()
1470 if (rs->ctr_flags & (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC)) in raid_status()
1473 DMEMIT("%s %u %u", rs->raid_type->name, in raid_status()
1474 raid_param_cnt, rs->md.chunk_sectors); in raid_status()
1476 if ((rs->ctr_flags & CTR_FLAG_SYNC) && in raid_status()
1477 (rs->md.recovery_cp == MaxSector)) in raid_status()
1479 if (rs->ctr_flags & CTR_FLAG_NOSYNC) in raid_status()
1482 for (i = 0; i < rs->md.raid_disks; i++) in raid_status()
1483 if ((rs->ctr_flags & CTR_FLAG_REBUILD) && in raid_status()
1484 rs->dev[i].data_dev && in raid_status()
1485 !test_bit(In_sync, &rs->dev[i].rdev.flags)) in raid_status()
1488 if (rs->ctr_flags & CTR_FLAG_DAEMON_SLEEP) in raid_status()
1490 rs->md.bitmap_info.daemon_sleep); in raid_status()
1492 if (rs->ctr_flags & CTR_FLAG_MIN_RECOVERY_RATE) in raid_status()
1493 DMEMIT(" min_recovery_rate %d", rs->md.sync_speed_min); in raid_status()
1495 if (rs->ctr_flags & CTR_FLAG_MAX_RECOVERY_RATE) in raid_status()
1496 DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max); in raid_status()
1498 for (i = 0; i < rs->md.raid_disks; i++) in raid_status()
1499 if (rs->dev[i].data_dev && in raid_status()
1500 test_bit(WriteMostly, &rs->dev[i].rdev.flags)) in raid_status()
1503 if (rs->ctr_flags & CTR_FLAG_MAX_WRITE_BEHIND) in raid_status()
1505 rs->md.bitmap_info.max_write_behind); in raid_status()
1507 if (rs->ctr_flags & CTR_FLAG_STRIPE_CACHE) { in raid_status()
1508 struct r5conf *conf = rs->md.private; in raid_status()
1515 if (rs->ctr_flags & CTR_FLAG_REGION_SIZE) in raid_status()
1517 rs->md.bitmap_info.chunksize >> 9); in raid_status()
1519 if (rs->ctr_flags & CTR_FLAG_RAID10_COPIES) in raid_status()
1521 raid10_md_layout_to_copies(rs->md.layout)); in raid_status()
1523 if (rs->ctr_flags & CTR_FLAG_RAID10_FORMAT) in raid_status()
1525 raid10_md_layout_to_format(rs->md.layout)); in raid_status()
1527 DMEMIT(" %d", rs->md.raid_disks); in raid_status()
1528 for (i = 0; i < rs->md.raid_disks; i++) { in raid_status()
1529 if (rs->dev[i].meta_dev) in raid_status()
1530 DMEMIT(" %s", rs->dev[i].meta_dev->name); in raid_status()
1534 if (rs->dev[i].data_dev) in raid_status()
1535 DMEMIT(" %s", rs->dev[i].data_dev->name); in raid_status()
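raid_status() produces two different strings: the block up to the mismatch counter feeds "dmsetup status" (sync progress against resync_max_sectors, a health character per member, the current sync action), while the block from roughly line 1459 onward re-emits the constructor arguments recorded in ctr_flags for "dmsetup table".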
1544 struct raid_set *rs = ti->private; in raid_message() local
1545 struct mddev *mddev = &rs->md; in raid_message()
1599 struct raid_set *rs = ti->private; in raid_iterate_devices() local
1603 for (i = 0; !ret && i < rs->md.raid_disks; i++) in raid_iterate_devices()
1604 if (rs->dev[i].data_dev) in raid_iterate_devices()
1606 rs->dev[i].data_dev, in raid_iterate_devices()
1608 rs->md.dev_sectors, in raid_iterate_devices()
1616 struct raid_set *rs = ti->private; in raid_io_hints() local
1617 unsigned chunk_size = rs->md.chunk_sectors << 9; in raid_io_hints()
1618 struct r5conf *conf = rs->md.private; in raid_io_hints()
1626 struct raid_set *rs = ti->private; in raid_presuspend() local
1628 md_stop_writes(&rs->md); in raid_presuspend()
1633 struct raid_set *rs = ti->private; in raid_postsuspend() local
1635 mddev_suspend(&rs->md); in raid_postsuspend()
1638 static void attempt_restore_of_faulty_devices(struct raid_set *rs) in attempt_restore_of_faulty_devices() argument
1646 for (i = 0; i < rs->md.raid_disks; i++) { in attempt_restore_of_faulty_devices()
1647 r = &rs->dev[i].rdev; in attempt_restore_of_faulty_devices()
1652 rs->raid_type->name, i); in attempt_restore_of_faulty_devices()
1685 rdev_for_each(r, &rs->md) { in attempt_restore_of_faulty_devices()
1696 struct raid_set *rs = ti->private; in raid_resume() local
1698 if (rs->raid_type->level) { in raid_resume()
1699 set_bit(MD_CHANGE_DEVS, &rs->md.flags); in raid_resume()
1701 if (!rs->bitmap_loaded) { in raid_resume()
1702 bitmap_load(&rs->md); in raid_resume()
1703 rs->bitmap_loaded = 1; in raid_resume()
1710 attempt_restore_of_faulty_devices(rs); in raid_resume()
1713 clear_bit(MD_RECOVERY_FROZEN, &rs->md.recovery); in raid_resume()
1716 mddev_resume(&rs->md); in raid_resume()