Lines matching refs:rs — cross-reference hits for the struct raid_set pointer "rs" in the device-mapper RAID target (drivers/md/dm-raid.c).
150 struct raid_set *rs; in context_alloc() local
157 rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL); in context_alloc()
158 if (!rs) { in context_alloc()
163 mddev_init(&rs->md); in context_alloc()
165 rs->ti = ti; in context_alloc()
166 rs->raid_type = raid_type; in context_alloc()
167 rs->md.raid_disks = raid_devs; in context_alloc()
168 rs->md.level = raid_type->level; in context_alloc()
169 rs->md.new_level = rs->md.level; in context_alloc()
170 rs->md.layout = raid_type->algorithm; in context_alloc()
171 rs->md.new_layout = rs->md.layout; in context_alloc()
172 rs->md.delta_disks = 0; in context_alloc()
173 rs->md.recovery_cp = 0; in context_alloc()
176 md_rdev_init(&rs->dev[i].rdev); in context_alloc()
187 return rs; in context_alloc()
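
context_alloc() sizes a single allocation to hold the raid_set header plus one raid_dev slot per member, the standard trailing-array idiom. A minimal userspace sketch of that idiom follows; the types are illustrative stand-ins and calloc() replaces kzalloc():

#include <stdlib.h>

/* Stand-ins for the kernel structures; fields are illustrative only. */
struct raid_dev_example { int raid_disk; };
struct raid_set_example {
    int raid_disks;
    struct raid_dev_example dev[];      /* trailing flexible array */
};

/* One zeroed allocation covers the header and all per-device slots,
 * mirroring kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL). */
struct raid_set_example *context_alloc_example(int raid_devs)
{
    struct raid_set_example *rs;

    rs = calloc(1, sizeof(*rs) + raid_devs * sizeof(rs->dev[0]));
    if (!rs)
        return NULL;
    rs->raid_disks = raid_devs;
    return rs;
}
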
190 static void context_free(struct raid_set *rs) in context_free() argument
194 for (i = 0; i < rs->md.raid_disks; i++) { in context_free()
195 if (rs->dev[i].meta_dev) in context_free()
196 dm_put_device(rs->ti, rs->dev[i].meta_dev); in context_free()
197 md_rdev_clear(&rs->dev[i].rdev); in context_free()
198 if (rs->dev[i].data_dev) in context_free()
199 dm_put_device(rs->ti, rs->dev[i].data_dev); in context_free()
202 kfree(rs); in context_free()
221 static int dev_parms(struct raid_set *rs, char **argv) in dev_parms() argument
228 for (i = 0; i < rs->md.raid_disks; i++, argv += 2) { in dev_parms()
229 rs->dev[i].rdev.raid_disk = i; in dev_parms()
231 rs->dev[i].meta_dev = NULL; in dev_parms()
232 rs->dev[i].data_dev = NULL; in dev_parms()
238 rs->dev[i].rdev.data_offset = 0; in dev_parms()
239 rs->dev[i].rdev.mddev = &rs->md; in dev_parms()
242 ret = dm_get_device(rs->ti, argv[0], in dev_parms()
243 dm_table_get_mode(rs->ti->table), in dev_parms()
244 &rs->dev[i].meta_dev); in dev_parms()
245 rs->ti->error = "RAID metadata device lookup failure"; in dev_parms()
249 rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL); in dev_parms()
250 if (!rs->dev[i].rdev.sb_page) in dev_parms()
255 if (!test_bit(In_sync, &rs->dev[i].rdev.flags) && in dev_parms()
256 (!rs->dev[i].rdev.recovery_offset)) { in dev_parms()
257 rs->ti->error = "Drive designated for rebuild not specified"; in dev_parms()
261 rs->ti->error = "No data device supplied with metadata device"; in dev_parms()
262 if (rs->dev[i].meta_dev) in dev_parms()
268 ret = dm_get_device(rs->ti, argv[1], in dev_parms()
269 dm_table_get_mode(rs->ti->table), in dev_parms()
270 &rs->dev[i].data_dev); in dev_parms()
272 rs->ti->error = "RAID device lookup failure"; in dev_parms()
276 if (rs->dev[i].meta_dev) { in dev_parms()
278 rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev; in dev_parms()
280 rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev; in dev_parms()
281 list_add(&rs->dev[i].rdev.same_set, &rs->md.disks); in dev_parms()
282 if (!test_bit(In_sync, &rs->dev[i].rdev.flags)) in dev_parms()
287 rs->md.external = 0; in dev_parms()
288 rs->md.persistent = 1; in dev_parms()
289 rs->md.major_version = 2; in dev_parms()
290 } else if (rebuild && !rs->md.recovery_cp) { in dev_parms()
303 rs->ti->error = "RAID device lookup failure"; in dev_parms()
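
dev_parms() consumes its argv two words at a time, one <meta_dev> <data_dev> pair per member, with "-" meaning "no device"; a metadata device without a data device is rejected. A hedged sketch of that pairwise walk, with the dm_get_device() lookups replaced by plain string handling:

#include <stdio.h>
#include <string.h>

/* Illustrative only: argv must hold 2 * raid_disks entries. */
int parse_dev_pairs_example(int raid_disks, char **argv)
{
    int i;

    for (i = 0; i < raid_disks; i++, argv += 2) {
        const char *meta = strcmp(argv[0], "-") ? argv[0] : NULL;
        const char *data = strcmp(argv[1], "-") ? argv[1] : NULL;

        if (!data && meta) {
            fprintf(stderr, "No data device supplied with metadata device\n");
            return -1;
        }
        printf("member %d: meta=%s data=%s\n", i,
               meta ? meta : "(none)", data ? data : "(none)");
    }
    return 0;
}
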
320 static int validate_region_size(struct raid_set *rs, unsigned long region_size) in validate_region_size() argument
322 unsigned long min_region_size = rs->ti->len / (1 << 21); in validate_region_size()
341 if (region_size > rs->ti->len) { in validate_region_size()
342 rs->ti->error = "Supplied region size is too large"; in validate_region_size()
349 rs->ti->error = "Supplied region size is too small"; in validate_region_size()
354 rs->ti->error = "Region size is not a power of 2"; in validate_region_size()
358 if (region_size < rs->md.chunk_sectors) { in validate_region_size()
359 rs->ti->error = "Region size is smaller than the chunk size"; in validate_region_size()
367 rs->md.bitmap_info.chunksize = (region_size << 9); in validate_region_size()
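
validate_region_size() bounds the write-intent bitmap region: no larger than the target, no smaller than ti->len / 2^21 (so at most about 2^21 regions), a power of two, and at least the chunk size; the final shift turns 512-byte sectors into the byte-sized bitmap chunksize. A standalone sketch of the same checks (the real function also picks a default when no region size was supplied):

#include <stdint.h>
#include <stdio.h>

/* Illustrative restatement of the validate_region_size() rules;
 * all sizes are in 512-byte sectors. */
int validate_region_size_example(uint64_t target_len,
                                 uint64_t chunk_sectors,
                                 uint64_t region_size)
{
    uint64_t min_region_size = target_len / (1 << 21);

    if (region_size > target_len) {
        fprintf(stderr, "Supplied region size is too large\n");
        return -1;
    }
    if (region_size < min_region_size) {
        fprintf(stderr, "Supplied region size is too small\n");
        return -1;
    }
    if (!region_size || (region_size & (region_size - 1))) {
        fprintf(stderr, "Region size is not a power of 2\n");
        return -1;
    }
    if (region_size < chunk_sectors) {
        fprintf(stderr, "Region size is smaller than the chunk size\n");
        return -1;
    }
    /* Sectors to bytes, as in bitmap_info.chunksize = region_size << 9. */
    printf("bitmap chunksize = %llu bytes\n",
           (unsigned long long)(region_size << 9));
    return 0;
}
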
381 static int validate_raid_redundancy(struct raid_set *rs) in validate_raid_redundancy() argument
387 for (i = 0; i < rs->md.raid_disks; i++) in validate_raid_redundancy()
388 if (!test_bit(In_sync, &rs->dev[i].rdev.flags) || in validate_raid_redundancy()
389 !rs->dev[i].rdev.sb_page) in validate_raid_redundancy()
392 switch (rs->raid_type->level) { in validate_raid_redundancy()
394 if (rebuild_cnt >= rs->md.raid_disks) in validate_raid_redundancy()
400 if (rebuild_cnt > rs->raid_type->parity_devs) in validate_raid_redundancy()
404 copies = raid10_md_layout_to_copies(rs->md.layout); in validate_raid_redundancy()
422 if (!strcmp("near", raid10_md_layout_to_format(rs->md.layout))) { in validate_raid_redundancy()
423 for (i = 0; i < rs->md.raid_disks * copies; i++) { in validate_raid_redundancy()
426 d = i % rs->md.raid_disks; in validate_raid_redundancy()
427 if ((!rs->dev[d].rdev.sb_page || in validate_raid_redundancy()
428 !test_bit(In_sync, &rs->dev[d].rdev.flags)) && in validate_raid_redundancy()
447 group_size = (rs->md.raid_disks / copies); in validate_raid_redundancy()
448 last_group_start = (rs->md.raid_disks / group_size) - 1; in validate_raid_redundancy()
450 for (i = 0; i < rs->md.raid_disks; i++) { in validate_raid_redundancy()
453 if ((!rs->dev[i].rdev.sb_page || in validate_raid_redundancy()
454 !test_bit(In_sync, &rs->dev[i].rdev.flags)) && in validate_raid_redundancy()
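
For the RAID10 "near" format, validate_raid_redundancy() requires every run of `copies` adjacent positions to keep at least one healthy member, since near layouts place a chunk's copies on adjacent devices. A hedged sketch of that grouping check, with a rebuilding[] array standing in for the "!sb_page || !In_sync" test:

#include <stdbool.h>

/* Illustrative version of the "near" branch: walk raid_disks * copies
 * positions, reset the counter at each group boundary, and fail if a
 * whole group of copies is rebuilding. */
bool near_layout_survives_example(const bool *rebuilding,
                                  int raid_disks, int copies)
{
    int i, rebuilds_per_group = 0;

    for (i = 0; i < raid_disks * copies; i++) {
        if (!(i % copies))
            rebuilds_per_group = 0;
        if (rebuilding[i % raid_disks] &&
            ++rebuilds_per_group >= copies)
            return false;       /* an entire mirror set would be lost */
    }
    return true;
}
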
495 static int parse_raid_params(struct raid_set *rs, char **argv, in parse_raid_params() argument
502 sector_t sectors_per_dev = rs->ti->len; in parse_raid_params()
511 rs->ti->error = "Bad chunk size"; in parse_raid_params()
513 } else if (rs->raid_type->level == 1) { in parse_raid_params()
518 rs->ti->error = "Chunk size must be a power of 2"; in parse_raid_params()
521 rs->ti->error = "Chunk size value is too small"; in parse_raid_params()
525 rs->md.new_chunk_sectors = rs->md.chunk_sectors = value; in parse_raid_params()
546 for (i = 0; i < rs->md.raid_disks; i++) { in parse_raid_params()
547 set_bit(In_sync, &rs->dev[i].rdev.flags); in parse_raid_params()
548 rs->dev[i].rdev.recovery_offset = MaxSector; in parse_raid_params()
556 rs->md.recovery_cp = MaxSector; in parse_raid_params()
557 rs->print_flags |= DMPF_NOSYNC; in parse_raid_params()
561 rs->md.recovery_cp = 0; in parse_raid_params()
562 rs->print_flags |= DMPF_SYNC; in parse_raid_params()
568 rs->ti->error = "Wrong number of raid parameters given"; in parse_raid_params()
576 if (rs->raid_type->level != 10) { in parse_raid_params()
577 rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type"; in parse_raid_params()
583 rs->ti->error = "Invalid 'raid10_format' value given"; in parse_raid_params()
587 rs->print_flags |= DMPF_RAID10_FORMAT; in parse_raid_params()
592 rs->ti->error = "Bad numerical argument given in raid params"; in parse_raid_params()
598 if (value >= rs->md.raid_disks) { in parse_raid_params()
599 rs->ti->error = "Invalid rebuild index given"; in parse_raid_params()
602 clear_bit(In_sync, &rs->dev[value].rdev.flags); in parse_raid_params()
603 rs->dev[value].rdev.recovery_offset = 0; in parse_raid_params()
604 rs->print_flags |= DMPF_REBUILD; in parse_raid_params()
606 if (rs->raid_type->level != 1) { in parse_raid_params()
607 rs->ti->error = "write_mostly option is only valid for RAID1"; in parse_raid_params()
610 if (value >= rs->md.raid_disks) { in parse_raid_params()
611 rs->ti->error = "Invalid write_mostly drive index given"; in parse_raid_params()
614 set_bit(WriteMostly, &rs->dev[value].rdev.flags); in parse_raid_params()
616 if (rs->raid_type->level != 1) { in parse_raid_params()
617 rs->ti->error = "max_write_behind option is only valid for RAID1"; in parse_raid_params()
620 rs->print_flags |= DMPF_MAX_WRITE_BEHIND; in parse_raid_params()
628 rs->ti->error = "Max write-behind limit out of range"; in parse_raid_params()
631 rs->md.bitmap_info.max_write_behind = value; in parse_raid_params()
633 rs->print_flags |= DMPF_DAEMON_SLEEP; in parse_raid_params()
635 rs->ti->error = "daemon sleep period out of range"; in parse_raid_params()
638 rs->md.bitmap_info.daemon_sleep = value; in parse_raid_params()
640 rs->print_flags |= DMPF_STRIPE_CACHE; in parse_raid_params()
648 if ((rs->raid_type->level != 5) && in parse_raid_params()
649 (rs->raid_type->level != 6)) { in parse_raid_params()
650 rs->ti->error = "Inappropriate argument: stripe_cache"; in parse_raid_params()
653 if (raid5_set_cache_size(&rs->md, (int)value)) { in parse_raid_params()
654 rs->ti->error = "Bad stripe_cache size"; in parse_raid_params()
658 rs->print_flags |= DMPF_MIN_RECOVERY_RATE; in parse_raid_params()
660 rs->ti->error = "min_recovery_rate out of range"; in parse_raid_params()
663 rs->md.sync_speed_min = (int)value; in parse_raid_params()
665 rs->print_flags |= DMPF_MAX_RECOVERY_RATE; in parse_raid_params()
667 rs->ti->error = "max_recovery_rate out of range"; in parse_raid_params()
670 rs->md.sync_speed_max = (int)value; in parse_raid_params()
672 rs->print_flags |= DMPF_REGION_SIZE; in parse_raid_params()
675 (rs->raid_type->level == 10)) { in parse_raid_params()
677 rs->ti->error = "Bad value for 'raid10_copies'"; in parse_raid_params()
680 rs->print_flags |= DMPF_RAID10_COPIES; in parse_raid_params()
684 rs->ti->error = "Unable to parse RAID parameters"; in parse_raid_params()
689 if (validate_region_size(rs, region_size)) in parse_raid_params()
692 if (rs->md.chunk_sectors) in parse_raid_params()
693 max_io_len = rs->md.chunk_sectors; in parse_raid_params()
697 if (dm_set_target_max_io_len(rs->ti, max_io_len)) in parse_raid_params()
700 if (rs->raid_type->level == 10) { in parse_raid_params()
701 if (raid10_copies > rs->md.raid_disks) { in parse_raid_params()
702 rs->ti->error = "Not enough devices to satisfy specification"; in parse_raid_params()
711 rs->ti->error = "Too many copies for given RAID10 format."; in parse_raid_params()
716 sectors_per_dev = rs->ti->len * raid10_copies; in parse_raid_params()
717 sector_div(sectors_per_dev, rs->md.raid_disks); in parse_raid_params()
719 rs->md.layout = raid10_format_to_md_layout(raid10_format, in parse_raid_params()
721 rs->md.new_layout = rs->md.layout; in parse_raid_params()
722 } else if ((rs->raid_type->level > 1) && in parse_raid_params()
724 (rs->md.raid_disks - rs->raid_type->parity_devs))) { in parse_raid_params()
725 rs->ti->error = "Target length not divisible by number of data devices"; in parse_raid_params()
728 rs->md.dev_sectors = sectors_per_dev; in parse_raid_params()
731 rs->md.persistent = 0; in parse_raid_params()
732 rs->md.external = 1; in parse_raid_params()
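
The tail of parse_raid_params() turns the target length into a per-member size: RAID10 spreads ti->len * copies over all members, the other striped levels (the level > 1 branch) split ti->len across the non-parity devices and insist the division is exact, and RAID1 keeps ti->len as is. A worked sketch of that arithmetic, with plain division standing in for sector_div():

#include <stdint.h>
#include <stdio.h>

/* Illustrative: all sizes in 512-byte sectors. */
int sectors_per_dev_example(uint64_t target_len, int level,
                            int raid_disks, int parity_devs,
                            int raid10_copies, uint64_t *out)
{
    uint64_t sectors_per_dev = target_len;

    if (level == 10) {
        /* Total data stored is target_len * copies, spread over all disks. */
        sectors_per_dev = target_len * raid10_copies;
        sectors_per_dev /= raid_disks;          /* sector_div() in the kernel */
    } else if (level > 1) {
        int data_devs = raid_disks - parity_devs;

        if (target_len % data_devs) {
            fprintf(stderr,
                    "Target length not divisible by number of data devices\n");
            return -1;
        }
        sectors_per_dev = target_len / data_devs;
    }
    *out = sectors_per_dev;     /* becomes rs->md.dev_sectors */
    return 0;
}
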
739 struct raid_set *rs = container_of(ws, struct raid_set, md.event_work); in do_table_event() local
741 dm_table_event(rs->ti->table); in do_table_event()
746 struct raid_set *rs = container_of(cb, struct raid_set, callbacks); in raid_is_congested() local
748 return mddev_congested(&rs->md, bits); in raid_is_congested()
812 struct raid_set *rs = container_of(mddev, struct raid_set, md); in super_sync() local
818 if (!rs->dev[i].data_dev || in super_sync()
819 test_bit(Faulty, &(rs->dev[i].rdev.flags))) in super_sync()
901 struct raid_set *rs = container_of(mddev, struct raid_set, md); in super_init_validation() local
943 if ((rs->raid_type->level != 1) && in super_init_validation()
949 if (!(rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC))) in super_init_validation()
1007 if (rs->raid_type->level != 1) { in super_init_validation()
1008 rs->ti->error = "Cannot change device " in super_init_validation()
1065 static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) in analyse_superblocks() argument
1070 struct mddev *mddev = &rs->md; in analyse_superblocks()
1084 if (rs->print_flags & DMPF_SYNC) in analyse_superblocks()
1132 if (validate_raid_redundancy(rs)) { in analyse_superblocks()
1133 rs->ti->error = "Insufficient redundancy to activate array"; in analyse_superblocks()
1156 static void configure_discard_support(struct dm_target *ti, struct raid_set *rs) in configure_discard_support() argument
1165 raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6); in configure_discard_support()
1167 for (i = 0; i < rs->md.raid_disks; i++) { in configure_discard_support()
1170 if (!rs->dev[i].rdev.bdev) in configure_discard_support()
1173 q = bdev_get_queue(rs->dev[i].rdev.bdev); in configure_discard_support()
1195 ti->split_discard_bios = !!(rs->md.level == 1 || rs->md.level == 10); in configure_discard_support()
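
configure_discard_support() only advertises discard on the RAID set when every member that is present supports it (with extra caution for levels 4/5/6), and splits discards per stripe for levels 1 and 10. A minimal sketch of the "all present members must agree" aggregation, with boolean arrays standing in for the per-rdev block-queue checks:

#include <stdbool.h>

/* Illustrative: the set only offers discard if every present member
 * does; the arrays stand in for the per-rdev bdev presence and the
 * block-queue discard checks. */
bool raid_set_supports_discard_example(const bool *dev_present,
                                       const bool *dev_discards,
                                       int raid_disks)
{
    int i;

    for (i = 0; i < raid_disks; i++) {
        if (!dev_present[i])            /* matches the !rdev.bdev skip */
            continue;
        if (!dev_discards[i])
            return false;
    }
    return true;
}
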
1213 struct raid_set *rs = NULL; in raid_ctr() local
1256 rs = context_alloc(ti, rt, (unsigned)num_raid_devs); in raid_ctr()
1257 if (IS_ERR(rs)) in raid_ctr()
1258 return PTR_ERR(rs); in raid_ctr()
1260 ret = parse_raid_params(rs, argv, (unsigned)num_raid_params); in raid_ctr()
1266 ret = dev_parms(rs, argv); in raid_ctr()
1270 rs->md.sync_super = super_sync; in raid_ctr()
1271 ret = analyse_superblocks(ti, rs); in raid_ctr()
1275 INIT_WORK(&rs->md.event_work, do_table_event); in raid_ctr()
1276 ti->private = rs; in raid_ctr()
1282 configure_discard_support(ti, rs); in raid_ctr()
1284 mutex_lock(&rs->md.reconfig_mutex); in raid_ctr()
1285 ret = md_run(&rs->md); in raid_ctr()
1286 rs->md.in_sync = 0; /* Assume already marked dirty */ in raid_ctr()
1287 mutex_unlock(&rs->md.reconfig_mutex); in raid_ctr()
1294 if (ti->len != rs->md.array_sectors) { in raid_ctr()
1299 rs->callbacks.congested_fn = raid_is_congested; in raid_ctr()
1300 dm_table_add_target_callbacks(ti->table, &rs->callbacks); in raid_ctr()
1302 mddev_suspend(&rs->md); in raid_ctr()
1306 md_stop(&rs->md); in raid_ctr()
1308 context_free(rs); in raid_ctr()
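
raid_ctr() is the dm target constructor: allocate the context, parse the parameter block, look up the member devices, validate superblocks, run the MD personality under reconfig_mutex, and leave the array suspended until resume; failures unwind through goto labels so everything set up so far is released. A generic sketch of that unwind style, with placeholder step functions rather than the real calls:

#include <stdlib.h>

/* Placeholder steps; the real constructor calls parse_raid_params(),
 * dev_parms(), analyse_superblocks(), md_run() and so on. */
void *step_alloc(void)    { return malloc(16); }
int step_parse(void *ctx) { (void)ctx; return 0; }
int step_start(void *ctx) { (void)ctx; return 0; }
int step_check(void *ctx) { (void)ctx; return 0; }
void step_stop(void *ctx) { (void)ctx; }

/* Illustrative goto-unwind skeleton in the style of raid_ctr(). */
int ctr_example(void **out)
{
    void *ctx;
    int ret;

    ctx = step_alloc();                 /* context_alloc() */
    if (!ctx)
        return -1;

    ret = step_parse(ctx);              /* parameter and device parsing */
    if (ret)
        goto bad;

    ret = step_start(ctx);              /* md_run() under reconfig_mutex */
    if (ret)
        goto bad;

    ret = step_check(ctx);              /* e.g. ti->len vs. array_sectors */
    if (ret)
        goto bad_stop;

    *out = ctx;                         /* ti->private = rs */
    return 0;

bad_stop:
    step_stop(ctx);                     /* md_stop(&rs->md) */
bad:
    free(ctx);                          /* context_free(rs) */
    return ret;
}
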
1315 struct raid_set *rs = ti->private; in raid_dtr() local
1317 list_del_init(&rs->callbacks.list); in raid_dtr()
1318 md_stop(&rs->md); in raid_dtr()
1319 context_free(rs); in raid_dtr()
1324 struct raid_set *rs = ti->private; in raid_map() local
1325 struct mddev *mddev = &rs->md; in raid_map()
1360 struct raid_set *rs = ti->private; in raid_status() local
1368 DMEMIT("%s %d ", rs->raid_type->name, rs->md.raid_disks); in raid_status()
1370 if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery)) in raid_status()
1371 sync = rs->md.curr_resync_completed; in raid_status()
1373 sync = rs->md.recovery_cp; in raid_status()
1375 if (sync >= rs->md.resync_max_sectors) { in raid_status()
1380 sync = rs->md.resync_max_sectors; in raid_status()
1381 } else if (test_bit(MD_RECOVERY_REQUESTED, &rs->md.recovery)) { in raid_status()
1395 for (i = 0; i < rs->md.raid_disks; i++) in raid_status()
1396 if (!test_bit(In_sync, &rs->dev[i].rdev.flags)) in raid_status()
1406 for (i = 0; i < rs->md.raid_disks; i++) { in raid_status()
1407 if (test_bit(Faulty, &rs->dev[i].rdev.flags)) in raid_status()
1410 !test_bit(In_sync, &rs->dev[i].rdev.flags)) in raid_status()
1426 (unsigned long long) rs->md.resync_max_sectors); in raid_status()
1433 DMEMIT(" %s", decipher_sync_action(&rs->md)); in raid_status()
1441 (strcmp(rs->md.last_sync_action, "check")) ? 0 : in raid_status()
1443 atomic64_read(&rs->md.resync_mismatches)); in raid_status()
1447 for (i = 0; i < rs->md.raid_disks; i++) { in raid_status()
1448 if ((rs->print_flags & DMPF_REBUILD) && in raid_status()
1449 rs->dev[i].data_dev && in raid_status()
1450 !test_bit(In_sync, &rs->dev[i].rdev.flags)) in raid_status()
1452 if (rs->dev[i].data_dev && in raid_status()
1453 test_bit(WriteMostly, &rs->dev[i].rdev.flags)) in raid_status()
1457 raid_param_cnt += (hweight32(rs->print_flags & ~DMPF_REBUILD) * 2); in raid_status()
1458 if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC)) in raid_status()
1461 DMEMIT("%s %u %u", rs->raid_type->name, in raid_status()
1462 raid_param_cnt, rs->md.chunk_sectors); in raid_status()
1464 if ((rs->print_flags & DMPF_SYNC) && in raid_status()
1465 (rs->md.recovery_cp == MaxSector)) in raid_status()
1467 if (rs->print_flags & DMPF_NOSYNC) in raid_status()
1470 for (i = 0; i < rs->md.raid_disks; i++) in raid_status()
1471 if ((rs->print_flags & DMPF_REBUILD) && in raid_status()
1472 rs->dev[i].data_dev && in raid_status()
1473 !test_bit(In_sync, &rs->dev[i].rdev.flags)) in raid_status()
1476 if (rs->print_flags & DMPF_DAEMON_SLEEP) in raid_status()
1478 rs->md.bitmap_info.daemon_sleep); in raid_status()
1480 if (rs->print_flags & DMPF_MIN_RECOVERY_RATE) in raid_status()
1481 DMEMIT(" min_recovery_rate %d", rs->md.sync_speed_min); in raid_status()
1483 if (rs->print_flags & DMPF_MAX_RECOVERY_RATE) in raid_status()
1484 DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max); in raid_status()
1486 for (i = 0; i < rs->md.raid_disks; i++) in raid_status()
1487 if (rs->dev[i].data_dev && in raid_status()
1488 test_bit(WriteMostly, &rs->dev[i].rdev.flags)) in raid_status()
1491 if (rs->print_flags & DMPF_MAX_WRITE_BEHIND) in raid_status()
1493 rs->md.bitmap_info.max_write_behind); in raid_status()
1495 if (rs->print_flags & DMPF_STRIPE_CACHE) { in raid_status()
1496 struct r5conf *conf = rs->md.private; in raid_status()
1503 if (rs->print_flags & DMPF_REGION_SIZE) in raid_status()
1505 rs->md.bitmap_info.chunksize >> 9); in raid_status()
1507 if (rs->print_flags & DMPF_RAID10_COPIES) in raid_status()
1509 raid10_md_layout_to_copies(rs->md.layout)); in raid_status()
1511 if (rs->print_flags & DMPF_RAID10_FORMAT) in raid_status()
1513 raid10_md_layout_to_format(rs->md.layout)); in raid_status()
1515 DMEMIT(" %d", rs->md.raid_disks); in raid_status()
1516 for (i = 0; i < rs->md.raid_disks; i++) { in raid_status()
1517 if (rs->dev[i].meta_dev) in raid_status()
1518 DMEMIT(" %s", rs->dev[i].meta_dev->name); in raid_status()
1522 if (rs->dev[i].data_dev) in raid_status()
1523 DMEMIT(" %s", rs->dev[i].data_dev->name); in raid_status()
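
In the table branch of raid_status(), the argument count echoed back to userspace is reconstructed from print_flags: rebuild and write_mostly contribute two words per affected device, every other recorded flag contributes a key/value pair, and sync/nosync are bare keywords, hence the final decrement. A hedged sketch of that count, with illustrative flag values and the GCC/Clang __builtin_popcount() in place of hweight32():

#include <stdint.h>

/* Flag bits standing in for DMPF_*; the values here are illustrative. */
#define EX_DMPF_SYNC    0x1u
#define EX_DMPF_NOSYNC  0x2u
#define EX_DMPF_REBUILD 0x4u

/* Illustrative count of the constructor arguments echoed by raid_status(). */
unsigned count_raid_params_example(uint32_t print_flags,
                                   unsigned rebuild_devs,
                                   unsigned write_mostly_devs)
{
    unsigned cnt = 1;                   /* the chunk size argument */

    if (print_flags & EX_DMPF_REBUILD)
        cnt += 2 * rebuild_devs;        /* "rebuild <idx>" per device */
    cnt += 2 * write_mostly_devs;       /* "write_mostly <idx>" per device */

    /* Every other recorded flag is a "<key> <value>" pair... */
    cnt += 2 * __builtin_popcount(print_flags & ~EX_DMPF_REBUILD);

    /* ...except sync/nosync, which are single keywords. */
    if (print_flags & (EX_DMPF_SYNC | EX_DMPF_NOSYNC))
        cnt--;

    return cnt;
}
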
1532 struct raid_set *rs = ti->private; in raid_message() local
1533 struct mddev *mddev = &rs->md; in raid_message()
1587 struct raid_set *rs = ti->private; in raid_iterate_devices() local
1591 for (i = 0; !ret && i < rs->md.raid_disks; i++) in raid_iterate_devices()
1592 if (rs->dev[i].data_dev) in raid_iterate_devices()
1594 rs->dev[i].data_dev, in raid_iterate_devices()
1596 rs->md.dev_sectors, in raid_iterate_devices()
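
raid_iterate_devices() reports each present data device to device-mapper through the supplied callback, stopping at the first nonzero return, which is what the "!ret &&" loop condition encodes. A small sketch of that short-circuiting iteration with placeholder types:

#include <stdbool.h>

/* Illustrative: call fn once per present device, stopping at the first
 * nonzero return. */
typedef int (*iterate_fn_example)(int dev_index, void *data);

int iterate_devices_example(const bool *dev_present, int raid_disks,
                            iterate_fn_example fn, void *data)
{
    int i, ret = 0;

    for (i = 0; !ret && i < raid_disks; i++)
        if (dev_present[i])
            ret = fn(i, data);

    return ret;
}
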
1604 struct raid_set *rs = ti->private; in raid_io_hints() local
1605 unsigned chunk_size = rs->md.chunk_sectors << 9; in raid_io_hints()
1606 struct r5conf *conf = rs->md.private; in raid_io_hints()
1614 struct raid_set *rs = ti->private; in raid_presuspend() local
1616 md_stop_writes(&rs->md); in raid_presuspend()
1621 struct raid_set *rs = ti->private; in raid_postsuspend() local
1623 mddev_suspend(&rs->md); in raid_postsuspend()
1626 static void attempt_restore_of_faulty_devices(struct raid_set *rs) in attempt_restore_of_faulty_devices() argument
1634 for (i = 0; i < rs->md.raid_disks; i++) { in attempt_restore_of_faulty_devices()
1635 r = &rs->dev[i].rdev; in attempt_restore_of_faulty_devices()
1640 rs->raid_type->name, i); in attempt_restore_of_faulty_devices()
1673 rdev_for_each(r, &rs->md) { in attempt_restore_of_faulty_devices()
1684 struct raid_set *rs = ti->private; in raid_resume() local
1686 set_bit(MD_CHANGE_DEVS, &rs->md.flags); in raid_resume()
1687 if (!rs->bitmap_loaded) { in raid_resume()
1688 bitmap_load(&rs->md); in raid_resume()
1689 rs->bitmap_loaded = 1; in raid_resume()
1696 attempt_restore_of_faulty_devices(rs); in raid_resume()
1699 clear_bit(MD_RECOVERY_FROZEN, &rs->md.recovery); in raid_resume()
1700 mddev_resume(&rs->md); in raid_resume()
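
raid_resume() loads the write-intent bitmap only on the first resume, guarded by rs->bitmap_loaded, then clears MD_RECOVERY_FROZEN and resumes the array. A tiny sketch of that load-once guard with placeholder callbacks:

#include <stdbool.h>

/* Illustrative load-once guard in the shape of raid_resume(). */
struct resume_ctx_example {
    bool bitmap_loaded;
};

void resume_example(struct resume_ctx_example *ctx,
                    void (*load_bitmap)(void),
                    void (*resume_array)(void))
{
    if (!ctx->bitmap_loaded) {
        load_bitmap();                  /* bitmap_load(&rs->md) */
        ctx->bitmap_loaded = true;      /* rs->bitmap_loaded = 1 */
    }
    resume_array();     /* clear MD_RECOVERY_FROZEN, then mddev_resume() */
}
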