/*
 * Copyright (C) 2010-2011 Neil Brown
 * Copyright (C) 2010-2014 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/slab.h>
#include <linux/module.h>

#include "md.h"
#include "raid1.h"
#include "raid5.h"
#include "raid10.h"
#include "bitmap.h"

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "raid"

static bool devices_handle_discard_safely = false;

/*
 * The following flags are used by dm-raid.c to set up the array state.
 * They must be cleared before md_run is called.
 */
#define FirstUse 10             /* rdev flag */
struct raid_dev {
	/*
	 * Two DM devices, one to hold metadata and one to hold the
	 * actual data/parity.  Keeping them separate avoids confusing
	 * ti->len and gives more flexibility in altering size and
	 * characteristics.
	 *
	 * While it is possible for this device to be associated
	 * with a different physical device than the data_dev, it
	 * is intended for it to be the same.
	 *    |--------- Physical Device ---------|
	 *    |- meta_dev -|------ data_dev ------|
	 */
	struct dm_dev *meta_dev;
	struct dm_dev *data_dev;
	struct md_rdev rdev;
};

/*
 * Flags for rs->print_flags field.
 */
#define DMPF_SYNC              0x1
#define DMPF_NOSYNC            0x2
#define DMPF_REBUILD           0x4
#define DMPF_DAEMON_SLEEP      0x8
#define DMPF_MIN_RECOVERY_RATE 0x10
#define DMPF_MAX_RECOVERY_RATE 0x20
#define DMPF_MAX_WRITE_BEHIND  0x40
#define DMPF_STRIPE_CACHE      0x80
#define DMPF_REGION_SIZE       0x100
#define DMPF_RAID10_COPIES     0x200
#define DMPF_RAID10_FORMAT     0x400

struct raid_set {
	struct dm_target *ti;

	uint32_t bitmap_loaded;
	uint32_t print_flags;

	struct mddev md;
	struct raid_type *raid_type;
	struct dm_target_callbacks callbacks;

	struct raid_dev dev[0];
};

/* Supported raid types and properties. */
static struct raid_type {
	const char *name;		/* RAID algorithm. */
	const char *descr;		/* Descriptor text for logging. */
	const unsigned parity_devs;	/* # of parity devices. */
	const unsigned minimal_devs;	/* minimal # of devices in set. */
	const unsigned level;		/* RAID level. */
	const unsigned algorithm;	/* RAID algorithm. */
} raid_types[] = {
	{"raid1",    "RAID1 (mirroring)",               0, 2, 1, 0 /* NONE */},
	{"raid10",   "RAID10 (striped mirrors)",        0, 2, 10, UINT_MAX /* Varies */},
	{"raid4",    "RAID4 (dedicated parity disk)",	1, 2, 5, ALGORITHM_PARITY_0},
	{"raid5_la", "RAID5 (left asymmetric)",		1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC},
	{"raid5_ra", "RAID5 (right asymmetric)",	1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC},
	{"raid5_ls", "RAID5 (left symmetric)",		1, 2, 5, ALGORITHM_LEFT_SYMMETRIC},
	{"raid5_rs", "RAID5 (right symmetric)",		1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC},
	{"raid6_zr", "RAID6 (zero restart)",		2, 4, 6, ALGORITHM_ROTATING_ZERO_RESTART},
	{"raid6_nr", "RAID6 (N restart)",		2, 4, 6, ALGORITHM_ROTATING_N_RESTART},
	{"raid6_nc", "RAID6 (N continue)",		2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE}
};
static char *raid10_md_layout_to_format(int layout)
{
	/*
	 * Bits 16 and 17 stand for "offset" and "use_far_sets".
	 * Refer to MD's raid10.c for details.
	 */
	if ((layout & 0x10000) && (layout & 0x20000))
		return "offset";

	if ((layout & 0xFF) > 1)
		return "near";

	return "far";
}

static unsigned raid10_md_layout_to_copies(int layout)
{
	if ((layout & 0xFF) > 1)
		return layout & 0xFF;
	return (layout >> 8) & 0xFF;
}

static int raid10_format_to_md_layout(char *format, unsigned copies)
{
	unsigned n = 1, f = 1;

	if (!strcmp("near", format))
		n = copies;
	else
		f = copies;

	if (!strcmp("offset", format))
		return 0x30000 | (f << 8) | n;

	if (!strcmp("far", format))
		return 0x20000 | (f << 8) | n;

	return (f << 8) | n;
}
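
/*
 * Worked examples of the layout encoding above (the values follow
 * directly from raid10_format_to_md_layout()):
 *   "near",   2 copies -> 0x102   (n = 2, f = 1)
 *   "far",    2 copies -> 0x20201 (n = 1, f = 2, bit 17 = use_far_sets)
 *   "offset", 2 copies -> 0x30201 (bits 16 and 17 set)
 * raid10_md_layout_to_format() and raid10_md_layout_to_copies() invert
 * this encoding.
 */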

static struct raid_type *get_raid_type(char *name)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(raid_types); i++)
		if (!strcmp(raid_types[i].name, name))
			return &raid_types[i];

	return NULL;
}
static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *raid_type, unsigned raid_devs)
{
	unsigned i;
	struct raid_set *rs;

	if (raid_devs <= raid_type->parity_devs) {
		ti->error = "Insufficient number of devices";
		return ERR_PTR(-EINVAL);
	}

	rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL);
	if (!rs) {
		ti->error = "Cannot allocate raid context";
		return ERR_PTR(-ENOMEM);
	}

	mddev_init(&rs->md);

	rs->ti = ti;
	rs->raid_type = raid_type;
	rs->md.raid_disks = raid_devs;
	rs->md.level = raid_type->level;
	rs->md.new_level = rs->md.level;
	rs->md.layout = raid_type->algorithm;
	rs->md.new_layout = rs->md.layout;
	rs->md.delta_disks = 0;
	rs->md.recovery_cp = 0;

	for (i = 0; i < raid_devs; i++)
		md_rdev_init(&rs->dev[i].rdev);

	/*
	 * Remaining items to be initialized by further RAID params:
	 *  rs->md.persistent
	 *  rs->md.external
	 *  rs->md.chunk_sectors
	 *  rs->md.new_chunk_sectors
	 *  rs->md.dev_sectors
	 */

	return rs;
}

static void context_free(struct raid_set *rs)
{
	int i;

	for (i = 0; i < rs->md.raid_disks; i++) {
		if (rs->dev[i].meta_dev)
			dm_put_device(rs->ti, rs->dev[i].meta_dev);
		md_rdev_clear(&rs->dev[i].rdev);
		if (rs->dev[i].data_dev)
			dm_put_device(rs->ti, rs->dev[i].data_dev);
	}

	kfree(rs);
}

/*
 * For every device we have two words
 *  <meta_dev>: meta device name or '-' if missing
 *  <data_dev>: data device name or '-' if missing
 *
 * The following are permitted:
 *    - -
 *    - <data_dev>
 *    <meta_dev> <data_dev>
 *
 * The following is not allowed:
 *    <meta_dev> -
 *
 * This code parses those words.  If there is a failure,
 * the caller must use context_free to unwind the operations.
 */
static int dev_parms(struct raid_set *rs, char **argv)
{
	int i;
	int rebuild = 0;
	int metadata_available = 0;
	int ret = 0;

	for (i = 0; i < rs->md.raid_disks; i++, argv += 2) {
		rs->dev[i].rdev.raid_disk = i;

		rs->dev[i].meta_dev = NULL;
		rs->dev[i].data_dev = NULL;

		/*
		 * There are no offsets, since there is a separate device
		 * for data and metadata.
		 */
		rs->dev[i].rdev.data_offset = 0;
		rs->dev[i].rdev.mddev = &rs->md;

		if (strcmp(argv[0], "-")) {
			ret = dm_get_device(rs->ti, argv[0],
					    dm_table_get_mode(rs->ti->table),
					    &rs->dev[i].meta_dev);
			rs->ti->error = "RAID metadata device lookup failure";
			if (ret)
				return ret;

			rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
			if (!rs->dev[i].rdev.sb_page)
				return -ENOMEM;
		}

		if (!strcmp(argv[1], "-")) {
			if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
			    (!rs->dev[i].rdev.recovery_offset)) {
				rs->ti->error = "Drive designated for rebuild not specified";
				return -EINVAL;
			}

			rs->ti->error = "No data device supplied with metadata device";
			if (rs->dev[i].meta_dev)
				return -EINVAL;

			continue;
		}

		ret = dm_get_device(rs->ti, argv[1],
				    dm_table_get_mode(rs->ti->table),
				    &rs->dev[i].data_dev);
		if (ret) {
			rs->ti->error = "RAID device lookup failure";
			return ret;
		}

		if (rs->dev[i].meta_dev) {
			metadata_available = 1;
			rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
		}
		rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
		list_add(&rs->dev[i].rdev.same_set, &rs->md.disks);
		if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
			rebuild++;
	}

	if (metadata_available) {
		rs->md.external = 0;
		rs->md.persistent = 1;
		rs->md.major_version = 2;
	} else if (rebuild && !rs->md.recovery_cp) {
		/*
		 * Without metadata, we will not be able to tell if the array
		 * is in-sync or not - we must assume it is not.  Therefore,
		 * it is impossible to rebuild a drive.
		 *
		 * Even if there is metadata, the on-disk information may
		 * indicate that the array is not in-sync and it will then
		 * fail at that time.
		 *
		 * User could specify 'nosync' option if desperate.
		 */
		DMERR("Unable to rebuild drive while array is not in-sync");
		rs->ti->error = "Unable to rebuild drive while array is not in-sync";
		return -EINVAL;
	}

	return 0;
}

/*
 * validate_region_size
 * @rs
 * @region_size:  region size in sectors.  If 0, pick a size (4MiB default).
 *
 * Set rs->md.bitmap_info.chunksize (which really refers to 'region size').
 * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_region_size(struct raid_set *rs, unsigned long region_size)
{
	unsigned long min_region_size = rs->ti->len / (1 << 21);
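	/*
	 * e.g. a 16TiB target (2^35 sectors) yields a minimum region
	 * size of 2^35 / 2^21 = 2^14 sectors (8MiB).
	 */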

	if (!region_size) {
		/*
		 * Choose a reasonable default.  All figures in sectors.
		 */
		if (min_region_size > (1 << 13)) {
			/* If not a power of 2, make it the next power of 2 */
			region_size = roundup_pow_of_two(min_region_size);
			DMINFO("Choosing default region size of %lu sectors",
			       region_size);
		} else {
			DMINFO("Choosing default region size of 4MiB");
			region_size = 1 << 13; /* sectors */
		}
	} else {
		/*
		 * Validate user-supplied value.
		 */
		if (region_size > rs->ti->len) {
			rs->ti->error = "Supplied region size is too large";
			return -EINVAL;
		}

		if (region_size < min_region_size) {
			DMERR("Supplied region_size (%lu sectors) below minimum (%lu)",
			      region_size, min_region_size);
			rs->ti->error = "Supplied region size is too small";
			return -EINVAL;
		}

		if (!is_power_of_2(region_size)) {
			rs->ti->error = "Region size is not a power of 2";
			return -EINVAL;
		}

		if (region_size < rs->md.chunk_sectors) {
			rs->ti->error = "Region size is smaller than the chunk size";
			return -EINVAL;
		}
	}

	/*
	 * Convert sectors to bytes.
	 */
	rs->md.bitmap_info.chunksize = (region_size << 9);

	return 0;
}

/*
 * validate_raid_redundancy
 * @rs
 *
 * Determine if there are enough devices in the array that haven't
 * failed (or are being rebuilt) to form a usable array.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_raid_redundancy(struct raid_set *rs)
{
	unsigned i, rebuild_cnt = 0;
	unsigned rebuilds_per_group = 0, copies, d;
	unsigned group_size, last_group_start;

	for (i = 0; i < rs->md.raid_disks; i++)
		if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
		    !rs->dev[i].rdev.sb_page)
			rebuild_cnt++;

	switch (rs->raid_type->level) {
	case 1:
		if (rebuild_cnt >= rs->md.raid_disks)
			goto too_many;
		break;
	case 4:
	case 5:
	case 6:
		if (rebuild_cnt > rs->raid_type->parity_devs)
			goto too_many;
		break;
	case 10:
		copies = raid10_md_layout_to_copies(rs->md.layout);
		if (rebuild_cnt < copies)
			break;

		/*
		 * It is possible to have a higher rebuild count for RAID10,
		 * as long as the failed devices occur in different mirror
		 * groups (i.e. different stripes).
		 *
		 * When checking "near" format, make sure no adjacent devices
		 * have failed beyond what can be handled.  In addition to the
		 * simple case where the number of devices is a multiple of the
		 * number of copies, we must also handle cases where the number
		 * of devices is not a multiple of the number of copies.
		 * E.g.    dev1 dev2 dev3 dev4 dev5
		 *          A    A    B    B    C
		 *          C    D    D    E    E
		 */
		if (!strcmp("near", raid10_md_layout_to_format(rs->md.layout))) {
			for (i = 0; i < rs->md.raid_disks * copies; i++) {
				if (!(i % copies))
					rebuilds_per_group = 0;
				d = i % rs->md.raid_disks;
				if ((!rs->dev[d].rdev.sb_page ||
				     !test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
				    (++rebuilds_per_group >= copies))
					goto too_many;
			}
			break;
		}

		/*
		 * When checking "far" and "offset" formats, we need to ensure
		 * that the device that holds its copy is not also dead or
		 * being rebuilt.  (Note that "far" and "offset" formats only
		 * support two copies right now.  These formats also only ever
		 * use the 'use_far_sets' variant.)
		 *
		 * This check is somewhat complicated by the need to account
		 * for arrays that are not a multiple of (far) copies.  This
		 * results in the need to treat the last (potentially larger)
		 * set differently.
		 */
		group_size = (rs->md.raid_disks / copies);
		last_group_start = (rs->md.raid_disks / group_size) - 1;
		last_group_start *= group_size;
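		/*
		 * e.g. 5 disks with 2 copies: group_size = 2 and
		 * last_group_start = 2, giving sets {0,1} and the larger
		 * trailing set {2,3,4}.
		 */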
		for (i = 0; i < rs->md.raid_disks; i++) {
			if (!(i % copies) && !(i > last_group_start))
				rebuilds_per_group = 0;
			if ((!rs->dev[i].rdev.sb_page ||
			     !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
			    (++rebuilds_per_group >= copies))
				goto too_many;
		}
		break;
	default:
		if (rebuild_cnt)
			return -EINVAL;
	}

	return 0;

too_many:
	return -EINVAL;
}

/*
 * Possible arguments are...
 *	<chunk_size> [optional_args]
 *
 * Argument definitions
 *    <chunk_size>			The number of sectors per disk that
 *                                      will form the "stripe"
 *    [[no]sync]			Force or prevent recovery of the
 *                                      entire array
 *    [devices_handle_discard_safely]	Allow discards on RAID4/5/6; useful if RAID
 *					member device(s) properly support TRIM/UNMAP
 *    [rebuild <idx>]			Rebuild the drive indicated by the index
 *    [daemon_sleep <ms>]		Time between bitmap daemon work to
 *                                      clear bits
 *    [min_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [max_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [write_mostly <idx>]		Indicate a write mostly drive via index
 *    [max_write_behind <sectors>]	See '--write-behind=' (man mdadm)
 *    [stripe_cache <sectors>]		Stripe cache size for higher RAIDs
 *    [region_size <sectors>]           Defines granularity of bitmap
 *
 * RAID10-only options:
 *    [raid10_copies <# copies>]        Number of copies.  (Default: 2)
 *    [raid10_format <near|far|offset>] Layout algorithm.  (Default: near)
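 *
 * Example mapping-table line (hypothetical device numbers): a two-device
 * RAID1 with three raid parameters (chunk_size plus one key/value pair)
 * and no metadata devices:
 *    0 1960893648 raid raid1 3 0 region_size 1024 2 - 8:17 - 8:33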
 */
static int parse_raid_params(struct raid_set *rs, char **argv,
			     unsigned num_raid_params)
{
	char *raid10_format = "near";
	unsigned raid10_copies = 2;
	unsigned i;
	unsigned long value, region_size = 0;
	sector_t sectors_per_dev = rs->ti->len;
	sector_t max_io_len;
	char *key;

	/*
	 * First, parse the in-order required arguments
	 * "chunk_size" is the only argument of this type.
	 */
	if (kstrtoul(argv[0], 10, &value) < 0) {
		rs->ti->error = "Bad chunk size";
		return -EINVAL;
	} else if (rs->raid_type->level == 1) {
		if (value)
			DMERR("Ignoring chunk size parameter for RAID 1");
		value = 0;
	} else if (!is_power_of_2(value)) {
		rs->ti->error = "Chunk size must be a power of 2";
		return -EINVAL;
	} else if (value < 8) {
		rs->ti->error = "Chunk size value is too small";
		return -EINVAL;
	}
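
	/*
	 * All sizes above are in 512-byte sectors; e.g. a chunk_size of
	 * 128 means 64KiB chunks.
	 */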

	rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;
	argv++;
	num_raid_params--;

	/*
	 * We set each individual device as In_sync with a completed
	 * 'recovery_offset'.  If there has been a device failure or
	 * replacement then one of the following cases applies:
	 *
	 *   1) User specifies 'rebuild'.
	 *      - Device is reset when param is read.
	 *   2) A new device is supplied.
	 *      - No matching superblock found, resets device.
	 *   3) Device failure was transient and returns on reload.
	 *      - Failure noticed, resets device for bitmap replay.
	 *   4) Device hadn't completed recovery after previous failure.
	 *      - Superblock is read and overrides recovery_offset.
	 *
	 * What is found in the superblocks of the devices is always
	 * authoritative, unless 'rebuild' or '[no]sync' was specified.
	 */
	for (i = 0; i < rs->md.raid_disks; i++) {
		set_bit(In_sync, &rs->dev[i].rdev.flags);
		rs->dev[i].rdev.recovery_offset = MaxSector;
	}

	/*
	 * Second, parse the unordered optional arguments
	 */
	for (i = 0; i < num_raid_params; i++) {
		if (!strcasecmp(argv[i], "nosync")) {
			rs->md.recovery_cp = MaxSector;
			rs->print_flags |= DMPF_NOSYNC;
			continue;
		}
		if (!strcasecmp(argv[i], "sync")) {
			rs->md.recovery_cp = 0;
			rs->print_flags |= DMPF_SYNC;
			continue;
		}

		/* The rest of the optional arguments come in key/value pairs */
		if ((i + 1) >= num_raid_params) {
			rs->ti->error = "Wrong number of raid parameters given";
			return -EINVAL;
		}

		key = argv[i++];

		/* Parameters that take a string value are checked here. */
		if (!strcasecmp(key, "raid10_format")) {
			if (rs->raid_type->level != 10) {
				rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
				return -EINVAL;
			}
			if (strcmp("near", argv[i]) &&
			    strcmp("far", argv[i]) &&
			    strcmp("offset", argv[i])) {
				rs->ti->error = "Invalid 'raid10_format' value given";
				return -EINVAL;
			}
			raid10_format = argv[i];
			rs->print_flags |= DMPF_RAID10_FORMAT;
			continue;
		}

		if (kstrtoul(argv[i], 10, &value) < 0) {
			rs->ti->error = "Bad numerical argument given in raid params";
			return -EINVAL;
		}

		/* Parameters that take a numeric value are checked here */
		if (!strcasecmp(key, "rebuild")) {
			if (value >= rs->md.raid_disks) {
				rs->ti->error = "Invalid rebuild index given";
				return -EINVAL;
			}
			clear_bit(In_sync, &rs->dev[value].rdev.flags);
			rs->dev[value].rdev.recovery_offset = 0;
			rs->print_flags |= DMPF_REBUILD;
		} else if (!strcasecmp(key, "write_mostly")) {
			if (rs->raid_type->level != 1) {
				rs->ti->error = "write_mostly option is only valid for RAID1";
				return -EINVAL;
			}
			if (value >= rs->md.raid_disks) {
				rs->ti->error = "Invalid write_mostly drive index given";
				return -EINVAL;
			}
			set_bit(WriteMostly, &rs->dev[value].rdev.flags);
		} else if (!strcasecmp(key, "max_write_behind")) {
			if (rs->raid_type->level != 1) {
				rs->ti->error = "max_write_behind option is only valid for RAID1";
				return -EINVAL;
			}
			rs->print_flags |= DMPF_MAX_WRITE_BEHIND;

			/*
			 * In device-mapper, we specify things in sectors, but
			 * MD records this value in kB
			 */
			value /= 2;
			if (value > COUNTER_MAX) {
				rs->ti->error = "Max write-behind limit out of range";
				return -EINVAL;
			}
			rs->md.bitmap_info.max_write_behind = value;
		} else if (!strcasecmp(key, "daemon_sleep")) {
			rs->print_flags |= DMPF_DAEMON_SLEEP;
			if (!value || (value > MAX_SCHEDULE_TIMEOUT)) {
				rs->ti->error = "daemon sleep period out of range";
				return -EINVAL;
			}
			rs->md.bitmap_info.daemon_sleep = value;
		} else if (!strcasecmp(key, "stripe_cache")) {
			rs->print_flags |= DMPF_STRIPE_CACHE;

			/*
			 * In device-mapper, we specify things in sectors, but
			 * MD records this value in kB
			 */
			value /= 2;

			if ((rs->raid_type->level != 5) &&
			    (rs->raid_type->level != 6)) {
				rs->ti->error = "Inappropriate argument: stripe_cache";
				return -EINVAL;
			}
			if (raid5_set_cache_size(&rs->md, (int)value)) {
				rs->ti->error = "Bad stripe_cache size";
				return -EINVAL;
			}
		} else if (!strcasecmp(key, "min_recovery_rate")) {
			rs->print_flags |= DMPF_MIN_RECOVERY_RATE;
			if (value > INT_MAX) {
				rs->ti->error = "min_recovery_rate out of range";
				return -EINVAL;
			}
			rs->md.sync_speed_min = (int)value;
		} else if (!strcasecmp(key, "max_recovery_rate")) {
			rs->print_flags |= DMPF_MAX_RECOVERY_RATE;
			if (value > INT_MAX) {
				rs->ti->error = "max_recovery_rate out of range";
				return -EINVAL;
			}
			rs->md.sync_speed_max = (int)value;
		} else if (!strcasecmp(key, "region_size")) {
			rs->print_flags |= DMPF_REGION_SIZE;
			region_size = value;
		} else if (!strcasecmp(key, "raid10_copies") &&
			   (rs->raid_type->level == 10)) {
			if ((value < 2) || (value > 0xFF)) {
				rs->ti->error = "Bad value for 'raid10_copies'";
				return -EINVAL;
			}
			rs->print_flags |= DMPF_RAID10_COPIES;
			raid10_copies = value;
		} else {
			DMERR("Unable to parse RAID parameter: %s", key);
			rs->ti->error = "Unable to parse RAID parameters";
			return -EINVAL;
		}
	}

	if (validate_region_size(rs, region_size))
		return -EINVAL;

	if (rs->md.chunk_sectors)
		max_io_len = rs->md.chunk_sectors;
	else
		max_io_len = region_size;

	if (dm_set_target_max_io_len(rs->ti, max_io_len))
		return -EINVAL;

	if (rs->raid_type->level == 10) {
		if (raid10_copies > rs->md.raid_disks) {
			rs->ti->error = "Not enough devices to satisfy specification";
			return -EINVAL;
		}

		/*
		 * If the format is not "near", we only support
		 * two copies at the moment.
		 */
		if (strcmp("near", raid10_format) && (raid10_copies > 2)) {
			rs->ti->error = "Too many copies for given RAID10 format.";
			return -EINVAL;
		}

		/* (Len * #mirrors) / #devices */
		sectors_per_dev = rs->ti->len * raid10_copies;
		sector_div(sectors_per_dev, rs->md.raid_disks);
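		/* e.g. 4 devices with 2 copies: each device stores ti->len / 2 sectors. */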

		rs->md.layout = raid10_format_to_md_layout(raid10_format,
							   raid10_copies);
		rs->md.new_layout = rs->md.layout;
	} else if ((rs->raid_type->level > 1) &&
		   sector_div(sectors_per_dev,
			      (rs->md.raid_disks - rs->raid_type->parity_devs))) {
		rs->ti->error = "Target length not divisible by number of data devices";
		return -EINVAL;
	}
	rs->md.dev_sectors = sectors_per_dev;

	/* Assume there are no metadata devices until the drives are parsed */
	rs->md.persistent = 0;
	rs->md.external = 1;

	return 0;
}

static void do_table_event(struct work_struct *ws)
{
	struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);

	dm_table_event(rs->ti->table);
}

static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
{
	struct raid_set *rs = container_of(cb, struct raid_set, callbacks);

	return mddev_congested(&rs->md, bits);
}

/*
 * This structure is never routinely used by userspace, unlike md superblocks.
 * Devices with this superblock should only ever be accessed via device-mapper.
 */
#define DM_RAID_MAGIC 0x64526D44
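/* The magic is stored little-endian; its on-disk bytes read "DmRd". */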
struct dm_raid_superblock {
	__le32 magic;		/* "DmRd" */
	__le32 features;	/* Used to indicate possible future changes */

	__le32 num_devices;	/* Number of devices in this array. (Max 64) */
	__le32 array_position;	/* The position of this drive in the array */

	__le64 events;		/* Incremented by md when superblock updated */
	__le64 failed_devices;	/* Bit field of devices to indicate failures */

	/*
	 * This offset tracks the progress of the repair or replacement of
	 * an individual drive.
	 */
	__le64 disk_recovery_offset;

	/*
	 * This offset tracks the progress of the initial array
	 * synchronisation/parity calculation.
	 */
	__le64 array_resync_offset;

	/*
	 * RAID characteristics
	 */
	__le32 level;
	__le32 layout;
	__le32 stripe_sectors;

	/* Remainder of a logical block is zero-filled when writing (see super_sync()). */
} __packed;

static int read_disk_sb(struct md_rdev *rdev, int size)
{
	BUG_ON(!rdev->sb_page);

	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, 1)) {
		DMERR("Failed to read superblock of device at position %d",
		      rdev->raid_disk);
		md_error(rdev->mddev, rdev);
		return -EINVAL;
	}

	rdev->sb_loaded = 1;

	return 0;
}

static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	int i;
	uint64_t failed_devices;
	struct dm_raid_superblock *sb;
	struct raid_set *rs = container_of(mddev, struct raid_set, md);

	sb = page_address(rdev->sb_page);
	failed_devices = le64_to_cpu(sb->failed_devices);

	for (i = 0; i < mddev->raid_disks; i++)
		if (!rs->dev[i].data_dev ||
		    test_bit(Faulty, &(rs->dev[i].rdev.flags)))
			failed_devices |= (1ULL << i);

	memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));

	sb->magic = cpu_to_le32(DM_RAID_MAGIC);
	sb->features = cpu_to_le32(0);	/* No features yet */

	sb->num_devices = cpu_to_le32(mddev->raid_disks);
	sb->array_position = cpu_to_le32(rdev->raid_disk);

	sb->events = cpu_to_le64(mddev->events);
	sb->failed_devices = cpu_to_le64(failed_devices);

	sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
	sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);

	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);
	sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);
}

/*
 * super_load
 *
 * This function creates a superblock if one is not found on the device
 * and will decide which superblock to use if there's a choice.
 *
 * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise
 */
static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
{
	int ret;
	struct dm_raid_superblock *sb;
	struct dm_raid_superblock *refsb;
	uint64_t events_sb, events_refsb;

	rdev->sb_start = 0;
	rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
	if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) {
		DMERR("superblock size (one logical block) is out of range");
		return -EINVAL;
	}

	ret = read_disk_sb(rdev, rdev->sb_size);
	if (ret)
		return ret;

	sb = page_address(rdev->sb_page);

	/*
	 * Two cases that we want to write new superblocks and rebuild:
	 * 1) New device (no matching magic number)
	 * 2) Device specified for rebuild (!In_sync w/ offset == 0)
	 */
	if ((sb->magic != cpu_to_le32(DM_RAID_MAGIC)) ||
	    (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) {
		super_sync(rdev->mddev, rdev);

		set_bit(FirstUse, &rdev->flags);

		/* Force writing of superblocks to disk */
		set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags);

		/* Any superblock is better than none, choose that if given */
		return refdev ? 0 : 1;
	}

	if (!refdev)
		return 1;

	events_sb = le64_to_cpu(sb->events);

	refsb = page_address(refdev->sb_page);
	events_refsb = le64_to_cpu(refsb->events);

	return (events_sb > events_refsb) ? 1 : 0;
}

static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
{
	int role;
	struct raid_set *rs = container_of(mddev, struct raid_set, md);
	uint64_t events_sb;
	uint64_t failed_devices;
	struct dm_raid_superblock *sb;
	uint32_t new_devs = 0;
	uint32_t rebuilds = 0;
	struct md_rdev *r;
	struct dm_raid_superblock *sb2;

	sb = page_address(rdev->sb_page);
	events_sb = le64_to_cpu(sb->events);
	failed_devices = le64_to_cpu(sb->failed_devices);

	/*
	 * Initialise to 1 if this is a new superblock.
	 */
	mddev->events = events_sb ? : 1;

	/*
	 * Reshaping is not currently allowed
	 */
	if (le32_to_cpu(sb->level) != mddev->level) {
		DMERR("Reshaping arrays not yet supported. (RAID level change)");
		return -EINVAL;
	}
	if (le32_to_cpu(sb->layout) != mddev->layout) {
		DMERR("Reshaping arrays not yet supported. (RAID layout change)");
		DMERR("  0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
		DMERR("  Old layout: %s w/ %d copies",
		      raid10_md_layout_to_format(le32_to_cpu(sb->layout)),
		      raid10_md_layout_to_copies(le32_to_cpu(sb->layout)));
		DMERR("  New layout: %s w/ %d copies",
		      raid10_md_layout_to_format(mddev->layout),
		      raid10_md_layout_to_copies(mddev->layout));
		return -EINVAL;
	}
	if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
		DMERR("Reshaping arrays not yet supported. (stripe sectors change)");
		return -EINVAL;
	}

	/* We can only change the number of devices in RAID1 right now */
	if ((rs->raid_type->level != 1) &&
	    (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
		DMERR("Reshaping arrays not yet supported. (device count change)");
		return -EINVAL;
	}

	if (!(rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC)))
		mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);

	/*
	 * During load, we set FirstUse if a new superblock was written.
	 * There are two reasons we might not have a superblock:
	 * 1) The array is brand new - in which case, all of the
	 *    devices must have their In_sync bit set.  Also,
	 *    recovery_cp must be 0, unless forced.
	 * 2) This is a new device being added to an old array
	 *    and the new device needs to be rebuilt - in which
	 *    case the In_sync bit will /not/ be set and
	 *    recovery_cp must be MaxSector.
	 */
	rdev_for_each(r, mddev) {
		if (!test_bit(In_sync, &r->flags)) {
			DMINFO("Device %d specified for rebuild: "
			       "Clearing superblock", r->raid_disk);
			rebuilds++;
		} else if (test_bit(FirstUse, &r->flags))
			new_devs++;
	}

	if (!rebuilds) {
		if (new_devs == mddev->raid_disks) {
			DMINFO("Superblocks created for new array");
			set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
		} else if (new_devs) {
			DMERR("New device injected "
			      "into existing array without 'rebuild' "
			      "parameter specified");
			return -EINVAL;
		}
	} else if (new_devs) {
		DMERR("'rebuild' devices cannot be "
		      "injected into an array with other first-time devices");
		return -EINVAL;
	} else if (mddev->recovery_cp != MaxSector) {
		DMERR("'rebuild' specified while array is not in-sync");
		return -EINVAL;
	}

	/*
	 * Now we set the Faulty bit for those devices that are
	 * recorded in the superblock as failed.
	 */
	rdev_for_each(r, mddev) {
		if (!r->sb_page)
			continue;
		sb2 = page_address(r->sb_page);
		sb2->failed_devices = 0;

		/*
		 * Check for any device re-ordering.
		 */
		if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) {
			role = le32_to_cpu(sb2->array_position);
			if (role != r->raid_disk) {
				if (rs->raid_type->level != 1) {
					rs->ti->error = "Cannot change device "
						"positions in RAID array";
					return -EINVAL;
				}
				DMINFO("RAID1 device #%d now at position #%d",
				       role, r->raid_disk);
			}

			/*
			 * Partial recovery is performed on
			 * returning failed devices.
			 */
			if (failed_devices & (1ULL << role))
				set_bit(Faulty, &r->flags);
		}
	}

	return 0;
}

static int super_validate(struct mddev *mddev, struct md_rdev *rdev)
{
	struct dm_raid_superblock *sb = page_address(rdev->sb_page);

	/*
	 * If mddev->events is not set, we know we have not yet initialized
	 * the array.
	 */
	if (!mddev->events && super_init_validation(mddev, rdev))
		return -EINVAL;

	mddev->bitmap_info.offset = 4096 >> 9; /* Enable bitmap creation */
	rdev->mddev->bitmap_info.default_offset = 4096 >> 9;
	if (!test_bit(FirstUse, &rdev->flags)) {
		rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
		if (rdev->recovery_offset != MaxSector)
			clear_bit(In_sync, &rdev->flags);
	}

	/*
	 * If a device comes back, set it as not In_sync and no longer faulty.
	 */
	if (test_bit(Faulty, &rdev->flags)) {
		clear_bit(Faulty, &rdev->flags);
		clear_bit(In_sync, &rdev->flags);
		rdev->saved_raid_disk = rdev->raid_disk;
		rdev->recovery_offset = 0;
	}

	clear_bit(FirstUse, &rdev->flags);

	return 0;
}

/*
 * Analyse superblocks and select the freshest.
 */
static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
{
	int ret;
	struct raid_dev *dev;
	struct md_rdev *rdev, *tmp, *freshest;
	struct mddev *mddev = &rs->md;

	freshest = NULL;
	rdev_for_each_safe(rdev, tmp, mddev) {
		/*
		 * Skipping super_load due to DMPF_SYNC will cause
		 * the array to undergo initialization again as
		 * though it were new.  This is the intended effect
		 * of the "sync" directive.
		 *
		 * When reshaping capability is added, we must ensure
		 * that the "sync" directive is disallowed during the
		 * reshape.
		 */
		if (rs->print_flags & DMPF_SYNC)
			continue;

		if (!rdev->meta_bdev)
			continue;

		ret = super_load(rdev, freshest);

		switch (ret) {
		case 1:
			freshest = rdev;
			break;
		case 0:
			break;
		default:
			dev = container_of(rdev, struct raid_dev, rdev);
			if (dev->meta_dev)
				dm_put_device(ti, dev->meta_dev);

			dev->meta_dev = NULL;
			rdev->meta_bdev = NULL;

			if (rdev->sb_page)
				put_page(rdev->sb_page);

			rdev->sb_page = NULL;

			rdev->sb_loaded = 0;

			/*
			 * We might be able to salvage the data device
			 * even though the meta device has failed.  For
			 * now, we behave as though '- -' had been
			 * set for this device in the table.
			 */
			if (dev->data_dev)
				dm_put_device(ti, dev->data_dev);

			dev->data_dev = NULL;
			rdev->bdev = NULL;

			list_del(&rdev->same_set);
		}
	}

	if (!freshest)
		return 0;

	if (validate_raid_redundancy(rs)) {
		rs->ti->error = "Insufficient redundancy to activate array";
		return -EINVAL;
	}

	/*
	 * Validation of the freshest device provides the source of
	 * validation for the remaining devices.
	 */
	ti->error = "Unable to assemble array: Invalid superblocks";
	if (super_validate(mddev, freshest))
		return -EINVAL;

	rdev_for_each(rdev, mddev)
		if ((rdev != freshest) && super_validate(mddev, rdev))
			return -EINVAL;

	return 0;
}

/*
 * Enable/disable discard support on RAID set depending on
 * RAID level and discard properties of underlying RAID members.
 */
static void configure_discard_support(struct dm_target *ti, struct raid_set *rs)
{
	int i;
	bool raid456;

	/* Assume discards not supported until after checks below. */
	ti->discards_supported = false;

	/* RAID level 4,5,6 require discard_zeroes_data for data integrity! */
	raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6);

	for (i = 0; i < rs->md.raid_disks; i++) {
		struct request_queue *q;

		if (!rs->dev[i].rdev.bdev)
			continue;

		q = bdev_get_queue(rs->dev[i].rdev.bdev);
		if (!q || !blk_queue_discard(q))
			return;

		if (raid456) {
			if (!q->limits.discard_zeroes_data)
				return;
			if (!devices_handle_discard_safely) {
				DMERR("raid456 discard support disabled due to discard_zeroes_data uncertainty.");
				DMERR("Set dm-raid.devices_handle_discard_safely=Y to override.");
				return;
			}
		}
	}

	/* All RAID members properly support discards */
	ti->discards_supported = true;

	/*
	 * RAID1 and RAID10 personalities require bio splitting,
	 * RAID0/4/5/6 don't and process large discard bios properly.
	 */
	ti->split_discard_bios = (rs->md.level == 1 || rs->md.level == 10);
	ti->num_discard_bios = 1;
}

/*
 * Construct a RAID4/5/6 mapping:
 * Args:
 *	<raid_type> <#raid_params> <raid_params>		\
 *	<#raid_devs> { <meta_dev1> <dev1> .. <meta_devN> <devN> }
 *
 * <raid_params> varies by <raid_type>.  See 'parse_raid_params' for
 * details on possible <raid_params>.
 */
static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int ret;
	struct raid_type *rt;
	unsigned long num_raid_params, num_raid_devs;
	struct raid_set *rs = NULL;

	/* Must have at least <raid_type> <#raid_params> */
	if (argc < 2) {
		ti->error = "Too few arguments";
		return -EINVAL;
	}

	/* raid type */
	rt = get_raid_type(argv[0]);
	if (!rt) {
		ti->error = "Unrecognised raid_type";
		return -EINVAL;
	}
	argc--;
	argv++;

	/* number of RAID parameters */
	if (kstrtoul(argv[0], 10, &num_raid_params) < 0) {
		ti->error = "Cannot understand number of RAID parameters";
		return -EINVAL;
	}
	argc--;
	argv++;

	/* Skip over RAID params for now and find out # of devices */
	if (num_raid_params >= argc) {
		ti->error = "Arguments do not agree with counts given";
		return -EINVAL;
	}

	if ((kstrtoul(argv[num_raid_params], 10, &num_raid_devs) < 0) ||
	    (num_raid_devs >= INT_MAX)) {
		ti->error = "Cannot understand number of raid devices";
		return -EINVAL;
	}

	argc -= num_raid_params + 1; /* +1: we already have num_raid_devs */
	if (argc != (num_raid_devs * 2)) {
		ti->error = "Number of supplied RAID devices does not match the count given";
		return -EINVAL;
	}

	rs = context_alloc(ti, rt, (unsigned)num_raid_devs);
	if (IS_ERR(rs))
		return PTR_ERR(rs);

	ret = parse_raid_params(rs, argv, (unsigned)num_raid_params);
	if (ret)
		goto bad;

	argv += num_raid_params + 1;

	ret = dev_parms(rs, argv);
	if (ret)
		goto bad;

	rs->md.sync_super = super_sync;
	ret = analyse_superblocks(ti, rs);
	if (ret)
		goto bad;

	INIT_WORK(&rs->md.event_work, do_table_event);
	ti->private = rs;
	ti->num_flush_bios = 1;

	/*
	 * Disable/enable discard support on RAID set.
	 */
	configure_discard_support(ti, rs);

	mutex_lock(&rs->md.reconfig_mutex);
	ret = md_run(&rs->md);
	rs->md.in_sync = 0; /* Assume already marked dirty */
	mutex_unlock(&rs->md.reconfig_mutex);

	if (ret) {
		ti->error = "Failed to run raid array";
		goto bad;
	}

	if (ti->len != rs->md.array_sectors) {
		ti->error = "Array size does not match requested target length";
		ret = -EINVAL;
		goto size_mismatch;
	}
	rs->callbacks.congested_fn = raid_is_congested;
	dm_table_add_target_callbacks(ti->table, &rs->callbacks);

	mddev_suspend(&rs->md);
	return 0;

size_mismatch:
	md_stop(&rs->md);
bad:
	context_free(rs);

	return ret;
}

static void raid_dtr(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	list_del_init(&rs->callbacks.list);
	md_stop(&rs->md);
	context_free(rs);
}

static int raid_map(struct dm_target *ti, struct bio *bio)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	mddev->pers->make_request(mddev, bio);

	return DM_MAPIO_SUBMITTED;
}

static const char *decipher_sync_action(struct mddev *mddev)
{
	if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
		return "frozen";

	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
			return "reshape";

		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				return "resync";
			else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
				return "check";
			return "repair";
		}

		if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
			return "recover";
	}

	return "idle";
}

static void raid_status(struct dm_target *ti, status_type_t type,
			unsigned status_flags, char *result, unsigned maxlen)
{
	struct raid_set *rs = ti->private;
	unsigned raid_param_cnt = 1; /* at least 1 for chunksize */
	unsigned sz = 0;
	int i, array_in_sync = 0;
	sector_t sync;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%s %d ", rs->raid_type->name, rs->md.raid_disks);

		if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery))
			sync = rs->md.curr_resync_completed;
		else
			sync = rs->md.recovery_cp;

		if (sync >= rs->md.resync_max_sectors) {
			/*
			 * Sync complete.
			 */
			array_in_sync = 1;
			sync = rs->md.resync_max_sectors;
		} else if (test_bit(MD_RECOVERY_REQUESTED, &rs->md.recovery)) {
			/*
			 * If "check" or "repair" is occurring, the array has
			 * undergone an initial sync and the health characters
			 * should not be 'a' anymore.
			 */
			array_in_sync = 1;
		} else {
			/*
			 * The array may be doing an initial sync, or it may
			 * be rebuilding individual components.  If all the
			 * devices are In_sync, then it is the array that is
			 * being initialized.
			 */
			for (i = 0; i < rs->md.raid_disks; i++)
				if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
					array_in_sync = 1;
		}

		/*
		 * Status characters:
		 *  'D' = Dead/Failed device
		 *  'a' = Alive but not in-sync
		 *  'A' = Alive and in-sync
		 */
		for (i = 0; i < rs->md.raid_disks; i++) {
			if (test_bit(Faulty, &rs->dev[i].rdev.flags))
				DMEMIT("D");
			else if (!array_in_sync ||
				 !test_bit(In_sync, &rs->dev[i].rdev.flags))
				DMEMIT("a");
			else
				DMEMIT("A");
		}

		/*
		 * In-sync ratio:
		 *  The in-sync ratio shows the progress of:
		 *   - Initializing the array
		 *   - Rebuilding a subset of devices of the array
		 *  The user can distinguish between the two by referring
		 *  to the status characters.
		 */
		DMEMIT(" %llu/%llu",
		       (unsigned long long) sync,
		       (unsigned long long) rs->md.resync_max_sectors);

		/*
		 * Sync action:
		 *   See Documentation/device-mapper/dm-raid.txt for
		 *   information on each of these states.
		 */
		DMEMIT(" %s", decipher_sync_action(&rs->md));

		/*
		 * resync_mismatches/mismatch_cnt
		 *   This field shows the number of discrepancies found when
		 *   performing a "check" of the array.
		 */
		DMEMIT(" %llu",
		       (strcmp(rs->md.last_sync_action, "check")) ? 0 :
		       (unsigned long long)
		       atomic64_read(&rs->md.resync_mismatches));
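
		/*
		 * e.g. a clean two-disk RAID1 reports:
		 *   "raid1 2 AA 976762880/976762880 idle 0"
		 */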
		break;
	case STATUSTYPE_TABLE:
		/* The string you would use to construct this array */
		for (i = 0; i < rs->md.raid_disks; i++) {
			if ((rs->print_flags & DMPF_REBUILD) &&
			    rs->dev[i].data_dev &&
			    !test_bit(In_sync, &rs->dev[i].rdev.flags))
				raid_param_cnt += 2; /* for rebuilds */
			if (rs->dev[i].data_dev &&
			    test_bit(WriteMostly, &rs->dev[i].rdev.flags))
				raid_param_cnt += 2;
		}

		raid_param_cnt += (hweight32(rs->print_flags & ~DMPF_REBUILD) * 2);
		if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC))
			raid_param_cnt--;

		DMEMIT("%s %u %u", rs->raid_type->name,
		       raid_param_cnt, rs->md.chunk_sectors);

		if ((rs->print_flags & DMPF_SYNC) &&
		    (rs->md.recovery_cp == MaxSector))
			DMEMIT(" sync");
		if (rs->print_flags & DMPF_NOSYNC)
			DMEMIT(" nosync");

		for (i = 0; i < rs->md.raid_disks; i++)
			if ((rs->print_flags & DMPF_REBUILD) &&
			    rs->dev[i].data_dev &&
			    !test_bit(In_sync, &rs->dev[i].rdev.flags))
				DMEMIT(" rebuild %u", i);

		if (rs->print_flags & DMPF_DAEMON_SLEEP)
			DMEMIT(" daemon_sleep %lu",
			       rs->md.bitmap_info.daemon_sleep);

		if (rs->print_flags & DMPF_MIN_RECOVERY_RATE)
			DMEMIT(" min_recovery_rate %d", rs->md.sync_speed_min);

		if (rs->print_flags & DMPF_MAX_RECOVERY_RATE)
			DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max);

		for (i = 0; i < rs->md.raid_disks; i++)
			if (rs->dev[i].data_dev &&
			    test_bit(WriteMostly, &rs->dev[i].rdev.flags))
				DMEMIT(" write_mostly %u", i);

		if (rs->print_flags & DMPF_MAX_WRITE_BEHIND)
			DMEMIT(" max_write_behind %lu",
			       rs->md.bitmap_info.max_write_behind);

		if (rs->print_flags & DMPF_STRIPE_CACHE) {
			struct r5conf *conf = rs->md.private;

			/* convert from kiB to sectors */
			DMEMIT(" stripe_cache %d",
			       conf ? conf->max_nr_stripes * 2 : 0);
		}

		if (rs->print_flags & DMPF_REGION_SIZE)
			DMEMIT(" region_size %lu",
			       rs->md.bitmap_info.chunksize >> 9);

		if (rs->print_flags & DMPF_RAID10_COPIES)
			DMEMIT(" raid10_copies %u",
			       raid10_md_layout_to_copies(rs->md.layout));

		if (rs->print_flags & DMPF_RAID10_FORMAT)
			DMEMIT(" raid10_format %s",
			       raid10_md_layout_to_format(rs->md.layout));

		DMEMIT(" %d", rs->md.raid_disks);
		for (i = 0; i < rs->md.raid_disks; i++) {
			if (rs->dev[i].meta_dev)
				DMEMIT(" %s", rs->dev[i].meta_dev->name);
			else
				DMEMIT(" -");

			if (rs->dev[i].data_dev)
				DMEMIT(" %s", rs->dev[i].data_dev->name);
			else
				DMEMIT(" -");
		}
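
		/*
		 * e.g. a minimal two-disk RAID1 (chunk_size only, no
		 * metadata devices) emits:
		 *   "raid1 1 0 2 - 8:17 - 8:33"
		 */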
	}
}

static int raid_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	if (!strcasecmp(argv[0], "reshape")) {
		DMERR("Reshape not supported.");
		return -EINVAL;
	}

	if (!mddev->pers || !mddev->pers->sync_request)
		return -EINVAL;

	if (!strcasecmp(argv[0], "frozen"))
		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	else
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);

	if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
		if (mddev->sync_thread) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_reap_sync_thread(mddev);
		}
	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
		   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return -EBUSY;
	else if (!strcasecmp(argv[0], "resync"))
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	else if (!strcasecmp(argv[0], "recover")) {
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	} else {
		if (!strcasecmp(argv[0], "check"))
			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		else if (strcasecmp(argv[0], "repair"))
			return -EINVAL;
		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	}
	if (mddev->ro == 2) {
		/* A write to sync_action is enough to justify
		 * canceling read-auto mode
		 */
		mddev->ro = 0;
		if (!mddev->suspended)
			md_wakeup_thread(mddev->sync_thread);
	}
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	if (!mddev->suspended)
		md_wakeup_thread(mddev->thread);

	return 0;
}

static int raid_iterate_devices(struct dm_target *ti,
				iterate_devices_callout_fn fn, void *data)
{
	struct raid_set *rs = ti->private;
	unsigned i;
	int ret = 0;

	for (i = 0; !ret && i < rs->md.raid_disks; i++)
		if (rs->dev[i].data_dev)
			ret = fn(ti,
				 rs->dev[i].data_dev,
				 0, /* No offset on data devs */
				 rs->md.dev_sectors,
				 data);

	return ret;
}

static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct raid_set *rs = ti->private;
	unsigned chunk_size = rs->md.chunk_sectors << 9;
	struct r5conf *conf = rs->md.private;

	blk_limits_io_min(limits, chunk_size);
	blk_limits_io_opt(limits, chunk_size * (conf->raid_disks - conf->max_degraded));
}

static void raid_presuspend(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	md_stop_writes(&rs->md);
}

static void raid_postsuspend(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	mddev_suspend(&rs->md);
}

static void attempt_restore_of_faulty_devices(struct raid_set *rs)
{
	int i;
	uint64_t failed_devices, cleared_failed_devices = 0;
	unsigned long flags;
	struct dm_raid_superblock *sb;
	struct md_rdev *r;

	for (i = 0; i < rs->md.raid_disks; i++) {
		r = &rs->dev[i].rdev;
		if (test_bit(Faulty, &r->flags) && r->sb_page &&
		    sync_page_io(r, 0, r->sb_size, r->sb_page, READ, 1)) {
			DMINFO("Faulty %s device #%d has readable super block."
			       "  Attempting to revive it.",
			       rs->raid_type->name, i);

			/*
			 * Faulty bit may be set, but sometimes the array can
			 * be suspended before the personalities can respond
			 * by removing the device from the array (i.e. calling
			 * 'hot_remove_disk').  If they haven't yet removed
			 * the failed device, its 'raid_disk' number will be
			 * '>= 0' - meaning we must call this function
			 * ourselves.
			 */
			if ((r->raid_disk >= 0) &&
			    (r->mddev->pers->hot_remove_disk(r->mddev, r) != 0))
				/* Failed to revive this device, try next */
				continue;

			r->raid_disk = i;
			r->saved_raid_disk = i;
			flags = r->flags;
			clear_bit(Faulty, &r->flags);
			clear_bit(WriteErrorSeen, &r->flags);
			clear_bit(In_sync, &r->flags);
			if (r->mddev->pers->hot_add_disk(r->mddev, r)) {
				r->raid_disk = -1;
				r->saved_raid_disk = -1;
				r->flags = flags;
			} else {
				r->recovery_offset = 0;
				cleared_failed_devices |= 1ULL << i;
			}
		}
	}
	if (cleared_failed_devices) {
		rdev_for_each(r, &rs->md) {
			sb = page_address(r->sb_page);
			failed_devices = le64_to_cpu(sb->failed_devices);
			failed_devices &= ~cleared_failed_devices;
			sb->failed_devices = cpu_to_le64(failed_devices);
		}
	}
}

static void raid_resume(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	set_bit(MD_CHANGE_DEVS, &rs->md.flags);
	if (!rs->bitmap_loaded) {
		bitmap_load(&rs->md);
		rs->bitmap_loaded = 1;
	} else {
		/*
		 * A secondary resume while the device is active.
		 * Take this opportunity to check whether any failed
		 * devices are reachable again.
		 */
		attempt_restore_of_faulty_devices(rs);
	}

	clear_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
	mddev_resume(&rs->md);
}

static struct target_type raid_target = {
	.name = "raid",
	.version = {1, 6, 0},
	.module = THIS_MODULE,
	.ctr = raid_ctr,
	.dtr = raid_dtr,
	.map = raid_map,
	.status = raid_status,
	.message = raid_message,
	.iterate_devices = raid_iterate_devices,
	.io_hints = raid_io_hints,
	.presuspend = raid_presuspend,
	.postsuspend = raid_postsuspend,
	.resume = raid_resume,
};

static int __init dm_raid_init(void)
{
	DMINFO("Loading target version %u.%u.%u",
	       raid_target.version[0],
	       raid_target.version[1],
	       raid_target.version[2]);
	return dm_register_target(&raid_target);
}

static void __exit dm_raid_exit(void)
{
	dm_unregister_target(&raid_target);
}

module_init(dm_raid_init);
module_exit(dm_raid_exit);

module_param(devices_handle_discard_safely, bool, 0644);
MODULE_PARM_DESC(devices_handle_discard_safely,
		 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
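
/*
 * With the 0644 permissions above, the parameter can also be toggled at
 * runtime via /sys/module/dm_raid/parameters/devices_handle_discard_safely.
 */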

MODULE_DESCRIPTION(DM_NAME " raid4/5/6 target");
MODULE_ALIAS("dm-raid1");
MODULE_ALIAS("dm-raid10");
MODULE_ALIAS("dm-raid4");
MODULE_ALIAS("dm-raid5");
MODULE_ALIAS("dm-raid6");
MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");