/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 */

#include <linux/crc32.h>
#include "ubi.h"

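/*
 * Self-check machinery: when fastmap debugging checks are enabled,
 * init_seen() allocates an integer array with one slot per PEB and
 * set_seen() marks every PEB the fastmap code touches. After a fastmap
 * has been written, self_check_seen() verifies that no PEB present in
 * ubi->lookuptbl was missed.
 */
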
/**
 * init_seen - allocate memory for the seen logic integer array used for debugging.
 * @ubi: UBI device description object
 */
static inline int *init_seen(struct ubi_device *ubi)
{
	int *ret;

	if (!ubi_dbg_chk_fastmap(ubi))
		return NULL;

	ret = kcalloc(ubi->peb_count, sizeof(int), GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	return ret;
}

/**
 * free_seen - free the seen logic integer array.
 * @seen: integer array of @ubi->peb_count size
 */
static inline void free_seen(int *seen)
{
	kfree(seen);
}

/**
 * set_seen - mark a PEB as seen.
 * @ubi: UBI device description object
 * @pnum: The PEB to be marked as seen
 * @seen: integer array of @ubi->peb_count size
 */
static inline void set_seen(struct ubi_device *ubi, int pnum, int *seen)
{
	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
		return;

	seen[pnum] = 1;
}

/**
 * self_check_seen - check whether all PEBs have been seen by fastmap.
 * @ubi: UBI device description object
 * @seen: integer array of @ubi->peb_count size
 */
static int self_check_seen(struct ubi_device *ubi, int *seen)
{
	int pnum, ret = 0;

	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
		return 0;

	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
		if (!seen[pnum] && ubi->lookuptbl[pnum]) {
			ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
			ret = -EINVAL;
		}
	}

	return ret;
}

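/*
 * The size calculated below accounts for the on-flash fastmap layout in
 * the order it is written and parsed: the fastmap super block, the
 * fastmap header, two scan pools (the regular pool and the WL pool),
 * one ubi_fm_ec entry per PEB, an EBA area sized for up to
 * @ubi->peb_count __be32 entries, and one volume header per possible
 * volume (UBI_MAX_VOLUMES). The total is rounded up to a multiple of
 * the LEB size since the fastmap is stored in whole LEBs.
 */
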
/**
 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
 * @ubi: UBI device description object
 */
size_t ubi_calc_fm_size(struct ubi_device *ubi)
{
	size_t size;

	size = sizeof(struct ubi_fm_sb) +
		sizeof(struct ubi_fm_hdr) +
		sizeof(struct ubi_fm_scan_pool) +
		sizeof(struct ubi_fm_scan_pool) +
		(ubi->peb_count * sizeof(struct ubi_fm_ec)) +
		(sizeof(struct ubi_fm_eba) +
		(ubi->peb_count * sizeof(__be32))) +
		sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
	return roundup(size, ubi->leb_size);
}

/**
 * new_fm_vhdr - allocate a new volume header for fastmap usage.
 * @ubi: UBI device description object
 * @vol_id: the VID of the new header
 *
 * Returns a new struct ubi_vid_hdr on success.
 * NULL indicates out of memory.
 */
static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
{
	struct ubi_vid_hdr *new;

	new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!new)
		goto out;

	new->vol_type = UBI_VID_DYNAMIC;
	new->vol_id = cpu_to_be32(vol_id);

	/* UBI implementations without fastmap support have to delete the
	 * fastmap.
	 */
	new->compat = UBI_COMPAT_DELETE;

out:
	return new;
}

/**
 * add_aeb - create and add an attach erase block to a given list.
 * @ai: UBI attach info object
 * @list: the target list
 * @pnum: PEB number of the new attach erase block
 * @ec: erase counter of the new LEB
 * @scrub: scrub this PEB after attaching
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
		   int pnum, int ec, int scrub)
{
	struct ubi_ainf_peb *aeb;

	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
	if (!aeb)
		return -ENOMEM;

	aeb->pnum = pnum;
	aeb->ec = ec;
	aeb->lnum = -1;
	aeb->scrub = scrub;
	aeb->copy_flag = aeb->sqnum = 0;

	ai->ec_sum += aeb->ec;
	ai->ec_count++;

	if (ai->max_ec < aeb->ec)
		ai->max_ec = aeb->ec;

	if (ai->min_ec > aeb->ec)
		ai->min_ec = aeb->ec;

	list_add_tail(&aeb->u.list, list);

	return 0;
}

/**
 * add_vol - create and add a new volume to ubi_attach_info.
 * @ai: ubi_attach_info object
 * @vol_id: VID of the new volume
 * @used_ebs: number of used EBs
 * @data_pad: data padding value of the new volume
 * @vol_type: volume type
 * @last_eb_bytes: number of bytes in the last LEB
 *
 * Returns the new struct ubi_ainf_volume on success.
 * NULL indicates an error.
 */
static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
				       int used_ebs, int data_pad, u8 vol_type,
				       int last_eb_bytes)
{
	struct ubi_ainf_volume *av;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;

	while (*p) {
		parent = *p;
		av = rb_entry(parent, struct ubi_ainf_volume, rb);

		if (vol_id > av->vol_id)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
	if (!av)
		goto out;

	av->highest_lnum = av->leb_count = av->used_ebs = 0;
	av->vol_id = vol_id;
	av->data_pad = data_pad;
	av->last_data_size = last_eb_bytes;
	av->compat = 0;
	av->vol_type = vol_type;
	av->root = RB_ROOT;
	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = used_ebs;

	dbg_bld("found volume (ID %i)", vol_id);

	rb_link_node(&av->rb, parent, p);
	rb_insert_color(&av->rb, &ai->volumes);

out:
	return av;
}

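/*
 * Note that the volume RB-tree built above is keyed in inverted order:
 * higher vol_ids go to the left. The lookup in process_pool_aeb() below
 * uses the same inverted comparison, so both sides agree.
 */
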
/**
 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
 * from its original list.
 * @ai: ubi_attach_info object
 * @aeb: the SEB to be assigned
 * @av: target scan volume
 */
static void assign_aeb_to_av(struct ubi_attach_info *ai,
			     struct ubi_ainf_peb *aeb,
			     struct ubi_ainf_volume *av)
{
	struct ubi_ainf_peb *tmp_aeb;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;

	p = &av->root.rb_node;
	while (*p) {
		parent = *p;

		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
		if (aeb->lnum != tmp_aeb->lnum) {
			if (aeb->lnum < tmp_aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		} else
			break;
	}

	list_del(&aeb->u.list);
	av->leb_count++;

	rb_link_node(&aeb->u.rb, parent, p);
	rb_insert_color(&aeb->u.rb, &av->root);
}

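/*
 * A LEB found in a pool may duplicate a LEB already present in the
 * persistent EBA table, e.g. if it was rewritten after the fastmap was
 * created. update_vol() below resolves such conflicts by comparing the
 * two copies with ubi_compare_lebs(); the losing PEB is queued for
 * erasure.
 */
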
/**
 * update_vol - inserts or updates a LEB which was found in a pool.
 * @ubi: the UBI device object
 * @ai: attach info object
 * @av: the volume this LEB belongs to
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
		      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
		      struct ubi_ainf_peb *new_aeb)
{
	struct rb_node **p = &av->root.rb_node, *parent = NULL;
	struct ubi_ainf_peb *aeb, *victim;
	int cmp_res;

	while (*p) {
		parent = *p;
		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);

		if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
			if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		}

		/* This case can happen if the fastmap gets written
		 * because of a volume change (creation, deletion, ..).
		 * Then a PEB can be within the persistent EBA and the pool.
		 */
		if (aeb->pnum == new_aeb->pnum) {
			ubi_assert(aeb->lnum == new_aeb->lnum);
			kmem_cache_free(ai->aeb_slab_cache, new_aeb);

			return 0;
		}

		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
		if (cmp_res < 0)
			return cmp_res;

		/* new_aeb is newer */
		if (cmp_res & 1) {
			victim = kmem_cache_alloc(ai->aeb_slab_cache,
				GFP_KERNEL);
			if (!victim)
				return -ENOMEM;

			victim->ec = aeb->ec;
			victim->pnum = aeb->pnum;
			list_add_tail(&victim->u.list, &ai->erase);

			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
				av->last_data_size =
					be32_to_cpu(new_vh->data_size);

			dbg_bld("vol %i: AEB %i's PEB %i is the newer",
				av->vol_id, aeb->lnum, new_aeb->pnum);

			aeb->ec = new_aeb->ec;
			aeb->pnum = new_aeb->pnum;
			aeb->copy_flag = new_vh->copy_flag;
			aeb->scrub = new_aeb->scrub;
			kmem_cache_free(ai->aeb_slab_cache, new_aeb);

		/* new_aeb is older */
		} else {
			dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
				av->vol_id, aeb->lnum, new_aeb->pnum);
			list_add_tail(&new_aeb->u.list, &ai->erase);
		}

		return 0;
	}
	/* This LEB is new, let's add it to the volume */

	if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
		av->highest_lnum = be32_to_cpu(new_vh->lnum);
		av->last_data_size = be32_to_cpu(new_vh->data_size);
	}

	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = be32_to_cpu(new_vh->used_ebs);

	av->leb_count++;

	rb_link_node(&new_aeb->u.rb, parent, p);
	rb_insert_color(&new_aeb->u.rb, &av->root);

	return 0;
}

/**
 * process_pool_aeb - process a non-empty PEB found in a pool.
 * @ubi: UBI device object
 * @ai: attach info object
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
			    struct ubi_vid_hdr *new_vh,
			    struct ubi_ainf_peb *new_aeb)
{
	struct ubi_ainf_volume *av, *tmp_av = NULL;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
	int found = 0;

	if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
	    be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
		kmem_cache_free(ai->aeb_slab_cache, new_aeb);

		return 0;
	}

	/* Find the volume this SEB belongs to */
	while (*p) {
		parent = *p;
		tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);

		if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
			p = &(*p)->rb_left;
		else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
			p = &(*p)->rb_right;
		else {
			found = 1;
			break;
		}
	}

	if (found)
		av = tmp_av;
	else {
		ubi_err(ubi, "orphaned volume in fastmap pool!");
		kmem_cache_free(ai->aeb_slab_cache, new_aeb);
		return UBI_BAD_FASTMAP;
	}

	ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);

	return update_vol(ubi, ai, av, new_vh, new_aeb);
}

/**
 * unmap_peb - unmap a PEB.
 * @ai: UBI attach info object
 * @pnum: The PEB to be unmapped
 *
 * If fastmap detects a free PEB in the pool it has to check whether
 * this PEB has been unmapped after writing the fastmap.
 */
static void unmap_peb(struct ubi_attach_info *ai, int pnum)
{
	struct ubi_ainf_volume *av;
	struct rb_node *node, *node2;
	struct ubi_ainf_peb *aeb;

	for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
		av = rb_entry(node, struct ubi_ainf_volume, rb);

		for (node2 = rb_first(&av->root); node2;
		     node2 = rb_next(node2)) {
			aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
			if (aeb->pnum == pnum) {
				rb_erase(&aeb->u.rb, &av->root);
				av->leb_count--;
				kmem_cache_free(ai->aeb_slab_cache, aeb);
				return;
			}
		}
	}
}

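/*
 * For each pool PEB, scan_pool() below decides as follows:
 *  - EC header unreadable or invalid: the pool (and thus the fastmap)
 *    is rejected
 *  - VID header reads as empty (0xFF): the PEB was unmapped after the
 *    fastmap was written, treat it as free
 *  - VID header valid: hand the PEB over to process_pool_aeb()
 *  - anything else: be paranoid and fall back to scanning mode
 * Bitflips in either header additionally schedule the PEB for
 * scrubbing.
 */
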
/**
 * scan_pool - scans a pool for changed (no longer empty) PEBs.
 * @ubi: UBI device object
 * @ai: attach info object
 * @pebs: an array of all PEB numbers in the pool to be scanned
 * @pool_size: size of the pool (number of entries in @pebs)
 * @max_sqnum: pointer to the maximal sequence number
 * @free: list of PEBs which are most likely free (and go into @ai->free)
 *
 * Returns 0 on success, UBI_BAD_FASTMAP if the pool is unusable.
 * < 0 indicates an internal error.
 */
static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     int *pebs, int pool_size, unsigned long long *max_sqnum,
		     struct list_head *free)
{
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_ainf_peb *new_aeb;
	int i, pnum, err, ret = 0;

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech)
		return -ENOMEM;

	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vh) {
		kfree(ech);
		return -ENOMEM;
	}

	dbg_bld("scanning fastmap pool: size = %i", pool_size);

	/*
	 * Now scan all PEBs in the pool to find changes which have been made
	 * after the creation of the fastmap
	 */
	for (i = 0; i < pool_size; i++) {
		int scrub = 0;
		int image_seq;

		pnum = be32_to_cpu(pebs[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ubi_err(ubi, "bad PEB in fastmap pool!");
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (err && err != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
				pnum, err);
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		} else if (err == UBI_IO_BITFLIPS)
			scrub = 1;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		image_seq = be32_to_cpu(ech->image_seq);

		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
			unsigned long long ec = be64_to_cpu(ech->ec);

			unmap_peb(ai, pnum);
			dbg_bld("Adding PEB to free: %i", pnum);
			if (err == UBI_IO_FF_BITFLIPS)
				add_aeb(ai, free, pnum, ec, 1);
			else
				add_aeb(ai, free, pnum, ec, 0);
			continue;
		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
			dbg_bld("Found non empty PEB:%i in pool", pnum);

			if (err == UBI_IO_BITFLIPS)
				scrub = 1;

			new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
						   GFP_KERNEL);
			if (!new_aeb) {
				ret = -ENOMEM;
				goto out;
			}

			new_aeb->ec = be64_to_cpu(ech->ec);
			new_aeb->pnum = pnum;
			new_aeb->lnum = be32_to_cpu(vh->lnum);
			new_aeb->sqnum = be64_to_cpu(vh->sqnum);
			new_aeb->copy_flag = vh->copy_flag;
			new_aeb->scrub = scrub;

			if (*max_sqnum < new_aeb->sqnum)
				*max_sqnum = new_aeb->sqnum;

			err = process_pool_aeb(ubi, ai, vh, new_aeb);
			if (err) {
				ret = err > 0 ? UBI_BAD_FASTMAP : err;
				goto out;
			}
		} else {
			/* We are paranoid and fall back to scanning mode */
			ubi_err(ubi, "fastmap pool contains damaged PEBs!");
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		}
	}

out:
	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
	return ret;
}

/**
 * count_fastmap_pebs - Counts the PEBs found by fastmap.
 * @ai: The UBI attach info object
 */
static int count_fastmap_pebs(struct ubi_attach_info *ai)
{
	struct ubi_ainf_peb *aeb;
	struct ubi_ainf_volume *av;
	struct rb_node *rb1, *rb2;
	int n = 0;

	list_for_each_entry(aeb, &ai->erase, u.list)
		n++;

	list_for_each_entry(aeb, &ai->free, u.list)
		n++;

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
			n++;

	return n;
}

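/*
 * ubi_attach_fastmap() walks the in-memory fastmap image in the same
 * order in which ubi_write_fastmap() emits it: super block, header,
 * both scan pools, the free/used/scrub/erase EC lists and finally the
 * per-volume EBA tables. As a final sanity check, the number of PEBs
 * found must equal ubi->peb_count minus the bad PEBs and the blocks
 * occupied by the fastmap itself.
 */
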
/**
 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info object
 * @fm: the fastmap to be attached
 *
 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
 * < 0 indicates an internal error.
 */
static int ubi_attach_fastmap(struct ubi_device *ubi,
			      struct ubi_attach_info *ai,
			      struct ubi_fastmap_layout *fm)
{
	struct list_head used, free;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmhdr;
	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
	struct ubi_fm_ec *fmec;
	struct ubi_fm_volhdr *fmvhdr;
	struct ubi_fm_eba *fm_eba;
	int ret, i, j, pool_size, wl_pool_size;
	size_t fm_pos = 0, fm_size = ubi->fm_size;
	unsigned long long max_sqnum = 0;
	void *fm_raw = ubi->fm_buf;

	INIT_LIST_HEAD(&used);
	INIT_LIST_HEAD(&free);
	ai->min_ec = UBI_MAX_ERASECOUNTER;

	fmsb = (struct ubi_fm_sb *)(fm_raw);
	ai->max_sqnum = fmsb->sqnum;
	fm_pos += sizeof(struct ubi_fm_sb);
	if (fm_pos >= fm_size)
		goto fail_bad;

	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmhdr);
	if (fm_pos >= fm_size)
		goto fail_bad;

	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
		ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
		goto fail_bad;
	}

	fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl1);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl2);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	pool_size = be16_to_cpu(fmpl1->size);
	wl_pool_size = be16_to_cpu(fmpl2->size);
	fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
	fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);

	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
		ubi_err(ubi, "bad pool size: %i", pool_size);
		goto fail_bad;
	}

	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
		ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
		goto fail_bad;
	}

	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_pool_size < 0) {
		ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
		goto fail_bad;
	}

	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_wl_pool_size < 0) {
		ubi_err(ubi, "bad maximal WL pool size: %i",
			fm->max_wl_pool_size);
		goto fail_bad;
	}

	/* read EC values from free list */
	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 0);
	}

	/* read EC values from used list */
	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 0);
	}

	/* read EC values from scrub list */
	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 1);
	}

	/* read EC values from erase list */
	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 1);
	}

	ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
	ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);

	/* Iterate over all volumes and read their EBA table */
	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmvhdr);
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
			ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
			goto fail_bad;
		}

		av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
			     be32_to_cpu(fmvhdr->used_ebs),
			     be32_to_cpu(fmvhdr->data_pad),
			     fmvhdr->vol_type,
			     be32_to_cpu(fmvhdr->last_eb_bytes));

		if (!av)
			goto fail_bad;

		ai->vols_found++;
		if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
			ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);

		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fm_eba);
		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
			ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
			goto fail_bad;
		}

		for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
			int pnum = be32_to_cpu(fm_eba->pnum[j]);

			if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
				continue;

			aeb = NULL;
			list_for_each_entry(tmp_aeb, &used, u.list) {
				if (tmp_aeb->pnum == pnum) {
					aeb = tmp_aeb;
					break;
				}
			}

			if (!aeb) {
				ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
				goto fail_bad;
			}

			aeb->lnum = j;

			if (av->highest_lnum <= aeb->lnum)
				av->highest_lnum = aeb->lnum;

			assign_aeb_to_av(ai, aeb, av);

			dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
				aeb->pnum, aeb->lnum, av->vol_id);
		}
	}

	ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum, &free);
	if (ret)
		goto fail;

	ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum, &free);
	if (ret)
		goto fail;

	if (max_sqnum > ai->max_sqnum)
		ai->max_sqnum = max_sqnum;

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->free);

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->erase);

	ubi_assert(list_empty(&free));

	/*
	 * If fastmap is leaking PEBs (must not happen), raise a
	 * fat warning and fall back to scanning mode.
	 * We do this here because in ubi_wl_init() it's too late
	 * and we cannot fall back to scanning.
	 */
	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
		    ai->bad_peb_count - fm->used_blocks))
		goto fail_bad;

	return 0;

fail_bad:
	ret = UBI_BAD_FASTMAP;
fail:
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
		list_del(&tmp_aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
	}
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
		list_del(&tmp_aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
	}

	return ret;
}

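/*
 * Attaching by fastmap proceeds in these steps: read and validate the
 * fastmap super block from the anchor PEB, read all fastmap blocks
 * into ubi->fm_buf while checking their EC/VID headers and the image
 * sequence number, verify the data CRC over the whole buffer, and
 * finally build the attach info via ubi_attach_fastmap().
 */
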
/**
 * ubi_scan_fastmap - scan the fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info to be filled
 * @fm_anchor: The fastmap starts at this PEB
 *
 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
 * UBI_BAD_FASTMAP if one was found but is not usable.
 * < 0 indicates an internal error.
 */
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     int fm_anchor)
{
	struct ubi_fm_sb *fmsb, *fmsb2;
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_fastmap_layout *fm;
	int i, used_blocks, pnum, ret = 0;
	size_t fm_size;
	__be32 crc, tmp_crc;
	unsigned long long sqnum = 0;

	down_write(&ubi->fm_protect);
	memset(ubi->fm_buf, 0, ubi->fm_size);

	fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
	if (!fmsb) {
		ret = -ENOMEM;
		goto out;
	}

	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm) {
		ret = -ENOMEM;
		kfree(fmsb);
		goto out;
	}

	ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
	if (ret && ret != UBI_IO_BITFLIPS)
		goto free_fm_sb;
	else if (ret == UBI_IO_BITFLIPS)
		fm->to_be_tortured[0] = 1;

	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
		ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	if (fmsb->version != UBI_FM_FMT_VERSION) {
		ubi_err(ubi, "bad fastmap version: %i, expected: %i",
			fmsb->version, UBI_FM_FMT_VERSION);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	used_blocks = be32_to_cpu(fmsb->used_blocks);
	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
		ubi_err(ubi, "number of fastmap blocks is invalid: %i",
			used_blocks);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	fm_size = ubi->leb_size * used_blocks;
	if (fm_size != ubi->fm_size) {
		ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
			fm_size, ubi->fm_size);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech) {
		ret = -ENOMEM;
		goto free_fm_sb;
	}

	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vh) {
		ret = -ENOMEM;
		goto free_hdr;
	}

	for (i = 0; i < used_blocks; i++) {
		int image_seq;

		pnum = be32_to_cpu(fmsb->block_loc[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
				i, pnum);
			if (ret > 0)
				ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		} else if (ret == UBI_IO_BITFLIPS)
			fm->to_be_tortured[i] = 1;

		image_seq = be32_to_cpu(ech->image_seq);
		if (!ubi->image_seq)
			ubi->image_seq = image_seq;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "wrong image seq:%d instead of %d",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
				i, pnum);
			goto free_hdr;
		}

		if (i == 0) {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
				ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_SB_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		} else {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
				ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_DATA_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		}

		if (sqnum < be64_to_cpu(vh->sqnum))
			sqnum = be64_to_cpu(vh->sqnum);

		ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
				  ubi->leb_start, ubi->leb_size);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, err: %i)",
				i, pnum, ret);
			goto free_hdr;
		}
	}

	kfree(fmsb);
	fmsb = NULL;

	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
	tmp_crc = be32_to_cpu(fmsb2->data_crc);
	fmsb2->data_crc = 0;
	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
	if (crc != tmp_crc) {
		ubi_err(ubi, "fastmap data CRC is invalid");
		ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
			tmp_crc, crc);
		ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	fmsb2->sqnum = sqnum;

	fm->used_blocks = used_blocks;

	ret = ubi_attach_fastmap(ubi, ai, fm);
	if (ret) {
		if (ret > 0)
			ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	for (i = 0; i < used_blocks; i++) {
		struct ubi_wl_entry *e;

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e) {
			while (i--)
				kfree(fm->e[i]);

			ret = -ENOMEM;
			goto free_hdr;
		}

		e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
		e->ec = be32_to_cpu(fmsb2->block_ec[i]);
		fm->e[i] = e;
	}

	ubi->fm = fm;
	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
	ubi_msg(ubi, "attached by fastmap");
	ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_msg(ubi, "fastmap WL pool size: %d",
		ubi->fm_wl_pool.max_size);
	ubi->fm_disabled = 0;
	ubi->fast_attach = 1;

	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
out:
	up_write(&ubi->fm_protect);
	if (ret == UBI_BAD_FASTMAP)
		ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
	return ret;

free_hdr:
	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
free_fm_sb:
	kfree(fmsb);
	kfree(fm);
	goto out;
}

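/*
 * Note the order in ubi_write_fastmap(): the fastmap image is built in
 * ubi->fm_buf under volumes_lock/wl_lock, the anchor VID header is
 * written first with a fresh sequence number, the data CRC is computed
 * over the complete buffer with the CRC field zeroed, and only then are
 * the VID headers of the remaining blocks and the fastmap data itself
 * written out LEB by LEB.
 */
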
/**
 * ubi_write_fastmap - writes a fastmap.
 * @ubi: UBI device object
 * @new_fm: the fastmap to be written
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int ubi_write_fastmap(struct ubi_device *ubi,
			     struct ubi_fastmap_layout *new_fm)
{
	size_t fm_pos = 0;
	void *fm_raw;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmh;
	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
	struct ubi_fm_ec *fec;
	struct ubi_fm_volhdr *fvh;
	struct ubi_fm_eba *feba;
	struct ubi_wl_entry *wl_e;
	struct ubi_volume *vol;
	struct ubi_vid_hdr *avhdr, *dvhdr;
	struct ubi_work *ubi_wrk;
	struct rb_node *tmp_rb;
	int ret, i, j, free_peb_count, used_peb_count, vol_count;
	int scrub_peb_count, erase_peb_count;
	int *seen_pebs = NULL;

	fm_raw = ubi->fm_buf;
	memset(ubi->fm_buf, 0, ubi->fm_size);

	avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
	if (!avhdr) {
		ret = -ENOMEM;
		goto out;
	}

	dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
	if (!dvhdr) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	seen_pebs = init_seen(ubi);
	if (IS_ERR(seen_pebs)) {
		ret = PTR_ERR(seen_pebs);
		goto out_kfree;
	}

	spin_lock(&ubi->volumes_lock);
	spin_lock(&ubi->wl_lock);

	fmsb = (struct ubi_fm_sb *)fm_raw;
	fm_pos += sizeof(*fmsb);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmh);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
	fmsb->version = UBI_FM_FMT_VERSION;
	fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
	/* the max sqnum will be filled in while *reading* the fastmap */
	fmsb->sqnum = 0;

	fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
	free_peb_count = 0;
	used_peb_count = 0;
	scrub_peb_count = 0;
	erase_peb_count = 0;
	vol_count = 0;

	fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl1);
	fmpl1->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl1->size = cpu_to_be16(ubi->fm_pool.size);
	fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size);

	for (i = 0; i < ubi->fm_pool.size; i++) {
		fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
		set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
	}

	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl2);
	fmpl2->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size);
	fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);

	for (i = 0; i < ubi->fm_wl_pool.size; i++) {
		fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
		set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
	}

	ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		free_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->free_peb_count = cpu_to_be32(free_peb_count);

	ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}

	ubi_for_each_protected_peb(ubi, i, wl_e) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->used_peb_count = cpu_to_be32(used_peb_count);

	ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		scrub_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);

	list_for_each_entry(ubi_wrk, &ubi->works, list) {
		if (ubi_is_erase_work(ubi_wrk)) {
			wl_e = ubi_wrk->e;
			ubi_assert(wl_e);

			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

			fec->pnum = cpu_to_be32(wl_e->pnum);
			set_seen(ubi, wl_e->pnum, seen_pebs);
			fec->ec = cpu_to_be32(wl_e->ec);

			erase_peb_count++;
			fm_pos += sizeof(*fec);
			ubi_assert(fm_pos <= ubi->fm_size);
		}
	}
	fmh->erase_peb_count = cpu_to_be32(erase_peb_count);

	for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
		vol = ubi->volumes[i];

		if (!vol)
			continue;

		vol_count++;

		fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fvh);
		ubi_assert(fm_pos <= ubi->fm_size);

		fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
		fvh->vol_id = cpu_to_be32(vol->vol_id);
		fvh->vol_type = vol->vol_type;
		fvh->used_ebs = cpu_to_be32(vol->used_ebs);
		fvh->data_pad = cpu_to_be32(vol->data_pad);
		fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);

		ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
			vol->vol_type == UBI_STATIC_VOLUME);

		feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
		ubi_assert(fm_pos <= ubi->fm_size);

		for (j = 0; j < vol->reserved_pebs; j++)
			feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);

		feba->reserved_pebs = cpu_to_be32(j);
		feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
	}
	fmh->vol_count = cpu_to_be32(vol_count);
	fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);

	avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	avhdr->lnum = 0;

	spin_unlock(&ubi->wl_lock);
	spin_unlock(&ubi->volumes_lock);

	dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
	if (ret) {
		ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
		goto out_kfree;
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
		set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
		fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
	}

	fmsb->data_crc = 0;
	fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
					   ubi->fm_size));

	for (i = 1; i < new_fm->used_blocks; i++) {
		dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		dvhdr->lnum = cpu_to_be32(i);
		dbg_bld("writing fastmap data to PEB %i sqnum %llu",
			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
		if (ret) {
			ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_kfree;
		}
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
			new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
		if (ret) {
			ubi_err(ubi, "unable to write fastmap to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_kfree;
		}
	}

	ubi_assert(new_fm);
	ubi->fm = new_fm;

	ret = self_check_seen(ubi, seen_pebs);
	dbg_bld("fastmap written!");

out_kfree:
	ubi_free_vid_hdr(ubi, avhdr);
	ubi_free_vid_hdr(ubi, dvhdr);
	free_seen(seen_pebs);
out:
	return ret;
}

/**
 * erase_block - Manually erase a PEB.
 * @ubi: UBI device object
 * @pnum: PEB to be erased
 *
 * Returns the new EC value on success, < 0 indicates an internal error.
 */
static int erase_block(struct ubi_device *ubi, int pnum)
{
	int ret;
	struct ubi_ec_hdr *ec_hdr;
	long long ec;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ec_hdr)
		return -ENOMEM;

	ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (ret < 0)
		goto out;
	else if (ret && ret != UBI_IO_BITFLIPS) {
		ret = -EINVAL;
		goto out;
	}

	ret = ubi_io_sync_erase(ubi, pnum, 0);
	if (ret < 0)
		goto out;

	ec = be64_to_cpu(ec_hdr->ec);
	ec += ret;
	if (ec > UBI_MAX_ERASECOUNTER) {
		ret = -EINVAL;
		goto out;
	}

	ec_hdr->ec = cpu_to_be64(ec);
	ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
	if (ret < 0)
		goto out;

	ret = ec;
out:
	kfree(ec_hdr);
	return ret;
}

/**
 * invalidate_fastmap - destroys a fastmap.
 * @ubi: UBI device object
 *
 * This function ensures that upon next UBI attach a full scan
 * is issued. We need this if UBI is about to write a new fastmap
 * but is unable to do so. In this case we have two options:
 * a) Make sure that the current fastmap will not be used upon
 * attach time and continue or b) fall back to RO mode to have the
 * current fastmap in a valid state.
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int invalidate_fastmap(struct ubi_device *ubi)
{
	int ret;
	struct ubi_fastmap_layout *fm;
	struct ubi_wl_entry *e;
	struct ubi_vid_hdr *vh = NULL;

	if (!ubi->fm)
		return 0;

	ubi->fm = NULL;

	ret = -ENOMEM;
	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm)
		goto out;

	vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
	if (!vh)
		goto out_free_fm;

	ret = -ENOSPC;
	e = ubi_wl_get_fm_peb(ubi, 1);
	if (!e)
		goto out_free_fm;

	/*
	 * Create fake fastmap such that UBI will fall back
	 * to scanning mode.
	 */
	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	ret = ubi_io_write_vid_hdr(ubi, e->pnum, vh);
	if (ret < 0) {
		ubi_wl_put_fm_peb(ubi, e, 0, 0);
		goto out_free_fm;
	}

	fm->used_blocks = 1;
	fm->e[0] = e;

	ubi->fm = fm;

out:
	ubi_free_vid_hdr(ubi, vh);
	return ret;

out_free_fm:
	kfree(fm);
	goto out;
}

/**
 * return_fm_pebs - returns all PEBs used by a fastmap back to the
 * WL sub-system.
 * @ubi: UBI device object
 * @fm: fastmap layout object
 */
static void return_fm_pebs(struct ubi_device *ubi,
			   struct ubi_fastmap_layout *fm)
{
	int i;

	if (!fm)
		return;

	for (i = 0; i < fm->used_blocks; i++) {
		if (fm->e[i]) {
			ubi_wl_put_fm_peb(ubi, fm->e[i], i,
					  fm->to_be_tortured[i]);
			fm->e[i] = NULL;
		}
	}
}

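/*
 * Writing a new fastmap works roughly as follows: refill the pools,
 * reserve a PEB for each fastmap block (reusing and re-erasing blocks
 * of the old fastmap if the WL sub-system cannot supply fresh ones),
 * obtain an anchor PEB, and write the new fastmap while holding
 * work_sem and fm_eba_sem. If anything fails, the current fastmap is
 * invalidated so that the next attach falls back to a full scan.
 */
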
/**
 * ubi_update_fastmap - will be called by UBI if a volume changes or
 * a fastmap pool becomes full.
 * @ubi: UBI device object
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
int ubi_update_fastmap(struct ubi_device *ubi)
{
	int ret, i, j;
	struct ubi_fastmap_layout *new_fm, *old_fm;
	struct ubi_wl_entry *tmp_e;

	down_write(&ubi->fm_protect);

	ubi_refill_pools(ubi);

	if (ubi->ro_mode || ubi->fm_disabled) {
		up_write(&ubi->fm_protect);
		return 0;
	}

	ret = ubi_ensure_anchor_pebs(ubi);
	if (ret) {
		up_write(&ubi->fm_protect);
		return ret;
	}

	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
	if (!new_fm) {
		up_write(&ubi->fm_protect);
		return -ENOMEM;
	}

	new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
	old_fm = ubi->fm;
	ubi->fm = NULL;

	if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
		ubi_err(ubi, "fastmap too large");
		ret = -ENOSPC;
		goto err;
	}

	for (i = 1; i < new_fm->used_blocks; i++) {
		spin_lock(&ubi->wl_lock);
		tmp_e = ubi_wl_get_fm_peb(ubi, 0);
		spin_unlock(&ubi->wl_lock);

		if (!tmp_e) {
			if (old_fm && old_fm->e[i]) {
				ret = erase_block(ubi, old_fm->e[i]->pnum);
				if (ret < 0) {
					ubi_err(ubi, "could not erase old fastmap PEB");

					for (j = 1; j < i; j++) {
						ubi_wl_put_fm_peb(ubi, new_fm->e[j],
								  j, 0);
						new_fm->e[j] = NULL;
					}
					goto err;
				}
				new_fm->e[i] = old_fm->e[i];
				old_fm->e[i] = NULL;
			} else {
				ubi_err(ubi, "could not get any free erase block");

				for (j = 1; j < i; j++) {
					ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
					new_fm->e[j] = NULL;
				}

				ret = -ENOSPC;
				goto err;
			}
		} else {
			new_fm->e[i] = tmp_e;

			if (old_fm && old_fm->e[i]) {
				ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
						  old_fm->to_be_tortured[i]);
				old_fm->e[i] = NULL;
			}
		}
	}

	/* Old fastmap is larger than the new one */
	if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
		for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
			ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
					  old_fm->to_be_tortured[i]);
			old_fm->e[i] = NULL;
		}
	}

	spin_lock(&ubi->wl_lock);
	tmp_e = ubi_wl_get_fm_peb(ubi, 1);
	spin_unlock(&ubi->wl_lock);

	if (old_fm) {
		/* no fresh anchor PEB was found, reuse the old one */
		if (!tmp_e) {
			ret = erase_block(ubi, old_fm->e[0]->pnum);
			if (ret < 0) {
				ubi_err(ubi, "could not erase old anchor PEB");

				for (i = 1; i < new_fm->used_blocks; i++) {
					ubi_wl_put_fm_peb(ubi, new_fm->e[i],
							  i, 0);
					new_fm->e[i] = NULL;
				}
				goto err;
			}
			new_fm->e[0] = old_fm->e[0];
			new_fm->e[0]->ec = ret;
			old_fm->e[0] = NULL;
		} else {
			/* we've got a new anchor PEB, return the old one */
			ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
					  old_fm->to_be_tortured[0]);
			new_fm->e[0] = tmp_e;
			old_fm->e[0] = NULL;
		}
	} else {
		if (!tmp_e) {
			ubi_err(ubi, "could not find any anchor PEB");

			for (i = 1; i < new_fm->used_blocks; i++) {
				ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
				new_fm->e[i] = NULL;
			}

			ret = -ENOSPC;
			goto err;
		}
		new_fm->e[0] = tmp_e;
	}

	down_write(&ubi->work_sem);
	down_write(&ubi->fm_eba_sem);
	ret = ubi_write_fastmap(ubi, new_fm);
	up_write(&ubi->fm_eba_sem);
	up_write(&ubi->work_sem);

	if (ret)
		goto err;

out_unlock:
	up_write(&ubi->fm_protect);
	kfree(old_fm);
	return ret;

err:
	ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);

	ret = invalidate_fastmap(ubi);
	if (ret < 0) {
		ubi_err(ubi, "Unable to invalidate current fastmap!");
		ubi_ro_mode(ubi);
	} else {
		return_fm_pebs(ubi, old_fm);
		return_fm_pebs(ubi, new_fm);
		ret = 0;
	}

	kfree(new_fm);
	goto out_unlock;
}