1/*
2 * Copyright (c) International Business Machines Corp., 2006
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12 * the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 * Author: Artem Bityutskiy (Битюцкий Артём)
19 */
20
21/*
22 * The UBI Eraseblock Association (EBA) sub-system.
23 *
 * This sub-system is responsible for I/O to/from logical eraseblocks.
25 *
 * Although in this implementation the EBA table is fully kept and managed in
 * RAM, which implies poor scalability, it might be (partially) maintained on
 * flash in future implementations.
29 *
30 * The EBA sub-system implements per-logical eraseblock locking. Before
31 * accessing a logical eraseblock it is locked for reading or writing. The
32 * per-logical eraseblock locking is implemented by means of the lock tree. The
 * lock tree is an RB-tree which refers to all the currently locked logical
34 * eraseblocks. The lock tree elements are &struct ubi_ltree_entry objects.
35 * They are indexed by (@vol_id, @lnum) pairs.
36 *
 * The EBA sub-system also maintains the global sequence counter, which is
 * incremented each time a logical eraseblock is mapped to a physical
 * eraseblock; the counter value is stored in the volume identifier header.
 * This means that each VID header has a unique sequence number. The sequence
 * number is only increased, and we assume that 64 bits is enough for it to
 * never overflow.
42 */
43
44#include <linux/slab.h>
45#include <linux/crc32.h>
46#include <linux/err.h>
47#include "ubi.h"
48
49/* Number of physical eraseblocks reserved for atomic LEB change operation */
50#define EBA_RESERVED_PEBS 1
51
52/**
 * ubi_next_sqnum - get next sequence number.
54 * @ubi: UBI device description object
55 *
 * This function returns the next sequence number to use, which is just the
 * current global sequence counter value. It also increases the global sequence
58 * counter.
59 */
60unsigned long long ubi_next_sqnum(struct ubi_device *ubi)
61{
62	unsigned long long sqnum;
63
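	/* Serialize the counter update with the lock tree spinlock */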
64	spin_lock(&ubi->ltree_lock);
65	sqnum = ubi->global_sqnum++;
66	spin_unlock(&ubi->ltree_lock);
67
68	return sqnum;
69}
70
71/**
72 * ubi_get_compat - get compatibility flags of a volume.
73 * @ubi: UBI device description object
74 * @vol_id: volume ID
75 *
76 * This function returns compatibility flags for an internal volume. User
77 * volumes have no compatibility flags, so %0 is returned.
78 */
79static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
80{
81	if (vol_id == UBI_LAYOUT_VOLUME_ID)
82		return UBI_LAYOUT_VOLUME_COMPAT;
83	return 0;
84}
85
86/**
87 * ltree_lookup - look up the lock tree.
88 * @ubi: UBI device description object
89 * @vol_id: volume ID
90 * @lnum: logical eraseblock number
91 *
92 * This function returns a pointer to the corresponding &struct ubi_ltree_entry
93 * object if the logical eraseblock is locked and %NULL if it is not.
94 * @ubi->ltree_lock has to be locked.
95 */
96static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
97					    int lnum)
98{
99	struct rb_node *p;
100
101	p = ubi->ltree.rb_node;
102	while (p) {
103		struct ubi_ltree_entry *le;
104
105		le = rb_entry(p, struct ubi_ltree_entry, rb);
106
107		if (vol_id < le->vol_id)
108			p = p->rb_left;
109		else if (vol_id > le->vol_id)
110			p = p->rb_right;
111		else {
112			if (lnum < le->lnum)
113				p = p->rb_left;
114			else if (lnum > le->lnum)
115				p = p->rb_right;
116			else
117				return le;
118		}
119	}
120
121	return NULL;
122}
123
124/**
125 * ltree_add_entry - add new entry to the lock tree.
126 * @ubi: UBI device description object
127 * @vol_id: volume ID
128 * @lnum: logical eraseblock number
129 *
 * This function adds a new entry for logical eraseblock (@vol_id, @lnum) to the
 * lock tree. If such an entry is already there, its usage counter is increased.
 * Returns a pointer to the lock tree entry or %-ENOMEM if memory allocation
133 * failed.
134 */
135static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
136					       int vol_id, int lnum)
137{
138	struct ubi_ltree_entry *le, *le1, *le_free;
139
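	/*
	 * Allocate the entry up front: a GFP_NOFS allocation may sleep, which
	 * is not allowed once @ubi->ltree_lock is taken below.
	 */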
140	le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS);
141	if (!le)
142		return ERR_PTR(-ENOMEM);
143
144	le->users = 0;
145	init_rwsem(&le->mutex);
146	le->vol_id = vol_id;
147	le->lnum = lnum;
148
149	spin_lock(&ubi->ltree_lock);
150	le1 = ltree_lookup(ubi, vol_id, lnum);
151
152	if (le1) {
153		/*
154		 * This logical eraseblock is already locked. The newly
155		 * allocated lock entry is not needed.
156		 */
157		le_free = le;
158		le = le1;
159	} else {
160		struct rb_node **p, *parent = NULL;
161
162		/*
163		 * No lock entry, add the newly allocated one to the
164		 * @ubi->ltree RB-tree.
165		 */
166		le_free = NULL;
167
168		p = &ubi->ltree.rb_node;
169		while (*p) {
170			parent = *p;
171			le1 = rb_entry(parent, struct ubi_ltree_entry, rb);
172
173			if (vol_id < le1->vol_id)
174				p = &(*p)->rb_left;
175			else if (vol_id > le1->vol_id)
176				p = &(*p)->rb_right;
177			else {
178				ubi_assert(lnum != le1->lnum);
179				if (lnum < le1->lnum)
180					p = &(*p)->rb_left;
181				else
182					p = &(*p)->rb_right;
183			}
184		}
185
186		rb_link_node(&le->rb, parent, p);
187		rb_insert_color(&le->rb, &ubi->ltree);
188	}
189	le->users += 1;
190	spin_unlock(&ubi->ltree_lock);
191
192	kfree(le_free);
193	return le;
194}
195
196/**
197 * leb_read_lock - lock logical eraseblock for reading.
198 * @ubi: UBI device description object
199 * @vol_id: volume ID
200 * @lnum: logical eraseblock number
201 *
202 * This function locks a logical eraseblock for reading. Returns zero in case
203 * of success and a negative error code in case of failure.
204 */
205static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
206{
207	struct ubi_ltree_entry *le;
208
209	le = ltree_add_entry(ubi, vol_id, lnum);
210	if (IS_ERR(le))
211		return PTR_ERR(le);
212	down_read(&le->mutex);
213	return 0;
214}
215
216/**
217 * leb_read_unlock - unlock logical eraseblock.
218 * @ubi: UBI device description object
219 * @vol_id: volume ID
220 * @lnum: logical eraseblock number
221 */
222static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
223{
224	struct ubi_ltree_entry *le;
225
226	spin_lock(&ubi->ltree_lock);
227	le = ltree_lookup(ubi, vol_id, lnum);
228	le->users -= 1;
229	ubi_assert(le->users >= 0);
230	up_read(&le->mutex);
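	/*
	 * If this was the last user, nobody else holds a reference: taking a
	 * new one requires @ubi->ltree_lock, which we hold.
	 */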
231	if (le->users == 0) {
232		rb_erase(&le->rb, &ubi->ltree);
233		kfree(le);
234	}
235	spin_unlock(&ubi->ltree_lock);
236}
237
238/**
239 * leb_write_lock - lock logical eraseblock for writing.
240 * @ubi: UBI device description object
241 * @vol_id: volume ID
242 * @lnum: logical eraseblock number
243 *
244 * This function locks a logical eraseblock for writing. Returns zero in case
245 * of success and a negative error code in case of failure.
246 */
247static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
248{
249	struct ubi_ltree_entry *le;
250
251	le = ltree_add_entry(ubi, vol_id, lnum);
252	if (IS_ERR(le))
253		return PTR_ERR(le);
254	down_write(&le->mutex);
255	return 0;
256}
257
258/**
 * leb_write_trylock - try to lock logical eraseblock for writing.
260 * @ubi: UBI device description object
261 * @vol_id: volume ID
262 * @lnum: logical eraseblock number
263 *
264 * This function locks a logical eraseblock for writing if there is no
265 * contention and does nothing if there is contention. Returns %0 in case of
 * success, %1 in case of contention, and a negative error code in case of
267 * failure.
268 */
269static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
270{
271	struct ubi_ltree_entry *le;
272
273	le = ltree_add_entry(ubi, vol_id, lnum);
274	if (IS_ERR(le))
275		return PTR_ERR(le);
276	if (down_write_trylock(&le->mutex))
277		return 0;
278
279	/* Contention, cancel */
280	spin_lock(&ubi->ltree_lock);
281	le->users -= 1;
282	ubi_assert(le->users >= 0);
283	if (le->users == 0) {
284		rb_erase(&le->rb, &ubi->ltree);
285		kfree(le);
286	}
287	spin_unlock(&ubi->ltree_lock);
288
289	return 1;
290}
291
292/**
293 * leb_write_unlock - unlock logical eraseblock.
294 * @ubi: UBI device description object
295 * @vol_id: volume ID
296 * @lnum: logical eraseblock number
297 */
298static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
299{
300	struct ubi_ltree_entry *le;
301
302	spin_lock(&ubi->ltree_lock);
303	le = ltree_lookup(ubi, vol_id, lnum);
304	le->users -= 1;
305	ubi_assert(le->users >= 0);
306	up_write(&le->mutex);
307	if (le->users == 0) {
308		rb_erase(&le->rb, &ubi->ltree);
309		kfree(le);
310	}
311	spin_unlock(&ubi->ltree_lock);
312}
313
314/**
315 * ubi_eba_unmap_leb - un-map logical eraseblock.
316 * @ubi: UBI device description object
317 * @vol: volume description object
318 * @lnum: logical eraseblock number
319 *
 * This function un-maps logical eraseblock @lnum and schedules the
 * corresponding physical eraseblock for erasure. Returns zero in case of
 * success and a negative error code in case of failure.
323 */
324int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
325		      int lnum)
326{
327	int err, pnum, vol_id = vol->vol_id;
328
329	if (ubi->ro_mode)
330		return -EROFS;
331
332	err = leb_write_lock(ubi, vol_id, lnum);
333	if (err)
334		return err;
335
336	pnum = vol->eba_tbl[lnum];
337	if (pnum < 0)
338		/* This logical eraseblock is already unmapped */
339		goto out_unlock;
340
341	dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
342
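	/*
	 * Update the EBA table under @ubi->fm_eba_sem so that a fastmap being
	 * written concurrently sees a consistent mapping.
	 */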
343	down_read(&ubi->fm_eba_sem);
344	vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
345	up_read(&ubi->fm_eba_sem);
346	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
347
348out_unlock:
349	leb_write_unlock(ubi, vol_id, lnum);
350	return err;
351}
352
353/**
354 * ubi_eba_read_leb - read data.
355 * @ubi: UBI device description object
356 * @vol: volume description object
357 * @lnum: logical eraseblock number
358 * @buf: buffer to store the read data
359 * @offset: offset from where to read
360 * @len: how many bytes to read
361 * @check: data CRC check flag
362 *
363 * If the logical eraseblock @lnum is unmapped, @buf is filled with 0xFF
364 * bytes. The @check flag only makes sense for static volumes and forces
365 * eraseblock data CRC checking.
366 *
 * In case of success this function returns zero. For a static volume, if the
 * data CRC does not match, %-EBADMSG is returned. %-EBADMSG may also be
 * returned for any volume type if an ECC error was detected by the MTD device
 * driver. Other negative error codes may be returned in case of other errors.
371 */
372int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
373		     void *buf, int offset, int len, int check)
374{
375	int err, pnum, scrub = 0, vol_id = vol->vol_id;
376	struct ubi_vid_hdr *vid_hdr;
377	uint32_t uninitialized_var(crc);
378
379	err = leb_read_lock(ubi, vol_id, lnum);
380	if (err)
381		return err;
382
383	pnum = vol->eba_tbl[lnum];
384	if (pnum < 0) {
385		/*
386		 * The logical eraseblock is not mapped, fill the whole buffer
387		 * with 0xFF bytes. The exception is static volumes for which
388		 * it is an error to read unmapped logical eraseblocks.
389		 */
390		dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)",
391			len, offset, vol_id, lnum);
392		leb_read_unlock(ubi, vol_id, lnum);
393		ubi_assert(vol->vol_type != UBI_STATIC_VOLUME);
394		memset(buf, 0xFF, len);
395		return 0;
396	}
397
398	dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d",
399		len, offset, vol_id, lnum, pnum);
400
401	if (vol->vol_type == UBI_DYNAMIC_VOLUME)
402		check = 0;
403
404retry:
405	if (check) {
406		vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
407		if (!vid_hdr) {
408			err = -ENOMEM;
409			goto out_unlock;
410		}
411
412		err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
413		if (err && err != UBI_IO_BITFLIPS) {
414			if (err > 0) {
415				/*
416				 * The header is either absent or corrupted.
417				 * The former case means there is a bug -
418				 * switch to read-only mode just in case.
419				 * The latter case means a real corruption - we
420				 * may try to recover data. FIXME: but this is
421				 * not implemented.
422				 */
423				if (err == UBI_IO_BAD_HDR_EBADMSG ||
424				    err == UBI_IO_BAD_HDR) {
425					ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d",
426						 pnum, vol_id, lnum);
427					err = -EBADMSG;
428				} else {
429					/*
430					 * Ending up here in the non-Fastmap case
431					 * is a clear bug as the VID header had to
432					 * be present at scan time to have it referenced.
433					 * With fastmap the story is more complicated.
434					 * Fastmap has the mapping info without the need
435					 * of a full scan. So the LEB could have been
436					 * unmapped, Fastmap cannot know this and keeps
437					 * the LEB referenced.
438					 * This is valid and works as the layer above UBI
439					 * has to do bookkeeping about used/referenced
440					 * LEBs in any case.
441					 */
442					if (ubi->fast_attach) {
443						err = -EBADMSG;
444					} else {
445						err = -EINVAL;
446						ubi_ro_mode(ubi);
447					}
448				}
449			}
450			goto out_free;
451		} else if (err == UBI_IO_BITFLIPS)
452			scrub = 1;
453
454		ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs));
455		ubi_assert(len == be32_to_cpu(vid_hdr->data_size));
456
457		crc = be32_to_cpu(vid_hdr->data_crc);
458		ubi_free_vid_hdr(ubi, vid_hdr);
459	}
460
461	err = ubi_io_read_data(ubi, buf, pnum, offset, len);
462	if (err) {
463		if (err == UBI_IO_BITFLIPS)
464			scrub = 1;
465		else if (mtd_is_eccerr(err)) {
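			/*
			 * Dynamic volumes carry no data CRC, so an ECC error is
			 * fatal for this read. For static volumes the data may
			 * still be intact, so scrub the PEB and re-read with
			 * CRC checking enabled.
			 */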
466			if (vol->vol_type == UBI_DYNAMIC_VOLUME)
467				goto out_unlock;
468			scrub = 1;
469			if (!check) {
470				ubi_msg(ubi, "force data checking");
471				check = 1;
472				goto retry;
473			}
474		} else
475			goto out_unlock;
476	}
477
478	if (check) {
479		uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);
480		if (crc1 != crc) {
481			ubi_warn(ubi, "CRC error: calculated %#08x, must be %#08x",
482				 crc1, crc);
483			err = -EBADMSG;
484			goto out_unlock;
485		}
486	}
487
488	if (scrub)
489		err = ubi_wl_scrub_peb(ubi, pnum);
490
491	leb_read_unlock(ubi, vol_id, lnum);
492	return err;
493
494out_free:
495	ubi_free_vid_hdr(ubi, vid_hdr);
496out_unlock:
497	leb_read_unlock(ubi, vol_id, lnum);
498	return err;
499}
500
501/**
502 * ubi_eba_read_leb_sg - read data into a scatter gather list.
503 * @ubi: UBI device description object
504 * @vol: volume description object
 * @sgl: UBI scatter gather list to store the read data
 * @lnum: logical eraseblock number
507 * @offset: offset from where to read
508 * @len: how many bytes to read
509 * @check: data CRC check flag
510 *
 * This function works exactly like ubi_eba_read_leb(), but instead of
 * storing the read data in a buffer it writes it to an UBI scatter gather
 * list.
514 */
515int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol,
516			struct ubi_sgl *sgl, int lnum, int offset, int len,
517			int check)
518{
519	int to_read;
520	int ret;
521	struct scatterlist *sg;
522
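	/* Read at most the remainder of the current scatterlist entry per iteration */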
523	for (;;) {
524		ubi_assert(sgl->list_pos < UBI_MAX_SG_COUNT);
525		sg = &sgl->sg[sgl->list_pos];
526		if (len < sg->length - sgl->page_pos)
527			to_read = len;
528		else
529			to_read = sg->length - sgl->page_pos;
530
531		ret = ubi_eba_read_leb(ubi, vol, lnum,
532				       sg_virt(sg) + sgl->page_pos, offset,
533				       to_read, check);
534		if (ret < 0)
535			return ret;
536
537		offset += to_read;
538		len -= to_read;
539		if (!len) {
540			sgl->page_pos += to_read;
541			if (sgl->page_pos == sg->length) {
542				sgl->list_pos++;
543				sgl->page_pos = 0;
544			}
545
546			break;
547		}
548
549		sgl->list_pos++;
550		sgl->page_pos = 0;
551	}
552
553	return ret;
554}
555
556/**
557 * recover_peb - recover from write failure.
558 * @ubi: UBI device description object
559 * @pnum: the physical eraseblock to recover
560 * @vol_id: volume ID
561 * @lnum: logical eraseblock number
562 * @buf: data which was not written because of the write failure
563 * @offset: offset of the failed write
564 * @len: how many bytes should have been written
565 *
566 * This function is called in case of a write failure and moves all good data
567 * from the potentially bad physical eraseblock to a good physical eraseblock.
568 * This function also writes the data which was not written due to the failure.
 * Returns zero in case of success, and a negative error code in case of
 * failure.
571 */
572static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
573		       const void *buf, int offset, int len)
574{
575	int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
576	struct ubi_volume *vol = ubi->volumes[idx];
577	struct ubi_vid_hdr *vid_hdr;
578
579	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
580	if (!vid_hdr)
581		return -ENOMEM;
582
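	/*
	 * Note: ubi_wl_get_peb() returns with @ubi->fm_eba_sem held for
	 * reading, which is why the exit paths below drop it with up_read().
	 */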
583retry:
584	new_pnum = ubi_wl_get_peb(ubi);
585	if (new_pnum < 0) {
586		ubi_free_vid_hdr(ubi, vid_hdr);
587		up_read(&ubi->fm_eba_sem);
588		return new_pnum;
589	}
590
591	ubi_msg(ubi, "recover PEB %d, move data to PEB %d",
592		pnum, new_pnum);
593
594	err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
595	if (err && err != UBI_IO_BITFLIPS) {
596		if (err > 0)
597			err = -EIO;
598		up_read(&ubi->fm_eba_sem);
599		goto out_put;
600	}
601
602	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
603	err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
604	if (err) {
605		up_read(&ubi->fm_eba_sem);
606		goto write_error;
607	}
608
609	data_size = offset + len;
610	mutex_lock(&ubi->buf_mutex);
611	memset(ubi->peb_buf + offset, 0xFF, len);
612
613	/* Read everything before the area where the write failure happened */
614	if (offset > 0) {
615		err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
616		if (err && err != UBI_IO_BITFLIPS) {
617			up_read(&ubi->fm_eba_sem);
618			goto out_unlock;
619		}
620	}
621
622	memcpy(ubi->peb_buf + offset, buf, len);
623
624	err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
625	if (err) {
626		mutex_unlock(&ubi->buf_mutex);
627		up_read(&ubi->fm_eba_sem);
628		goto write_error;
629	}
630
631	mutex_unlock(&ubi->buf_mutex);
632	ubi_free_vid_hdr(ubi, vid_hdr);
633
634	vol->eba_tbl[lnum] = new_pnum;
635	up_read(&ubi->fm_eba_sem);
636	ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
637
638	ubi_msg(ubi, "data was successfully recovered");
639	return 0;
640
641out_unlock:
642	mutex_unlock(&ubi->buf_mutex);
643out_put:
644	ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
645	ubi_free_vid_hdr(ubi, vid_hdr);
646	return err;
647
648write_error:
649	/*
650	 * Bad luck? This physical eraseblock is bad too? Crud. Let's try to
651	 * get another one.
652	 */
653	ubi_warn(ubi, "failed to write to PEB %d", new_pnum);
654	ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
655	if (++tries > UBI_IO_RETRIES) {
656		ubi_free_vid_hdr(ubi, vid_hdr);
657		return err;
658	}
659	ubi_msg(ubi, "try again");
660	goto retry;
661}
662
663/**
664 * ubi_eba_write_leb - write data to dynamic volume.
665 * @ubi: UBI device description object
666 * @vol: volume description object
667 * @lnum: logical eraseblock number
668 * @buf: the data to write
669 * @offset: offset within the logical eraseblock where to write
670 * @len: how many bytes to write
671 *
672 * This function writes data to logical eraseblock @lnum of a dynamic volume
673 * @vol. Returns zero in case of success and a negative error code in case
 * of failure. In case of error, it is possible that something was still
 * written to the flash media, but it may be garbage.
676 */
677int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
678		      const void *buf, int offset, int len)
679{
680	int err, pnum, tries = 0, vol_id = vol->vol_id;
681	struct ubi_vid_hdr *vid_hdr;
682
683	if (ubi->ro_mode)
684		return -EROFS;
685
686	err = leb_write_lock(ubi, vol_id, lnum);
687	if (err)
688		return err;
689
690	pnum = vol->eba_tbl[lnum];
691	if (pnum >= 0) {
692		dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
693			len, offset, vol_id, lnum, pnum);
694
695		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
696		if (err) {
697			ubi_warn(ubi, "failed to write data to PEB %d", pnum);
698			if (err == -EIO && ubi->bad_allowed)
699				err = recover_peb(ubi, pnum, vol_id, lnum, buf,
700						  offset, len);
701			if (err)
702				ubi_ro_mode(ubi);
703		}
704		leb_write_unlock(ubi, vol_id, lnum);
705		return err;
706	}
707
708	/*
709	 * The logical eraseblock is not mapped. We have to get a free physical
710	 * eraseblock and write the volume identifier header there first.
711	 */
712	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
713	if (!vid_hdr) {
714		leb_write_unlock(ubi, vol_id, lnum);
715		return -ENOMEM;
716	}
717
718	vid_hdr->vol_type = UBI_VID_DYNAMIC;
719	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
720	vid_hdr->vol_id = cpu_to_be32(vol_id);
721	vid_hdr->lnum = cpu_to_be32(lnum);
722	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
723	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
724
725retry:
726	pnum = ubi_wl_get_peb(ubi);
727	if (pnum < 0) {
728		ubi_free_vid_hdr(ubi, vid_hdr);
729		leb_write_unlock(ubi, vol_id, lnum);
730		up_read(&ubi->fm_eba_sem);
731		return pnum;
732	}
733
734	dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
735		len, offset, vol_id, lnum, pnum);
736
737	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
738	if (err) {
739		ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
740			 vol_id, lnum, pnum);
741		up_read(&ubi->fm_eba_sem);
742		goto write_error;
743	}
744
745	if (len) {
746		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
747		if (err) {
748			ubi_warn(ubi, "failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
749				 len, offset, vol_id, lnum, pnum);
750			up_read(&ubi->fm_eba_sem);
751			goto write_error;
752		}
753	}
754
755	vol->eba_tbl[lnum] = pnum;
756	up_read(&ubi->fm_eba_sem);
757
758	leb_write_unlock(ubi, vol_id, lnum);
759	ubi_free_vid_hdr(ubi, vid_hdr);
760	return 0;
761
762write_error:
763	if (err != -EIO || !ubi->bad_allowed) {
764		ubi_ro_mode(ubi);
765		leb_write_unlock(ubi, vol_id, lnum);
766		ubi_free_vid_hdr(ubi, vid_hdr);
767		return err;
768	}
769
770	/*
771	 * Fortunately, this is the first write operation to this physical
772	 * eraseblock, so just put it and request a new one. We assume that if
773	 * this physical eraseblock went bad, the erase code will handle that.
774	 */
775	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
776	if (err || ++tries > UBI_IO_RETRIES) {
777		ubi_ro_mode(ubi);
778		leb_write_unlock(ubi, vol_id, lnum);
779		ubi_free_vid_hdr(ubi, vid_hdr);
780		return err;
781	}
782
783	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
784	ubi_msg(ubi, "try another PEB");
785	goto retry;
786}
787
788/**
789 * ubi_eba_write_leb_st - write data to static volume.
790 * @ubi: UBI device description object
791 * @vol: volume description object
792 * @lnum: logical eraseblock number
793 * @buf: data to write
794 * @len: how many bytes to write
795 * @used_ebs: how many logical eraseblocks will this volume contain
796 *
797 * This function writes data to logical eraseblock @lnum of static volume
 * @vol. The @used_ebs argument should contain the total number of logical
 * eraseblocks in this static volume.
800 *
 * When writing to the last logical eraseblock, the @len argument does not have
 * to be aligned to the minimal I/O unit size. Instead, it has to be equal to
 * the real data size, although the @buf buffer has to be padded out to the
 * aligned size. In all other cases, @len has to be aligned.
805 *
806 * It is prohibited to write more than once to logical eraseblocks of static
807 * volumes. This function returns zero in case of success and a negative error
808 * code in case of failure.
809 */
810int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
811			 int lnum, const void *buf, int len, int used_ebs)
812{
813	int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
814	struct ubi_vid_hdr *vid_hdr;
815	uint32_t crc;
816
817	if (ubi->ro_mode)
818		return -EROFS;
819
820	if (lnum == used_ebs - 1)
821		/* If this is the last LEB @len may be unaligned */
822		len = ALIGN(data_size, ubi->min_io_size);
823	else
824		ubi_assert(!(len & (ubi->min_io_size - 1)));
825
826	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
827	if (!vid_hdr)
828		return -ENOMEM;
829
830	err = leb_write_lock(ubi, vol_id, lnum);
831	if (err) {
832		ubi_free_vid_hdr(ubi, vid_hdr);
833		return err;
834	}
835
836	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
837	vid_hdr->vol_id = cpu_to_be32(vol_id);
838	vid_hdr->lnum = cpu_to_be32(lnum);
839	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
840	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
841
842	crc = crc32(UBI_CRC32_INIT, buf, data_size);
843	vid_hdr->vol_type = UBI_VID_STATIC;
844	vid_hdr->data_size = cpu_to_be32(data_size);
845	vid_hdr->used_ebs = cpu_to_be32(used_ebs);
846	vid_hdr->data_crc = cpu_to_be32(crc);
847
848retry:
849	pnum = ubi_wl_get_peb(ubi);
850	if (pnum < 0) {
851		ubi_free_vid_hdr(ubi, vid_hdr);
852		leb_write_unlock(ubi, vol_id, lnum);
853		up_read(&ubi->fm_eba_sem);
854		return pnum;
855	}
856
857	dbg_eba("write VID hdr and %d bytes at LEB %d:%d, PEB %d, used_ebs %d",
858		len, vol_id, lnum, pnum, used_ebs);
859
860	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
861	if (err) {
862		ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
863			 vol_id, lnum, pnum);
864		up_read(&ubi->fm_eba_sem);
865		goto write_error;
866	}
867
868	err = ubi_io_write_data(ubi, buf, pnum, 0, len);
869	if (err) {
870		ubi_warn(ubi, "failed to write %d bytes of data to PEB %d",
871			 len, pnum);
872		up_read(&ubi->fm_eba_sem);
873		goto write_error;
874	}
875
876	ubi_assert(vol->eba_tbl[lnum] < 0);
877	vol->eba_tbl[lnum] = pnum;
878	up_read(&ubi->fm_eba_sem);
879
880	leb_write_unlock(ubi, vol_id, lnum);
881	ubi_free_vid_hdr(ubi, vid_hdr);
882	return 0;
883
884write_error:
885	if (err != -EIO || !ubi->bad_allowed) {
886		/*
		 * This flash device does not tolerate bad eraseblocks, or
		 * something nasty and unexpected happened. Switch to read-only
889		 * mode just in case.
890		 */
891		ubi_ro_mode(ubi);
892		leb_write_unlock(ubi, vol_id, lnum);
893		ubi_free_vid_hdr(ubi, vid_hdr);
894		return err;
895	}
896
897	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
898	if (err || ++tries > UBI_IO_RETRIES) {
899		ubi_ro_mode(ubi);
900		leb_write_unlock(ubi, vol_id, lnum);
901		ubi_free_vid_hdr(ubi, vid_hdr);
902		return err;
903	}
904
905	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
906	ubi_msg(ubi, "try another PEB");
907	goto retry;
908}
909
/**
911 * ubi_eba_atomic_leb_change - change logical eraseblock atomically.
912 * @ubi: UBI device description object
913 * @vol: volume description object
914 * @lnum: logical eraseblock number
915 * @buf: data to write
916 * @len: how many bytes to write
917 *
918 * This function changes the contents of a logical eraseblock atomically. @buf
919 * has to contain new logical eraseblock data, and @len - the length of the
920 * data, which has to be aligned. This function guarantees that in case of an
 * unclean reboot the old contents are preserved. Returns zero in case of
922 * success and a negative error code in case of failure.
923 *
 * UBI reserves one PEB for the "atomic LEB change" operation, so only one
925 * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
926 */
927int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
928			      int lnum, const void *buf, int len)
929{
930	int err, pnum, old_pnum, tries = 0, vol_id = vol->vol_id;
931	struct ubi_vid_hdr *vid_hdr;
932	uint32_t crc;
933
934	if (ubi->ro_mode)
935		return -EROFS;
936
937	if (len == 0) {
938		/*
939		 * Special case when data length is zero. In this case the LEB
940		 * has to be unmapped and mapped somewhere else.
941		 */
942		err = ubi_eba_unmap_leb(ubi, vol, lnum);
943		if (err)
944			return err;
945		return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
946	}
947
948	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
949	if (!vid_hdr)
950		return -ENOMEM;
951
952	mutex_lock(&ubi->alc_mutex);
953	err = leb_write_lock(ubi, vol_id, lnum);
954	if (err)
955		goto out_mutex;
956
957	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
958	vid_hdr->vol_id = cpu_to_be32(vol_id);
959	vid_hdr->lnum = cpu_to_be32(lnum);
960	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
961	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
962
963	crc = crc32(UBI_CRC32_INIT, buf, len);
964	vid_hdr->vol_type = UBI_VID_DYNAMIC;
965	vid_hdr->data_size = cpu_to_be32(len);
966	vid_hdr->copy_flag = 1;
967	vid_hdr->data_crc = cpu_to_be32(crc);
968
969retry:
970	pnum = ubi_wl_get_peb(ubi);
971	if (pnum < 0) {
972		err = pnum;
973		up_read(&ubi->fm_eba_sem);
974		goto out_leb_unlock;
975	}
976
977	dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d",
978		vol_id, lnum, vol->eba_tbl[lnum], pnum);
979
980	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
981	if (err) {
982		ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
983			 vol_id, lnum, pnum);
984		up_read(&ubi->fm_eba_sem);
985		goto write_error;
986	}
987
988	err = ubi_io_write_data(ubi, buf, pnum, 0, len);
989	if (err) {
990		ubi_warn(ubi, "failed to write %d bytes of data to PEB %d",
991			 len, pnum);
992		up_read(&ubi->fm_eba_sem);
993		goto write_error;
994	}
995
996	old_pnum = vol->eba_tbl[lnum];
997	vol->eba_tbl[lnum] = pnum;
998	up_read(&ubi->fm_eba_sem);
999
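	/* The new PEB now holds the data; return the old one, if any, for erasure */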
1000	if (old_pnum >= 0) {
1001		err = ubi_wl_put_peb(ubi, vol_id, lnum, old_pnum, 0);
1002		if (err)
1003			goto out_leb_unlock;
1004	}
1005
1006out_leb_unlock:
1007	leb_write_unlock(ubi, vol_id, lnum);
1008out_mutex:
1009	mutex_unlock(&ubi->alc_mutex);
1010	ubi_free_vid_hdr(ubi, vid_hdr);
1011	return err;
1012
1013write_error:
1014	if (err != -EIO || !ubi->bad_allowed) {
1015		/*
		 * This flash device does not tolerate bad eraseblocks, or
		 * something nasty and unexpected happened. Switch to read-only
1018		 * mode just in case.
1019		 */
1020		ubi_ro_mode(ubi);
1021		goto out_leb_unlock;
1022	}
1023
1024	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
1025	if (err || ++tries > UBI_IO_RETRIES) {
1026		ubi_ro_mode(ubi);
1027		goto out_leb_unlock;
1028	}
1029
1030	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1031	ubi_msg(ubi, "try another PEB");
1032	goto retry;
1033}
1034
1035/**
1036 * is_error_sane - check whether a read error is sane.
1037 * @err: code of the error happened during reading
1038 *
1039 * This is a helper function for 'ubi_eba_copy_leb()' which is called when we
1040 * cannot read data from the target PEB (an error @err happened). If the error
1041 * code is sane, then we treat this error as non-fatal. Otherwise the error is
1042 * fatal and UBI will be switched to R/O mode later.
1043 *
1044 * The idea is that we try not to switch to R/O mode if the read error is
 * something which suggests there was a real read problem. E.g., %-EIO. Or a
 * memory allocation failed (%-ENOMEM). Otherwise, it is safer to switch to R/O
1047 * mode, simply because we do not know what happened at the MTD level, and we
1048 * cannot handle this. E.g., the underlying driver may have become crazy, and
1049 * it is safer to switch to R/O mode to preserve the data.
1050 *
1051 * And bear in mind, this is about reading from the target PEB, i.e. the PEB
 * to which we have just written.
1053 */
1054static int is_error_sane(int err)
1055{
1056	if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_HDR ||
1057	    err == UBI_IO_BAD_HDR_EBADMSG || err == -ETIMEDOUT)
1058		return 0;
1059	return 1;
1060}
1061
1062/**
1063 * ubi_eba_copy_leb - copy logical eraseblock.
1064 * @ubi: UBI device description object
1065 * @from: physical eraseblock number from where to copy
1066 * @to: physical eraseblock number where to copy
1067 * @vid_hdr: VID header of the @from physical eraseblock
1068 *
1069 * This function copies logical eraseblock from physical eraseblock @from to
1070 * physical eraseblock @to. The @vid_hdr buffer may be changed by this
1071 * function. Returns:
1072 *   o %0 in case of success;
1073 *   o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_TARGET_BITFLIPS, etc;
1074 *   o a negative error code in case of failure.
1075 */
1076int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1077		     struct ubi_vid_hdr *vid_hdr)
1078{
1079	int err, vol_id, lnum, data_size, aldata_size, idx;
1080	struct ubi_volume *vol;
1081	uint32_t crc;
1082
1083	vol_id = be32_to_cpu(vid_hdr->vol_id);
1084	lnum = be32_to_cpu(vid_hdr->lnum);
1085
1086	dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);
1087
1088	if (vid_hdr->vol_type == UBI_VID_STATIC) {
1089		data_size = be32_to_cpu(vid_hdr->data_size);
1090		aldata_size = ALIGN(data_size, ubi->min_io_size);
1091	} else
1092		data_size = aldata_size =
1093			    ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);
1094
1095	idx = vol_id2idx(ubi, vol_id);
1096	spin_lock(&ubi->volumes_lock);
1097	/*
1098	 * Note, we may race with volume deletion, which means that the volume
1099	 * this logical eraseblock belongs to might be being deleted. Since the
1100	 * volume deletion un-maps all the volume's logical eraseblocks, it will
1101	 * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
1102	 */
1103	vol = ubi->volumes[idx];
1104	spin_unlock(&ubi->volumes_lock);
1105	if (!vol) {
1106		/* No need to do further work, cancel */
1107		dbg_wl("volume %d is being removed, cancel", vol_id);
1108		return MOVE_CANCEL_RACE;
1109	}
1110
1111	/*
1112	 * We do not want anybody to write to this logical eraseblock while we
1113	 * are moving it, so lock it.
1114	 *
1115	 * Note, we are using non-waiting locking here, because we cannot sleep
1116	 * on the LEB, since it may cause deadlocks. Indeed, imagine a task is
1117	 * unmapping the LEB which is mapped to the PEB we are going to move
	 * (@from). This task locks the LEB and goes to sleep in the
	 * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
	 * holding @ubi->move_mutex and would go to sleep on the LEB lock. So,
	 * if the LEB is already locked, we just do not move it and return
1122	 * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
1123	 * we do not know the reasons of the contention - it may be just a
1124	 * normal I/O on this LEB, so we want to re-try.
1125	 */
1126	err = leb_write_trylock(ubi, vol_id, lnum);
1127	if (err) {
1128		dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
1129		return MOVE_RETRY;
1130	}
1131
1132	/*
1133	 * The LEB might have been put meanwhile, and the task which put it is
1134	 * probably waiting on @ubi->move_mutex. No need to continue the work,
1135	 * cancel it.
1136	 */
1137	if (vol->eba_tbl[lnum] != from) {
1138		dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel",
1139		       vol_id, lnum, from, vol->eba_tbl[lnum]);
1140		err = MOVE_CANCEL_RACE;
1141		goto out_unlock_leb;
1142	}
1143
1144	/*
1145	 * OK, now the LEB is locked and we can safely start moving it. Since
1146	 * this function utilizes the @ubi->peb_buf buffer which is shared
1147	 * with some other functions - we lock the buffer by taking the
1148	 * @ubi->buf_mutex.
1149	 */
1150	mutex_lock(&ubi->buf_mutex);
1151	dbg_wl("read %d bytes of data", aldata_size);
1152	err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size);
1153	if (err && err != UBI_IO_BITFLIPS) {
1154		ubi_warn(ubi, "error %d while reading data from PEB %d",
1155			 err, from);
1156		err = MOVE_SOURCE_RD_ERR;
1157		goto out_unlock_buf;
1158	}
1159
1160	/*
1161	 * Now we have got to calculate how much data we have to copy. In
1162	 * case of a static volume it is fairly easy - the VID header contains
1163	 * the data size. In case of a dynamic volume it is more difficult - we
1164	 * have to read the contents, cut 0xFF bytes from the end and copy only
1165	 * the first part. We must do this to avoid writing 0xFF bytes as it
	 * may have some side-effects. And not only this. It is important not
	 * to include those 0xFFs in the CRC because later they may be filled
	 * by data.
1169	 */
1170	if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
1171		aldata_size = data_size =
1172			ubi_calc_data_len(ubi, ubi->peb_buf, data_size);
1173
1174	cond_resched();
1175	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
1176	cond_resched();
1177
1178	/*
1179	 * It may turn out to be that the whole @from physical eraseblock
1180	 * contains only 0xFF bytes. Then we have to only write the VID header
1181	 * and do not write any data. This also means we should not set
1182	 * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc.
1183	 */
1184	if (data_size > 0) {
1185		vid_hdr->copy_flag = 1;
1186		vid_hdr->data_size = cpu_to_be32(data_size);
1187		vid_hdr->data_crc = cpu_to_be32(crc);
1188	}
1189	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1190
1191	err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
1192	if (err) {
1193		if (err == -EIO)
1194			err = MOVE_TARGET_WR_ERR;
1195		goto out_unlock_buf;
1196	}
1197
1198	cond_resched();
1199
1200	/* Read the VID header back and check if it was written correctly */
1201	err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
1202	if (err) {
1203		if (err != UBI_IO_BITFLIPS) {
1204			ubi_warn(ubi, "error %d while reading VID header back from PEB %d",
1205				 err, to);
1206			if (is_error_sane(err))
1207				err = MOVE_TARGET_RD_ERR;
1208		} else
1209			err = MOVE_TARGET_BITFLIPS;
1210		goto out_unlock_buf;
1211	}
1212
1213	if (data_size > 0) {
1214		err = ubi_io_write_data(ubi, ubi->peb_buf, to, 0, aldata_size);
1215		if (err) {
1216			if (err == -EIO)
1217				err = MOVE_TARGET_WR_ERR;
1218			goto out_unlock_buf;
1219		}
1220
1221		cond_resched();
1222
1223		/*
1224		 * We've written the data and are going to read it back to make
1225		 * sure it was written correctly.
1226		 */
1227		memset(ubi->peb_buf, 0xFF, aldata_size);
1228		err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size);
1229		if (err) {
1230			if (err != UBI_IO_BITFLIPS) {
1231				ubi_warn(ubi, "error %d while reading data back from PEB %d",
1232					 err, to);
1233				if (is_error_sane(err))
1234					err = MOVE_TARGET_RD_ERR;
1235			} else
1236				err = MOVE_TARGET_BITFLIPS;
1237			goto out_unlock_buf;
1238		}
1239
1240		cond_resched();
1241
1242		if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) {
1243			ubi_warn(ubi, "read data back from PEB %d and it is different",
1244				 to);
1245			err = -EINVAL;
1246			goto out_unlock_buf;
1247		}
1248	}
1249
1250	ubi_assert(vol->eba_tbl[lnum] == from);
1251	down_read(&ubi->fm_eba_sem);
1252	vol->eba_tbl[lnum] = to;
1253	up_read(&ubi->fm_eba_sem);
1254
1255out_unlock_buf:
1256	mutex_unlock(&ubi->buf_mutex);
1257out_unlock_leb:
1258	leb_write_unlock(ubi, vol_id, lnum);
1259	return err;
1260}
1261
1262/**
1263 * print_rsvd_warning - warn about not having enough reserved PEBs.
 * @ubi: UBI device description object
 * @ai: attaching information
1265 *
1266 * This is a helper function for 'ubi_eba_init()' which is called when UBI
1267 * cannot reserve enough PEBs for bad block handling. This function makes a
1268 * decision whether we have to print a warning or not. The algorithm is as
1269 * follows:
1270 *   o if this is a new UBI image, then just print the warning
1271 *   o if this is an UBI image which has already been used for some time, print
 *     a warning only if we can reserve less than 10% of the expected number of
 *     reserved PEBs.
1274 *
1275 * The idea is that when UBI is used, PEBs become bad, and the reserved pool
1276 * of PEBs becomes smaller, which is normal and we do not want to scare users
1277 * with a warning every time they attach the MTD device. This was an issue
1278 * reported by real users.
1279 */
1280static void print_rsvd_warning(struct ubi_device *ubi,
1281			       struct ubi_attach_info *ai)
1282{
1283	/*
	 * The 1 << 18 (262144) threshold is picked arbitrarily, just a reasonably
	 * large sequence number to distinguish between newly flashed and used
	 * images.
1286	 */
1287	if (ai->max_sqnum > (1 << 18)) {
1288		int min = ubi->beb_rsvd_level / 10;
1289
1290		if (!min)
1291			min = 1;
1292		if (ubi->beb_rsvd_pebs > min)
1293			return;
1294	}
1295
1296	ubi_warn(ubi, "cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
1297		 ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
1298	if (ubi->corr_peb_count)
1299		ubi_warn(ubi, "%d PEBs are corrupted and not used",
1300			 ubi->corr_peb_count);
1301}
1302
1303/**
1304 * self_check_eba - run a self check on the EBA table constructed by fastmap.
1305 * @ubi: UBI device description object
1306 * @ai_fastmap: UBI attach info object created by fastmap
1307 * @ai_scan: UBI attach info object created by scanning
1308 *
1309 * Returns < 0 in case of an internal error, 0 otherwise.
 * If a bad EBA table entry is found, it is printed out and
 * ubi_assert() is triggered.
1312 */
1313int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
1314		   struct ubi_attach_info *ai_scan)
1315{
1316	int i, j, num_volumes, ret = 0;
1317	int **scan_eba, **fm_eba;
1318	struct ubi_ainf_volume *av;
1319	struct ubi_volume *vol;
1320	struct ubi_ainf_peb *aeb;
1321	struct rb_node *rb;
1322
1323	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
1324
1325	scan_eba = kmalloc(sizeof(*scan_eba) * num_volumes, GFP_KERNEL);
1326	if (!scan_eba)
1327		return -ENOMEM;
1328
1329	fm_eba = kmalloc(sizeof(*fm_eba) * num_volumes, GFP_KERNEL);
1330	if (!fm_eba) {
1331		kfree(scan_eba);
1332		return -ENOMEM;
1333	}
1334
1335	for (i = 0; i < num_volumes; i++) {
1336		vol = ubi->volumes[i];
1337		if (!vol)
1338			continue;
1339
1340		scan_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**scan_eba),
1341				      GFP_KERNEL);
1342		if (!scan_eba[i]) {
1343			ret = -ENOMEM;
1344			goto out_free;
1345		}
1346
1347		fm_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**fm_eba),
1348				    GFP_KERNEL);
1349		if (!fm_eba[i]) {
1350			ret = -ENOMEM;
1351			goto out_free;
1352		}
1353
1354		for (j = 0; j < vol->reserved_pebs; j++)
1355			scan_eba[i][j] = fm_eba[i][j] = UBI_LEB_UNMAPPED;
1356
1357		av = ubi_find_av(ai_scan, idx2vol_id(ubi, i));
1358		if (!av)
1359			continue;
1360
1361		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
1362			scan_eba[i][aeb->lnum] = aeb->pnum;
1363
1364		av = ubi_find_av(ai_fastmap, idx2vol_id(ubi, i));
1365		if (!av)
1366			continue;
1367
1368		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
1369			fm_eba[i][aeb->lnum] = aeb->pnum;
1370
1371		for (j = 0; j < vol->reserved_pebs; j++) {
1372			if (scan_eba[i][j] != fm_eba[i][j]) {
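				/*
				 * A mapping present on only one side just means
				 * the LEB is unmapped there; only two conflicting
				 * mappings are reported as an error.
				 */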
1373				if (scan_eba[i][j] == UBI_LEB_UNMAPPED ||
1374					fm_eba[i][j] == UBI_LEB_UNMAPPED)
1375					continue;
1376
				ubi_err(ubi, "LEB:%i:%i is PEB:%i instead of %i!",
					vol->vol_id, j, fm_eba[i][j],
					scan_eba[i][j]);
1380				ubi_assert(0);
1381			}
1382		}
1383	}
1384
1385out_free:
1386	for (i = 0; i < num_volumes; i++) {
1387		if (!ubi->volumes[i])
1388			continue;
1389
1390		kfree(scan_eba[i]);
1391		kfree(fm_eba[i]);
1392	}
1393
1394	kfree(scan_eba);
1395	kfree(fm_eba);
1396	return ret;
1397}
1398
1399/**
1400 * ubi_eba_init - initialize the EBA sub-system using attaching information.
1401 * @ubi: UBI device description object
1402 * @ai: attaching information
1403 *
1404 * This function returns zero in case of success and a negative error code in
1405 * case of failure.
1406 */
1407int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1408{
1409	int i, j, err, num_volumes;
1410	struct ubi_ainf_volume *av;
1411	struct ubi_volume *vol;
1412	struct ubi_ainf_peb *aeb;
1413	struct rb_node *rb;
1414
1415	dbg_eba("initialize EBA sub-system");
1416
1417	spin_lock_init(&ubi->ltree_lock);
1418	mutex_init(&ubi->alc_mutex);
1419	ubi->ltree = RB_ROOT;
1420
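	/* Start the global sequence counter past everything seen during attach */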
1421	ubi->global_sqnum = ai->max_sqnum + 1;
1422	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
1423
1424	for (i = 0; i < num_volumes; i++) {
1425		vol = ubi->volumes[i];
1426		if (!vol)
1427			continue;
1428
1429		cond_resched();
1430
1431		vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int),
1432				       GFP_KERNEL);
1433		if (!vol->eba_tbl) {
1434			err = -ENOMEM;
1435			goto out_free;
1436		}
1437
1438		for (j = 0; j < vol->reserved_pebs; j++)
1439			vol->eba_tbl[j] = UBI_LEB_UNMAPPED;
1440
1441		av = ubi_find_av(ai, idx2vol_id(ubi, i));
1442		if (!av)
1443			continue;
1444
1445		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
1446			if (aeb->lnum >= vol->reserved_pebs)
1447				/*
1448				 * This may happen in case of an unclean reboot
1449				 * during re-size.
1450				 */
1451				ubi_move_aeb_to_list(av, aeb, &ai->erase);
1452			else
1453				vol->eba_tbl[aeb->lnum] = aeb->pnum;
1454		}
1455	}
1456
1457	if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
		ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
1459			ubi->avail_pebs, EBA_RESERVED_PEBS);
1460		if (ubi->corr_peb_count)
1461			ubi_err(ubi, "%d PEBs are corrupted and not used",
1462				ubi->corr_peb_count);
1463		err = -ENOSPC;
1464		goto out_free;
1465	}
1466	ubi->avail_pebs -= EBA_RESERVED_PEBS;
1467	ubi->rsvd_pebs += EBA_RESERVED_PEBS;
1468
1469	if (ubi->bad_allowed) {
1470		ubi_calculate_reserved(ubi);
1471
1472		if (ubi->avail_pebs < ubi->beb_rsvd_level) {
			/* Not enough free physical eraseblocks */
1474			ubi->beb_rsvd_pebs = ubi->avail_pebs;
1475			print_rsvd_warning(ubi, ai);
1476		} else
1477			ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;
1478
1479		ubi->avail_pebs -= ubi->beb_rsvd_pebs;
1480		ubi->rsvd_pebs  += ubi->beb_rsvd_pebs;
1481	}
1482
1483	dbg_eba("EBA sub-system is initialized");
1484	return 0;
1485
1486out_free:
1487	for (i = 0; i < num_volumes; i++) {
1488		if (!ubi->volumes[i])
1489			continue;
1490		kfree(ubi->volumes[i]->eba_tbl);
1491		ubi->volumes[i]->eba_tbl = NULL;
1492	}
1493	return err;
1494}
1495