ubi                79 drivers/mtd/ubi/attach.c static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai);
ubi               330 drivers/mtd/ubi/attach.c static int validate_vid_hdr(const struct ubi_device *ubi,
ubi               349 drivers/mtd/ubi/attach.c 			ubi_err(ubi, "inconsistent vol_id");
ubi               359 drivers/mtd/ubi/attach.c 			ubi_err(ubi, "inconsistent vol_type");
ubi               364 drivers/mtd/ubi/attach.c 			ubi_err(ubi, "inconsistent used_ebs");
ubi               369 drivers/mtd/ubi/attach.c 			ubi_err(ubi, "inconsistent data_pad");
ubi               377 drivers/mtd/ubi/attach.c 	ubi_err(ubi, "inconsistent VID header at PEB %d", pnum);
ubi               438 drivers/mtd/ubi/attach.c int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
ubi               455 drivers/mtd/ubi/attach.c 		ubi_err(ubi, "unsupported on-flash UBI format");
ubi               486 drivers/mtd/ubi/attach.c 		vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
ubi               491 drivers/mtd/ubi/attach.c 		err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 0);
ubi               496 drivers/mtd/ubi/attach.c 				ubi_err(ubi, "VID of PEB %d header is bad, but it was OK earlier, err %d",
ubi               512 drivers/mtd/ubi/attach.c 	mutex_lock(&ubi->buf_mutex);
ubi               513 drivers/mtd/ubi/attach.c 	err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, len);
ubi               518 drivers/mtd/ubi/attach.c 	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, len);
ubi               529 drivers/mtd/ubi/attach.c 	mutex_unlock(&ubi->buf_mutex);
ubi               541 drivers/mtd/ubi/attach.c 	mutex_unlock(&ubi->buf_mutex);
ubi               563 drivers/mtd/ubi/attach.c int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
ubi               626 drivers/mtd/ubi/attach.c 			ubi_err(ubi, "two LEBs with same sequence number %llu",
ubi               637 drivers/mtd/ubi/attach.c 		cmp_res = ubi_compare_lebs(ubi, aeb, pnum, vid_hdr);
ubi               646 drivers/mtd/ubi/attach.c 			err = validate_vid_hdr(ubi, vid_hdr, av, pnum);
ubi               684 drivers/mtd/ubi/attach.c 	err = validate_vid_hdr(ubi, vid_hdr, av, pnum);
ubi               771 drivers/mtd/ubi/attach.c static int early_erase_peb(struct ubi_device *ubi,
ubi               782 drivers/mtd/ubi/attach.c 		ubi_err(ubi, "erase counter overflow at PEB %d, EC %d",
ubi               787 drivers/mtd/ubi/attach.c 	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
ubi               793 drivers/mtd/ubi/attach.c 	err = ubi_io_sync_erase(ubi, pnum, 0);
ubi               797 drivers/mtd/ubi/attach.c 	err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
ubi               818 drivers/mtd/ubi/attach.c struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
ubi               841 drivers/mtd/ubi/attach.c 		err = early_erase_peb(ubi, ai, aeb->pnum, aeb->ec+1);
ubi               851 drivers/mtd/ubi/attach.c 	ubi_err(ubi, "no free eraseblocks");
ubi               872 drivers/mtd/ubi/attach.c static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
ubi               877 drivers/mtd/ubi/attach.c 	mutex_lock(&ubi->buf_mutex);
ubi               878 drivers/mtd/ubi/attach.c 	memset(ubi->peb_buf, 0x00, ubi->leb_size);
ubi               880 drivers/mtd/ubi/attach.c 	err = ubi_io_read(ubi, ubi->peb_buf, pnum, ubi->leb_start,
ubi               881 drivers/mtd/ubi/attach.c 			  ubi->leb_size);
ubi               897 drivers/mtd/ubi/attach.c 	if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size))
ubi               900 drivers/mtd/ubi/attach.c 	ubi_err(ubi, "PEB %d contains corrupted VID header, and the data does not contain all 0xFF",
ubi               902 drivers/mtd/ubi/attach.c 	ubi_err(ubi, "this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection");
ubi               905 drivers/mtd/ubi/attach.c 	       pnum, ubi->leb_start, ubi->leb_size);
ubi               907 drivers/mtd/ubi/attach.c 			       ubi->peb_buf, ubi->leb_size, 1);
ubi               911 drivers/mtd/ubi/attach.c 	mutex_unlock(&ubi->buf_mutex);
ubi               941 drivers/mtd/ubi/attach.c static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
ubi               953 drivers/mtd/ubi/attach.c 	err = ubi_io_is_bad(ubi, pnum);
ubi               961 drivers/mtd/ubi/attach.c 	err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
ubi               990 drivers/mtd/ubi/attach.c 		ubi_err(ubi, "'ubi_io_read_ec_hdr()' returned unknown code %d",
ubi              1000 drivers/mtd/ubi/attach.c 			ubi_err(ubi, "this UBI version is %d, image version is %d",
ubi              1014 drivers/mtd/ubi/attach.c 			ubi_err(ubi, "erase counter overflow, max is %d",
ubi              1032 drivers/mtd/ubi/attach.c 		if (!ubi->image_seq)
ubi              1033 drivers/mtd/ubi/attach.c 			ubi->image_seq = image_seq;
ubi              1034 drivers/mtd/ubi/attach.c 		if (image_seq && ubi->image_seq != image_seq) {
ubi              1035 drivers/mtd/ubi/attach.c 			ubi_err(ubi, "bad image sequence number %d in PEB %d, expected %d",
ubi              1036 drivers/mtd/ubi/attach.c 				image_seq, pnum, ubi->image_seq);
ubi              1044 drivers/mtd/ubi/attach.c 	err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 0);
ubi              1097 drivers/mtd/ubi/attach.c 			err = check_corruption(ubi, vidh, pnum);
ubi              1128 drivers/mtd/ubi/attach.c 		ubi_err(ubi, "'ubi_io_read_vid_hdr()' returned unknown code %d",
ubi              1140 drivers/mtd/ubi/attach.c 			ubi_msg(ubi, "\"delete\" compatible internal volume %d:%d found, will remove it",
ubi              1150 drivers/mtd/ubi/attach.c 			ubi_msg(ubi, "read-only compatible internal volume %d:%d found, switch to read-only mode",
ubi              1152 drivers/mtd/ubi/attach.c 			ubi->ro_mode = 1;
ubi              1156 drivers/mtd/ubi/attach.c 			ubi_msg(ubi, "\"preserve\" compatible internal volume %d:%d found",
ubi              1165 drivers/mtd/ubi/attach.c 			ubi_err(ubi, "incompatible internal volume %d:%d found",
ubi              1172 drivers/mtd/ubi/attach.c 		ubi_warn(ubi, "valid VID header but corrupted EC header at PEB %d",
ubi              1178 drivers/mtd/ubi/attach.c 		err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
ubi              1207 drivers/mtd/ubi/attach.c static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
ubi              1212 drivers/mtd/ubi/attach.c 	peb_count = ubi->peb_count - ai->bad_peb_count - ai->alien_peb_count;
ubi              1221 drivers/mtd/ubi/attach.c 		ubi_err(ubi, "%d PEBs are corrupted and preserved",
ubi              1233 drivers/mtd/ubi/attach.c 			ubi_err(ubi, "too many corrupted PEBs, refusing");
ubi              1256 drivers/mtd/ubi/attach.c 			ubi_msg(ubi, "empty MTD device detected");
ubi              1257 drivers/mtd/ubi/attach.c 			get_random_bytes(&ubi->image_seq,
ubi              1258 drivers/mtd/ubi/attach.c 					 sizeof(ubi->image_seq));
ubi              1260 drivers/mtd/ubi/attach.c 			ubi_err(ubi, "MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it");
ubi              1374 drivers/mtd/ubi/attach.c static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
ubi              1384 drivers/mtd/ubi/attach.c 	ai->ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
ubi              1388 drivers/mtd/ubi/attach.c 	ai->vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
ubi              1392 drivers/mtd/ubi/attach.c 	for (pnum = start; pnum < ubi->peb_count; pnum++) {
ubi              1396 drivers/mtd/ubi/attach.c 		err = scan_peb(ubi, ai, pnum, false);
ubi              1401 drivers/mtd/ubi/attach.c 	ubi_msg(ubi, "scanning is finished");
ubi              1407 drivers/mtd/ubi/attach.c 	err = late_analysis(ubi, ai);
ubi              1434 drivers/mtd/ubi/attach.c 	err = self_check_ai(ubi, ai);
ubi              1487 drivers/mtd/ubi/attach.c static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai)
ubi              1498 drivers/mtd/ubi/attach.c 	scan_ai->ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
ubi              1502 drivers/mtd/ubi/attach.c 	scan_ai->vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
ubi              1510 drivers/mtd/ubi/attach.c 		err = scan_peb(ubi, scan_ai, pnum, true);
ubi              1521 drivers/mtd/ubi/attach.c 		err = ubi_scan_fastmap(ubi, *ai, scan_ai);
ubi              1555 drivers/mtd/ubi/attach.c int ubi_attach(struct ubi_device *ubi, int force_scan)
ubi              1566 drivers/mtd/ubi/attach.c 	if ((int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd) <= UBI_FM_MAX_START) {
ubi              1567 drivers/mtd/ubi/attach.c 		ubi->fm_disabled = 1;
ubi              1572 drivers/mtd/ubi/attach.c 		err = scan_all(ubi, ai, 0);
ubi              1574 drivers/mtd/ubi/attach.c 		err = scan_fast(ubi, &ai);
ubi              1582 drivers/mtd/ubi/attach.c 				err = scan_all(ubi, ai, 0);
ubi              1584 drivers/mtd/ubi/attach.c 				err = scan_all(ubi, ai, UBI_FM_MAX_START);
ubi              1589 drivers/mtd/ubi/attach.c 	err = scan_all(ubi, ai, 0);
ubi              1594 drivers/mtd/ubi/attach.c 	ubi->bad_peb_count = ai->bad_peb_count;
ubi              1595 drivers/mtd/ubi/attach.c 	ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
ubi              1596 drivers/mtd/ubi/attach.c 	ubi->corr_peb_count = ai->corr_peb_count;
ubi              1597 drivers/mtd/ubi/attach.c 	ubi->max_ec = ai->max_ec;
ubi              1598 drivers/mtd/ubi/attach.c 	ubi->mean_ec = ai->mean_ec;
ubi              1601 drivers/mtd/ubi/attach.c 	err = ubi_read_volume_table(ubi, ai);
ubi              1605 drivers/mtd/ubi/attach.c 	err = ubi_wl_init(ubi, ai);
ubi              1609 drivers/mtd/ubi/attach.c 	err = ubi_eba_init(ubi, ai);
ubi              1614 drivers/mtd/ubi/attach.c 	if (ubi->fm && ubi_dbg_chk_fastmap(ubi)) {
ubi              1623 drivers/mtd/ubi/attach.c 		err = scan_all(ubi, scan_ai, 0);
ubi              1629 drivers/mtd/ubi/attach.c 		err = self_check_eba(ubi, ai, scan_ai);
ubi              1641 drivers/mtd/ubi/attach.c 	ubi_wl_close(ubi);
ubi              1643 drivers/mtd/ubi/attach.c 	ubi_free_internal_volumes(ubi);
ubi              1644 drivers/mtd/ubi/attach.c 	vfree(ubi->vtbl);
ubi              1658 drivers/mtd/ubi/attach.c static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
ubi              1668 drivers/mtd/ubi/attach.c 	if (!ubi_dbg_chk_gen(ubi))
ubi              1682 drivers/mtd/ubi/attach.c 			ubi_err(ubi, "bad is_empty flag");
ubi              1689 drivers/mtd/ubi/attach.c 			ubi_err(ubi, "negative values");
ubi              1695 drivers/mtd/ubi/attach.c 			ubi_err(ubi, "bad vol_id");
ubi              1700 drivers/mtd/ubi/attach.c 			ubi_err(ubi, "highest_vol_id is %d, but vol_id %d is there",
ubi              1707 drivers/mtd/ubi/attach.c 			ubi_err(ubi, "bad vol_type");
ubi              1711 drivers/mtd/ubi/attach.c 		if (av->data_pad > ubi->leb_size / 2) {
ubi              1712 drivers/mtd/ubi/attach.c 			ubi_err(ubi, "bad data_pad");
ubi              1724 drivers/mtd/ubi/attach.c 				ubi_err(ubi, "negative values");
ubi              1729 drivers/mtd/ubi/attach.c 				ubi_err(ubi, "bad ai->min_ec (%d), %d found",
ubi              1735 drivers/mtd/ubi/attach.c 				ubi_err(ubi, "bad ai->max_ec (%d), %d found",
ubi              1740 drivers/mtd/ubi/attach.c 			if (aeb->pnum >= ubi->peb_count) {
ubi              1741 drivers/mtd/ubi/attach.c 				ubi_err(ubi, "too high PEB number %d, total PEBs %d",
ubi              1742 drivers/mtd/ubi/attach.c 					aeb->pnum, ubi->peb_count);
ubi              1748 drivers/mtd/ubi/attach.c 					ubi_err(ubi, "bad lnum or used_ebs");
ubi              1753 drivers/mtd/ubi/attach.c 					ubi_err(ubi, "non-zero used_ebs");
ubi              1759 drivers/mtd/ubi/attach.c 				ubi_err(ubi, "incorrect highest_lnum or lnum");
ubi              1765 drivers/mtd/ubi/attach.c 			ubi_err(ubi, "bad leb_count, %d objects in the tree",
ubi              1776 drivers/mtd/ubi/attach.c 			ubi_err(ubi, "bad highest_lnum");
ubi              1782 drivers/mtd/ubi/attach.c 		ubi_err(ubi, "bad ai->vols_found %d, should be %d",
ubi              1797 drivers/mtd/ubi/attach.c 			err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidb, 1);
ubi              1799 drivers/mtd/ubi/attach.c 				ubi_err(ubi, "VID header is not OK (%d)",
ubi              1809 drivers/mtd/ubi/attach.c 				ubi_err(ubi, "bad vol_type");
ubi              1814 drivers/mtd/ubi/attach.c 				ubi_err(ubi, "bad sqnum %llu", aeb->sqnum);
ubi              1819 drivers/mtd/ubi/attach.c 				ubi_err(ubi, "bad vol_id %d", av->vol_id);
ubi              1824 drivers/mtd/ubi/attach.c 				ubi_err(ubi, "bad compat %d", vidh->compat);
ubi              1829 drivers/mtd/ubi/attach.c 				ubi_err(ubi, "bad lnum %d", aeb->lnum);
ubi              1834 drivers/mtd/ubi/attach.c 				ubi_err(ubi, "bad used_ebs %d", av->used_ebs);
ubi              1839 drivers/mtd/ubi/attach.c 				ubi_err(ubi, "bad data_pad %d", av->data_pad);
ubi              1848 drivers/mtd/ubi/attach.c 			ubi_err(ubi, "bad highest_lnum %d", av->highest_lnum);
ubi              1853 drivers/mtd/ubi/attach.c 			ubi_err(ubi, "bad last_data_size %d",
ubi              1863 drivers/mtd/ubi/attach.c 	buf = kzalloc(ubi->peb_count, GFP_KERNEL);
ubi              1867 drivers/mtd/ubi/attach.c 	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
ubi              1868 drivers/mtd/ubi/attach.c 		err = ubi_io_is_bad(ubi, pnum);
ubi              1893 drivers/mtd/ubi/attach.c 	for (pnum = 0; pnum < ubi->peb_count; pnum++)
ubi              1895 drivers/mtd/ubi/attach.c 			ubi_err(ubi, "PEB %d is not referred", pnum);
ubi              1905 drivers/mtd/ubi/attach.c 	ubi_err(ubi, "bad attaching information about LEB %d", aeb->lnum);
ubi              1911 drivers/mtd/ubi/attach.c 	ubi_err(ubi, "bad attaching information about volume %d", av->vol_id);
ubi              1916 drivers/mtd/ubi/attach.c 	ubi_err(ubi, "bad attaching information about volume %d", av->vol_id);
ubi               154 drivers/mtd/ubi/build.c int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
ubi               159 drivers/mtd/ubi/build.c 	ubi_do_get_device_info(ubi, &nt.di);
ubi               160 drivers/mtd/ubi/build.c 	ubi_do_get_volume_info(ubi, vol, &nt.vi);
ubi               167 drivers/mtd/ubi/build.c 		ret = ubi_update_fastmap(ubi);
ubi               169 drivers/mtd/ubi/build.c 			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
ubi               186 drivers/mtd/ubi/build.c int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb)
ubi               191 drivers/mtd/ubi/build.c 	ubi_do_get_device_info(ubi, &nt.di);
ubi               193 drivers/mtd/ubi/build.c 	mutex_lock(&ubi->device_mutex);
ubi               194 drivers/mtd/ubi/build.c 	for (i = 0; i < ubi->vtbl_slots; i++) {
ubi               200 drivers/mtd/ubi/build.c 		if (!ubi->volumes[i])
ubi               203 drivers/mtd/ubi/build.c 		ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi);
ubi               211 drivers/mtd/ubi/build.c 	mutex_unlock(&ubi->device_mutex);
ubi               234 drivers/mtd/ubi/build.c 		struct ubi_device *ubi = ubi_devices[i];
ubi               236 drivers/mtd/ubi/build.c 		if (!ubi)
ubi               238 drivers/mtd/ubi/build.c 		count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb);
ubi               255 drivers/mtd/ubi/build.c 	struct ubi_device *ubi;
ubi               258 drivers/mtd/ubi/build.c 	ubi = ubi_devices[ubi_num];
ubi               259 drivers/mtd/ubi/build.c 	if (ubi) {
ubi               260 drivers/mtd/ubi/build.c 		ubi_assert(ubi->ref_count >= 0);
ubi               261 drivers/mtd/ubi/build.c 		ubi->ref_count += 1;
ubi               262 drivers/mtd/ubi/build.c 		get_device(&ubi->dev);
ubi               266 drivers/mtd/ubi/build.c 	return ubi;
ubi               273 drivers/mtd/ubi/build.c void ubi_put_device(struct ubi_device *ubi)
ubi               276 drivers/mtd/ubi/build.c 	ubi->ref_count -= 1;
ubi               277 drivers/mtd/ubi/build.c 	put_device(&ubi->dev);
ubi               291 drivers/mtd/ubi/build.c 	struct ubi_device *ubi;
ubi               295 drivers/mtd/ubi/build.c 		ubi = ubi_devices[i];
ubi               296 drivers/mtd/ubi/build.c 		if (ubi && MAJOR(ubi->cdev.dev) == major) {
ubi               297 drivers/mtd/ubi/build.c 			ubi_assert(ubi->ref_count >= 0);
ubi               298 drivers/mtd/ubi/build.c 			ubi->ref_count += 1;
ubi               299 drivers/mtd/ubi/build.c 			get_device(&ubi->dev);
ubi               301 drivers/mtd/ubi/build.c 			return ubi;
ubi               323 drivers/mtd/ubi/build.c 		struct ubi_device *ubi = ubi_devices[i];
ubi               325 drivers/mtd/ubi/build.c 		if (ubi && MAJOR(ubi->cdev.dev) == major) {
ubi               326 drivers/mtd/ubi/build.c 			ubi_num = ubi->ubi_num;
ubi               340 drivers/mtd/ubi/build.c 	struct ubi_device *ubi;
ubi               352 drivers/mtd/ubi/build.c 	ubi = container_of(dev, struct ubi_device, dev);
ubi               353 drivers/mtd/ubi/build.c 	ubi = ubi_get_device(ubi->ubi_num);
ubi               354 drivers/mtd/ubi/build.c 	if (!ubi)
ubi               358 drivers/mtd/ubi/build.c 		ret = sprintf(buf, "%d\n", ubi->leb_size);
ubi               360 drivers/mtd/ubi/build.c 		ret = sprintf(buf, "%d\n", ubi->avail_pebs);
ubi               362 drivers/mtd/ubi/build.c 		ret = sprintf(buf, "%d\n", ubi->good_peb_count);
ubi               364 drivers/mtd/ubi/build.c 		ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
ubi               366 drivers/mtd/ubi/build.c 		ret = sprintf(buf, "%d\n", ubi->max_ec);
ubi               368 drivers/mtd/ubi/build.c 		ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
ubi               370 drivers/mtd/ubi/build.c 		ret = sprintf(buf, "%d\n", ubi->bad_peb_count);
ubi               372 drivers/mtd/ubi/build.c 		ret = sprintf(buf, "%d\n", ubi->vtbl_slots);
ubi               374 drivers/mtd/ubi/build.c 		ret = sprintf(buf, "%d\n", ubi->min_io_size);
ubi               376 drivers/mtd/ubi/build.c 		ret = sprintf(buf, "%d\n", ubi->thread_enabled);
ubi               378 drivers/mtd/ubi/build.c 		ret = sprintf(buf, "%d\n", ubi->mtd->index);
ubi               380 drivers/mtd/ubi/build.c 		ret = sprintf(buf, "%d\n", ubi->ro_mode);
ubi               384 drivers/mtd/ubi/build.c 	ubi_put_device(ubi);
ubi               407 drivers/mtd/ubi/build.c 	struct ubi_device *ubi = container_of(dev, struct ubi_device, dev);
ubi               409 drivers/mtd/ubi/build.c 	kfree(ubi);
ubi               416 drivers/mtd/ubi/build.c static void kill_volumes(struct ubi_device *ubi)
ubi               420 drivers/mtd/ubi/build.c 	for (i = 0; i < ubi->vtbl_slots; i++)
ubi               421 drivers/mtd/ubi/build.c 		if (ubi->volumes[i])
ubi               422 drivers/mtd/ubi/build.c 			ubi_free_volume(ubi, ubi->volumes[i]);
ubi               436 drivers/mtd/ubi/build.c static int uif_init(struct ubi_device *ubi)
ubi               441 drivers/mtd/ubi/build.c 	sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);
ubi               451 drivers/mtd/ubi/build.c 	err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name);
ubi               453 drivers/mtd/ubi/build.c 		ubi_err(ubi, "cannot register UBI character devices");
ubi               457 drivers/mtd/ubi/build.c 	ubi->dev.devt = dev;
ubi               460 drivers/mtd/ubi/build.c 	cdev_init(&ubi->cdev, &ubi_cdev_operations);
ubi               461 drivers/mtd/ubi/build.c 	dbg_gen("%s major is %u", ubi->ubi_name, MAJOR(dev));
ubi               462 drivers/mtd/ubi/build.c 	ubi->cdev.owner = THIS_MODULE;
ubi               464 drivers/mtd/ubi/build.c 	dev_set_name(&ubi->dev, UBI_NAME_STR "%d", ubi->ubi_num);
ubi               465 drivers/mtd/ubi/build.c 	err = cdev_device_add(&ubi->cdev, &ubi->dev);
ubi               469 drivers/mtd/ubi/build.c 	for (i = 0; i < ubi->vtbl_slots; i++)
ubi               470 drivers/mtd/ubi/build.c 		if (ubi->volumes[i]) {
ubi               471 drivers/mtd/ubi/build.c 			err = ubi_add_volume(ubi, ubi->volumes[i]);
ubi               473 drivers/mtd/ubi/build.c 				ubi_err(ubi, "cannot add volume %d", i);
ubi               481 drivers/mtd/ubi/build.c 	kill_volumes(ubi);
ubi               482 drivers/mtd/ubi/build.c 	cdev_device_del(&ubi->cdev, &ubi->dev);
ubi               484 drivers/mtd/ubi/build.c 	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
ubi               485 drivers/mtd/ubi/build.c 	ubi_err(ubi, "cannot initialize UBI %s, error %d",
ubi               486 drivers/mtd/ubi/build.c 		ubi->ubi_name, err);
ubi               498 drivers/mtd/ubi/build.c static void uif_close(struct ubi_device *ubi)
ubi               500 drivers/mtd/ubi/build.c 	kill_volumes(ubi);
ubi               501 drivers/mtd/ubi/build.c 	cdev_device_del(&ubi->cdev, &ubi->dev);
ubi               502 drivers/mtd/ubi/build.c 	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
ubi               509 drivers/mtd/ubi/build.c void ubi_free_internal_volumes(struct ubi_device *ubi)
ubi               513 drivers/mtd/ubi/build.c 	for (i = ubi->vtbl_slots;
ubi               514 drivers/mtd/ubi/build.c 	     i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
ubi               515 drivers/mtd/ubi/build.c 		ubi_eba_replace_table(ubi->volumes[i], NULL);
ubi               516 drivers/mtd/ubi/build.c 		ubi_fastmap_destroy_checkmap(ubi->volumes[i]);
ubi               517 drivers/mtd/ubi/build.c 		kfree(ubi->volumes[i]);
ubi               521 drivers/mtd/ubi/build.c static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
ubi               532 drivers/mtd/ubi/build.c 		limit = mtd_max_bad_blocks(ubi->mtd, 0, ubi->mtd->size);
ubi               547 drivers/mtd/ubi/build.c 	device_size = mtd_get_device_size(ubi->mtd);
ubi               548 drivers/mtd/ubi/build.c 	device_pebs = mtd_div_by_eb(device_size, ubi->mtd);
ubi               574 drivers/mtd/ubi/build.c static int io_init(struct ubi_device *ubi, int max_beb_per1024)
ubi               579 drivers/mtd/ubi/build.c 	if (ubi->mtd->numeraseregions != 0) {
ubi               589 drivers/mtd/ubi/build.c 		ubi_err(ubi, "multiple regions, not implemented");
ubi               593 drivers/mtd/ubi/build.c 	if (ubi->vid_hdr_offset < 0)
ubi               601 drivers/mtd/ubi/build.c 	ubi->peb_size   = ubi->mtd->erasesize;
ubi               602 drivers/mtd/ubi/build.c 	ubi->peb_count  = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
ubi               603 drivers/mtd/ubi/build.c 	ubi->flash_size = ubi->mtd->size;
ubi               605 drivers/mtd/ubi/build.c 	if (mtd_can_have_bb(ubi->mtd)) {
ubi               606 drivers/mtd/ubi/build.c 		ubi->bad_allowed = 1;
ubi               607 drivers/mtd/ubi/build.c 		ubi->bad_peb_limit = get_bad_peb_limit(ubi, max_beb_per1024);
ubi               610 drivers/mtd/ubi/build.c 	if (ubi->mtd->type == MTD_NORFLASH) {
ubi               611 drivers/mtd/ubi/build.c 		ubi_assert(ubi->mtd->writesize == 1);
ubi               612 drivers/mtd/ubi/build.c 		ubi->nor_flash = 1;
ubi               615 drivers/mtd/ubi/build.c 	ubi->min_io_size = ubi->mtd->writesize;
ubi               616 drivers/mtd/ubi/build.c 	ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
ubi               623 drivers/mtd/ubi/build.c 	if (!is_power_of_2(ubi->min_io_size)) {
ubi               624 drivers/mtd/ubi/build.c 		ubi_err(ubi, "min. I/O unit (%d) is not power of 2",
ubi               625 drivers/mtd/ubi/build.c 			ubi->min_io_size);
ubi               629 drivers/mtd/ubi/build.c 	ubi_assert(ubi->hdrs_min_io_size > 0);
ubi               630 drivers/mtd/ubi/build.c 	ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
ubi               631 drivers/mtd/ubi/build.c 	ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);
ubi               633 drivers/mtd/ubi/build.c 	ubi->max_write_size = ubi->mtd->writebufsize;
ubi               638 drivers/mtd/ubi/build.c 	if (ubi->max_write_size < ubi->min_io_size ||
ubi               639 drivers/mtd/ubi/build.c 	    ubi->max_write_size % ubi->min_io_size ||
ubi               640 drivers/mtd/ubi/build.c 	    !is_power_of_2(ubi->max_write_size)) {
ubi               641 drivers/mtd/ubi/build.c 		ubi_err(ubi, "bad write buffer size %d for %d min. I/O unit",
ubi               642 drivers/mtd/ubi/build.c 			ubi->max_write_size, ubi->min_io_size);
ubi               647 drivers/mtd/ubi/build.c 	ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
ubi               648 drivers/mtd/ubi/build.c 	ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
ubi               650 drivers/mtd/ubi/build.c 	dbg_gen("min_io_size      %d", ubi->min_io_size);
ubi               651 drivers/mtd/ubi/build.c 	dbg_gen("max_write_size   %d", ubi->max_write_size);
ubi               652 drivers/mtd/ubi/build.c 	dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
ubi               653 drivers/mtd/ubi/build.c 	dbg_gen("ec_hdr_alsize    %d", ubi->ec_hdr_alsize);
ubi               654 drivers/mtd/ubi/build.c 	dbg_gen("vid_hdr_alsize   %d", ubi->vid_hdr_alsize);
ubi               656 drivers/mtd/ubi/build.c 	if (ubi->vid_hdr_offset == 0)
ubi               658 drivers/mtd/ubi/build.c 		ubi->vid_hdr_offset = ubi->vid_hdr_aloffset =
ubi               659 drivers/mtd/ubi/build.c 				      ubi->ec_hdr_alsize;
ubi               661 drivers/mtd/ubi/build.c 		ubi->vid_hdr_aloffset = ubi->vid_hdr_offset &
ubi               662 drivers/mtd/ubi/build.c 						~(ubi->hdrs_min_io_size - 1);
ubi               663 drivers/mtd/ubi/build.c 		ubi->vid_hdr_shift = ubi->vid_hdr_offset -
ubi               664 drivers/mtd/ubi/build.c 						ubi->vid_hdr_aloffset;
ubi               668 drivers/mtd/ubi/build.c 	ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
ubi               669 drivers/mtd/ubi/build.c 	ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
ubi               671 drivers/mtd/ubi/build.c 	dbg_gen("vid_hdr_offset   %d", ubi->vid_hdr_offset);
ubi               672 drivers/mtd/ubi/build.c 	dbg_gen("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
ubi               673 drivers/mtd/ubi/build.c 	dbg_gen("vid_hdr_shift    %d", ubi->vid_hdr_shift);
ubi               674 drivers/mtd/ubi/build.c 	dbg_gen("leb_start        %d", ubi->leb_start);
ubi               677 drivers/mtd/ubi/build.c 	if (ubi->vid_hdr_shift % 4) {
ubi               678 drivers/mtd/ubi/build.c 		ubi_err(ubi, "unaligned VID header shift %d",
ubi               679 drivers/mtd/ubi/build.c 			ubi->vid_hdr_shift);
ubi               684 drivers/mtd/ubi/build.c 	if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE ||
ubi               685 drivers/mtd/ubi/build.c 	    ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
ubi               686 drivers/mtd/ubi/build.c 	    ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
ubi               687 drivers/mtd/ubi/build.c 	    ubi->leb_start & (ubi->min_io_size - 1)) {
ubi               688 drivers/mtd/ubi/build.c 		ubi_err(ubi, "bad VID header (%d) or data offsets (%d)",
ubi               689 drivers/mtd/ubi/build.c 			ubi->vid_hdr_offset, ubi->leb_start);
ubi               697 drivers/mtd/ubi/build.c 	ubi->max_erroneous = ubi->peb_count / 10;
ubi               698 drivers/mtd/ubi/build.c 	if (ubi->max_erroneous < 16)
ubi               699 drivers/mtd/ubi/build.c 		ubi->max_erroneous = 16;
ubi               700 drivers/mtd/ubi/build.c 	dbg_gen("max_erroneous    %d", ubi->max_erroneous);
ubi               707 drivers/mtd/ubi/build.c 	if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
ubi               708 drivers/mtd/ubi/build.c 		ubi_warn(ubi, "EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
ubi               709 drivers/mtd/ubi/build.c 		ubi->ro_mode = 1;
ubi               712 drivers/mtd/ubi/build.c 	ubi->leb_size = ubi->peb_size - ubi->leb_start;
ubi               714 drivers/mtd/ubi/build.c 	if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
ubi               715 drivers/mtd/ubi/build.c 		ubi_msg(ubi, "MTD device %d is write-protected, attach in read-only mode",
ubi               716 drivers/mtd/ubi/build.c 			ubi->mtd->index);
ubi               717 drivers/mtd/ubi/build.c 		ubi->ro_mode = 1;
ubi               741 drivers/mtd/ubi/build.c static int autoresize(struct ubi_device *ubi, int vol_id)
ubi               744 drivers/mtd/ubi/build.c 	struct ubi_volume *vol = ubi->volumes[vol_id];
ubi               747 drivers/mtd/ubi/build.c 	if (ubi->ro_mode) {
ubi               748 drivers/mtd/ubi/build.c 		ubi_warn(ubi, "skip auto-resize because of R/O mode");
ubi               757 drivers/mtd/ubi/build.c 	ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;
ubi               759 drivers/mtd/ubi/build.c 	if (ubi->avail_pebs == 0) {
ubi               766 drivers/mtd/ubi/build.c 		vtbl_rec = ubi->vtbl[vol_id];
ubi               767 drivers/mtd/ubi/build.c 		err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
ubi               769 drivers/mtd/ubi/build.c 			ubi_err(ubi, "cannot clean auto-resize flag for volume %d",
ubi               774 drivers/mtd/ubi/build.c 					old_reserved_pebs + ubi->avail_pebs);
ubi               776 drivers/mtd/ubi/build.c 			ubi_err(ubi, "cannot auto-resize volume %d",
ubi               783 drivers/mtd/ubi/build.c 	ubi_msg(ubi, "volume %d (\"%s\") re-sized from %d to %d LEBs",
ubi               807 drivers/mtd/ubi/build.c 	struct ubi_device *ubi;
ubi               823 drivers/mtd/ubi/build.c 		ubi = ubi_devices[i];
ubi               824 drivers/mtd/ubi/build.c 		if (ubi && mtd->index == ubi->mtd->index) {
ubi               877 drivers/mtd/ubi/build.c 	ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
ubi               878 drivers/mtd/ubi/build.c 	if (!ubi)
ubi               881 drivers/mtd/ubi/build.c 	device_initialize(&ubi->dev);
ubi               882 drivers/mtd/ubi/build.c 	ubi->dev.release = dev_release;
ubi               883 drivers/mtd/ubi/build.c 	ubi->dev.class = &ubi_class;
ubi               884 drivers/mtd/ubi/build.c 	ubi->dev.groups = ubi_dev_groups;
ubi               886 drivers/mtd/ubi/build.c 	ubi->mtd = mtd;
ubi               887 drivers/mtd/ubi/build.c 	ubi->ubi_num = ubi_num;
ubi               888 drivers/mtd/ubi/build.c 	ubi->vid_hdr_offset = vid_hdr_offset;
ubi               889 drivers/mtd/ubi/build.c 	ubi->autoresize_vol_id = -1;
ubi               892 drivers/mtd/ubi/build.c 	ubi->fm_pool.used = ubi->fm_pool.size = 0;
ubi               893 drivers/mtd/ubi/build.c 	ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0;
ubi               899 drivers/mtd/ubi/build.c 	ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size,
ubi               900 drivers/mtd/ubi/build.c 		ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE);
ubi               901 drivers/mtd/ubi/build.c 	ubi->fm_pool.max_size = max(ubi->fm_pool.max_size,
ubi               904 drivers/mtd/ubi/build.c 	ubi->fm_wl_pool.max_size = ubi->fm_pool.max_size / 2;
ubi               905 drivers/mtd/ubi/build.c 	ubi->fm_disabled = !fm_autoconvert;
ubi               907 drivers/mtd/ubi/build.c 		ubi_enable_dbg_chk_fastmap(ubi);
ubi               909 drivers/mtd/ubi/build.c 	if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
ubi               911 drivers/mtd/ubi/build.c 		ubi_err(ubi, "More than %i PEBs are needed for fastmap, sorry.",
ubi               913 drivers/mtd/ubi/build.c 		ubi->fm_disabled = 1;
ubi               916 drivers/mtd/ubi/build.c 	ubi_msg(ubi, "default fastmap pool size: %d", ubi->fm_pool.max_size);
ubi               917 drivers/mtd/ubi/build.c 	ubi_msg(ubi, "default fastmap WL pool size: %d",
ubi               918 drivers/mtd/ubi/build.c 		ubi->fm_wl_pool.max_size);
ubi               920 drivers/mtd/ubi/build.c 	ubi->fm_disabled = 1;
ubi               922 drivers/mtd/ubi/build.c 	mutex_init(&ubi->buf_mutex);
ubi               923 drivers/mtd/ubi/build.c 	mutex_init(&ubi->ckvol_mutex);
ubi               924 drivers/mtd/ubi/build.c 	mutex_init(&ubi->device_mutex);
ubi               925 drivers/mtd/ubi/build.c 	spin_lock_init(&ubi->volumes_lock);
ubi               926 drivers/mtd/ubi/build.c 	init_rwsem(&ubi->fm_protect);
ubi               927 drivers/mtd/ubi/build.c 	init_rwsem(&ubi->fm_eba_sem);
ubi               929 drivers/mtd/ubi/build.c 	ubi_msg(ubi, "attaching mtd%d", mtd->index);
ubi               931 drivers/mtd/ubi/build.c 	err = io_init(ubi, max_beb_per1024);
ubi               936 drivers/mtd/ubi/build.c 	ubi->peb_buf = vmalloc(ubi->peb_size);
ubi               937 drivers/mtd/ubi/build.c 	if (!ubi->peb_buf)
ubi               941 drivers/mtd/ubi/build.c 	ubi->fm_size = ubi_calc_fm_size(ubi);
ubi               942 drivers/mtd/ubi/build.c 	ubi->fm_buf = vzalloc(ubi->fm_size);
ubi               943 drivers/mtd/ubi/build.c 	if (!ubi->fm_buf)
ubi               946 drivers/mtd/ubi/build.c 	err = ubi_attach(ubi, 0);
ubi               948 drivers/mtd/ubi/build.c 		ubi_err(ubi, "failed to attach mtd%d, error %d",
ubi               953 drivers/mtd/ubi/build.c 	if (ubi->autoresize_vol_id != -1) {
ubi               954 drivers/mtd/ubi/build.c 		err = autoresize(ubi, ubi->autoresize_vol_id);
ubi               960 drivers/mtd/ubi/build.c 	ubi_devices[ubi_num] = ubi;
ubi               962 drivers/mtd/ubi/build.c 	err = uif_init(ubi);
ubi               966 drivers/mtd/ubi/build.c 	err = ubi_debugfs_init_dev(ubi);
ubi               970 drivers/mtd/ubi/build.c 	ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name);
ubi               971 drivers/mtd/ubi/build.c 	if (IS_ERR(ubi->bgt_thread)) {
ubi               972 drivers/mtd/ubi/build.c 		err = PTR_ERR(ubi->bgt_thread);
ubi               973 drivers/mtd/ubi/build.c 		ubi_err(ubi, "cannot spawn \"%s\", error %d",
ubi               974 drivers/mtd/ubi/build.c 			ubi->bgt_name, err);
ubi               978 drivers/mtd/ubi/build.c 	ubi_msg(ubi, "attached mtd%d (name \"%s\", size %llu MiB)",
ubi               979 drivers/mtd/ubi/build.c 		mtd->index, mtd->name, ubi->flash_size >> 20);
ubi               980 drivers/mtd/ubi/build.c 	ubi_msg(ubi, "PEB size: %d bytes (%d KiB), LEB size: %d bytes",
ubi               981 drivers/mtd/ubi/build.c 		ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size);
ubi               982 drivers/mtd/ubi/build.c 	ubi_msg(ubi, "min./max. I/O unit sizes: %d/%d, sub-page size %d",
ubi               983 drivers/mtd/ubi/build.c 		ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size);
ubi               984 drivers/mtd/ubi/build.c 	ubi_msg(ubi, "VID header offset: %d (aligned %d), data offset: %d",
ubi               985 drivers/mtd/ubi/build.c 		ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start);
ubi               986 drivers/mtd/ubi/build.c 	ubi_msg(ubi, "good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
ubi               987 drivers/mtd/ubi/build.c 		ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count);
ubi               988 drivers/mtd/ubi/build.c 	ubi_msg(ubi, "user volume: %d, internal volumes: %d, max. volumes count: %d",
ubi               989 drivers/mtd/ubi/build.c 		ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT,
ubi               990 drivers/mtd/ubi/build.c 		ubi->vtbl_slots);
ubi               991 drivers/mtd/ubi/build.c 	ubi_msg(ubi, "max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
ubi               992 drivers/mtd/ubi/build.c 		ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD,
ubi               993 drivers/mtd/ubi/build.c 		ubi->image_seq);
ubi               994 drivers/mtd/ubi/build.c 	ubi_msg(ubi, "available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
ubi               995 drivers/mtd/ubi/build.c 		ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);
ubi              1001 drivers/mtd/ubi/build.c 	spin_lock(&ubi->wl_lock);
ubi              1002 drivers/mtd/ubi/build.c 	ubi->thread_enabled = 1;
ubi              1003 drivers/mtd/ubi/build.c 	wake_up_process(ubi->bgt_thread);
ubi              1004 drivers/mtd/ubi/build.c 	spin_unlock(&ubi->wl_lock);
ubi              1006 drivers/mtd/ubi/build.c 	ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
ubi              1010 drivers/mtd/ubi/build.c 	ubi_debugfs_exit_dev(ubi);
ubi              1012 drivers/mtd/ubi/build.c 	uif_close(ubi);
ubi              1015 drivers/mtd/ubi/build.c 	ubi_wl_close(ubi);
ubi              1016 drivers/mtd/ubi/build.c 	ubi_free_internal_volumes(ubi);
ubi              1017 drivers/mtd/ubi/build.c 	vfree(ubi->vtbl);
ubi              1019 drivers/mtd/ubi/build.c 	vfree(ubi->peb_buf);
ubi              1020 drivers/mtd/ubi/build.c 	vfree(ubi->fm_buf);
ubi              1021 drivers/mtd/ubi/build.c 	put_device(&ubi->dev);
ubi              1040 drivers/mtd/ubi/build.c 	struct ubi_device *ubi;
ubi              1045 drivers/mtd/ubi/build.c 	ubi = ubi_get_device(ubi_num);
ubi              1046 drivers/mtd/ubi/build.c 	if (!ubi)
ubi              1050 drivers/mtd/ubi/build.c 	put_device(&ubi->dev);
ubi              1051 drivers/mtd/ubi/build.c 	ubi->ref_count -= 1;
ubi              1052 drivers/mtd/ubi/build.c 	if (ubi->ref_count) {
ubi              1058 drivers/mtd/ubi/build.c 		ubi_err(ubi, "%s reference count %d, destroy anyway",
ubi              1059 drivers/mtd/ubi/build.c 			ubi->ubi_name, ubi->ref_count);
ubi              1064 drivers/mtd/ubi/build.c 	ubi_assert(ubi_num == ubi->ubi_num);
ubi              1065 drivers/mtd/ubi/build.c 	ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
ubi              1066 drivers/mtd/ubi/build.c 	ubi_msg(ubi, "detaching mtd%d", ubi->mtd->index);
ubi              1072 drivers/mtd/ubi/build.c 	if (!ubi_dbg_chk_fastmap(ubi))
ubi              1073 drivers/mtd/ubi/build.c 		ubi_update_fastmap(ubi);
ubi              1079 drivers/mtd/ubi/build.c 	if (ubi->bgt_thread)
ubi              1080 drivers/mtd/ubi/build.c 		kthread_stop(ubi->bgt_thread);
ubi              1083 drivers/mtd/ubi/build.c 	cancel_work_sync(&ubi->fm_work);
ubi              1085 drivers/mtd/ubi/build.c 	ubi_debugfs_exit_dev(ubi);
ubi              1086 drivers/mtd/ubi/build.c 	uif_close(ubi);
ubi              1088 drivers/mtd/ubi/build.c 	ubi_wl_close(ubi);
ubi              1089 drivers/mtd/ubi/build.c 	ubi_free_internal_volumes(ubi);
ubi              1090 drivers/mtd/ubi/build.c 	vfree(ubi->vtbl);
ubi              1091 drivers/mtd/ubi/build.c 	vfree(ubi->peb_buf);
ubi              1092 drivers/mtd/ubi/build.c 	vfree(ubi->fm_buf);
ubi              1093 drivers/mtd/ubi/build.c 	ubi_msg(ubi, "mtd%d is detached", ubi->mtd->index);
ubi              1094 drivers/mtd/ubi/build.c 	put_mtd_device(ubi->mtd);
ubi              1095 drivers/mtd/ubi/build.c 	put_device(&ubi->dev);
ubi                49 drivers/mtd/ubi/cdev.c 	spin_lock(&vol->ubi->volumes_lock);
ubi                53 drivers/mtd/ubi/cdev.c 		ubi_err(vol->ubi, "%d users for volume %d", users, vol->vol_id);
ubi                61 drivers/mtd/ubi/cdev.c 	spin_unlock(&vol->ubi->volumes_lock);
ubi                75 drivers/mtd/ubi/cdev.c 	spin_lock(&vol->ubi->volumes_lock);
ubi                87 drivers/mtd/ubi/cdev.c 	spin_unlock(&vol->ubi->volumes_lock);
ubi               123 drivers/mtd/ubi/cdev.c 		vol->ubi->ubi_num, vol->vol_id, desc->mode);
ubi               126 drivers/mtd/ubi/cdev.c 		ubi_warn(vol->ubi, "update of volume %d not finished, volume is damaged",
ubi               133 drivers/mtd/ubi/cdev.c 			vol->upd_received, vol->upd_bytes, vol->ubi->ubi_num,
ubi               150 drivers/mtd/ubi/cdev.c 		ubi_err(vol->ubi, "updating");
ubi               161 drivers/mtd/ubi/cdev.c 	struct ubi_device *ubi = desc->vol->ubi;
ubi               165 drivers/mtd/ubi/cdev.c 	err = ubi_sync(ubi->ubi_num);
ubi               176 drivers/mtd/ubi/cdev.c 	struct ubi_device *ubi = vol->ubi;
ubi               185 drivers/mtd/ubi/cdev.c 		ubi_err(vol->ubi, "updating");
ubi               189 drivers/mtd/ubi/cdev.c 		ubi_err(vol->ubi, "damaged volume, update marker is set");
ubi               203 drivers/mtd/ubi/cdev.c 		tbuf_size = ALIGN(count, ubi->min_io_size);
ubi               217 drivers/mtd/ubi/cdev.c 		err = ubi_eba_read_leb(ubi, vol, lnum, tbuf, off, len, 0);
ubi               253 drivers/mtd/ubi/cdev.c 	struct ubi_device *ubi = vol->ubi;
ubi               268 drivers/mtd/ubi/cdev.c 	if (off & (ubi->min_io_size - 1)) {
ubi               269 drivers/mtd/ubi/cdev.c 		ubi_err(ubi, "unaligned position");
ubi               277 drivers/mtd/ubi/cdev.c 	if (count & (ubi->min_io_size - 1)) {
ubi               278 drivers/mtd/ubi/cdev.c 		ubi_err(ubi, "unaligned write length");
ubi               284 drivers/mtd/ubi/cdev.c 		tbuf_size = ALIGN(count, ubi->min_io_size);
ubi               303 drivers/mtd/ubi/cdev.c 		err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len);
ubi               329 drivers/mtd/ubi/cdev.c 	struct ubi_device *ubi = vol->ubi;
ubi               335 drivers/mtd/ubi/cdev.c 		err = ubi_more_update_data(ubi, vol, buf, count);
ubi               337 drivers/mtd/ubi/cdev.c 		err = ubi_more_leb_change_data(ubi, vol, buf, count);
ubi               340 drivers/mtd/ubi/cdev.c 		ubi_err(ubi, "cannot accept more %zd bytes of data, error %d",
ubi               361 drivers/mtd/ubi/cdev.c 		err = ubi_check_volume(ubi, vol->vol_id);
ubi               366 drivers/mtd/ubi/cdev.c 			ubi_warn(ubi, "volume %d on UBI device %d is corrupted",
ubi               367 drivers/mtd/ubi/cdev.c 				 vol->vol_id, ubi->ubi_num);
ubi               371 drivers/mtd/ubi/cdev.c 		ubi_volume_notify(ubi, vol, UBI_VOLUME_UPDATED);
ubi               384 drivers/mtd/ubi/cdev.c 	struct ubi_device *ubi = vol->ubi;
ubi               420 drivers/mtd/ubi/cdev.c 		err = ubi_start_update(ubi, vol, bytes);
ubi               422 drivers/mtd/ubi/cdev.c 			ubi_volume_notify(ubi, vol, UBI_VOLUME_UPDATED);
ubi               456 drivers/mtd/ubi/cdev.c 		err = ubi_start_leb_change(ubi, vol, &req);
ubi               485 drivers/mtd/ubi/cdev.c 		err = ubi_eba_unmap_leb(ubi, vol, lnum);
ubi               489 drivers/mtd/ubi/cdev.c 		err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
ubi               548 drivers/mtd/ubi/cdev.c 			mutex_lock(&ubi->device_mutex);
ubi               550 drivers/mtd/ubi/cdev.c 			mutex_unlock(&ubi->device_mutex);
ubi               593 drivers/mtd/ubi/cdev.c static int verify_mkvol_req(const struct ubi_device *ubi,
ubi               602 drivers/mtd/ubi/cdev.c 	if ((req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots) &&
ubi               623 drivers/mtd/ubi/cdev.c 	if (req->alignment > ubi->leb_size)
ubi               626 drivers/mtd/ubi/cdev.c 	n = req->alignment & (ubi->min_io_size - 1);
ubi               645 drivers/mtd/ubi/cdev.c 	ubi_err(ubi, "bad volume creation request");
ubi               657 drivers/mtd/ubi/cdev.c static int verify_rsvol_req(const struct ubi_device *ubi,
ubi               663 drivers/mtd/ubi/cdev.c 	if (req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots)
ubi               679 drivers/mtd/ubi/cdev.c static int rename_volumes(struct ubi_device *ubi,
ubi               695 drivers/mtd/ubi/cdev.c 		    req->ents[i].vol_id >= ubi->vtbl_slots)
ubi               711 drivers/mtd/ubi/cdev.c 				ubi_err(ubi, "duplicated volume id %d",
ubi               716 drivers/mtd/ubi/cdev.c 				ubi_err(ubi, "duplicated volume name \"%s\"",
ubi               736 drivers/mtd/ubi/cdev.c 		re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_METAONLY);
ubi               739 drivers/mtd/ubi/cdev.c 			ubi_err(ubi, "cannot open volume %d, error %d",
ubi               790 drivers/mtd/ubi/cdev.c 		desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name,
ubi               799 drivers/mtd/ubi/cdev.c 			ubi_err(ubi, "cannot open volume \"%s\", error %d",
ubi               818 drivers/mtd/ubi/cdev.c 	mutex_lock(&ubi->device_mutex);
ubi               819 drivers/mtd/ubi/cdev.c 	err = ubi_rename_volumes(ubi, &rename_list);
ubi               820 drivers/mtd/ubi/cdev.c 	mutex_unlock(&ubi->device_mutex);
ubi               835 drivers/mtd/ubi/cdev.c 	struct ubi_device *ubi;
ubi               842 drivers/mtd/ubi/cdev.c 	ubi = ubi_get_by_major(imajor(file->f_mapping->host));
ubi               843 drivers/mtd/ubi/cdev.c 	if (!ubi)
ubi               859 drivers/mtd/ubi/cdev.c 		err = verify_mkvol_req(ubi, &req);
ubi               863 drivers/mtd/ubi/cdev.c 		mutex_lock(&ubi->device_mutex);
ubi               864 drivers/mtd/ubi/cdev.c 		err = ubi_create_volume(ubi, &req);
ubi               865 drivers/mtd/ubi/cdev.c 		mutex_unlock(&ubi->device_mutex);
ubi               888 drivers/mtd/ubi/cdev.c 		desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE);
ubi               894 drivers/mtd/ubi/cdev.c 		mutex_lock(&ubi->device_mutex);
ubi               896 drivers/mtd/ubi/cdev.c 		mutex_unlock(&ubi->device_mutex);
ubi               920 drivers/mtd/ubi/cdev.c 		err = verify_rsvol_req(ubi, &req);
ubi               924 drivers/mtd/ubi/cdev.c 		desc = ubi_open_volume(ubi->ubi_num, req.vol_id, UBI_EXCLUSIVE);
ubi               933 drivers/mtd/ubi/cdev.c 		mutex_lock(&ubi->device_mutex);
ubi               935 drivers/mtd/ubi/cdev.c 		mutex_unlock(&ubi->device_mutex);
ubi               959 drivers/mtd/ubi/cdev.c 		err = rename_volumes(ubi, req);
ubi               975 drivers/mtd/ubi/cdev.c 		err = ubi_bitflip_check(ubi, pnum, 0);
ubi               990 drivers/mtd/ubi/cdev.c 		err = ubi_bitflip_check(ubi, pnum, 1);
ubi               999 drivers/mtd/ubi/cdev.c 	ubi_put_device(ubi);
ubi                22 drivers/mtd/ubi/debug.c void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len)
ubi                27 drivers/mtd/ubi/debug.c 	loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
ubi                32 drivers/mtd/ubi/debug.c 	err = mtd_read(ubi->mtd, addr, len, &read, buf);
ubi                34 drivers/mtd/ubi/debug.c 		ubi_err(ubi, "err %d while reading %d bytes from PEB %d:%d, read %zd bytes",
ubi                39 drivers/mtd/ubi/debug.c 	ubi_msg(ubi, "dumping %d bytes of data from PEB %d, offset %d",
ubi               252 drivers/mtd/ubi/debug.c 	struct ubi_device *ubi;
ubi               257 drivers/mtd/ubi/debug.c 	ubi = ubi_get_device(ubi_num);
ubi               258 drivers/mtd/ubi/debug.c 	if (!ubi)
ubi               260 drivers/mtd/ubi/debug.c 	d = &ubi->dbg;
ubi               305 drivers/mtd/ubi/debug.c 	ubi_put_device(ubi);
ubi               315 drivers/mtd/ubi/debug.c 	struct ubi_device *ubi;
ubi               321 drivers/mtd/ubi/debug.c 	ubi = ubi_get_device(ubi_num);
ubi               322 drivers/mtd/ubi/debug.c 	if (!ubi)
ubi               324 drivers/mtd/ubi/debug.c 	d = &ubi->dbg;
ubi               373 drivers/mtd/ubi/debug.c 	ubi_put_device(ubi);
ubi               393 drivers/mtd/ubi/debug.c 	struct ubi_device *ubi = s->private;
ubi               395 drivers/mtd/ubi/debug.c 	if (*pos < ubi->peb_count)
ubi               406 drivers/mtd/ubi/debug.c 	struct ubi_device *ubi = s->private;
ubi               410 drivers/mtd/ubi/debug.c 	if (*pos < ubi->peb_count)
ubi               422 drivers/mtd/ubi/debug.c 	struct ubi_device *ubi = s->private;
ubi               432 drivers/mtd/ubi/debug.c 	err = ubi_io_is_bad(ubi, *block_number);
ubi               436 drivers/mtd/ubi/debug.c 	spin_lock(&ubi->wl_lock);
ubi               438 drivers/mtd/ubi/debug.c 	wl = ubi->lookuptbl[*block_number];
ubi               442 drivers/mtd/ubi/debug.c 	spin_unlock(&ubi->wl_lock);
ubi               480 drivers/mtd/ubi/debug.c 	struct ubi_device *ubi = s->private;
ubi               482 drivers/mtd/ubi/debug.c 	ubi_put_device(ubi);
ubi               502 drivers/mtd/ubi/debug.c int ubi_debugfs_init_dev(struct ubi_device *ubi)
ubi               505 drivers/mtd/ubi/debug.c 	unsigned long ubi_num = ubi->ubi_num;
ubi               508 drivers/mtd/ubi/debug.c 	struct ubi_debug_info *d = &ubi->dbg;
ubi               514 drivers/mtd/ubi/debug.c 		     ubi->ubi_num);
ubi               603 drivers/mtd/ubi/debug.c 	ubi_err(ubi, "cannot create \"%s\" debugfs file or directory, error %d\n",
ubi               612 drivers/mtd/ubi/debug.c void ubi_debugfs_exit_dev(struct ubi_device *ubi)
ubi               615 drivers/mtd/ubi/debug.c 		debugfs_remove_recursive(ubi->dbg.dfs_dir);
ubi               625 drivers/mtd/ubi/debug.c int ubi_dbg_power_cut(struct ubi_device *ubi, int caller)
ubi               629 drivers/mtd/ubi/debug.c 	if ((ubi->dbg.emulate_power_cut & caller) == 0)
ubi               632 drivers/mtd/ubi/debug.c 	if (ubi->dbg.power_cut_counter == 0) {
ubi               633 drivers/mtd/ubi/debug.c 		ubi->dbg.power_cut_counter = ubi->dbg.power_cut_min;
ubi               635 drivers/mtd/ubi/debug.c 		if (ubi->dbg.power_cut_max > ubi->dbg.power_cut_min) {
ubi               636 drivers/mtd/ubi/debug.c 			range = ubi->dbg.power_cut_max - ubi->dbg.power_cut_min;
ubi               637 drivers/mtd/ubi/debug.c 			ubi->dbg.power_cut_counter += prandom_u32() % range;
ubi               642 drivers/mtd/ubi/debug.c 	ubi->dbg.power_cut_counter--;
ubi               643 drivers/mtd/ubi/debug.c 	if (ubi->dbg.power_cut_counter)
ubi               646 drivers/mtd/ubi/debug.c 	ubi_msg(ubi, "XXXXXXXXXXXXXXX emulating a power cut XXXXXXXXXXXXXXXX");
ubi               647 drivers/mtd/ubi/debug.c 	ubi_ro_mode(ubi);
ubi                11 drivers/mtd/ubi/debug.h void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len);
ubi                48 drivers/mtd/ubi/debug.h int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset,
ubi                52 drivers/mtd/ubi/debug.h int ubi_debugfs_init_dev(struct ubi_device *ubi);
ubi                53 drivers/mtd/ubi/debug.h void ubi_debugfs_exit_dev(struct ubi_device *ubi);
ubi                62 drivers/mtd/ubi/debug.h static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
ubi                64 drivers/mtd/ubi/debug.h 	return ubi->dbg.disable_bgt;
ubi                73 drivers/mtd/ubi/debug.h static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
ubi                75 drivers/mtd/ubi/debug.h 	if (ubi->dbg.emulate_bitflips)
ubi                87 drivers/mtd/ubi/debug.h static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
ubi                89 drivers/mtd/ubi/debug.h 	if (ubi->dbg.emulate_io_failures)
ubi               101 drivers/mtd/ubi/debug.h static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
ubi               103 drivers/mtd/ubi/debug.h 	if (ubi->dbg.emulate_io_failures)
ubi               108 drivers/mtd/ubi/debug.h static inline int ubi_dbg_chk_io(const struct ubi_device *ubi)
ubi               110 drivers/mtd/ubi/debug.h 	return ubi->dbg.chk_io;
ubi               113 drivers/mtd/ubi/debug.h static inline int ubi_dbg_chk_gen(const struct ubi_device *ubi)
ubi               115 drivers/mtd/ubi/debug.h 	return ubi->dbg.chk_gen;
ubi               118 drivers/mtd/ubi/debug.h static inline int ubi_dbg_chk_fastmap(const struct ubi_device *ubi)
ubi               120 drivers/mtd/ubi/debug.h 	return ubi->dbg.chk_fastmap;
ubi               123 drivers/mtd/ubi/debug.h static inline void ubi_enable_dbg_chk_fastmap(struct ubi_device *ubi)
ubi               125 drivers/mtd/ubi/debug.h 	ubi->dbg.chk_fastmap = 1;
ubi               128 drivers/mtd/ubi/debug.h int ubi_dbg_power_cut(struct ubi_device *ubi, int caller);
ubi                71 drivers/mtd/ubi/eba.c unsigned long long ubi_next_sqnum(struct ubi_device *ubi)
ubi                75 drivers/mtd/ubi/eba.c 	spin_lock(&ubi->ltree_lock);
ubi                76 drivers/mtd/ubi/eba.c 	sqnum = ubi->global_sqnum++;
ubi                77 drivers/mtd/ubi/eba.c 	spin_unlock(&ubi->ltree_lock);
ubi                90 drivers/mtd/ubi/eba.c static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
ubi               211 drivers/mtd/ubi/eba.c static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
ubi               216 drivers/mtd/ubi/eba.c 	p = ubi->ltree.rb_node;
ubi               250 drivers/mtd/ubi/eba.c static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
ubi               264 drivers/mtd/ubi/eba.c 	spin_lock(&ubi->ltree_lock);
ubi               265 drivers/mtd/ubi/eba.c 	le1 = ltree_lookup(ubi, vol_id, lnum);
ubi               283 drivers/mtd/ubi/eba.c 		p = &ubi->ltree.rb_node;
ubi               302 drivers/mtd/ubi/eba.c 		rb_insert_color(&le->rb, &ubi->ltree);
ubi               305 drivers/mtd/ubi/eba.c 	spin_unlock(&ubi->ltree_lock);
ubi               320 drivers/mtd/ubi/eba.c static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
ubi               324 drivers/mtd/ubi/eba.c 	le = ltree_add_entry(ubi, vol_id, lnum);
ubi               337 drivers/mtd/ubi/eba.c static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
ubi               341 drivers/mtd/ubi/eba.c 	spin_lock(&ubi->ltree_lock);
ubi               342 drivers/mtd/ubi/eba.c 	le = ltree_lookup(ubi, vol_id, lnum);
ubi               347 drivers/mtd/ubi/eba.c 		rb_erase(&le->rb, &ubi->ltree);
ubi               350 drivers/mtd/ubi/eba.c 	spin_unlock(&ubi->ltree_lock);
ubi               362 drivers/mtd/ubi/eba.c static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
ubi               366 drivers/mtd/ubi/eba.c 	le = ltree_add_entry(ubi, vol_id, lnum);
ubi               384 drivers/mtd/ubi/eba.c static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
ubi               388 drivers/mtd/ubi/eba.c 	le = ltree_add_entry(ubi, vol_id, lnum);
ubi               395 drivers/mtd/ubi/eba.c 	spin_lock(&ubi->ltree_lock);
ubi               399 drivers/mtd/ubi/eba.c 		rb_erase(&le->rb, &ubi->ltree);
ubi               402 drivers/mtd/ubi/eba.c 	spin_unlock(&ubi->ltree_lock);
ubi               413 drivers/mtd/ubi/eba.c static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
ubi               417 drivers/mtd/ubi/eba.c 	spin_lock(&ubi->ltree_lock);
ubi               418 drivers/mtd/ubi/eba.c 	le = ltree_lookup(ubi, vol_id, lnum);
ubi               423 drivers/mtd/ubi/eba.c 		rb_erase(&le->rb, &ubi->ltree);
ubi               426 drivers/mtd/ubi/eba.c 	spin_unlock(&ubi->ltree_lock);
ubi               451 drivers/mtd/ubi/eba.c int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
ubi               456 drivers/mtd/ubi/eba.c 	if (ubi->ro_mode)
ubi               459 drivers/mtd/ubi/eba.c 	err = leb_write_lock(ubi, vol_id, lnum);
ubi               470 drivers/mtd/ubi/eba.c 	down_read(&ubi->fm_eba_sem);
ubi               472 drivers/mtd/ubi/eba.c 	up_read(&ubi->fm_eba_sem);
ubi               473 drivers/mtd/ubi/eba.c 	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
ubi               476 drivers/mtd/ubi/eba.c 	leb_write_unlock(ubi, vol_id, lnum);
ubi               498 drivers/mtd/ubi/eba.c static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
ubi               505 drivers/mtd/ubi/eba.c 	if (!ubi->fast_attach)
ubi               511 drivers/mtd/ubi/eba.c 	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
ubi               515 drivers/mtd/ubi/eba.c 	err = ubi_io_read_vid_hdr(ubi, *pnum, vidb, 0);
ubi               532 drivers/mtd/ubi/eba.c 		down_read(&ubi->fm_eba_sem);
ubi               534 drivers/mtd/ubi/eba.c 		up_read(&ubi->fm_eba_sem);
ubi               535 drivers/mtd/ubi/eba.c 		ubi_wl_put_peb(ubi, vol->vol_id, lnum, *pnum, torture);
ubi               539 drivers/mtd/ubi/eba.c 		ubi_err(ubi, "unable to read VID header back from PEB %i: %i",
ubi               553 drivers/mtd/ubi/eba.c 			ubi_err(ubi, "EBA mismatch! PEB %i is LEB %i:%i instead of LEB %i:%i",
ubi               555 drivers/mtd/ubi/eba.c 			ubi_ro_mode(ubi);
ubi               570 drivers/mtd/ubi/eba.c static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
ubi               596 drivers/mtd/ubi/eba.c int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
ubi               604 drivers/mtd/ubi/eba.c 	err = leb_read_lock(ubi, vol_id, lnum);
ubi               610 drivers/mtd/ubi/eba.c 		err = check_mapping(ubi, vol, lnum, &pnum);
ubi               623 drivers/mtd/ubi/eba.c 		leb_read_unlock(ubi, vol_id, lnum);
ubi               637 drivers/mtd/ubi/eba.c 		vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
ubi               645 drivers/mtd/ubi/eba.c 		err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 1);
ubi               658 drivers/mtd/ubi/eba.c 					ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d",
ubi               675 drivers/mtd/ubi/eba.c 					if (ubi->fast_attach) {
ubi               679 drivers/mtd/ubi/eba.c 						ubi_ro_mode(ubi);
ubi               694 drivers/mtd/ubi/eba.c 	err = ubi_io_read_data(ubi, buf, pnum, offset, len);
ubi               703 drivers/mtd/ubi/eba.c 				ubi_msg(ubi, "force data checking");
ubi               714 drivers/mtd/ubi/eba.c 			ubi_warn(ubi, "CRC error: calculated %#08x, must be %#08x",
ubi               722 drivers/mtd/ubi/eba.c 		err = ubi_wl_scrub_peb(ubi, pnum);
ubi               724 drivers/mtd/ubi/eba.c 	leb_read_unlock(ubi, vol_id, lnum);
ubi               730 drivers/mtd/ubi/eba.c 	leb_read_unlock(ubi, vol_id, lnum);
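
The ubi_eba_read_leb() entries above end with a data-CRC comparison ("CRC error: calculated ..., must be ...") and, on bit-flips, a scrub request for the PEB. Below is a hedged, self-contained model of just the CRC check. It borrows zlib's crc32() for illustration; UBI_CRC32_INIT matches the driver's seed value, but zlib post-processes the result differently from the kernel's crc32(), so only the check logic carries over, not the numbers. Build with -lz.

#include <stdio.h>
#include <zlib.h>

#define UBI_CRC32_INIT 0xFFFFFFFFUL	/* same seed value the driver uses */

/* Model of the post-read verification: recompute the CRC over the data
 * just read and compare it with the value stored in the VID header. */
static int check_leb_crc(const void *buf, size_t len, unsigned long stored_crc)
{
	unsigned long crc = crc32(UBI_CRC32_INIT, buf, len);

	if (crc != stored_crc) {
		fprintf(stderr, "CRC error: calculated %#08lx, must be %#08lx\n",
			crc, stored_crc);
		return -1;	/* the driver returns -EBADMSG here */
	}
	return 0;
}

int main(void)
{
	char data[] = "logical eraseblock payload";
	unsigned long good = crc32(UBI_CRC32_INIT,
				   (const unsigned char *)data, sizeof(data));

	printf("clean read: %d\n", check_leb_crc(data, sizeof(data), good));
	data[0] ^= 0x01;	/* simulate corruption */
	printf("corrupted:  %d\n", check_leb_crc(data, sizeof(data), good));
	return 0;
}
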
ubi               748 drivers/mtd/ubi/eba.c int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol,
ubi               764 drivers/mtd/ubi/eba.c 		ret = ubi_eba_read_leb(ubi, vol, lnum,
ubi               811 drivers/mtd/ubi/eba.c 	struct ubi_device *ubi = vol->ubi;
ubi               818 drivers/mtd/ubi/eba.c 	new_pnum = ubi_wl_get_peb(ubi);
ubi               824 drivers/mtd/ubi/eba.c 	ubi_msg(ubi, "recover PEB %d, move data to PEB %d",
ubi               827 drivers/mtd/ubi/eba.c 	err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 1);
ubi               837 drivers/mtd/ubi/eba.c 	mutex_lock(&ubi->buf_mutex);
ubi               838 drivers/mtd/ubi/eba.c 	memset(ubi->peb_buf + offset, 0xFF, len);
ubi               842 drivers/mtd/ubi/eba.c 		err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
ubi               849 drivers/mtd/ubi/eba.c 	memcpy(ubi->peb_buf + offset, buf, len);
ubi               852 drivers/mtd/ubi/eba.c 	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
ubi               853 drivers/mtd/ubi/eba.c 	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi               857 drivers/mtd/ubi/eba.c 	err = ubi_io_write_vid_hdr(ubi, new_pnum, vidb);
ubi               861 drivers/mtd/ubi/eba.c 	err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
ubi               864 drivers/mtd/ubi/eba.c 	mutex_unlock(&ubi->buf_mutex);
ubi               870 drivers/mtd/ubi/eba.c 	up_read(&ubi->fm_eba_sem);
ubi               873 drivers/mtd/ubi/eba.c 		ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
ubi               874 drivers/mtd/ubi/eba.c 		ubi_msg(ubi, "data was successfully recovered");
ubi               880 drivers/mtd/ubi/eba.c 		ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
ubi               881 drivers/mtd/ubi/eba.c 		ubi_warn(ubi, "failed to write to PEB %d", new_pnum);
ubi               903 drivers/mtd/ubi/eba.c static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
ubi               906 drivers/mtd/ubi/eba.c 	int err, idx = vol_id2idx(ubi, vol_id), tries;
ubi               907 drivers/mtd/ubi/eba.c 	struct ubi_volume *vol = ubi->volumes[idx];
ubi               910 drivers/mtd/ubi/eba.c 	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
ubi               922 drivers/mtd/ubi/eba.c 		ubi_msg(ubi, "try again");
ubi               949 drivers/mtd/ubi/eba.c 	struct ubi_device *ubi = vol->ubi;
ubi               952 drivers/mtd/ubi/eba.c 	pnum = ubi_wl_get_peb(ubi);
ubi               963 drivers/mtd/ubi/eba.c 	err = ubi_io_write_vid_hdr(ubi, pnum, vidb);
ubi               965 drivers/mtd/ubi/eba.c 		ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
ubi               971 drivers/mtd/ubi/eba.c 		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
ubi               973 drivers/mtd/ubi/eba.c 			ubi_warn(ubi,
ubi               983 drivers/mtd/ubi/eba.c 	up_read(&ubi->fm_eba_sem);
ubi               986 drivers/mtd/ubi/eba.c 		err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
ubi               988 drivers/mtd/ubi/eba.c 		err = ubi_wl_put_peb(ubi, vol_id, lnum, opnum, 0);
ubi              1008 drivers/mtd/ubi/eba.c int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
ubi              1015 drivers/mtd/ubi/eba.c 	if (ubi->ro_mode)
ubi              1018 drivers/mtd/ubi/eba.c 	err = leb_write_lock(ubi, vol_id, lnum);
ubi              1024 drivers/mtd/ubi/eba.c 		err = check_mapping(ubi, vol, lnum, &pnum);
ubi              1033 drivers/mtd/ubi/eba.c 		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
ubi              1035 drivers/mtd/ubi/eba.c 			ubi_warn(ubi, "failed to write data to PEB %d", pnum);
ubi              1036 drivers/mtd/ubi/eba.c 			if (err == -EIO && ubi->bad_allowed)
ubi              1037 drivers/mtd/ubi/eba.c 				err = recover_peb(ubi, pnum, vol_id, lnum, buf,
ubi              1048 drivers/mtd/ubi/eba.c 	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
ubi              1050 drivers/mtd/ubi/eba.c 		leb_write_unlock(ubi, vol_id, lnum);
ubi              1057 drivers/mtd/ubi/eba.c 	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi              1060 drivers/mtd/ubi/eba.c 	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
ubi              1065 drivers/mtd/ubi/eba.c 		if (err != -EIO || !ubi->bad_allowed)
ubi              1074 drivers/mtd/ubi/eba.c 		vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi              1075 drivers/mtd/ubi/eba.c 		ubi_msg(ubi, "try another PEB");
ubi              1082 drivers/mtd/ubi/eba.c 		ubi_ro_mode(ubi);
ubi              1084 drivers/mtd/ubi/eba.c 	leb_write_unlock(ubi, vol_id, lnum);
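
ubi_eba_write_leb() above first tries to write in place and, when the write fails with -EIO and bad-PEB handling is allowed, recovers by moving the data to another PEB and retrying a bounded number of times ("try another PEB"). The sketch below models only that retry policy; write_to_peb() and pick_new_peb() are made-up stand-ins for the real I/O and wear-levelling calls, and the bound mirrors the driver's UBI_IO_RETRIES (3).

#include <errno.h>
#include <stdio.h>

#define IO_RETRIES 3	/* mirrors UBI_IO_RETRIES in the driver */

/* Stand-in for the real write path: pretend PEB 7 is failing. */
static int write_to_peb(int pnum, const void *buf, int len)
{
	(void)buf;
	(void)len;
	return pnum == 7 ? -EIO : 0;
}

/* Stand-in for asking wear-levelling for a fresh PEB. */
static int pick_new_peb(void)
{
	static int next = 100;
	return next++;
}

/* Policy modelled on ubi_eba_write_leb(): -EIO with bad-PEB handling
 * enabled means "move the data elsewhere and retry", up to a limit. */
static int write_leb(int pnum, const void *buf, int len, int bad_allowed)
{
	int tries, err = 0;

	for (tries = 0; tries <= IO_RETRIES; tries++) {
		err = write_to_peb(pnum, buf, len);
		if (!err)
			return 0;
		if (err != -EIO || !bad_allowed)
			return err;	/* not recoverable by moving the data */
		pnum = pick_new_peb();
		fprintf(stderr, "write failed, trying another PEB (%d)\n", pnum);
	}
	return err;
}

int main(void)
{
	char buf[64] = { 0 };

	return write_leb(7, buf, sizeof(buf), 1) ? 1 : 0;
}
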
ubi              1111 drivers/mtd/ubi/eba.c int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
ubi              1119 drivers/mtd/ubi/eba.c 	if (ubi->ro_mode)
ubi              1124 drivers/mtd/ubi/eba.c 		len = ALIGN(data_size, ubi->min_io_size);
ubi              1126 drivers/mtd/ubi/eba.c 		ubi_assert(!(len & (ubi->min_io_size - 1)));
ubi              1128 drivers/mtd/ubi/eba.c 	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
ubi              1134 drivers/mtd/ubi/eba.c 	err = leb_write_lock(ubi, vol_id, lnum);
ubi              1138 drivers/mtd/ubi/eba.c 	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi              1141 drivers/mtd/ubi/eba.c 	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
ubi              1154 drivers/mtd/ubi/eba.c 		if (err != -EIO || !ubi->bad_allowed)
ubi              1157 drivers/mtd/ubi/eba.c 		vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi              1158 drivers/mtd/ubi/eba.c 		ubi_msg(ubi, "try another PEB");
ubi              1162 drivers/mtd/ubi/eba.c 		ubi_ro_mode(ubi);
ubi              1164 drivers/mtd/ubi/eba.c 	leb_write_unlock(ubi, vol_id, lnum);
ubi              1189 drivers/mtd/ubi/eba.c int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
ubi              1197 drivers/mtd/ubi/eba.c 	if (ubi->ro_mode)
ubi              1205 drivers/mtd/ubi/eba.c 		err = ubi_eba_unmap_leb(ubi, vol, lnum);
ubi              1208 drivers/mtd/ubi/eba.c 		return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
ubi              1211 drivers/mtd/ubi/eba.c 	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
ubi              1217 drivers/mtd/ubi/eba.c 	mutex_lock(&ubi->alc_mutex);
ubi              1218 drivers/mtd/ubi/eba.c 	err = leb_write_lock(ubi, vol_id, lnum);
ubi              1222 drivers/mtd/ubi/eba.c 	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi              1225 drivers/mtd/ubi/eba.c 	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
ubi              1238 drivers/mtd/ubi/eba.c 		if (err != -EIO || !ubi->bad_allowed)
ubi              1241 drivers/mtd/ubi/eba.c 		vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi              1242 drivers/mtd/ubi/eba.c 		ubi_msg(ubi, "try another PEB");
ubi              1251 drivers/mtd/ubi/eba.c 		ubi_ro_mode(ubi);
ubi              1253 drivers/mtd/ubi/eba.c 	leb_write_unlock(ubi, vol_id, lnum);
ubi              1256 drivers/mtd/ubi/eba.c 	mutex_unlock(&ubi->alc_mutex);
ubi              1302 drivers/mtd/ubi/eba.c int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
ubi              1310 drivers/mtd/ubi/eba.c 	ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));
ubi              1319 drivers/mtd/ubi/eba.c 		aldata_size = ALIGN(data_size, ubi->min_io_size);
ubi              1322 drivers/mtd/ubi/eba.c 			    ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);
ubi              1324 drivers/mtd/ubi/eba.c 	idx = vol_id2idx(ubi, vol_id);
ubi              1325 drivers/mtd/ubi/eba.c 	spin_lock(&ubi->volumes_lock);
ubi              1332 drivers/mtd/ubi/eba.c 	vol = ubi->volumes[idx];
ubi              1333 drivers/mtd/ubi/eba.c 	spin_unlock(&ubi->volumes_lock);
ubi              1355 drivers/mtd/ubi/eba.c 	err = leb_write_trylock(ubi, vol_id, lnum);
ubi              1379 drivers/mtd/ubi/eba.c 	mutex_lock(&ubi->buf_mutex);
ubi              1381 drivers/mtd/ubi/eba.c 	err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size);
ubi              1383 drivers/mtd/ubi/eba.c 		ubi_warn(ubi, "error %d while reading data from PEB %d",
ubi              1401 drivers/mtd/ubi/eba.c 			ubi_calc_data_len(ubi, ubi->peb_buf, data_size);
ubi              1404 drivers/mtd/ubi/eba.c 	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
ubi              1418 drivers/mtd/ubi/eba.c 	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi              1420 drivers/mtd/ubi/eba.c 	err = ubi_io_write_vid_hdr(ubi, to, vidb);
ubi              1430 drivers/mtd/ubi/eba.c 	err = ubi_io_read_vid_hdr(ubi, to, vidb, 1);
ubi              1433 drivers/mtd/ubi/eba.c 			ubi_warn(ubi, "error %d while reading VID header back from PEB %d",
ubi              1443 drivers/mtd/ubi/eba.c 		err = ubi_io_write_data(ubi, ubi->peb_buf, to, 0, aldata_size);
ubi              1457 drivers/mtd/ubi/eba.c 	mutex_unlock(&ubi->buf_mutex);
ubi              1459 drivers/mtd/ubi/eba.c 	leb_write_unlock(ubi, vol_id, lnum);
ubi              1481 drivers/mtd/ubi/eba.c static void print_rsvd_warning(struct ubi_device *ubi,
ubi              1489 drivers/mtd/ubi/eba.c 		int min = ubi->beb_rsvd_level / 10;
ubi              1493 drivers/mtd/ubi/eba.c 		if (ubi->beb_rsvd_pebs > min)
ubi              1497 drivers/mtd/ubi/eba.c 	ubi_warn(ubi, "cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
ubi              1498 drivers/mtd/ubi/eba.c 		 ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
ubi              1499 drivers/mtd/ubi/eba.c 	if (ubi->corr_peb_count)
ubi              1500 drivers/mtd/ubi/eba.c 		ubi_warn(ubi, "%d PEBs are corrupted and not used",
ubi              1501 drivers/mtd/ubi/eba.c 			 ubi->corr_peb_count);
ubi              1514 drivers/mtd/ubi/eba.c int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
ubi              1524 drivers/mtd/ubi/eba.c 	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
ubi              1537 drivers/mtd/ubi/eba.c 		vol = ubi->volumes[i];
ubi              1560 drivers/mtd/ubi/eba.c 		av = ubi_find_av(ai_scan, idx2vol_id(ubi, i));
ubi              1567 drivers/mtd/ubi/eba.c 		av = ubi_find_av(ai_fastmap, idx2vol_id(ubi, i));
ubi              1580 drivers/mtd/ubi/eba.c 				ubi_err(ubi, "LEB:%i:%i is PEB:%i instead of %i!",
ubi              1590 drivers/mtd/ubi/eba.c 		if (!ubi->volumes[i])
ubi              1610 drivers/mtd/ubi/eba.c int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
ubi              1620 drivers/mtd/ubi/eba.c 	spin_lock_init(&ubi->ltree_lock);
ubi              1621 drivers/mtd/ubi/eba.c 	mutex_init(&ubi->alc_mutex);
ubi              1622 drivers/mtd/ubi/eba.c 	ubi->ltree = RB_ROOT;
ubi              1624 drivers/mtd/ubi/eba.c 	ubi->global_sqnum = ai->max_sqnum + 1;
ubi              1625 drivers/mtd/ubi/eba.c 	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
ubi              1630 drivers/mtd/ubi/eba.c 		vol = ubi->volumes[i];
ubi              1644 drivers/mtd/ubi/eba.c 		av = ubi_find_av(ai, idx2vol_id(ubi, i));
ubi              1664 drivers/mtd/ubi/eba.c 	if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
ubi              1665 drivers/mtd/ubi/eba.c 		ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
ubi              1666 drivers/mtd/ubi/eba.c 			ubi->avail_pebs, EBA_RESERVED_PEBS);
ubi              1667 drivers/mtd/ubi/eba.c 		if (ubi->corr_peb_count)
ubi              1668 drivers/mtd/ubi/eba.c 			ubi_err(ubi, "%d PEBs are corrupted and not used",
ubi              1669 drivers/mtd/ubi/eba.c 				ubi->corr_peb_count);
ubi              1673 drivers/mtd/ubi/eba.c 	ubi->avail_pebs -= EBA_RESERVED_PEBS;
ubi              1674 drivers/mtd/ubi/eba.c 	ubi->rsvd_pebs += EBA_RESERVED_PEBS;
ubi              1676 drivers/mtd/ubi/eba.c 	if (ubi->bad_allowed) {
ubi              1677 drivers/mtd/ubi/eba.c 		ubi_calculate_reserved(ubi);
ubi              1679 drivers/mtd/ubi/eba.c 		if (ubi->avail_pebs < ubi->beb_rsvd_level) {
ubi              1681 drivers/mtd/ubi/eba.c 			ubi->beb_rsvd_pebs = ubi->avail_pebs;
ubi              1682 drivers/mtd/ubi/eba.c 			print_rsvd_warning(ubi, ai);
ubi              1684 drivers/mtd/ubi/eba.c 			ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;
ubi              1686 drivers/mtd/ubi/eba.c 		ubi->avail_pebs -= ubi->beb_rsvd_pebs;
ubi              1687 drivers/mtd/ubi/eba.c 		ubi->rsvd_pebs  += ubi->beb_rsvd_pebs;
ubi              1695 drivers/mtd/ubi/eba.c 		if (!ubi->volumes[i])
ubi              1697 drivers/mtd/ubi/eba.c 		ubi_eba_replace_table(ubi->volumes[i], NULL);
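
The ubi_eba_init() entries above end with the PEB budget: EBA_RESERVED_PEBS (1 in the driver) is taken out of avail_pebs first, and if bad blocks are allowed the bad-PEB reserve is then capped by whatever remains, with print_rsvd_warning() fired when the cap bites. A worked, self-contained version of that arithmetic follows; the struct and the numbers in main() are invented for the example.

#include <stdio.h>

#define EBA_RESERVED_PEBS 1

struct dev {
	int avail_pebs, rsvd_pebs;
	int beb_rsvd_level, beb_rsvd_pebs;
	int bad_allowed;
};

/* Same accounting ubi_eba_init() performs after the volume tables exist. */
static int reserve_pebs(struct dev *d)
{
	if (d->avail_pebs < EBA_RESERVED_PEBS)
		return -1;	/* the driver fails the attach here */

	d->avail_pebs -= EBA_RESERVED_PEBS;
	d->rsvd_pebs += EBA_RESERVED_PEBS;

	if (d->bad_allowed) {
		/* Cap the bad-PEB reserve by what is actually left. */
		d->beb_rsvd_pebs = d->avail_pebs < d->beb_rsvd_level ?
				   d->avail_pebs : d->beb_rsvd_level;
		d->avail_pebs -= d->beb_rsvd_pebs;
		d->rsvd_pebs += d->beb_rsvd_pebs;
	}
	return 0;
}

int main(void)
{
	struct dev d = {
		.avail_pebs = 40, .beb_rsvd_level = 20, .bad_allowed = 1,
	};

	reserve_pebs(&d);
	printf("avail=%d rsvd=%d beb_rsvd=%d\n",
	       d.avail_pebs, d.rsvd_pebs, d.beb_rsvd_pebs);	/* 19 21 20 */
	return 0;
}
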
ubi                14 drivers/mtd/ubi/fastmap-wl.c 	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
ubi                16 drivers/mtd/ubi/fastmap-wl.c 	ubi_update_fastmap(ubi);
ubi                17 drivers/mtd/ubi/fastmap-wl.c 	spin_lock(&ubi->wl_lock);
ubi                18 drivers/mtd/ubi/fastmap-wl.c 	ubi->fm_work_scheduled = 0;
ubi                19 drivers/mtd/ubi/fastmap-wl.c 	spin_unlock(&ubi->wl_lock);
ubi                47 drivers/mtd/ubi/fastmap-wl.c static void return_unused_pool_pebs(struct ubi_device *ubi,
ubi                54 drivers/mtd/ubi/fastmap-wl.c 		e = ubi->lookuptbl[pool->pebs[i]];
ubi                55 drivers/mtd/ubi/fastmap-wl.c 		wl_tree_add(e, &ubi->free);
ubi                56 drivers/mtd/ubi/fastmap-wl.c 		ubi->free_count++;
ubi                81 drivers/mtd/ubi/fastmap-wl.c struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
ubi                85 drivers/mtd/ubi/fastmap-wl.c 	if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
ubi                89 drivers/mtd/ubi/fastmap-wl.c 		e = find_anchor_wl_entry(&ubi->free);
ubi                91 drivers/mtd/ubi/fastmap-wl.c 		e = find_mean_wl_entry(ubi, &ubi->free);
ubi                96 drivers/mtd/ubi/fastmap-wl.c 	self_check_in_wl_tree(ubi, e, &ubi->free);
ubi               100 drivers/mtd/ubi/fastmap-wl.c 	rb_erase(&e->u.rb, &ubi->free);
ubi               101 drivers/mtd/ubi/fastmap-wl.c 	ubi->free_count--;
ubi               110 drivers/mtd/ubi/fastmap-wl.c void ubi_refill_pools(struct ubi_device *ubi)
ubi               112 drivers/mtd/ubi/fastmap-wl.c 	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
ubi               113 drivers/mtd/ubi/fastmap-wl.c 	struct ubi_fm_pool *pool = &ubi->fm_pool;
ubi               117 drivers/mtd/ubi/fastmap-wl.c 	spin_lock(&ubi->wl_lock);
ubi               119 drivers/mtd/ubi/fastmap-wl.c 	return_unused_pool_pebs(ubi, wl_pool);
ubi               120 drivers/mtd/ubi/fastmap-wl.c 	return_unused_pool_pebs(ubi, pool);
ubi               128 drivers/mtd/ubi/fastmap-wl.c 			if (!ubi->free.rb_node)
ubi               131 drivers/mtd/ubi/fastmap-wl.c 			e = wl_get_wle(ubi);
ubi               141 drivers/mtd/ubi/fastmap-wl.c 			if (!ubi->free.rb_node ||
ubi               142 drivers/mtd/ubi/fastmap-wl.c 			   (ubi->free_count - ubi->beb_rsvd_pebs < 5))
ubi               145 drivers/mtd/ubi/fastmap-wl.c 			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
ubi               146 drivers/mtd/ubi/fastmap-wl.c 			self_check_in_wl_tree(ubi, e, &ubi->free);
ubi               147 drivers/mtd/ubi/fastmap-wl.c 			rb_erase(&e->u.rb, &ubi->free);
ubi               148 drivers/mtd/ubi/fastmap-wl.c 			ubi->free_count--;
ubi               162 drivers/mtd/ubi/fastmap-wl.c 	spin_unlock(&ubi->wl_lock);
ubi               174 drivers/mtd/ubi/fastmap-wl.c static int produce_free_peb(struct ubi_device *ubi)
ubi               178 drivers/mtd/ubi/fastmap-wl.c 	while (!ubi->free.rb_node && ubi->works_count) {
ubi               180 drivers/mtd/ubi/fastmap-wl.c 		err = do_work(ubi);
ubi               197 drivers/mtd/ubi/fastmap-wl.c int ubi_wl_get_peb(struct ubi_device *ubi)
ubi               200 drivers/mtd/ubi/fastmap-wl.c 	struct ubi_fm_pool *pool = &ubi->fm_pool;
ubi               201 drivers/mtd/ubi/fastmap-wl.c 	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
ubi               204 drivers/mtd/ubi/fastmap-wl.c 	down_read(&ubi->fm_eba_sem);
ubi               205 drivers/mtd/ubi/fastmap-wl.c 	spin_lock(&ubi->wl_lock);
ubi               210 drivers/mtd/ubi/fastmap-wl.c 		spin_unlock(&ubi->wl_lock);
ubi               211 drivers/mtd/ubi/fastmap-wl.c 		up_read(&ubi->fm_eba_sem);
ubi               212 drivers/mtd/ubi/fastmap-wl.c 		ret = ubi_update_fastmap(ubi);
ubi               214 drivers/mtd/ubi/fastmap-wl.c 			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
ubi               215 drivers/mtd/ubi/fastmap-wl.c 			down_read(&ubi->fm_eba_sem);
ubi               218 drivers/mtd/ubi/fastmap-wl.c 		down_read(&ubi->fm_eba_sem);
ubi               219 drivers/mtd/ubi/fastmap-wl.c 		spin_lock(&ubi->wl_lock);
ubi               223 drivers/mtd/ubi/fastmap-wl.c 		spin_unlock(&ubi->wl_lock);
ubi               226 drivers/mtd/ubi/fastmap-wl.c 			ubi_err(ubi, "Unable to get a free PEB from user WL pool");
ubi               230 drivers/mtd/ubi/fastmap-wl.c 		up_read(&ubi->fm_eba_sem);
ubi               231 drivers/mtd/ubi/fastmap-wl.c 		ret = produce_free_peb(ubi);
ubi               233 drivers/mtd/ubi/fastmap-wl.c 			down_read(&ubi->fm_eba_sem);
ubi               241 drivers/mtd/ubi/fastmap-wl.c 	prot_queue_add(ubi, ubi->lookuptbl[ret]);
ubi               242 drivers/mtd/ubi/fastmap-wl.c 	spin_unlock(&ubi->wl_lock);
ubi               251 drivers/mtd/ubi/fastmap-wl.c static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
ubi               253 drivers/mtd/ubi/fastmap-wl.c 	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
ubi               256 drivers/mtd/ubi/fastmap-wl.c 	ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));
ubi               262 drivers/mtd/ubi/fastmap-wl.c 		if (!ubi->fm_work_scheduled) {
ubi               263 drivers/mtd/ubi/fastmap-wl.c 			ubi->fm_work_scheduled = 1;
ubi               264 drivers/mtd/ubi/fastmap-wl.c 			schedule_work(&ubi->fm_work);
ubi               270 drivers/mtd/ubi/fastmap-wl.c 	return ubi->lookuptbl[pnum];
ubi               277 drivers/mtd/ubi/fastmap-wl.c int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
ubi               281 drivers/mtd/ubi/fastmap-wl.c 	spin_lock(&ubi->wl_lock);
ubi               282 drivers/mtd/ubi/fastmap-wl.c 	if (ubi->wl_scheduled) {
ubi               283 drivers/mtd/ubi/fastmap-wl.c 		spin_unlock(&ubi->wl_lock);
ubi               286 drivers/mtd/ubi/fastmap-wl.c 	ubi->wl_scheduled = 1;
ubi               287 drivers/mtd/ubi/fastmap-wl.c 	spin_unlock(&ubi->wl_lock);
ubi               291 drivers/mtd/ubi/fastmap-wl.c 		spin_lock(&ubi->wl_lock);
ubi               292 drivers/mtd/ubi/fastmap-wl.c 		ubi->wl_scheduled = 0;
ubi               293 drivers/mtd/ubi/fastmap-wl.c 		spin_unlock(&ubi->wl_lock);
ubi               299 drivers/mtd/ubi/fastmap-wl.c 	__schedule_ubi_work(ubi, wrk);
ubi               313 drivers/mtd/ubi/fastmap-wl.c int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
ubi               322 drivers/mtd/ubi/fastmap-wl.c 	ubi_assert(pnum < ubi->peb_count);
ubi               324 drivers/mtd/ubi/fastmap-wl.c 	spin_lock(&ubi->wl_lock);
ubi               325 drivers/mtd/ubi/fastmap-wl.c 	e = ubi->lookuptbl[pnum];
ubi               334 drivers/mtd/ubi/fastmap-wl.c 		ubi->lookuptbl[pnum] = e;
ubi               337 drivers/mtd/ubi/fastmap-wl.c 	spin_unlock(&ubi->wl_lock);
ubi               340 drivers/mtd/ubi/fastmap-wl.c 	return schedule_erase(ubi, e, vol_id, lnum, torture, true);
ubi               352 drivers/mtd/ubi/fastmap-wl.c static void ubi_fastmap_close(struct ubi_device *ubi)
ubi               356 drivers/mtd/ubi/fastmap-wl.c 	return_unused_pool_pebs(ubi, &ubi->fm_pool);
ubi               357 drivers/mtd/ubi/fastmap-wl.c 	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
ubi               359 drivers/mtd/ubi/fastmap-wl.c 	if (ubi->fm) {
ubi               360 drivers/mtd/ubi/fastmap-wl.c 		for (i = 0; i < ubi->fm->used_blocks; i++)
ubi               361 drivers/mtd/ubi/fastmap-wl.c 			kfree(ubi->fm->e[i]);
ubi               363 drivers/mtd/ubi/fastmap-wl.c 	kfree(ubi->fm);
ubi               374 drivers/mtd/ubi/fastmap-wl.c static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
ubi               377 drivers/mtd/ubi/fastmap-wl.c 	if (e && !ubi->fm_disabled && !ubi->fm &&
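
The fastmap-wl.c entries above revolve around two PEB pools (ubi->fm_pool and ubi->fm_wl_pool): ubi_wl_get_peb() consumes the user pool and, when it is exhausted, drops the locks and calls ubi_update_fastmap(), which refills both pools via ubi_refill_pools(). Below is a minimal single-threaded model of that pool discipline; refill_pool() stands in for the refill-plus-fastmap-write step and the pool size is arbitrary.

#include <stdio.h>

#define POOL_MAX 4

struct fm_pool {
	int pebs[POOL_MAX];
	int used, size;
};

static int next_free_peb = 50;	/* pretend free tree: just count upward */

/* Stand-in for ubi_refill_pools(): pull free PEBs into the pool and reset
 * the consumption cursor (the driver does this while writing a new fastmap). */
static void refill_pool(struct fm_pool *pool)
{
	int i;

	for (i = 0; i < POOL_MAX; i++)
		pool->pebs[i] = next_free_peb++;
	pool->size = POOL_MAX;
	pool->used = 0;
}

/* Model of ubi_wl_get_peb(): hand out the next pooled PEB, refilling
 * first when the pool has been fully consumed. */
static int get_peb(struct fm_pool *pool)
{
	if (pool->used == pool->size)
		refill_pool(pool);
	return pool->pebs[pool->used++];
}

int main(void)
{
	struct fm_pool pool = { .used = 0, .size = 0 };
	int i;

	for (i = 0; i < 6; i++)
		printf("got PEB %d\n", get_peb(&pool));
	return 0;
}
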
ubi                16 drivers/mtd/ubi/fastmap.c static inline unsigned long *init_seen(struct ubi_device *ubi)
ubi                20 drivers/mtd/ubi/fastmap.c 	if (!ubi_dbg_chk_fastmap(ubi))
ubi                23 drivers/mtd/ubi/fastmap.c 	ret = kcalloc(BITS_TO_LONGS(ubi->peb_count), sizeof(unsigned long),
ubi                46 drivers/mtd/ubi/fastmap.c static inline void set_seen(struct ubi_device *ubi, int pnum, unsigned long *seen)
ubi                48 drivers/mtd/ubi/fastmap.c 	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
ubi                59 drivers/mtd/ubi/fastmap.c static int self_check_seen(struct ubi_device *ubi, unsigned long *seen)
ubi                63 drivers/mtd/ubi/fastmap.c 	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
ubi                66 drivers/mtd/ubi/fastmap.c 	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
ubi                67 drivers/mtd/ubi/fastmap.c 		if (!test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
ubi                68 drivers/mtd/ubi/fastmap.c 			ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
ubi                80 drivers/mtd/ubi/fastmap.c size_t ubi_calc_fm_size(struct ubi_device *ubi)
ubi                88 drivers/mtd/ubi/fastmap.c 		(ubi->peb_count * sizeof(struct ubi_fm_ec)) +
ubi                90 drivers/mtd/ubi/fastmap.c 		(ubi->peb_count * sizeof(__be32))) +
ubi                92 drivers/mtd/ubi/fastmap.c 	return roundup(size, ubi->leb_size);
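
ubi_calc_fm_size() above adds up the fastmap superblock, the header, one erase-counter record per PEB and one EBA slot per PEB, then rounds the total up to whole LEBs because the fastmap is written LEB by LEB. The sketch below redoes that computation with stand-in structure sizes; the real driver uses sizeof() of the on-flash ubi_fm_* structures, so the constants here are illustrative only.

#include <stdio.h>

/* Stand-in sizes for the on-flash fastmap structures. */
#define FM_SB_SIZE	256
#define FM_HDR_SIZE	64
#define FM_EC_SIZE	8	/* one erase-counter record per PEB */
#define FM_EBA_SLOT	4	/* one __be32 EBA entry per PEB */

static size_t roundup_to(size_t x, size_t align)
{
	return ((x + align - 1) / align) * align;
}

/* Same shape as ubi_calc_fm_size(): the per-PEB records dominate, and
 * the result is a whole number of LEBs. */
static size_t calc_fm_size(int peb_count, size_t leb_size)
{
	size_t size = FM_SB_SIZE + FM_HDR_SIZE +
		      (size_t)peb_count * FM_EC_SIZE +
		      (size_t)peb_count * FM_EBA_SLOT;

	return roundup_to(size, leb_size);
}

int main(void)
{
	/* 4096 PEBs with 126 KiB LEBs: the fastmap still fits in one LEB. */
	printf("fastmap size: %zu bytes\n", calc_fm_size(4096, 126 * 1024));
	return 0;
}
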
ubi               104 drivers/mtd/ubi/fastmap.c static struct ubi_vid_io_buf *new_fm_vbuf(struct ubi_device *ubi, int vol_id)
ubi               109 drivers/mtd/ubi/fastmap.c 	new = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
ubi               242 drivers/mtd/ubi/fastmap.c static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
ubi               274 drivers/mtd/ubi/fastmap.c 		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
ubi               336 drivers/mtd/ubi/fastmap.c static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
ubi               352 drivers/mtd/ubi/fastmap.c 		ubi_err(ubi, "orphaned volume in fastmap pool!");
ubi               359 drivers/mtd/ubi/fastmap.c 	return update_vol(ubi, ai, av, new_vh, new_aeb);
ubi               400 drivers/mtd/ubi/fastmap.c static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
ubi               410 drivers/mtd/ubi/fastmap.c 	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
ubi               414 drivers/mtd/ubi/fastmap.c 	vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
ubi               434 drivers/mtd/ubi/fastmap.c 		if (ubi_io_is_bad(ubi, pnum)) {
ubi               435 drivers/mtd/ubi/fastmap.c 			ubi_err(ubi, "bad PEB in fastmap pool!");
ubi               440 drivers/mtd/ubi/fastmap.c 		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
ubi               442 drivers/mtd/ubi/fastmap.c 			ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
ubi               455 drivers/mtd/ubi/fastmap.c 		if (image_seq && (image_seq != ubi->image_seq)) {
ubi               456 drivers/mtd/ubi/fastmap.c 			ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
ubi               457 drivers/mtd/ubi/fastmap.c 				be32_to_cpu(ech->image_seq), ubi->image_seq);
ubi               462 drivers/mtd/ubi/fastmap.c 		err = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
ubi               493 drivers/mtd/ubi/fastmap.c 			err = process_pool_aeb(ubi, ai, vh, new_aeb);
ubi               500 drivers/mtd/ubi/fastmap.c 			ubi_err(ubi, "fastmap pool PEBs contain damaged PEBs!");
ubi               546 drivers/mtd/ubi/fastmap.c static int ubi_attach_fastmap(struct ubi_device *ubi,
ubi               560 drivers/mtd/ubi/fastmap.c 	size_t fm_pos = 0, fm_size = ubi->fm_size;
ubi               562 drivers/mtd/ubi/fastmap.c 	void *fm_raw = ubi->fm_buf;
ubi               580 drivers/mtd/ubi/fastmap.c 		ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
ubi               590 drivers/mtd/ubi/fastmap.c 		ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
ubi               600 drivers/mtd/ubi/fastmap.c 		ubi_err(ubi, "bad fastmap WL pool magic: 0x%x, expected: 0x%x",
ubi               611 drivers/mtd/ubi/fastmap.c 		ubi_err(ubi, "bad pool size: %i", pool_size);
ubi               616 drivers/mtd/ubi/fastmap.c 		ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
ubi               623 drivers/mtd/ubi/fastmap.c 		ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
ubi               629 drivers/mtd/ubi/fastmap.c 		ubi_err(ubi, "bad maximal WL pool size: %i",
ubi               689 drivers/mtd/ubi/fastmap.c 			ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
ubi               702 drivers/mtd/ubi/fastmap.c 				ubi_err(ubi, "volume (ID %i) already exists",
ubi               719 drivers/mtd/ubi/fastmap.c 			ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
ubi               739 drivers/mtd/ubi/fastmap.c 				ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
ubi               755 drivers/mtd/ubi/fastmap.c 	ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free);
ubi               759 drivers/mtd/ubi/fastmap.c 	ret = scan_pool(ubi, ai, fmpl_wl->pebs, wl_pool_size, &max_sqnum, &free);
ubi               780 drivers/mtd/ubi/fastmap.c 	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
ubi               850 drivers/mtd/ubi/fastmap.c int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
ubi               879 drivers/mtd/ubi/fastmap.c 	down_write(&ubi->fm_protect);
ubi               880 drivers/mtd/ubi/fastmap.c 	memset(ubi->fm_buf, 0, ubi->fm_size);
ubi               895 drivers/mtd/ubi/fastmap.c 	ret = ubi_io_read_data(ubi, fmsb, fm_anchor, 0, sizeof(*fmsb));
ubi               902 drivers/mtd/ubi/fastmap.c 		ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
ubi               909 drivers/mtd/ubi/fastmap.c 		ubi_err(ubi, "bad fastmap version: %i, expected: %i",
ubi               917 drivers/mtd/ubi/fastmap.c 		ubi_err(ubi, "number of fastmap blocks is invalid: %i",
ubi               923 drivers/mtd/ubi/fastmap.c 	fm_size = ubi->leb_size * used_blocks;
ubi               924 drivers/mtd/ubi/fastmap.c 	if (fm_size != ubi->fm_size) {
ubi               925 drivers/mtd/ubi/fastmap.c 		ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
ubi               926 drivers/mtd/ubi/fastmap.c 			fm_size, ubi->fm_size);
ubi               931 drivers/mtd/ubi/fastmap.c 	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
ubi               937 drivers/mtd/ubi/fastmap.c 	vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
ubi               950 drivers/mtd/ubi/fastmap.c 		if (ubi_io_is_bad(ubi, pnum)) {
ubi               956 drivers/mtd/ubi/fastmap.c 			ubi_err(ubi, "Fastmap anchor PEB mismatch: PEB: %i vs. %i",
ubi               962 drivers/mtd/ubi/fastmap.c 		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
ubi               964 drivers/mtd/ubi/fastmap.c 			ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
ubi               973 drivers/mtd/ubi/fastmap.c 		if (!ubi->image_seq)
ubi               974 drivers/mtd/ubi/fastmap.c 			ubi->image_seq = image_seq;
ubi               980 drivers/mtd/ubi/fastmap.c 		if (image_seq && (image_seq != ubi->image_seq)) {
ubi               981 drivers/mtd/ubi/fastmap.c 			ubi_err(ubi, "wrong image seq:%d instead of %d",
ubi               982 drivers/mtd/ubi/fastmap.c 				be32_to_cpu(ech->image_seq), ubi->image_seq);
ubi               987 drivers/mtd/ubi/fastmap.c 		ret = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
ubi               989 drivers/mtd/ubi/fastmap.c 			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
ubi               996 drivers/mtd/ubi/fastmap.c 				ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
ubi              1004 drivers/mtd/ubi/fastmap.c 				ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
ubi              1015 drivers/mtd/ubi/fastmap.c 		ret = ubi_io_read_data(ubi, ubi->fm_buf + (ubi->leb_size * i),
ubi              1016 drivers/mtd/ubi/fastmap.c 				       pnum, 0, ubi->leb_size);
ubi              1018 drivers/mtd/ubi/fastmap.c 			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, "
ubi              1027 drivers/mtd/ubi/fastmap.c 	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
ubi              1030 drivers/mtd/ubi/fastmap.c 	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
ubi              1032 drivers/mtd/ubi/fastmap.c 		ubi_err(ubi, "fastmap data CRC is invalid");
ubi              1033 drivers/mtd/ubi/fastmap.c 		ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
ubi              1043 drivers/mtd/ubi/fastmap.c 	ret = ubi_attach_fastmap(ubi, ai, fm);
ubi              1067 drivers/mtd/ubi/fastmap.c 	ubi->fm = fm;
ubi              1068 drivers/mtd/ubi/fastmap.c 	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
ubi              1069 drivers/mtd/ubi/fastmap.c 	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
ubi              1070 drivers/mtd/ubi/fastmap.c 	ubi_msg(ubi, "attached by fastmap");
ubi              1071 drivers/mtd/ubi/fastmap.c 	ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
ubi              1072 drivers/mtd/ubi/fastmap.c 	ubi_msg(ubi, "fastmap WL pool size: %d",
ubi              1073 drivers/mtd/ubi/fastmap.c 		ubi->fm_wl_pool.max_size);
ubi              1074 drivers/mtd/ubi/fastmap.c 	ubi->fm_disabled = 0;
ubi              1075 drivers/mtd/ubi/fastmap.c 	ubi->fast_attach = 1;
ubi              1080 drivers/mtd/ubi/fastmap.c 	up_write(&ubi->fm_protect);
ubi              1082 drivers/mtd/ubi/fastmap.c 		ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
ubi              1096 drivers/mtd/ubi/fastmap.c 	struct ubi_device *ubi = vol->ubi;
ubi              1098 drivers/mtd/ubi/fastmap.c 	if (!ubi->fast_attach)
ubi              1121 drivers/mtd/ubi/fastmap.c static int ubi_write_fastmap(struct ubi_device *ubi,
ubi              1142 drivers/mtd/ubi/fastmap.c 	fm_raw = ubi->fm_buf;
ubi              1143 drivers/mtd/ubi/fastmap.c 	memset(ubi->fm_buf, 0, ubi->fm_size);
ubi              1145 drivers/mtd/ubi/fastmap.c 	avbuf = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
ubi              1151 drivers/mtd/ubi/fastmap.c 	dvbuf = new_fm_vbuf(ubi, UBI_FM_DATA_VOLUME_ID);
ubi              1160 drivers/mtd/ubi/fastmap.c 	seen_pebs = init_seen(ubi);
ubi              1166 drivers/mtd/ubi/fastmap.c 	spin_lock(&ubi->volumes_lock);
ubi              1167 drivers/mtd/ubi/fastmap.c 	spin_lock(&ubi->wl_lock);
ubi              1171 drivers/mtd/ubi/fastmap.c 	ubi_assert(fm_pos <= ubi->fm_size);
ubi              1175 drivers/mtd/ubi/fastmap.c 	ubi_assert(fm_pos <= ubi->fm_size);
ubi              1193 drivers/mtd/ubi/fastmap.c 	fmpl->size = cpu_to_be16(ubi->fm_pool.size);
ubi              1194 drivers/mtd/ubi/fastmap.c 	fmpl->max_size = cpu_to_be16(ubi->fm_pool.max_size);
ubi              1196 drivers/mtd/ubi/fastmap.c 	for (i = 0; i < ubi->fm_pool.size; i++) {
ubi              1197 drivers/mtd/ubi/fastmap.c 		fmpl->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
ubi              1198 drivers/mtd/ubi/fastmap.c 		set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
ubi              1204 drivers/mtd/ubi/fastmap.c 	fmpl_wl->size = cpu_to_be16(ubi->fm_wl_pool.size);
ubi              1205 drivers/mtd/ubi/fastmap.c 	fmpl_wl->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
ubi              1207 drivers/mtd/ubi/fastmap.c 	for (i = 0; i < ubi->fm_wl_pool.size; i++) {
ubi              1208 drivers/mtd/ubi/fastmap.c 		fmpl_wl->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
ubi              1209 drivers/mtd/ubi/fastmap.c 		set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
ubi              1212 drivers/mtd/ubi/fastmap.c 	ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
ubi              1216 drivers/mtd/ubi/fastmap.c 		set_seen(ubi, wl_e->pnum, seen_pebs);
ubi              1221 drivers/mtd/ubi/fastmap.c 		ubi_assert(fm_pos <= ubi->fm_size);
ubi              1225 drivers/mtd/ubi/fastmap.c 	ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
ubi              1229 drivers/mtd/ubi/fastmap.c 		set_seen(ubi, wl_e->pnum, seen_pebs);
ubi              1234 drivers/mtd/ubi/fastmap.c 		ubi_assert(fm_pos <= ubi->fm_size);
ubi              1237 drivers/mtd/ubi/fastmap.c 	ubi_for_each_protected_peb(ubi, i, wl_e) {
ubi              1241 drivers/mtd/ubi/fastmap.c 		set_seen(ubi, wl_e->pnum, seen_pebs);
ubi              1246 drivers/mtd/ubi/fastmap.c 		ubi_assert(fm_pos <= ubi->fm_size);
ubi              1250 drivers/mtd/ubi/fastmap.c 	ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
ubi              1254 drivers/mtd/ubi/fastmap.c 		set_seen(ubi, wl_e->pnum, seen_pebs);
ubi              1259 drivers/mtd/ubi/fastmap.c 		ubi_assert(fm_pos <= ubi->fm_size);
ubi              1264 drivers/mtd/ubi/fastmap.c 	list_for_each_entry(ubi_wrk, &ubi->works, list) {
ubi              1272 drivers/mtd/ubi/fastmap.c 			set_seen(ubi, wl_e->pnum, seen_pebs);
ubi              1277 drivers/mtd/ubi/fastmap.c 			ubi_assert(fm_pos <= ubi->fm_size);
ubi              1283 drivers/mtd/ubi/fastmap.c 		vol = ubi->volumes[i];
ubi              1292 drivers/mtd/ubi/fastmap.c 		ubi_assert(fm_pos <= ubi->fm_size);
ubi              1306 drivers/mtd/ubi/fastmap.c 		ubi_assert(fm_pos <= ubi->fm_size);
ubi              1319 drivers/mtd/ubi/fastmap.c 	fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
ubi              1321 drivers/mtd/ubi/fastmap.c 	avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi              1324 drivers/mtd/ubi/fastmap.c 	spin_unlock(&ubi->wl_lock);
ubi              1325 drivers/mtd/ubi/fastmap.c 	spin_unlock(&ubi->volumes_lock);
ubi              1328 drivers/mtd/ubi/fastmap.c 	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf);
ubi              1330 drivers/mtd/ubi/fastmap.c 		ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
ubi              1336 drivers/mtd/ubi/fastmap.c 		set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
ubi              1342 drivers/mtd/ubi/fastmap.c 					   ubi->fm_size));
ubi              1345 drivers/mtd/ubi/fastmap.c 		dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi              1349 drivers/mtd/ubi/fastmap.c 		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvbuf);
ubi              1351 drivers/mtd/ubi/fastmap.c 			ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
ubi              1358 drivers/mtd/ubi/fastmap.c 		ret = ubi_io_write_data(ubi, fm_raw + (i * ubi->leb_size),
ubi              1359 drivers/mtd/ubi/fastmap.c 					new_fm->e[i]->pnum, 0, ubi->leb_size);
ubi              1361 drivers/mtd/ubi/fastmap.c 			ubi_err(ubi, "unable to write fastmap to PEB %i!",
ubi              1368 drivers/mtd/ubi/fastmap.c 	ubi->fm = new_fm;
ubi              1370 drivers/mtd/ubi/fastmap.c 	ret = self_check_seen(ubi, seen_pebs);
ubi              1391 drivers/mtd/ubi/fastmap.c static int erase_block(struct ubi_device *ubi, int pnum)
ubi              1397 drivers/mtd/ubi/fastmap.c 	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
ubi              1401 drivers/mtd/ubi/fastmap.c 	ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
ubi              1409 drivers/mtd/ubi/fastmap.c 	ret = ubi_io_sync_erase(ubi, pnum, 0);
ubi              1421 drivers/mtd/ubi/fastmap.c 	ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
ubi              1443 drivers/mtd/ubi/fastmap.c static int invalidate_fastmap(struct ubi_device *ubi)
ubi              1451 drivers/mtd/ubi/fastmap.c 	if (!ubi->fm)
ubi              1454 drivers/mtd/ubi/fastmap.c 	ubi->fm = NULL;
ubi              1461 drivers/mtd/ubi/fastmap.c 	vb = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
ubi              1468 drivers/mtd/ubi/fastmap.c 	e = ubi_wl_get_fm_peb(ubi, 1);
ubi              1476 drivers/mtd/ubi/fastmap.c 	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi              1477 drivers/mtd/ubi/fastmap.c 	ret = ubi_io_write_vid_hdr(ubi, e->pnum, vb);
ubi              1479 drivers/mtd/ubi/fastmap.c 		ubi_wl_put_fm_peb(ubi, e, 0, 0);
ubi              1486 drivers/mtd/ubi/fastmap.c 	ubi->fm = fm;
ubi              1503 drivers/mtd/ubi/fastmap.c static void return_fm_pebs(struct ubi_device *ubi,
ubi              1513 drivers/mtd/ubi/fastmap.c 			ubi_wl_put_fm_peb(ubi, fm->e[i], i,
ubi              1527 drivers/mtd/ubi/fastmap.c int ubi_update_fastmap(struct ubi_device *ubi)
ubi              1533 drivers/mtd/ubi/fastmap.c 	down_write(&ubi->fm_protect);
ubi              1534 drivers/mtd/ubi/fastmap.c 	down_write(&ubi->work_sem);
ubi              1535 drivers/mtd/ubi/fastmap.c 	down_write(&ubi->fm_eba_sem);
ubi              1537 drivers/mtd/ubi/fastmap.c 	ubi_refill_pools(ubi);
ubi              1539 drivers/mtd/ubi/fastmap.c 	if (ubi->ro_mode || ubi->fm_disabled) {
ubi              1540 drivers/mtd/ubi/fastmap.c 		up_write(&ubi->fm_eba_sem);
ubi              1541 drivers/mtd/ubi/fastmap.c 		up_write(&ubi->work_sem);
ubi              1542 drivers/mtd/ubi/fastmap.c 		up_write(&ubi->fm_protect);
ubi              1546 drivers/mtd/ubi/fastmap.c 	ret = ubi_ensure_anchor_pebs(ubi);
ubi              1548 drivers/mtd/ubi/fastmap.c 		up_write(&ubi->fm_eba_sem);
ubi              1549 drivers/mtd/ubi/fastmap.c 		up_write(&ubi->work_sem);
ubi              1550 drivers/mtd/ubi/fastmap.c 		up_write(&ubi->fm_protect);
ubi              1556 drivers/mtd/ubi/fastmap.c 		up_write(&ubi->fm_eba_sem);
ubi              1557 drivers/mtd/ubi/fastmap.c 		up_write(&ubi->work_sem);
ubi              1558 drivers/mtd/ubi/fastmap.c 		up_write(&ubi->fm_protect);
ubi              1562 drivers/mtd/ubi/fastmap.c 	new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
ubi              1563 drivers/mtd/ubi/fastmap.c 	old_fm = ubi->fm;
ubi              1564 drivers/mtd/ubi/fastmap.c 	ubi->fm = NULL;
ubi              1567 drivers/mtd/ubi/fastmap.c 		ubi_err(ubi, "fastmap too large");
ubi              1573 drivers/mtd/ubi/fastmap.c 		spin_lock(&ubi->wl_lock);
ubi              1574 drivers/mtd/ubi/fastmap.c 		tmp_e = ubi_wl_get_fm_peb(ubi, 0);
ubi              1575 drivers/mtd/ubi/fastmap.c 		spin_unlock(&ubi->wl_lock);
ubi              1579 drivers/mtd/ubi/fastmap.c 				ret = erase_block(ubi, old_fm->e[i]->pnum);
ubi              1581 drivers/mtd/ubi/fastmap.c 					ubi_err(ubi, "could not erase old fastmap PEB");
ubi              1584 drivers/mtd/ubi/fastmap.c 						ubi_wl_put_fm_peb(ubi, new_fm->e[j],
ubi              1593 drivers/mtd/ubi/fastmap.c 				ubi_err(ubi, "could not get any free erase block");
ubi              1596 drivers/mtd/ubi/fastmap.c 					ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
ubi              1607 drivers/mtd/ubi/fastmap.c 				ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
ubi              1617 drivers/mtd/ubi/fastmap.c 			ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
ubi              1623 drivers/mtd/ubi/fastmap.c 	spin_lock(&ubi->wl_lock);
ubi              1624 drivers/mtd/ubi/fastmap.c 	tmp_e = ubi_wl_get_fm_peb(ubi, 1);
ubi              1625 drivers/mtd/ubi/fastmap.c 	spin_unlock(&ubi->wl_lock);
ubi              1630 drivers/mtd/ubi/fastmap.c 			ret = erase_block(ubi, old_fm->e[0]->pnum);
ubi              1632 drivers/mtd/ubi/fastmap.c 				ubi_err(ubi, "could not erase old anchor PEB");
ubi              1635 drivers/mtd/ubi/fastmap.c 					ubi_wl_put_fm_peb(ubi, new_fm->e[i],
ubi              1646 drivers/mtd/ubi/fastmap.c 			ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
ubi              1653 drivers/mtd/ubi/fastmap.c 			ubi_err(ubi, "could not find any anchor PEB");
ubi              1656 drivers/mtd/ubi/fastmap.c 				ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
ubi              1666 drivers/mtd/ubi/fastmap.c 	ret = ubi_write_fastmap(ubi, new_fm);
ubi              1672 drivers/mtd/ubi/fastmap.c 	up_write(&ubi->fm_eba_sem);
ubi              1673 drivers/mtd/ubi/fastmap.c 	up_write(&ubi->work_sem);
ubi              1674 drivers/mtd/ubi/fastmap.c 	up_write(&ubi->fm_protect);
ubi              1679 drivers/mtd/ubi/fastmap.c 	ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);
ubi              1681 drivers/mtd/ubi/fastmap.c 	ret = invalidate_fastmap(ubi);
ubi              1683 drivers/mtd/ubi/fastmap.c 		ubi_err(ubi, "Unable to invalidate current fastmap!");
ubi              1684 drivers/mtd/ubi/fastmap.c 		ubi_ro_mode(ubi);
ubi              1686 drivers/mtd/ubi/fastmap.c 		return_fm_pebs(ubi, old_fm);
ubi              1687 drivers/mtd/ubi/fastmap.c 		return_fm_pebs(ubi, new_fm);
ubi                81 drivers/mtd/ubi/io.c static int self_check_not_bad(const struct ubi_device *ubi, int pnum);
ubi                82 drivers/mtd/ubi/io.c static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
ubi                83 drivers/mtd/ubi/io.c static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
ubi                85 drivers/mtd/ubi/io.c static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
ubi                86 drivers/mtd/ubi/io.c static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
ubi                88 drivers/mtd/ubi/io.c static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
ubi               113 drivers/mtd/ubi/io.c int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
ubi               122 drivers/mtd/ubi/io.c 	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
ubi               123 drivers/mtd/ubi/io.c 	ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
ubi               126 drivers/mtd/ubi/io.c 	err = self_check_not_bad(ubi, pnum);
ubi               152 drivers/mtd/ubi/io.c 	addr = (loff_t)pnum * ubi->peb_size + offset;
ubi               154 drivers/mtd/ubi/io.c 	err = mtd_read(ubi->mtd, addr, len, &read, buf);
ubi               167 drivers/mtd/ubi/io.c 			ubi_msg(ubi, "fixable bit-flip detected at PEB %d",
ubi               174 drivers/mtd/ubi/io.c 			ubi_warn(ubi, "error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry",
ubi               180 drivers/mtd/ubi/io.c 		ubi_err(ubi, "error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes",
ubi               196 drivers/mtd/ubi/io.c 		if (ubi_dbg_is_bitflip(ubi)) {
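
ubi_io_read() above converts a (PEB, offset) pair into a flat MTD address and folds correctable ECC errors into a distinct return code (UBI_IO_BITFLIPS) so callers can keep the data but schedule the PEB for scrubbing. The model below keeps only that shape: mtd_read_model() is a stand-in for mtd_read(), the UBI_IO_BITFLIPS value is invented for the demo, and -EUCLEAN is used as Linux MTD does to signal corrected bit-flips.

#include <errno.h>
#include <stdio.h>

#define PEB_SIZE (128 * 1024)
#define UBI_IO_BITFLIPS 1	/* stand-in for the driver's "data OK, please scrub" code */

/* Stand-in for mtd_read(): pretend PEB 12 always reports a corrected
 * bit-flip (-EUCLEAN) while every other PEB reads cleanly. */
static int mtd_read_model(long long addr, int len)
{
	(void)len;
	return (addr / PEB_SIZE) == 12 ? -EUCLEAN : 0;
}

/* Model of ubi_io_read(): flat address = pnum * peb_size + offset, and
 * -EUCLEAN from the MTD layer becomes UBI_IO_BITFLIPS for the caller. */
static int io_read(int pnum, int offset, int len)
{
	long long addr = (long long)pnum * PEB_SIZE + offset;
	int err = mtd_read_model(addr, len);

	if (err == -EUCLEAN)
		return UBI_IO_BITFLIPS;
	return err;
}

int main(void)
{
	printf("PEB 3:  %d\n", io_read(3, 0, 4096));
	printf("PEB 12: %d\n", io_read(12, 0, 4096));
	return 0;
}
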
ubi               222 drivers/mtd/ubi/io.c int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
ubi               231 drivers/mtd/ubi/io.c 	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
ubi               232 drivers/mtd/ubi/io.c 	ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
ubi               233 drivers/mtd/ubi/io.c 	ubi_assert(offset % ubi->hdrs_min_io_size == 0);
ubi               234 drivers/mtd/ubi/io.c 	ubi_assert(len > 0 && len % ubi->hdrs_min_io_size == 0);
ubi               236 drivers/mtd/ubi/io.c 	if (ubi->ro_mode) {
ubi               237 drivers/mtd/ubi/io.c 		ubi_err(ubi, "read-only mode");
ubi               241 drivers/mtd/ubi/io.c 	err = self_check_not_bad(ubi, pnum);
ubi               246 drivers/mtd/ubi/io.c 	err = ubi_self_check_all_ff(ubi, pnum, offset, len);
ubi               250 drivers/mtd/ubi/io.c 	if (offset >= ubi->leb_start) {
ubi               255 drivers/mtd/ubi/io.c 		err = self_check_peb_ec_hdr(ubi, pnum);
ubi               258 drivers/mtd/ubi/io.c 		err = self_check_peb_vid_hdr(ubi, pnum);
ubi               263 drivers/mtd/ubi/io.c 	if (ubi_dbg_is_write_failure(ubi)) {
ubi               264 drivers/mtd/ubi/io.c 		ubi_err(ubi, "cannot write %d bytes to PEB %d:%d (emulated)",
ubi               270 drivers/mtd/ubi/io.c 	addr = (loff_t)pnum * ubi->peb_size + offset;
ubi               271 drivers/mtd/ubi/io.c 	err = mtd_write(ubi->mtd, addr, len, &written, buf);
ubi               273 drivers/mtd/ubi/io.c 		ubi_err(ubi, "error %d while writing %d bytes to PEB %d:%d, written %zd bytes",
ubi               276 drivers/mtd/ubi/io.c 		ubi_dump_flash(ubi, pnum, offset, len);
ubi               281 drivers/mtd/ubi/io.c 		err = self_check_write(ubi, buf, pnum, offset, len);
ubi               290 drivers/mtd/ubi/io.c 		len = ubi->peb_size - offset;
ubi               292 drivers/mtd/ubi/io.c 			err = ubi_self_check_all_ff(ubi, pnum, offset, len);
ubi               307 drivers/mtd/ubi/io.c static int do_sync_erase(struct ubi_device *ubi, int pnum)
ubi               313 drivers/mtd/ubi/io.c 	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
ubi               315 drivers/mtd/ubi/io.c 	if (ubi->ro_mode) {
ubi               316 drivers/mtd/ubi/io.c 		ubi_err(ubi, "read-only mode");
ubi               323 drivers/mtd/ubi/io.c 	ei.addr     = (loff_t)pnum * ubi->peb_size;
ubi               324 drivers/mtd/ubi/io.c 	ei.len      = ubi->peb_size;
ubi               326 drivers/mtd/ubi/io.c 	err = mtd_erase(ubi->mtd, &ei);
ubi               329 drivers/mtd/ubi/io.c 			ubi_warn(ubi, "error %d while erasing PEB %d, retry",
ubi               334 drivers/mtd/ubi/io.c 		ubi_err(ubi, "cannot erase PEB %d, error %d", pnum, err);
ubi               339 drivers/mtd/ubi/io.c 	err = ubi_self_check_all_ff(ubi, pnum, 0, ubi->peb_size);
ubi               343 drivers/mtd/ubi/io.c 	if (ubi_dbg_is_erase_failure(ubi)) {
ubi               344 drivers/mtd/ubi/io.c 		ubi_err(ubi, "cannot erase PEB %d (emulated)", pnum);
ubi               363 drivers/mtd/ubi/io.c static int torture_peb(struct ubi_device *ubi, int pnum)
ubi               367 drivers/mtd/ubi/io.c 	ubi_msg(ubi, "run torture test for PEB %d", pnum);
ubi               371 drivers/mtd/ubi/io.c 	mutex_lock(&ubi->buf_mutex);
ubi               373 drivers/mtd/ubi/io.c 		err = do_sync_erase(ubi, pnum);
ubi               378 drivers/mtd/ubi/io.c 		err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
ubi               382 drivers/mtd/ubi/io.c 		err = ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->peb_size);
ubi               384 drivers/mtd/ubi/io.c 			ubi_err(ubi, "erased PEB %d, but a non-0xFF byte found",
ubi               391 drivers/mtd/ubi/io.c 		memset(ubi->peb_buf, patterns[i], ubi->peb_size);
ubi               392 drivers/mtd/ubi/io.c 		err = ubi_io_write(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
ubi               396 drivers/mtd/ubi/io.c 		memset(ubi->peb_buf, ~patterns[i], ubi->peb_size);
ubi               397 drivers/mtd/ubi/io.c 		err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
ubi               401 drivers/mtd/ubi/io.c 		err = ubi_check_pattern(ubi->peb_buf, patterns[i],
ubi               402 drivers/mtd/ubi/io.c 					ubi->peb_size);
ubi               404 drivers/mtd/ubi/io.c 			ubi_err(ubi, "pattern %x checking failed for PEB %d",
ubi               412 drivers/mtd/ubi/io.c 	ubi_msg(ubi, "PEB %d passed torture test, do not mark it as bad", pnum);
ubi               415 drivers/mtd/ubi/io.c 	mutex_unlock(&ubi->buf_mutex);
ubi               422 drivers/mtd/ubi/io.c 		ubi_err(ubi, "read problems on freshly erased PEB %d, must be bad",
ubi               449 drivers/mtd/ubi/io.c static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
ubi               474 drivers/mtd/ubi/io.c 	addr = (loff_t)pnum * ubi->peb_size;
ubi               475 drivers/mtd/ubi/io.c 	err = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0);
ubi               478 drivers/mtd/ubi/io.c 		err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
ubi               483 drivers/mtd/ubi/io.c 	ubi_init_vid_buf(ubi, &vidb, &vid_hdr);
ubi               486 drivers/mtd/ubi/io.c 	err = ubi_io_read_vid_hdr(ubi, pnum, &vidb, 0);
ubi               489 drivers/mtd/ubi/io.c 		addr += ubi->vid_hdr_aloffset;
ubi               490 drivers/mtd/ubi/io.c 		err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
ubi               502 drivers/mtd/ubi/io.c 	ubi_err(ubi, "cannot invalidate PEB %d, write returned %d", pnum, err);
ubi               503 drivers/mtd/ubi/io.c 	ubi_dump_flash(ubi, pnum, 0, ubi->peb_size);
ubi               523 drivers/mtd/ubi/io.c int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
ubi               527 drivers/mtd/ubi/io.c 	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
ubi               529 drivers/mtd/ubi/io.c 	err = self_check_not_bad(ubi, pnum);
ubi               533 drivers/mtd/ubi/io.c 	if (ubi->ro_mode) {
ubi               534 drivers/mtd/ubi/io.c 		ubi_err(ubi, "read-only mode");
ubi               538 drivers/mtd/ubi/io.c 	if (ubi->nor_flash) {
ubi               539 drivers/mtd/ubi/io.c 		err = nor_erase_prepare(ubi, pnum);
ubi               545 drivers/mtd/ubi/io.c 		ret = torture_peb(ubi, pnum);
ubi               550 drivers/mtd/ubi/io.c 	err = do_sync_erase(ubi, pnum);
ubi               565 drivers/mtd/ubi/io.c int ubi_io_is_bad(const struct ubi_device *ubi, int pnum)
ubi               567 drivers/mtd/ubi/io.c 	struct mtd_info *mtd = ubi->mtd;
ubi               569 drivers/mtd/ubi/io.c 	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
ubi               571 drivers/mtd/ubi/io.c 	if (ubi->bad_allowed) {
ubi               574 drivers/mtd/ubi/io.c 		ret = mtd_block_isbad(mtd, (loff_t)pnum * ubi->peb_size);
ubi               576 drivers/mtd/ubi/io.c 			ubi_err(ubi, "error %d while checking if PEB %d is bad",
ubi               594 drivers/mtd/ubi/io.c int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum)
ubi               597 drivers/mtd/ubi/io.c 	struct mtd_info *mtd = ubi->mtd;
ubi               599 drivers/mtd/ubi/io.c 	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
ubi               601 drivers/mtd/ubi/io.c 	if (ubi->ro_mode) {
ubi               602 drivers/mtd/ubi/io.c 		ubi_err(ubi, "read-only mode");
ubi               606 drivers/mtd/ubi/io.c 	if (!ubi->bad_allowed)
ubi               609 drivers/mtd/ubi/io.c 	err = mtd_block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
ubi               611 drivers/mtd/ubi/io.c 		ubi_err(ubi, "cannot mark PEB %d bad, error %d", pnum, err);
ubi               623 drivers/mtd/ubi/io.c static int validate_ec_hdr(const struct ubi_device *ubi,
ubi               634 drivers/mtd/ubi/io.c 		ubi_err(ubi, "node with incompatible UBI version found: this UBI version is %d, image version is %d",
ubi               639 drivers/mtd/ubi/io.c 	if (vid_hdr_offset != ubi->vid_hdr_offset) {
ubi               640 drivers/mtd/ubi/io.c 		ubi_err(ubi, "bad VID header offset %d, expected %d",
ubi               641 drivers/mtd/ubi/io.c 			vid_hdr_offset, ubi->vid_hdr_offset);
ubi               645 drivers/mtd/ubi/io.c 	if (leb_start != ubi->leb_start) {
ubi               646 drivers/mtd/ubi/io.c 		ubi_err(ubi, "bad data offset %d, expected %d",
ubi               647 drivers/mtd/ubi/io.c 			leb_start, ubi->leb_start);
ubi               652 drivers/mtd/ubi/io.c 		ubi_err(ubi, "bad erase counter %lld", ec);
ubi               659 drivers/mtd/ubi/io.c 	ubi_err(ubi, "bad EC header");
ubi               687 drivers/mtd/ubi/io.c int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
ubi               694 drivers/mtd/ubi/io.c 	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
ubi               696 drivers/mtd/ubi/io.c 	read_err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
ubi               725 drivers/mtd/ubi/io.c 				ubi_warn(ubi, "no EC header found at PEB %d, only 0xFF bytes",
ubi               740 drivers/mtd/ubi/io.c 			ubi_warn(ubi, "bad magic number at PEB %d: %08x instead of %08x",
ubi               754 drivers/mtd/ubi/io.c 			ubi_warn(ubi, "bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
ubi               768 drivers/mtd/ubi/io.c 	err = validate_ec_hdr(ubi, ec_hdr);
ubi               770 drivers/mtd/ubi/io.c 		ubi_err(ubi, "validation failed for PEB %d", pnum);
ubi               796 drivers/mtd/ubi/io.c int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
ubi               803 drivers/mtd/ubi/io.c 	ubi_assert(pnum >= 0 &&  pnum < ubi->peb_count);
ubi               807 drivers/mtd/ubi/io.c 	ec_hdr->vid_hdr_offset = cpu_to_be32(ubi->vid_hdr_offset);
ubi               808 drivers/mtd/ubi/io.c 	ec_hdr->data_offset = cpu_to_be32(ubi->leb_start);
ubi               809 drivers/mtd/ubi/io.c 	ec_hdr->image_seq = cpu_to_be32(ubi->image_seq);
ubi               813 drivers/mtd/ubi/io.c 	err = self_check_ec_hdr(ubi, pnum, ec_hdr);
ubi               817 drivers/mtd/ubi/io.c 	if (ubi_dbg_power_cut(ubi, POWER_CUT_EC_WRITE))
ubi               820 drivers/mtd/ubi/io.c 	err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize);
ubi               832 drivers/mtd/ubi/io.c static int validate_vid_hdr(const struct ubi_device *ubi,
ubi               844 drivers/mtd/ubi/io.c 	int usable_leb_size = ubi->leb_size - data_pad;
ubi               847 drivers/mtd/ubi/io.c 		ubi_err(ubi, "bad copy_flag");
ubi               853 drivers/mtd/ubi/io.c 		ubi_err(ubi, "negative values");
ubi               858 drivers/mtd/ubi/io.c 		ubi_err(ubi, "bad vol_id");
ubi               863 drivers/mtd/ubi/io.c 		ubi_err(ubi, "bad compat");
ubi               870 drivers/mtd/ubi/io.c 		ubi_err(ubi, "bad compat");
ubi               875 drivers/mtd/ubi/io.c 		ubi_err(ubi, "bad vol_type");
ubi               879 drivers/mtd/ubi/io.c 	if (data_pad >= ubi->leb_size / 2) {
ubi               880 drivers/mtd/ubi/io.c 		ubi_err(ubi, "bad data_pad");
ubi               884 drivers/mtd/ubi/io.c 	if (data_size > ubi->leb_size) {
ubi               885 drivers/mtd/ubi/io.c 		ubi_err(ubi, "bad data_size");
ubi               897 drivers/mtd/ubi/io.c 			ubi_err(ubi, "zero used_ebs");
ubi               901 drivers/mtd/ubi/io.c 			ubi_err(ubi, "zero data_size");
ubi               906 drivers/mtd/ubi/io.c 				ubi_err(ubi, "bad data_size");
ubi               911 drivers/mtd/ubi/io.c 				ubi_err(ubi, "bad data_size at last LEB");
ubi               915 drivers/mtd/ubi/io.c 			ubi_err(ubi, "too high lnum");
ubi               921 drivers/mtd/ubi/io.c 				ubi_err(ubi, "non-zero data CRC");
ubi               925 drivers/mtd/ubi/io.c 				ubi_err(ubi, "non-zero data_size");
ubi               930 drivers/mtd/ubi/io.c 				ubi_err(ubi, "zero data_size of copy");
ubi               935 drivers/mtd/ubi/io.c 			ubi_err(ubi, "bad used_ebs");
ubi               943 drivers/mtd/ubi/io.c 	ubi_err(ubi, "bad VID header");
ubi               964 drivers/mtd/ubi/io.c int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
ubi               973 drivers/mtd/ubi/io.c 	ubi_assert(pnum >= 0 &&  pnum < ubi->peb_count);
ubi               975 drivers/mtd/ubi/io.c 	read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
ubi               976 drivers/mtd/ubi/io.c 			  ubi->vid_hdr_shift + UBI_VID_HDR_SIZE);
ubi               987 drivers/mtd/ubi/io.c 				ubi_warn(ubi, "no VID header found at PEB %d, only 0xFF bytes",
ubi               998 drivers/mtd/ubi/io.c 			ubi_warn(ubi, "bad magic number at PEB %d: %08x instead of %08x",
ubi              1012 drivers/mtd/ubi/io.c 			ubi_warn(ubi, "bad CRC at PEB %d, calculated %#08x, read %#08x",
ubi              1024 drivers/mtd/ubi/io.c 	err = validate_vid_hdr(ubi, vid_hdr);
ubi              1026 drivers/mtd/ubi/io.c 		ubi_err(ubi, "validation failed for PEB %d", pnum);
ubi              1048 drivers/mtd/ubi/io.c int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
ubi              1057 drivers/mtd/ubi/io.c 	ubi_assert(pnum >= 0 &&  pnum < ubi->peb_count);
ubi              1059 drivers/mtd/ubi/io.c 	err = self_check_peb_ec_hdr(ubi, pnum);
ubi              1068 drivers/mtd/ubi/io.c 	err = self_check_vid_hdr(ubi, pnum, vid_hdr);
ubi              1072 drivers/mtd/ubi/io.c 	if (ubi_dbg_power_cut(ubi, POWER_CUT_VID_WRITE))
ubi              1075 drivers/mtd/ubi/io.c 	err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
ubi              1076 drivers/mtd/ubi/io.c 			   ubi->vid_hdr_alsize);
ubi              1088 drivers/mtd/ubi/io.c static int self_check_not_bad(const struct ubi_device *ubi, int pnum)
ubi              1092 drivers/mtd/ubi/io.c 	if (!ubi_dbg_chk_io(ubi))
ubi              1095 drivers/mtd/ubi/io.c 	err = ubi_io_is_bad(ubi, pnum);
ubi              1099 drivers/mtd/ubi/io.c 	ubi_err(ubi, "self-check failed for PEB %d", pnum);
ubi              1113 drivers/mtd/ubi/io.c static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
ubi              1119 drivers/mtd/ubi/io.c 	if (!ubi_dbg_chk_io(ubi))
ubi              1124 drivers/mtd/ubi/io.c 		ubi_err(ubi, "bad magic %#08x, must be %#08x",
ubi              1129 drivers/mtd/ubi/io.c 	err = validate_ec_hdr(ubi, ec_hdr);
ubi              1131 drivers/mtd/ubi/io.c 		ubi_err(ubi, "self-check failed for PEB %d", pnum);
ubi              1151 drivers/mtd/ubi/io.c static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
ubi              1157 drivers/mtd/ubi/io.c 	if (!ubi_dbg_chk_io(ubi))
ubi              1160 drivers/mtd/ubi/io.c 	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
ubi              1164 drivers/mtd/ubi/io.c 	err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
ubi              1171 drivers/mtd/ubi/io.c 		ubi_err(ubi, "bad CRC, calculated %#08x, read %#08x",
ubi              1173 drivers/mtd/ubi/io.c 		ubi_err(ubi, "self-check failed for PEB %d", pnum);
ubi              1180 drivers/mtd/ubi/io.c 	err = self_check_ec_hdr(ubi, pnum, ec_hdr);
ubi              1196 drivers/mtd/ubi/io.c static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
ubi              1202 drivers/mtd/ubi/io.c 	if (!ubi_dbg_chk_io(ubi))
ubi              1207 drivers/mtd/ubi/io.c 		ubi_err(ubi, "bad VID header magic %#08x at PEB %d, must be %#08x",
ubi              1212 drivers/mtd/ubi/io.c 	err = validate_vid_hdr(ubi, vid_hdr);
ubi              1214 drivers/mtd/ubi/io.c 		ubi_err(ubi, "self-check failed for PEB %d", pnum);
ubi              1221 drivers/mtd/ubi/io.c 	ubi_err(ubi, "self-check failed for PEB %d", pnum);
ubi              1236 drivers/mtd/ubi/io.c static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
ubi              1244 drivers/mtd/ubi/io.c 	if (!ubi_dbg_chk_io(ubi))
ubi              1247 drivers/mtd/ubi/io.c 	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
ubi              1253 drivers/mtd/ubi/io.c 	err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
ubi              1254 drivers/mtd/ubi/io.c 			  ubi->vid_hdr_alsize);
ubi              1261 drivers/mtd/ubi/io.c 		ubi_err(ubi, "bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
ubi              1263 drivers/mtd/ubi/io.c 		ubi_err(ubi, "self-check failed for PEB %d", pnum);
ubi              1270 drivers/mtd/ubi/io.c 	err = self_check_vid_hdr(ubi, pnum, vid_hdr);
ubi              1289 drivers/mtd/ubi/io.c static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
ubi              1295 drivers/mtd/ubi/io.c 	loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
ubi              1297 drivers/mtd/ubi/io.c 	if (!ubi_dbg_chk_io(ubi))
ubi              1302 drivers/mtd/ubi/io.c 		ubi_err(ubi, "cannot allocate memory to check writes");
ubi              1306 drivers/mtd/ubi/io.c 	err = mtd_read(ubi->mtd, addr, len, &read, buf1);
ubi              1318 drivers/mtd/ubi/io.c 		ubi_err(ubi, "self-check failed for PEB %d:%d, len %d",
ubi              1320 drivers/mtd/ubi/io.c 		ubi_msg(ubi, "data differ at position %d", i);
ubi              1322 drivers/mtd/ubi/io.c 		ubi_msg(ubi, "hex dump of the original buffer from %d to %d",
ubi              1326 drivers/mtd/ubi/io.c 		ubi_msg(ubi, "hex dump of the read buffer from %d to %d",
ubi              1354 drivers/mtd/ubi/io.c int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
ubi              1359 drivers/mtd/ubi/io.c 	loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
ubi              1361 drivers/mtd/ubi/io.c 	if (!ubi_dbg_chk_io(ubi))
ubi              1366 drivers/mtd/ubi/io.c 		ubi_err(ubi, "cannot allocate memory to check for 0xFFs");
ubi              1370 drivers/mtd/ubi/io.c 	err = mtd_read(ubi->mtd, addr, len, &read, buf);
ubi              1372 drivers/mtd/ubi/io.c 		ubi_err(ubi, "err %d while reading %d bytes from PEB %d:%d, read %zd bytes",
ubi              1379 drivers/mtd/ubi/io.c 		ubi_err(ubi, "flash region at PEB %d:%d, length %d does not contain all 0xFF bytes",
ubi              1388 drivers/mtd/ubi/io.c 	ubi_err(ubi, "self-check failed for PEB %d", pnum);
ubi              1389 drivers/mtd/ubi/io.c 	ubi_msg(ubi, "hex dump of the %d-%d region", offset, offset + len);
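The io.c hits end with the write-verification self-checks: self_check_write() reads the just-written region back and compares it byte for byte, and ubi_self_check_all_ff() confirms a flash region is still erased. A minimal userspace sketch of both ideas, assuming nothing beyond standard C (the helper names below are hypothetical, not kernel API):

#include <stdio.h>
#include <string.h>

/* return -1 if every byte is 0xFF, else the first offending offset */
static int check_all_ff(const unsigned char *buf, int len)
{
	int i;

	for (i = 0; i < len; i++)
		if (buf[i] != 0xFF)
			return i;
	return -1;
}

/* return -1 if the read-back copy matches the written data, else the
 * first position where the two buffers differ */
static int check_write(const unsigned char *written,
		       const unsigned char *readback, int len)
{
	int i;

	for (i = 0; i < len; i++)
		if (written[i] != readback[i])
			return i;
	return -1;
}

int main(void)
{
	unsigned char erased[16], data[16] = "UBI", copy[16];

	memset(erased, 0xFF, sizeof(erased));
	memcpy(copy, data, sizeof(copy));

	printf("all-0xFF check: %d\n", check_all_ff(erased, sizeof(erased)));
	printf("write check:    %d\n", check_write(data, copy, sizeof(copy)));
	return 0;
}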
ubi                26 drivers/mtd/ubi/kapi.c void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di)
ubi                28 drivers/mtd/ubi/kapi.c 	di->ubi_num = ubi->ubi_num;
ubi                29 drivers/mtd/ubi/kapi.c 	di->leb_size = ubi->leb_size;
ubi                30 drivers/mtd/ubi/kapi.c 	di->leb_start = ubi->leb_start;
ubi                31 drivers/mtd/ubi/kapi.c 	di->min_io_size = ubi->min_io_size;
ubi                32 drivers/mtd/ubi/kapi.c 	di->max_write_size = ubi->max_write_size;
ubi                33 drivers/mtd/ubi/kapi.c 	di->ro_mode = ubi->ro_mode;
ubi                34 drivers/mtd/ubi/kapi.c 	di->cdev = ubi->cdev.dev;
ubi                48 drivers/mtd/ubi/kapi.c 	struct ubi_device *ubi;
ubi                52 drivers/mtd/ubi/kapi.c 	ubi = ubi_get_device(ubi_num);
ubi                53 drivers/mtd/ubi/kapi.c 	if (!ubi)
ubi                55 drivers/mtd/ubi/kapi.c 	ubi_do_get_device_info(ubi, di);
ubi                56 drivers/mtd/ubi/kapi.c 	ubi_put_device(ubi);
ubi                67 drivers/mtd/ubi/kapi.c void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
ubi                71 drivers/mtd/ubi/kapi.c 	vi->ubi_num = ubi->ubi_num;
ubi                92 drivers/mtd/ubi/kapi.c 	ubi_do_get_volume_info(desc->vol->ubi, desc->vol, vi);
ubi               118 drivers/mtd/ubi/kapi.c 	struct ubi_device *ubi;
ubi               133 drivers/mtd/ubi/kapi.c 	ubi = ubi_get_device(ubi_num);
ubi               134 drivers/mtd/ubi/kapi.c 	if (!ubi)
ubi               137 drivers/mtd/ubi/kapi.c 	if (vol_id < 0 || vol_id >= ubi->vtbl_slots) {
ubi               152 drivers/mtd/ubi/kapi.c 	spin_lock(&ubi->volumes_lock);
ubi               153 drivers/mtd/ubi/kapi.c 	vol = ubi->volumes[vol_id];
ubi               186 drivers/mtd/ubi/kapi.c 	spin_unlock(&ubi->volumes_lock);
ubi               191 drivers/mtd/ubi/kapi.c 	mutex_lock(&ubi->ckvol_mutex);
ubi               194 drivers/mtd/ubi/kapi.c 		err = ubi_check_volume(ubi, vol_id);
ubi               196 drivers/mtd/ubi/kapi.c 			mutex_unlock(&ubi->ckvol_mutex);
ubi               201 drivers/mtd/ubi/kapi.c 			ubi_warn(ubi, "volume %d on UBI device %d is corrupted",
ubi               202 drivers/mtd/ubi/kapi.c 				 vol_id, ubi->ubi_num);
ubi               207 drivers/mtd/ubi/kapi.c 	mutex_unlock(&ubi->ckvol_mutex);
ubi               212 drivers/mtd/ubi/kapi.c 	spin_unlock(&ubi->volumes_lock);
ubi               217 drivers/mtd/ubi/kapi.c 	ubi_err(ubi, "cannot open device %d, volume %d, error %d",
ubi               219 drivers/mtd/ubi/kapi.c 	ubi_put_device(ubi);
ubi               236 drivers/mtd/ubi/kapi.c 	struct ubi_device *ubi;
ubi               251 drivers/mtd/ubi/kapi.c 	ubi = ubi_get_device(ubi_num);
ubi               252 drivers/mtd/ubi/kapi.c 	if (!ubi)
ubi               255 drivers/mtd/ubi/kapi.c 	spin_lock(&ubi->volumes_lock);
ubi               257 drivers/mtd/ubi/kapi.c 	for (i = 0; i < ubi->vtbl_slots; i++) {
ubi               258 drivers/mtd/ubi/kapi.c 		struct ubi_volume *vol = ubi->volumes[i];
ubi               265 drivers/mtd/ubi/kapi.c 	spin_unlock(&ubi->volumes_lock);
ubi               276 drivers/mtd/ubi/kapi.c 	ubi_put_device(ubi);
ubi               328 drivers/mtd/ubi/kapi.c 	struct ubi_device *ubi = vol->ubi;
ubi               331 drivers/mtd/ubi/kapi.c 		ubi->ubi_num, vol->vol_id, desc->mode);
ubi               333 drivers/mtd/ubi/kapi.c 	spin_lock(&ubi->volumes_lock);
ubi               349 drivers/mtd/ubi/kapi.c 	spin_unlock(&ubi->volumes_lock);
ubi               353 drivers/mtd/ubi/kapi.c 	ubi_put_device(ubi);
ubi               372 drivers/mtd/ubi/kapi.c 	struct ubi_device *ubi = vol->ubi;
ubi               375 drivers/mtd/ubi/kapi.c 	if (vol_id < 0 || vol_id >= ubi->vtbl_slots || lnum < 0 ||
ubi               426 drivers/mtd/ubi/kapi.c 	struct ubi_device *ubi = vol->ubi;
ubi               438 drivers/mtd/ubi/kapi.c 	err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check);
ubi               440 drivers/mtd/ubi/kapi.c 		ubi_warn(ubi, "mark volume %d as corrupted", vol_id);
ubi               466 drivers/mtd/ubi/kapi.c 	struct ubi_device *ubi = vol->ubi;
ubi               478 drivers/mtd/ubi/kapi.c 	err = ubi_eba_read_leb_sg(ubi, vol, sgl, lnum, offset, len, check);
ubi               480 drivers/mtd/ubi/kapi.c 		ubi_warn(ubi, "mark volume %d as corrupted", vol_id);
ubi               517 drivers/mtd/ubi/kapi.c 	struct ubi_device *ubi = vol->ubi;
ubi               522 drivers/mtd/ubi/kapi.c 	if (vol_id < 0 || vol_id >= ubi->vtbl_slots)
ubi               530 drivers/mtd/ubi/kapi.c 	    offset & (ubi->min_io_size - 1) || len & (ubi->min_io_size - 1))
ubi               539 drivers/mtd/ubi/kapi.c 	return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len);
ubi               562 drivers/mtd/ubi/kapi.c 	struct ubi_device *ubi = vol->ubi;
ubi               567 drivers/mtd/ubi/kapi.c 	if (vol_id < 0 || vol_id >= ubi->vtbl_slots)
ubi               574 drivers/mtd/ubi/kapi.c 	    len > vol->usable_leb_size || len & (ubi->min_io_size - 1))
ubi               583 drivers/mtd/ubi/kapi.c 	return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len);
ubi               602 drivers/mtd/ubi/kapi.c 	struct ubi_device *ubi = vol->ubi;
ubi               616 drivers/mtd/ubi/kapi.c 	err = ubi_eba_unmap_leb(ubi, vol, lnum);
ubi               620 drivers/mtd/ubi/kapi.c 	return ubi_wl_flush(ubi, vol->vol_id, lnum);
ubi               663 drivers/mtd/ubi/kapi.c 	struct ubi_device *ubi = vol->ubi;
ubi               676 drivers/mtd/ubi/kapi.c 	return ubi_eba_unmap_leb(ubi, vol, lnum);
ubi               699 drivers/mtd/ubi/kapi.c 	struct ubi_device *ubi = vol->ubi;
ubi               715 drivers/mtd/ubi/kapi.c 	return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
ubi               761 drivers/mtd/ubi/kapi.c 	struct ubi_device *ubi;
ubi               763 drivers/mtd/ubi/kapi.c 	ubi = ubi_get_device(ubi_num);
ubi               764 drivers/mtd/ubi/kapi.c 	if (!ubi)
ubi               767 drivers/mtd/ubi/kapi.c 	mtd_sync(ubi->mtd);
ubi               768 drivers/mtd/ubi/kapi.c 	ubi_put_device(ubi);
ubi               787 drivers/mtd/ubi/kapi.c 	struct ubi_device *ubi;
ubi               790 drivers/mtd/ubi/kapi.c 	ubi = ubi_get_device(ubi_num);
ubi               791 drivers/mtd/ubi/kapi.c 	if (!ubi)
ubi               794 drivers/mtd/ubi/kapi.c 	err = ubi_wl_flush(ubi, vol_id, lnum);
ubi               795 drivers/mtd/ubi/kapi.c 	ubi_put_device(ubi);
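The kapi.c excerpts show ubi_leb_write() and ubi_leb_change() rejecting requests whose offset or length is not aligned to the minimum I/O unit or does not fit inside the usable LEB size. A standalone sketch of that validation, assuming a power-of-two min_io_size; the geometry numbers are made up for illustration:

#include <stdbool.h>
#include <stdio.h>

static bool leb_write_args_ok(int offset, int len,
			      int usable_leb_size, int min_io_size)
{
	if (offset < 0 || len < 0 || offset + len > usable_leb_size)
		return false;
	/* min_io_size is a power of two, so masking tests alignment */
	if ((offset & (min_io_size - 1)) || (len & (min_io_size - 1)))
		return false;
	return true;
}

int main(void)
{
	/* assumed geometry: 126976-byte usable LEBs, 2048-byte NAND pages */
	printf("%d\n", leb_write_args_ok(0, 4096, 126976, 2048));   /* 1 */
	printf("%d\n", leb_write_args_ok(100, 4096, 126976, 2048)); /* 0 */
	return 0;
}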
ubi                22 drivers/mtd/ubi/misc.c int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf,
ubi                27 drivers/mtd/ubi/misc.c 	ubi_assert(!(length & (ubi->min_io_size - 1)));
ubi                34 drivers/mtd/ubi/misc.c 	length = ALIGN(i + 1, ubi->min_io_size);
ubi                48 drivers/mtd/ubi/misc.c int ubi_check_volume(struct ubi_device *ubi, int vol_id)
ubi                52 drivers/mtd/ubi/misc.c 	struct ubi_volume *vol = ubi->volumes[vol_id];
ubi                71 drivers/mtd/ubi/misc.c 		err = ubi_eba_read_leb(ubi, vol, i, buf, 0, size, 1);
ubi                92 drivers/mtd/ubi/misc.c void ubi_update_reserved(struct ubi_device *ubi)
ubi                94 drivers/mtd/ubi/misc.c 	int need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
ubi                96 drivers/mtd/ubi/misc.c 	if (need <= 0 || ubi->avail_pebs == 0)
ubi                99 drivers/mtd/ubi/misc.c 	need = min_t(int, need, ubi->avail_pebs);
ubi               100 drivers/mtd/ubi/misc.c 	ubi->avail_pebs -= need;
ubi               101 drivers/mtd/ubi/misc.c 	ubi->rsvd_pebs += need;
ubi               102 drivers/mtd/ubi/misc.c 	ubi->beb_rsvd_pebs += need;
ubi               103 drivers/mtd/ubi/misc.c 	ubi_msg(ubi, "reserved more %d PEBs for bad PEB handling", need);
ubi               111 drivers/mtd/ubi/misc.c void ubi_calculate_reserved(struct ubi_device *ubi)
ubi               117 drivers/mtd/ubi/misc.c 	ubi->beb_rsvd_level = ubi->bad_peb_limit - ubi->bad_peb_count;
ubi               118 drivers/mtd/ubi/misc.c 	if (ubi->beb_rsvd_level < 0) {
ubi               119 drivers/mtd/ubi/misc.c 		ubi->beb_rsvd_level = 0;
ubi               120 drivers/mtd/ubi/misc.c 		ubi_warn(ubi, "number of bad PEBs (%d) is above the expected limit (%d), not reserving any PEBs for bad PEB handling, will use available PEBs (if any)",
ubi               121 drivers/mtd/ubi/misc.c 			 ubi->bad_peb_count, ubi->bad_peb_limit);
ubi               145 drivers/mtd/ubi/misc.c void ubi_msg(const struct ubi_device *ubi, const char *fmt, ...)
ubi               155 drivers/mtd/ubi/misc.c 	pr_notice(UBI_NAME_STR "%d: %pV\n", ubi->ubi_num, &vaf);
ubi               161 drivers/mtd/ubi/misc.c void ubi_warn(const struct ubi_device *ubi, const char *fmt, ...)
ubi               172 drivers/mtd/ubi/misc.c 		ubi->ubi_num, __builtin_return_address(0), &vaf);
ubi               178 drivers/mtd/ubi/misc.c void ubi_err(const struct ubi_device *ubi, const char *fmt, ...)
ubi               189 drivers/mtd/ubi/misc.c 	       ubi->ubi_num, __builtin_return_address(0), &vaf);
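ubi_calc_data_len() above trims trailing 0xFF bytes (erased flash does not need to be programmed again) and rounds what remains up to the minimum I/O unit. A self-contained sketch of the same arithmetic; ALIGN() is open-coded here and the sizes are assumptions:

#include <stdio.h>

static int calc_data_len(const unsigned char *buf, int length, int min_io_size)
{
	int i;

	/* scan backwards for the last byte that actually carries data */
	for (i = length - 1; i >= 0; i--)
		if (buf[i] != 0xFF)
			break;

	/* all 0xFF means nothing needs to be written */
	if (i < 0)
		return 0;

	/* round i + 1 up to the next multiple of min_io_size */
	return ((i + 1 + min_io_size - 1) / min_io_size) * min_io_size;
}

int main(void)
{
	unsigned char buf[4096];
	int i;

	for (i = 0; i < 4096; i++)
		buf[i] = 0xFF;
	buf[100] = 0xAB;	/* last meaningful byte at offset 100 */

	printf("%d\n", calc_data_len(buf, 4096, 512));	/* prints 512 */
	return 0;
}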
ubi                43 drivers/mtd/ubi/ubi.h void ubi_msg(const struct ubi_device *ubi, const char *fmt, ...);
ubi                47 drivers/mtd/ubi/ubi.h void ubi_warn(const struct ubi_device *ubi, const char *fmt, ...);
ubi                51 drivers/mtd/ubi/ubi.h void ubi_err(const struct ubi_device *ubi, const char *fmt, ...);
ubi               341 drivers/mtd/ubi/ubi.h 	struct ubi_device *ubi;
ubi               802 drivers/mtd/ubi/ubi.h 	int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int shutdown);
ubi               825 drivers/mtd/ubi/ubi.h int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
ubi               831 drivers/mtd/ubi/ubi.h struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
ubi               833 drivers/mtd/ubi/ubi.h int ubi_attach(struct ubi_device *ubi, int force_scan);
ubi               837 drivers/mtd/ubi/ubi.h int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
ubi               839 drivers/mtd/ubi/ubi.h int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
ubi               841 drivers/mtd/ubi/ubi.h int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai);
ubi               844 drivers/mtd/ubi/ubi.h int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req);
ubi               847 drivers/mtd/ubi/ubi.h int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list);
ubi               848 drivers/mtd/ubi/ubi.h int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol);
ubi               849 drivers/mtd/ubi/ubi.h void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol);
ubi               852 drivers/mtd/ubi/ubi.h int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
ubi               854 drivers/mtd/ubi/ubi.h int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
ubi               856 drivers/mtd/ubi/ubi.h int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
ubi               858 drivers/mtd/ubi/ubi.h int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
ubi               862 drivers/mtd/ubi/ubi.h int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf,
ubi               864 drivers/mtd/ubi/ubi.h int ubi_check_volume(struct ubi_device *ubi, int vol_id);
ubi               865 drivers/mtd/ubi/ubi.h void ubi_update_reserved(struct ubi_device *ubi);
ubi               866 drivers/mtd/ubi/ubi.h void ubi_calculate_reserved(struct ubi_device *ubi);
ubi               884 drivers/mtd/ubi/ubi.h int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
ubi               886 drivers/mtd/ubi/ubi.h int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
ubi               888 drivers/mtd/ubi/ubi.h int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol,
ubi               891 drivers/mtd/ubi/ubi.h int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
ubi               893 drivers/mtd/ubi/ubi.h int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
ubi               895 drivers/mtd/ubi/ubi.h int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
ubi               897 drivers/mtd/ubi/ubi.h int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
ubi               899 drivers/mtd/ubi/ubi.h int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
ubi               900 drivers/mtd/ubi/ubi.h unsigned long long ubi_next_sqnum(struct ubi_device *ubi);
ubi               901 drivers/mtd/ubi/ubi.h int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
ubi               905 drivers/mtd/ubi/ubi.h int ubi_wl_get_peb(struct ubi_device *ubi);
ubi               906 drivers/mtd/ubi/ubi.h int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
ubi               908 drivers/mtd/ubi/ubi.h int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum);
ubi               909 drivers/mtd/ubi/ubi.h int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum);
ubi               910 drivers/mtd/ubi/ubi.h int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
ubi               911 drivers/mtd/ubi/ubi.h void ubi_wl_close(struct ubi_device *ubi);
ubi               913 drivers/mtd/ubi/ubi.h struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor);
ubi               914 drivers/mtd/ubi/ubi.h int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *used_e,
ubi               917 drivers/mtd/ubi/ubi.h void ubi_refill_pools(struct ubi_device *ubi);
ubi               918 drivers/mtd/ubi/ubi.h int ubi_ensure_anchor_pebs(struct ubi_device *ubi);
ubi               919 drivers/mtd/ubi/ubi.h int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force_scrub);
ubi               922 drivers/mtd/ubi/ubi.h int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
ubi               924 drivers/mtd/ubi/ubi.h int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
ubi               926 drivers/mtd/ubi/ubi.h int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture);
ubi               927 drivers/mtd/ubi/ubi.h int ubi_io_is_bad(const struct ubi_device *ubi, int pnum);
ubi               928 drivers/mtd/ubi/ubi.h int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum);
ubi               929 drivers/mtd/ubi/ubi.h int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
ubi               931 drivers/mtd/ubi/ubi.h int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
ubi               933 drivers/mtd/ubi/ubi.h int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
ubi               935 drivers/mtd/ubi/ubi.h int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
ubi               943 drivers/mtd/ubi/ubi.h void ubi_put_device(struct ubi_device *ubi);
ubi               946 drivers/mtd/ubi/ubi.h int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol,
ubi               948 drivers/mtd/ubi/ubi.h int ubi_notify_all(struct ubi_device *ubi, int ntype,
ubi               951 drivers/mtd/ubi/ubi.h void ubi_free_internal_volumes(struct ubi_device *ubi);
ubi               954 drivers/mtd/ubi/ubi.h void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di);
ubi               955 drivers/mtd/ubi/ubi.h void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
ubi               958 drivers/mtd/ubi/ubi.h int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
ubi               963 drivers/mtd/ubi/ubi.h size_t ubi_calc_fm_size(struct ubi_device *ubi);
ubi               964 drivers/mtd/ubi/ubi.h int ubi_update_fastmap(struct ubi_device *ubi);
ubi               965 drivers/mtd/ubi/ubi.h int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
ubi               970 drivers/mtd/ubi/ubi.h static inline int ubi_update_fastmap(struct ubi_device *ubi) { return 0; }
ubi              1000 drivers/mtd/ubi/ubi.h #define ubi_for_each_free_peb(ubi, e, tmp_rb)	\
ubi              1001 drivers/mtd/ubi/ubi.h 	ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->free, u.rb)
ubi              1009 drivers/mtd/ubi/ubi.h #define ubi_for_each_used_peb(ubi, e, tmp_rb)	\
ubi              1010 drivers/mtd/ubi/ubi.h 	ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->used, u.rb)
ubi              1018 drivers/mtd/ubi/ubi.h #define ubi_for_each_scrub_peb(ubi, e, tmp_rb)	\
ubi              1019 drivers/mtd/ubi/ubi.h 	ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->scrub, u.rb)
ubi              1027 drivers/mtd/ubi/ubi.h #define ubi_for_each_protected_peb(ubi, i, e)	\
ubi              1029 drivers/mtd/ubi/ubi.h 		list_for_each_entry((e), &(ubi->pq[(i)]), u.list)
ubi              1066 drivers/mtd/ubi/ubi.h static inline void ubi_init_vid_buf(const struct ubi_device *ubi,
ubi              1071 drivers/mtd/ubi/ubi.h 		memset(buf, 0, ubi->vid_hdr_alsize);
ubi              1074 drivers/mtd/ubi/ubi.h 	vidb->hdr = buf + ubi->vid_hdr_shift;
ubi              1083 drivers/mtd/ubi/ubi.h ubi_alloc_vid_buf(const struct ubi_device *ubi, gfp_t gfp_flags)
ubi              1092 drivers/mtd/ubi/ubi.h 	buf = kmalloc(ubi->vid_hdr_alsize, gfp_flags);
ubi              1098 drivers/mtd/ubi/ubi.h 	ubi_init_vid_buf(ubi, vidb, buf);
ubi              1130 drivers/mtd/ubi/ubi.h static inline int ubi_io_read_data(const struct ubi_device *ubi, void *buf,
ubi              1134 drivers/mtd/ubi/ubi.h 	return ubi_io_read(ubi, buf, pnum, offset + ubi->leb_start, len);
ubi              1142 drivers/mtd/ubi/ubi.h static inline int ubi_io_write_data(struct ubi_device *ubi, const void *buf,
ubi              1146 drivers/mtd/ubi/ubi.h 	return ubi_io_write(ubi, buf, pnum, offset + ubi->leb_start, len);
ubi              1153 drivers/mtd/ubi/ubi.h static inline void ubi_ro_mode(struct ubi_device *ubi)
ubi              1155 drivers/mtd/ubi/ubi.h 	if (!ubi->ro_mode) {
ubi              1156 drivers/mtd/ubi/ubi.h 		ubi->ro_mode = 1;
ubi              1157 drivers/mtd/ubi/ubi.h 		ubi_warn(ubi, "switch to read-only mode");
ubi              1167 drivers/mtd/ubi/ubi.h static inline int vol_id2idx(const struct ubi_device *ubi, int vol_id)
ubi              1170 drivers/mtd/ubi/ubi.h 		return vol_id - UBI_INTERNAL_VOL_START + ubi->vtbl_slots;
ubi              1180 drivers/mtd/ubi/ubi.h static inline int idx2vol_id(const struct ubi_device *ubi, int idx)
ubi              1182 drivers/mtd/ubi/ubi.h 	if (idx >= ubi->vtbl_slots)
ubi              1183 drivers/mtd/ubi/ubi.h 		return idx - ubi->vtbl_slots + UBI_INTERNAL_VOL_START;
ubi              1211 drivers/mtd/ubi/ubi.h static inline struct ubi_wl_entry *ubi_find_fm_block(const struct ubi_device *ubi,
ubi              1216 drivers/mtd/ubi/ubi.h 	if (ubi->fm) {
ubi              1217 drivers/mtd/ubi/ubi.h 		for (i = 0; i < ubi->fm->used_blocks; i++) {
ubi              1218 drivers/mtd/ubi/ubi.h 			if (ubi->fm->e[i]->pnum == pnum)
ubi              1219 drivers/mtd/ubi/ubi.h 				return ubi->fm->e[i];
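The inline helpers vol_id2idx() and idx2vol_id() above map user volume IDs straight to their volume-table slot and append internal volumes (IDs starting at UBI_INTERNAL_VOL_START) after the user slots. A round-trip sketch of that mapping with assumed constants (both values below are stand-ins, not the real kernel definitions):

#include <stdio.h>

#define VTBL_SLOTS		128		/* assumed slot count */
#define INTERNAL_VOL_START	0x7FFFEFFF	/* assumed stand-in value */

static int vol_id2idx(int vol_id)
{
	if (vol_id >= INTERNAL_VOL_START)
		return vol_id - INTERNAL_VOL_START + VTBL_SLOTS;
	return vol_id;
}

static int idx2vol_id(int idx)
{
	if (idx >= VTBL_SLOTS)
		return idx - VTBL_SLOTS + INTERNAL_VOL_START;
	return idx;
}

int main(void)
{
	printf("%d\n", vol_id2idx(5));				/* 5 */
	printf("%d\n", vol_id2idx(INTERNAL_VOL_START));		/* 128 */
	printf("%d\n", idx2vol_id(128) == INTERNAL_VOL_START);	/* 1 */
	return 0;
}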
ubi                41 drivers/mtd/ubi/upd.c static int set_update_marker(struct ubi_device *ubi, struct ubi_volume *vol)
ubi                49 drivers/mtd/ubi/upd.c 		ubi_assert(ubi->vtbl[vol->vol_id].upd_marker);
ubi                54 drivers/mtd/ubi/upd.c 	vtbl_rec = ubi->vtbl[vol->vol_id];
ubi                57 drivers/mtd/ubi/upd.c 	mutex_lock(&ubi->device_mutex);
ubi                58 drivers/mtd/ubi/upd.c 	err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
ubi                60 drivers/mtd/ubi/upd.c 	mutex_unlock(&ubi->device_mutex);
ubi                74 drivers/mtd/ubi/upd.c static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol,
ubi                82 drivers/mtd/ubi/upd.c 	vtbl_rec = ubi->vtbl[vol->vol_id];
ubi                97 drivers/mtd/ubi/upd.c 	mutex_lock(&ubi->device_mutex);
ubi                98 drivers/mtd/ubi/upd.c 	err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
ubi               100 drivers/mtd/ubi/upd.c 	mutex_unlock(&ubi->device_mutex);
ubi               114 drivers/mtd/ubi/upd.c int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
ubi               123 drivers/mtd/ubi/upd.c 	vol->upd_buf = vmalloc(ubi->leb_size);
ubi               127 drivers/mtd/ubi/upd.c 	err = set_update_marker(ubi, vol);
ubi               133 drivers/mtd/ubi/upd.c 		err = ubi_eba_unmap_leb(ubi, vol, i);
ubi               138 drivers/mtd/ubi/upd.c 	err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
ubi               143 drivers/mtd/ubi/upd.c 		err = clear_update_marker(ubi, vol, 0);
ubi               168 drivers/mtd/ubi/upd.c int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
ubi               176 drivers/mtd/ubi/upd.c 		return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0);
ubi               183 drivers/mtd/ubi/upd.c 	vol->upd_buf = vmalloc(ALIGN((int)req->bytes, ubi->min_io_size));
ubi               219 drivers/mtd/ubi/upd.c static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
ubi               225 drivers/mtd/ubi/upd.c 		int l = ALIGN(len, ubi->min_io_size);
ubi               228 drivers/mtd/ubi/upd.c 		len = ubi_calc_data_len(ubi, buf, l);
ubi               234 drivers/mtd/ubi/upd.c 		err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len);
ubi               246 drivers/mtd/ubi/upd.c 		err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len, used_ebs);
ubi               265 drivers/mtd/ubi/upd.c int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
ubi               273 drivers/mtd/ubi/upd.c 	if (ubi->ro_mode)
ubi               309 drivers/mtd/ubi/upd.c 			err = write_leb(ubi, vol, lnum, vol->upd_buf, flush_len,
ubi               337 drivers/mtd/ubi/upd.c 			err = write_leb(ubi, vol, lnum, vol->upd_buf,
ubi               351 drivers/mtd/ubi/upd.c 		err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
ubi               355 drivers/mtd/ubi/upd.c 		err = clear_update_marker(ubi, vol, vol->upd_bytes);
ubi               380 drivers/mtd/ubi/upd.c int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
ubi               388 drivers/mtd/ubi/upd.c 	if (ubi->ro_mode)
ubi               401 drivers/mtd/ubi/upd.c 		int len = ALIGN((int)vol->upd_bytes, ubi->min_io_size);
ubi               405 drivers/mtd/ubi/upd.c 		len = ubi_calc_data_len(ubi, vol->upd_buf, len);
ubi               406 drivers/mtd/ubi/upd.c 		err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum,
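ubi_more_update_data() above buffers incoming update data and flushes it LEB by LEB through write_leb(), which trims the final, partially filled LEB via ubi_calc_data_len(). A rough sketch of that chunking loop; the sizes are purely illustrative:

#include <stdio.h>

int main(void)
{
	long long total = 300000;	/* assumed update size in bytes */
	int leb_size = 126976;		/* assumed usable LEB size */
	long long done = 0;
	int lnum = 0;

	while (done < total) {
		int chunk = leb_size;

		if (total - done < leb_size)
			chunk = (int)(total - done);	/* short last LEB */

		printf("LEB %d: write %d bytes\n", lnum, chunk);
		done += chunk;
		lnum++;
	}
	return 0;
}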
ubi                19 drivers/mtd/ubi/vmt.c static int self_check_volumes(struct ubi_device *ubi);
ubi                59 drivers/mtd/ubi/vmt.c 	struct ubi_device *ubi;
ubi                61 drivers/mtd/ubi/vmt.c 	ubi = ubi_get_device(vol->ubi->ubi_num);
ubi                62 drivers/mtd/ubi/vmt.c 	if (!ubi)
ubi                65 drivers/mtd/ubi/vmt.c 	spin_lock(&ubi->volumes_lock);
ubi                66 drivers/mtd/ubi/vmt.c 	if (!ubi->volumes[vol->vol_id]) {
ubi                67 drivers/mtd/ubi/vmt.c 		spin_unlock(&ubi->volumes_lock);
ubi                68 drivers/mtd/ubi/vmt.c 		ubi_put_device(ubi);
ubi                73 drivers/mtd/ubi/vmt.c 	spin_unlock(&ubi->volumes_lock);
ubi               102 drivers/mtd/ubi/vmt.c 	spin_lock(&ubi->volumes_lock);
ubi               105 drivers/mtd/ubi/vmt.c 	spin_unlock(&ubi->volumes_lock);
ubi               106 drivers/mtd/ubi/vmt.c 	ubi_put_device(ubi);
ubi               144 drivers/mtd/ubi/vmt.c int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
ubi               151 drivers/mtd/ubi/vmt.c 	if (ubi->ro_mode)
ubi               160 drivers/mtd/ubi/vmt.c 	vol->dev.parent = &ubi->dev;
ubi               167 drivers/mtd/ubi/vmt.c 	spin_lock(&ubi->volumes_lock);
ubi               171 drivers/mtd/ubi/vmt.c 		for (i = 0; i < ubi->vtbl_slots; i++)
ubi               172 drivers/mtd/ubi/vmt.c 			if (!ubi->volumes[i]) {
ubi               178 drivers/mtd/ubi/vmt.c 			ubi_err(ubi, "out of volume IDs");
ubi               186 drivers/mtd/ubi/vmt.c 		ubi->ubi_num, vol_id, (unsigned long long)req->bytes,
ubi               191 drivers/mtd/ubi/vmt.c 	if (ubi->volumes[vol_id]) {
ubi               192 drivers/mtd/ubi/vmt.c 		ubi_err(ubi, "volume %d already exists", vol_id);
ubi               197 drivers/mtd/ubi/vmt.c 	for (i = 0; i < ubi->vtbl_slots; i++)
ubi               198 drivers/mtd/ubi/vmt.c 		if (ubi->volumes[i] &&
ubi               199 drivers/mtd/ubi/vmt.c 		    ubi->volumes[i]->name_len == req->name_len &&
ubi               200 drivers/mtd/ubi/vmt.c 		    !strcmp(ubi->volumes[i]->name, req->name)) {
ubi               201 drivers/mtd/ubi/vmt.c 			ubi_err(ubi, "volume \"%s\" exists (ID %d)",
ubi               207 drivers/mtd/ubi/vmt.c 	vol->usable_leb_size = ubi->leb_size - ubi->leb_size % req->alignment;
ubi               212 drivers/mtd/ubi/vmt.c 	if (vol->reserved_pebs > ubi->avail_pebs) {
ubi               213 drivers/mtd/ubi/vmt.c 		ubi_err(ubi, "not enough PEBs, only %d available",
ubi               214 drivers/mtd/ubi/vmt.c 			ubi->avail_pebs);
ubi               215 drivers/mtd/ubi/vmt.c 		if (ubi->corr_peb_count)
ubi               216 drivers/mtd/ubi/vmt.c 			ubi_err(ubi, "%d PEBs are corrupted and not used",
ubi               217 drivers/mtd/ubi/vmt.c 				ubi->corr_peb_count);
ubi               221 drivers/mtd/ubi/vmt.c 	ubi->avail_pebs -= vol->reserved_pebs;
ubi               222 drivers/mtd/ubi/vmt.c 	ubi->rsvd_pebs += vol->reserved_pebs;
ubi               223 drivers/mtd/ubi/vmt.c 	spin_unlock(&ubi->volumes_lock);
ubi               227 drivers/mtd/ubi/vmt.c 	vol->data_pad  = ubi->leb_size % vol->alignment;
ubi               231 drivers/mtd/ubi/vmt.c 	vol->ubi = ubi;
ubi               237 drivers/mtd/ubi/vmt.c 	err = ubi_wl_flush(ubi, vol_id, UBI_ALL);
ubi               265 drivers/mtd/ubi/vmt.c 	spin_lock(&ubi->volumes_lock);
ubi               266 drivers/mtd/ubi/vmt.c 	ubi->volumes[vol_id] = vol;
ubi               267 drivers/mtd/ubi/vmt.c 	ubi->vol_count += 1;
ubi               268 drivers/mtd/ubi/vmt.c 	spin_unlock(&ubi->volumes_lock);
ubi               274 drivers/mtd/ubi/vmt.c 	vol->dev.devt = MKDEV(MAJOR(ubi->cdev.dev), vol_id + 1);
ubi               275 drivers/mtd/ubi/vmt.c 	dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
ubi               278 drivers/mtd/ubi/vmt.c 		ubi_err(ubi, "cannot add device");
ubi               298 drivers/mtd/ubi/vmt.c 	err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
ubi               302 drivers/mtd/ubi/vmt.c 	ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED);
ubi               303 drivers/mtd/ubi/vmt.c 	self_check_volumes(ubi);
ubi               314 drivers/mtd/ubi/vmt.c 	spin_lock(&ubi->volumes_lock);
ubi               315 drivers/mtd/ubi/vmt.c 	ubi->volumes[vol_id] = NULL;
ubi               316 drivers/mtd/ubi/vmt.c 	ubi->vol_count -= 1;
ubi               317 drivers/mtd/ubi/vmt.c 	spin_unlock(&ubi->volumes_lock);
ubi               320 drivers/mtd/ubi/vmt.c 	spin_lock(&ubi->volumes_lock);
ubi               321 drivers/mtd/ubi/vmt.c 	ubi->rsvd_pebs -= vol->reserved_pebs;
ubi               322 drivers/mtd/ubi/vmt.c 	ubi->avail_pebs += vol->reserved_pebs;
ubi               324 drivers/mtd/ubi/vmt.c 	spin_unlock(&ubi->volumes_lock);
ubi               326 drivers/mtd/ubi/vmt.c 	ubi_err(ubi, "cannot create volume %d, error %d", vol_id, err);
ubi               343 drivers/mtd/ubi/vmt.c 	struct ubi_device *ubi = vol->ubi;
ubi               346 drivers/mtd/ubi/vmt.c 	dbg_gen("remove device %d, volume %d", ubi->ubi_num, vol_id);
ubi               348 drivers/mtd/ubi/vmt.c 	ubi_assert(vol == ubi->volumes[vol_id]);
ubi               350 drivers/mtd/ubi/vmt.c 	if (ubi->ro_mode)
ubi               353 drivers/mtd/ubi/vmt.c 	spin_lock(&ubi->volumes_lock);
ubi               362 drivers/mtd/ubi/vmt.c 	ubi->volumes[vol_id] = NULL;
ubi               363 drivers/mtd/ubi/vmt.c 	spin_unlock(&ubi->volumes_lock);
ubi               366 drivers/mtd/ubi/vmt.c 		err = ubi_change_vtbl_record(ubi, vol_id, NULL);
ubi               372 drivers/mtd/ubi/vmt.c 		err = ubi_eba_unmap_leb(ubi, vol, i);
ubi               380 drivers/mtd/ubi/vmt.c 	spin_lock(&ubi->volumes_lock);
ubi               381 drivers/mtd/ubi/vmt.c 	ubi->rsvd_pebs -= reserved_pebs;
ubi               382 drivers/mtd/ubi/vmt.c 	ubi->avail_pebs += reserved_pebs;
ubi               383 drivers/mtd/ubi/vmt.c 	ubi_update_reserved(ubi);
ubi               384 drivers/mtd/ubi/vmt.c 	ubi->vol_count -= 1;
ubi               385 drivers/mtd/ubi/vmt.c 	spin_unlock(&ubi->volumes_lock);
ubi               387 drivers/mtd/ubi/vmt.c 	ubi_volume_notify(ubi, vol, UBI_VOLUME_REMOVED);
ubi               389 drivers/mtd/ubi/vmt.c 		self_check_volumes(ubi);
ubi               394 drivers/mtd/ubi/vmt.c 	ubi_err(ubi, "cannot remove volume %d, error %d", vol_id, err);
ubi               395 drivers/mtd/ubi/vmt.c 	spin_lock(&ubi->volumes_lock);
ubi               396 drivers/mtd/ubi/vmt.c 	ubi->volumes[vol_id] = vol;
ubi               398 drivers/mtd/ubi/vmt.c 	spin_unlock(&ubi->volumes_lock);
ubi               415 drivers/mtd/ubi/vmt.c 	struct ubi_device *ubi = vol->ubi;
ubi               420 drivers/mtd/ubi/vmt.c 	if (ubi->ro_mode)
ubi               424 drivers/mtd/ubi/vmt.c 		ubi->ubi_num, vol_id, vol->reserved_pebs, reserved_pebs);
ubi               428 drivers/mtd/ubi/vmt.c 		ubi_err(ubi, "too small size %d, %d LEBs contain data",
ubi               441 drivers/mtd/ubi/vmt.c 	spin_lock(&ubi->volumes_lock);
ubi               443 drivers/mtd/ubi/vmt.c 		spin_unlock(&ubi->volumes_lock);
ubi               447 drivers/mtd/ubi/vmt.c 	spin_unlock(&ubi->volumes_lock);
ubi               452 drivers/mtd/ubi/vmt.c 		spin_lock(&ubi->volumes_lock);
ubi               453 drivers/mtd/ubi/vmt.c 		if (pebs > ubi->avail_pebs) {
ubi               454 drivers/mtd/ubi/vmt.c 			ubi_err(ubi, "not enough PEBs: requested %d, available %d",
ubi               455 drivers/mtd/ubi/vmt.c 				pebs, ubi->avail_pebs);
ubi               456 drivers/mtd/ubi/vmt.c 			if (ubi->corr_peb_count)
ubi               457 drivers/mtd/ubi/vmt.c 				ubi_err(ubi, "%d PEBs are corrupted and not used",
ubi               458 drivers/mtd/ubi/vmt.c 					ubi->corr_peb_count);
ubi               459 drivers/mtd/ubi/vmt.c 			spin_unlock(&ubi->volumes_lock);
ubi               463 drivers/mtd/ubi/vmt.c 		ubi->avail_pebs -= pebs;
ubi               464 drivers/mtd/ubi/vmt.c 		ubi->rsvd_pebs += pebs;
ubi               467 drivers/mtd/ubi/vmt.c 		spin_unlock(&ubi->volumes_lock);
ubi               472 drivers/mtd/ubi/vmt.c 			err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
ubi               476 drivers/mtd/ubi/vmt.c 		spin_lock(&ubi->volumes_lock);
ubi               477 drivers/mtd/ubi/vmt.c 		ubi->rsvd_pebs += pebs;
ubi               478 drivers/mtd/ubi/vmt.c 		ubi->avail_pebs -= pebs;
ubi               479 drivers/mtd/ubi/vmt.c 		ubi_update_reserved(ubi);
ubi               482 drivers/mtd/ubi/vmt.c 		spin_unlock(&ubi->volumes_lock);
ubi               491 drivers/mtd/ubi/vmt.c 		err = ubi_wl_flush(ubi, vol_id, UBI_ALL);
ubi               497 drivers/mtd/ubi/vmt.c 	vtbl_rec = ubi->vtbl[vol_id];
ubi               499 drivers/mtd/ubi/vmt.c 	err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
ubi               511 drivers/mtd/ubi/vmt.c 	ubi_volume_notify(ubi, vol, UBI_VOLUME_RESIZED);
ubi               512 drivers/mtd/ubi/vmt.c 	self_check_volumes(ubi);
ubi               517 drivers/mtd/ubi/vmt.c 		spin_lock(&ubi->volumes_lock);
ubi               518 drivers/mtd/ubi/vmt.c 		ubi->rsvd_pebs -= pebs;
ubi               519 drivers/mtd/ubi/vmt.c 		ubi->avail_pebs += pebs;
ubi               520 drivers/mtd/ubi/vmt.c 		spin_unlock(&ubi->volumes_lock);
ubi               536 drivers/mtd/ubi/vmt.c int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list)
ubi               541 drivers/mtd/ubi/vmt.c 	err = ubi_vtbl_rename_volumes(ubi, rename_list);
ubi               553 drivers/mtd/ubi/vmt.c 			spin_lock(&ubi->volumes_lock);
ubi               556 drivers/mtd/ubi/vmt.c 			spin_unlock(&ubi->volumes_lock);
ubi               557 drivers/mtd/ubi/vmt.c 			ubi_volume_notify(ubi, vol, UBI_VOLUME_RENAMED);
ubi               562 drivers/mtd/ubi/vmt.c 		self_check_volumes(ubi);
ubi               575 drivers/mtd/ubi/vmt.c int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
ubi               585 drivers/mtd/ubi/vmt.c 	dev = MKDEV(MAJOR(ubi->cdev.dev), vol->vol_id + 1);
ubi               588 drivers/mtd/ubi/vmt.c 		ubi_err(ubi, "cannot add character device for volume %d, error %d",
ubi               594 drivers/mtd/ubi/vmt.c 	vol->dev.parent = &ubi->dev;
ubi               598 drivers/mtd/ubi/vmt.c 	dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
ubi               603 drivers/mtd/ubi/vmt.c 	self_check_volumes(ubi);
ubi               619 drivers/mtd/ubi/vmt.c void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
ubi               623 drivers/mtd/ubi/vmt.c 	ubi->volumes[vol->vol_id] = NULL;
ubi               635 drivers/mtd/ubi/vmt.c static int self_check_volume(struct ubi_device *ubi, int vol_id)
ubi               637 drivers/mtd/ubi/vmt.c 	int idx = vol_id2idx(ubi, vol_id);
ubi               643 drivers/mtd/ubi/vmt.c 	spin_lock(&ubi->volumes_lock);
ubi               644 drivers/mtd/ubi/vmt.c 	reserved_pebs = be32_to_cpu(ubi->vtbl[vol_id].reserved_pebs);
ubi               645 drivers/mtd/ubi/vmt.c 	vol = ubi->volumes[idx];
ubi               649 drivers/mtd/ubi/vmt.c 			ubi_err(ubi, "no volume info, but volume exists");
ubi               652 drivers/mtd/ubi/vmt.c 		spin_unlock(&ubi->volumes_lock);
ubi               658 drivers/mtd/ubi/vmt.c 		ubi_err(ubi, "negative values");
ubi               661 drivers/mtd/ubi/vmt.c 	if (vol->alignment > ubi->leb_size || vol->alignment == 0) {
ubi               662 drivers/mtd/ubi/vmt.c 		ubi_err(ubi, "bad alignment");
ubi               666 drivers/mtd/ubi/vmt.c 	n = vol->alignment & (ubi->min_io_size - 1);
ubi               668 drivers/mtd/ubi/vmt.c 		ubi_err(ubi, "alignment is not multiple of min I/O unit");
ubi               672 drivers/mtd/ubi/vmt.c 	n = ubi->leb_size % vol->alignment;
ubi               674 drivers/mtd/ubi/vmt.c 		ubi_err(ubi, "bad data_pad, has to be %lld", n);
ubi               680 drivers/mtd/ubi/vmt.c 		ubi_err(ubi, "bad vol_type");
ubi               685 drivers/mtd/ubi/vmt.c 		ubi_err(ubi, "update marker and corrupted simultaneously");
ubi               689 drivers/mtd/ubi/vmt.c 	if (vol->reserved_pebs > ubi->good_peb_count) {
ubi               690 drivers/mtd/ubi/vmt.c 		ubi_err(ubi, "too large reserved_pebs");
ubi               694 drivers/mtd/ubi/vmt.c 	n = ubi->leb_size - vol->data_pad;
ubi               695 drivers/mtd/ubi/vmt.c 	if (vol->usable_leb_size != ubi->leb_size - vol->data_pad) {
ubi               696 drivers/mtd/ubi/vmt.c 		ubi_err(ubi, "bad usable_leb_size, has to be %lld", n);
ubi               701 drivers/mtd/ubi/vmt.c 		ubi_err(ubi, "too long volume name, max is %d",
ubi               708 drivers/mtd/ubi/vmt.c 		ubi_err(ubi, "bad name_len %lld", n);
ubi               715 drivers/mtd/ubi/vmt.c 			ubi_err(ubi, "corrupted dynamic volume");
ubi               719 drivers/mtd/ubi/vmt.c 			ubi_err(ubi, "bad used_ebs");
ubi               723 drivers/mtd/ubi/vmt.c 			ubi_err(ubi, "bad last_eb_bytes");
ubi               727 drivers/mtd/ubi/vmt.c 			ubi_err(ubi, "bad used_bytes");
ubi               732 drivers/mtd/ubi/vmt.c 			ubi_err(ubi, "bad skip_check");
ubi               737 drivers/mtd/ubi/vmt.c 			ubi_err(ubi, "bad used_ebs");
ubi               742 drivers/mtd/ubi/vmt.c 			ubi_err(ubi, "bad last_eb_bytes");
ubi               747 drivers/mtd/ubi/vmt.c 			ubi_err(ubi, "bad used_bytes");
ubi               752 drivers/mtd/ubi/vmt.c 	alignment  = be32_to_cpu(ubi->vtbl[vol_id].alignment);
ubi               753 drivers/mtd/ubi/vmt.c 	data_pad   = be32_to_cpu(ubi->vtbl[vol_id].data_pad);
ubi               754 drivers/mtd/ubi/vmt.c 	name_len   = be16_to_cpu(ubi->vtbl[vol_id].name_len);
ubi               755 drivers/mtd/ubi/vmt.c 	upd_marker = ubi->vtbl[vol_id].upd_marker;
ubi               756 drivers/mtd/ubi/vmt.c 	name       = &ubi->vtbl[vol_id].name[0];
ubi               757 drivers/mtd/ubi/vmt.c 	if (ubi->vtbl[vol_id].vol_type == UBI_VID_DYNAMIC)
ubi               765 drivers/mtd/ubi/vmt.c 		ubi_err(ubi, "volume info is different");
ubi               769 drivers/mtd/ubi/vmt.c 	spin_unlock(&ubi->volumes_lock);
ubi               773 drivers/mtd/ubi/vmt.c 	ubi_err(ubi, "self-check failed for volume %d", vol_id);
ubi               776 drivers/mtd/ubi/vmt.c 	ubi_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
ubi               778 drivers/mtd/ubi/vmt.c 	spin_unlock(&ubi->volumes_lock);
ubi               788 drivers/mtd/ubi/vmt.c static int self_check_volumes(struct ubi_device *ubi)
ubi               792 drivers/mtd/ubi/vmt.c 	if (!ubi_dbg_chk_gen(ubi))
ubi               795 drivers/mtd/ubi/vmt.c 	for (i = 0; i < ubi->vtbl_slots; i++) {
ubi               796 drivers/mtd/ubi/vmt.c 		err = self_check_volume(ubi, i);
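ubi_create_volume() above derives data_pad and usable_leb_size from the requested alignment and then reserves enough whole LEBs for the requested byte count, failing if that exceeds the available PEBs. A standalone sketch of the arithmetic with assumed numbers (the alignment is deliberately chosen so data_pad comes out non-zero):

#include <stdio.h>

int main(void)
{
	int leb_size = 126976;			/* assumed device LEB size */
	int alignment = 6144;			/* requested alignment */
	long long bytes = 10 * 1024 * 1024;	/* requested volume size */

	int data_pad = leb_size % alignment;
	int usable_leb_size = leb_size - data_pad;
	long long reserved_pebs =
		(bytes + usable_leb_size - 1) / usable_leb_size;

	printf("data_pad=%d usable_leb_size=%d reserved_pebs=%lld\n",
	       data_pad, usable_leb_size, reserved_pebs);
	return 0;
}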
ubi                54 drivers/mtd/ubi/vtbl.c static void self_vtbl_check(const struct ubi_device *ubi);
ubi                63 drivers/mtd/ubi/vtbl.c static int ubi_update_layout_vol(struct ubi_device *ubi)
ubi                68 drivers/mtd/ubi/vtbl.c 	layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)];
ubi                70 drivers/mtd/ubi/vtbl.c 		err = ubi_eba_atomic_leb_change(ubi, layout_vol, i, ubi->vtbl,
ubi                71 drivers/mtd/ubi/vtbl.c 						ubi->vtbl_size);
ubi                90 drivers/mtd/ubi/vtbl.c int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
ubi                96 drivers/mtd/ubi/vtbl.c 	ubi_assert(idx >= 0 && idx < ubi->vtbl_slots);
ubi               105 drivers/mtd/ubi/vtbl.c 	memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record));
ubi               106 drivers/mtd/ubi/vtbl.c 	err = ubi_update_layout_vol(ubi);
ubi               108 drivers/mtd/ubi/vtbl.c 	self_vtbl_check(ubi);
ubi               121 drivers/mtd/ubi/vtbl.c int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
ubi               129 drivers/mtd/ubi/vtbl.c 		struct ubi_vtbl_record *vtbl_rec = &ubi->vtbl[vol->vol_id];
ubi               146 drivers/mtd/ubi/vtbl.c 	return ubi_update_layout_vol(ubi);
ubi               157 drivers/mtd/ubi/vtbl.c static int vtbl_check(const struct ubi_device *ubi,
ubi               165 drivers/mtd/ubi/vtbl.c 	for (i = 0; i < ubi->vtbl_slots; i++) {
ubi               178 drivers/mtd/ubi/vtbl.c 			ubi_err(ubi, "bad CRC at record %u: %#08x, not %#08x",
ubi               199 drivers/mtd/ubi/vtbl.c 		if (alignment > ubi->leb_size || alignment == 0) {
ubi               204 drivers/mtd/ubi/vtbl.c 		n = alignment & (ubi->min_io_size - 1);
ubi               210 drivers/mtd/ubi/vtbl.c 		n = ubi->leb_size % alignment;
ubi               212 drivers/mtd/ubi/vtbl.c 			ubi_err(ubi, "bad data_pad, has to be %d", n);
ubi               227 drivers/mtd/ubi/vtbl.c 		if (reserved_pebs > ubi->good_peb_count) {
ubi               228 drivers/mtd/ubi/vtbl.c 			ubi_err(ubi, "too large reserved_pebs %d, good PEBs %d",
ubi               229 drivers/mtd/ubi/vtbl.c 				reserved_pebs, ubi->good_peb_count);
ubi               251 drivers/mtd/ubi/vtbl.c 	for (i = 0; i < ubi->vtbl_slots - 1; i++) {
ubi               252 drivers/mtd/ubi/vtbl.c 		for (n = i + 1; n < ubi->vtbl_slots; n++) {
ubi               258 drivers/mtd/ubi/vtbl.c 				ubi_err(ubi, "volumes %d and %d have the same name \"%s\"",
ubi               270 drivers/mtd/ubi/vtbl.c 	ubi_err(ubi, "volume table check failed: record %d, error %d", i, err);
ubi               285 drivers/mtd/ubi/vtbl.c static int create_vtbl(struct ubi_device *ubi, struct ubi_attach_info *ai,
ubi               295 drivers/mtd/ubi/vtbl.c 	vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
ubi               302 drivers/mtd/ubi/vtbl.c 	new_aeb = ubi_early_get_peb(ubi, ai);
ubi               317 drivers/mtd/ubi/vtbl.c 	err = ubi_io_write_vid_hdr(ubi, new_aeb->pnum, vidb);
ubi               322 drivers/mtd/ubi/vtbl.c 	err = ubi_io_write_data(ubi, vtbl, new_aeb->pnum, 0, ubi->vtbl_size);
ubi               330 drivers/mtd/ubi/vtbl.c 	err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0);
ubi               361 drivers/mtd/ubi/vtbl.c static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
ubi               400 drivers/mtd/ubi/vtbl.c 		leb[aeb->lnum] = vzalloc(ubi->vtbl_size);
ubi               406 drivers/mtd/ubi/vtbl.c 		err = ubi_io_read_data(ubi, leb[aeb->lnum], aeb->pnum, 0,
ubi               407 drivers/mtd/ubi/vtbl.c 				       ubi->vtbl_size);
ubi               426 drivers/mtd/ubi/vtbl.c 		leb_corrupted[0] = vtbl_check(ubi, leb[0]);
ubi               435 drivers/mtd/ubi/vtbl.c 						  ubi->vtbl_size);
ubi               437 drivers/mtd/ubi/vtbl.c 			ubi_warn(ubi, "volume table copy #2 is corrupted");
ubi               438 drivers/mtd/ubi/vtbl.c 			err = create_vtbl(ubi, ai, 1, leb[0]);
ubi               441 drivers/mtd/ubi/vtbl.c 			ubi_msg(ubi, "volume table was restored");
ubi               450 drivers/mtd/ubi/vtbl.c 			leb_corrupted[1] = vtbl_check(ubi, leb[1]);
ubi               456 drivers/mtd/ubi/vtbl.c 			ubi_err(ubi, "both volume tables are corrupted");
ubi               460 drivers/mtd/ubi/vtbl.c 		ubi_warn(ubi, "volume table copy #1 is corrupted");
ubi               461 drivers/mtd/ubi/vtbl.c 		err = create_vtbl(ubi, ai, 0, leb[1]);
ubi               464 drivers/mtd/ubi/vtbl.c 		ubi_msg(ubi, "volume table was restored");
ubi               484 drivers/mtd/ubi/vtbl.c static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
ubi               490 drivers/mtd/ubi/vtbl.c 	vtbl = vzalloc(ubi->vtbl_size);
ubi               494 drivers/mtd/ubi/vtbl.c 	for (i = 0; i < ubi->vtbl_slots; i++)
ubi               500 drivers/mtd/ubi/vtbl.c 		err = create_vtbl(ubi, ai, i, vtbl);
ubi               520 drivers/mtd/ubi/vtbl.c static int init_volumes(struct ubi_device *ubi,
ubi               528 drivers/mtd/ubi/vtbl.c 	for (i = 0; i < ubi->vtbl_slots; i++) {
ubi               545 drivers/mtd/ubi/vtbl.c 		vol->usable_leb_size = ubi->leb_size - vol->data_pad;
ubi               555 drivers/mtd/ubi/vtbl.c 			if (ubi->autoresize_vol_id != -1) {
ubi               556 drivers/mtd/ubi/vtbl.c 				ubi_err(ubi, "more than one auto-resize volume (%d and %d)",
ubi               557 drivers/mtd/ubi/vtbl.c 					ubi->autoresize_vol_id, i);
ubi               562 drivers/mtd/ubi/vtbl.c 			ubi->autoresize_vol_id = i;
ubi               565 drivers/mtd/ubi/vtbl.c 		ubi_assert(!ubi->volumes[i]);
ubi               566 drivers/mtd/ubi/vtbl.c 		ubi->volumes[i] = vol;
ubi               567 drivers/mtd/ubi/vtbl.c 		ubi->vol_count += 1;
ubi               568 drivers/mtd/ubi/vtbl.c 		vol->ubi = ubi;
ubi               577 drivers/mtd/ubi/vtbl.c 		err = ubi_fastmap_init_checkmap(vol, ubi->peb_count);
ubi               612 drivers/mtd/ubi/vtbl.c 			ubi_warn(ubi, "static volume %d misses %d LEBs - corrupted",
ubi               635 drivers/mtd/ubi/vtbl.c 	vol->usable_leb_size = ubi->leb_size;
ubi               639 drivers/mtd/ubi/vtbl.c 		(long long)vol->used_ebs * (ubi->leb_size - vol->data_pad);
ubi               643 drivers/mtd/ubi/vtbl.c 	ubi_assert(!ubi->volumes[i]);
ubi               644 drivers/mtd/ubi/vtbl.c 	ubi->volumes[vol_id2idx(ubi, vol->vol_id)] = vol;
ubi               646 drivers/mtd/ubi/vtbl.c 	ubi->vol_count += 1;
ubi               647 drivers/mtd/ubi/vtbl.c 	vol->ubi = ubi;
ubi               652 drivers/mtd/ubi/vtbl.c 	if (reserved_pebs > ubi->avail_pebs) {
ubi               653 drivers/mtd/ubi/vtbl.c 		ubi_err(ubi, "not enough PEBs, required %d, available %d",
ubi               654 drivers/mtd/ubi/vtbl.c 			reserved_pebs, ubi->avail_pebs);
ubi               655 drivers/mtd/ubi/vtbl.c 		if (ubi->corr_peb_count)
ubi               656 drivers/mtd/ubi/vtbl.c 			ubi_err(ubi, "%d PEBs are corrupted and not used",
ubi               657 drivers/mtd/ubi/vtbl.c 				ubi->corr_peb_count);
ubi               660 drivers/mtd/ubi/vtbl.c 	ubi->rsvd_pebs += reserved_pebs;
ubi               661 drivers/mtd/ubi/vtbl.c 	ubi->avail_pebs -= reserved_pebs;
ubi               702 drivers/mtd/ubi/vtbl.c 	ubi_err(vol->ubi, "bad attaching information, error %d", err);
ubi               718 drivers/mtd/ubi/vtbl.c static int check_attaching_info(const struct ubi_device *ubi,
ubi               725 drivers/mtd/ubi/vtbl.c 	if (ai->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) {
ubi               726 drivers/mtd/ubi/vtbl.c 		ubi_err(ubi, "found %d volumes while attaching, maximum is %d + %d",
ubi               727 drivers/mtd/ubi/vtbl.c 			ai->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots);
ubi               731 drivers/mtd/ubi/vtbl.c 	if (ai->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT &&
ubi               733 drivers/mtd/ubi/vtbl.c 		ubi_err(ubi, "too large volume ID %d found",
ubi               738 drivers/mtd/ubi/vtbl.c 	for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
ubi               742 drivers/mtd/ubi/vtbl.c 		vol = ubi->volumes[i];
ubi               750 drivers/mtd/ubi/vtbl.c 			ubi_assert(i < ubi->vtbl_slots);
ubi               762 drivers/mtd/ubi/vtbl.c 			ubi_msg(ubi, "finish volume %d removal", av->vol_id);
ubi               783 drivers/mtd/ubi/vtbl.c int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
ubi               794 drivers/mtd/ubi/vtbl.c 	ubi->vtbl_slots = ubi->leb_size / UBI_VTBL_RECORD_SIZE;
ubi               795 drivers/mtd/ubi/vtbl.c 	if (ubi->vtbl_slots > UBI_MAX_VOLUMES)
ubi               796 drivers/mtd/ubi/vtbl.c 		ubi->vtbl_slots = UBI_MAX_VOLUMES;
ubi               798 drivers/mtd/ubi/vtbl.c 	ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE;
ubi               799 drivers/mtd/ubi/vtbl.c 	ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size);
ubi               812 drivers/mtd/ubi/vtbl.c 			ubi->vtbl = create_empty_lvol(ubi, ai);
ubi               813 drivers/mtd/ubi/vtbl.c 			if (IS_ERR(ubi->vtbl))
ubi               814 drivers/mtd/ubi/vtbl.c 				return PTR_ERR(ubi->vtbl);
ubi               816 drivers/mtd/ubi/vtbl.c 			ubi_err(ubi, "the layout volume was not found");
ubi               822 drivers/mtd/ubi/vtbl.c 			ubi_err(ubi, "too many LEBs (%d) in layout volume",
ubi               827 drivers/mtd/ubi/vtbl.c 		ubi->vtbl = process_lvol(ubi, ai, av);
ubi               828 drivers/mtd/ubi/vtbl.c 		if (IS_ERR(ubi->vtbl))
ubi               829 drivers/mtd/ubi/vtbl.c 			return PTR_ERR(ubi->vtbl);
ubi               832 drivers/mtd/ubi/vtbl.c 	ubi->avail_pebs = ubi->good_peb_count - ubi->corr_peb_count;
ubi               838 drivers/mtd/ubi/vtbl.c 	err = init_volumes(ubi, ai, ubi->vtbl);
ubi               846 drivers/mtd/ubi/vtbl.c 	err = check_attaching_info(ubi, ai);
ubi               853 drivers/mtd/ubi/vtbl.c 	vfree(ubi->vtbl);
ubi               854 drivers/mtd/ubi/vtbl.c 	for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
ubi               855 drivers/mtd/ubi/vtbl.c 		ubi_fastmap_destroy_checkmap(ubi->volumes[i]);
ubi               856 drivers/mtd/ubi/vtbl.c 		kfree(ubi->volumes[i]);
ubi               857 drivers/mtd/ubi/vtbl.c 		ubi->volumes[i] = NULL;
ubi               866 drivers/mtd/ubi/vtbl.c static void self_vtbl_check(const struct ubi_device *ubi)
ubi               868 drivers/mtd/ubi/vtbl.c 	if (!ubi_dbg_chk_gen(ubi))
ubi               871 drivers/mtd/ubi/vtbl.c 	if (vtbl_check(ubi, ubi->vtbl)) {
ubi               872 drivers/mtd/ubi/vtbl.c 		ubi_err(ubi, "self-check failed");
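process_lvol() above handles the two redundant copies of the volume table kept in the layout volume: whichever copy passes vtbl_check() is used, the other copy is rewritten from it, and only when both fail is the device unrecoverable. A toy sketch of that decision; the status values stand in for the real CRC and consistency checks:

#include <stdio.h>

/* pretend check results: 0 = copy is fine, non-zero = corrupted */
static int copy_status[2] = { 1, 0 };	/* copy #1 bad, copy #2 good */

static int pick_good_copy(void)
{
	if (copy_status[0] == 0 && copy_status[1] == 0)
		return 0;	/* both fine, prefer copy #1 */
	if (copy_status[0] == 0)
		return 0;	/* restore copy #2 from copy #1 */
	if (copy_status[1] == 0)
		return 1;	/* restore copy #1 from copy #2 */
	return -1;		/* both corrupted: unrecoverable */
}

int main(void)
{
	int good = pick_good_copy();

	if (good < 0)
		printf("both volume tables are corrupted\n");
	else
		printf("use copy #%d, rewrite the other\n", good + 1);
	return 0;
}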
ubi               125 drivers/mtd/ubi/wl.c static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
ubi               126 drivers/mtd/ubi/wl.c static int self_check_in_wl_tree(const struct ubi_device *ubi,
ubi               128 drivers/mtd/ubi/wl.c static int self_check_in_pq(const struct ubi_device *ubi,
ubi               175 drivers/mtd/ubi/wl.c static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
ubi               177 drivers/mtd/ubi/wl.c 	ubi->lookuptbl[e->pnum] = NULL;
ubi               188 drivers/mtd/ubi/wl.c static int do_work(struct ubi_device *ubi)
ubi               201 drivers/mtd/ubi/wl.c 	down_read(&ubi->work_sem);
ubi               202 drivers/mtd/ubi/wl.c 	spin_lock(&ubi->wl_lock);
ubi               203 drivers/mtd/ubi/wl.c 	if (list_empty(&ubi->works)) {
ubi               204 drivers/mtd/ubi/wl.c 		spin_unlock(&ubi->wl_lock);
ubi               205 drivers/mtd/ubi/wl.c 		up_read(&ubi->work_sem);
ubi               209 drivers/mtd/ubi/wl.c 	wrk = list_entry(ubi->works.next, struct ubi_work, list);
ubi               211 drivers/mtd/ubi/wl.c 	ubi->works_count -= 1;
ubi               212 drivers/mtd/ubi/wl.c 	ubi_assert(ubi->works_count >= 0);
ubi               213 drivers/mtd/ubi/wl.c 	spin_unlock(&ubi->wl_lock);
ubi               220 drivers/mtd/ubi/wl.c 	err = wrk->func(ubi, wrk, 0);
ubi               222 drivers/mtd/ubi/wl.c 		ubi_err(ubi, "work failed with error code %d", err);
ubi               223 drivers/mtd/ubi/wl.c 	up_read(&ubi->work_sem);
ubi               275 drivers/mtd/ubi/wl.c static inline int in_pq(const struct ubi_device *ubi, struct ubi_wl_entry *e)
ubi               281 drivers/mtd/ubi/wl.c 		list_for_each_entry(p, &ubi->pq[i], u.list)
ubi               298 drivers/mtd/ubi/wl.c static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
ubi               300 drivers/mtd/ubi/wl.c 	int pq_tail = ubi->pq_head - 1;
ubi               305 drivers/mtd/ubi/wl.c 	list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
ubi               318 drivers/mtd/ubi/wl.c static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
ubi               345 drivers/mtd/ubi/wl.c 	if (prev_e && !ubi->fm_disabled &&
ubi               346 drivers/mtd/ubi/wl.c 	    !ubi->fm && e->pnum < UBI_FM_MAX_START)
ubi               361 drivers/mtd/ubi/wl.c static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
ubi               375 drivers/mtd/ubi/wl.c 		e = may_reserve_for_fm(ubi, e, root);
ubi               377 drivers/mtd/ubi/wl.c 		e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);
ubi               390 drivers/mtd/ubi/wl.c static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
ubi               394 drivers/mtd/ubi/wl.c 	e = find_mean_wl_entry(ubi, &ubi->free);
ubi               396 drivers/mtd/ubi/wl.c 		ubi_err(ubi, "no free eraseblocks");
ubi               400 drivers/mtd/ubi/wl.c 	self_check_in_wl_tree(ubi, e, &ubi->free);
ubi               406 drivers/mtd/ubi/wl.c 	rb_erase(&e->u.rb, &ubi->free);
ubi               407 drivers/mtd/ubi/wl.c 	ubi->free_count--;
ubi               421 drivers/mtd/ubi/wl.c static int prot_queue_del(struct ubi_device *ubi, int pnum)
ubi               425 drivers/mtd/ubi/wl.c 	e = ubi->lookuptbl[pnum];
ubi               429 drivers/mtd/ubi/wl.c 	if (self_check_in_pq(ubi, e))
ubi               446 drivers/mtd/ubi/wl.c static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
ubi               455 drivers/mtd/ubi/wl.c 	err = self_check_ec(ubi, e->pnum, e->ec);
ubi               459 drivers/mtd/ubi/wl.c 	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
ubi               463 drivers/mtd/ubi/wl.c 	err = ubi_io_sync_erase(ubi, e->pnum, torture);
ubi               473 drivers/mtd/ubi/wl.c 		ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu",
ubi               483 drivers/mtd/ubi/wl.c 	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
ubi               488 drivers/mtd/ubi/wl.c 	spin_lock(&ubi->wl_lock);
ubi               489 drivers/mtd/ubi/wl.c 	if (e->ec > ubi->max_ec)
ubi               490 drivers/mtd/ubi/wl.c 		ubi->max_ec = e->ec;
ubi               491 drivers/mtd/ubi/wl.c 	spin_unlock(&ubi->wl_lock);
ubi               506 drivers/mtd/ubi/wl.c static void serve_prot_queue(struct ubi_device *ubi)
ubi               517 drivers/mtd/ubi/wl.c 	spin_lock(&ubi->wl_lock);
ubi               518 drivers/mtd/ubi/wl.c 	list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
ubi               523 drivers/mtd/ubi/wl.c 		wl_tree_add(e, &ubi->used);
ubi               529 drivers/mtd/ubi/wl.c 			spin_unlock(&ubi->wl_lock);
ubi               535 drivers/mtd/ubi/wl.c 	ubi->pq_head += 1;
ubi               536 drivers/mtd/ubi/wl.c 	if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
ubi               537 drivers/mtd/ubi/wl.c 		ubi->pq_head = 0;
ubi               538 drivers/mtd/ubi/wl.c 	ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
ubi               539 drivers/mtd/ubi/wl.c 	spin_unlock(&ubi->wl_lock);
ubi               550 drivers/mtd/ubi/wl.c static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
ubi               552 drivers/mtd/ubi/wl.c 	spin_lock(&ubi->wl_lock);
ubi               553 drivers/mtd/ubi/wl.c 	list_add_tail(&wrk->list, &ubi->works);
ubi               554 drivers/mtd/ubi/wl.c 	ubi_assert(ubi->works_count >= 0);
ubi               555 drivers/mtd/ubi/wl.c 	ubi->works_count += 1;
ubi               556 drivers/mtd/ubi/wl.c 	if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
ubi               557 drivers/mtd/ubi/wl.c 		wake_up_process(ubi->bgt_thread);
ubi               558 drivers/mtd/ubi/wl.c 	spin_unlock(&ubi->wl_lock);
ubi               569 drivers/mtd/ubi/wl.c static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
ubi               571 drivers/mtd/ubi/wl.c 	down_read(&ubi->work_sem);
ubi               572 drivers/mtd/ubi/wl.c 	__schedule_ubi_work(ubi, wrk);
ubi               573 drivers/mtd/ubi/wl.c 	up_read(&ubi->work_sem);
ubi               576 drivers/mtd/ubi/wl.c static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
ubi               590 drivers/mtd/ubi/wl.c static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
ubi               611 drivers/mtd/ubi/wl.c 		__schedule_ubi_work(ubi, wl_wrk);
ubi               613 drivers/mtd/ubi/wl.c 		schedule_ubi_work(ubi, wl_wrk);
ubi               617 drivers/mtd/ubi/wl.c static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk);
ubi               627 drivers/mtd/ubi/wl.c static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
ubi               639 drivers/mtd/ubi/wl.c 	return __erase_worker(ubi, &wl_wrk);
ubi               642 drivers/mtd/ubi/wl.c static int ensure_wear_leveling(struct ubi_device *ubi, int nested);
ubi               654 drivers/mtd/ubi/wl.c static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
ubi               671 drivers/mtd/ubi/wl.c 	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
ubi               677 drivers/mtd/ubi/wl.c 	down_read(&ubi->fm_eba_sem);
ubi               678 drivers/mtd/ubi/wl.c 	mutex_lock(&ubi->move_mutex);
ubi               679 drivers/mtd/ubi/wl.c 	spin_lock(&ubi->wl_lock);
ubi               680 drivers/mtd/ubi/wl.c 	ubi_assert(!ubi->move_from && !ubi->move_to);
ubi               681 drivers/mtd/ubi/wl.c 	ubi_assert(!ubi->move_to_put);
ubi               683 drivers/mtd/ubi/wl.c 	if (!ubi->free.rb_node ||
ubi               684 drivers/mtd/ubi/wl.c 	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
ubi               696 drivers/mtd/ubi/wl.c 		       !ubi->free.rb_node, !ubi->used.rb_node);
ubi               703 drivers/mtd/ubi/wl.c 		anchor = !anchor_pebs_available(&ubi->free);
ubi               706 drivers/mtd/ubi/wl.c 		e1 = find_anchor_wl_entry(&ubi->used);
ubi               709 drivers/mtd/ubi/wl.c 		e2 = get_peb_for_wl(ubi);
ubi               719 drivers/mtd/ubi/wl.c 		self_check_in_wl_tree(ubi, e1, &ubi->used);
ubi               720 drivers/mtd/ubi/wl.c 		rb_erase(&e1->u.rb, &ubi->used);
ubi               722 drivers/mtd/ubi/wl.c 	} else if (!ubi->scrub.rb_node) {
ubi               724 drivers/mtd/ubi/wl.c 	if (!ubi->scrub.rb_node) {
ubi               731 drivers/mtd/ubi/wl.c 		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
ubi               732 drivers/mtd/ubi/wl.c 		e2 = get_peb_for_wl(ubi);
ubi               741 drivers/mtd/ubi/wl.c 			wl_tree_add(e2, &ubi->free);
ubi               742 drivers/mtd/ubi/wl.c 			ubi->free_count++;
ubi               745 drivers/mtd/ubi/wl.c 		self_check_in_wl_tree(ubi, e1, &ubi->used);
ubi               746 drivers/mtd/ubi/wl.c 		rb_erase(&e1->u.rb, &ubi->used);
ubi               752 drivers/mtd/ubi/wl.c 		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
ubi               753 drivers/mtd/ubi/wl.c 		e2 = get_peb_for_wl(ubi);
ubi               757 drivers/mtd/ubi/wl.c 		self_check_in_wl_tree(ubi, e1, &ubi->scrub);
ubi               758 drivers/mtd/ubi/wl.c 		rb_erase(&e1->u.rb, &ubi->scrub);
ubi               762 drivers/mtd/ubi/wl.c 	ubi->move_from = e1;
ubi               763 drivers/mtd/ubi/wl.c 	ubi->move_to = e2;
ubi               764 drivers/mtd/ubi/wl.c 	spin_unlock(&ubi->wl_lock);
ubi               777 drivers/mtd/ubi/wl.c 	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vidb, 0);
ubi               804 drivers/mtd/ubi/wl.c 		} else if (ubi->fast_attach && err == UBI_IO_BAD_HDR_EBADMSG) {
ubi               816 drivers/mtd/ubi/wl.c 		ubi_err(ubi, "error %d while reading VID header from PEB %d",
ubi               824 drivers/mtd/ubi/wl.c 	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vidb);
ubi               862 drivers/mtd/ubi/wl.c 			if (ubi->erroneous_peb_count > ubi->max_erroneous) {
ubi               863 drivers/mtd/ubi/wl.c 				ubi_err(ubi, "too many erroneous eraseblocks (%d)",
ubi               864 drivers/mtd/ubi/wl.c 					ubi->erroneous_peb_count);
ubi               880 drivers/mtd/ubi/wl.c 		ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
ubi               884 drivers/mtd/ubi/wl.c 	spin_lock(&ubi->wl_lock);
ubi               885 drivers/mtd/ubi/wl.c 	if (!ubi->move_to_put) {
ubi               886 drivers/mtd/ubi/wl.c 		wl_tree_add(e2, &ubi->used);
ubi               889 drivers/mtd/ubi/wl.c 	ubi->move_from = ubi->move_to = NULL;
ubi               890 drivers/mtd/ubi/wl.c 	ubi->move_to_put = ubi->wl_scheduled = 0;
ubi               891 drivers/mtd/ubi/wl.c 	spin_unlock(&ubi->wl_lock);
ubi               893 drivers/mtd/ubi/wl.c 	err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
ubi               896 drivers/mtd/ubi/wl.c 			wl_entry_destroy(ubi, e2);
ubi               907 drivers/mtd/ubi/wl.c 		err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
ubi               913 drivers/mtd/ubi/wl.c 	mutex_unlock(&ubi->move_mutex);
ubi               914 drivers/mtd/ubi/wl.c 	up_read(&ubi->fm_eba_sem);
ubi               929 drivers/mtd/ubi/wl.c 	spin_lock(&ubi->wl_lock);
ubi               931 drivers/mtd/ubi/wl.c 		prot_queue_add(ubi, e1);
ubi               933 drivers/mtd/ubi/wl.c 		wl_tree_add(e1, &ubi->erroneous);
ubi               934 drivers/mtd/ubi/wl.c 		ubi->erroneous_peb_count += 1;
ubi               936 drivers/mtd/ubi/wl.c 		wl_tree_add(e1, &ubi->scrub);
ubi               938 drivers/mtd/ubi/wl.c 		wl_tree_add(e1, &ubi->used);
ubi               940 drivers/mtd/ubi/wl.c 		wl_tree_add(e2, &ubi->free);
ubi               941 drivers/mtd/ubi/wl.c 		ubi->free_count++;
ubi               944 drivers/mtd/ubi/wl.c 	ubi_assert(!ubi->move_to_put);
ubi               945 drivers/mtd/ubi/wl.c 	ubi->move_from = ubi->move_to = NULL;
ubi               946 drivers/mtd/ubi/wl.c 	ubi->wl_scheduled = 0;
ubi               947 drivers/mtd/ubi/wl.c 	spin_unlock(&ubi->wl_lock);
ubi               951 drivers/mtd/ubi/wl.c 		ensure_wear_leveling(ubi, 1);
ubi               953 drivers/mtd/ubi/wl.c 		err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
ubi               959 drivers/mtd/ubi/wl.c 		err = do_sync_erase(ubi, e1, vol_id, lnum, 1);
ubi               964 drivers/mtd/ubi/wl.c 	mutex_unlock(&ubi->move_mutex);
ubi               965 drivers/mtd/ubi/wl.c 	up_read(&ubi->fm_eba_sem);
ubi               970 drivers/mtd/ubi/wl.c 		ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
ubi               973 drivers/mtd/ubi/wl.c 		ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
ubi               975 drivers/mtd/ubi/wl.c 	spin_lock(&ubi->wl_lock);
ubi               976 drivers/mtd/ubi/wl.c 	ubi->move_from = ubi->move_to = NULL;
ubi               977 drivers/mtd/ubi/wl.c 	ubi->move_to_put = ubi->wl_scheduled = 0;
ubi               978 drivers/mtd/ubi/wl.c 	spin_unlock(&ubi->wl_lock);
ubi               981 drivers/mtd/ubi/wl.c 	wl_entry_destroy(ubi, e1);
ubi               982 drivers/mtd/ubi/wl.c 	wl_entry_destroy(ubi, e2);
ubi               985 drivers/mtd/ubi/wl.c 	ubi_ro_mode(ubi);
ubi               986 drivers/mtd/ubi/wl.c 	mutex_unlock(&ubi->move_mutex);
ubi               987 drivers/mtd/ubi/wl.c 	up_read(&ubi->fm_eba_sem);
ubi               992 drivers/mtd/ubi/wl.c 	ubi->wl_scheduled = 0;
ubi               993 drivers/mtd/ubi/wl.c 	spin_unlock(&ubi->wl_lock);
ubi               994 drivers/mtd/ubi/wl.c 	mutex_unlock(&ubi->move_mutex);
ubi               995 drivers/mtd/ubi/wl.c 	up_read(&ubi->fm_eba_sem);
ubi              1009 drivers/mtd/ubi/wl.c static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
ubi              1016 drivers/mtd/ubi/wl.c 	spin_lock(&ubi->wl_lock);
ubi              1017 drivers/mtd/ubi/wl.c 	if (ubi->wl_scheduled)
ubi              1025 drivers/mtd/ubi/wl.c 	if (!ubi->scrub.rb_node) {
ubi              1026 drivers/mtd/ubi/wl.c 		if (!ubi->used.rb_node || !ubi->free.rb_node)
ubi              1036 drivers/mtd/ubi/wl.c 		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
ubi              1037 drivers/mtd/ubi/wl.c 		e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
ubi              1045 drivers/mtd/ubi/wl.c 	ubi->wl_scheduled = 1;
ubi              1046 drivers/mtd/ubi/wl.c 	spin_unlock(&ubi->wl_lock);
ubi              1057 drivers/mtd/ubi/wl.c 		__schedule_ubi_work(ubi, wrk);
ubi              1059 drivers/mtd/ubi/wl.c 		schedule_ubi_work(ubi, wrk);
ubi              1063 drivers/mtd/ubi/wl.c 	spin_lock(&ubi->wl_lock);
ubi              1064 drivers/mtd/ubi/wl.c 	ubi->wl_scheduled = 0;
ubi              1066 drivers/mtd/ubi/wl.c 	spin_unlock(&ubi->wl_lock);
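
ensure_wear_leveling() above only schedules a move when the least-erased used PEB and the free PEB chosen by find_wl_entry() differ in erase counter by at least the wear-leveling threshold (UBI_WL_THRESHOLD, 4096 by default unless configured otherwise). A tiny stand-alone sketch of that trigger; the function name and the literal threshold are illustrative:

#include <stdbool.h>
#include <stdio.h>

#define WL_THRESHOLD 4096   /* stand-in for the kernel's UBI_WL_THRESHOLD */

/* Move data off a little-worn block only when the erase-counter gap to a
 * candidate free block is big enough to justify the copy + erase. */
static bool worth_wear_leveling(long long used_lowest_ec, long long free_candidate_ec)
{
        return free_candidate_ec - used_lowest_ec >= WL_THRESHOLD;
}

int main(void)
{
        printf("%d\n", worth_wear_leveling(100, 5000));  /* 1: schedule a move */
        printf("%d\n", worth_wear_leveling(100, 1500));  /* 0: wear is balanced enough */
        return 0;
}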
ubi              1082 drivers/mtd/ubi/wl.c static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
ubi              1093 drivers/mtd/ubi/wl.c 	err = sync_erase(ubi, e, wl_wrk->torture);
ubi              1095 drivers/mtd/ubi/wl.c 		spin_lock(&ubi->wl_lock);
ubi              1096 drivers/mtd/ubi/wl.c 		wl_tree_add(e, &ubi->free);
ubi              1097 drivers/mtd/ubi/wl.c 		ubi->free_count++;
ubi              1098 drivers/mtd/ubi/wl.c 		spin_unlock(&ubi->wl_lock);
ubi              1104 drivers/mtd/ubi/wl.c 		serve_prot_queue(ubi);
ubi              1107 drivers/mtd/ubi/wl.c 		err = ensure_wear_leveling(ubi, 1);
ubi              1111 drivers/mtd/ubi/wl.c 	ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
ubi              1118 drivers/mtd/ubi/wl.c 		err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
ubi              1120 drivers/mtd/ubi/wl.c 			wl_entry_destroy(ubi, e);
ubi              1127 drivers/mtd/ubi/wl.c 	wl_entry_destroy(ubi, e);
ubi              1138 drivers/mtd/ubi/wl.c 	if (!ubi->bad_allowed) {
ubi              1139 drivers/mtd/ubi/wl.c 		ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
ubi              1143 drivers/mtd/ubi/wl.c 	spin_lock(&ubi->volumes_lock);
ubi              1144 drivers/mtd/ubi/wl.c 	if (ubi->beb_rsvd_pebs == 0) {
ubi              1145 drivers/mtd/ubi/wl.c 		if (ubi->avail_pebs == 0) {
ubi              1146 drivers/mtd/ubi/wl.c 			spin_unlock(&ubi->volumes_lock);
ubi              1147 drivers/mtd/ubi/wl.c 			ubi_err(ubi, "no reserved/available physical eraseblocks");
ubi              1150 drivers/mtd/ubi/wl.c 		ubi->avail_pebs -= 1;
ubi              1153 drivers/mtd/ubi/wl.c 	spin_unlock(&ubi->volumes_lock);
ubi              1155 drivers/mtd/ubi/wl.c 	ubi_msg(ubi, "mark PEB %d as bad", pnum);
ubi              1156 drivers/mtd/ubi/wl.c 	err = ubi_io_mark_bad(ubi, pnum);
ubi              1160 drivers/mtd/ubi/wl.c 	spin_lock(&ubi->volumes_lock);
ubi              1161 drivers/mtd/ubi/wl.c 	if (ubi->beb_rsvd_pebs > 0) {
ubi              1167 drivers/mtd/ubi/wl.c 			ubi->avail_pebs += 1;
ubi              1170 drivers/mtd/ubi/wl.c 		ubi->beb_rsvd_pebs -= 1;
ubi              1172 drivers/mtd/ubi/wl.c 	ubi->bad_peb_count += 1;
ubi              1173 drivers/mtd/ubi/wl.c 	ubi->good_peb_count -= 1;
ubi              1174 drivers/mtd/ubi/wl.c 	ubi_calculate_reserved(ubi);
ubi              1176 drivers/mtd/ubi/wl.c 		ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
ubi              1177 drivers/mtd/ubi/wl.c 	else if (ubi->beb_rsvd_pebs)
ubi              1178 drivers/mtd/ubi/wl.c 		ubi_msg(ubi, "%d PEBs left in the reserve",
ubi              1179 drivers/mtd/ubi/wl.c 			ubi->beb_rsvd_pebs);
ubi              1181 drivers/mtd/ubi/wl.c 		ubi_warn(ubi, "last PEB from the reserve was used");
ubi              1182 drivers/mtd/ubi/wl.c 	spin_unlock(&ubi->volumes_lock);
ubi              1188 drivers/mtd/ubi/wl.c 		spin_lock(&ubi->volumes_lock);
ubi              1189 drivers/mtd/ubi/wl.c 		ubi->avail_pebs += 1;
ubi              1190 drivers/mtd/ubi/wl.c 		spin_unlock(&ubi->volumes_lock);
ubi              1192 drivers/mtd/ubi/wl.c 	ubi_ro_mode(ubi);
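
The __erase_worker() error path above turns a fatal erase failure into a bad-block marking: a replacement PEB is taken from the bad-eraseblock reserve if one is left, otherwise from the pool of available PEBs, and the good/bad counts are adjusted. A simplified, hedged model of that bookkeeping (field names are stand-ins and the locking is omitted):

#include <stdio.h>

struct counters {
        int beb_rsvd_pebs;  /* PEBs reserved for bad-block handling */
        int avail_pebs;     /* PEBs not yet given to any volume     */
        int bad_peb_count;
        int good_peb_count;
};

/* Returns 0 on success, -1 when there is nothing left to substitute
 * (the kernel would switch the device to read-only mode here). */
static int account_new_bad_peb(struct counters *c)
{
        if (c->beb_rsvd_pebs > 0) {
                c->beb_rsvd_pebs--;
        } else if (c->avail_pebs > 0) {
                c->avail_pebs--;
                printf("no PEBs in the reserved pool, used an available PEB\n");
        } else {
                printf("no reserved/available physical eraseblocks\n");
                return -1;
        }
        c->bad_peb_count++;
        c->good_peb_count--;
        return 0;
}

int main(void)
{
        struct counters c = { .beb_rsvd_pebs = 1, .avail_pebs = 5,
                              .good_peb_count = 1024 };

        account_new_bad_peb(&c);    /* consumes the last reserved PEB */
        account_new_bad_peb(&c);    /* falls back to the available pool */
        printf("bad=%d good=%d\n", c.bad_peb_count, c.good_peb_count);
        return 0;
}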
ubi              1196 drivers/mtd/ubi/wl.c static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
ubi              1206 drivers/mtd/ubi/wl.c 		wl_entry_destroy(ubi, e);
ubi              1210 drivers/mtd/ubi/wl.c 	ret = __erase_worker(ubi, wl_wrk);
ubi              1228 drivers/mtd/ubi/wl.c int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
ubi              1236 drivers/mtd/ubi/wl.c 	ubi_assert(pnum < ubi->peb_count);
ubi              1238 drivers/mtd/ubi/wl.c 	down_read(&ubi->fm_protect);
ubi              1241 drivers/mtd/ubi/wl.c 	spin_lock(&ubi->wl_lock);
ubi              1242 drivers/mtd/ubi/wl.c 	e = ubi->lookuptbl[pnum];
ubi              1243 drivers/mtd/ubi/wl.c 	if (e == ubi->move_from) {
ubi              1250 drivers/mtd/ubi/wl.c 		spin_unlock(&ubi->wl_lock);
ubi              1253 drivers/mtd/ubi/wl.c 		mutex_lock(&ubi->move_mutex);
ubi              1254 drivers/mtd/ubi/wl.c 		mutex_unlock(&ubi->move_mutex);
ubi              1256 drivers/mtd/ubi/wl.c 	} else if (e == ubi->move_to) {
ubi              1267 drivers/mtd/ubi/wl.c 		ubi_assert(!ubi->move_to_put);
ubi              1268 drivers/mtd/ubi/wl.c 		ubi->move_to_put = 1;
ubi              1269 drivers/mtd/ubi/wl.c 		spin_unlock(&ubi->wl_lock);
ubi              1270 drivers/mtd/ubi/wl.c 		up_read(&ubi->fm_protect);
ubi              1273 drivers/mtd/ubi/wl.c 		if (in_wl_tree(e, &ubi->used)) {
ubi              1274 drivers/mtd/ubi/wl.c 			self_check_in_wl_tree(ubi, e, &ubi->used);
ubi              1275 drivers/mtd/ubi/wl.c 			rb_erase(&e->u.rb, &ubi->used);
ubi              1276 drivers/mtd/ubi/wl.c 		} else if (in_wl_tree(e, &ubi->scrub)) {
ubi              1277 drivers/mtd/ubi/wl.c 			self_check_in_wl_tree(ubi, e, &ubi->scrub);
ubi              1278 drivers/mtd/ubi/wl.c 			rb_erase(&e->u.rb, &ubi->scrub);
ubi              1279 drivers/mtd/ubi/wl.c 		} else if (in_wl_tree(e, &ubi->erroneous)) {
ubi              1280 drivers/mtd/ubi/wl.c 			self_check_in_wl_tree(ubi, e, &ubi->erroneous);
ubi              1281 drivers/mtd/ubi/wl.c 			rb_erase(&e->u.rb, &ubi->erroneous);
ubi              1282 drivers/mtd/ubi/wl.c 			ubi->erroneous_peb_count -= 1;
ubi              1283 drivers/mtd/ubi/wl.c 			ubi_assert(ubi->erroneous_peb_count >= 0);
ubi              1287 drivers/mtd/ubi/wl.c 			err = prot_queue_del(ubi, e->pnum);
ubi              1289 drivers/mtd/ubi/wl.c 				ubi_err(ubi, "PEB %d not found", pnum);
ubi              1290 drivers/mtd/ubi/wl.c 				ubi_ro_mode(ubi);
ubi              1291 drivers/mtd/ubi/wl.c 				spin_unlock(&ubi->wl_lock);
ubi              1292 drivers/mtd/ubi/wl.c 				up_read(&ubi->fm_protect);
ubi              1297 drivers/mtd/ubi/wl.c 	spin_unlock(&ubi->wl_lock);
ubi              1299 drivers/mtd/ubi/wl.c 	err = schedule_erase(ubi, e, vol_id, lnum, torture, false);
ubi              1301 drivers/mtd/ubi/wl.c 		spin_lock(&ubi->wl_lock);
ubi              1302 drivers/mtd/ubi/wl.c 		wl_tree_add(e, &ubi->used);
ubi              1303 drivers/mtd/ubi/wl.c 		spin_unlock(&ubi->wl_lock);
ubi              1306 drivers/mtd/ubi/wl.c 	up_read(&ubi->fm_protect);
ubi              1320 drivers/mtd/ubi/wl.c int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
ubi              1324 drivers/mtd/ubi/wl.c 	ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);
ubi              1327 drivers/mtd/ubi/wl.c 	spin_lock(&ubi->wl_lock);
ubi              1328 drivers/mtd/ubi/wl.c 	e = ubi->lookuptbl[pnum];
ubi              1329 drivers/mtd/ubi/wl.c 	if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
ubi              1330 drivers/mtd/ubi/wl.c 				   in_wl_tree(e, &ubi->erroneous)) {
ubi              1331 drivers/mtd/ubi/wl.c 		spin_unlock(&ubi->wl_lock);
ubi              1335 drivers/mtd/ubi/wl.c 	if (e == ubi->move_to) {
ubi              1342 drivers/mtd/ubi/wl.c 		spin_unlock(&ubi->wl_lock);
ubi              1348 drivers/mtd/ubi/wl.c 	if (in_wl_tree(e, &ubi->used)) {
ubi              1349 drivers/mtd/ubi/wl.c 		self_check_in_wl_tree(ubi, e, &ubi->used);
ubi              1350 drivers/mtd/ubi/wl.c 		rb_erase(&e->u.rb, &ubi->used);
ubi              1354 drivers/mtd/ubi/wl.c 		err = prot_queue_del(ubi, e->pnum);
ubi              1356 drivers/mtd/ubi/wl.c 			ubi_err(ubi, "PEB %d not found", pnum);
ubi              1357 drivers/mtd/ubi/wl.c 			ubi_ro_mode(ubi);
ubi              1358 drivers/mtd/ubi/wl.c 			spin_unlock(&ubi->wl_lock);
ubi              1363 drivers/mtd/ubi/wl.c 	wl_tree_add(e, &ubi->scrub);
ubi              1364 drivers/mtd/ubi/wl.c 	spin_unlock(&ubi->wl_lock);
ubi              1370 drivers/mtd/ubi/wl.c 	return ensure_wear_leveling(ubi, 0);
ubi              1385 drivers/mtd/ubi/wl.c int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
ubi              1395 drivers/mtd/ubi/wl.c 	       vol_id, lnum, ubi->works_count);
ubi              1401 drivers/mtd/ubi/wl.c 		down_read(&ubi->work_sem);
ubi              1402 drivers/mtd/ubi/wl.c 		spin_lock(&ubi->wl_lock);
ubi              1403 drivers/mtd/ubi/wl.c 		list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
ubi              1407 drivers/mtd/ubi/wl.c 				ubi->works_count -= 1;
ubi              1408 drivers/mtd/ubi/wl.c 				ubi_assert(ubi->works_count >= 0);
ubi              1409 drivers/mtd/ubi/wl.c 				spin_unlock(&ubi->wl_lock);
ubi              1411 drivers/mtd/ubi/wl.c 				err = wrk->func(ubi, wrk, 0);
ubi              1413 drivers/mtd/ubi/wl.c 					up_read(&ubi->work_sem);
ubi              1417 drivers/mtd/ubi/wl.c 				spin_lock(&ubi->wl_lock);
ubi              1422 drivers/mtd/ubi/wl.c 		spin_unlock(&ubi->wl_lock);
ubi              1423 drivers/mtd/ubi/wl.c 		up_read(&ubi->work_sem);
ubi              1430 drivers/mtd/ubi/wl.c 	down_write(&ubi->work_sem);
ubi              1431 drivers/mtd/ubi/wl.c 	up_write(&ubi->work_sem);
ubi              1436 drivers/mtd/ubi/wl.c static bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e)
ubi              1438 drivers/mtd/ubi/wl.c 	if (in_wl_tree(e, &ubi->scrub))
ubi              1440 drivers/mtd/ubi/wl.c 	else if (in_wl_tree(e, &ubi->erroneous))
ubi              1442 drivers/mtd/ubi/wl.c 	else if (ubi->move_from == e)
ubi              1444 drivers/mtd/ubi/wl.c 	else if (ubi->move_to == e)
ubi              1469 drivers/mtd/ubi/wl.c int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force)
ubi              1474 drivers/mtd/ubi/wl.c 	if (pnum < 0 || pnum >= ubi->peb_count) {
ubi              1483 drivers/mtd/ubi/wl.c 	down_write(&ubi->work_sem);
ubi              1489 drivers/mtd/ubi/wl.c 	spin_lock(&ubi->wl_lock);
ubi              1490 drivers/mtd/ubi/wl.c 	e = ubi->lookuptbl[pnum];
ubi              1492 drivers/mtd/ubi/wl.c 		spin_unlock(&ubi->wl_lock);
ubi              1500 drivers/mtd/ubi/wl.c 	if (!scrub_possible(ubi, e)) {
ubi              1501 drivers/mtd/ubi/wl.c 		spin_unlock(&ubi->wl_lock);
ubi              1505 drivers/mtd/ubi/wl.c 	spin_unlock(&ubi->wl_lock);
ubi              1508 drivers/mtd/ubi/wl.c 		mutex_lock(&ubi->buf_mutex);
ubi              1509 drivers/mtd/ubi/wl.c 		err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
ubi              1510 drivers/mtd/ubi/wl.c 		mutex_unlock(&ubi->buf_mutex);
ubi              1517 drivers/mtd/ubi/wl.c 		spin_lock(&ubi->wl_lock);
ubi              1523 drivers/mtd/ubi/wl.c 		e = ubi->lookuptbl[pnum];
ubi              1525 drivers/mtd/ubi/wl.c 			spin_unlock(&ubi->wl_lock);
ubi              1533 drivers/mtd/ubi/wl.c 		if (!scrub_possible(ubi, e)) {
ubi              1534 drivers/mtd/ubi/wl.c 			spin_unlock(&ubi->wl_lock);
ubi              1539 drivers/mtd/ubi/wl.c 		if (in_pq(ubi, e)) {
ubi              1540 drivers/mtd/ubi/wl.c 			prot_queue_del(ubi, e->pnum);
ubi              1541 drivers/mtd/ubi/wl.c 			wl_tree_add(e, &ubi->scrub);
ubi              1542 drivers/mtd/ubi/wl.c 			spin_unlock(&ubi->wl_lock);
ubi              1544 drivers/mtd/ubi/wl.c 			err = ensure_wear_leveling(ubi, 1);
ubi              1545 drivers/mtd/ubi/wl.c 		} else if (in_wl_tree(e, &ubi->used)) {
ubi              1546 drivers/mtd/ubi/wl.c 			rb_erase(&e->u.rb, &ubi->used);
ubi              1547 drivers/mtd/ubi/wl.c 			wl_tree_add(e, &ubi->scrub);
ubi              1548 drivers/mtd/ubi/wl.c 			spin_unlock(&ubi->wl_lock);
ubi              1550 drivers/mtd/ubi/wl.c 			err = ensure_wear_leveling(ubi, 1);
ubi              1551 drivers/mtd/ubi/wl.c 		} else if (in_wl_tree(e, &ubi->free)) {
ubi              1552 drivers/mtd/ubi/wl.c 			rb_erase(&e->u.rb, &ubi->free);
ubi              1553 drivers/mtd/ubi/wl.c 			ubi->free_count--;
ubi              1554 drivers/mtd/ubi/wl.c 			spin_unlock(&ubi->wl_lock);
ubi              1560 drivers/mtd/ubi/wl.c 			err = schedule_erase(ubi, e, UBI_UNKNOWN, UBI_UNKNOWN,
ubi              1563 drivers/mtd/ubi/wl.c 			spin_unlock(&ubi->wl_lock);
ubi              1574 drivers/mtd/ubi/wl.c 	up_write(&ubi->work_sem);
ubi              1585 drivers/mtd/ubi/wl.c static void tree_destroy(struct ubi_device *ubi, struct rb_root *root)
ubi              1607 drivers/mtd/ubi/wl.c 			wl_entry_destroy(ubi, e);
ubi              1619 drivers/mtd/ubi/wl.c 	struct ubi_device *ubi = u;
ubi              1621 drivers/mtd/ubi/wl.c 	ubi_msg(ubi, "background thread \"%s\" started, PID %d",
ubi              1622 drivers/mtd/ubi/wl.c 		ubi->bgt_name, task_pid_nr(current));
ubi              1634 drivers/mtd/ubi/wl.c 		spin_lock(&ubi->wl_lock);
ubi              1635 drivers/mtd/ubi/wl.c 		if (list_empty(&ubi->works) || ubi->ro_mode ||
ubi              1636 drivers/mtd/ubi/wl.c 		    !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
ubi              1638 drivers/mtd/ubi/wl.c 			spin_unlock(&ubi->wl_lock);
ubi              1642 drivers/mtd/ubi/wl.c 		spin_unlock(&ubi->wl_lock);
ubi              1644 drivers/mtd/ubi/wl.c 		err = do_work(ubi);
ubi              1646 drivers/mtd/ubi/wl.c 			ubi_err(ubi, "%s: work failed with error code %d",
ubi              1647 drivers/mtd/ubi/wl.c 				ubi->bgt_name, err);
ubi              1653 drivers/mtd/ubi/wl.c 				ubi_msg(ubi, "%s: %d consecutive failures",
ubi              1654 drivers/mtd/ubi/wl.c 					ubi->bgt_name, WL_MAX_FAILURES);
ubi              1655 drivers/mtd/ubi/wl.c 				ubi_ro_mode(ubi);
ubi              1656 drivers/mtd/ubi/wl.c 				ubi->thread_enabled = 0;
ubi              1665 drivers/mtd/ubi/wl.c 	dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
ubi              1666 drivers/mtd/ubi/wl.c 	ubi->thread_enabled = 0;
ubi              1674 drivers/mtd/ubi/wl.c static void shutdown_work(struct ubi_device *ubi)
ubi              1676 drivers/mtd/ubi/wl.c 	while (!list_empty(&ubi->works)) {
ubi              1679 drivers/mtd/ubi/wl.c 		wrk = list_entry(ubi->works.next, struct ubi_work, list);
ubi              1681 drivers/mtd/ubi/wl.c 		wrk->func(ubi, wrk, 1);
ubi              1682 drivers/mtd/ubi/wl.c 		ubi->works_count -= 1;
ubi              1683 drivers/mtd/ubi/wl.c 		ubi_assert(ubi->works_count >= 0);
ubi              1693 drivers/mtd/ubi/wl.c static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync)
ubi              1704 drivers/mtd/ubi/wl.c 	ubi->lookuptbl[e->pnum] = e;
ubi              1707 drivers/mtd/ubi/wl.c 		err = sync_erase(ubi, e, false);
ubi              1711 drivers/mtd/ubi/wl.c 		wl_tree_add(e, &ubi->free);
ubi              1712 drivers/mtd/ubi/wl.c 		ubi->free_count++;
ubi              1714 drivers/mtd/ubi/wl.c 		err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false);
ubi              1722 drivers/mtd/ubi/wl.c 	wl_entry_destroy(ubi, e);
ubi              1735 drivers/mtd/ubi/wl.c int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
ubi              1743 drivers/mtd/ubi/wl.c 	ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
ubi              1744 drivers/mtd/ubi/wl.c 	spin_lock_init(&ubi->wl_lock);
ubi              1745 drivers/mtd/ubi/wl.c 	mutex_init(&ubi->move_mutex);
ubi              1746 drivers/mtd/ubi/wl.c 	init_rwsem(&ubi->work_sem);
ubi              1747 drivers/mtd/ubi/wl.c 	ubi->max_ec = ai->max_ec;
ubi              1748 drivers/mtd/ubi/wl.c 	INIT_LIST_HEAD(&ubi->works);
ubi              1750 drivers/mtd/ubi/wl.c 	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
ubi              1753 drivers/mtd/ubi/wl.c 	ubi->lookuptbl = kcalloc(ubi->peb_count, sizeof(void *), GFP_KERNEL);
ubi              1754 drivers/mtd/ubi/wl.c 	if (!ubi->lookuptbl)
ubi              1758 drivers/mtd/ubi/wl.c 		INIT_LIST_HEAD(&ubi->pq[i]);
ubi              1759 drivers/mtd/ubi/wl.c 	ubi->pq_head = 0;
ubi              1761 drivers/mtd/ubi/wl.c 	ubi->free_count = 0;
ubi              1765 drivers/mtd/ubi/wl.c 		err = erase_aeb(ubi, aeb, false);
ubi              1785 drivers/mtd/ubi/wl.c 		wl_tree_add(e, &ubi->free);
ubi              1786 drivers/mtd/ubi/wl.c 		ubi->free_count++;
ubi              1788 drivers/mtd/ubi/wl.c 		ubi->lookuptbl[e->pnum] = e;
ubi              1805 drivers/mtd/ubi/wl.c 			ubi->lookuptbl[e->pnum] = e;
ubi              1810 drivers/mtd/ubi/wl.c 				wl_tree_add(e, &ubi->used);
ubi              1814 drivers/mtd/ubi/wl.c 				wl_tree_add(e, &ubi->scrub);
ubi              1824 drivers/mtd/ubi/wl.c 		e = ubi_find_fm_block(ubi, aeb->pnum);
ubi              1827 drivers/mtd/ubi/wl.c 			ubi_assert(!ubi->lookuptbl[e->pnum]);
ubi              1828 drivers/mtd/ubi/wl.c 			ubi->lookuptbl[e->pnum] = e;
ubi              1838 drivers/mtd/ubi/wl.c 			if (ubi->lookuptbl[aeb->pnum])
ubi              1853 drivers/mtd/ubi/wl.c 			err = erase_aeb(ubi, aeb, sync);
ubi              1863 drivers/mtd/ubi/wl.c 	ubi_assert(ubi->good_peb_count == found_pebs);
ubi              1866 drivers/mtd/ubi/wl.c 	ubi_fastmap_init(ubi, &reserved_pebs);
ubi              1868 drivers/mtd/ubi/wl.c 	if (ubi->avail_pebs < reserved_pebs) {
ubi              1869 drivers/mtd/ubi/wl.c 		ubi_err(ubi, "no enough physical eraseblocks (%d, need %d)",
ubi              1870 drivers/mtd/ubi/wl.c 			ubi->avail_pebs, reserved_pebs);
ubi              1871 drivers/mtd/ubi/wl.c 		if (ubi->corr_peb_count)
ubi              1872 drivers/mtd/ubi/wl.c 			ubi_err(ubi, "%d PEBs are corrupted and not used",
ubi              1873 drivers/mtd/ubi/wl.c 				ubi->corr_peb_count);
ubi              1877 drivers/mtd/ubi/wl.c 	ubi->avail_pebs -= reserved_pebs;
ubi              1878 drivers/mtd/ubi/wl.c 	ubi->rsvd_pebs += reserved_pebs;
ubi              1881 drivers/mtd/ubi/wl.c 	err = ensure_wear_leveling(ubi, 0);
ubi              1888 drivers/mtd/ubi/wl.c 	shutdown_work(ubi);
ubi              1889 drivers/mtd/ubi/wl.c 	tree_destroy(ubi, &ubi->used);
ubi              1890 drivers/mtd/ubi/wl.c 	tree_destroy(ubi, &ubi->free);
ubi              1891 drivers/mtd/ubi/wl.c 	tree_destroy(ubi, &ubi->scrub);
ubi              1892 drivers/mtd/ubi/wl.c 	kfree(ubi->lookuptbl);
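
ubi_wl_init() above allocates ubi->lookuptbl with kcalloc(peb_count, sizeof(void *)): a flat array of wear-leveling entry pointers indexed by physical eraseblock number, which is what makes the ubi->lookuptbl[pnum] accesses elsewhere in this listing O(1). A trivial user-space equivalent, with illustrative names:

#include <stdio.h>
#include <stdlib.h>

struct wl_entry { int pnum; int ec; };

int main(void)
{
        const int peb_count = 1024;
        struct wl_entry **lookuptbl = calloc(peb_count, sizeof(*lookuptbl));
        struct wl_entry e = { .pnum = 42, .ec = 7 };

        if (!lookuptbl)
                return 1;

        lookuptbl[e.pnum] = &e;                         /* filled at attach time */
        printf("PEB %d has EC %d\n", 42, lookuptbl[42]->ec);  /* O(1) lookup */

        free(lookuptbl);
        return 0;
}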
ubi              1900 drivers/mtd/ubi/wl.c static void protection_queue_destroy(struct ubi_device *ubi)
ubi              1906 drivers/mtd/ubi/wl.c 		list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
ubi              1908 drivers/mtd/ubi/wl.c 			wl_entry_destroy(ubi, e);
ubi              1917 drivers/mtd/ubi/wl.c void ubi_wl_close(struct ubi_device *ubi)
ubi              1920 drivers/mtd/ubi/wl.c 	ubi_fastmap_close(ubi);
ubi              1921 drivers/mtd/ubi/wl.c 	shutdown_work(ubi);
ubi              1922 drivers/mtd/ubi/wl.c 	protection_queue_destroy(ubi);
ubi              1923 drivers/mtd/ubi/wl.c 	tree_destroy(ubi, &ubi->used);
ubi              1924 drivers/mtd/ubi/wl.c 	tree_destroy(ubi, &ubi->erroneous);
ubi              1925 drivers/mtd/ubi/wl.c 	tree_destroy(ubi, &ubi->free);
ubi              1926 drivers/mtd/ubi/wl.c 	tree_destroy(ubi, &ubi->scrub);
ubi              1927 drivers/mtd/ubi/wl.c 	kfree(ubi->lookuptbl);
ubi              1940 drivers/mtd/ubi/wl.c static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
ubi              1946 drivers/mtd/ubi/wl.c 	if (!ubi_dbg_chk_gen(ubi))
ubi              1949 drivers/mtd/ubi/wl.c 	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
ubi              1953 drivers/mtd/ubi/wl.c 	err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
ubi              1962 drivers/mtd/ubi/wl.c 		ubi_err(ubi, "self-check failed for PEB %d", pnum);
ubi              1963 drivers/mtd/ubi/wl.c 		ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
ubi              1983 drivers/mtd/ubi/wl.c static int self_check_in_wl_tree(const struct ubi_device *ubi,
ubi              1986 drivers/mtd/ubi/wl.c 	if (!ubi_dbg_chk_gen(ubi))
ubi              1992 drivers/mtd/ubi/wl.c 	ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p ",
ubi              2006 drivers/mtd/ubi/wl.c static int self_check_in_pq(const struct ubi_device *ubi,
ubi              2009 drivers/mtd/ubi/wl.c 	if (!ubi_dbg_chk_gen(ubi))
ubi              2012 drivers/mtd/ubi/wl.c 	if (in_pq(ubi, e))
ubi              2015 drivers/mtd/ubi/wl.c 	ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
ubi              2021 drivers/mtd/ubi/wl.c static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
ubi              2025 drivers/mtd/ubi/wl.c 	e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
ubi              2026 drivers/mtd/ubi/wl.c 	self_check_in_wl_tree(ubi, e, &ubi->free);
ubi              2027 drivers/mtd/ubi/wl.c 	ubi->free_count--;
ubi              2028 drivers/mtd/ubi/wl.c 	ubi_assert(ubi->free_count >= 0);
ubi              2029 drivers/mtd/ubi/wl.c 	rb_erase(&e->u.rb, &ubi->free);
ubi              2043 drivers/mtd/ubi/wl.c static int produce_free_peb(struct ubi_device *ubi)
ubi              2047 drivers/mtd/ubi/wl.c 	while (!ubi->free.rb_node && ubi->works_count) {
ubi              2048 drivers/mtd/ubi/wl.c 		spin_unlock(&ubi->wl_lock);
ubi              2051 drivers/mtd/ubi/wl.c 		err = do_work(ubi);
ubi              2053 drivers/mtd/ubi/wl.c 		spin_lock(&ubi->wl_lock);
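
produce_free_peb() above is a simple back-pressure loop: while the free tree is empty but erase work is queued, it drops wl_lock, runs one pending work item (which typically erases a PEB and returns it to the free tree), then re-takes the lock and re-checks. A hedged user-space sketch of that pattern; the pthread mutex and counters stand in for the kernel's spinlock and RB trees:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t wl_lock = PTHREAD_MUTEX_INITIALIZER;
static int free_count;          /* stand-in for "ubi->free tree is non-empty" */
static int works_count = 3;     /* pending erase works */

/* Stand-in for do_work(): erase one PEB and put it back on the free tree. */
static int do_one_work(void)
{
        pthread_mutex_lock(&wl_lock);
        works_count--;
        free_count++;
        pthread_mutex_unlock(&wl_lock);
        return 0;
}

/* Called with wl_lock held; returns with wl_lock held, like the kernel's
 * produce_free_peb(). */
static int produce_free_peb(void)
{
        int err = 0;

        while (!free_count && works_count) {
                pthread_mutex_unlock(&wl_lock);
                err = do_one_work();
                pthread_mutex_lock(&wl_lock);
                if (err)
                        break;
        }
        return err;
}

int main(void)
{
        pthread_mutex_lock(&wl_lock);
        produce_free_peb();
        printf("free PEBs now available: %d\n", free_count);
        pthread_mutex_unlock(&wl_lock);
        return 0;
}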
ubi              2069 drivers/mtd/ubi/wl.c int ubi_wl_get_peb(struct ubi_device *ubi)
ubi              2075 drivers/mtd/ubi/wl.c 	down_read(&ubi->fm_eba_sem);
ubi              2076 drivers/mtd/ubi/wl.c 	spin_lock(&ubi->wl_lock);
ubi              2077 drivers/mtd/ubi/wl.c 	if (!ubi->free.rb_node) {
ubi              2078 drivers/mtd/ubi/wl.c 		if (ubi->works_count == 0) {
ubi              2079 drivers/mtd/ubi/wl.c 			ubi_err(ubi, "no free eraseblocks");
ubi              2080 drivers/mtd/ubi/wl.c 			ubi_assert(list_empty(&ubi->works));
ubi              2081 drivers/mtd/ubi/wl.c 			spin_unlock(&ubi->wl_lock);
ubi              2085 drivers/mtd/ubi/wl.c 		err = produce_free_peb(ubi);
ubi              2087 drivers/mtd/ubi/wl.c 			spin_unlock(&ubi->wl_lock);
ubi              2090 drivers/mtd/ubi/wl.c 		spin_unlock(&ubi->wl_lock);
ubi              2091 drivers/mtd/ubi/wl.c 		up_read(&ubi->fm_eba_sem);
ubi              2095 drivers/mtd/ubi/wl.c 	e = wl_get_wle(ubi);
ubi              2096 drivers/mtd/ubi/wl.c 	prot_queue_add(ubi, e);
ubi              2097 drivers/mtd/ubi/wl.c 	spin_unlock(&ubi->wl_lock);
ubi              2099 drivers/mtd/ubi/wl.c 	err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
ubi              2100 drivers/mtd/ubi/wl.c 				    ubi->peb_size - ubi->vid_hdr_aloffset);
ubi              2102 drivers/mtd/ubi/wl.c 		ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
ubi                 8 drivers/mtd/ubi/wl.h static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
ubi                 9 drivers/mtd/ubi/wl.h static void ubi_fastmap_close(struct ubi_device *ubi);
ubi                10 drivers/mtd/ubi/wl.h static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count)
ubi                13 drivers/mtd/ubi/wl.h 	*count += (ubi->fm_size / ubi->leb_size) * 2;
ubi                14 drivers/mtd/ubi/wl.h 	INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
ubi                16 drivers/mtd/ubi/wl.h static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
ubi                20 drivers/mtd/ubi/wl.h static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
ubi                21 drivers/mtd/ubi/wl.h static inline void ubi_fastmap_close(struct ubi_device *ubi) { }
ubi                22 drivers/mtd/ubi/wl.h static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count) { }
ubi                23 drivers/mtd/ubi/wl.h static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
ubi              2582 fs/ubifs/debug.c 	err = ubi_leb_write(c->ubi, lnum, buf, offs, len);
ubi              2599 fs/ubifs/debug.c 	err = ubi_leb_change(c->ubi, lnum, buf, len);
ubi              2615 fs/ubifs/debug.c 	err = ubi_leb_unmap(c->ubi, lnum);
ubi              2631 fs/ubifs/debug.c 	err = ubi_leb_map(c->ubi, lnum);
ubi                92 fs/ubifs/io.c  	err = ubi_read(c->ubi, lnum, buf, offs, len);
ubi               114 fs/ubifs/io.c  		err = ubi_leb_write(c->ubi, lnum, buf, offs, len);
ubi               134 fs/ubifs/io.c  		err = ubi_leb_change(c->ubi, lnum, buf, len);
ubi               154 fs/ubifs/io.c  		err = ubi_leb_unmap(c->ubi, lnum);
ubi               173 fs/ubifs/io.c  		err = ubi_leb_map(c->ubi, lnum);
ubi               188 fs/ubifs/io.c  	err = ubi_is_mapped(c->ubi, lnum);
ubi              1079 fs/ubifs/io.c  		   offs, ubi_is_mapped(c->ubi, lnum));
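
The fs/ubifs lines above show the whole UBI client API that UBIFS needs: ubi_read()/ubi_leb_write() for I/O within a logical eraseblock, ubi_leb_change() for atomic whole-LEB replacement, ubi_leb_map()/ubi_leb_unmap() for allocation, and ubi_is_mapped() for probing. A hedged in-kernel sketch using only those calls; this is illustrative glue under assumed context, not UBIFS code, and error handling is minimal:

#include <linux/mtd/ubi.h>

/* Replace the contents of LEB @lnum with @len bytes from @buf.
 * Unmapping first means an interrupted update leaves an empty or
 * partially written LEB, never a mix of old and new data; for a truly
 * atomic replacement ubi_leb_change() would be used instead, as in the
 * listing above. */
static int rewrite_leb(struct ubi_volume_desc *desc, int lnum,
                       const void *buf, int len)
{
        int err;

        err = ubi_leb_unmap(desc, lnum);    /* old data is gone after this */
        if (err)
                return err;

        /* Writing to an unmapped LEB maps it to a fresh physical eraseblock. */
        return ubi_leb_write(desc, lnum, buf, 0, len);
}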
ubi              1950 fs/ubifs/super.c 	ubi_close_volume(c->ubi);
ubi              2039 fs/ubifs/super.c 	struct ubi_volume_desc *ubi;
ubi              2047 fs/ubifs/super.c 	ubi = ubi_open_volume_path(name, mode);
ubi              2048 fs/ubifs/super.c 	if (!IS_ERR(ubi))
ubi              2049 fs/ubifs/super.c 		return ubi;
ubi              2083 fs/ubifs/super.c static struct ubifs_info *alloc_ubifs_info(struct ubi_volume_desc *ubi)
ubi              2124 fs/ubifs/super.c 		ubi_get_volume_info(ubi, &c->vi);
ubi              2138 fs/ubifs/super.c 	c->ubi = ubi_open_volume(c->vi.ubi_num, c->vi.vol_id, UBI_READWRITE);
ubi              2139 fs/ubifs/super.c 	if (IS_ERR(c->ubi)) {
ubi              2140 fs/ubifs/super.c 		err = PTR_ERR(c->ubi);
ubi              2204 fs/ubifs/super.c 	ubi_close_volume(c->ubi);
ubi              2226 fs/ubifs/super.c 	struct ubi_volume_desc *ubi;
ubi              2238 fs/ubifs/super.c 	ubi = open_ubi(name, UBI_READONLY);
ubi              2239 fs/ubifs/super.c 	if (IS_ERR(ubi)) {
ubi              2242 fs/ubifs/super.c 			       current->pid, name, (int)PTR_ERR(ubi));
ubi              2243 fs/ubifs/super.c 		return ERR_CAST(ubi);
ubi              2246 fs/ubifs/super.c 	c = alloc_ubifs_info(ubi);
ubi              2283 fs/ubifs/super.c 	ubi_close_volume(ubi);
ubi              2290 fs/ubifs/super.c 	ubi_close_volume(ubi);
ubi              1381 fs/ubifs/ubifs.h 	struct ubi_volume_desc *ubi;