Lines matching refs: ubi (drivers/mtd/ubi/wl.c, the UBI wear-leveling unit)
138 static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
139 static int self_check_in_wl_tree(const struct ubi_device *ubi,
141 static int self_check_in_pq(const struct ubi_device *ubi,
188 static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e) in wl_entry_destroy() argument
190 ubi->lookuptbl[e->pnum] = NULL; in wl_entry_destroy()
201 static int do_work(struct ubi_device *ubi) in do_work() argument
214 down_read(&ubi->work_sem); in do_work()
215 spin_lock(&ubi->wl_lock); in do_work()
216 if (list_empty(&ubi->works)) { in do_work()
217 spin_unlock(&ubi->wl_lock); in do_work()
218 up_read(&ubi->work_sem); in do_work()
222 wrk = list_entry(ubi->works.next, struct ubi_work, list); in do_work()
224 ubi->works_count -= 1; in do_work()
225 ubi_assert(ubi->works_count >= 0); in do_work()
226 spin_unlock(&ubi->wl_lock); in do_work()
233 err = wrk->func(ubi, wrk, 0); in do_work()
235 ubi_err(ubi, "work failed with error code %d", err); in do_work()
236 up_read(&ubi->work_sem); in do_work()
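The do_work() lines above (201-236) show the pattern used throughout this unit: hold ubi->wl_lock only long enough to detach one pending item from ubi->works, then run its callback with the spinlock dropped, reporting any failure. Below is a minimal userspace sketch of that pattern, with hypothetical names and a pthread mutex standing in for wl_lock (the work_sem read-lock that brackets the real function is omitted):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct work {
    struct work *next;
    int (*func)(struct work *w);   /* callback, run outside the lock */
};

static struct work *works;                 /* singly linked pending list */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Run one pending work item, if any; returns 0 on success or when idle. */
static int do_one_work(void)
{
    struct work *w;
    int err;

    pthread_mutex_lock(&lock);
    if (!works) {                          /* nothing queued */
        pthread_mutex_unlock(&lock);
        return 0;
    }
    w = works;                             /* detach the first item ... */
    works = w->next;
    pthread_mutex_unlock(&lock);           /* ... and drop the lock */

    err = w->func(w);                      /* the callback frees 'w' itself */
    if (err)
        fprintf(stderr, "work failed with error code %d\n", err);
    return err;
}

static int demo(struct work *w) { free(w); return 0; }

int main(void)
{
    struct work *w = calloc(1, sizeof(*w));

    if (!w)
        return 1;
    w->func = demo;
    works = w;
    return do_one_work();
}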
290 static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e) in prot_queue_add() argument
292 int pq_tail = ubi->pq_head - 1; in prot_queue_add()
297 list_add_tail(&e->u.list, &ubi->pq[pq_tail]); in prot_queue_add()
310 static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi, in find_wl_entry() argument
337 if (prev_e && !ubi->fm_disabled && in find_wl_entry()
338 !ubi->fm && e->pnum < UBI_FM_MAX_START) in find_wl_entry()
353 static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi, in find_mean_wl_entry() argument
367 e = may_reserve_for_fm(ubi, e, root); in find_mean_wl_entry()
369 e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2); in find_mean_wl_entry()
382 static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi) in wl_get_wle() argument
386 e = find_mean_wl_entry(ubi, &ubi->free); in wl_get_wle()
388 ubi_err(ubi, "no free eraseblocks"); in wl_get_wle()
392 self_check_in_wl_tree(ubi, e, &ubi->free); in wl_get_wle()
398 rb_erase(&e->u.rb, &ubi->free); in wl_get_wle()
399 ubi->free_count--; in wl_get_wle()
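find_wl_entry() (310-338) walks the erase-counter-ordered free tree and returns the most-worn entry whose counter still lies below the least-worn counter plus a margin; find_mean_wl_entry() (353-369) passes WL_FREE_MAX_DIFF/2 for ordinary allocations, and wl_get_wle() then detaches the result from ubi->free. Here is a sketch of that selection over a sorted array, with an illustrative margin value and the fastmap special cases left out:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for a free-tree node keyed by erase counter. */
struct wle { int pnum; long long ec; };

#define WL_FREE_MAX_DIFF 4096   /* illustrative only; not the kernel's value */

/*
 * Given entries sorted by ascending erase counter, return the most-worn
 * entry whose counter still stays below the least-worn one plus 'diff',
 * the same idea find_wl_entry() applies while walking the RB-tree.
 */
static struct wle *pick_free(struct wle *v, size_t n, long long diff)
{
    long long limit;
    size_t i, best = 0;

    if (n == 0)
        return NULL;
    limit = v[0].ec + diff;      /* v[0] is the least-worn free block */
    for (i = 1; i < n; i++)
        if (v[i].ec < limit)
            best = i;            /* keep the most-worn acceptable entry */
    return &v[best];
}

int main(void)
{
    struct wle free_set[] = { {10, 100}, {11, 120}, {12, 5000} };
    struct wle *e = pick_free(free_set, 3, WL_FREE_MAX_DIFF / 2);

    printf("picked PEB %d (ec %lld)\n", e->pnum, e->ec);
    return 0;
}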
413 static int prot_queue_del(struct ubi_device *ubi, int pnum) in prot_queue_del() argument
417 e = ubi->lookuptbl[pnum]; in prot_queue_del()
421 if (self_check_in_pq(ubi, e)) in prot_queue_del()
438 static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, in sync_erase() argument
447 err = self_check_ec(ubi, e->pnum, e->ec); in sync_erase()
451 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS); in sync_erase()
455 err = ubi_io_sync_erase(ubi, e->pnum, torture); in sync_erase()
465 ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu", in sync_erase()
475 err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr); in sync_erase()
480 spin_lock(&ubi->wl_lock); in sync_erase()
481 if (e->ec > ubi->max_ec) in sync_erase()
482 ubi->max_ec = e->ec; in sync_erase()
483 spin_unlock(&ubi->wl_lock); in sync_erase()
498 static void serve_prot_queue(struct ubi_device *ubi) in serve_prot_queue() argument
509 spin_lock(&ubi->wl_lock); in serve_prot_queue()
510 list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) { in serve_prot_queue()
515 wl_tree_add(e, &ubi->used); in serve_prot_queue()
521 spin_unlock(&ubi->wl_lock); in serve_prot_queue()
527 ubi->pq_head += 1; in serve_prot_queue()
528 if (ubi->pq_head == UBI_PROT_QUEUE_LEN) in serve_prot_queue()
529 ubi->pq_head = 0; in serve_prot_queue()
530 ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN); in serve_prot_queue()
531 spin_unlock(&ubi->wl_lock); in serve_prot_queue()
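prot_queue_add() (290-297) inserts a freshly allocated PEB at slot pq_head - 1 (wrapping), and serve_prot_queue() (498-531) releases everything at slot pq_head back to the used tree before advancing pq_head, so each entry stays protected for roughly UBI_PROT_QUEUE_LEN ticks. Below is a small model of that circular queue, with arrays of ints standing in for the kernel list heads and an illustrative queue length:

#include <stdio.h>

#define PROT_QUEUE_LEN 10          /* same role as UBI_PROT_QUEUE_LEN */
#define MAX_PEB 64

/* Each slot holds a tiny singly linked list of protected PEB numbers. */
static int pq[PROT_QUEUE_LEN];     /* head of list per slot, -1 = empty */
static int next_peb[MAX_PEB];      /* next pointer per PEB, -1 = end */
static int pq_head;

static void pq_init(void)
{
    for (int i = 0; i < PROT_QUEUE_LEN; i++)
        pq[i] = -1;
    for (int i = 0; i < MAX_PEB; i++)
        next_peb[i] = -1;
}

/* Mirror of prot_queue_add(): insert at the "tail" slot, pq_head - 1. */
static void pq_add(int pnum)
{
    int tail = pq_head - 1;

    if (tail < 0)
        tail = PROT_QUEUE_LEN - 1;
    next_peb[pnum] = pq[tail];
    pq[tail] = pnum;
}

/* Mirror of serve_prot_queue(): release the slot at pq_head, advance it. */
static void pq_serve(void)
{
    for (int p = pq[pq_head]; p != -1; p = next_peb[p])
        printf("PEB %d moved back to the used set\n", p);
    pq[pq_head] = -1;

    if (++pq_head == PROT_QUEUE_LEN)
        pq_head = 0;
}

int main(void)
{
    pq_init();
    pq_add(7);                     /* protect PEB 7 ... */
    for (int i = 0; i < PROT_QUEUE_LEN; i++)
        pq_serve();                /* ... it is released on the last tick */
    return 0;
}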
542 static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) in __schedule_ubi_work() argument
544 spin_lock(&ubi->wl_lock); in __schedule_ubi_work()
545 list_add_tail(&wrk->list, &ubi->works); in __schedule_ubi_work()
546 ubi_assert(ubi->works_count >= 0); in __schedule_ubi_work()
547 ubi->works_count += 1; in __schedule_ubi_work()
548 if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi)) in __schedule_ubi_work()
549 wake_up_process(ubi->bgt_thread); in __schedule_ubi_work()
550 spin_unlock(&ubi->wl_lock); in __schedule_ubi_work()
561 static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) in schedule_ubi_work() argument
563 down_read(&ubi->work_sem); in schedule_ubi_work()
564 __schedule_ubi_work(ubi, wrk); in schedule_ubi_work()
565 up_read(&ubi->work_sem); in schedule_ubi_work()
568 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
582 static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, in schedule_erase() argument
602 schedule_ubi_work(ubi, wl_wrk); in schedule_erase()
606 static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk);
616 static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, in do_sync_erase() argument
628 return __erase_worker(ubi, &wl_wrk); in do_sync_erase()
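schedule_erase() (582-602) wraps the PEB into a work descriptor and queues it for the background thread, while do_sync_erase() (616-628) builds the same descriptor on the stack and calls __erase_worker() directly. A sketch of that asynchronous/synchronous split, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct erase_work {
    int pnum;       /* physical eraseblock to erase */
    int torture;    /* whether to torture-test it first */
};

/* Stand-in for __erase_worker(): performs the erase described by 'wk'. */
static int erase_now(const struct erase_work *wk)
{
    printf("erasing PEB %d%s\n", wk->pnum,
           wk->torture ? " (torture)" : "");
    return 0;
}

/* Asynchronous path: allocate a descriptor and hand it to a queue. */
static int schedule_erase_sketch(int pnum, int torture,
                                 void (*enqueue)(struct erase_work *))
{
    struct erase_work *wk = malloc(sizeof(*wk));

    if (!wk)
        return -1;
    wk->pnum = pnum;
    wk->torture = torture;
    enqueue(wk);                    /* a background thread runs it later */
    return 0;
}

/* Synchronous path: same descriptor, but on the stack, run immediately. */
static int do_sync_erase_sketch(int pnum, int torture)
{
    struct erase_work wk = { .pnum = pnum, .torture = torture };

    return erase_now(&wk);
}

static void demo_enqueue(struct erase_work *wk)
{
    erase_now(wk);                  /* a real queue would defer this */
    free(wk);
}

int main(void)
{
    schedule_erase_sketch(3, 0, demo_enqueue);
    return do_sync_erase_sketch(4, 1);
}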
642 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, argument
657 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
661 mutex_lock(&ubi->move_mutex);
662 spin_lock(&ubi->wl_lock);
663 ubi_assert(!ubi->move_from && !ubi->move_to);
664 ubi_assert(!ubi->move_to_put);
666 if (!ubi->free.rb_node ||
667 (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
679 !ubi->free.rb_node, !ubi->used.rb_node);
686 anchor = !anchor_pebs_avalible(&ubi->free);
689 e1 = find_anchor_wl_entry(&ubi->used);
692 e2 = get_peb_for_wl(ubi);
696 self_check_in_wl_tree(ubi, e1, &ubi->used);
697 rb_erase(&e1->u.rb, &ubi->used);
699 } else if (!ubi->scrub.rb_node) {
701 if (!ubi->scrub.rb_node) {
708 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
709 e2 = get_peb_for_wl(ubi);
718 wl_tree_add(e2, &ubi->free);
719 ubi->free_count++;
722 self_check_in_wl_tree(ubi, e1, &ubi->used);
723 rb_erase(&e1->u.rb, &ubi->used);
729 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
730 e2 = get_peb_for_wl(ubi);
734 self_check_in_wl_tree(ubi, e1, &ubi->scrub);
735 rb_erase(&e1->u.rb, &ubi->scrub);
739 ubi->move_from = e1;
740 ubi->move_to = e2;
741 spin_unlock(&ubi->wl_lock);
754 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
782 ubi_err(ubi, "error %d while reading VID header from PEB %d",
790 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
825 if (ubi->erroneous_peb_count > ubi->max_erroneous) {
826 ubi_err(ubi, "too many erroneous eraseblocks (%d)",
827 ubi->erroneous_peb_count);
842 ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
844 ubi_free_vid_hdr(ubi, vid_hdr);
846 spin_lock(&ubi->wl_lock);
847 if (!ubi->move_to_put) {
848 wl_tree_add(e2, &ubi->used);
851 ubi->move_from = ubi->move_to = NULL;
852 ubi->move_to_put = ubi->wl_scheduled = 0;
853 spin_unlock(&ubi->wl_lock);
855 err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
858 wl_entry_destroy(ubi, e2);
869 err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
875 mutex_unlock(&ubi->move_mutex);
890 spin_lock(&ubi->wl_lock);
892 prot_queue_add(ubi, e1);
894 wl_tree_add(e1, &ubi->erroneous);
895 ubi->erroneous_peb_count += 1;
897 wl_tree_add(e1, &ubi->scrub);
899 wl_tree_add(e1, &ubi->used);
900 ubi_assert(!ubi->move_to_put);
901 ubi->move_from = ubi->move_to = NULL;
902 ubi->wl_scheduled = 0;
903 spin_unlock(&ubi->wl_lock);
905 ubi_free_vid_hdr(ubi, vid_hdr);
906 err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
910 mutex_unlock(&ubi->move_mutex);
915 ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
918 ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
920 spin_lock(&ubi->wl_lock);
921 ubi->move_from = ubi->move_to = NULL;
922 ubi->move_to_put = ubi->wl_scheduled = 0;
923 spin_unlock(&ubi->wl_lock);
925 ubi_free_vid_hdr(ubi, vid_hdr);
926 wl_entry_destroy(ubi, e1);
927 wl_entry_destroy(ubi, e2);
930 ubi_ro_mode(ubi);
931 mutex_unlock(&ubi->move_mutex);
936 ubi->wl_scheduled = 0;
937 spin_unlock(&ubi->wl_lock);
938 mutex_unlock(&ubi->move_mutex);
939 ubi_free_vid_hdr(ubi, vid_hdr);
952 static int ensure_wear_leveling(struct ubi_device *ubi, int nested) argument
959 spin_lock(&ubi->wl_lock);
960 if (ubi->wl_scheduled)
968 if (!ubi->scrub.rb_node) {
969 if (!ubi->used.rb_node || !ubi->free.rb_node)
979 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
980 e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
988 ubi->wl_scheduled = 1;
989 spin_unlock(&ubi->wl_lock);
1000 __schedule_ubi_work(ubi, wrk);
1002 schedule_ubi_work(ubi, wrk);
1006 spin_lock(&ubi->wl_lock);
1007 ubi->wl_scheduled = 0;
1009 spin_unlock(&ubi->wl_lock);
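ensure_wear_leveling() (952-1009) and wear_leveling_worker() (642-939) both apply the same test: take the used PEB with the lowest erase counter (its data is the coldest) and a suitably worn free PEB, and schedule a move only when the erase-counter gap has reached a threshold. A sketch of that decision, with an illustrative threshold value:

#include <stdio.h>

#define WL_THRESHOLD 4096   /* illustrative; the kernel value is configurable */

/*
 * Moving the contents of the least-worn used block onto a well-worn free
 * block is worthwhile only if the gap in erase counters has grown past
 * the threshold; otherwise the move would just burn an erase cycle.
 */
static int needs_wear_leveling(long long lowest_used_ec,
                               long long chosen_free_ec)
{
    return chosen_free_ec - lowest_used_ec >= WL_THRESHOLD;
}

int main(void)
{
    printf("%d\n", needs_wear_leveling(10, 20));     /* 0: gap too small   */
    printf("%d\n", needs_wear_leveling(10, 9000));   /* 1: schedule a move */
    return 0;
}

Keeping the free candidate within WL_FREE_MAX_DIFF of the least-worn free block (see find_wl_entry() above) also bounds how uneven the free set itself can become.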
1025 static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk) argument
1036 err = sync_erase(ubi, e, wl_wrk->torture);
1038 spin_lock(&ubi->wl_lock);
1039 wl_tree_add(e, &ubi->free);
1040 ubi->free_count++;
1041 spin_unlock(&ubi->wl_lock);
1047 serve_prot_queue(ubi);
1050 err = ensure_wear_leveling(ubi, 1);
1054 ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
1061 err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
1063 wl_entry_destroy(ubi, e);
1070 wl_entry_destroy(ubi, e);
1081 if (!ubi->bad_allowed) {
1082 ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
1086 spin_lock(&ubi->volumes_lock);
1087 if (ubi->beb_rsvd_pebs == 0) {
1088 if (ubi->avail_pebs == 0) {
1089 spin_unlock(&ubi->volumes_lock);
1090 ubi_err(ubi, "no reserved/available physical eraseblocks");
1093 ubi->avail_pebs -= 1;
1096 spin_unlock(&ubi->volumes_lock);
1098 ubi_msg(ubi, "mark PEB %d as bad", pnum);
1099 err = ubi_io_mark_bad(ubi, pnum);
1103 spin_lock(&ubi->volumes_lock);
1104 if (ubi->beb_rsvd_pebs > 0) {
1110 ubi->avail_pebs += 1;
1113 ubi->beb_rsvd_pebs -= 1;
1115 ubi->bad_peb_count += 1;
1116 ubi->good_peb_count -= 1;
1117 ubi_calculate_reserved(ubi);
1119 ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
1120 else if (ubi->beb_rsvd_pebs)
1121 ubi_msg(ubi, "%d PEBs left in the reserve",
1122 ubi->beb_rsvd_pebs);
1124 ubi_warn(ubi, "last PEB from the reserve was used");
1125 spin_unlock(&ubi->volumes_lock);
1131 spin_lock(&ubi->volumes_lock);
1132 ubi->avail_pebs += 1;
1133 spin_unlock(&ubi->volumes_lock);
1135 ubi_ro_mode(ubi);
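When an erase fails hard, __erase_worker() (1025-1135) marks the PEB bad and charges it either to the reserved bad-block pool (beb_rsvd_pebs) or, if that pool is empty, to the available pool, warning as the reserve runs out and switching to read-only mode when nothing is left. A simplified sketch of that accounting (the real code interleaves it with ubi_io_mark_bad() and the volumes_lock; both are omitted here):

#include <stdio.h>

struct counters {
    int beb_rsvd_pebs;   /* PEBs reserved for bad-block handling */
    int avail_pebs;      /* PEBs still available to volumes      */
    int bad_peb_count;
    int good_peb_count;
};

/* Charge one newly found bad PEB to the right pool; -1 if none is left. */
static int account_bad_peb(struct counters *c)
{
    if (c->beb_rsvd_pebs > 0) {
        c->beb_rsvd_pebs -= 1;
        if (c->beb_rsvd_pebs == 0)
            printf("last PEB from the reserve was used\n");
    } else if (c->avail_pebs > 0) {
        c->avail_pebs -= 1;
        printf("no PEBs in the reserved pool, used an available PEB\n");
    } else {
        printf("no reserved/available physical eraseblocks\n");
        return -1;       /* the caller would switch to read-only mode */
    }
    c->bad_peb_count += 1;
    c->good_peb_count -= 1;
    return 0;
}

int main(void)
{
    struct counters c = { .beb_rsvd_pebs = 1, .avail_pebs = 2,
                          .good_peb_count = 100 };

    while (account_bad_peb(&c) == 0)
        ;
    printf("bad=%d good=%d\n", c.bad_peb_count, c.good_peb_count);
    return 0;
}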
1139 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, argument
1149 wl_entry_destroy(ubi, e);
1153 ret = __erase_worker(ubi, wl_wrk);
1171 int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum, argument
1179 ubi_assert(pnum < ubi->peb_count);
1181 down_read(&ubi->fm_protect);
1184 spin_lock(&ubi->wl_lock);
1185 e = ubi->lookuptbl[pnum];
1186 if (e == ubi->move_from) {
1193 spin_unlock(&ubi->wl_lock);
1196 mutex_lock(&ubi->move_mutex);
1197 mutex_unlock(&ubi->move_mutex);
1199 } else if (e == ubi->move_to) {
1210 ubi_assert(!ubi->move_to_put);
1211 ubi->move_to_put = 1;
1212 spin_unlock(&ubi->wl_lock);
1213 up_read(&ubi->fm_protect);
1216 if (in_wl_tree(e, &ubi->used)) {
1217 self_check_in_wl_tree(ubi, e, &ubi->used);
1218 rb_erase(&e->u.rb, &ubi->used);
1219 } else if (in_wl_tree(e, &ubi->scrub)) {
1220 self_check_in_wl_tree(ubi, e, &ubi->scrub);
1221 rb_erase(&e->u.rb, &ubi->scrub);
1222 } else if (in_wl_tree(e, &ubi->erroneous)) {
1223 self_check_in_wl_tree(ubi, e, &ubi->erroneous);
1224 rb_erase(&e->u.rb, &ubi->erroneous);
1225 ubi->erroneous_peb_count -= 1;
1226 ubi_assert(ubi->erroneous_peb_count >= 0);
1230 err = prot_queue_del(ubi, e->pnum);
1232 ubi_err(ubi, "PEB %d not found", pnum);
1233 ubi_ro_mode(ubi);
1234 spin_unlock(&ubi->wl_lock);
1235 up_read(&ubi->fm_protect);
1240 spin_unlock(&ubi->wl_lock);
1242 err = schedule_erase(ubi, e, vol_id, lnum, torture);
1244 spin_lock(&ubi->wl_lock);
1245 wl_tree_add(e, &ubi->used);
1246 spin_unlock(&ubi->wl_lock);
1249 up_read(&ubi->fm_protect);
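ubi_wl_put_peb() (1171-1249) first has to discover where the returned PEB currently lives: the used, scrub or erroneous tree, the protection queue, or one end of a move in progress. Only in the tree and queue cases does it detach the entry and schedule an erase. A sketch of that routing, with a hypothetical enum in place of the RB-tree and pointer checks:

#include <stdio.h>

/* Hypothetical classification of where a PEB is tracked right now. */
enum peb_home { HOME_USED, HOME_SCRUB, HOME_ERRONEOUS, HOME_PROT_QUEUE,
                HOME_MOVE_SOURCE, HOME_MOVE_TARGET };

/*
 * Returns 1 if the PEB can be detached and handed to the erase worker
 * right away, 0 if the caller must defer to the move code instead.
 */
static int route_put_peb(enum peb_home home)
{
    switch (home) {
    case HOME_MOVE_SOURCE:
        /* wl.c re-checks after taking move_mutex; here: just wait */
        printf("PEB is being moved from; wait and retry\n");
        return 0;
    case HOME_MOVE_TARGET:
        printf("PEB is the move target; set move_to_put and return\n");
        return 0;
    case HOME_USED:
    case HOME_SCRUB:
    case HOME_ERRONEOUS:
        printf("detach from the tree and schedule an erase\n");
        return 1;
    case HOME_PROT_QUEUE:
        printf("delete from the protection queue and schedule an erase\n");
        return 1;
    }
    return 0;
}

int main(void)
{
    route_put_peb(HOME_MOVE_TARGET);
    return !route_put_peb(HOME_USED);
}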
1263 int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum) argument
1267 ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);
1270 spin_lock(&ubi->wl_lock);
1271 e = ubi->lookuptbl[pnum];
1272 if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
1273 in_wl_tree(e, &ubi->erroneous)) {
1274 spin_unlock(&ubi->wl_lock);
1278 if (e == ubi->move_to) {
1285 spin_unlock(&ubi->wl_lock);
1291 if (in_wl_tree(e, &ubi->used)) {
1292 self_check_in_wl_tree(ubi, e, &ubi->used);
1293 rb_erase(&e->u.rb, &ubi->used);
1297 err = prot_queue_del(ubi, e->pnum);
1299 ubi_err(ubi, "PEB %d not found", pnum);
1300 ubi_ro_mode(ubi);
1301 spin_unlock(&ubi->wl_lock);
1306 wl_tree_add(e, &ubi->scrub);
1307 spin_unlock(&ubi->wl_lock);
1313 return ensure_wear_leveling(ubi, 0);
1328 int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum) argument
1338 vol_id, lnum, ubi->works_count);
1344 down_read(&ubi->work_sem);
1345 spin_lock(&ubi->wl_lock);
1346 list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
1350 ubi->works_count -= 1;
1351 ubi_assert(ubi->works_count >= 0);
1352 spin_unlock(&ubi->wl_lock);
1354 err = wrk->func(ubi, wrk, 0);
1356 up_read(&ubi->work_sem);
1360 spin_lock(&ubi->wl_lock);
1365 spin_unlock(&ubi->wl_lock);
1366 up_read(&ubi->work_sem);
1373 down_write(&ubi->work_sem);
1374 up_write(&ubi->work_sem);
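ubi_wl_flush() (1328-1374) runs, in the caller's context, every queued work that targets the given volume and LEB (UBI_ALL acts as a wildcard), then takes work_sem for writing to wait out works already executing elsewhere. A sketch of just the matching step:

#include <stdio.h>

#define MATCH_ALL -1   /* plays the role UBI_ALL plays in the listing */

struct flush_work { int vol_id; int lnum; };

static int matches(const struct flush_work *w, int vol_id, int lnum)
{
    return (vol_id == MATCH_ALL || w->vol_id == vol_id) &&
           (lnum == MATCH_ALL || w->lnum == lnum);
}

int main(void)
{
    struct flush_work pending[] = { {0, 3}, {0, 4}, {1, 3} };
    int vol_id = 0, lnum = MATCH_ALL;

    /* Run (here: just report) every queued work aimed at volume 0. */
    for (unsigned i = 0; i < sizeof(pending) / sizeof(pending[0]); i++)
        if (matches(&pending[i], vol_id, lnum))
            printf("flushing work for LEB %d:%d\n",
                   pending[i].vol_id, pending[i].lnum);
    return 0;
}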
1384 static void tree_destroy(struct ubi_device *ubi, struct rb_root *root) argument
1406 wl_entry_destroy(ubi, e);
1418 struct ubi_device *ubi = u; local
1420 ubi_msg(ubi, "background thread \"%s\" started, PID %d",
1421 ubi->bgt_name, task_pid_nr(current));
1433 spin_lock(&ubi->wl_lock);
1434 if (list_empty(&ubi->works) || ubi->ro_mode ||
1435 !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
1437 spin_unlock(&ubi->wl_lock);
1441 spin_unlock(&ubi->wl_lock);
1443 err = do_work(ubi);
1445 ubi_err(ubi, "%s: work failed with error code %d",
1446 ubi->bgt_name, err);
1452 ubi_msg(ubi, "%s: %d consecutive failures",
1453 ubi->bgt_name, WL_MAX_FAILURES);
1454 ubi_ro_mode(ubi);
1455 ubi->thread_enabled = 0;
1464 dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
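The background thread lines (1418-1464) follow the usual kthread shape: check for work under wl_lock, sleep when the list is empty or the thread is disabled, otherwise run one item via do_work(), and give up (switching the device to read-only) after too many consecutive failures. A pthread-based sketch of the same loop shape, with hypothetical names and a condition variable standing in for the kthread wakeup:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_FAILURES 32          /* same role as WL_MAX_FAILURES */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
static int pending;              /* number of queued work items */
static bool stop;

/* Stand-in for do_work(): pretend every item succeeds. */
static int run_one(void) { return 0; }

static void *bg_thread(void *arg)
{
    int failures = 0;
    (void)arg;

    for (;;) {
        pthread_mutex_lock(&lock);
        while (!pending && !stop)          /* nothing to do: sleep */
            pthread_cond_wait(&wake, &lock);
        if (!pending && stop) {            /* drained and asked to exit */
            pthread_mutex_unlock(&lock);
            break;
        }
        pending--;
        pthread_mutex_unlock(&lock);

        if (run_one()) {
            if (++failures == MAX_FAILURES) {
                fprintf(stderr, "%d consecutive failures, giving up\n",
                        failures);
                break;                     /* wl.c goes read-only here */
            }
        } else {
            failures = 0;
        }
    }
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, bg_thread, NULL);
    pthread_mutex_lock(&lock);
    pending = 3;                           /* queue some work, then stop */
    stop = true;
    pthread_cond_broadcast(&wake);
    pthread_mutex_unlock(&lock);
    pthread_join(t, NULL);
    return 0;
}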
1472 static void shutdown_work(struct ubi_device *ubi) argument
1475 flush_work(&ubi->fm_work);
1477 while (!list_empty(&ubi->works)) {
1480 wrk = list_entry(ubi->works.next, struct ubi_work, list);
1482 wrk->func(ubi, wrk, 1);
1483 ubi->works_count -= 1;
1484 ubi_assert(ubi->works_count >= 0);
1496 int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) argument
1504 ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
1505 spin_lock_init(&ubi->wl_lock);
1506 mutex_init(&ubi->move_mutex);
1507 init_rwsem(&ubi->work_sem);
1508 ubi->max_ec = ai->max_ec;
1509 INIT_LIST_HEAD(&ubi->works);
1511 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1514 ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
1515 if (!ubi->lookuptbl)
1519 INIT_LIST_HEAD(&ubi->pq[i]);
1520 ubi->pq_head = 0;
1531 ubi->lookuptbl[e->pnum] = e;
1532 if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
1533 wl_entry_destroy(ubi, e);
1540 ubi->free_count = 0;
1552 wl_tree_add(e, &ubi->free);
1553 ubi->free_count++;
1555 ubi->lookuptbl[e->pnum] = e;
1570 ubi->lookuptbl[e->pnum] = e;
1575 wl_tree_add(e, &ubi->used);
1579 wl_tree_add(e, &ubi->scrub);
1588 if (ubi->fm) {
1589 ubi_assert(ubi->good_peb_count ==
1590 found_pebs + ubi->fm->used_blocks);
1592 for (i = 0; i < ubi->fm->used_blocks; i++) {
1593 e = ubi->fm->e[i];
1594 ubi->lookuptbl[e->pnum] = e;
1598 ubi_assert(ubi->good_peb_count == found_pebs);
1601 ubi_fastmap_init(ubi, &reserved_pebs);
1603 if (ubi->avail_pebs < reserved_pebs) {
1604 ubi_err(ubi, "no enough physical eraseblocks (%d, need %d)",
1605 ubi->avail_pebs, reserved_pebs);
1606 if (ubi->corr_peb_count)
1607 ubi_err(ubi, "%d PEBs are corrupted and not used",
1608 ubi->corr_peb_count);
1612 ubi->avail_pebs -= reserved_pebs;
1613 ubi->rsvd_pebs += reserved_pebs;
1616 err = ensure_wear_leveling(ubi, 0);
1623 shutdown_work(ubi);
1624 tree_destroy(ubi, &ubi->used);
1625 tree_destroy(ubi, &ubi->free);
1626 tree_destroy(ubi, &ubi->scrub);
1627 kfree(ubi->lookuptbl);
1635 static void protection_queue_destroy(struct ubi_device *ubi) argument
1641 list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
1643 wl_entry_destroy(ubi, e);
1652 void ubi_wl_close(struct ubi_device *ubi) argument
1655 ubi_fastmap_close(ubi);
1656 shutdown_work(ubi);
1657 protection_queue_destroy(ubi);
1658 tree_destroy(ubi, &ubi->used);
1659 tree_destroy(ubi, &ubi->erroneous);
1660 tree_destroy(ubi, &ubi->free);
1661 tree_destroy(ubi, &ubi->scrub);
1662 kfree(ubi->lookuptbl);
1675 static int self_check_ec(struct ubi_device *ubi, int pnum, int ec) argument
1681 if (!ubi_dbg_chk_gen(ubi))
1684 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
1688 err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1697 ubi_err(ubi, "self-check failed for PEB %d", pnum);
1698 ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
1718 static int self_check_in_wl_tree(const struct ubi_device *ubi, argument
1721 if (!ubi_dbg_chk_gen(ubi))
1727 ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p ",
1741 static int self_check_in_pq(const struct ubi_device *ubi, argument
1747 if (!ubi_dbg_chk_gen(ubi))
1751 list_for_each_entry(p, &ubi->pq[i], u.list)
1755 ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
1761 static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi) argument
1765 e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
1766 self_check_in_wl_tree(ubi, e, &ubi->free);
1767 ubi->free_count--;
1768 ubi_assert(ubi->free_count >= 0);
1769 rb_erase(&e->u.rb, &ubi->free);
1783 static int produce_free_peb(struct ubi_device *ubi) argument
1787 while (!ubi->free.rb_node && ubi->works_count) {
1788 spin_unlock(&ubi->wl_lock);
1791 err = do_work(ubi);
1793 spin_lock(&ubi->wl_lock);
1809 int ubi_wl_get_peb(struct ubi_device *ubi) argument
1815 down_read(&ubi->fm_eba_sem);
1816 spin_lock(&ubi->wl_lock);
1817 if (!ubi->free.rb_node) {
1818 if (ubi->works_count == 0) {
1819 ubi_err(ubi, "no free eraseblocks");
1820 ubi_assert(list_empty(&ubi->works));
1821 spin_unlock(&ubi->wl_lock);
1825 err = produce_free_peb(ubi);
1827 spin_unlock(&ubi->wl_lock);
1830 spin_unlock(&ubi->wl_lock);
1831 up_read(&ubi->fm_eba_sem);
1835 e = wl_get_wle(ubi);
1836 prot_queue_add(ubi, e);
1837 spin_unlock(&ubi->wl_lock);
1839 err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
1840 ubi->peb_size - ubi->vid_hdr_aloffset);
1842 ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
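ubi_wl_get_peb() (1809-1842) ties the pieces together: if the free tree is empty it runs pending work until a freshly erased PEB appears (produce_free_peb(), 1783-1793), then takes the chosen entry out of the free tree and parks it on the protection queue so it cannot be reclaimed while the caller is still writing it. A compressed model of that flow, using plain counters instead of the RB-tree and work list:

#include <stdio.h>

static int free_pebs;     /* entries in the free set           */
static int pending_work;  /* queued erases that will free PEBs */
static int protected_pebs;

/* Stand-in for do_work(): one queued erase yields one free PEB. */
static int run_one_work(void)
{
    if (!pending_work)
        return -1;
    pending_work--;
    free_pebs++;
    return 0;
}

/* Sketch of ubi_wl_get_peb(): returns 0 on success, -1 if exhausted. */
static int get_peb(void)
{
    while (!free_pebs) {
        if (!pending_work) {
            fprintf(stderr, "no free eraseblocks\n");
            return -1;                /* nothing left to produce one from */
        }
        run_one_work();               /* produce_free_peb() equivalent */
    }
    free_pebs--;                      /* wl_get_wle() equivalent */
    protected_pebs++;                 /* prot_queue_add() equivalent */
    return 0;
}

int main(void)
{
    pending_work = 1;                 /* no free PEB yet, one erase queued */
    if (get_peb() == 0)
        printf("got a PEB; %d now protected\n", protected_pebs);
    return get_peb() == -1 ? 0 : 1;   /* second call must fail */
}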