This source file includes the following definitions:
- wl_tree_add
- wl_entry_destroy
- do_work
- in_wl_tree
- in_pq
- prot_queue_add
- find_wl_entry
- find_mean_wl_entry
- wl_get_wle
- prot_queue_del
- sync_erase
- serve_prot_queue
- __schedule_ubi_work
- schedule_ubi_work
- schedule_erase
- do_sync_erase
- wear_leveling_worker
- ensure_wear_leveling
- __erase_worker
- erase_worker
- ubi_wl_put_peb
- ubi_wl_scrub_peb
- ubi_wl_flush
- scrub_possible
- ubi_bitflip_check
- tree_destroy
- ubi_thread
- shutdown_work
- erase_aeb
- ubi_wl_init
- protection_queue_destroy
- ubi_wl_close
- self_check_ec
- self_check_in_wl_tree
- self_check_in_pq
- get_peb_for_wl
- produce_free_peb
- ubi_wl_get_peb
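
/*
 * UBI wear-leveling sub-system.
 *
 * This sub-system works in terms of physical eraseblocks (PEBs) and erase
 * counters (EC). Free PEBs are kept in the @ubi->free RB-tree, used PEBs in
 * @ubi->used, PEBs which need scrubbing in @ubi->scrub, and PEBs with
 * unrecoverable errors in @ubi->erroneous. PEBs which were recently handed
 * out are temporarily protected from wear-leveling via the @ubi->pq
 * protection queue. Pending erase and wear-leveling jobs are queued on
 * @ubi->works and processed by the background thread (ubi_thread()).
 */
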
88 #include <linux/slab.h>
89 #include <linux/crc32.h>
90 #include <linux/freezer.h>
91 #include <linux/kthread.h>
92 #include "ubi.h"
93 #include "wl.h"
94
95
96 #define WL_RESERVED_PEBS 1
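
/*
 * Wear-leveling threshold. If the erase counter of a free PEB exceeds the
 * erase counter of the least-worn used PEB by at least this value, the
 * wear-leveling worker is scheduled (see ensure_wear_leveling()).
 */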
104 #define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD
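
/*
 * Maximum erase-counter spread tolerated when picking a free PEB as the
 * target of a data move: only free PEBs whose erase counter is within this
 * range of the lowest free erase counter are considered (see find_wl_entry()
 * and find_mean_wl_entry()).
 */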
117 #define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
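
/*
 * Maximum number of consecutive background thread failures which is enough
 * to switch the device to read-only mode (see ubi_thread()).
 */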
123 #define WL_MAX_FAILURES 32
124
125 static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
126 static int self_check_in_wl_tree(const struct ubi_device *ubi,
127 struct ubi_wl_entry *e, struct rb_root *root);
128 static int self_check_in_pq(const struct ubi_device *ubi,
129 struct ubi_wl_entry *e);
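
/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Entries are ordered by erase counter first and by physical eraseblock
 * number second, so rb_first() always yields the least-worn entry.
 */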
139 static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
140 {
141 struct rb_node **p, *parent = NULL;
142
143 p = &root->rb_node;
144 while (*p) {
145 struct ubi_wl_entry *e1;
146
147 parent = *p;
148 e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);
149
150 if (e->ec < e1->ec)
151 p = &(*p)->rb_left;
152 else if (e->ec > e1->ec)
153 p = &(*p)->rb_right;
154 else {
155 ubi_assert(e->pnum != e1->pnum);
156 if (e->pnum < e1->pnum)
157 p = &(*p)->rb_left;
158 else
159 p = &(*p)->rb_right;
160 }
161 }
162
163 rb_link_node(&e->u.rb, parent, p);
164 rb_insert_color(&e->u.rb, root);
165 }
166
167
168
169
170
171
172
173
174
175 static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
176 {
177 ubi->lookuptbl[e->pnum] = NULL;
178 kmem_cache_free(ubi_wl_entry_slab, e);
179 }
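
/**
 * do_work - do one pending work item.
 * @ubi: UBI device description object
 *
 * Takes the next work item off @ubi->works and executes it. Returns zero if
 * there was nothing to do or the work completed successfully, and the error
 * code returned by the work function in case of failure.
 */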
188 static int do_work(struct ubi_device *ubi)
189 {
190 int err;
191 struct ubi_work *wrk;
192
193 cond_resched();
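
/*
 * @ubi->work_sem is taken for reading here so that callers which take it for
 * writing (e.g. ubi_wl_flush()) can wait until all work functions currently
 * in flight, which may sleep, have completed.
 */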
201 down_read(&ubi->work_sem);
202 spin_lock(&ubi->wl_lock);
203 if (list_empty(&ubi->works)) {
204 spin_unlock(&ubi->wl_lock);
205 up_read(&ubi->work_sem);
206 return 0;
207 }
208
209 wrk = list_entry(ubi->works.next, struct ubi_work, list);
210 list_del(&wrk->list);
211 ubi->works_count -= 1;
212 ubi_assert(ubi->works_count >= 0);
213 spin_unlock(&ubi->wl_lock);
214
215
216
217
218
219
220 err = wrk->func(ubi, wrk, 0);
221 if (err)
222 ubi_err(ubi, "work failed with error code %d", err);
223 up_read(&ubi->work_sem);
224
225 return err;
226 }
227
228
229
230
231
232
233
234
235
236 static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
237 {
238 struct rb_node *p;
239
240 p = root->rb_node;
241 while (p) {
242 struct ubi_wl_entry *e1;
243
244 e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
245
246 if (e->pnum == e1->pnum) {
247 ubi_assert(e == e1);
248 return 1;
249 }
250
251 if (e->ec < e1->ec)
252 p = p->rb_left;
253 else if (e->ec > e1->ec)
254 p = p->rb_right;
255 else {
256 ubi_assert(e->pnum != e1->pnum);
257 if (e->pnum < e1->pnum)
258 p = p->rb_left;
259 else
260 p = p->rb_right;
261 }
262 }
263
264 return 0;
265 }
266
267
268
269
270
271
272
273
274
275 static inline int in_pq(const struct ubi_device *ubi, struct ubi_wl_entry *e)
276 {
277 struct ubi_wl_entry *p;
278 int i;
279
280 for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
281 list_for_each_entry(p, &ubi->pq[i], u.list)
282 if (p == e)
283 return 1;
284
285 return 0;
286 }
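
/**
 * prot_queue_add - add a physical eraseblock to the protection queue.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 *
 * The PEB is added to the slot just behind @ubi->pq_head (the queue tail), so
 * it stays protected for roughly %UBI_PROT_QUEUE_LEN queue rotations before
 * serve_prot_queue() moves it to the used tree. Called with @ubi->wl_lock
 * held.
 */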
298 static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
299 {
300 int pq_tail = ubi->pq_head - 1;
301
302 if (pq_tail < 0)
303 pq_tail = UBI_PROT_QUEUE_LEN - 1;
304 ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
305 list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
306 dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
307 }
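
/**
 * find_wl_entry - find a wear-leveling entry below an erase-counter ceiling.
 * @ubi: UBI device description object
 * @root: the RB-tree to search
 * @diff: maximum allowed difference from the lowest erase counter in @root
 *
 * Returns the entry with the largest erase counter that is still smaller than
 * the lowest erase counter in the tree plus @diff. Before a fastmap is
 * attached, the second-best candidate may be returned instead so that
 * low-numbered PEBs (below %UBI_FM_MAX_START) stay available for fastmap.
 */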
318 static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
319 struct rb_root *root, int diff)
320 {
321 struct rb_node *p;
322 struct ubi_wl_entry *e, *prev_e = NULL;
323 int max;
324
325 e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
326 max = e->ec + diff;
327
328 p = root->rb_node;
329 while (p) {
330 struct ubi_wl_entry *e1;
331
332 e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
333 if (e1->ec >= max)
334 p = p->rb_left;
335 else {
336 p = p->rb_right;
337 prev_e = e;
338 e = e1;
339 }
340 }
341
342
343
344
345 if (prev_e && !ubi->fm_disabled &&
346 !ubi->fm && e->pnum < UBI_FM_MAX_START)
347 return prev_e;
348
349 return e;
350 }
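
/**
 * find_mean_wl_entry - find a wear-leveling entry with a medium erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree to search
 *
 * If the erase-counter spread in @root is below %WL_FREE_MAX_DIFF, the tree
 * root (a middle-of-the-range entry) is picked, possibly adjusted by
 * may_reserve_for_fm(); otherwise an entry about %WL_FREE_MAX_DIFF/2 above
 * the minimum is chosen via find_wl_entry().
 */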
361 static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
362 struct rb_root *root)
363 {
364 struct ubi_wl_entry *e, *first, *last;
365
366 first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
367 last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);
368
369 if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
370 e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
371
372
373
374
375 e = may_reserve_for_fm(ubi, e, root);
376 } else
377 e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);
378
379 return e;
380 }
381
382
383
384
385
386
387
388
389
390 static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
391 {
392 struct ubi_wl_entry *e;
393
394 e = find_mean_wl_entry(ubi, &ubi->free);
395 if (!e) {
396 ubi_err(ubi, "no free eraseblocks");
397 return NULL;
398 }
399
400 self_check_in_wl_tree(ubi, e, &ubi->free);
401
402
403
404
405
406 rb_erase(&e->u.rb, &ubi->free);
407 ubi->free_count--;
408 dbg_wl("PEB %d EC %d", e->pnum, e->ec);
409
410 return e;
411 }
412
413
414
415
416
417
418
419
420
421 static int prot_queue_del(struct ubi_device *ubi, int pnum)
422 {
423 struct ubi_wl_entry *e;
424
425 e = ubi->lookuptbl[pnum];
426 if (!e)
427 return -ENODEV;
428
429 if (self_check_in_pq(ubi, e))
430 return -ENODEV;
431
432 list_del(&e->u.list);
433 dbg_wl("deleted PEB %d from the protection queue", e->pnum);
434 return 0;
435 }
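
/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * Erases the PEB, increments its erase counter, and writes a fresh EC header.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */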
446 static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
447 int torture)
448 {
449 int err;
450 struct ubi_ec_hdr *ec_hdr;
451 unsigned long long ec = e->ec;
452
453 dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
454
455 err = self_check_ec(ubi, e->pnum, e->ec);
456 if (err)
457 return -EINVAL;
458
459 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
460 if (!ec_hdr)
461 return -ENOMEM;
462
463 err = ubi_io_sync_erase(ubi, e->pnum, torture);
464 if (err < 0)
465 goto out_free;
466
467 ec += err;
468 if (ec > UBI_MAX_ERASECOUNTER) {
469
470
471
472
473 ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu",
474 e->pnum, ec);
475 err = -EINVAL;
476 goto out_free;
477 }
478
479 dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);
480
481 ec_hdr->ec = cpu_to_be64(ec);
482
483 err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
484 if (err)
485 goto out_free;
486
487 e->ec = ec;
488 spin_lock(&ubi->wl_lock);
489 if (e->ec > ubi->max_ec)
490 ubi->max_ec = e->ec;
491 spin_unlock(&ubi->wl_lock);
492
493 out_free:
494 kfree(ec_hdr);
495 return err;
496 }
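
/**
 * serve_prot_queue - check if it is time to stop protecting some PEBs.
 * @ubi: UBI device description object
 *
 * Moves the PEBs sitting in the protection queue slot at @ubi->pq_head to the
 * used tree and then advances the head, so each PEB stays protected for about
 * %UBI_PROT_QUEUE_LEN rotations. The @ubi->wl_lock is dropped every 32
 * entries to avoid long lock hold times.
 */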
506 static void serve_prot_queue(struct ubi_device *ubi)
507 {
508 struct ubi_wl_entry *e, *tmp;
509 int count;
510
511
512
513
514
515 repeat:
516 count = 0;
517 spin_lock(&ubi->wl_lock);
518 list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
519 dbg_wl("PEB %d EC %d protection over, move to used tree",
520 e->pnum, e->ec);
521
522 list_del(&e->u.list);
523 wl_tree_add(e, &ubi->used);
524 if (count++ > 32) {
525
526
527
528
529 spin_unlock(&ubi->wl_lock);
530 cond_resched();
531 goto repeat;
532 }
533 }
534
535 ubi->pq_head += 1;
536 if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
537 ubi->pq_head = 0;
538 ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
539 spin_unlock(&ubi->wl_lock);
540 }
541
542
543
544
545
546
547
548
549
550 static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
551 {
552 spin_lock(&ubi->wl_lock);
553 list_add_tail(&wrk->list, &ubi->works);
554 ubi_assert(ubi->works_count >= 0);
555 ubi->works_count += 1;
556 if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
557 wake_up_process(ubi->bgt_thread);
558 spin_unlock(&ubi->wl_lock);
559 }
560
561
562
563
564
565
566
567
568
569 static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
570 {
571 down_read(&ubi->work_sem);
572 __schedule_ubi_work(ubi, wrk);
573 up_read(&ubi->work_sem);
574 }
575
576 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
577 int shutdown);
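
/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for this PEB
 * @torture: if the physical eraseblock has to be tortured
 * @nested: set when this function is called from a work function itself
 *
 * Allocates a &struct ubi_work item and queues it for the background thread.
 * Returns zero on success and %-ENOMEM if the work item cannot be allocated.
 */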
590 static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
591 int vol_id, int lnum, int torture, bool nested)
592 {
593 struct ubi_work *wl_wrk;
594
595 ubi_assert(e);
596
597 dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
598 e->pnum, e->ec, torture);
599
600 wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
601 if (!wl_wrk)
602 return -ENOMEM;
603
604 wl_wrk->func = &erase_worker;
605 wl_wrk->e = e;
606 wl_wrk->vol_id = vol_id;
607 wl_wrk->lnum = lnum;
608 wl_wrk->torture = torture;
609
610 if (nested)
611 __schedule_ubi_work(ubi, wl_wrk);
612 else
613 schedule_ubi_work(ubi, wl_wrk);
614 return 0;
615 }
616
617 static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk);
618
619
620
621
622
623
624
625
626
627 static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
628 int vol_id, int lnum, int torture)
629 {
630 struct ubi_work wl_wrk;
631
632 dbg_wl("sync erase of PEB %i", e->pnum);
633
634 wl_wrk.e = e;
635 wl_wrk.vol_id = vol_id;
636 wl_wrk.lnum = lnum;
637 wl_wrk.torture = torture;
638
639 return __erase_worker(ubi, &wl_wrk);
640 }
641
642 static int ensure_wear_leveling(struct ubi_device *ubi, int nested);
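
/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @shutdown: non-zero if the worker has to free memory and exit
 *
 * Picks a source PEB, either the least-worn entry of the used tree (which
 * presumably holds static data) or an entry of the scrub tree, copies its
 * logical eraseblock to a more worn free target PEB, and schedules the source
 * PEB for erasure. Returns zero if the move succeeded or was cancelled, and a
 * negative error code if a fatal error forced read-only mode.
 */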
654 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
655 int shutdown)
656 {
657 int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
658 int erase = 0, keep = 0, vol_id = -1, lnum = -1;
659 #ifdef CONFIG_MTD_UBI_FASTMAP
660 int anchor = wrk->anchor;
661 #endif
662 struct ubi_wl_entry *e1, *e2;
663 struct ubi_vid_io_buf *vidb;
664 struct ubi_vid_hdr *vid_hdr;
665 int dst_leb_clean = 0;
666
667 kfree(wrk);
668 if (shutdown)
669 return 0;
670
671 vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
672 if (!vidb)
673 return -ENOMEM;
674
675 vid_hdr = ubi_get_vid_hdr(vidb);
676
677 down_read(&ubi->fm_eba_sem);
678 mutex_lock(&ubi->move_mutex);
679 spin_lock(&ubi->wl_lock);
680 ubi_assert(!ubi->move_from && !ubi->move_to);
681 ubi_assert(!ubi->move_to_put);
682
683 if (!ubi->free.rb_node ||
684 (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
685
686
687
688
689
690
691
692
693
694
695 dbg_wl("cancel WL, a list is empty: free %d, used %d",
696 !ubi->free.rb_node, !ubi->used.rb_node);
697 goto out_cancel;
698 }
699
700 #ifdef CONFIG_MTD_UBI_FASTMAP
701
702 if (!anchor)
703 anchor = !anchor_pebs_available(&ubi->free);
704
705 if (anchor) {
706 e1 = find_anchor_wl_entry(&ubi->used);
707 if (!e1)
708 goto out_cancel;
709 e2 = get_peb_for_wl(ubi);
710 if (!e2)
711 goto out_cancel;
712
713
714
715
716 if (e2->pnum < UBI_FM_MAX_START)
717 goto out_cancel;
718
719 self_check_in_wl_tree(ubi, e1, &ubi->used);
720 rb_erase(&e1->u.rb, &ubi->used);
721 dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
722 } else if (!ubi->scrub.rb_node) {
723 #else
724 if (!ubi->scrub.rb_node) {
725 #endif
726
727
728
729
730
731 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
732 e2 = get_peb_for_wl(ubi);
733 if (!e2)
734 goto out_cancel;
735
736 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
737 dbg_wl("no WL needed: min used EC %d, max free EC %d",
738 e1->ec, e2->ec);
739
740
741 wl_tree_add(e2, &ubi->free);
742 ubi->free_count++;
743 goto out_cancel;
744 }
745 self_check_in_wl_tree(ubi, e1, &ubi->used);
746 rb_erase(&e1->u.rb, &ubi->used);
747 dbg_wl("move PEB %d EC %d to PEB %d EC %d",
748 e1->pnum, e1->ec, e2->pnum, e2->ec);
749 } else {
750
751 scrubbing = 1;
752 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
753 e2 = get_peb_for_wl(ubi);
754 if (!e2)
755 goto out_cancel;
756
757 self_check_in_wl_tree(ubi, e1, &ubi->scrub);
758 rb_erase(&e1->u.rb, &ubi->scrub);
759 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
760 }
761
762 ubi->move_from = e1;
763 ubi->move_to = e2;
764 spin_unlock(&ubi->wl_lock);
765
766
767
768
769
770
771
772
773
774
775
776
777 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vidb, 0);
778 if (err && err != UBI_IO_BITFLIPS) {
779 dst_leb_clean = 1;
780 if (err == UBI_IO_FF) {
781
782
783
784
785
786
787
788
789
790
791 dbg_wl("PEB %d has no VID header", e1->pnum);
792 protect = 1;
793 goto out_not_moved;
794 } else if (err == UBI_IO_FF_BITFLIPS) {
795
796
797
798
799
800 dbg_wl("PEB %d has no VID header but has bit-flips",
801 e1->pnum);
802 scrubbing = 1;
803 goto out_not_moved;
804 } else if (ubi->fast_attach && err == UBI_IO_BAD_HDR_EBADMSG) {
805
806
807
808
809
810 dbg_wl("PEB %d has ECC errors, maybe from an interrupted erasure",
811 e1->pnum);
812 erase = 1;
813 goto out_not_moved;
814 }
815
816 ubi_err(ubi, "error %d while reading VID header from PEB %d",
817 err, e1->pnum);
818 goto out_error;
819 }
820
821 vol_id = be32_to_cpu(vid_hdr->vol_id);
822 lnum = be32_to_cpu(vid_hdr->lnum);
823
824 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vidb);
825 if (err) {
826 if (err == MOVE_CANCEL_RACE) {
827
828
829
830
831
832
833
834 protect = 1;
835 dst_leb_clean = 1;
836 goto out_not_moved;
837 }
838 if (err == MOVE_RETRY) {
839 scrubbing = 1;
840 dst_leb_clean = 1;
841 goto out_not_moved;
842 }
843 if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
844 err == MOVE_TARGET_RD_ERR) {
845
846
847
848 torture = 1;
849 keep = 1;
850 goto out_not_moved;
851 }
852
853 if (err == MOVE_SOURCE_RD_ERR) {
854
855
856
857
858
859
860
861
862 if (ubi->erroneous_peb_count > ubi->max_erroneous) {
863 ubi_err(ubi, "too many erroneous eraseblocks (%d)",
864 ubi->erroneous_peb_count);
865 goto out_error;
866 }
867 dst_leb_clean = 1;
868 erroneous = 1;
869 goto out_not_moved;
870 }
871
872 if (err < 0)
873 goto out_error;
874
875 ubi_assert(0);
876 }
877
878
879 if (scrubbing)
880 ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
881 e1->pnum, vol_id, lnum, e2->pnum);
882 ubi_free_vid_buf(vidb);
883
884 spin_lock(&ubi->wl_lock);
885 if (!ubi->move_to_put) {
886 wl_tree_add(e2, &ubi->used);
887 e2 = NULL;
888 }
889 ubi->move_from = ubi->move_to = NULL;
890 ubi->move_to_put = ubi->wl_scheduled = 0;
891 spin_unlock(&ubi->wl_lock);
892
893 err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
894 if (err) {
895 if (e2)
896 wl_entry_destroy(ubi, e2);
897 goto out_ro;
898 }
899
900 if (e2) {
901
902
903
904
905 dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
906 e2->pnum, vol_id, lnum);
907 err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
908 if (err)
909 goto out_ro;
910 }
911
912 dbg_wl("done");
913 mutex_unlock(&ubi->move_mutex);
914 up_read(&ubi->fm_eba_sem);
915 return 0;
916
917
918
919
920
921
922 out_not_moved:
923 if (vol_id != -1)
924 dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
925 e1->pnum, vol_id, lnum, e2->pnum, err);
926 else
927 dbg_wl("cancel moving PEB %d to PEB %d (%d)",
928 e1->pnum, e2->pnum, err);
929 spin_lock(&ubi->wl_lock);
930 if (protect)
931 prot_queue_add(ubi, e1);
932 else if (erroneous) {
933 wl_tree_add(e1, &ubi->erroneous);
934 ubi->erroneous_peb_count += 1;
935 } else if (scrubbing)
936 wl_tree_add(e1, &ubi->scrub);
937 else if (keep)
938 wl_tree_add(e1, &ubi->used);
939 if (dst_leb_clean) {
940 wl_tree_add(e2, &ubi->free);
941 ubi->free_count++;
942 }
943
944 ubi_assert(!ubi->move_to_put);
945 ubi->move_from = ubi->move_to = NULL;
946 ubi->wl_scheduled = 0;
947 spin_unlock(&ubi->wl_lock);
948
949 ubi_free_vid_buf(vidb);
950 if (dst_leb_clean) {
951 ensure_wear_leveling(ubi, 1);
952 } else {
953 err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
954 if (err)
955 goto out_ro;
956 }
957
958 if (erase) {
959 err = do_sync_erase(ubi, e1, vol_id, lnum, 1);
960 if (err)
961 goto out_ro;
962 }
963
964 mutex_unlock(&ubi->move_mutex);
965 up_read(&ubi->fm_eba_sem);
966 return 0;
967
968 out_error:
969 if (vol_id != -1)
970 ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
971 err, e1->pnum, e2->pnum);
972 else
973 ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
974 err, e1->pnum, vol_id, lnum, e2->pnum);
975 spin_lock(&ubi->wl_lock);
976 ubi->move_from = ubi->move_to = NULL;
977 ubi->move_to_put = ubi->wl_scheduled = 0;
978 spin_unlock(&ubi->wl_lock);
979
980 ubi_free_vid_buf(vidb);
981 wl_entry_destroy(ubi, e1);
982 wl_entry_destroy(ubi, e2);
983
984 out_ro:
985 ubi_ro_mode(ubi);
986 mutex_unlock(&ubi->move_mutex);
987 up_read(&ubi->fm_eba_sem);
988 ubi_assert(err != 0);
989 return err < 0 ? err : -EIO;
990
991 out_cancel:
992 ubi->wl_scheduled = 0;
993 spin_unlock(&ubi->wl_lock);
994 mutex_unlock(&ubi->move_mutex);
995 up_read(&ubi->fm_eba_sem);
996 ubi_free_vid_buf(vidb);
997 return 0;
998 }
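
/**
 * ensure_wear_leveling - schedule the wear-leveling worker if it is needed.
 * @ubi: UBI device description object
 * @nested: set when this function is called from UBI worker context
 *
 * Checks whether scrubbing is pending or the erase-counter gap between the
 * used and free trees has reached %UBI_WL_THRESHOLD, and if so queues a
 * wear_leveling_worker() work item. Returns zero in case of success and a
 * negative error code in case of failure.
 */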
1009 static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
1010 {
1011 int err = 0;
1012 struct ubi_wl_entry *e1;
1013 struct ubi_wl_entry *e2;
1014 struct ubi_work *wrk;
1015
1016 spin_lock(&ubi->wl_lock);
1017 if (ubi->wl_scheduled)
1018
1019 goto out_unlock;
1020
1021
1022
1023
1024
1025 if (!ubi->scrub.rb_node) {
1026 if (!ubi->used.rb_node || !ubi->free.rb_node)
1027
1028 goto out_unlock;
1029
1030
1031
1032
1033
1034
1035
1036 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
1037 e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
1038
1039 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
1040 goto out_unlock;
1041 dbg_wl("schedule wear-leveling");
1042 } else
1043 dbg_wl("schedule scrubbing");
1044
1045 ubi->wl_scheduled = 1;
1046 spin_unlock(&ubi->wl_lock);
1047
1048 wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
1049 if (!wrk) {
1050 err = -ENOMEM;
1051 goto out_cancel;
1052 }
1053
1054 wrk->anchor = 0;
1055 wrk->func = &wear_leveling_worker;
1056 if (nested)
1057 __schedule_ubi_work(ubi, wrk);
1058 else
1059 schedule_ubi_work(ubi, wrk);
1060 return err;
1061
1062 out_cancel:
1063 spin_lock(&ubi->wl_lock);
1064 ubi->wl_scheduled = 0;
1065 out_unlock:
1066 spin_unlock(&ubi->wl_lock);
1067 return err;
1068 }
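
/**
 * __erase_worker - physical eraseblock erase worker function.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 *
 * Erases the PEB described by @wl_wrk and puts it back on the free tree.
 * Transient errors cause the erasure to be re-scheduled; an %-EIO error makes
 * the PEB a candidate for bad-block marking, consuming a PEB from the
 * bad-block reserve (or from the available pool when the reserve is empty).
 * Returns zero on success and a negative error code on failure.
 */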
1082 static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
1083 {
1084 struct ubi_wl_entry *e = wl_wrk->e;
1085 int pnum = e->pnum;
1086 int vol_id = wl_wrk->vol_id;
1087 int lnum = wl_wrk->lnum;
1088 int err, available_consumed = 0;
1089
1090 dbg_wl("erase PEB %d EC %d LEB %d:%d",
1091 pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
1092
1093 err = sync_erase(ubi, e, wl_wrk->torture);
1094 if (!err) {
1095 spin_lock(&ubi->wl_lock);
1096 wl_tree_add(e, &ubi->free);
1097 ubi->free_count++;
1098 spin_unlock(&ubi->wl_lock);
1099
1100
1101
1102
1103
1104 serve_prot_queue(ubi);
1105
1106
1107 err = ensure_wear_leveling(ubi, 1);
1108 return err;
1109 }
1110
1111 ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
1112
1113 if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
1114 err == -EBUSY) {
1115 int err1;
1116
1117
1118 err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
1119 if (err1) {
1120 wl_entry_destroy(ubi, e);
1121 err = err1;
1122 goto out_ro;
1123 }
1124 return err;
1125 }
1126
1127 wl_entry_destroy(ubi, e);
1128 if (err != -EIO)
1129
1130
1131
1132
1133
1134 goto out_ro;
1135
1136
1137
1138 if (!ubi->bad_allowed) {
1139 ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
1140 goto out_ro;
1141 }
1142
1143 spin_lock(&ubi->volumes_lock);
1144 if (ubi->beb_rsvd_pebs == 0) {
1145 if (ubi->avail_pebs == 0) {
1146 spin_unlock(&ubi->volumes_lock);
1147 ubi_err(ubi, "no reserved/available physical eraseblocks");
1148 goto out_ro;
1149 }
1150 ubi->avail_pebs -= 1;
1151 available_consumed = 1;
1152 }
1153 spin_unlock(&ubi->volumes_lock);
1154
1155 ubi_msg(ubi, "mark PEB %d as bad", pnum);
1156 err = ubi_io_mark_bad(ubi, pnum);
1157 if (err)
1158 goto out_ro;
1159
1160 spin_lock(&ubi->volumes_lock);
1161 if (ubi->beb_rsvd_pebs > 0) {
1162 if (available_consumed) {
1163
1164
1165
1166
1167 ubi->avail_pebs += 1;
1168 available_consumed = 0;
1169 }
1170 ubi->beb_rsvd_pebs -= 1;
1171 }
1172 ubi->bad_peb_count += 1;
1173 ubi->good_peb_count -= 1;
1174 ubi_calculate_reserved(ubi);
1175 if (available_consumed)
1176 ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
1177 else if (ubi->beb_rsvd_pebs)
1178 ubi_msg(ubi, "%d PEBs left in the reserve",
1179 ubi->beb_rsvd_pebs);
1180 else
1181 ubi_warn(ubi, "last PEB from the reserve was used");
1182 spin_unlock(&ubi->volumes_lock);
1183
1184 return err;
1185
1186 out_ro:
1187 if (available_consumed) {
1188 spin_lock(&ubi->volumes_lock);
1189 ubi->avail_pebs += 1;
1190 spin_unlock(&ubi->volumes_lock);
1191 }
1192 ubi_ro_mode(ubi);
1193 return err;
1194 }
1195
1196 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1197 int shutdown)
1198 {
1199 int ret;
1200
1201 if (shutdown) {
1202 struct ubi_wl_entry *e = wl_wrk->e;
1203
1204 dbg_wl("cancel erasure of PEB %d EC %d", e->pnum, e->ec);
1205 kfree(wl_wrk);
1206 wl_entry_destroy(ubi, e);
1207 return 0;
1208 }
1209
1210 ret = __erase_worker(ubi, wl_wrk);
1211 kfree(wl_wrk);
1212 return ret;
1213 }
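
/**
 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
 * @ubi: UBI device description object
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for this PEB
 * @pnum: physical eraseblock to return
 * @torture: if this physical eraseblock has to be tortured
 *
 * Returns physical eraseblock @pnum to the pool of free PEBs. The erasure is
 * scheduled asynchronously unless the PEB is currently involved in an LEB
 * move, in which case the move machinery takes care of it. Returns zero in
 * case of success and a negative error code in case of failure.
 */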
1228 int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
1229 int pnum, int torture)
1230 {
1231 int err;
1232 struct ubi_wl_entry *e;
1233
1234 dbg_wl("PEB %d", pnum);
1235 ubi_assert(pnum >= 0);
1236 ubi_assert(pnum < ubi->peb_count);
1237
1238 down_read(&ubi->fm_protect);
1239
1240 retry:
1241 spin_lock(&ubi->wl_lock);
1242 e = ubi->lookuptbl[pnum];
1243 if (e == ubi->move_from) {
1244
1245
1246
1247
1248
1249 dbg_wl("PEB %d is being moved, wait", pnum);
1250 spin_unlock(&ubi->wl_lock);
1251
1252
1253 mutex_lock(&ubi->move_mutex);
1254 mutex_unlock(&ubi->move_mutex);
1255 goto retry;
1256 } else if (e == ubi->move_to) {
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266 dbg_wl("PEB %d is the target of data moving", pnum);
1267 ubi_assert(!ubi->move_to_put);
1268 ubi->move_to_put = 1;
1269 spin_unlock(&ubi->wl_lock);
1270 up_read(&ubi->fm_protect);
1271 return 0;
1272 } else {
1273 if (in_wl_tree(e, &ubi->used)) {
1274 self_check_in_wl_tree(ubi, e, &ubi->used);
1275 rb_erase(&e->u.rb, &ubi->used);
1276 } else if (in_wl_tree(e, &ubi->scrub)) {
1277 self_check_in_wl_tree(ubi, e, &ubi->scrub);
1278 rb_erase(&e->u.rb, &ubi->scrub);
1279 } else if (in_wl_tree(e, &ubi->erroneous)) {
1280 self_check_in_wl_tree(ubi, e, &ubi->erroneous);
1281 rb_erase(&e->u.rb, &ubi->erroneous);
1282 ubi->erroneous_peb_count -= 1;
1283 ubi_assert(ubi->erroneous_peb_count >= 0);
1284
1285 torture = 1;
1286 } else {
1287 err = prot_queue_del(ubi, e->pnum);
1288 if (err) {
1289 ubi_err(ubi, "PEB %d not found", pnum);
1290 ubi_ro_mode(ubi);
1291 spin_unlock(&ubi->wl_lock);
1292 up_read(&ubi->fm_protect);
1293 return err;
1294 }
1295 }
1296 }
1297 spin_unlock(&ubi->wl_lock);
1298
1299 err = schedule_erase(ubi, e, vol_id, lnum, torture, false);
1300 if (err) {
1301 spin_lock(&ubi->wl_lock);
1302 wl_tree_add(e, &ubi->used);
1303 spin_unlock(&ubi->wl_lock);
1304 }
1305
1306 up_read(&ubi->fm_protect);
1307 return err;
1308 }
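
/**
 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to schedule
 *
 * If a bit-flip was detected in a physical eraseblock, it is scheduled for
 * scrubbing here. Returns zero in case of success and a negative error code
 * in case of failure.
 */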
1320 int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
1321 {
1322 struct ubi_wl_entry *e;
1323
1324 ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);
1325
1326 retry:
1327 spin_lock(&ubi->wl_lock);
1328 e = ubi->lookuptbl[pnum];
1329 if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
1330 in_wl_tree(e, &ubi->erroneous)) {
1331 spin_unlock(&ubi->wl_lock);
1332 return 0;
1333 }
1334
1335 if (e == ubi->move_to) {
1336
1337
1338
1339
1340
1341
1342 spin_unlock(&ubi->wl_lock);
1343 dbg_wl("the PEB %d is not in proper tree, retry", pnum);
1344 yield();
1345 goto retry;
1346 }
1347
1348 if (in_wl_tree(e, &ubi->used)) {
1349 self_check_in_wl_tree(ubi, e, &ubi->used);
1350 rb_erase(&e->u.rb, &ubi->used);
1351 } else {
1352 int err;
1353
1354 err = prot_queue_del(ubi, e->pnum);
1355 if (err) {
1356 ubi_err(ubi, "PEB %d not found", pnum);
1357 ubi_ro_mode(ubi);
1358 spin_unlock(&ubi->wl_lock);
1359 return err;
1360 }
1361 }
1362
1363 wl_tree_add(e, &ubi->scrub);
1364 spin_unlock(&ubi->wl_lock);
1365
1366
1367
1368
1369
1370 return ensure_wear_leveling(ubi, 0);
1371 }
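
/**
 * ubi_wl_flush - flush all pending works.
 * @ubi: UBI device description object
 * @vol_id: the volume ID that needs to be flushed, or %UBI_ALL
 * @lnum: the logical eraseblock number that needs to be flushed, or %UBI_ALL
 *
 * Executes all pending works which match @vol_id and @lnum, then waits for
 * any work functions that are already running to finish. Returns zero in
 * case of success and a negative error code in case of failure.
 */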
1385 int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
1386 {
1387 int err = 0;
1388 int found = 1;
1389
1390
1391
1392
1393
1394 dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
1395 vol_id, lnum, ubi->works_count);
1396
1397 while (found) {
1398 struct ubi_work *wrk, *tmp;
1399 found = 0;
1400
1401 down_read(&ubi->work_sem);
1402 spin_lock(&ubi->wl_lock);
1403 list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
1404 if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
1405 (lnum == UBI_ALL || wrk->lnum == lnum)) {
1406 list_del(&wrk->list);
1407 ubi->works_count -= 1;
1408 ubi_assert(ubi->works_count >= 0);
1409 spin_unlock(&ubi->wl_lock);
1410
1411 err = wrk->func(ubi, wrk, 0);
1412 if (err) {
1413 up_read(&ubi->work_sem);
1414 return err;
1415 }
1416
1417 spin_lock(&ubi->wl_lock);
1418 found = 1;
1419 break;
1420 }
1421 }
1422 spin_unlock(&ubi->wl_lock);
1423 up_read(&ubi->work_sem);
1424 }
1425
1426
1427
1428
1429
1430 down_write(&ubi->work_sem);
1431 up_write(&ubi->work_sem);
1432
1433 return err;
1434 }
1435
1436 static bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e)
1437 {
1438 if (in_wl_tree(e, &ubi->scrub))
1439 return false;
1440 else if (in_wl_tree(e, &ubi->erroneous))
1441 return false;
1442 else if (ubi->move_from == e)
1443 return false;
1444 else if (ubi->move_to == e)
1445 return false;
1446
1447 return true;
1448 }
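
/**
 * ubi_bitflip_check - check an eraseblock for bit-flips and scrub it if needed.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to check
 * @force: scrub or re-erase the PEB without reading it back first
 *
 * Reads back the whole PEB (unless @force is set) and, if bit-flips are
 * found, moves it to the scrub tree or re-schedules an erasure, depending on
 * which tree or queue currently holds it. Returns %-EUCLEAN if bit-flips were
 * detected and handling was scheduled, zero if the PEB read back clean, and a
 * negative error code on failure.
 */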
1469 int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force)
1470 {
1471 int err = 0;
1472 struct ubi_wl_entry *e;
1473
1474 if (pnum < 0 || pnum >= ubi->peb_count) {
1475 err = -EINVAL;
1476 goto out;
1477 }
1478
1479
1480
1481
1482
1483 down_write(&ubi->work_sem);
1484
1485
1486
1487
1488
1489 spin_lock(&ubi->wl_lock);
1490 e = ubi->lookuptbl[pnum];
1491 if (!e) {
1492 spin_unlock(&ubi->wl_lock);
1493 err = -ENOENT;
1494 goto out_resume;
1495 }
1496
1497
1498
1499
1500 if (!scrub_possible(ubi, e)) {
1501 spin_unlock(&ubi->wl_lock);
1502 err = -EBUSY;
1503 goto out_resume;
1504 }
1505 spin_unlock(&ubi->wl_lock);
1506
1507 if (!force) {
1508 mutex_lock(&ubi->buf_mutex);
1509 err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
1510 mutex_unlock(&ubi->buf_mutex);
1511 }
1512
1513 if (force || err == UBI_IO_BITFLIPS) {
1514
1515
1516
1517 spin_lock(&ubi->wl_lock);
1518
1519
1520
1521
1522
1523 e = ubi->lookuptbl[pnum];
1524 if (!e) {
1525 spin_unlock(&ubi->wl_lock);
1526 err = -ENOENT;
1527 goto out_resume;
1528 }
1529
1530
1531
1532
1533 if (!scrub_possible(ubi, e)) {
1534 spin_unlock(&ubi->wl_lock);
1535 err = -EBUSY;
1536 goto out_resume;
1537 }
1538
1539 if (in_pq(ubi, e)) {
1540 prot_queue_del(ubi, e->pnum);
1541 wl_tree_add(e, &ubi->scrub);
1542 spin_unlock(&ubi->wl_lock);
1543
1544 err = ensure_wear_leveling(ubi, 1);
1545 } else if (in_wl_tree(e, &ubi->used)) {
1546 rb_erase(&e->u.rb, &ubi->used);
1547 wl_tree_add(e, &ubi->scrub);
1548 spin_unlock(&ubi->wl_lock);
1549
1550 err = ensure_wear_leveling(ubi, 1);
1551 } else if (in_wl_tree(e, &ubi->free)) {
1552 rb_erase(&e->u.rb, &ubi->free);
1553 ubi->free_count--;
1554 spin_unlock(&ubi->wl_lock);
1555
1556
1557
1558
1559
1560 err = schedule_erase(ubi, e, UBI_UNKNOWN, UBI_UNKNOWN,
1561 force ? 0 : 1, true);
1562 } else {
1563 spin_unlock(&ubi->wl_lock);
1564 err = -EAGAIN;
1565 }
1566
1567 if (!err && !force)
1568 err = -EUCLEAN;
1569 } else {
1570 err = 0;
1571 }
1572
1573 out_resume:
1574 up_write(&ubi->work_sem);
1575 out:
1576
1577 return err;
1578 }
1579
1580
1581
1582
1583
1584
1585 static void tree_destroy(struct ubi_device *ubi, struct rb_root *root)
1586 {
1587 struct rb_node *rb;
1588 struct ubi_wl_entry *e;
1589
1590 rb = root->rb_node;
1591 while (rb) {
1592 if (rb->rb_left)
1593 rb = rb->rb_left;
1594 else if (rb->rb_right)
1595 rb = rb->rb_right;
1596 else {
1597 e = rb_entry(rb, struct ubi_wl_entry, u.rb);
1598
1599 rb = rb_parent(rb);
1600 if (rb) {
1601 if (rb->rb_left == &e->u.rb)
1602 rb->rb_left = NULL;
1603 else
1604 rb->rb_right = NULL;
1605 }
1606
1607 wl_entry_destroy(ubi, e);
1608 }
1609 }
1610 }
1611
1612
1613
1614
1615
1616 int ubi_thread(void *u)
1617 {
1618 int failures = 0;
1619 struct ubi_device *ubi = u;
1620
1621 ubi_msg(ubi, "background thread \"%s\" started, PID %d",
1622 ubi->bgt_name, task_pid_nr(current));
1623
1624 set_freezable();
1625 for (;;) {
1626 int err;
1627
1628 if (kthread_should_stop())
1629 break;
1630
1631 if (try_to_freeze())
1632 continue;
1633
1634 spin_lock(&ubi->wl_lock);
1635 if (list_empty(&ubi->works) || ubi->ro_mode ||
1636 !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
1637 set_current_state(TASK_INTERRUPTIBLE);
1638 spin_unlock(&ubi->wl_lock);
1639 schedule();
1640 continue;
1641 }
1642 spin_unlock(&ubi->wl_lock);
1643
1644 err = do_work(ubi);
1645 if (err) {
1646 ubi_err(ubi, "%s: work failed with error code %d",
1647 ubi->bgt_name, err);
1648 if (failures++ > WL_MAX_FAILURES) {
1649
1650
1651
1652
1653 ubi_msg(ubi, "%s: %d consecutive failures",
1654 ubi->bgt_name, WL_MAX_FAILURES);
1655 ubi_ro_mode(ubi);
1656 ubi->thread_enabled = 0;
1657 continue;
1658 }
1659 } else
1660 failures = 0;
1661
1662 cond_resched();
1663 }
1664
1665 dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1666 ubi->thread_enabled = 0;
1667 return 0;
1668 }
1669
1670
1671
1672
1673
1674 static void shutdown_work(struct ubi_device *ubi)
1675 {
1676 while (!list_empty(&ubi->works)) {
1677 struct ubi_work *wrk;
1678
1679 wrk = list_entry(ubi->works.next, struct ubi_work, list);
1680 list_del(&wrk->list);
1681 wrk->func(ubi, wrk, 1);
1682 ubi->works_count -= 1;
1683 ubi_assert(ubi->works_count >= 0);
1684 }
1685 }
1686
1687
1688
1689
1690
1691
1692
1693 static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync)
1694 {
1695 struct ubi_wl_entry *e;
1696 int err;
1697
1698 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1699 if (!e)
1700 return -ENOMEM;
1701
1702 e->pnum = aeb->pnum;
1703 e->ec = aeb->ec;
1704 ubi->lookuptbl[e->pnum] = e;
1705
1706 if (sync) {
1707 err = sync_erase(ubi, e, false);
1708 if (err)
1709 goto out_free;
1710
1711 wl_tree_add(e, &ubi->free);
1712 ubi->free_count++;
1713 } else {
1714 err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false);
1715 if (err)
1716 goto out_free;
1717 }
1718
1719 return 0;
1720
1721 out_free:
1722 wl_entry_destroy(ubi, e);
1723
1724 return err;
1725 }
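
/**
 * ubi_wl_init - initialize the WL sub-system using attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * Populates the lookup table and the free/used/scrub trees from @ai,
 * initializes the protection queue, reserves PEBs for wear-leveling and
 * fastmap, and kicks off an initial wear-leveling check. Returns zero in
 * case of success and a negative error code in case of failure.
 */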
1735 int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1736 {
1737 int err, i, reserved_pebs, found_pebs = 0;
1738 struct rb_node *rb1, *rb2;
1739 struct ubi_ainf_volume *av;
1740 struct ubi_ainf_peb *aeb, *tmp;
1741 struct ubi_wl_entry *e;
1742
1743 ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
1744 spin_lock_init(&ubi->wl_lock);
1745 mutex_init(&ubi->move_mutex);
1746 init_rwsem(&ubi->work_sem);
1747 ubi->max_ec = ai->max_ec;
1748 INIT_LIST_HEAD(&ubi->works);
1749
1750 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1751
1752 err = -ENOMEM;
1753 ubi->lookuptbl = kcalloc(ubi->peb_count, sizeof(void *), GFP_KERNEL);
1754 if (!ubi->lookuptbl)
1755 return err;
1756
1757 for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
1758 INIT_LIST_HEAD(&ubi->pq[i]);
1759 ubi->pq_head = 0;
1760
1761 ubi->free_count = 0;
1762 list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
1763 cond_resched();
1764
1765 err = erase_aeb(ubi, aeb, false);
1766 if (err)
1767 goto out_free;
1768
1769 found_pebs++;
1770 }
1771
1772 list_for_each_entry(aeb, &ai->free, u.list) {
1773 cond_resched();
1774
1775 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1776 if (!e) {
1777 err = -ENOMEM;
1778 goto out_free;
1779 }
1780
1781 e->pnum = aeb->pnum;
1782 e->ec = aeb->ec;
1783 ubi_assert(e->ec >= 0);
1784
1785 wl_tree_add(e, &ubi->free);
1786 ubi->free_count++;
1787
1788 ubi->lookuptbl[e->pnum] = e;
1789
1790 found_pebs++;
1791 }
1792
1793 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
1794 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
1795 cond_resched();
1796
1797 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1798 if (!e) {
1799 err = -ENOMEM;
1800 goto out_free;
1801 }
1802
1803 e->pnum = aeb->pnum;
1804 e->ec = aeb->ec;
1805 ubi->lookuptbl[e->pnum] = e;
1806
1807 if (!aeb->scrub) {
1808 dbg_wl("add PEB %d EC %d to the used tree",
1809 e->pnum, e->ec);
1810 wl_tree_add(e, &ubi->used);
1811 } else {
1812 dbg_wl("add PEB %d EC %d to the scrub tree",
1813 e->pnum, e->ec);
1814 wl_tree_add(e, &ubi->scrub);
1815 }
1816
1817 found_pebs++;
1818 }
1819 }
1820
1821 list_for_each_entry(aeb, &ai->fastmap, u.list) {
1822 cond_resched();
1823
1824 e = ubi_find_fm_block(ubi, aeb->pnum);
1825
1826 if (e) {
1827 ubi_assert(!ubi->lookuptbl[e->pnum]);
1828 ubi->lookuptbl[e->pnum] = e;
1829 } else {
1830 bool sync = false;
1831
1832
1833
1834
1835
1836
1837
1838 if (ubi->lookuptbl[aeb->pnum])
1839 continue;
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850 if (aeb->vol_id == UBI_FM_SB_VOLUME_ID)
1851 sync = true;
1852
1853 err = erase_aeb(ubi, aeb, sync);
1854 if (err)
1855 goto out_free;
1856 }
1857
1858 found_pebs++;
1859 }
1860
1861 dbg_wl("found %i PEBs", found_pebs);
1862
1863 ubi_assert(ubi->good_peb_count == found_pebs);
1864
1865 reserved_pebs = WL_RESERVED_PEBS;
1866 ubi_fastmap_init(ubi, &reserved_pebs);
1867
1868 if (ubi->avail_pebs < reserved_pebs) {
1869 ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
1870 ubi->avail_pebs, reserved_pebs);
1871 if (ubi->corr_peb_count)
1872 ubi_err(ubi, "%d PEBs are corrupted and not used",
1873 ubi->corr_peb_count);
1874 err = -ENOSPC;
1875 goto out_free;
1876 }
1877 ubi->avail_pebs -= reserved_pebs;
1878 ubi->rsvd_pebs += reserved_pebs;
1879
1880
1881 err = ensure_wear_leveling(ubi, 0);
1882 if (err)
1883 goto out_free;
1884
1885 return 0;
1886
1887 out_free:
1888 shutdown_work(ubi);
1889 tree_destroy(ubi, &ubi->used);
1890 tree_destroy(ubi, &ubi->free);
1891 tree_destroy(ubi, &ubi->scrub);
1892 kfree(ubi->lookuptbl);
1893 return err;
1894 }
1895
1896
1897
1898
1899
1900 static void protection_queue_destroy(struct ubi_device *ubi)
1901 {
1902 int i;
1903 struct ubi_wl_entry *e, *tmp;
1904
1905 for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
1906 list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
1907 list_del(&e->u.list);
1908 wl_entry_destroy(ubi, e);
1909 }
1910 }
1911 }
1912
1913
1914
1915
1916
1917 void ubi_wl_close(struct ubi_device *ubi)
1918 {
1919 dbg_wl("close the WL sub-system");
1920 ubi_fastmap_close(ubi);
1921 shutdown_work(ubi);
1922 protection_queue_destroy(ubi);
1923 tree_destroy(ubi, &ubi->used);
1924 tree_destroy(ubi, &ubi->erroneous);
1925 tree_destroy(ubi, &ubi->free);
1926 tree_destroy(ubi, &ubi->scrub);
1927 kfree(ubi->lookuptbl);
1928 }
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940 static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
1941 {
1942 int err;
1943 long long read_ec;
1944 struct ubi_ec_hdr *ec_hdr;
1945
1946 if (!ubi_dbg_chk_gen(ubi))
1947 return 0;
1948
1949 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
1950 if (!ec_hdr)
1951 return -ENOMEM;
1952
1953 err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1954 if (err && err != UBI_IO_BITFLIPS) {
1955
1956 err = 0;
1957 goto out_free;
1958 }
1959
1960 read_ec = be64_to_cpu(ec_hdr->ec);
1961 if (ec != read_ec && read_ec - ec > 1) {
1962 ubi_err(ubi, "self-check failed for PEB %d", pnum);
1963 ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
1964 dump_stack();
1965 err = 1;
1966 } else
1967 err = 0;
1968
1969 out_free:
1970 kfree(ec_hdr);
1971 return err;
1972 }
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983 static int self_check_in_wl_tree(const struct ubi_device *ubi,
1984 struct ubi_wl_entry *e, struct rb_root *root)
1985 {
1986 if (!ubi_dbg_chk_gen(ubi))
1987 return 0;
1988
1989 if (in_wl_tree(e, root))
1990 return 0;
1991
1992 ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p ",
1993 e->pnum, e->ec, root);
1994 dump_stack();
1995 return -EINVAL;
1996 }
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006 static int self_check_in_pq(const struct ubi_device *ubi,
2007 struct ubi_wl_entry *e)
2008 {
2009 if (!ubi_dbg_chk_gen(ubi))
2010 return 0;
2011
2012 if (in_pq(ubi, e))
2013 return 0;
2014
2015 ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
2016 e->pnum, e->ec);
2017 dump_stack();
2018 return -EINVAL;
2019 }
2020 #ifndef CONFIG_MTD_UBI_FASTMAP
2021 static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
2022 {
2023 struct ubi_wl_entry *e;
2024
2025 e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
2026 self_check_in_wl_tree(ubi, e, &ubi->free);
2027 ubi->free_count--;
2028 ubi_assert(ubi->free_count >= 0);
2029 rb_erase(&e->u.rb, &ubi->free);
2030
2031 return e;
2032 }
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043 static int produce_free_peb(struct ubi_device *ubi)
2044 {
2045 int err;
2046
2047 while (!ubi->free.rb_node && ubi->works_count) {
2048 spin_unlock(&ubi->wl_lock);
2049
2050 dbg_wl("do one work synchronously");
2051 err = do_work(ubi);
2052
2053 spin_lock(&ubi->wl_lock);
2054 if (err)
2055 return err;
2056 }
2057
2058 return 0;
2059 }
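
/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * Returns the number of a fetched free PEB in case of success, %-ENOSPC if
 * there are no free PEBs and no pending works, or another negative error
 * code on failure. The returned PEB is placed on the protection queue so it
 * is not immediately picked for wear-leveling.
 */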
2069 int ubi_wl_get_peb(struct ubi_device *ubi)
2070 {
2071 int err;
2072 struct ubi_wl_entry *e;
2073
2074 retry:
2075 down_read(&ubi->fm_eba_sem);
2076 spin_lock(&ubi->wl_lock);
2077 if (!ubi->free.rb_node) {
2078 if (ubi->works_count == 0) {
2079 ubi_err(ubi, "no free eraseblocks");
2080 ubi_assert(list_empty(&ubi->works));
2081 spin_unlock(&ubi->wl_lock);
2082 return -ENOSPC;
2083 }
2084
2085 err = produce_free_peb(ubi);
2086 if (err < 0) {
2087 spin_unlock(&ubi->wl_lock);
2088 return err;
2089 }
2090 spin_unlock(&ubi->wl_lock);
2091 up_read(&ubi->fm_eba_sem);
2092 goto retry;
2093
2094 }
2095 e = wl_get_wle(ubi);
2096 prot_queue_add(ubi, e);
2097 spin_unlock(&ubi->wl_lock);
2098
2099 err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
2100 ubi->peb_size - ubi->vid_hdr_aloffset);
2101 if (err) {
2102 ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
2103 return err;
2104 }
2105
2106 return e->pnum;
2107 }
2108 #else
2109 #include "fastmap-wl.c"
2110 #endif