This source file includes the following definitions.
- switch_gc_head
- data_nodes_cmp
- nondata_nodes_cmp
- sort_nodes
- move_node
- move_nodes
- gc_sync_wbufs
- ubifs_garbage_collect_leb
- ubifs_garbage_collect
- ubifs_gc_start_commit
- ubifs_gc_end_commit
- ubifs_destroy_idx_gc
- ubifs_get_idx_gc_leb
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44 #include <linux/slab.h>
45 #include <linux/pagemap.h>
46 #include <linux/list_sort.h>
47 #include "ubifs.h"
48
49
50
51
52
53
54 #define SOFT_LEBS_LIMIT 4
55 #define HARD_LEBS_LIMIT 32
56
57
58
59
60
61
62
63
64
65
66
67
68
69 static int switch_gc_head(struct ubifs_info *c)
70 {
71 int err, gc_lnum = c->gc_lnum;
72 struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
73
74 ubifs_assert(c, gc_lnum != -1);
75 dbg_gc("switch GC head from LEB %d:%d to LEB %d (waste %d bytes)",
76 wbuf->lnum, wbuf->offs + wbuf->used, gc_lnum,
77 c->leb_size - wbuf->offs - wbuf->used);
78
79 err = ubifs_wbuf_sync_nolock(wbuf);
80 if (err)
81 return err;
82
83
84
85
86
87 err = ubifs_leb_unmap(c, gc_lnum);
88 if (err)
89 return err;
90
91 err = ubifs_add_bud_to_log(c, GCHD, gc_lnum, 0);
92 if (err)
93 return err;
94
95 c->gc_lnum = -1;
96 err = ubifs_wbuf_seek_nolock(wbuf, gc_lnum, 0);
97 return err;
98 }
99
100
101
102
103
104
105
106
107
108
109 static int data_nodes_cmp(void *priv, struct list_head *a, struct list_head *b)
110 {
111 ino_t inuma, inumb;
112 struct ubifs_info *c = priv;
113 struct ubifs_scan_node *sa, *sb;
114
115 cond_resched();
116 if (a == b)
117 return 0;
118
119 sa = list_entry(a, struct ubifs_scan_node, list);
120 sb = list_entry(b, struct ubifs_scan_node, list);
121
122 ubifs_assert(c, key_type(c, &sa->key) == UBIFS_DATA_KEY);
123 ubifs_assert(c, key_type(c, &sb->key) == UBIFS_DATA_KEY);
124 ubifs_assert(c, sa->type == UBIFS_DATA_NODE);
125 ubifs_assert(c, sb->type == UBIFS_DATA_NODE);
126
127 inuma = key_inum(c, &sa->key);
128 inumb = key_inum(c, &sb->key);
129
130 if (inuma == inumb) {
131 unsigned int blka = key_block(c, &sa->key);
132 unsigned int blkb = key_block(c, &sb->key);
133
134 if (blka <= blkb)
135 return -1;
136 } else if (inuma <= inumb)
137 return -1;
138
139 return 1;
140 }
141
142
143
144
145
146
147
148
149
150
151
152 static int nondata_nodes_cmp(void *priv, struct list_head *a,
153 struct list_head *b)
154 {
155 ino_t inuma, inumb;
156 struct ubifs_info *c = priv;
157 struct ubifs_scan_node *sa, *sb;
158
159 cond_resched();
160 if (a == b)
161 return 0;
162
163 sa = list_entry(a, struct ubifs_scan_node, list);
164 sb = list_entry(b, struct ubifs_scan_node, list);
165
166 ubifs_assert(c, key_type(c, &sa->key) != UBIFS_DATA_KEY &&
167 key_type(c, &sb->key) != UBIFS_DATA_KEY);
168 ubifs_assert(c, sa->type != UBIFS_DATA_NODE &&
169 sb->type != UBIFS_DATA_NODE);
170
171
172 if (sa->type == UBIFS_INO_NODE) {
173 if (sb->type == UBIFS_INO_NODE)
174 return sb->len - sa->len;
175 return -1;
176 }
177 if (sb->type == UBIFS_INO_NODE)
178 return 1;
179
180 ubifs_assert(c, key_type(c, &sa->key) == UBIFS_DENT_KEY ||
181 key_type(c, &sa->key) == UBIFS_XENT_KEY);
182 ubifs_assert(c, key_type(c, &sb->key) == UBIFS_DENT_KEY ||
183 key_type(c, &sb->key) == UBIFS_XENT_KEY);
184 ubifs_assert(c, sa->type == UBIFS_DENT_NODE ||
185 sa->type == UBIFS_XENT_NODE);
186 ubifs_assert(c, sb->type == UBIFS_DENT_NODE ||
187 sb->type == UBIFS_XENT_NODE);
188
189 inuma = key_inum(c, &sa->key);
190 inumb = key_inum(c, &sb->key);
191
192 if (inuma == inumb) {
193 uint32_t hasha = key_hash(c, &sa->key);
194 uint32_t hashb = key_hash(c, &sb->key);
195
196 if (hasha <= hashb)
197 return -1;
198 } else if (inuma <= inumb)
199 return -1;
200
201 return 1;
202 }
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231 static int sort_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
232 struct list_head *nondata, int *min)
233 {
234 int err;
235 struct ubifs_scan_node *snod, *tmp;
236
237 *min = INT_MAX;
238
239
240 list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
241 ubifs_assert(c, snod->type == UBIFS_INO_NODE ||
242 snod->type == UBIFS_DATA_NODE ||
243 snod->type == UBIFS_DENT_NODE ||
244 snod->type == UBIFS_XENT_NODE ||
245 snod->type == UBIFS_TRUN_NODE ||
246 snod->type == UBIFS_AUTH_NODE);
247
248 if (snod->type != UBIFS_INO_NODE &&
249 snod->type != UBIFS_DATA_NODE &&
250 snod->type != UBIFS_DENT_NODE &&
251 snod->type != UBIFS_XENT_NODE) {
252
253 list_del(&snod->list);
254 kfree(snod);
255 continue;
256 }
257
258 ubifs_assert(c, key_type(c, &snod->key) == UBIFS_DATA_KEY ||
259 key_type(c, &snod->key) == UBIFS_INO_KEY ||
260 key_type(c, &snod->key) == UBIFS_DENT_KEY ||
261 key_type(c, &snod->key) == UBIFS_XENT_KEY);
262
263 err = ubifs_tnc_has_node(c, &snod->key, 0, sleb->lnum,
264 snod->offs, 0);
265 if (err < 0)
266 return err;
267
268 if (!err) {
269
270 list_del(&snod->list);
271 kfree(snod);
272 continue;
273 }
274
275 if (snod->len < *min)
276 *min = snod->len;
277
278 if (key_type(c, &snod->key) != UBIFS_DATA_KEY)
279 list_move_tail(&snod->list, nondata);
280 }
281
282
283 list_sort(c, &sleb->nodes, &data_nodes_cmp);
284 list_sort(c, nondata, &nondata_nodes_cmp);
285
286 err = dbg_check_data_nodes_order(c, &sleb->nodes);
287 if (err)
288 return err;
289 err = dbg_check_nondata_nodes_order(c, nondata);
290 if (err)
291 return err;
292 return 0;
293 }
294
295
296
297
298
299
300
301
302
303
304
305
306 static int move_node(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
307 struct ubifs_scan_node *snod, struct ubifs_wbuf *wbuf)
308 {
309 int err, new_lnum = wbuf->lnum, new_offs = wbuf->offs + wbuf->used;
310
311 cond_resched();
312 err = ubifs_wbuf_write_nolock(wbuf, snod->node, snod->len);
313 if (err)
314 return err;
315
316 err = ubifs_tnc_replace(c, &snod->key, sleb->lnum,
317 snod->offs, new_lnum, new_offs,
318 snod->len);
319 list_del(&snod->list);
320 kfree(snod);
321 return err;
322 }
323
324
325
326
327
328
329
330
331
332
333
334 static int move_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb)
335 {
336 int err, min;
337 LIST_HEAD(nondata);
338 struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
339
340 if (wbuf->lnum == -1) {
341
342
343
344
345 err = switch_gc_head(c);
346 if (err)
347 return err;
348 }
349
350 err = sort_nodes(c, sleb, &nondata, &min);
351 if (err)
352 goto out;
353
354
355 while (1) {
356 int avail, moved = 0;
357 struct ubifs_scan_node *snod, *tmp;
358
359
360 list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
361 avail = c->leb_size - wbuf->offs - wbuf->used -
362 ubifs_auth_node_sz(c);
363 if (snod->len > avail)
364
365
366
367
368 break;
369
370 err = ubifs_shash_update(c, c->jheads[GCHD].log_hash,
371 snod->node, snod->len);
372 if (err)
373 goto out;
374
375 err = move_node(c, sleb, snod, wbuf);
376 if (err)
377 goto out;
378 moved = 1;
379 }
380
381
382 list_for_each_entry_safe(snod, tmp, &nondata, list) {
383 avail = c->leb_size - wbuf->offs - wbuf->used -
384 ubifs_auth_node_sz(c);
385 if (avail < min)
386 break;
387
388 if (snod->len > avail) {
389
390
391
392
393
394
395
396 if (key_type(c, &snod->key) == UBIFS_DENT_KEY ||
397 snod->len == UBIFS_INO_NODE_SZ)
398 break;
399 continue;
400 }
401
402 err = ubifs_shash_update(c, c->jheads[GCHD].log_hash,
403 snod->node, snod->len);
404 if (err)
405 goto out;
406
407 err = move_node(c, sleb, snod, wbuf);
408 if (err)
409 goto out;
410 moved = 1;
411 }
412
413 if (ubifs_authenticated(c) && moved) {
414 struct ubifs_auth_node *auth;
415
416 auth = kmalloc(ubifs_auth_node_sz(c), GFP_NOFS);
417 if (!auth) {
418 err = -ENOMEM;
419 goto out;
420 }
421
422 err = ubifs_prepare_auth_node(c, auth,
423 c->jheads[GCHD].log_hash);
424 if (err) {
425 kfree(auth);
426 goto out;
427 }
428
429 err = ubifs_wbuf_write_nolock(wbuf, auth,
430 ubifs_auth_node_sz(c));
431 if (err) {
432 kfree(auth);
433 goto out;
434 }
435
436 ubifs_add_dirt(c, wbuf->lnum, ubifs_auth_node_sz(c));
437 }
438
439 if (list_empty(&sleb->nodes) && list_empty(&nondata))
440 break;
441
442
443
444
445
446 err = switch_gc_head(c);
447 if (err)
448 goto out;
449 }
450
451 return 0;
452
453 out:
454 list_splice_tail(&nondata, &sleb->nodes);
455 return err;
456 }
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471 static int gc_sync_wbufs(struct ubifs_info *c)
472 {
473 int err, i;
474
475 for (i = 0; i < c->jhead_cnt; i++) {
476 if (i == GCHD)
477 continue;
478 err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
479 if (err)
480 return err;
481 }
482 return 0;
483 }
484
485
486
487
488
489
490
491
492
493
/**
 * ubifs_garbage_collect_leb - garbage-collect a logical eraseblock.
 * @c: UBIFS file-system description object
 * @lp: describes the LEB to garbage collect
 *
 * Returns %LEB_FREED, %LEB_FREED_IDX or %LEB_RETAINED on success, or a
 * negative error code on failure.
 * NOTE(review): the wbuf usage and the mutex handling in
 * ubifs_garbage_collect() suggest this must run with the GC head
 * write-buffer locked - confirm against all callers.
 */
494 int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp)
495 {
496 struct ubifs_scan_leb *sleb;
497 struct ubifs_scan_node *snod;
498 struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
499 int err = 0, lnum = lp->lnum;
500
501 ubifs_assert(c, c->gc_lnum != -1 || wbuf->offs + wbuf->used == 0 ||
502 c->need_recovery);
503 ubifs_assert(c, c->gc_lnum != lnum);
504 ubifs_assert(c, wbuf->lnum != lnum);
505
/* Fully reclaimable LEB (free + dirty covers everything): no scan needed. */
506 if (lp->free + lp->dirty == c->leb_size) {
507
508 dbg_gc("LEB %d is free, return it", lp->lnum);
509 ubifs_assert(c, !(lp->flags & LPROPS_INDEX));
510
511 if (lp->free != c->leb_size) {
512
513
514
515
/*
 * The LEB still holds some (dead) data: sync the other write-buffers
 * first, because one of them may contain data which obsoletes
 * something in this LEB, then mark the LEB entirely free.
 */
516
517 err = gc_sync_wbufs(c);
518 if (err)
519 return err;
520 err = ubifs_change_one_lp(c, lp->lnum, c->leb_size,
521 0, 0, 0, 0);
522 if (err)
523 return err;
524 }
525 err = ubifs_leb_unmap(c, lp->lnum);
526 if (err)
527 return err;
528
/* No reserved GC LEB yet - keep this one for the GC head. */
529 if (c->gc_lnum == -1) {
530 c->gc_lnum = lnum;
531 return LEB_RETAINED;
532 }
533
534 return LEB_FREED;
535 }
536
537
538
539
/* The LEB contains live nodes - scan it to find out what to move. */
540
541 sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0);
542 if (IS_ERR(sleb))
543 return PTR_ERR(sleb);
544
545 ubifs_assert(c, !list_empty(&sleb->nodes));
546 snod = list_entry(sleb->nodes.next, struct ubifs_scan_node, list);
547
/* Index LEBs are handled differently from data LEBs. */
548 if (snod->type == UBIFS_IDX_NODE) {
549 struct ubifs_gced_idx_leb *idx_gc;
550
551 dbg_gc("indexing LEB %d (free %d, dirty %d)",
552 lnum, lp->free, lp->dirty);
/*
 * Mark every index node in this LEB dirty in the TNC so the next
 * commit writes it out elsewhere.
 */
553 list_for_each_entry(snod, &sleb->nodes, list) {
554 struct ubifs_idx_node *idx = snod->node;
555 int level = le16_to_cpu(idx->level);
556
557 ubifs_assert(c, snod->type == UBIFS_IDX_NODE);
558 key_read(c, ubifs_idx_key(c, idx), &snod->key);
559 err = ubifs_dirty_idx_node(c, &snod->key, level, lnum,
560 snod->offs);
561 if (err)
562 goto out;
563 }
564
/* Remember this LEB so it can be unmapped after the commit. */
565 idx_gc = kmalloc(sizeof(struct ubifs_gced_idx_leb), GFP_NOFS);
566 if (!idx_gc) {
567 err = -ENOMEM;
568 goto out;
569 }
570
571 idx_gc->lnum = lnum;
572 idx_gc->unmap = 0;
573 list_add(&idx_gc->list, &c->idx_gc);
574
575
576
577
578
/*
 * Mark the LEB free, clear %LPROPS_INDEX and bump the index-GC count.
 * NOTE(review): the LEB is not unmapped here - presumably it must stay
 * intact until the commit in case it is needed for recovery; confirm.
 */
579
580
581 err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0,
582 LPROPS_INDEX, 1);
583 if (err)
584 goto out;
585 err = LEB_FREED_IDX;
586 } else {
587 dbg_gc("data LEB %d (free %d, dirty %d)",
588 lnum, lp->free, lp->dirty);
589
590 err = move_nodes(c, sleb);
591 if (err)
592 goto out_inc_seq;
593
594 err = gc_sync_wbufs(c);
595 if (err)
596 goto out_inc_seq;
597
598 err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0, 0, 0);
599 if (err)
600 goto out_inc_seq;
601
602
/* Publish that this LEB was GC'ed - allows for races with the TNC. */
603 c->gced_lnum = lnum;
604 smp_wmb();
605 c->gc_seq += 1;
606 smp_wmb();
607
608 if (c->gc_lnum == -1) {
/* Retain this LEB as the new reserved GC LEB. */
609 c->gc_lnum = lnum;
610 err = LEB_RETAINED;
611 } else {
612 err = ubifs_wbuf_sync_nolock(wbuf);
613 if (err)
614 goto out;
615
616 err = ubifs_leb_unmap(c, lnum);
617 if (err)
618 goto out;
619
620 err = LEB_FREED;
621 }
622 }
623
624 out:
625 ubifs_scan_destroy(sleb);
626 return err;
627
628 out_inc_seq:
/* At least some nodes may have moved - still allow for TNC races. */
629
630 c->gced_lnum = lnum;
631 smp_wmb();
632 c->gc_seq += 1;
633 smp_wmb();
634 goto out;
635 }
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
/**
 * ubifs_garbage_collect - UBIFS garbage collector.
 * @c: UBIFS file-system description object
 * @anyway: try harder (passed through to the dirty-LEB search)
 *
 * This function does out-of-place garbage collection. Return codes:
 *   o a positive LEB number if a LEB was freed and may be used;
 *   o %-EAGAIN if the caller has to run a commit first;
 *   o %-ENOSPC if no progress could be made within the LEB limits;
 *   o other negative error codes on failure (the FS goes read-only then).
 *
 * Runs with the commit lock held (asserted below) and takes the GC head
 * write-buffer mutex for the duration of the work.
 */
673 int ubifs_garbage_collect(struct ubifs_info *c, int anyway)
674 {
675 int i, err, ret, min_space = c->dead_wm;
676 struct ubifs_lprops lp;
677 struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
678
679 ubifs_assert_cmt_locked(c);
680 ubifs_assert(c, !c->ro_media && !c->ro_mount);
681
/* A pending commit has to run before GC can usefully proceed. */
682 if (ubifs_gc_should_commit(c))
683 return -EAGAIN;
684
685 mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
686
687 if (c->ro_error) {
688 ret = -EROFS;
689 goto out_unlock;
690 }
691
692
/* The GC head write-buffer is expected to be empty on entry. */
693 ubifs_assert(c, !wbuf->used);
694
695 for (i = 0; ; i++) {
696 int space_before, space_after;
697
698 cond_resched();
699
700
/* Re-check for a pending commit on every iteration. */
701 if (ubifs_gc_should_commit(c)) {
702 ret = -EAGAIN;
703 break;
704 }
705
706 if (i > SOFT_LEBS_LIMIT && !list_empty(&c->idx_gc)) {
707
708
709
/*
 * Soft limit hit and some index LEBs were GC'ed: a commit would
 * release their space, so request one instead of grinding on.
 */
710
711 dbg_gc("soft limit, some index LEBs GC'ed, -EAGAIN");
712 ubifs_commit_required(c);
713 ret = -EAGAIN;
714 break;
715 }
716
717 if (i > HARD_LEBS_LIMIT) {
718
719
/* Hard limit: give up rather than loop forever. */
720
721
722 dbg_gc("hard limit, -ENOSPC");
723 ret = -ENOSPC;
724 break;
725 }
726
727
728
729
730
/*
 * Pick a dirty LEB with at least @min_space of reclaimable space.
 * NOTE(review): the last argument (anyway ? 0 : 1) presumably controls
 * whether LEBs with free space may be picked - confirm against
 * ubifs_find_dirty_leb().
 */
731
732
733
734 ret = ubifs_find_dirty_leb(c, &lp, min_space, anyway ? 0 : 1);
735 if (ret) {
736 if (ret == -ENOSPC)
737 dbg_gc("no more dirty LEBs");
738 break;
739 }
740
741 dbg_gc("found LEB %d: free %d, dirty %d, sum %d (min. space %d)",
742 lp.lnum, lp.free, lp.dirty, lp.free + lp.dirty,
743 min_space);
744
745 space_before = c->leb_size - wbuf->offs - wbuf->used;
746 if (wbuf->lnum == -1)
747 space_before = 0;
748
749 ret = ubifs_garbage_collect_leb(c, &lp);
750 if (ret < 0) {
751 if (ret == -EAGAIN) {
752
753
754
/*
 * Not a hard error: return the LEB to lprops and let the
 * caller run the commit.
 */
755
756
757
758 err = ubifs_return_leb(c, lp.lnum);
759 if (err)
760 ret = err;
761 break;
762 }
763 goto out;
764 }
765
766 if (ret == LEB_FREED) {
767
/* The whole LEB was reclaimed - hand it to the caller. */
768 dbg_gc("LEB %d freed, return", lp.lnum);
769 ret = lp.lnum;
770 break;
771 }
772
773 if (ret == LEB_FREED_IDX) {
774
775
776
/* Index LEBs are only released at commit time - keep going. */
777
778
779
780 dbg_gc("indexing LEB %d freed, continue", lp.lnum);
781 continue;
782 }
783
784 ubifs_assert(c, ret == LEB_RETAINED);
785 space_after = c->leb_size - wbuf->offs - wbuf->used;
786 dbg_gc("LEB %d retained, freed %d bytes", lp.lnum,
787 space_after - space_before);
788
789 if (space_after > space_before) {
790
/* Progress was made - lower the threshold (but not below dead_wm). */
791 min_space >>= 1;
792 if (min_space < c->dead_wm)
793 min_space = c->dead_wm;
794 continue;
795 }
796
797 dbg_gc("did not make progress");
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
/*
 * No space was gained from this LEB. Retry a few times, then raise the
 * threshold so the next pick has more reclaimable space.
 * NOTE(review): the exact reason moved nodes can consume more space
 * than before is not visible here - confirm against the original
 * commentary for this file.
 */
814
815 if (i < SOFT_LEBS_LIMIT) {
816 dbg_gc("try again");
817 continue;
818 }
819
820 min_space <<= 1;
821 if (min_space > c->dark_wm)
822 min_space = c->dark_wm;
823 dbg_gc("set min. space to %d", min_space);
824 }
825
826 if (ret == -ENOSPC && !list_empty(&c->idx_gc)) {
827 dbg_gc("no space, some index LEBs GC'ed, -EAGAIN");
828 ubifs_commit_required(c);
829 ret = -EAGAIN;
830 }
831
/* Flush the GC head and re-erase the reserved GC LEB before leaving. */
832 err = ubifs_wbuf_sync_nolock(wbuf);
833 if (!err)
834 err = ubifs_leb_unmap(c, c->gc_lnum);
835 if (err) {
836 ret = err;
837 goto out;
838 }
839 out_unlock:
840 mutex_unlock(&wbuf->io_mutex);
841 return ret;
842
/*
 * Fatal-error path: the file-system goes read-only.
 * NOTE(review): this path is also reachable via the sync/unmap failure
 * above even when the loop exited before @lp was ever assigned (e.g. an
 * early -EAGAIN break), in which case ubifs_return_leb() below reads an
 * uninitialized lp.lnum - looks like a latent bug; confirm upstream.
 */
843 out:
844 ubifs_assert(c, ret < 0);
845 ubifs_assert(c, ret != -ENOSPC && ret != -EAGAIN);
846 ubifs_wbuf_sync_nolock(wbuf);
847 ubifs_ro_mode(c, ret);
848 mutex_unlock(&wbuf->io_mutex);
849 ubifs_return_leb(c, lp.lnum);
850 return ret;
851 }
852
853
854
855
856
857
858
859
860
861
862
863
/**
 * ubifs_gc_start_commit - garbage collection at start of commit.
 * @c: UBIFS file-system description object
 *
 * Unmaps freeable non-index LEBs and turns them into free space, and
 * schedules GC'ed and freeable index LEBs to be unmapped once the commit
 * completes. Returns %0 on success and a negative error code on failure.
 * Runs under the lprops lock (ubifs_get_lprops()/ubifs_release_lprops()).
 */
864 int ubifs_gc_start_commit(struct ubifs_info *c)
865 {
866 struct ubifs_gced_idx_leb *idx_gc;
867 const struct ubifs_lprops *lp;
868 int err = 0, flags;
869
870 ubifs_get_lprops(c);
871
872
873
/* Turn freeable non-index LEBs into free space right away. */
874
875
876 while (1) {
877 lp = ubifs_fast_find_freeable(c);
878 if (!lp)
879 break;
880 ubifs_assert(c, !(lp->flags & LPROPS_TAKEN));
881 ubifs_assert(c, !(lp->flags & LPROPS_INDEX));
882 err = ubifs_leb_unmap(c, lp->lnum);
883 if (err)
884 goto out;
885 lp = ubifs_change_lp(c, lp, c->leb_size, 0, lp->flags, 0);
886 if (IS_ERR(lp)) {
887 err = PTR_ERR(lp);
888 goto out;
889 }
890 ubifs_assert(c, !(lp->flags & LPROPS_TAKEN));
891 ubifs_assert(c, !(lp->flags & LPROPS_INDEX));
892 }
893
894
/* Index LEBs GC'ed earlier may be unmapped after this commit. */
895 list_for_each_entry(idx_gc, &c->idx_gc, list)
896 idx_gc->unmap = 1;
897
898
/*
 * Take freeable index LEBs, clear their index flag and queue them for
 * unmapping once the commit has finished.
 */
899 while (1) {
900 lp = ubifs_fast_find_frdi_idx(c);
901 if (IS_ERR(lp)) {
902 err = PTR_ERR(lp);
903 goto out;
904 }
905 if (!lp)
906 break;
907 idx_gc = kmalloc(sizeof(struct ubifs_gced_idx_leb), GFP_NOFS);
908 if (!idx_gc) {
909 err = -ENOMEM;
910 goto out;
911 }
912 ubifs_assert(c, !(lp->flags & LPROPS_TAKEN));
913 ubifs_assert(c, lp->flags & LPROPS_INDEX);
914
/* Mark the LEB taken and clear %LPROPS_INDEX in one update. */
915 flags = (lp->flags | LPROPS_TAKEN) ^ LPROPS_INDEX;
916 lp = ubifs_change_lp(c, lp, c->leb_size, 0, flags, 1);
917 if (IS_ERR(lp)) {
918 err = PTR_ERR(lp);
919 kfree(idx_gc);
920 goto out;
921 }
922 ubifs_assert(c, lp->flags & LPROPS_TAKEN);
923 ubifs_assert(c, !(lp->flags & LPROPS_INDEX));
924 idx_gc->lnum = lp->lnum;
925 idx_gc->unmap = 1;
926 list_add(&idx_gc->list, &c->idx_gc);
927 }
928 out:
929 ubifs_release_lprops(c);
930 return err;
931 }
932
933
934
935
936
937
938
939 int ubifs_gc_end_commit(struct ubifs_info *c)
940 {
941 struct ubifs_gced_idx_leb *idx_gc, *tmp;
942 struct ubifs_wbuf *wbuf;
943 int err = 0;
944
945 wbuf = &c->jheads[GCHD].wbuf;
946 mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
947 list_for_each_entry_safe(idx_gc, tmp, &c->idx_gc, list)
948 if (idx_gc->unmap) {
949 dbg_gc("LEB %d", idx_gc->lnum);
950 err = ubifs_leb_unmap(c, idx_gc->lnum);
951 if (err)
952 goto out;
953 err = ubifs_change_one_lp(c, idx_gc->lnum, LPROPS_NC,
954 LPROPS_NC, 0, LPROPS_TAKEN, -1);
955 if (err)
956 goto out;
957 list_del(&idx_gc->list);
958 kfree(idx_gc);
959 }
960 out:
961 mutex_unlock(&wbuf->io_mutex);
962 return err;
963 }
964
965
966
967
968
969
970
971
972
973 void ubifs_destroy_idx_gc(struct ubifs_info *c)
974 {
975 while (!list_empty(&c->idx_gc)) {
976 struct ubifs_gced_idx_leb *idx_gc;
977
978 idx_gc = list_entry(c->idx_gc.next, struct ubifs_gced_idx_leb,
979 list);
980 c->idx_gc_cnt -= 1;
981 list_del(&idx_gc->list);
982 kfree(idx_gc);
983 }
984 }
985
986
987
988
989
990
991
992 int ubifs_get_idx_gc_leb(struct ubifs_info *c)
993 {
994 struct ubifs_gced_idx_leb *idx_gc;
995 int lnum;
996
997 if (list_empty(&c->idx_gc))
998 return -ENOSPC;
999 idx_gc = list_entry(c->idx_gc.next, struct ubifs_gced_idx_leb, list);
1000 lnum = idx_gc->lnum;
1001
1002 list_del(&idx_gc->list);
1003 kfree(idx_gc);
1004 return lnum;
1005 }