This source file includes the following definitions:
- insert_old_idx
- insert_old_idx_znode
- ins_clr_old_idx_znode
- destroy_old_idx
- copy_znode
- add_idx_dirt
- dirty_cow_znode
- lnc_add
- lnc_add_directly
- lnc_free
- tnc_read_hashed_node
- try_read_node
- fallible_read_node
- matches_name
- get_znode
- tnc_next
- tnc_prev
- resolve_collision
- fallible_matches_name
- fallible_resolve_collision
- matches_position
- resolve_collision_directly
- dirty_cow_bottom_up
- ubifs_lookup_level0
- lookup_level0_dirty
- maybe_leb_gced
- ubifs_tnc_locate
- ubifs_tnc_get_bu_keys
- read_wbuf
- validate_data_node
- ubifs_tnc_bulk_read
- do_lookup_nm
- ubifs_tnc_lookup_nm
- search_dh_cookie
- do_lookup_dh
- ubifs_tnc_lookup_dh
- correct_parent_keys
- insert_zbranch
- tnc_insert
- ubifs_tnc_add
- ubifs_tnc_replace
- ubifs_tnc_add_nm
- tnc_delete
- ubifs_tnc_remove
- ubifs_tnc_remove_nm
- ubifs_tnc_remove_dh
- key_in_range
- ubifs_tnc_remove_range
- ubifs_tnc_remove_ino
- ubifs_tnc_next_ent
- tnc_destroy_cnext
- ubifs_tnc_close
- left_znode
- right_znode
- lookup_znode
- is_idx_node_in_tnc
- is_leaf_node_in_tnc
- ubifs_tnc_has_node
- ubifs_dirty_idx_node
- dbg_check_inode_size
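
/*
 * This file implements TNC (Tree Node Cache), which caches indexing nodes of
 * the UBIFS B-tree (the index). The functions below look up, add, replace and
 * remove index entries, loading znodes from the flash media on demand.
 */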
21 #include <linux/crc32.h>
22 #include <linux/slab.h>
23 #include "ubifs.h"
24
25 static int try_read_node(const struct ubifs_info *c, void *buf, int type,
26 struct ubifs_zbranch *zbr);
27 static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
28 struct ubifs_zbranch *zbr, void *node);
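
/*
 * Values returned by the name-matching helpers (matches_name() and
 * fallible_matches_name()) when comparing a directory entry against the
 * name being looked up:
 *
 * NAME_LESS:    the entry's name is less than the one being searched for
 * NAME_MATCHES: the names match
 * NAME_GREATER: the entry's name is greater than the one being searched for
 * NOT_ON_MEDIA: the node is not present on the flash media (only possible
 *               while replaying the journal)
 */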
40 enum {
41 NAME_LESS = 0,
42 NAME_MATCHES = 1,
43 NAME_GREATER = 2,
44 NOT_ON_MEDIA = 3,
45 };
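
/*
 * insert_old_idx - record an obsolete index node location.
 *
 * The LEB number and offset of the obsolete index node are remembered in the
 * c->old_idx RB-tree so that the space is not overwritten before the next
 * commit. Returns %0 on success and %-ENOMEM if allocation fails.
 */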
70 static int insert_old_idx(struct ubifs_info *c, int lnum, int offs)
71 {
72 struct ubifs_old_idx *old_idx, *o;
73 struct rb_node **p, *parent = NULL;
74
75 old_idx = kmalloc(sizeof(struct ubifs_old_idx), GFP_NOFS);
76 if (unlikely(!old_idx))
77 return -ENOMEM;
78 old_idx->lnum = lnum;
79 old_idx->offs = offs;
80
81 p = &c->old_idx.rb_node;
82 while (*p) {
83 parent = *p;
84 o = rb_entry(parent, struct ubifs_old_idx, rb);
85 if (lnum < o->lnum)
86 p = &(*p)->rb_left;
87 else if (lnum > o->lnum)
88 p = &(*p)->rb_right;
89 else if (offs < o->offs)
90 p = &(*p)->rb_left;
91 else if (offs > o->offs)
92 p = &(*p)->rb_right;
93 else {
94 ubifs_err(c, "old idx added twice!");
95 kfree(old_idx);
96 return 0;
97 }
98 }
99 rb_link_node(&old_idx->rb, parent, p);
100 rb_insert_color(&old_idx->rb, &c->old_idx);
101 return 0;
102 }
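
/*
 * insert_old_idx_znode - record the old index location of a znode.
 *
 * The znode's on-flash position is taken from its parent's zbranch (or from
 * the zroot for the root znode) and added to the old index tree. Returns %0
 * on success and a negative error code on failure.
 */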
111 int insert_old_idx_znode(struct ubifs_info *c, struct ubifs_znode *znode)
112 {
113 if (znode->parent) {
114 struct ubifs_zbranch *zbr;
115
116 zbr = &znode->parent->zbranch[znode->iip];
117 if (zbr->len)
118 return insert_old_idx(c, zbr->lnum, zbr->offs);
119 } else
120 if (c->zroot.len)
121 return insert_old_idx(c, c->zroot.lnum,
122 c->zroot.offs);
123 return 0;
124 }
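
/*
 * ins_clr_old_idx_znode - record a znode's old index location and clear it.
 *
 * Like insert_old_idx_znode(), but the stored LEB, offset and length are
 * then zeroed so the znode is no longer considered to be on the media.
 */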
133 static int ins_clr_old_idx_znode(struct ubifs_info *c,
134 struct ubifs_znode *znode)
135 {
136 int err;
137
138 if (znode->parent) {
139 struct ubifs_zbranch *zbr;
140
141 zbr = &znode->parent->zbranch[znode->iip];
142 if (zbr->len) {
143 err = insert_old_idx(c, zbr->lnum, zbr->offs);
144 if (err)
145 return err;
146 zbr->lnum = 0;
147 zbr->offs = 0;
148 zbr->len = 0;
149 }
150 } else
151 if (c->zroot.len) {
152 err = insert_old_idx(c, c->zroot.lnum, c->zroot.offs);
153 if (err)
154 return err;
155 c->zroot.lnum = 0;
156 c->zroot.offs = 0;
157 c->zroot.len = 0;
158 }
159 return 0;
160 }
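
/*
 * destroy_old_idx - free the RB-tree of obsolete index node locations.
 */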
172 void destroy_old_idx(struct ubifs_info *c)
173 {
174 struct ubifs_old_idx *old_idx, *n;
175
176 rbtree_postorder_for_each_entry_safe(old_idx, n, &c->old_idx, rb)
177 kfree(old_idx);
178
179 c->old_idx = RB_ROOT;
180 }
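
/*
 * copy_znode - duplicate a znode for copy-on-write.
 *
 * The copy is marked dirty and no longer copy-on-write, the original znode
 * is marked obsolete, and the children of a non-leaf znode are re-parented
 * to the copy. Returns the new znode or an ERR_PTR() on allocation failure.
 */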
189 static struct ubifs_znode *copy_znode(struct ubifs_info *c,
190 struct ubifs_znode *znode)
191 {
192 struct ubifs_znode *zn;
193
194 zn = kmemdup(znode, c->max_znode_sz, GFP_NOFS);
195 if (unlikely(!zn))
196 return ERR_PTR(-ENOMEM);
197
198 zn->cnext = NULL;
199 __set_bit(DIRTY_ZNODE, &zn->flags);
200 __clear_bit(COW_ZNODE, &zn->flags);
201
202 ubifs_assert(c, !ubifs_zn_obsolete(znode));
203 __set_bit(OBSOLETE_ZNODE, &znode->flags);
204
205 if (znode->level != 0) {
206 int i;
207 const int n = zn->child_cnt;
208
209
210 for (i = 0; i < n; i++) {
211 struct ubifs_zbranch *zbr = &zn->zbranch[i];
212
213 if (zbr->znode)
214 zbr->znode->parent = zn;
215 }
216 }
217
218 atomic_long_inc(&c->dirty_zn_cnt);
219 return zn;
220 }
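
/*
 * add_idx_dirt - account @dirt bytes of obsolete index space in LEB @lnum
 * and subtract them from the calculated index size.
 */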
230 static int add_idx_dirt(struct ubifs_info *c, int lnum, int dirt)
231 {
232 c->calc_idx_sz -= ALIGN(dirt, 8);
233 return ubifs_add_dirt(c, lnum, dirt);
234 }
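
/*
 * dirty_cow_znode - copy-on-write a znode before modifying it.
 *
 * If the znode is not involved in the current commit it is simply marked
 * dirty (accounting its old location as index dirt). Otherwise it is copied,
 * the old on-flash location is recorded in the old index tree, and the
 * zbranch is updated to point at the copy. Returns the znode that may be
 * modified, or an ERR_PTR() on failure.
 */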
243 static struct ubifs_znode *dirty_cow_znode(struct ubifs_info *c,
244 struct ubifs_zbranch *zbr)
245 {
246 struct ubifs_znode *znode = zbr->znode;
247 struct ubifs_znode *zn;
248 int err;
249
250 if (!ubifs_zn_cow(znode)) {
251
252 if (!test_and_set_bit(DIRTY_ZNODE, &znode->flags)) {
253 atomic_long_inc(&c->dirty_zn_cnt);
254 atomic_long_dec(&c->clean_zn_cnt);
255 atomic_long_dec(&ubifs_clean_zn_cnt);
256 err = add_idx_dirt(c, zbr->lnum, zbr->len);
257 if (unlikely(err))
258 return ERR_PTR(err);
259 }
260 return znode;
261 }
262
263 zn = copy_znode(c, znode);
264 if (IS_ERR(zn))
265 return zn;
266
267 if (zbr->len) {
268 err = insert_old_idx(c, zbr->lnum, zbr->offs);
269 if (unlikely(err))
270 return ERR_PTR(err);
271 err = add_idx_dirt(c, zbr->lnum, zbr->len);
272 } else
273 err = 0;
274
275 zbr->znode = zn;
276 zbr->lnum = 0;
277 zbr->offs = 0;
278 zbr->len = 0;
279
280 if (unlikely(err))
281 return ERR_PTR(err);
282 return zn;
283 }
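
/*
 * lnc_add - add a leaf node to the leaf node cache (LNC).
 *
 * The LNC caches directory entry nodes (nodes with "hashed" keys) in
 * zbr->leaf so that name-resolving lookups do not have to re-read them from
 * the media. The node is validated and a copy of it is cached; failure to
 * allocate the copy is not treated as an error. Returns %0 on success or a
 * negative error code if the node is invalid.
 */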
305 static int lnc_add(struct ubifs_info *c, struct ubifs_zbranch *zbr,
306 const void *node)
307 {
308 int err;
309 void *lnc_node;
310 const struct ubifs_dent_node *dent = node;
311
312 ubifs_assert(c, !zbr->leaf);
313 ubifs_assert(c, zbr->len != 0);
314 ubifs_assert(c, is_hash_key(c, &zbr->key));
315
316 err = ubifs_validate_entry(c, dent);
317 if (err) {
318 dump_stack();
319 ubifs_dump_node(c, dent);
320 return err;
321 }
322
323 lnc_node = kmemdup(node, zbr->len, GFP_NOFS);
324 if (!lnc_node)
325
326 return 0;
327
328 zbr->leaf = lnc_node;
329 return 0;
330 }
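
/*
 * lnc_add_directly - add a node to the LNC without copying it.
 *
 * The caller's buffer itself is cached, so it must have been allocated with
 * kmalloc() and ownership passes to the LNC. Returns %0 on success or a
 * negative error code if the node fails validation.
 */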
341 static int lnc_add_directly(struct ubifs_info *c, struct ubifs_zbranch *zbr,
342 void *node)
343 {
344 int err;
345
346 ubifs_assert(c, !zbr->leaf);
347 ubifs_assert(c, zbr->len != 0);
348
349 err = ubifs_validate_entry(c, node);
350 if (err) {
351 dump_stack();
352 ubifs_dump_node(c, node);
353 return err;
354 }
355
356 zbr->leaf = node;
357 return 0;
358 }
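
/*
 * lnc_free - free the leaf node cached in a zbranch, if there is one.
 */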
365 static void lnc_free(struct ubifs_zbranch *zbr)
366 {
367 if (!zbr->leaf)
368 return;
369 kfree(zbr->leaf);
370 zbr->leaf = NULL;
371 }
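
/*
 * tnc_read_hashed_node - read a directory entry ("hashed" key) leaf node.
 *
 * The node is returned from the LNC if it is cached there; otherwise it is
 * read from the media (fallibly while replaying the journal) and then added
 * to the LNC. Returns %0 on success, %-ENOENT if the node is missing during
 * replay, or another negative error code.
 */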
384 static int tnc_read_hashed_node(struct ubifs_info *c, struct ubifs_zbranch *zbr,
385 void *node)
386 {
387 int err;
388
389 ubifs_assert(c, is_hash_key(c, &zbr->key));
390
391 if (zbr->leaf) {
392
393 ubifs_assert(c, zbr->len != 0);
394 memcpy(node, zbr->leaf, zbr->len);
395 return 0;
396 }
397
398 if (c->replaying) {
399 err = fallible_read_node(c, &zbr->key, zbr, node);
404 if (err == 0)
405 err = -ENOENT;
406 else if (err == 1)
407 err = 0;
408 } else {
409 err = ubifs_tnc_read_node(c, zbr, node);
410 }
411 if (err)
412 return err;
413
414
415 err = lnc_add(c, zbr, node);
416 return err;
417 }
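
/*
 * try_read_node - read a node, tolerating failures.
 *
 * The node is read and its magic, type, length, CRC and (if enabled) hash
 * are checked. Returns %1 if the node is present and valid, %0 if it is not
 * (which is not an error here), and a negative error code if the read itself
 * failed.
 */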
441 static int try_read_node(const struct ubifs_info *c, void *buf, int type,
442 struct ubifs_zbranch *zbr)
443 {
444 int len = zbr->len;
445 int lnum = zbr->lnum;
446 int offs = zbr->offs;
447 int err, node_len;
448 struct ubifs_ch *ch = buf;
449 uint32_t crc, node_crc;
450
451 dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);
452
453 err = ubifs_leb_read(c, lnum, buf, offs, len, 1);
454 if (err) {
455 ubifs_err(c, "cannot read node type %d from LEB %d:%d, error %d",
456 type, lnum, offs, err);
457 return err;
458 }
459
460 if (le32_to_cpu(ch->magic) != UBIFS_NODE_MAGIC)
461 return 0;
462
463 if (ch->node_type != type)
464 return 0;
465
466 node_len = le32_to_cpu(ch->len);
467 if (node_len != len)
468 return 0;
469
470 if (type != UBIFS_DATA_NODE || !c->no_chk_data_crc || c->mounting ||
471 c->remounting_rw) {
472 crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
473 node_crc = le32_to_cpu(ch->crc);
474 if (crc != node_crc)
475 return 0;
476 }
477
478 err = ubifs_node_check_hash(c, buf, zbr->hash);
479 if (err) {
480 ubifs_bad_hash(c, buf, zbr->hash, lnum, offs);
481 return 0;
482 }
483
484 return 1;
485 }
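
/*
 * fallible_read_node - read a leaf node, tolerating a missing node.
 *
 * In addition to try_read_node(), the key stored in the node is compared
 * with the expected key. Returns %1 if the correct node was read, %0 if it
 * is missing or does not match (a dangling branch, possible during replay),
 * and a negative error code on read failure.
 */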
497 static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
498 struct ubifs_zbranch *zbr, void *node)
499 {
500 int ret;
501
502 dbg_tnck(key, "LEB %d:%d, key ", zbr->lnum, zbr->offs);
503
504 ret = try_read_node(c, node, key_type(c, key), zbr);
505 if (ret == 1) {
506 union ubifs_key node_key;
507 struct ubifs_dent_node *dent = node;
508
509
510 key_read(c, &dent->key, &node_key);
511 if (keys_cmp(c, key, &node_key) != 0)
512 ret = 0;
513 }
514 if (ret == 0 && c->replaying)
515 dbg_mntk(key, "dangling branch LEB %d:%d len %d, key ",
516 zbr->lnum, zbr->offs, zbr->len);
517 return ret;
518 }
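
/*
 * matches_name - compare a directory entry against a name.
 *
 * The entry is taken from the LNC or read from the media (and then cached).
 * Returns %NAME_LESS, %NAME_MATCHES or %NAME_GREATER, or a negative error
 * code on failure.
 */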
531 static int matches_name(struct ubifs_info *c, struct ubifs_zbranch *zbr,
532 const struct fscrypt_name *nm)
533 {
534 struct ubifs_dent_node *dent;
535 int nlen, err;
536
537
538 if (!zbr->leaf) {
539 dent = kmalloc(zbr->len, GFP_NOFS);
540 if (!dent)
541 return -ENOMEM;
542
543 err = ubifs_tnc_read_node(c, zbr, dent);
544 if (err)
545 goto out_free;
546
547
548 err = lnc_add_directly(c, zbr, dent);
549 if (err)
550 goto out_free;
551 } else
552 dent = zbr->leaf;
553
554 nlen = le16_to_cpu(dent->nlen);
555 err = memcmp(dent->name, fname_name(nm), min_t(int, nlen, fname_len(nm)));
556 if (err == 0) {
557 if (nlen == fname_len(nm))
558 return NAME_MATCHES;
559 else if (nlen < fname_len(nm))
560 return NAME_LESS;
561 else
562 return NAME_GREATER;
563 } else if (err < 0)
564 return NAME_LESS;
565 else
566 return NAME_GREATER;
567
568 out_free:
569 kfree(dent);
570 return err;
571 }
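
/*
 * get_znode - return the child znode at slot @n, loading it from the media
 * if it is not in memory. Returns the znode or an ERR_PTR().
 */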
581 static struct ubifs_znode *get_znode(struct ubifs_info *c,
582 struct ubifs_znode *znode, int n)
583 {
584 struct ubifs_zbranch *zbr;
585
586 zbr = &znode->zbranch[n];
587 if (zbr->znode)
588 znode = zbr->znode;
589 else
590 znode = ubifs_load_znode(c, zbr, znode, n);
591 return znode;
592 }
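
/*
 * tnc_next - step to the next leaf-level zbranch (in key order).
 *
 * *zn and *n are advanced to the next element. Returns %0 on success,
 * %-ENOENT if there is no next element, or another negative error code.
 */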
603 static int tnc_next(struct ubifs_info *c, struct ubifs_znode **zn, int *n)
604 {
605 struct ubifs_znode *znode = *zn;
606 int nn = *n;
607
608 nn += 1;
609 if (nn < znode->child_cnt) {
610 *n = nn;
611 return 0;
612 }
613 while (1) {
614 struct ubifs_znode *zp;
615
616 zp = znode->parent;
617 if (!zp)
618 return -ENOENT;
619 nn = znode->iip + 1;
620 znode = zp;
621 if (nn < znode->child_cnt) {
622 znode = get_znode(c, znode, nn);
623 if (IS_ERR(znode))
624 return PTR_ERR(znode);
625 while (znode->level != 0) {
626 znode = get_znode(c, znode, 0);
627 if (IS_ERR(znode))
628 return PTR_ERR(znode);
629 }
630 nn = 0;
631 break;
632 }
633 }
634 *zn = znode;
635 *n = nn;
636 return 0;
637 }
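
/*
 * tnc_prev - step to the previous leaf-level zbranch (in key order).
 *
 * *zn and *n are moved to the previous element. Returns %0 on success,
 * %-ENOENT if there is no previous element, or another negative error code.
 */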
648 static int tnc_prev(struct ubifs_info *c, struct ubifs_znode **zn, int *n)
649 {
650 struct ubifs_znode *znode = *zn;
651 int nn = *n;
652
653 if (nn > 0) {
654 *n = nn - 1;
655 return 0;
656 }
657 while (1) {
658 struct ubifs_znode *zp;
659
660 zp = znode->parent;
661 if (!zp)
662 return -ENOENT;
663 nn = znode->iip - 1;
664 znode = zp;
665 if (nn >= 0) {
666 znode = get_znode(c, znode, nn);
667 if (IS_ERR(znode))
668 return PTR_ERR(znode);
669 while (znode->level != 0) {
670 nn = znode->child_cnt - 1;
671 znode = get_znode(c, znode, nn);
672 if (IS_ERR(znode))
673 return PTR_ERR(znode);
674 }
675 nn = znode->child_cnt - 1;
676 break;
677 }
678 }
679 *zn = znode;
680 *n = nn;
681 return 0;
682 }
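
/*
 * resolve_collision - resolve a key collision by comparing names.
 *
 * Several directory entries may share the same (hashed) key, so the entries
 * around the found position are searched for the one whose name matches @nm.
 * Returns %1 and points *zn/*n at the match, %0 if there is no match (with
 * *zn/*n at the position after which the entry would be inserted), or a
 * negative error code.
 */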
700 static int resolve_collision(struct ubifs_info *c, const union ubifs_key *key,
701 struct ubifs_znode **zn, int *n,
702 const struct fscrypt_name *nm)
703 {
704 int err;
705
706 err = matches_name(c, &(*zn)->zbranch[*n], nm);
707 if (unlikely(err < 0))
708 return err;
709 if (err == NAME_MATCHES)
710 return 1;
711
712 if (err == NAME_GREATER) {
713
714 while (1) {
715 err = tnc_prev(c, zn, n);
716 if (err == -ENOENT) {
717 ubifs_assert(c, *n == 0);
718 *n = -1;
719 return 0;
720 }
721 if (err < 0)
722 return err;
723 if (keys_cmp(c, &(*zn)->zbranch[*n].key, key)) {
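/*
 * We have moved past all entries with this key: every colliding
 * name is greater than @nm, so @nm would be inserted after branch
 * *n. If *n is the last branch of this znode, the insertion
 * position is actually at the beginning of the next znode.
 */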
753 if (*n == (*zn)->child_cnt - 1) {
754 err = tnc_next(c, zn, n);
755 if (err) {
756
757 ubifs_assert(c, 0);
758 if (err == -ENOENT)
759 err = -EINVAL;
760 return err;
761 }
762 ubifs_assert(c, *n == 0);
763 *n = -1;
764 }
765 return 0;
766 }
767 err = matches_name(c, &(*zn)->zbranch[*n], nm);
768 if (err < 0)
769 return err;
770 if (err == NAME_LESS)
771 return 0;
772 if (err == NAME_MATCHES)
773 return 1;
774 ubifs_assert(c, err == NAME_GREATER);
775 }
776 } else {
777 int nn = *n;
778 struct ubifs_znode *znode = *zn;
779
780
781 while (1) {
782 err = tnc_next(c, &znode, &nn);
783 if (err == -ENOENT)
784 return 0;
785 if (err < 0)
786 return err;
787 if (keys_cmp(c, &znode->zbranch[nn].key, key))
788 return 0;
789 err = matches_name(c, &znode->zbranch[nn], nm);
790 if (err < 0)
791 return err;
792 if (err == NAME_GREATER)
793 return 0;
794 *zn = znode;
795 *n = nn;
796 if (err == NAME_MATCHES)
797 return 1;
798 ubifs_assert(c, err == NAME_LESS);
799 }
800 }
801 }
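
/*
 * fallible_matches_name - like matches_name(), but tolerate a missing node.
 *
 * Used during journal replay, when a branch may point to a node that is no
 * longer on the media. Returns %NAME_LESS, %NAME_MATCHES, %NAME_GREATER,
 * %NOT_ON_MEDIA, or a negative error code.
 */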
818 static int fallible_matches_name(struct ubifs_info *c,
819 struct ubifs_zbranch *zbr,
820 const struct fscrypt_name *nm)
821 {
822 struct ubifs_dent_node *dent;
823 int nlen, err;
824
825
826 if (!zbr->leaf) {
827 dent = kmalloc(zbr->len, GFP_NOFS);
828 if (!dent)
829 return -ENOMEM;
830
831 err = fallible_read_node(c, &zbr->key, zbr, dent);
832 if (err < 0)
833 goto out_free;
834 if (err == 0) {
835
836 err = NOT_ON_MEDIA;
837 goto out_free;
838 }
839 ubifs_assert(c, err == 1);
840
841 err = lnc_add_directly(c, zbr, dent);
842 if (err)
843 goto out_free;
844 } else
845 dent = zbr->leaf;
846
847 nlen = le16_to_cpu(dent->nlen);
848 err = memcmp(dent->name, fname_name(nm), min_t(int, nlen, fname_len(nm)));
849 if (err == 0) {
850 if (nlen == fname_len(nm))
851 return NAME_MATCHES;
852 else if (nlen < fname_len(nm))
853 return NAME_LESS;
854 else
855 return NAME_GREATER;
856 } else if (err < 0)
857 return NAME_LESS;
858 else
859 return NAME_GREATER;
860
861 out_free:
862 kfree(dent);
863 return err;
864 }
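
/*
 * fallible_resolve_collision - resolve a key collision during replay.
 *
 * Like resolve_collision(), but dangling branches (nodes not on the media)
 * are tolerated. If no real match is found, we are not adding (@adding is
 * zero), and a dangling branch with a colliding key exists, that branch is
 * reported as the match so that it can be cleaned up. Returns %1 on a match,
 * %0 on no match, or a negative error code.
 */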
888 static int fallible_resolve_collision(struct ubifs_info *c,
889 const union ubifs_key *key,
890 struct ubifs_znode **zn, int *n,
891 const struct fscrypt_name *nm,
892 int adding)
893 {
894 struct ubifs_znode *o_znode = NULL, *znode = *zn;
895 int uninitialized_var(o_n), err, cmp, unsure = 0, nn = *n;
896
897 cmp = fallible_matches_name(c, &znode->zbranch[nn], nm);
898 if (unlikely(cmp < 0))
899 return cmp;
900 if (cmp == NAME_MATCHES)
901 return 1;
902 if (cmp == NOT_ON_MEDIA) {
903 o_znode = znode;
904 o_n = nn;
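
/*
 * The node is not on the media, so we cannot tell whether the name
 * matches or not; remember this branch and keep searching in both
 * directions.
 */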
910 unsure = 1;
911 } else if (!adding)
912 unsure = 1;
913
914 if (cmp == NAME_GREATER || unsure) {
915
916 while (1) {
917 err = tnc_prev(c, zn, n);
918 if (err == -ENOENT) {
919 ubifs_assert(c, *n == 0);
920 *n = -1;
921 break;
922 }
923 if (err < 0)
924 return err;
925 if (keys_cmp(c, &(*zn)->zbranch[*n].key, key)) {
926
927 if (*n == (*zn)->child_cnt - 1) {
928 err = tnc_next(c, zn, n);
929 if (err) {
930
931 ubifs_assert(c, 0);
932 if (err == -ENOENT)
933 err = -EINVAL;
934 return err;
935 }
936 ubifs_assert(c, *n == 0);
937 *n = -1;
938 }
939 break;
940 }
941 err = fallible_matches_name(c, &(*zn)->zbranch[*n], nm);
942 if (err < 0)
943 return err;
944 if (err == NAME_MATCHES)
945 return 1;
946 if (err == NOT_ON_MEDIA) {
947 o_znode = *zn;
948 o_n = *n;
949 continue;
950 }
951 if (!adding)
952 continue;
953 if (err == NAME_LESS)
954 break;
955 else
956 unsure = 0;
957 }
958 }
959
960 if (cmp == NAME_LESS || unsure) {
961
962 *zn = znode;
963 *n = nn;
964 while (1) {
965 err = tnc_next(c, &znode, &nn);
966 if (err == -ENOENT)
967 break;
968 if (err < 0)
969 return err;
970 if (keys_cmp(c, &znode->zbranch[nn].key, key))
971 break;
972 err = fallible_matches_name(c, &znode->zbranch[nn], nm);
973 if (err < 0)
974 return err;
975 if (err == NAME_GREATER)
976 break;
977 *zn = znode;
978 *n = nn;
979 if (err == NAME_MATCHES)
980 return 1;
981 if (err == NOT_ON_MEDIA) {
982 o_znode = znode;
983 o_n = nn;
984 }
985 }
986 }
987
988
989 if (adding || !o_znode)
990 return 0;
991
992 dbg_mntk(key, "dangling match LEB %d:%d len %d key ",
993 o_znode->zbranch[o_n].lnum, o_znode->zbranch[o_n].offs,
994 o_znode->zbranch[o_n].len);
995 *zn = o_znode;
996 *n = o_n;
997 return 1;
998 }
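
/*
 * matches_position - test whether a zbranch points at LEB @lnum, offset
 * @offs.
 */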
1008 static int matches_position(struct ubifs_zbranch *zbr, int lnum, int offs)
1009 {
1010 if (zbr->lnum == lnum && zbr->offs == offs)
1011 return 1;
1012 else
1013 return 0;
1014 }
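
/*
 * resolve_collision_directly - resolve a key collision by node position.
 *
 * Colliding entries are searched for the one whose node is stored at
 * LEB @lnum, offset @offs. Returns %1 if found (with *zn/*n updated), %0 if
 * not, or a negative error code.
 */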
1033 static int resolve_collision_directly(struct ubifs_info *c,
1034 const union ubifs_key *key,
1035 struct ubifs_znode **zn, int *n,
1036 int lnum, int offs)
1037 {
1038 struct ubifs_znode *znode;
1039 int nn, err;
1040
1041 znode = *zn;
1042 nn = *n;
1043 if (matches_position(&znode->zbranch[nn], lnum, offs))
1044 return 1;
1045
1046
1047 while (1) {
1048 err = tnc_prev(c, &znode, &nn);
1049 if (err == -ENOENT)
1050 break;
1051 if (err < 0)
1052 return err;
1053 if (keys_cmp(c, &znode->zbranch[nn].key, key))
1054 break;
1055 if (matches_position(&znode->zbranch[nn], lnum, offs)) {
1056 *zn = znode;
1057 *n = nn;
1058 return 1;
1059 }
1060 }
1061
1062
1063 znode = *zn;
1064 nn = *n;
1065 while (1) {
1066 err = tnc_next(c, &znode, &nn);
1067 if (err == -ENOENT)
1068 return 0;
1069 if (err < 0)
1070 return err;
1071 if (keys_cmp(c, &znode->zbranch[nn].key, key))
1072 return 0;
1073 *zn = znode;
1074 *n = nn;
1075 if (matches_position(&znode->zbranch[nn], lnum, offs))
1076 return 1;
1077 }
1078 }
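
/*
 * dirty_cow_bottom_up - copy-on-write a znode and all its ancestors.
 *
 * The path from the root to @znode is recorded, then every znode on it is
 * dirtied (copied if necessary) from the top down. Returns the possibly new
 * version of @znode, or an ERR_PTR() on failure.
 */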
1090 static struct ubifs_znode *dirty_cow_bottom_up(struct ubifs_info *c,
1091 struct ubifs_znode *znode)
1092 {
1093 struct ubifs_znode *zp;
1094 int *path = c->bottom_up_buf, p = 0;
1095
1096 ubifs_assert(c, c->zroot.znode);
1097 ubifs_assert(c, znode);
1098 if (c->zroot.znode->level > BOTTOM_UP_HEIGHT) {
1099 kfree(c->bottom_up_buf);
1100 c->bottom_up_buf = kmalloc_array(c->zroot.znode->level,
1101 sizeof(int),
1102 GFP_NOFS);
1103 if (!c->bottom_up_buf)
1104 return ERR_PTR(-ENOMEM);
1105 path = c->bottom_up_buf;
1106 }
1107 if (c->zroot.znode->level) {
1108
1109 while (1) {
1110 int n;
1111
1112 zp = znode->parent;
1113 if (!zp)
1114 break;
1115 n = znode->iip;
1116 ubifs_assert(c, p < c->zroot.znode->level);
1117 path[p++] = n;
1118 if (!zp->cnext && ubifs_zn_dirty(znode))
1119 break;
1120 znode = zp;
1121 }
1122 }
1123
1124
1125 while (1) {
1126 struct ubifs_zbranch *zbr;
1127
1128 zp = znode->parent;
1129 if (zp) {
1130 ubifs_assert(c, path[p - 1] >= 0);
1131 ubifs_assert(c, path[p - 1] < zp->child_cnt);
1132 zbr = &zp->zbranch[path[--p]];
1133 znode = dirty_cow_znode(c, zbr);
1134 } else {
1135 ubifs_assert(c, znode == c->zroot.znode);
1136 znode = dirty_cow_znode(c, &c->zroot);
1137 }
1138 if (IS_ERR(znode) || !p)
1139 break;
1140 ubifs_assert(c, path[p - 1] >= 0);
1141 ubifs_assert(c, path[p - 1] < znode->child_cnt);
1142 znode = znode->zbranch[path[p - 1]].znode;
1143 }
1144
1145 return znode;
1146 }
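
/*
 * ubifs_lookup_level0 - search for a key at the bottom (level 0) of the TNC.
 *
 * *zn and *n are set to the level 0 znode and the zbranch slot. Returns %1
 * on an exact match, %0 if the key is not present (*n then refers to the
 * slot after which it would be inserted), or a negative error code. For
 * hashed keys the previous element is also checked, because colliding
 * entries with the same key may precede the found position.
 */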
1170 int ubifs_lookup_level0(struct ubifs_info *c, const union ubifs_key *key,
1171 struct ubifs_znode **zn, int *n)
1172 {
1173 int err, exact;
1174 struct ubifs_znode *znode;
1175 time64_t time = ktime_get_seconds();
1176
1177 dbg_tnck(key, "search key ");
1178 ubifs_assert(c, key_type(c, key) < UBIFS_INVALID_KEY);
1179
1180 znode = c->zroot.znode;
1181 if (unlikely(!znode)) {
1182 znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
1183 if (IS_ERR(znode))
1184 return PTR_ERR(znode);
1185 }
1186
1187 znode->time = time;
1188
1189 while (1) {
1190 struct ubifs_zbranch *zbr;
1191
1192 exact = ubifs_search_zbranch(c, znode, key, n);
1193
1194 if (znode->level == 0)
1195 break;
1196
1197 if (*n < 0)
1198 *n = 0;
1199 zbr = &znode->zbranch[*n];
1200
1201 if (zbr->znode) {
1202 znode->time = time;
1203 znode = zbr->znode;
1204 continue;
1205 }
1206
1207
1208 znode = ubifs_load_znode(c, zbr, znode, *n);
1209 if (IS_ERR(znode))
1210 return PTR_ERR(znode);
1211 }
1212
1213 *zn = znode;
1214 if (exact || !is_hash_key(c, key) || *n != -1) {
1215 dbg_tnc("found %d, lvl %d, n %d", exact, znode->level, *n);
1216 return exact;
1217 }
1218
1262 err = tnc_prev(c, &znode, n);
1263 if (err == -ENOENT) {
1264 dbg_tnc("found 0, lvl %d, n -1", znode->level);
1265 *n = -1;
1266 return 0;
1267 }
1268 if (unlikely(err < 0))
1269 return err;
1270 if (keys_cmp(c, key, &znode->zbranch[*n].key)) {
1271 dbg_tnc("found 0, lvl %d, n -1", znode->level);
1272 *n = -1;
1273 return 0;
1274 }
1275
1276 dbg_tnc("found 1, lvl %d, n %d", znode->level, *n);
1277 *zn = znode;
1278 return 1;
1279 }
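
/*
 * lookup_level0_dirty - search for a key at level 0, dirtying the path.
 *
 * Like ubifs_lookup_level0(), but every znode on the way down is
 * copy-on-written so that the caller may modify the result. Returns %1 on an
 * exact match, %0 if the key is not present, or a negative error code.
 */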
1306 static int lookup_level0_dirty(struct ubifs_info *c, const union ubifs_key *key,
1307 struct ubifs_znode **zn, int *n)
1308 {
1309 int err, exact;
1310 struct ubifs_znode *znode;
1311 time64_t time = ktime_get_seconds();
1312
1313 dbg_tnck(key, "search and dirty key ");
1314
1315 znode = c->zroot.znode;
1316 if (unlikely(!znode)) {
1317 znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
1318 if (IS_ERR(znode))
1319 return PTR_ERR(znode);
1320 }
1321
1322 znode = dirty_cow_znode(c, &c->zroot);
1323 if (IS_ERR(znode))
1324 return PTR_ERR(znode);
1325
1326 znode->time = time;
1327
1328 while (1) {
1329 struct ubifs_zbranch *zbr;
1330
1331 exact = ubifs_search_zbranch(c, znode, key, n);
1332
1333 if (znode->level == 0)
1334 break;
1335
1336 if (*n < 0)
1337 *n = 0;
1338 zbr = &znode->zbranch[*n];
1339
1340 if (zbr->znode) {
1341 znode->time = time;
1342 znode = dirty_cow_znode(c, zbr);
1343 if (IS_ERR(znode))
1344 return PTR_ERR(znode);
1345 continue;
1346 }
1347
1348
1349 znode = ubifs_load_znode(c, zbr, znode, *n);
1350 if (IS_ERR(znode))
1351 return PTR_ERR(znode);
1352 znode = dirty_cow_znode(c, zbr);
1353 if (IS_ERR(znode))
1354 return PTR_ERR(znode);
1355 }
1356
1357 *zn = znode;
1358 if (exact || !is_hash_key(c, key) || *n != -1) {
1359 dbg_tnc("found %d, lvl %d, n %d", exact, znode->level, *n);
1360 return exact;
1361 }
1367 err = tnc_prev(c, &znode, n);
1368 if (err == -ENOENT) {
1369 *n = -1;
1370 dbg_tnc("found 0, lvl %d, n -1", znode->level);
1371 return 0;
1372 }
1373 if (unlikely(err < 0))
1374 return err;
1375 if (keys_cmp(c, key, &znode->zbranch[*n].key)) {
1376 *n = -1;
1377 dbg_tnc("found 0, lvl %d, n -1", znode->level);
1378 return 0;
1379 }
1380
1381 if (znode->cnext || !ubifs_zn_dirty(znode)) {
1382 znode = dirty_cow_bottom_up(c, znode);
1383 if (IS_ERR(znode))
1384 return PTR_ERR(znode);
1385 }
1386
1387 dbg_tnc("found 1, lvl %d, n %d", znode->level, *n);
1388 *zn = znode;
1389 return 1;
1390 }
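
/*
 * maybe_leb_gced - check whether a LEB may have been garbage collected.
 *
 * Compares the GC sequence number sampled before a lock-less read with the
 * current one. Returns %1 if LEB @lnum could have been moved by GC in the
 * meantime (so the read must be retried safely), and %0 if it cannot.
 */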
1402 static int maybe_leb_gced(struct ubifs_info *c, int lnum, int gc_seq1)
1403 {
1404 int gc_seq2, gced_lnum;
1405
1406 gced_lnum = c->gced_lnum;
1407 smp_rmb();
1408 gc_seq2 = c->gc_seq;
1409
1410 if (gc_seq1 == gc_seq2)
1411 return 0;
1412
1413 if (gc_seq1 + 1 != gc_seq2)
1414 return 1;
1419 smp_rmb();
1420 if (gced_lnum != c->gced_lnum)
1421 return 1;
1422
1423 if (gced_lnum == lnum)
1424 return 1;
1425 return 0;
1426 }
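
/*
 * ubifs_tnc_locate - look up a node by key and report where it is stored.
 *
 * The node is copied to @node and, if @lnum is not NULL, its LEB number and
 * offset are returned as well. Non-hashed nodes are read without holding the
 * TNC mutex and the read is retried safely if GC may have moved the node in
 * the meantime. Returns %0 on success, %-ENOENT if not found, or another
 * negative error code.
 */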
1441 int ubifs_tnc_locate(struct ubifs_info *c, const union ubifs_key *key,
1442 void *node, int *lnum, int *offs)
1443 {
1444 int found, n, err, safely = 0, gc_seq1;
1445 struct ubifs_znode *znode;
1446 struct ubifs_zbranch zbr, *zt;
1447
1448 again:
1449 mutex_lock(&c->tnc_mutex);
1450 found = ubifs_lookup_level0(c, key, &znode, &n);
1451 if (!found) {
1452 err = -ENOENT;
1453 goto out;
1454 } else if (found < 0) {
1455 err = found;
1456 goto out;
1457 }
1458 zt = &znode->zbranch[n];
1459 if (lnum) {
1460 *lnum = zt->lnum;
1461 *offs = zt->offs;
1462 }
1463 if (is_hash_key(c, key)) {
1468 err = tnc_read_hashed_node(c, zt, node);
1469 goto out;
1470 }
1471 if (safely) {
1472 err = ubifs_tnc_read_node(c, zt, node);
1473 goto out;
1474 }
1475
1476 zbr = znode->zbranch[n];
1477 gc_seq1 = c->gc_seq;
1478 mutex_unlock(&c->tnc_mutex);
1479
1480 if (ubifs_get_wbuf(c, zbr.lnum)) {
1481
1482 err = ubifs_tnc_read_node(c, &zbr, node);
1483 return err;
1484 }
1485
1486 err = fallible_read_node(c, key, &zbr, node);
1487 if (err <= 0 || maybe_leb_gced(c, zbr.lnum, gc_seq1)) {
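/*
 * The node was read without the TNC mutex and may have been moved
 * by garbage collection while we were reading it - retry the
 * lookup, this time reading the node safely.
 */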
1492 safely = 1;
1493 goto again;
1494 }
1495 return 0;
1496
1497 out:
1498 mutex_unlock(&c->tnc_mutex);
1499 return err;
1500 }
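
/*
 * ubifs_tnc_get_bu_keys - collect zbranches for a bulk-read.
 *
 * Starting at bu->key, consecutive data nodes of the same inode that are
 * stored contiguously on the media are collected into @bu, subject to the
 * buffer length and the UBIFS_MAX_BULK_READ limits. Returns %0 on success
 * or a negative error code.
 */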
1515 int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu)
1516 {
1517 int n, err = 0, lnum = -1, uninitialized_var(offs);
1518 int uninitialized_var(len);
1519 unsigned int block = key_block(c, &bu->key);
1520 struct ubifs_znode *znode;
1521
1522 bu->cnt = 0;
1523 bu->blk_cnt = 0;
1524 bu->eof = 0;
1525
1526 mutex_lock(&c->tnc_mutex);
1527
1528 err = ubifs_lookup_level0(c, &bu->key, &znode, &n);
1529 if (err < 0)
1530 goto out;
1531 if (err) {
1532
1533 len = znode->zbranch[n].len;
1534
1535 if (len > bu->buf_len) {
1536 err = -EINVAL;
1537 goto out;
1538 }
1539
1540 bu->zbranch[bu->cnt++] = znode->zbranch[n];
1541 bu->blk_cnt += 1;
1542 lnum = znode->zbranch[n].lnum;
1543 offs = ALIGN(znode->zbranch[n].offs + len, 8);
1544 }
1545 while (1) {
1546 struct ubifs_zbranch *zbr;
1547 union ubifs_key *key;
1548 unsigned int next_block;
1549
1550
1551 err = tnc_next(c, &znode, &n);
1552 if (err)
1553 goto out;
1554 zbr = &znode->zbranch[n];
1555 key = &zbr->key;
1556
1557 if (key_inum(c, key) != key_inum(c, &bu->key) ||
1558 key_type(c, key) != UBIFS_DATA_KEY) {
1559 err = -ENOENT;
1560 goto out;
1561 }
1562 if (lnum < 0) {
1563
1564 lnum = zbr->lnum;
1565 offs = ALIGN(zbr->offs + zbr->len, 8);
1566 len = zbr->len;
1567 if (len > bu->buf_len) {
1568 err = -EINVAL;
1569 goto out;
1570 }
1571 } else {
1576 if (zbr->lnum != lnum || zbr->offs != offs)
1577 goto out;
1578 offs += ALIGN(zbr->len, 8);
1579 len = ALIGN(len, 8) + zbr->len;
1580
1581 if (len > bu->buf_len)
1582 goto out;
1583 }
1584
1585 next_block = key_block(c, key);
1586 bu->blk_cnt += (next_block - block - 1);
1587 if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
1588 goto out;
1589 block = next_block;
1590
1591 bu->zbranch[bu->cnt++] = *zbr;
1592 bu->blk_cnt += 1;
1593
1594 if (bu->cnt >= UBIFS_MAX_BULK_READ)
1595 goto out;
1596 if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
1597 goto out;
1598 }
1599 out:
1600 if (err == -ENOENT) {
1601 bu->eof = 1;
1602 err = 0;
1603 }
1604 bu->gc_seq = c->gc_seq;
1605 mutex_unlock(&c->tnc_mutex);
1606 if (err)
1607 return err;
1612 if (bu->blk_cnt > UBIFS_MAX_BULK_READ)
1613 bu->blk_cnt = UBIFS_MAX_BULK_READ;
1618 if (UBIFS_BLOCKS_PER_PAGE == 1 ||
1619 !(bu->blk_cnt & (UBIFS_BLOCKS_PER_PAGE - 1)))
1620 return 0;
1621 if (bu->eof) {
1622
1623 bu->blk_cnt += UBIFS_BLOCKS_PER_PAGE - 1;
1624 return 0;
1625 }
1626
1627 block = key_block(c, &bu->key) + bu->blk_cnt;
1628 block &= ~(UBIFS_BLOCKS_PER_PAGE - 1);
1629 while (bu->cnt) {
1630 if (key_block(c, &bu->zbranch[bu->cnt - 1].key) < block)
1631 break;
1632 bu->cnt -= 1;
1633 }
1634 return 0;
1635 }
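
/*
 * read_wbuf - read data that may be partially sitting in a write-buffer.
 *
 * Whatever part of the requested range is still in @wbuf is copied from
 * memory, the rest is read from the LEB. Returns %0 on success or a negative
 * error code.
 */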
1647 static int read_wbuf(struct ubifs_wbuf *wbuf, void *buf, int len, int lnum,
1648 int offs)
1649 {
1650 const struct ubifs_info *c = wbuf->c;
1651 int rlen, overlap;
1652
1653 dbg_io("LEB %d:%d, length %d", lnum, offs, len);
1654 ubifs_assert(c, wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
1655 ubifs_assert(c, !(offs & 7) && offs < c->leb_size);
1656 ubifs_assert(c, offs + len <= c->leb_size);
1657
1658 spin_lock(&wbuf->lock);
1659 overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs);
1660 if (!overlap) {
1661
1662 spin_unlock(&wbuf->lock);
1663 return ubifs_leb_read(c, lnum, buf, offs, len, 0);
1664 }
1665
1666
1667 rlen = wbuf->offs - offs;
1668 if (rlen < 0)
1669 rlen = 0;
1670
1671
1672 memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen);
1673 spin_unlock(&wbuf->lock);
1674
1675 if (rlen > 0)
1676
1677 return ubifs_leb_read(c, lnum, buf, offs, rlen, 0);
1678
1679 return 0;
1680 }
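
/*
 * validate_data_node - check a bulk-read data node against its zbranch.
 *
 * The node type, CRC, hash, length and key are verified. Returns %0 if the
 * node is valid and a negative error code otherwise.
 */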
1690 static int validate_data_node(struct ubifs_info *c, void *buf,
1691 struct ubifs_zbranch *zbr)
1692 {
1693 union ubifs_key key1;
1694 struct ubifs_ch *ch = buf;
1695 int err, len;
1696
1697 if (ch->node_type != UBIFS_DATA_NODE) {
1698 ubifs_err(c, "bad node type (%d but expected %d)",
1699 ch->node_type, UBIFS_DATA_NODE);
1700 goto out_err;
1701 }
1702
1703 err = ubifs_check_node(c, buf, zbr->lnum, zbr->offs, 0, 0);
1704 if (err) {
1705 ubifs_err(c, "expected node type %d", UBIFS_DATA_NODE);
1706 goto out;
1707 }
1708
1709 err = ubifs_node_check_hash(c, buf, zbr->hash);
1710 if (err) {
1711 ubifs_bad_hash(c, buf, zbr->hash, zbr->lnum, zbr->offs);
1712 return err;
1713 }
1714
1715 len = le32_to_cpu(ch->len);
1716 if (len != zbr->len) {
1717 ubifs_err(c, "bad node length %d, expected %d", len, zbr->len);
1718 goto out_err;
1719 }
1720
1721
1722 key_read(c, buf + UBIFS_KEY_OFFSET, &key1);
1723 if (!keys_eq(c, &zbr->key, &key1)) {
1724 ubifs_err(c, "bad key in node at LEB %d:%d",
1725 zbr->lnum, zbr->offs);
1726 dbg_tnck(&zbr->key, "looked for key ");
1727 dbg_tnck(&key1, "found node's key ");
1728 goto out_err;
1729 }
1730
1731 return 0;
1732
1733 out_err:
1734 err = -EINVAL;
1735 out:
1736 ubifs_err(c, "bad node at LEB %d:%d", zbr->lnum, zbr->offs);
1737 ubifs_dump_node(c, buf);
1738 dump_stack();
1739 return err;
1740 }
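
/*
 * ubifs_tnc_bulk_read - read the nodes collected by ubifs_tnc_get_bu_keys().
 *
 * The whole region is read in one go (through the write-buffer if needed)
 * and every data node in it is validated. Returns %0 on success, %-EAGAIN if
 * GC moved the LEB and the caller should fall back to normal reads, or
 * another negative error code.
 */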
1752 int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu)
1753 {
1754 int lnum = bu->zbranch[0].lnum, offs = bu->zbranch[0].offs, len, err, i;
1755 struct ubifs_wbuf *wbuf;
1756 void *buf;
1757
1758 len = bu->zbranch[bu->cnt - 1].offs;
1759 len += bu->zbranch[bu->cnt - 1].len - offs;
1760 if (len > bu->buf_len) {
1761 ubifs_err(c, "buffer too small %d vs %d", bu->buf_len, len);
1762 return -EINVAL;
1763 }
1764
1765
1766 wbuf = ubifs_get_wbuf(c, lnum);
1767 if (wbuf)
1768 err = read_wbuf(wbuf, bu->buf, len, lnum, offs);
1769 else
1770 err = ubifs_leb_read(c, lnum, bu->buf, offs, len, 0);
1771
1772
1773 if (maybe_leb_gced(c, lnum, bu->gc_seq))
1774 return -EAGAIN;
1775
1776 if (err && err != -EBADMSG) {
1777 ubifs_err(c, "failed to read from LEB %d:%d, error %d",
1778 lnum, offs, err);
1779 dump_stack();
1780 dbg_tnck(&bu->key, "key ");
1781 return err;
1782 }
1783
1784
1785 buf = bu->buf;
1786 for (i = 0; i < bu->cnt; i++) {
1787 err = validate_data_node(c, buf, &bu->zbranch[i]);
1788 if (err)
1789 return err;
1790 buf = buf + ALIGN(bu->zbranch[i].len, 8);
1791 }
1792
1793 return 0;
1794 }
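
/*
 * do_lookup_nm - look up a directory entry by key and name.
 *
 * Used when the key collides: the entry whose name matches @nm is located
 * with resolve_collision() and read. Returns %0 on success, %-ENOENT if
 * there is no such entry, or another negative error code.
 */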
1809 static int do_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
1810 void *node, const struct fscrypt_name *nm)
1811 {
1812 int found, n, err;
1813 struct ubifs_znode *znode;
1814
1815 dbg_tnck(key, "key ");
1816 mutex_lock(&c->tnc_mutex);
1817 found = ubifs_lookup_level0(c, key, &znode, &n);
1818 if (!found) {
1819 err = -ENOENT;
1820 goto out_unlock;
1821 } else if (found < 0) {
1822 err = found;
1823 goto out_unlock;
1824 }
1825
1826 ubifs_assert(c, n >= 0);
1827
1828 err = resolve_collision(c, key, &znode, &n, nm);
1829 dbg_tnc("rc returned %d, znode %p, n %d", err, znode, n);
1830 if (unlikely(err < 0))
1831 goto out_unlock;
1832 if (err == 0) {
1833 err = -ENOENT;
1834 goto out_unlock;
1835 }
1836
1837 err = tnc_read_hashed_node(c, &znode->zbranch[n], node);
1838
1839 out_unlock:
1840 mutex_unlock(&c->tnc_mutex);
1841 return err;
1842 }
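
/*
 * ubifs_tnc_lookup_nm - look up a directory entry by key and name.
 *
 * A plain lookup is tried first; if the returned entry's name does not match
 * (a hash collision), the slower collision-resolving path is taken. Returns
 * %0 on success, %-ENOENT if not found, or another negative error code.
 */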
1857 int ubifs_tnc_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
1858 void *node, const struct fscrypt_name *nm)
1859 {
1860 int err, len;
1861 const struct ubifs_dent_node *dent = node;
1862
1867 err = ubifs_tnc_lookup(c, key, node);
1868 if (err)
1869 return err;
1870
1871 len = le16_to_cpu(dent->nlen);
1872 if (fname_len(nm) == len && !memcmp(dent->name, fname_name(nm), len))
1873 return 0;
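
/*
 * The name does not match, so the key must have collided with another
 * entry; fall back to the collision-resolving lookup.
 */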
1880 return do_lookup_nm(c, key, node, nm);
1881 }
1882
1883 static int search_dh_cookie(struct ubifs_info *c, const union ubifs_key *key,
1884 struct ubifs_dent_node *dent, uint32_t cookie,
1885 struct ubifs_znode **zn, int *n, int exact)
1886 {
1887 int err;
1888 struct ubifs_znode *znode = *zn;
1889 struct ubifs_zbranch *zbr;
1890 union ubifs_key *dkey;
1891
1892 if (!exact) {
1893 err = tnc_next(c, &znode, n);
1894 if (err)
1895 return err;
1896 }
1897
1898 for (;;) {
1899 zbr = &znode->zbranch[*n];
1900 dkey = &zbr->key;
1901
1902 if (key_inum(c, dkey) != key_inum(c, key) ||
1903 key_type(c, dkey) != key_type(c, key)) {
1904 return -ENOENT;
1905 }
1906
1907 err = tnc_read_hashed_node(c, zbr, dent);
1908 if (err)
1909 return err;
1910
1911 if (key_hash(c, key) == key_hash(c, dkey) &&
1912 le32_to_cpu(dent->cookie) == cookie) {
1913 *zn = znode;
1914 return 0;
1915 }
1916
1917 err = tnc_next(c, &znode, n);
1918 if (err)
1919 return err;
1920 }
1921 }
1922
1923 static int do_lookup_dh(struct ubifs_info *c, const union ubifs_key *key,
1924 struct ubifs_dent_node *dent, uint32_t cookie)
1925 {
1926 int n, err;
1927 struct ubifs_znode *znode;
1928 union ubifs_key start_key;
1929
1930 ubifs_assert(c, is_hash_key(c, key));
1931
1932 lowest_dent_key(c, &start_key, key_inum(c, key));
1933
1934 mutex_lock(&c->tnc_mutex);
1935 err = ubifs_lookup_level0(c, &start_key, &znode, &n);
1936 if (unlikely(err < 0))
1937 goto out_unlock;
1938
1939 err = search_dh_cookie(c, key, dent, cookie, &znode, &n, err);
1940
1941 out_unlock:
1942 mutex_unlock(&c->tnc_mutex);
1943 return err;
1944 }
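
/*
 * ubifs_tnc_lookup_dh - look up a directory entry by key and cookie
 * (double-hash case).
 *
 * A plain lookup is tried first; if the cookie does not match, the entries
 * of the directory are scanned for the one with the wanted hash and cookie.
 * Returns %0 on success, %-EOPNOTSUPP if the file system does not use double
 * hashing, %-ENOENT if not found, or another negative error code.
 */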
1960 int ubifs_tnc_lookup_dh(struct ubifs_info *c, const union ubifs_key *key,
1961 void *node, uint32_t cookie)
1962 {
1963 int err;
1964 const struct ubifs_dent_node *dent = node;
1965
1966 if (!c->double_hash)
1967 return -EOPNOTSUPP;
1968
1973 err = ubifs_tnc_lookup(c, key, node);
1974 if (err)
1975 return err;
1976
1977 if (le32_to_cpu(dent->cookie) == cookie)
1978 return 0;
1984 return do_lookup_dh(c, key, node, cookie);
1985 }
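
/*
 * correct_parent_keys - propagate a lowered leftmost key up the tree.
 *
 * When the key of slot 0 of @znode becomes smaller, the corresponding keys
 * in the parent znodes (while @znode is their leftmost child) must be
 * lowered as well.
 */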
1996 static void correct_parent_keys(const struct ubifs_info *c,
1997 struct ubifs_znode *znode)
1998 {
1999 union ubifs_key *key, *key1;
2000
2001 ubifs_assert(c, znode->parent);
2002 ubifs_assert(c, znode->iip == 0);
2003
2004 key = &znode->zbranch[0].key;
2005 key1 = &znode->parent->zbranch[0].key;
2006
2007 while (keys_cmp(c, key, key1) < 0) {
2008 key_copy(c, key, key1);
2009 znode = znode->parent;
2010 znode->alt = 1;
2011 if (!znode->parent || znode->iip)
2012 break;
2013 key1 = &znode->parent->zbranch[0].key;
2014 }
2015 }
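
/*
 * insert_zbranch - insert a zbranch into slot @n of @znode.
 *
 * Existing branches at and after @n are shifted right and child indices are
 * updated. The znode must already be dirty.
 */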
2029 static void insert_zbranch(struct ubifs_info *c, struct ubifs_znode *znode,
2030 const struct ubifs_zbranch *zbr, int n)
2031 {
2032 int i;
2033
2034 ubifs_assert(c, ubifs_zn_dirty(znode));
2035
2036 if (znode->level) {
2037 for (i = znode->child_cnt; i > n; i--) {
2038 znode->zbranch[i] = znode->zbranch[i - 1];
2039 if (znode->zbranch[i].znode)
2040 znode->zbranch[i].znode->iip = i;
2041 }
2042 if (zbr->znode)
2043 zbr->znode->iip = n;
2044 } else
2045 for (i = znode->child_cnt; i > n; i--)
2046 znode->zbranch[i] = znode->zbranch[i - 1];
2047
2048 znode->zbranch[n] = *zbr;
2049 znode->child_cnt += 1;
2050
2065 if (n == 0)
2066 znode->alt = 1;
2067 }
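
/*
 * tnc_insert - insert a zbranch into the TNC, splitting znodes as needed.
 *
 * If @znode is full it is split and the insertion propagates upwards,
 * possibly growing the tree by creating a new root. Data keys of the same
 * inode are kept together where possible. Returns %0 on success or %-ENOMEM.
 */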
2081 static int tnc_insert(struct ubifs_info *c, struct ubifs_znode *znode,
2082 struct ubifs_zbranch *zbr, int n)
2083 {
2084 struct ubifs_znode *zn, *zi, *zp;
2085 int i, keep, move, appending = 0;
2086 union ubifs_key *key = &zbr->key, *key1;
2087
2088 ubifs_assert(c, n >= 0 && n <= c->fanout);
2089
2090
2091 again:
2092 zp = znode->parent;
2093 if (znode->child_cnt < c->fanout) {
2094 ubifs_assert(c, n != c->fanout);
2095 dbg_tnck(key, "inserted at %d level %d, key ", n, znode->level);
2096
2097 insert_zbranch(c, znode, zbr, n);
2098
2099
2100 if (n == 0 && zp && znode->iip == 0)
2101 correct_parent_keys(c, znode);
2102
2103 return 0;
2104 }
2110 dbg_tnck(key, "splitting level %d, key ", znode->level);
2111
2112 if (znode->alt)
2117 ins_clr_old_idx_znode(c, znode);
2118
2119 zn = kzalloc(c->max_znode_sz, GFP_NOFS);
2120 if (!zn)
2121 return -ENOMEM;
2122 zn->parent = zp;
2123 zn->level = znode->level;
2124
2125
2126 if (znode->level == 0 && key_type(c, key) == UBIFS_DATA_KEY) {
2127
2128 if (n == c->fanout) {
2129 key1 = &znode->zbranch[n - 1].key;
2130 if (key_inum(c, key1) == key_inum(c, key) &&
2131 key_type(c, key1) == UBIFS_DATA_KEY)
2132 appending = 1;
2133 } else
2134 goto check_split;
2135 } else if (appending && n != c->fanout) {
2136
2137 appending = 0;
2138 check_split:
2139 if (n >= (c->fanout + 1) / 2) {
2140 key1 = &znode->zbranch[0].key;
2141 if (key_inum(c, key1) == key_inum(c, key) &&
2142 key_type(c, key1) == UBIFS_DATA_KEY) {
2143 key1 = &znode->zbranch[n].key;
2144 if (key_inum(c, key1) != key_inum(c, key) ||
2145 key_type(c, key1) != UBIFS_DATA_KEY) {
2146 keep = n;
2147 move = c->fanout - keep;
2148 zi = znode;
2149 goto do_split;
2150 }
2151 }
2152 }
2153 }
2154
2155 if (appending) {
2156 keep = c->fanout;
2157 move = 0;
2158 } else {
2159 keep = (c->fanout + 1) / 2;
2160 move = c->fanout - keep;
2161 }
2168 if (n < keep) {
2169
2170 zi = znode;
2171 move += 1;
2172 keep -= 1;
2173 } else {
2174
2175 zi = zn;
2176 n -= keep;
2177
2178 if (zn->level != 0)
2179 zbr->znode->parent = zn;
2180 }
2181
2182 do_split:
2183
2184 __set_bit(DIRTY_ZNODE, &zn->flags);
2185 atomic_long_inc(&c->dirty_zn_cnt);
2186
2187 zn->child_cnt = move;
2188 znode->child_cnt = keep;
2189
2190 dbg_tnc("moving %d, keeping %d", move, keep);
2191
2192
2193 for (i = 0; i < move; i++) {
2194 zn->zbranch[i] = znode->zbranch[keep + i];
2195
2196 if (zn->level != 0)
2197 if (zn->zbranch[i].znode) {
2198 zn->zbranch[i].znode->parent = zn;
2199 zn->zbranch[i].znode->iip = i;
2200 }
2201 }
2202
2203
2204 dbg_tnck(key, "inserting at %d level %d, key ", n, zn->level);
2205
2206 insert_zbranch(c, zi, zbr, n);
2207
2208
2209 if (zp) {
2210 if (n == 0 && zi == znode && znode->iip == 0)
2211 correct_parent_keys(c, znode);
2212
2213
2214 n = znode->iip + 1;
2215
2216
2217 zbr->key = zn->zbranch[0].key;
2218 zbr->znode = zn;
2219 zbr->lnum = 0;
2220 zbr->offs = 0;
2221 zbr->len = 0;
2222 znode = zp;
2223
2224 goto again;
2225 }
2226
2227
2228 dbg_tnc("creating new zroot at level %d", znode->level + 1);
2229
2230 zi = kzalloc(c->max_znode_sz, GFP_NOFS);
2231 if (!zi)
2232 return -ENOMEM;
2233
2234 zi->child_cnt = 2;
2235 zi->level = znode->level + 1;
2236
2237 __set_bit(DIRTY_ZNODE, &zi->flags);
2238 atomic_long_inc(&c->dirty_zn_cnt);
2239
2240 zi->zbranch[0].key = znode->zbranch[0].key;
2241 zi->zbranch[0].znode = znode;
2242 zi->zbranch[0].lnum = c->zroot.lnum;
2243 zi->zbranch[0].offs = c->zroot.offs;
2244 zi->zbranch[0].len = c->zroot.len;
2245 zi->zbranch[1].key = zn->zbranch[0].key;
2246 zi->zbranch[1].znode = zn;
2247
2248 c->zroot.lnum = 0;
2249 c->zroot.offs = 0;
2250 c->zroot.len = 0;
2251 c->zroot.znode = zi;
2252
2253 zn->parent = zi;
2254 zn->iip = 1;
2255 znode->parent = zi;
2256 znode->iip = 0;
2257
2258 return 0;
2259 }
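
/*
 * ubifs_tnc_add - add or update a node in the TNC.
 *
 * If @key is not in the index a new zbranch is inserted; otherwise the
 * existing zbranch is updated to point at the new LEB, offset, length and
 * hash, and the old location is accounted as dirt. Returns %0 on success or
 * a negative error code.
 */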
2274 int ubifs_tnc_add(struct ubifs_info *c, const union ubifs_key *key, int lnum,
2275 int offs, int len, const u8 *hash)
2276 {
2277 int found, n, err = 0;
2278 struct ubifs_znode *znode;
2279
2280 mutex_lock(&c->tnc_mutex);
2281 dbg_tnck(key, "%d:%d, len %d, key ", lnum, offs, len);
2282 found = lookup_level0_dirty(c, key, &znode, &n);
2283 if (!found) {
2284 struct ubifs_zbranch zbr;
2285
2286 zbr.znode = NULL;
2287 zbr.lnum = lnum;
2288 zbr.offs = offs;
2289 zbr.len = len;
2290 ubifs_copy_hash(c, hash, zbr.hash);
2291 key_copy(c, key, &zbr.key);
2292 err = tnc_insert(c, znode, &zbr, n + 1);
2293 } else if (found == 1) {
2294 struct ubifs_zbranch *zbr = &znode->zbranch[n];
2295
2296 lnc_free(zbr);
2297 err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
2298 zbr->lnum = lnum;
2299 zbr->offs = offs;
2300 zbr->len = len;
2301 ubifs_copy_hash(c, hash, zbr->hash);
2302 } else
2303 err = found;
2304 if (!err)
2305 err = dbg_check_tnc(c, 0);
2306 mutex_unlock(&c->tnc_mutex);
2307
2308 return err;
2309 }
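
/*
 * ubifs_tnc_replace - replace a node if it is still referenced by the TNC.
 *
 * Used by garbage collection: the zbranch is updated to the new location
 * only if it still points at old LEB @old_lnum, offset @old_offs (resolving
 * hashed key collisions by position); otherwise the moved node is simply
 * accounted as dirt. Returns %0 on success or a negative error code.
 */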
2325 int ubifs_tnc_replace(struct ubifs_info *c, const union ubifs_key *key,
2326 int old_lnum, int old_offs, int lnum, int offs, int len)
2327 {
2328 int found, n, err = 0;
2329 struct ubifs_znode *znode;
2330
2331 mutex_lock(&c->tnc_mutex);
2332 dbg_tnck(key, "old LEB %d:%d, new LEB %d:%d, len %d, key ", old_lnum,
2333 old_offs, lnum, offs, len);
2334 found = lookup_level0_dirty(c, key, &znode, &n);
2335 if (found < 0) {
2336 err = found;
2337 goto out_unlock;
2338 }
2339
2340 if (found == 1) {
2341 struct ubifs_zbranch *zbr = &znode->zbranch[n];
2342
2343 found = 0;
2344 if (zbr->lnum == old_lnum && zbr->offs == old_offs) {
2345 lnc_free(zbr);
2346 err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
2347 if (err)
2348 goto out_unlock;
2349 zbr->lnum = lnum;
2350 zbr->offs = offs;
2351 zbr->len = len;
2352 found = 1;
2353 } else if (is_hash_key(c, key)) {
2354 found = resolve_collision_directly(c, key, &znode, &n,
2355 old_lnum, old_offs);
2356 dbg_tnc("rc returned %d, znode %p, n %d, LEB %d:%d",
2357 found, znode, n, old_lnum, old_offs);
2358 if (found < 0) {
2359 err = found;
2360 goto out_unlock;
2361 }
2362
2363 if (found) {
2364
2365 if (znode->cnext || !ubifs_zn_dirty(znode)) {
2366 znode = dirty_cow_bottom_up(c, znode);
2367 if (IS_ERR(znode)) {
2368 err = PTR_ERR(znode);
2369 goto out_unlock;
2370 }
2371 }
2372 zbr = &znode->zbranch[n];
2373 lnc_free(zbr);
2374 err = ubifs_add_dirt(c, zbr->lnum,
2375 zbr->len);
2376 if (err)
2377 goto out_unlock;
2378 zbr->lnum = lnum;
2379 zbr->offs = offs;
2380 zbr->len = len;
2381 }
2382 }
2383 }
2384
2385 if (!found)
2386 err = ubifs_add_dirt(c, lnum, len);
2387
2388 if (!err)
2389 err = dbg_check_tnc(c, 0);
2390
2391 out_unlock:
2392 mutex_unlock(&c->tnc_mutex);
2393 return err;
2394 }
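
/*
 * ubifs_tnc_add_nm - add or update a directory entry node in the TNC.
 *
 * Hashed-key collisions are resolved by name (fallibly during replay). If no
 * matching entry exists, a new zbranch is inserted. Returns %0 on success or
 * a negative error code.
 */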
2409 int ubifs_tnc_add_nm(struct ubifs_info *c, const union ubifs_key *key,
2410 int lnum, int offs, int len, const u8 *hash,
2411 const struct fscrypt_name *nm)
2412 {
2413 int found, n, err = 0;
2414 struct ubifs_znode *znode;
2415
2416 mutex_lock(&c->tnc_mutex);
2417 dbg_tnck(key, "LEB %d:%d, key ", lnum, offs);
2418 found = lookup_level0_dirty(c, key, &znode, &n);
2419 if (found < 0) {
2420 err = found;
2421 goto out_unlock;
2422 }
2423
2424 if (found == 1) {
2425 if (c->replaying)
2426 found = fallible_resolve_collision(c, key, &znode, &n,
2427 nm, 1);
2428 else
2429 found = resolve_collision(c, key, &znode, &n, nm);
2430 dbg_tnc("rc returned %d, znode %p, n %d", found, znode, n);
2431 if (found < 0) {
2432 err = found;
2433 goto out_unlock;
2434 }
2435
2436
2437 if (znode->cnext || !ubifs_zn_dirty(znode)) {
2438 znode = dirty_cow_bottom_up(c, znode);
2439 if (IS_ERR(znode)) {
2440 err = PTR_ERR(znode);
2441 goto out_unlock;
2442 }
2443 }
2444
2445 if (found == 1) {
2446 struct ubifs_zbranch *zbr = &znode->zbranch[n];
2447
2448 lnc_free(zbr);
2449 err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
2450 zbr->lnum = lnum;
2451 zbr->offs = offs;
2452 zbr->len = len;
2453 ubifs_copy_hash(c, hash, zbr->hash);
2454 goto out_unlock;
2455 }
2456 }
2457
2458 if (!found) {
2459 struct ubifs_zbranch zbr;
2460
2461 zbr.znode = NULL;
2462 zbr.lnum = lnum;
2463 zbr.offs = offs;
2464 zbr.len = len;
2465 ubifs_copy_hash(c, hash, zbr.hash);
2466 key_copy(c, key, &zbr.key);
2467 err = tnc_insert(c, znode, &zbr, n + 1);
2468 if (err)
2469 goto out_unlock;
2470 if (c->replaying) {
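/*
 * During replay a dangling branch with the same key may still
 * sit in the index; remove it by passing ubifs_tnc_remove_nm()
 * an unmatchable name.
 */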
2477 struct fscrypt_name noname = { .disk_name = { .name = "", .len = 1 } };
2478
2479 err = dbg_check_tnc(c, 0);
2480 mutex_unlock(&c->tnc_mutex);
2481 if (err)
2482 return err;
2483 return ubifs_tnc_remove_nm(c, key, &noname);
2484 }
2485 }
2486
2487 out_unlock:
2488 if (!err)
2489 err = dbg_check_tnc(c, 0);
2490 mutex_unlock(&c->tnc_mutex);
2491 return err;
2492 }
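
/*
 * tnc_delete - delete zbranch @n from level 0 znode @znode.
 *
 * Znodes that become empty are removed all the way up the tree, and the tree
 * height is reduced if the root ends up with a single child. Returns %0 on
 * success or a negative error code.
 */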
2503 static int tnc_delete(struct ubifs_info *c, struct ubifs_znode *znode, int n)
2504 {
2505 struct ubifs_zbranch *zbr;
2506 struct ubifs_znode *zp;
2507 int i, err;
2508
2509
2510 ubifs_assert(c, znode->level == 0);
2511 ubifs_assert(c, n >= 0 && n < c->fanout);
2512 dbg_tnck(&znode->zbranch[n].key, "deleting key ");
2513
2514 zbr = &znode->zbranch[n];
2515 lnc_free(zbr);
2516
2517 err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
2518 if (err) {
2519 ubifs_dump_znode(c, znode);
2520 return err;
2521 }
2522
2523
2524 for (i = n; i < znode->child_cnt - 1; i++)
2525 znode->zbranch[i] = znode->zbranch[i + 1];
2526 znode->child_cnt -= 1;
2527
2528 if (znode->child_cnt > 0)
2529 return 0;
2530
2536 do {
2537 ubifs_assert(c, !ubifs_zn_obsolete(znode));
2538 ubifs_assert(c, ubifs_zn_dirty(znode));
2539
2540 zp = znode->parent;
2541 n = znode->iip;
2542
2543 atomic_long_dec(&c->dirty_zn_cnt);
2544
2545 err = insert_old_idx_znode(c, znode);
2546 if (err)
2547 return err;
2548
2549 if (znode->cnext) {
2550 __set_bit(OBSOLETE_ZNODE, &znode->flags);
2551 atomic_long_inc(&c->clean_zn_cnt);
2552 atomic_long_inc(&ubifs_clean_zn_cnt);
2553 } else
2554 kfree(znode);
2555 znode = zp;
2556 } while (znode->child_cnt == 1);
2557
2558
2559 znode->child_cnt -= 1;
2560 ubifs_assert(c, znode->level != 0);
2561 for (i = n; i < znode->child_cnt; i++) {
2562 znode->zbranch[i] = znode->zbranch[i + 1];
2563 if (znode->zbranch[i].znode)
2564 znode->zbranch[i].znode->iip = i;
2565 }
2566
2571 if (!znode->parent) {
2572 while (znode->child_cnt == 1 && znode->level != 0) {
2573 zp = znode;
2574 zbr = &znode->zbranch[0];
2575 znode = get_znode(c, znode, 0);
2576 if (IS_ERR(znode))
2577 return PTR_ERR(znode);
2578 znode = dirty_cow_znode(c, zbr);
2579 if (IS_ERR(znode))
2580 return PTR_ERR(znode);
2581 znode->parent = NULL;
2582 znode->iip = 0;
2583 if (c->zroot.len) {
2584 err = insert_old_idx(c, c->zroot.lnum,
2585 c->zroot.offs);
2586 if (err)
2587 return err;
2588 }
2589 c->zroot.lnum = zbr->lnum;
2590 c->zroot.offs = zbr->offs;
2591 c->zroot.len = zbr->len;
2592 c->zroot.znode = znode;
2593 ubifs_assert(c, !ubifs_zn_obsolete(zp));
2594 ubifs_assert(c, ubifs_zn_dirty(zp));
2595 atomic_long_dec(&c->dirty_zn_cnt);
2596
2597 if (zp->cnext) {
2598 __set_bit(OBSOLETE_ZNODE, &zp->flags);
2599 atomic_long_inc(&c->clean_zn_cnt);
2600 atomic_long_inc(&ubifs_clean_zn_cnt);
2601 } else
2602 kfree(zp);
2603 }
2604 }
2605
2606 return 0;
2607 }
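
/*
 * ubifs_tnc_remove - remove the node with key @key from the TNC, if present.
 * Returns %0 on success or a negative error code.
 */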
2616 int ubifs_tnc_remove(struct ubifs_info *c, const union ubifs_key *key)
2617 {
2618 int found, n, err = 0;
2619 struct ubifs_znode *znode;
2620
2621 mutex_lock(&c->tnc_mutex);
2622 dbg_tnck(key, "key ");
2623 found = lookup_level0_dirty(c, key, &znode, &n);
2624 if (found < 0) {
2625 err = found;
2626 goto out_unlock;
2627 }
2628 if (found == 1)
2629 err = tnc_delete(c, znode, n);
2630 if (!err)
2631 err = dbg_check_tnc(c, 0);
2632
2633 out_unlock:
2634 mutex_unlock(&c->tnc_mutex);
2635 return err;
2636 }
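
/*
 * ubifs_tnc_remove_nm - remove a directory entry node by key and name.
 *
 * Hashed-key collisions are resolved by name (fallibly during replay) before
 * deleting. Returns %0 on success or a negative error code.
 */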
2646 int ubifs_tnc_remove_nm(struct ubifs_info *c, const union ubifs_key *key,
2647 const struct fscrypt_name *nm)
2648 {
2649 int n, err;
2650 struct ubifs_znode *znode;
2651
2652 mutex_lock(&c->tnc_mutex);
2653 dbg_tnck(key, "key ");
2654 err = lookup_level0_dirty(c, key, &znode, &n);
2655 if (err < 0)
2656 goto out_unlock;
2657
2658 if (err) {
2659 if (c->replaying)
2660 err = fallible_resolve_collision(c, key, &znode, &n,
2661 nm, 0);
2662 else
2663 err = resolve_collision(c, key, &znode, &n, nm);
2664 dbg_tnc("rc returned %d, znode %p, n %d", err, znode, n);
2665 if (err < 0)
2666 goto out_unlock;
2667 if (err) {
2668
2669 if (znode->cnext || !ubifs_zn_dirty(znode)) {
2670 znode = dirty_cow_bottom_up(c, znode);
2671 if (IS_ERR(znode)) {
2672 err = PTR_ERR(znode);
2673 goto out_unlock;
2674 }
2675 }
2676 err = tnc_delete(c, znode, n);
2677 }
2678 }
2679
2680 out_unlock:
2681 if (!err)
2682 err = dbg_check_tnc(c, 0);
2683 mutex_unlock(&c->tnc_mutex);
2684 return err;
2685 }
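
/*
 * ubifs_tnc_remove_dh - remove a directory entry by key and cookie
 * (double-hash case).
 *
 * If the entry found by @key does not carry @cookie, the directory is
 * scanned for the entry with the matching hash and cookie before deleting.
 * Returns %0 on success or a negative error code.
 */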
2695 int ubifs_tnc_remove_dh(struct ubifs_info *c, const union ubifs_key *key,
2696 uint32_t cookie)
2697 {
2698 int n, err;
2699 struct ubifs_znode *znode;
2700 struct ubifs_dent_node *dent;
2701 struct ubifs_zbranch *zbr;
2702
2703 if (!c->double_hash)
2704 return -EOPNOTSUPP;
2705
2706 mutex_lock(&c->tnc_mutex);
2707 err = lookup_level0_dirty(c, key, &znode, &n);
2708 if (err <= 0)
2709 goto out_unlock;
2710
2711 zbr = &znode->zbranch[n];
2712 dent = kmalloc(UBIFS_MAX_DENT_NODE_SZ, GFP_NOFS);
2713 if (!dent) {
2714 err = -ENOMEM;
2715 goto out_unlock;
2716 }
2717
2718 err = tnc_read_hashed_node(c, zbr, dent);
2719 if (err)
2720 goto out_free;
2721
2722
2723 if (le32_to_cpu(dent->cookie) != cookie) {
2724 union ubifs_key start_key;
2725
2726 lowest_dent_key(c, &start_key, key_inum(c, key));
2727
2728 err = ubifs_lookup_level0(c, &start_key, &znode, &n);
2729 if (unlikely(err < 0))
2730 goto out_free;
2731
2732 err = search_dh_cookie(c, key, dent, cookie, &znode, &n, err);
2733 if (err)
2734 goto out_free;
2735 }
2736
2737 if (znode->cnext || !ubifs_zn_dirty(znode)) {
2738 znode = dirty_cow_bottom_up(c, znode);
2739 if (IS_ERR(znode)) {
2740 err = PTR_ERR(znode);
2741 goto out_free;
2742 }
2743 }
2744 err = tnc_delete(c, znode, n);
2745
2746 out_free:
2747 kfree(dent);
2748 out_unlock:
2749 if (!err)
2750 err = dbg_check_tnc(c, 0);
2751 mutex_unlock(&c->tnc_mutex);
2752 return err;
2753 }
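
/*
 * key_in_range - return %1 if @key lies within [@from_key, @to_key], %0
 * otherwise.
 */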
2764 static int key_in_range(struct ubifs_info *c, union ubifs_key *key,
2765 union ubifs_key *from_key, union ubifs_key *to_key)
2766 {
2767 if (keys_cmp(c, key, from_key) < 0)
2768 return 0;
2769 if (keys_cmp(c, key, to_key) > 0)
2770 return 0;
2771 return 1;
2772 }
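
/*
 * ubifs_tnc_remove_range - remove all index entries with keys in
 * [@from_key, @to_key].
 *
 * Entries are removed znode by znode, accounting the freed nodes as dirt.
 * Returns %0 on success or a negative error code.
 */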
2784 int ubifs_tnc_remove_range(struct ubifs_info *c, union ubifs_key *from_key,
2785 union ubifs_key *to_key)
2786 {
2787 int i, n, k, err = 0;
2788 struct ubifs_znode *znode;
2789 union ubifs_key *key;
2790
2791 mutex_lock(&c->tnc_mutex);
2792 while (1) {
2793
2794 err = ubifs_lookup_level0(c, from_key, &znode, &n);
2795 if (err < 0)
2796 goto out_unlock;
2797
2798 if (err)
2799 key = from_key;
2800 else {
2801 err = tnc_next(c, &znode, &n);
2802 if (err == -ENOENT) {
2803 err = 0;
2804 goto out_unlock;
2805 }
2806 if (err < 0)
2807 goto out_unlock;
2808 key = &znode->zbranch[n].key;
2809 if (!key_in_range(c, key, from_key, to_key)) {
2810 err = 0;
2811 goto out_unlock;
2812 }
2813 }
2814
2815
2816 if (znode->cnext || !ubifs_zn_dirty(znode)) {
2817 znode = dirty_cow_bottom_up(c, znode);
2818 if (IS_ERR(znode)) {
2819 err = PTR_ERR(znode);
2820 goto out_unlock;
2821 }
2822 }
2823
2824
2825 for (i = n + 1, k = 0; i < znode->child_cnt; i++, k++) {
2826 key = &znode->zbranch[i].key;
2827 if (!key_in_range(c, key, from_key, to_key))
2828 break;
2829 lnc_free(&znode->zbranch[i]);
2830 err = ubifs_add_dirt(c, znode->zbranch[i].lnum,
2831 znode->zbranch[i].len);
2832 if (err) {
2833 ubifs_dump_znode(c, znode);
2834 goto out_unlock;
2835 }
2836 dbg_tnck(key, "removing key ");
2837 }
2838 if (k) {
2839 for (i = n + 1 + k; i < znode->child_cnt; i++)
2840 znode->zbranch[i - k] = znode->zbranch[i];
2841 znode->child_cnt -= k;
2842 }
2843
2844
2845 err = tnc_delete(c, znode, n);
2846 if (err)
2847 goto out_unlock;
2848 }
2849
2850 out_unlock:
2851 if (!err)
2852 err = dbg_check_tnc(c, 0);
2853 mutex_unlock(&c->tnc_mutex);
2854 return err;
2855 }
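
/*
 * ubifs_tnc_remove_ino - remove every index entry belonging to inode @inum.
 *
 * All extended attribute entries and their inodes are removed first, then
 * the whole key range of the inode itself. Returns %0 on success or a
 * negative error code.
 */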
2866 int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum)
2867 {
2868 union ubifs_key key1, key2;
2869 struct ubifs_dent_node *xent, *pxent = NULL;
2870 struct fscrypt_name nm = {0};
2871
2872 dbg_tnc("ino %lu", (unsigned long)inum);
2873
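/*
 * Walk all extended attribute entries of the inode and remove them
 * together with the corresponding extended attribute inodes.
 */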
2878 lowest_xent_key(c, &key1, inum);
2879 while (1) {
2880 ino_t xattr_inum;
2881 int err;
2882
2883 xent = ubifs_tnc_next_ent(c, &key1, &nm);
2884 if (IS_ERR(xent)) {
2885 err = PTR_ERR(xent);
2886 if (err == -ENOENT)
2887 break;
2888 return err;
2889 }
2890
2891 xattr_inum = le64_to_cpu(xent->inum);
2892 dbg_tnc("xent '%s', ino %lu", xent->name,
2893 (unsigned long)xattr_inum);
2894
2895 ubifs_evict_xattr_inode(c, xattr_inum);
2896
2897 fname_name(&nm) = xent->name;
2898 fname_len(&nm) = le16_to_cpu(xent->nlen);
2899 err = ubifs_tnc_remove_nm(c, &key1, &nm);
2900 if (err) {
2901 kfree(xent);
2902 return err;
2903 }
2904
2905 lowest_ino_key(c, &key1, xattr_inum);
2906 highest_ino_key(c, &key2, xattr_inum);
2907 err = ubifs_tnc_remove_range(c, &key1, &key2);
2908 if (err) {
2909 kfree(xent);
2910 return err;
2911 }
2912
2913 kfree(pxent);
2914 pxent = xent;
2915 key_read(c, &xent->key, &key1);
2916 }
2917
2918 kfree(pxent);
2919 lowest_ino_key(c, &key1, inum);
2920 highest_ino_key(c, &key2, inum);
2921
2922 return ubifs_tnc_remove_range(c, &key1, &key2);
2923 }
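
/*
 * ubifs_tnc_next_ent - return the directory entry following @nm (readdir).
 *
 * If @nm is empty, the first entry at or after @key is returned. The entry
 * is kmalloc'ed and must be freed by the caller. Returns the entry or an
 * ERR_PTR(); %-ENOENT means there are no more entries in this directory.
 */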
2948 struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
2949 union ubifs_key *key,
2950 const struct fscrypt_name *nm)
2951 {
2952 int n, err, type = key_type(c, key);
2953 struct ubifs_znode *znode;
2954 struct ubifs_dent_node *dent;
2955 struct ubifs_zbranch *zbr;
2956 union ubifs_key *dkey;
2957
2958 dbg_tnck(key, "key ");
2959 ubifs_assert(c, is_hash_key(c, key));
2960
2961 mutex_lock(&c->tnc_mutex);
2962 err = ubifs_lookup_level0(c, key, &znode, &n);
2963 if (unlikely(err < 0))
2964 goto out_unlock;
2965
2966 if (fname_len(nm) > 0) {
2967 if (err) {
2968
2969 if (c->replaying)
2970 err = fallible_resolve_collision(c, key, &znode, &n,
2971 nm, 0);
2972 else
2973 err = resolve_collision(c, key, &znode, &n, nm);
2974 dbg_tnc("rc returned %d, znode %p, n %d",
2975 err, znode, n);
2976 if (unlikely(err < 0))
2977 goto out_unlock;
2978 }
2979
2980
2981 err = tnc_next(c, &znode, &n);
2982 if (unlikely(err))
2983 goto out_unlock;
2984 } else {
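/*
 * No name was given: return the first entry with this key. If the
 * lookup was not exact, the found position is just before it, so
 * step forward once.
 */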
2990 if (!err) {
2996 err = tnc_next(c, &znode, &n);
2997 if (err)
2998 goto out_unlock;
2999 }
3000 }
3001
3002 zbr = &znode->zbranch[n];
3003 dent = kmalloc(zbr->len, GFP_NOFS);
3004 if (unlikely(!dent)) {
3005 err = -ENOMEM;
3006 goto out_unlock;
3007 }
3008
3009
3013 dkey = &zbr->key;
3014 if (key_inum(c, dkey) != key_inum(c, key) ||
3015 key_type(c, dkey) != type) {
3016 err = -ENOENT;
3017 goto out_free;
3018 }
3019
3020 err = tnc_read_hashed_node(c, zbr, dent);
3021 if (unlikely(err))
3022 goto out_free;
3023
3024 mutex_unlock(&c->tnc_mutex);
3025 return dent;
3026
3027 out_free:
3028 kfree(dent);
3029 out_unlock:
3030 mutex_unlock(&c->tnc_mutex);
3031 return ERR_PTR(err);
3032 }
3033
3034
3035
3036
3037
3038
3039
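/**
 * tnc_destroy_cnext - destroy left-over obsolete znodes from a failed commit.
 * @c: UBIFS file-system description object
 *
 * Destroy left-over obsolete znodes from a failed commit.
 */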
3040 static void tnc_destroy_cnext(struct ubifs_info *c)
3041 {
3042 struct ubifs_znode *cnext;
3043
3044 if (!c->cnext)
3045 return;
3046 ubifs_assert(c, c->cmt_state == COMMIT_BROKEN);
3047 cnext = c->cnext;
3048 do {
3049 struct ubifs_znode *znode = cnext;
3050
3051 cnext = cnext->cnext;
3052 if (ubifs_zn_obsolete(znode))
3053 kfree(znode);
3054 } while (cnext && cnext != c->cnext);
3055 }
3056
3057
3058
3059
3060
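/**
 * ubifs_tnc_close - close TNC subsystem and free all related resources.
 * @c: UBIFS file-system description object
 */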
3061 void ubifs_tnc_close(struct ubifs_info *c)
3062 {
3063 tnc_destroy_cnext(c);
3064 if (c->zroot.znode) {
3065 long n, freed;
3066
3067 n = atomic_long_read(&c->clean_zn_cnt);
3068 freed = ubifs_destroy_tnc_subtree(c, c->zroot.znode);
3069 ubifs_assert(c, freed == n);
3070 atomic_long_sub(n, &ubifs_clean_zn_cnt);
3071 }
3072 kfree(c->gap_lebs);
3073 kfree(c->ilebs);
3074 destroy_old_idx(c);
3075 }
3076
3077
3078
3079
3080
3081
3082
3083
3084
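/**
 * left_znode - get the znode to the left.
 * @c: UBIFS file-system description object
 * @znode: znode
 *
 * This function returns a pointer to the znode to the left of @znode or NULL
 * if there is not one. A negative error code is returned on failure.
 */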
3085 static struct ubifs_znode *left_znode(struct ubifs_info *c,
3086 struct ubifs_znode *znode)
3087 {
3088 int level = znode->level;
3089
3090 while (1) {
3091 int n = znode->iip - 1;
3092
3093
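/* Go up until we can go left */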
3094 znode = znode->parent;
3095 if (!znode)
3096 return NULL;
3097 if (n >= 0) {
3098
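/* Now go down the rightmost branch to 'level' */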
3099 znode = get_znode(c, znode, n);
3100 if (IS_ERR(znode))
3101 return znode;
3102 while (znode->level != level) {
3103 n = znode->child_cnt - 1;
3104 znode = get_znode(c, znode, n);
3105 if (IS_ERR(znode))
3106 return znode;
3107 }
3108 break;
3109 }
3110 }
3111 return znode;
3112 }
3113
3114
3115
3116
3117
3118
3119
3120
3121
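/**
 * right_znode - get the znode to the right.
 * @c: UBIFS file-system description object
 * @znode: znode
 *
 * This function returns a pointer to the znode to the right of @znode or NULL
 * if there is not one. A negative error code is returned on failure.
 */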
3122 static struct ubifs_znode *right_znode(struct ubifs_info *c,
3123 struct ubifs_znode *znode)
3124 {
3125 int level = znode->level;
3126
3127 while (1) {
3128 int n = znode->iip + 1;
3129
3130
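/* Go up until we can go right */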
3131 znode = znode->parent;
3132 if (!znode)
3133 return NULL;
3134 if (n < znode->child_cnt) {
3135
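/* Now go down the leftmost branch to 'level' */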
3136 znode = get_znode(c, znode, n);
3137 if (IS_ERR(znode))
3138 return znode;
3139 while (znode->level != level) {
3140 znode = get_znode(c, znode, 0);
3141 if (IS_ERR(znode))
3142 return znode;
3143 }
3144 break;
3145 }
3146 }
3147 return znode;
3148 }
3149
3150
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
3161
3162
3163
3164
3165
3166
3167
3168
3169
3170
3171
3172
3173
3174
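/**
 * lookup_znode - find a particular indexing node from TNC.
 * @c: UBIFS file-system description object
 * @key: index node key to lookup
 * @level: index node level
 * @lnum: index node LEB number
 * @offs: index node offset
 *
 * This function searches an indexing node by its first key @key and its
 * address @lnum:@offs. It looks up the indexing tree by pulling all indexing
 * nodes it traverses to TNC. The indexing node which is looked for does not
 * have to have exactly the same leftmost key @key, because the leftmost key
 * may have changed, in which case TNC will contain a dirty znode which still
 * refers to the same @lnum:@offs. This function recognizes such indexing
 * nodes by also searching the neighboring znodes for non-unique keys.
 *
 * This function returns a pointer to the znode found or %NULL if it is not
 * found. A negative error code is returned on failure.
 */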
3175 static struct ubifs_znode *lookup_znode(struct ubifs_info *c,
3176 union ubifs_key *key, int level,
3177 int lnum, int offs)
3178 {
3179 struct ubifs_znode *znode, *zn;
3180 int n, nn;
3181
3182 ubifs_assert(c, key_type(c, key) < UBIFS_INVALID_KEY);
3183
3184
3185
3186
3187
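/*
 * The arguments have probably been read off flash, so don't assume
 * they are valid.
 */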
3188 if (level < 0)
3189 return ERR_PTR(-EINVAL);
3190
3191
3192 znode = c->zroot.znode;
3193 if (!znode) {
3194 znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
3195 if (IS_ERR(znode))
3196 return znode;
3197 }
3198
3199 if (c->zroot.lnum == lnum && c->zroot.offs == offs)
3200 return znode;
3201
3202 if (level >= znode->level)
3203 return NULL;
3204 while (1) {
3205 ubifs_search_zbranch(c, znode, key, &n);
3206 if (n < 0) {
3207
3208
3209
3210
3211
3212
3213
3214
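/*
 * We reached a znode where the leftmost key is greater than the key
 * we are searching for. The leftmost key of the wanted znode may have
 * changed since it was written, so try looking to the left before
 * giving up.
 */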
3215 znode = left_znode(c, znode);
3216 if (!znode)
3217 return NULL;
3218 if (IS_ERR(znode))
3219 return znode;
3220 ubifs_search_zbranch(c, znode, key, &n);
3221 ubifs_assert(c, n >= 0);
3222 }
3223 if (znode->level == level + 1)
3224 break;
3225 znode = get_znode(c, znode, n);
3226 if (IS_ERR(znode))
3227 return znode;
3228 }
3229
3230 if (znode->zbranch[n].lnum == lnum && znode->zbranch[n].offs == offs)
3231 return get_znode(c, znode, n);
3232
3233 if (!is_hash_key(c, key))
3234 return NULL;
3235
3236
3237
3238
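/*
 * The key is not unique and so may also be in the znodes to either
 * side. Remember this position, then look left and right.
 */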
3239 zn = znode;
3240 nn = n;
3241
3242 while (1) {
3243
3244 if (n)
3245 n -= 1;
3246 else {
3247 znode = left_znode(c, znode);
3248 if (!znode)
3249 break;
3250 if (IS_ERR(znode))
3251 return znode;
3252 n = znode->child_cnt - 1;
3253 }
3254
3255 if (znode->zbranch[n].lnum == lnum &&
3256 znode->zbranch[n].offs == offs)
3257 return get_znode(c, znode, n);
3258
3259 if (keys_cmp(c, &znode->zbranch[n].key, key) < 0)
3260 break;
3261 }
3262
3263 znode = zn;
3264 n = nn;
3265
3266 while (1) {
3267
3268 if (++n >= znode->child_cnt) {
3269 znode = right_znode(c, znode);
3270 if (!znode)
3271 break;
3272 if (IS_ERR(znode))
3273 return znode;
3274 n = 0;
3275 }
3276
3277 if (znode->zbranch[n].lnum == lnum &&
3278 znode->zbranch[n].offs == offs)
3279 return get_znode(c, znode, n);
3280
3281 if (keys_cmp(c, &znode->zbranch[n].key, key) > 0)
3282 break;
3283 }
3284 return NULL;
3285 }
3286
3287
3288
3289
3290
3291
3292
3293
3294
3295
3296
3297
3298
3299
3300
3301
3302
3303
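/**
 * is_idx_node_in_tnc - determine if an index node is in the TNC.
 * @c: UBIFS file-system description object
 * @key: key of index node
 * @level: index node level
 * @lnum: LEB number of index node
 * @offs: offset of index node
 *
 * This function returns %0 if the index node is not referred to in the TNC,
 * %1 if the index node is referred to in the TNC and the corresponding znode
 * is dirty, %2 if the index node is referred to in the TNC and the
 * corresponding znode is clean, and a negative error code in case of failure.
 *
 * Note, the @key argument has to be the key of the first child.
 */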
3304 int is_idx_node_in_tnc(struct ubifs_info *c, union ubifs_key *key, int level,
3305 int lnum, int offs)
3306 {
3307 struct ubifs_znode *znode;
3308
3309 znode = lookup_znode(c, key, level, lnum, offs);
3310 if (!znode)
3311 return 0;
3312 if (IS_ERR(znode))
3313 return PTR_ERR(znode);
3314
3315 return ubifs_zn_dirty(znode) ? 1 : 2;
3316 }
3317
3318
3319
3320
3321
3322
3323
3324
3325
3326
3327
3328
3329
3330
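/**
 * is_leaf_node_in_tnc - determine if a non-indexing node is in the TNC.
 * @c: UBIFS file-system description object
 * @key: node key
 * @lnum: node LEB number
 * @offs: node offset
 *
 * This function returns %1 if the node is referred to in the TNC, %0 if it is
 * not, and a negative error code in case of failure.
 */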
3331 static int is_leaf_node_in_tnc(struct ubifs_info *c, union ubifs_key *key,
3332 int lnum, int offs)
3333 {
3334 struct ubifs_zbranch *zbr;
3335 struct ubifs_znode *znode, *zn;
3336 int n, found, err, nn;
3337 const int unique = !is_hash_key(c, key);
3338
3339 found = ubifs_lookup_level0(c, key, &znode, &n);
3340 if (found < 0)
3341 return found;
3342 if (!found)
3343 return 0;
3344 zbr = &znode->zbranch[n];
3345 if (lnum == zbr->lnum && offs == zbr->offs)
3346 return 1;
3347 if (unique)
3348 return 0;
3349
3350
3351
3352
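/*
 * The key is not unique and so may also be in the znodes to either
 * side. Remember this position, then look left and right.
 */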
3353 zn = znode;
3354 nn = n;
3355
3356 while (1) {
3357 err = tnc_prev(c, &znode, &n);
3358 if (err == -ENOENT)
3359 break;
3360 if (err)
3361 return err;
3362 if (keys_cmp(c, key, &znode->zbranch[n].key))
3363 break;
3364 zbr = &znode->zbranch[n];
3365 if (lnum == zbr->lnum && offs == zbr->offs)
3366 return 1;
3367 }
3368
3369 znode = zn;
3370 n = nn;
3371 while (1) {
3372 err = tnc_next(c, &znode, &n);
3373 if (err) {
3374 if (err == -ENOENT)
3375 return 0;
3376 return err;
3377 }
3378 if (keys_cmp(c, key, &znode->zbranch[n].key))
3379 break;
3380 zbr = &znode->zbranch[n];
3381 if (lnum == zbr->lnum && offs == zbr->offs)
3382 return 1;
3383 }
3384 return 0;
3385 }
3386
3387
3388
3389
3390
3391
3392
3393
3394
3395
3396
3397
3398
3399
3400
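/**
 * ubifs_tnc_has_node - determine whether a node is in the TNC.
 * @c: UBIFS file-system description object
 * @key: node key
 * @level: index node level (if it is an index node)
 * @lnum: node LEB number
 * @offs: node offset
 * @is_idx: non-zero if the node is an index node
 *
 * This function returns %1 if the node is in the TNC, %0 if it is not, and a
 * negative error code in case of failure. For index nodes, @key has to be the
 * key of the first child, and an index node is considered present only if the
 * corresponding znode is clean (a dirty znode will be written out again at
 * commit time, so the on-media copy at @lnum:@offs is no longer needed).
 */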
3401 int ubifs_tnc_has_node(struct ubifs_info *c, union ubifs_key *key, int level,
3402 int lnum, int offs, int is_idx)
3403 {
3404 int err;
3405
3406 mutex_lock(&c->tnc_mutex);
3407 if (is_idx) {
3408 err = is_idx_node_in_tnc(c, key, level, lnum, offs);
3409 if (err < 0)
3410 goto out_unlock;
3411 if (err == 1)
3412
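/* The index node was found but it is dirty */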
3413 err = 0;
3414 else if (err == 2)
3415
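/* The index node was found and it is clean */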
3416 err = 1;
3417 else
3418 BUG_ON(err != 0);
3419 } else
3420 err = is_leaf_node_in_tnc(c, key, lnum, offs);
3421
3422 out_unlock:
3423 mutex_unlock(&c->tnc_mutex);
3424 return err;
3425 }
3426
3427
3428
3429
3430
3431
3432
3433
3434
3435
3436
3437
3438
3439
3440
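/**
 * ubifs_dirty_idx_node - dirty an index node.
 * @c: UBIFS file-system description object
 * @key: index node key
 * @level: index node level
 * @lnum: index node LEB number
 * @offs: index node offset
 *
 * This function loads the index node at @lnum:@offs (if it is not already in
 * TNC) and marks it and all its ancestors dirty. The @key argument has to be
 * the key of the first child. Returns %0 on success and a negative error code
 * on failure.
 */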
3441 int ubifs_dirty_idx_node(struct ubifs_info *c, union ubifs_key *key, int level,
3442 int lnum, int offs)
3443 {
3444 struct ubifs_znode *znode;
3445 int err = 0;
3446
3447 mutex_lock(&c->tnc_mutex);
3448 znode = lookup_znode(c, key, level, lnum, offs);
3449 if (!znode)
3450 goto out_unlock;
3451 if (IS_ERR(znode)) {
3452 err = PTR_ERR(znode);
3453 goto out_unlock;
3454 }
3455 znode = dirty_cow_bottom_up(c, znode);
3456 if (IS_ERR(znode)) {
3457 err = PTR_ERR(znode);
3458 goto out_unlock;
3459 }
3460
3461 out_unlock:
3462 mutex_unlock(&c->tnc_mutex);
3463 return err;
3464 }
3465
3466
3467
3468
3469
3470
3471
3472
3473
3474
3475
3476
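/**
 * dbg_check_inode_size - check if inode size is correct.
 * @c: UBIFS file-system description object
 * @inode: inode to check
 * @size: inode size
 *
 * This function makes sure that the inode size (@size) is correct and the
 * inode does not have any data nodes beyond @size. Returns zero if the inode
 * is OK, %-EINVAL if it has data beyond @size, and another negative error
 * code in case of other errors.
 */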
3477 int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode,
3478 loff_t size)
3479 {
3480 int err, n;
3481 union ubifs_key from_key, to_key, *key;
3482 struct ubifs_znode *znode;
3483 unsigned int block;
3484
3485 if (!S_ISREG(inode->i_mode))
3486 return 0;
3487 if (!dbg_is_chk_gen(c))
3488 return 0;
3489
3490 block = (size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
3491 data_key_init(c, &from_key, inode->i_ino, block);
3492 highest_data_key(c, &to_key, inode->i_ino);
3493
3494 mutex_lock(&c->tnc_mutex);
3495 err = ubifs_lookup_level0(c, &from_key, &znode, &n);
3496 if (err < 0)
3497 goto out_unlock;
3498
3499 if (err) {
3500 key = &from_key;
3501 goto out_dump;
3502 }
3503
3504 err = tnc_next(c, &znode, &n);
3505 if (err == -ENOENT) {
3506 err = 0;
3507 goto out_unlock;
3508 }
3509 if (err < 0)
3510 goto out_unlock;
3511
3512 ubifs_assert(c, err == 0);
3513 key = &znode->zbranch[n].key;
3514 if (!key_in_range(c, key, &from_key, &to_key))
3515 goto out_unlock;
3516
3517 out_dump:
3518 block = key_block(c, key);
3519 ubifs_err(c, "inode %lu has size %lld, but there are data at offset %lld",
3520 (unsigned long)inode->i_ino, size,
3521 ((loff_t)block) << UBIFS_BLOCK_SHIFT);
3522 mutex_unlock(&c->tnc_mutex);
3523 ubifs_dump_inode(c, inode);
3524 dump_stack();
3525 return -EINVAL;
3526
3527 out_unlock:
3528 mutex_unlock(&c->tnc_mutex);
3529 return err;
3530 }