This source file includes the following definitions:
- ubifs_ro_mode
- ubifs_leb_read
- ubifs_leb_write
- ubifs_leb_change
- ubifs_leb_unmap
- ubifs_leb_map
- ubifs_is_mapped
- ubifs_check_node
- ubifs_pad
- next_sqnum
- ubifs_init_node
- ubifs_crc_node
- ubifs_prepare_node_hmac
- ubifs_prepare_node
- ubifs_prep_grp_node
- wbuf_timer_callback_nolock
- new_wbuf_timer_nolock
- cancel_wbuf_timer_nolock
- ubifs_wbuf_sync_nolock
- ubifs_wbuf_seek_nolock
- ubifs_bg_wbufs_sync
- ubifs_wbuf_write_nolock
- ubifs_write_node_hmac
- ubifs_write_node
- ubifs_read_node_wbuf
- ubifs_read_node
- ubifs_wbuf_init
- ubifs_wbuf_add_ino_nolock
- wbuf_has_ino
- ubifs_sync_wbufs_by_inode
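/*
 * This file implements the UBIFS I/O subsystem: wrappers over the UBI LEB
 * read/write/change/map operations which switch the file-system to read-only
 * mode on write errors, node preparation helpers (magic, CRC, sequence
 * numbers and padding), and the journal head write-buffers (wbufs) together
 * with their synchronization timers.
 */
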
#include <linux/crc32.h>
#include <linux/slab.h>
#include "ubifs.h"

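/**
 * ubifs_ro_mode - switch UBIFS to read-only mode.
 * @c: UBIFS file-system description object
 * @err: error code which is the reason of switching to R/O mode
 */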
void ubifs_ro_mode(struct ubifs_info *c, int err)
{
        if (!c->ro_error) {
                c->ro_error = 1;
                c->no_chk_data_crc = 0;
                c->vfs_sb->s_flags |= SB_RDONLY;
                ubifs_warn(c, "switched to read-only mode, error %d", err);
                dump_stack();
        }
}

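/**
 * ubifs_leb_read - read data from a logical eraseblock.
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number to read from
 * @buf: buffer to read to
 * @offs: offset within the logical eraseblock to read from
 * @len: how many bytes to read
 * @even_ebadmsg: print an error message even if the error is %-EBADMSG
 *
 * This is a wrapper over 'ubi_read()'. Returns zero in case of success and a
 * negative error code in case of failure.
 */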
int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs,
                   int len, int even_ebadmsg)
{
        int err;

        err = ubi_read(c->ubi, lnum, buf, offs, len);
        /*
         * In case of %-EBADMSG print the error message only if the
         * @even_ebadmsg flag is set.
         */
        if (err && (err != -EBADMSG || even_ebadmsg)) {
                ubifs_err(c, "reading %d bytes from LEB %d:%d failed, error %d",
                          len, lnum, offs, err);
                dump_stack();
        }
        return err;
}

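/**
 * ubifs_leb_write - write data to a logical eraseblock.
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number to write to
 * @buf: buffer to write from
 * @offs: offset within the logical eraseblock to write to
 * @len: how many bytes to write
 *
 * This is a wrapper over 'ubi_leb_write()' which also switches the
 * file-system to read-only mode on write errors. Returns zero in case of
 * success and a negative error code in case of failure.
 */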
int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs,
                    int len)
{
        int err;

        ubifs_assert(c, !c->ro_media && !c->ro_mount);
        if (c->ro_error)
                return -EROFS;
        if (!dbg_is_tst_rcvry(c))
                err = ubi_leb_write(c->ubi, lnum, buf, offs, len);
        else
                err = dbg_leb_write(c, lnum, buf, offs, len);
        if (err) {
                ubifs_err(c, "writing %d bytes to LEB %d:%d failed, error %d",
                          len, lnum, offs, err);
                ubifs_ro_mode(c, err);
                dump_stack();
        }
        return err;
}

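/**
 * ubifs_leb_change - atomically change the contents of a logical eraseblock.
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number to change
 * @buf: buffer to write from
 * @len: how many bytes to write
 *
 * This is a wrapper over 'ubi_leb_change()' which also switches the
 * file-system to read-only mode on failures.
 */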
int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
{
        int err;

        ubifs_assert(c, !c->ro_media && !c->ro_mount);
        if (c->ro_error)
                return -EROFS;
        if (!dbg_is_tst_rcvry(c))
                err = ubi_leb_change(c->ubi, lnum, buf, len);
        else
                err = dbg_leb_change(c, lnum, buf, len);
        if (err) {
                ubifs_err(c, "changing %d bytes in LEB %d failed, error %d",
                          len, lnum, err);
                ubifs_ro_mode(c, err);
                dump_stack();
        }
        return err;
}

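/**
 * ubifs_leb_unmap - un-map a logical eraseblock.
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number to un-map
 *
 * This is a wrapper over 'ubi_leb_unmap()' which also switches the
 * file-system to read-only mode on failures.
 */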
int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
{
        int err;

        ubifs_assert(c, !c->ro_media && !c->ro_mount);
        if (c->ro_error)
                return -EROFS;
        if (!dbg_is_tst_rcvry(c))
                err = ubi_leb_unmap(c->ubi, lnum);
        else
                err = dbg_leb_unmap(c, lnum);
        if (err) {
                ubifs_err(c, "unmap LEB %d failed, error %d", lnum, err);
                ubifs_ro_mode(c, err);
                dump_stack();
        }
        return err;
}

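/**
 * ubifs_leb_map - map a logical eraseblock to a physical eraseblock.
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number to map
 *
 * This is a wrapper over 'ubi_leb_map()' which also switches the file-system
 * to read-only mode on failures.
 */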
int ubifs_leb_map(struct ubifs_info *c, int lnum)
{
        int err;

        ubifs_assert(c, !c->ro_media && !c->ro_mount);
        if (c->ro_error)
                return -EROFS;
        if (!dbg_is_tst_rcvry(c))
                err = ubi_leb_map(c->ubi, lnum);
        else
                err = dbg_leb_map(c, lnum);
        if (err) {
                ubifs_err(c, "mapping LEB %d failed, error %d", lnum, err);
                ubifs_ro_mode(c, err);
                dump_stack();
        }
        return err;
}

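/**
 * ubifs_is_mapped - check if a logical eraseblock is mapped.
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number to check
 *
 * This is a wrapper over 'ubi_is_mapped()'. Returns %0 if the LEB is not
 * mapped, %1 if it is mapped, and a negative error code in case of failure.
 */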
int ubifs_is_mapped(const struct ubifs_info *c, int lnum)
{
        int err;

        err = ubi_is_mapped(c->ubi, lnum);
        if (err < 0) {
                ubifs_err(c, "ubi_is_mapped failed for LEB %d, error %d",
                          lnum, err);
                dump_stack();
        }
        return err;
}

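/**
 * ubifs_check_node - check node.
 * @c: UBIFS file-system description object
 * @buf: node to check
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 * @quiet: print no messages
 * @must_chk_crc: indicates whether to always check the CRC
 *
 * This function checks node magic number, node type, length and CRC. The CRC
 * check of data nodes may be skipped when @must_chk_crc is zero and the
 * 'no_chk_data_crc' option is enabled, except while mounting or re-mounting
 * R/W.
 *
 * This function returns zero in case of success and %-EUCLEAN in case of bad
 * CRC or magic, or %-EINVAL in case of other problems.
 */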
int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
                     int offs, int quiet, int must_chk_crc)
{
        int err = -EINVAL, type, node_len;
        uint32_t crc, node_crc, magic;
        const struct ubifs_ch *ch = buf;

        ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
        ubifs_assert(c, !(offs & 7) && offs < c->leb_size);

        magic = le32_to_cpu(ch->magic);
        if (magic != UBIFS_NODE_MAGIC) {
                if (!quiet)
                        ubifs_err(c, "bad magic %#08x, expected %#08x",
                                  magic, UBIFS_NODE_MAGIC);
                err = -EUCLEAN;
                goto out;
        }

        type = ch->node_type;
        if (type < 0 || type >= UBIFS_NODE_TYPES_CNT) {
                if (!quiet)
                        ubifs_err(c, "bad node type %d", type);
                goto out;
        }

        node_len = le32_to_cpu(ch->len);
        if (node_len + offs > c->leb_size)
                goto out_len;

        if (c->ranges[type].max_len == 0) {
                if (node_len != c->ranges[type].len)
                        goto out_len;
        } else if (node_len < c->ranges[type].min_len ||
                   node_len > c->ranges[type].max_len)
                goto out_len;

        if (!must_chk_crc && type == UBIFS_DATA_NODE && !c->mounting &&
            !c->remounting_rw && c->no_chk_data_crc)
                return 0;

        crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
        node_crc = le32_to_cpu(ch->crc);
        if (crc != node_crc) {
                if (!quiet)
                        ubifs_err(c, "bad CRC: calculated %#08x, read %#08x",
                                  crc, node_crc);
                err = -EUCLEAN;
                goto out;
        }

        return 0;

out_len:
        if (!quiet)
                ubifs_err(c, "bad node length %d", node_len);
out:
        if (!quiet) {
                ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
                ubifs_dump_node(c, buf);
                dump_stack();
        }
        return err;
}

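/**
 * ubifs_pad - pad flash space.
 * @c: UBIFS file-system description object
 * @buf: buffer to put padding to
 * @pad: how many bytes to pad
 *
 * The flash media obliges us to write only in chunks of @c->min_io_size, so
 * when less data has to be written, the rest of the space is padded up to the
 * next minimal I/O unit boundary. If the wasted space is large enough to fit
 * a padding node (%UBIFS_PAD_NODE_SZ bytes), one is written; otherwise the
 * space is filled with padding bytes (%UBIFS_PADDING_BYTE).
 */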
void ubifs_pad(const struct ubifs_info *c, void *buf, int pad)
{
        uint32_t crc;

        ubifs_assert(c, pad >= 0 && !(pad & 7));

        if (pad >= UBIFS_PAD_NODE_SZ) {
                struct ubifs_ch *ch = buf;
                struct ubifs_pad_node *pad_node = buf;

                ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
                ch->node_type = UBIFS_PAD_NODE;
                ch->group_type = UBIFS_NO_NODE_GROUP;
                ch->padding[0] = ch->padding[1] = 0;
                ch->sqnum = 0;
                ch->len = cpu_to_le32(UBIFS_PAD_NODE_SZ);
                pad -= UBIFS_PAD_NODE_SZ;
                pad_node->pad_len = cpu_to_le32(pad);
                crc = crc32(UBIFS_CRC32_INIT, buf + 8, UBIFS_PAD_NODE_SZ - 8);
                ch->crc = cpu_to_le32(crc);
                memset(buf + UBIFS_PAD_NODE_SZ, 0, pad);
        } else if (pad > 0)
                /* Too little space, padding node won't fit */
                memset(buf, UBIFS_PADDING_BYTE, pad);
}

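/**
 * next_sqnum - get next sequence number.
 * @c: UBIFS file-system description object
 */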
static unsigned long long next_sqnum(struct ubifs_info *c)
{
        unsigned long long sqnum;

        spin_lock(&c->cnt_lock);
        sqnum = ++c->max_sqnum;
        spin_unlock(&c->cnt_lock);

        if (unlikely(sqnum >= SQNUM_WARN_WATERMARK)) {
                if (sqnum >= SQNUM_WATERMARK) {
                        ubifs_err(c, "sequence number overflow %llu, end of life",
                                  sqnum);
                        ubifs_ro_mode(c, -EINVAL);
                }
                ubifs_warn(c, "running out of sequence numbers, end of life soon");
        }

        return sqnum;
}

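/**
 * ubifs_init_node - initialize the common header of a node.
 * @c: UBIFS file-system description object
 * @node: the node to initialize
 * @len: node length
 * @pad: if the node should be padded to the next min. I/O unit boundary
 */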
void ubifs_init_node(struct ubifs_info *c, void *node, int len, int pad)
{
        struct ubifs_ch *ch = node;
        unsigned long long sqnum = next_sqnum(c);

        ubifs_assert(c, len >= UBIFS_CH_SZ);

        ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
        ch->len = cpu_to_le32(len);
        ch->group_type = UBIFS_NO_NODE_GROUP;
        ch->sqnum = cpu_to_le64(sqnum);
        ch->padding[0] = ch->padding[1] = 0;

        if (pad) {
                len = ALIGN(len, 8);
                pad = ALIGN(len, c->min_io_size) - len;
                ubifs_pad(c, node + len, pad);
        }
}

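/**
 * ubifs_crc_node - compute and store the CRC of a node.
 * @c: UBIFS file-system description object
 * @node: the node
 * @len: node length
 *
 * The CRC covers everything after the CRC field of the common header.
 */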
void ubifs_crc_node(struct ubifs_info *c, void *node, int len)
{
        struct ubifs_ch *ch = node;
        uint32_t crc;

        crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8);
        ch->crc = cpu_to_le32(crc);
}

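/**
 * ubifs_prepare_node_hmac - prepare node to be written to flash.
 * @c: UBIFS file-system description object
 * @node: the node to pad
 * @len: node length
 * @hmac_offs: offset of the HMAC in the node
 * @pad: if the node should be padded
 *
 * This function prepares node at @node to be written to the media - it
 * fills the common header, inserts an HMAC at @hmac_offs if @hmac_offs is
 * positive, calculates the node CRC, and adds padding up to the next minimum
 * I/O unit if @pad is not zero.
 *
 * This function returns 0 for success or a negative error code otherwise.
 */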
int ubifs_prepare_node_hmac(struct ubifs_info *c, void *node, int len,
                            int hmac_offs, int pad)
{
        int err;

        ubifs_init_node(c, node, len, pad);

        if (hmac_offs > 0) {
                err = ubifs_node_insert_hmac(c, node, len, hmac_offs);
                if (err)
                        return err;
        }

        ubifs_crc_node(c, node, len);

        return 0;
}

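/**
 * ubifs_prepare_node - prepare node to be written to flash.
 * @c: UBIFS file-system description object
 * @node: the node to pad
 * @len: node length
 * @pad: if the node should be padded
 *
 * This function prepares node at @node to be written to the media - it
 * calculates node CRC, fills the common header, and adds proper padding up to
 * the next minimum I/O unit if @pad is not zero.
 */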
void ubifs_prepare_node(struct ubifs_info *c, void *node, int len, int pad)
{
        /*
         * An @hmac_offs of zero means that no HMAC is inserted into the
         * node.
         */
        ubifs_prepare_node_hmac(c, node, len, 0, pad);
}

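/**
 * ubifs_prep_grp_node - prepare node of a group to be written to flash.
 * @c: UBIFS file-system description object
 * @node: the node to pad
 * @len: node length
 * @last: indicates the last node of the group
 *
 * This function prepares node at @node to be written to the media - it
 * calculates node CRC and fills the common header.
 */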
void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last)
{
        uint32_t crc;
        struct ubifs_ch *ch = node;
        unsigned long long sqnum = next_sqnum(c);

        ubifs_assert(c, len >= UBIFS_CH_SZ);

        ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
        ch->len = cpu_to_le32(len);
        if (last)
                ch->group_type = UBIFS_LAST_OF_NODE_GROUP;
        else
                ch->group_type = UBIFS_IN_NODE_GROUP;
        ch->sqnum = cpu_to_le64(sqnum);
        ch->padding[0] = ch->padding[1] = 0;
        crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8);
        ch->crc = cpu_to_le32(crc);
}

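/**
 * wbuf_timer_callback_nolock - write-buffer timer callback function.
 * @timer: timer data (write-buffer descriptor)
 *
 * This function is called when the write-buffer timer expires. It marks the
 * write-buffer as needing synchronization and wakes up the background thread.
 */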
static enum hrtimer_restart wbuf_timer_callback_nolock(struct hrtimer *timer)
{
        struct ubifs_wbuf *wbuf = container_of(timer, struct ubifs_wbuf, timer);

        dbg_io("jhead %s", dbg_jhead(wbuf->jhead));
        wbuf->need_sync = 1;
        wbuf->c->need_wbuf_sync = 1;
        ubifs_wake_up_bgt(wbuf->c);
        return HRTIMER_NORESTART;
}

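/**
 * new_wbuf_timer_nolock - start new write-buffer timer.
 * @c: UBIFS file-system description object
 * @wbuf: write-buffer descriptor
 */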
static void new_wbuf_timer_nolock(struct ubifs_info *c, struct ubifs_wbuf *wbuf)
{
        ktime_t softlimit = ms_to_ktime(dirty_writeback_interval * 10);
        unsigned long long delta = dirty_writeback_interval;

        /* centi to milli, milli to nano, then 10% */
        delta *= 10ULL * NSEC_PER_MSEC / 10ULL;

        ubifs_assert(c, !hrtimer_active(&wbuf->timer));
        ubifs_assert(c, delta <= ULONG_MAX);

        if (wbuf->no_timer)
                return;
        dbg_io("set timer for jhead %s, %llu-%llu millisecs",
               dbg_jhead(wbuf->jhead),
               div_u64(ktime_to_ns(softlimit), USEC_PER_SEC),
               div_u64(ktime_to_ns(softlimit) + delta, USEC_PER_SEC));
        hrtimer_start_range_ns(&wbuf->timer, softlimit, delta,
                               HRTIMER_MODE_REL);
}

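/**
 * cancel_wbuf_timer_nolock - cancel write-buffer timer.
 * @wbuf: write-buffer descriptor
 */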
static void cancel_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
{
        if (wbuf->no_timer)
                return;
        wbuf->need_sync = 0;
        hrtimer_cancel(&wbuf->timer);
}

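/**
 * ubifs_wbuf_sync_nolock - synchronize write-buffer.
 * @wbuf: write-buffer to synchronize
 *
 * This function synchronizes write-buffer @wbuf and returns zero in case of
 * success or a negative error code in case of failure.
 *
 * Note, although write-buffers are of @c->max_write_size, this function does
 * not necessarily write all @c->max_write_size bytes to the flash. Instead,
 * if the write-buffer is only partially filled with data, only the used part
 * of the write-buffer (aligned on a @c->min_io_size boundary) is
 * synchronized. This way we waste less space.
 */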
int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
{
        struct ubifs_info *c = wbuf->c;
        int err, dirt, sync_len;

        cancel_wbuf_timer_nolock(wbuf);
        if (!wbuf->used || wbuf->lnum == -1)
                /* Write-buffer is empty or not seeked */
                return 0;

        dbg_io("LEB %d:%d, %d bytes, jhead %s",
               wbuf->lnum, wbuf->offs, wbuf->used, dbg_jhead(wbuf->jhead));
        ubifs_assert(c, !(wbuf->avail & 7));
        ubifs_assert(c, wbuf->offs + wbuf->size <= c->leb_size);
        ubifs_assert(c, wbuf->size >= c->min_io_size);
        ubifs_assert(c, wbuf->size <= c->max_write_size);
        ubifs_assert(c, wbuf->size % c->min_io_size == 0);
        ubifs_assert(c, !c->ro_media && !c->ro_mount);
        if (c->leb_size - wbuf->offs >= c->max_write_size)
                ubifs_assert(c, !((wbuf->offs + wbuf->size) % c->max_write_size));

        if (c->ro_error)
                return -EROFS;

        /*
         * Do not write the whole write-buffer but write only the minimum
         * necessary amount of min. I/O units.
         */
        sync_len = ALIGN(wbuf->used, c->min_io_size);
        dirt = sync_len - wbuf->used;
        if (dirt)
                ubifs_pad(c, wbuf->buf + wbuf->used, dirt);
        err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, sync_len);
        if (err)
                return err;

        spin_lock(&wbuf->lock);
        wbuf->offs += sync_len;

        /*
         * Now @wbuf->offs is not necessarily aligned to @c->max_write_size.
         * But our goal is to write in @c->max_write_size chunks at
         * @c->max_write_size-aligned offsets. So if @wbuf->offs is not
         * aligned to @c->max_write_size now, make sure that
         * @wbuf->offs + @wbuf->size is, so that after the next write-buffer
         * flush we are again at the optimal offset.
         */
        if (c->leb_size - wbuf->offs < c->max_write_size)
                wbuf->size = c->leb_size - wbuf->offs;
        else if (wbuf->offs & (c->max_write_size - 1))
                wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs;
        else
                wbuf->size = c->max_write_size;
        wbuf->avail = wbuf->size;
        wbuf->used = 0;
        wbuf->next_ino = 0;
        spin_unlock(&wbuf->lock);

        if (wbuf->sync_callback)
                err = wbuf->sync_callback(c, wbuf->lnum,
                                          c->leb_size - wbuf->offs, dirt);
        return err;
}

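/**
 * ubifs_wbuf_seek_nolock - seek write-buffer.
 * @wbuf: write-buffer
 * @lnum: logical eraseblock number to seek to
 * @offs: logical eraseblock offset to seek to
 *
 * This function targets the write-buffer to logical eraseblock @lnum:@offs.
 * The write-buffer has to be empty. Returns zero in case of success and a
 * negative error code in case of failure.
 */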
int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs)
{
        const struct ubifs_info *c = wbuf->c;

        dbg_io("LEB %d:%d, jhead %s", lnum, offs, dbg_jhead(wbuf->jhead));
        ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt);
        ubifs_assert(c, offs >= 0 && offs <= c->leb_size);
        ubifs_assert(c, offs % c->min_io_size == 0 && !(offs & 7));
        ubifs_assert(c, lnum != wbuf->lnum);
        ubifs_assert(c, wbuf->used == 0);

        spin_lock(&wbuf->lock);
        wbuf->lnum = lnum;
        wbuf->offs = offs;
        if (c->leb_size - wbuf->offs < c->max_write_size)
                wbuf->size = c->leb_size - wbuf->offs;
        else if (wbuf->offs & (c->max_write_size - 1))
                wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs;
        else
                wbuf->size = c->max_write_size;
        wbuf->avail = wbuf->size;
        wbuf->used = 0;
        spin_unlock(&wbuf->lock);

        return 0;
}

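/**
 * ubifs_bg_wbufs_sync - synchronize write-buffers.
 * @c: UBIFS file-system description object
 *
 * This function is called by the background thread to synchronize
 * write-buffers. Returns zero in case of success and a negative error code in
 * case of failure.
 */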
int ubifs_bg_wbufs_sync(struct ubifs_info *c)
{
        int err, i;

        ubifs_assert(c, !c->ro_media && !c->ro_mount);
        if (!c->need_wbuf_sync)
                return 0;
        c->need_wbuf_sync = 0;

        if (c->ro_error) {
                err = -EROFS;
                goto out_timers;
        }

        dbg_io("synchronize");
        for (i = 0; i < c->jhead_cnt; i++) {
                struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

                cond_resched();

                /*
                 * If the mutex is locked then wbuf is being changed, so
                 * synchronization is not necessary.
                 */
                if (mutex_is_locked(&wbuf->io_mutex))
                        continue;

                mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
                if (!wbuf->need_sync) {
                        mutex_unlock(&wbuf->io_mutex);
                        continue;
                }

                err = ubifs_wbuf_sync_nolock(wbuf);
                mutex_unlock(&wbuf->io_mutex);
                if (err) {
                        ubifs_err(c, "cannot sync write-buffer, error %d", err);
                        ubifs_ro_mode(c, err);
                        goto out_timers;
                }
        }

        return 0;

out_timers:
        /* Cancel all timers to prevent repeated timer invocations */
        for (i = 0; i < c->jhead_cnt; i++) {
                struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

                mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
                cancel_wbuf_timer_nolock(wbuf);
                mutex_unlock(&wbuf->io_mutex);
        }
        return err;
}

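/**
 * ubifs_wbuf_write_nolock - write data to flash via write-buffer.
 * @wbuf: write-buffer
 * @buf: node to write
 * @len: node length
 *
 * This function writes data to flash via write-buffer @wbuf. This means that
 * the last piece of the node won't reach the flash media immediately if it
 * does not take a whole max. write unit (@c->max_write_size). Instead, the
 * node is stored in @wbuf and written to the flash media later when the
 * write-buffer is synchronized, e.g. by the timer or by a following write.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure. If the node cannot be written because there is no more
 * space in this logical eraseblock, %-ENOSPC is returned.
 */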
int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
{
        struct ubifs_info *c = wbuf->c;
        int err, written, n, aligned_len = ALIGN(len, 8);

        dbg_io("%d bytes (%s) to jhead %s wbuf at LEB %d:%d", len,
               dbg_ntype(((struct ubifs_ch *)buf)->node_type),
               dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs + wbuf->used);
        ubifs_assert(c, len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt);
        ubifs_assert(c, wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0);
        ubifs_assert(c, !(wbuf->offs & 7) && wbuf->offs <= c->leb_size);
        ubifs_assert(c, wbuf->avail > 0 && wbuf->avail <= wbuf->size);
        ubifs_assert(c, wbuf->size >= c->min_io_size);
        ubifs_assert(c, wbuf->size <= c->max_write_size);
        ubifs_assert(c, wbuf->size % c->min_io_size == 0);
        ubifs_assert(c, mutex_is_locked(&wbuf->io_mutex));
        ubifs_assert(c, !c->ro_media && !c->ro_mount);
        ubifs_assert(c, !c->space_fixup);
        if (c->leb_size - wbuf->offs >= c->max_write_size)
                ubifs_assert(c, !((wbuf->offs + wbuf->size) % c->max_write_size));

        if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) {
                err = -ENOSPC;
                goto out;
        }

        cancel_wbuf_timer_nolock(wbuf);

        if (c->ro_error)
                return -EROFS;

        if (aligned_len <= wbuf->avail) {
                /*
                 * The node is not very large and fits entirely within the
                 * write-buffer.
                 */
                memcpy(wbuf->buf + wbuf->used, buf, len);

                if (aligned_len == wbuf->avail) {
                        dbg_io("flush jhead %s wbuf to LEB %d:%d",
                               dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
                        err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf,
                                              wbuf->offs, wbuf->size);
                        if (err)
                                goto out;

                        spin_lock(&wbuf->lock);
                        wbuf->offs += wbuf->size;
                        if (c->leb_size - wbuf->offs >= c->max_write_size)
                                wbuf->size = c->max_write_size;
                        else
                                wbuf->size = c->leb_size - wbuf->offs;
                        wbuf->avail = wbuf->size;
                        wbuf->used = 0;
                        wbuf->next_ino = 0;
                        spin_unlock(&wbuf->lock);
                } else {
                        spin_lock(&wbuf->lock);
                        wbuf->avail -= aligned_len;
                        wbuf->used += aligned_len;
                        spin_unlock(&wbuf->lock);
                }

                goto exit;
        }

        written = 0;

        if (wbuf->used) {
                /*
                 * The node is large and does not fit entirely within the
                 * current available space. Fill and flush the write-buffer
                 * and switch to the next max. write unit.
                 */
                dbg_io("flush jhead %s wbuf to LEB %d:%d",
                       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
                memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail);
                err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs,
                                      wbuf->size);
                if (err)
                        goto out;

                wbuf->offs += wbuf->size;
                len -= wbuf->avail;
                aligned_len -= wbuf->avail;
                written += wbuf->avail;
        } else if (wbuf->offs & (c->max_write_size - 1)) {
                /*
                 * The write-buffer offset is not aligned to
                 * @c->max_write_size and @wbuf->size is less than
                 * @c->max_write_size. Write @wbuf->size bytes to make sure
                 * the following writes are done in optimal
                 * @c->max_write_size chunks.
                 */
                dbg_io("write %d bytes to LEB %d:%d",
                       wbuf->size, wbuf->lnum, wbuf->offs);
                err = ubifs_leb_write(c, wbuf->lnum, buf, wbuf->offs,
                                      wbuf->size);
                if (err)
                        goto out;

                wbuf->offs += wbuf->size;
                len -= wbuf->size;
                aligned_len -= wbuf->size;
                written += wbuf->size;
        }

        /*
         * The remaining data may take more space than the write-buffer can
         * hold, so write as many complete max. write units as possible
         * directly to the flash media.
         */
        n = aligned_len >> c->max_write_shift;
        if (n) {
                n <<= c->max_write_shift;
                dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum,
                       wbuf->offs);
                err = ubifs_leb_write(c, wbuf->lnum, buf + written,
                                      wbuf->offs, n);
                if (err)
                        goto out;
                wbuf->offs += n;
                aligned_len -= n;
                len -= n;
                written += n;
        }

        spin_lock(&wbuf->lock);
        if (aligned_len)
                /*
                 * Whatever is left does not take a whole max. write unit, so
                 * keep it in the write-buffer.
                 */
                memcpy(wbuf->buf, buf + written, len);

        if (c->leb_size - wbuf->offs >= c->max_write_size)
                wbuf->size = c->max_write_size;
        else
                wbuf->size = c->leb_size - wbuf->offs;
        wbuf->avail = wbuf->size - aligned_len;
        wbuf->used = aligned_len;
        wbuf->next_ino = 0;
        spin_unlock(&wbuf->lock);

exit:
        if (wbuf->sync_callback) {
                int free = c->leb_size - wbuf->offs - wbuf->used;

                err = wbuf->sync_callback(c, wbuf->lnum, free, 0);
                if (err)
                        goto out;
        }

        if (wbuf->used)
                new_wbuf_timer_nolock(c, wbuf);

        return 0;

out:
        ubifs_err(c, "cannot write %d bytes to LEB %d:%d, error %d",
                  len, wbuf->lnum, wbuf->offs, err);
        ubifs_dump_node(c, buf);
        dump_stack();
        ubifs_dump_leb(c, wbuf->lnum);
        return err;
}

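/**
 * ubifs_write_node_hmac - write node to the media.
 * @c: UBIFS file-system description object
 * @buf: the node to write
 * @len: node length
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 * @hmac_offs: offset of the HMAC within the node
 *
 * This function automatically fills the node magic number, assigns the
 * sequence number, and calculates the node CRC checksum. The length of the
 * @buf buffer has to be aligned to the minimal I/O unit size; padding is
 * appended automatically if needed. Returns zero in case of success and a
 * negative error code in case of failure.
 */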
int ubifs_write_node_hmac(struct ubifs_info *c, void *buf, int len, int lnum,
                          int offs, int hmac_offs)
{
        int err, buf_len = ALIGN(len, c->min_io_size);

        dbg_io("LEB %d:%d, %s, length %d (aligned %d)",
               lnum, offs, dbg_ntype(((struct ubifs_ch *)buf)->node_type), len,
               buf_len);
        ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
        ubifs_assert(c, offs % c->min_io_size == 0 && offs < c->leb_size);
        ubifs_assert(c, !c->ro_media && !c->ro_mount);
        ubifs_assert(c, !c->space_fixup);

        if (c->ro_error)
                return -EROFS;

        err = ubifs_prepare_node_hmac(c, buf, len, hmac_offs, 1);
        if (err)
                return err;

        err = ubifs_leb_write(c, lnum, buf, offs, buf_len);
        if (err)
                ubifs_dump_node(c, buf);

        return err;
}

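/**
 * ubifs_write_node - write node to the media.
 * @c: UBIFS file-system description object
 * @buf: the node to write
 * @len: node length
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 *
 * This function is the same as 'ubifs_write_node_hmac()', but without
 * inserting an HMAC.
 */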
int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum,
                     int offs)
{
        return ubifs_write_node_hmac(c, buf, len, lnum, offs, -1);
}

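/**
 * ubifs_read_node_wbuf - read node from the media or write-buffer.
 * @wbuf: wbuf to check for un-written data
 * @buf: buffer to read to
 * @type: node type
 * @len: node length
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 *
 * This function reads a node of known type and length, checks it and stores
 * it in @buf. If the node partially or fully sits in the write-buffer, this
 * function takes the data from the buffer, otherwise it reads the flash
 * media. Returns zero in case of success, %-EUCLEAN if the CRC mismatched and
 * a negative error code in case of failure.
 */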
int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
                         int lnum, int offs)
{
        const struct ubifs_info *c = wbuf->c;
        int err, rlen, overlap;
        struct ubifs_ch *ch = buf;

        dbg_io("LEB %d:%d, %s, length %d, jhead %s", lnum, offs,
               dbg_ntype(type), len, dbg_jhead(wbuf->jhead));
        ubifs_assert(c, wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
        ubifs_assert(c, !(offs & 7) && offs < c->leb_size);
        ubifs_assert(c, type >= 0 && type < UBIFS_NODE_TYPES_CNT);

        spin_lock(&wbuf->lock);
        overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs);
        if (!overlap) {
                /* We may safely unlock the write-buffer and read the data */
                spin_unlock(&wbuf->lock);
                return ubifs_read_node(c, buf, type, len, lnum, offs);
        }

        /* Don't read under wbuf */
        rlen = wbuf->offs - offs;
        if (rlen < 0)
                rlen = 0;

        /* Copy the rest from the write-buffer */
        memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen);
        spin_unlock(&wbuf->lock);

        if (rlen > 0) {
                /* Read everything that goes before the write-buffer */
                err = ubifs_leb_read(c, lnum, buf, offs, rlen, 0);
                if (err && err != -EBADMSG)
                        return err;
        }

        if (type != ch->node_type) {
                ubifs_err(c, "bad node type (%d but expected %d)",
                          ch->node_type, type);
                goto out;
        }

        err = ubifs_check_node(c, buf, lnum, offs, 0, 0);
        if (err) {
                ubifs_err(c, "expected node type %d", type);
                return err;
        }

        rlen = le32_to_cpu(ch->len);
        if (rlen != len) {
                ubifs_err(c, "bad node length %d, expected %d", rlen, len);
                goto out;
        }

        return 0;

out:
        ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
        ubifs_dump_node(c, buf);
        dump_stack();
        return -EINVAL;
}

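/**
 * ubifs_read_node - read node.
 * @c: UBIFS file-system description object
 * @buf: buffer to read to
 * @type: node type
 * @len: node length (not aligned)
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 *
 * This function reads a node of known type and length, checks it and stores
 * it in @buf. Returns zero in case of success, %-EUCLEAN if the CRC
 * mismatched and a negative error code in case of failure.
 */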
int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len,
                    int lnum, int offs)
{
        int err, l;
        struct ubifs_ch *ch = buf;

        dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);
        ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
        ubifs_assert(c, len >= UBIFS_CH_SZ && offs + len <= c->leb_size);
        ubifs_assert(c, !(offs & 7) && offs < c->leb_size);
        ubifs_assert(c, type >= 0 && type < UBIFS_NODE_TYPES_CNT);

        err = ubifs_leb_read(c, lnum, buf, offs, len, 0);
        if (err && err != -EBADMSG)
                return err;

        if (type != ch->node_type) {
                ubifs_errc(c, "bad node type (%d but expected %d)",
                           ch->node_type, type);
                goto out;
        }

        err = ubifs_check_node(c, buf, lnum, offs, 0, 0);
        if (err) {
                ubifs_errc(c, "expected node type %d", type);
                return err;
        }

        l = le32_to_cpu(ch->len);
        if (l != len) {
                ubifs_errc(c, "bad node length %d, expected %d", l, len);
                goto out;
        }

        return 0;

out:
        ubifs_errc(c, "bad node at LEB %d:%d, LEB mapping status %d", lnum,
                   offs, ubi_is_mapped(c->ubi, lnum));
        if (!c->probing) {
                ubifs_dump_node(c, buf);
                dump_stack();
        }
        return -EINVAL;
}

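/**
 * ubifs_wbuf_init - initialize write-buffer.
 * @c: UBIFS file-system description object
 * @wbuf: write-buffer to initialize
 *
 * This function initializes the write-buffer. Returns zero in case of success
 * and %-ENOMEM in case of failure.
 */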
int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf)
{
        size_t size;

        wbuf->buf = kmalloc(c->max_write_size, GFP_KERNEL);
        if (!wbuf->buf)
                return -ENOMEM;

        size = (c->max_write_size / UBIFS_CH_SZ + 1) * sizeof(ino_t);
        wbuf->inodes = kmalloc(size, GFP_KERNEL);
        if (!wbuf->inodes) {
                kfree(wbuf->buf);
                wbuf->buf = NULL;
                return -ENOMEM;
        }

        wbuf->used = 0;
        wbuf->lnum = wbuf->offs = -1;
        /*
         * If the LEB starts at the max. write size aligned address, then
         * write-buffer size has to be set to @c->max_write_size. Otherwise,
         * it has to be aligned to the closest max. write size boundary.
         */
        size = c->max_write_size - (c->leb_start % c->max_write_size);
        wbuf->avail = wbuf->size = size;
        wbuf->sync_callback = NULL;
        mutex_init(&wbuf->io_mutex);
        spin_lock_init(&wbuf->lock);
        wbuf->c = c;
        wbuf->next_ino = 0;

        hrtimer_init(&wbuf->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        wbuf->timer.function = wbuf_timer_callback_nolock;
        return 0;
}

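/**
 * ubifs_wbuf_add_ino_nolock - add an inode number to the wbuf inode array.
 * @wbuf: the write-buffer where to add
 * @inum: the inode number
 *
 * This function adds an inode number to the inode array of the write-buffer.
 */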
void ubifs_wbuf_add_ino_nolock(struct ubifs_wbuf *wbuf, ino_t inum)
{
        if (!wbuf->buf)
                /* NOR flash or something similar */
                return;

        spin_lock(&wbuf->lock);
        if (wbuf->used)
                wbuf->inodes[wbuf->next_ino++] = inum;
        spin_unlock(&wbuf->lock);
}

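/**
 * wbuf_has_ino - returns if the wbuf contains data from the inode.
 * @wbuf: the write-buffer
 * @inum: the inode number
 *
 * This function returns %1 if the write-buffer contains some data from the
 * given inode, otherwise it returns %0.
 */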
static int wbuf_has_ino(struct ubifs_wbuf *wbuf, ino_t inum)
{
        int i, ret = 0;

        spin_lock(&wbuf->lock);
        for (i = 0; i < wbuf->next_ino; i++)
                if (inum == wbuf->inodes[i]) {
                        ret = 1;
                        break;
                }
        spin_unlock(&wbuf->lock);

        return ret;
}

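/**
 * ubifs_sync_wbufs_by_inode - synchronize write-buffers for an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to synchronize
 *
 * This function synchronizes write-buffers which contain nodes belonging to
 * @inode. Returns zero in case of success and a negative error code in case
 * of failure.
 */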
int ubifs_sync_wbufs_by_inode(struct ubifs_info *c, struct inode *inode)
{
        int i, err = 0;

        for (i = 0; i < c->jhead_cnt; i++) {
                struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

                if (i == GCHD)
                        /*
                         * GC head is special, do not look at it. Even if the
                         * head contains something related to this inode, it
                         * is a _copy_ of the corresponding on-flash node
                         * which sits somewhere else.
                         */
                        continue;

                if (!wbuf_has_ino(wbuf, inode->i_ino))
                        continue;

                mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
                if (wbuf_has_ino(wbuf, inode->i_ino))
                        err = ubifs_wbuf_sync_nolock(wbuf);
                mutex_unlock(&wbuf->io_mutex);

                if (err) {
                        ubifs_ro_mode(c, err);
                        return err;
                }
        }
        return 0;
}