Lines matching refs: c (the struct ubifs_info context pointer, fs/ubifs/io.c)

82 void ubifs_ro_mode(struct ubifs_info *c, int err)  in ubifs_ro_mode()  argument
84 if (!c->ro_error) { in ubifs_ro_mode()
85 c->ro_error = 1; in ubifs_ro_mode()
86 c->no_chk_data_crc = 0; in ubifs_ro_mode()
87 c->vfs_sb->s_flags |= MS_RDONLY; in ubifs_ro_mode()
88 ubifs_warn(c, "switched to read-only mode, error %d", err); in ubifs_ro_mode()
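
The fragment above shows the pattern used when an I/O error is treated as fatal: latch a read-only flag exactly once, stop skipping data CRC checks, and mark the superblock read-only. A minimal userspace sketch of the same latch; the names fs_state and fs_force_ro are illustrative, not taken from the kernel:

#include <stdio.h>

/* Illustrative stand-in for the relevant ubifs_info fields. */
struct fs_state {
    int ro_error;        /* latched once a fatal I/O error occurs */
    int no_chk_data_crc; /* CRC skipping must be disabled in this state */
};

/* Model of the "switch to read-only once" behaviour seen above. */
static void fs_force_ro(struct fs_state *s, int err)
{
    if (s->ro_error)
        return;               /* already read-only, nothing to do */
    s->ro_error = 1;          /* latch: later writes will be refused */
    s->no_chk_data_crc = 0;   /* from now on always verify data CRCs */
    fprintf(stderr, "switched to read-only mode, error %d\n", err);
}

int main(void)
{
    struct fs_state s = { 0, 1 };
    fs_force_ro(&s, -5);      /* first error switches to read-only */
    fs_force_ro(&s, -5);      /* second call is a no-op */
    return 0;
}
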
99 int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs, in ubifs_leb_read() argument
104 err = ubi_read(c->ubi, lnum, buf, offs, len); in ubifs_leb_read()
110 ubifs_err(c, "reading %d bytes from LEB %d:%d failed, error %d", in ubifs_leb_read()
117 int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs, in ubifs_leb_write() argument
122 ubifs_assert(!c->ro_media && !c->ro_mount); in ubifs_leb_write()
123 if (c->ro_error) in ubifs_leb_write()
125 if (!dbg_is_tst_rcvry(c)) in ubifs_leb_write()
126 err = ubi_leb_write(c->ubi, lnum, buf, offs, len); in ubifs_leb_write()
128 err = dbg_leb_write(c, lnum, buf, offs, len); in ubifs_leb_write()
130 ubifs_err(c, "writing %d bytes to LEB %d:%d failed, error %d", in ubifs_leb_write()
132 ubifs_ro_mode(c, err); in ubifs_leb_write()
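
ubifs_leb_write() and the sibling wrappers below share one shape: refuse the operation if the filesystem is already in the error state, route the call either to the real UBI operation or to a debugging hook that can inject failures (dbg_is_tst_rcvry), and drop to read-only mode on any error. A hedged sketch of that shape; backend_write, debug_write and leb_write are made-up stand-ins, not the real API:

#include <stdio.h>
#include <errno.h>

struct fs_state {
    int ro_error;          /* latched after the first fatal error */
    int inject_failures;   /* stand-in for dbg_is_tst_rcvry() */
};

/* Stand-ins for the real UBI call and the failure-injecting debug hook. */
static int backend_write(int lnum, const void *buf, int offs, int len)
{
    (void)lnum; (void)buf; (void)offs; (void)len;
    return 0;
}

static int debug_write(int lnum, const void *buf, int offs, int len)
{
    (void)lnum; (void)buf; (void)offs; (void)len;
    return -EIO;                       /* pretend the write failed */
}

static int leb_write(struct fs_state *s, int lnum, const void *buf,
                     int offs, int len)
{
    int err;

    if (s->ro_error)
        return -EROFS;                 /* refuse writes once read-only */
    if (!s->inject_failures)
        err = backend_write(lnum, buf, offs, len);
    else
        err = debug_write(lnum, buf, offs, len);
    if (err) {
        fprintf(stderr, "writing %d bytes to LEB %d:%d failed, error %d\n",
                len, lnum, offs, err);
        s->ro_error = 1;               /* any write failure is fatal */
    }
    return err;
}

int main(void)
{
    struct fs_state s = { .inject_failures = 1 };

    leb_write(&s, 0, "data", 0, 4);                         /* fails, latches ro_error */
    printf("retry: %d\n", leb_write(&s, 0, "data", 0, 4));  /* -EROFS */
    return 0;
}
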
138 int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len) in ubifs_leb_change() argument
142 ubifs_assert(!c->ro_media && !c->ro_mount); in ubifs_leb_change()
143 if (c->ro_error) in ubifs_leb_change()
145 if (!dbg_is_tst_rcvry(c)) in ubifs_leb_change()
146 err = ubi_leb_change(c->ubi, lnum, buf, len); in ubifs_leb_change()
148 err = dbg_leb_change(c, lnum, buf, len); in ubifs_leb_change()
150 ubifs_err(c, "changing %d bytes in LEB %d failed, error %d", in ubifs_leb_change()
152 ubifs_ro_mode(c, err); in ubifs_leb_change()
158 int ubifs_leb_unmap(struct ubifs_info *c, int lnum) in ubifs_leb_unmap() argument
162 ubifs_assert(!c->ro_media && !c->ro_mount); in ubifs_leb_unmap()
163 if (c->ro_error) in ubifs_leb_unmap()
165 if (!dbg_is_tst_rcvry(c)) in ubifs_leb_unmap()
166 err = ubi_leb_unmap(c->ubi, lnum); in ubifs_leb_unmap()
168 err = dbg_leb_unmap(c, lnum); in ubifs_leb_unmap()
170 ubifs_err(c, "unmap LEB %d failed, error %d", lnum, err); in ubifs_leb_unmap()
171 ubifs_ro_mode(c, err); in ubifs_leb_unmap()
177 int ubifs_leb_map(struct ubifs_info *c, int lnum) in ubifs_leb_map() argument
181 ubifs_assert(!c->ro_media && !c->ro_mount); in ubifs_leb_map()
182 if (c->ro_error) in ubifs_leb_map()
184 if (!dbg_is_tst_rcvry(c)) in ubifs_leb_map()
185 err = ubi_leb_map(c->ubi, lnum); in ubifs_leb_map()
187 err = dbg_leb_map(c, lnum); in ubifs_leb_map()
189 ubifs_err(c, "mapping LEB %d failed, error %d", lnum, err); in ubifs_leb_map()
190 ubifs_ro_mode(c, err); in ubifs_leb_map()
196 int ubifs_is_mapped(const struct ubifs_info *c, int lnum) in ubifs_is_mapped() argument
200 err = ubi_is_mapped(c->ubi, lnum); in ubifs_is_mapped()
202 ubifs_err(c, "ubi_is_mapped failed for LEB %d, error %d", in ubifs_is_mapped()
237 int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, in ubifs_check_node() argument
244 ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0); in ubifs_check_node()
245 ubifs_assert(!(offs & 7) && offs < c->leb_size); in ubifs_check_node()
250 ubifs_err(c, "bad magic %#08x, expected %#08x", in ubifs_check_node()
259 ubifs_err(c, "bad node type %d", type); in ubifs_check_node()
264 if (node_len + offs > c->leb_size) in ubifs_check_node()
267 if (c->ranges[type].max_len == 0) { in ubifs_check_node()
268 if (node_len != c->ranges[type].len) in ubifs_check_node()
270 } else if (node_len < c->ranges[type].min_len || in ubifs_check_node()
271 node_len > c->ranges[type].max_len) in ubifs_check_node()
274 if (!must_chk_crc && type == UBIFS_DATA_NODE && !c->mounting && in ubifs_check_node()
275 !c->remounting_rw && c->no_chk_data_crc) in ubifs_check_node()
282 ubifs_err(c, "bad CRC: calculated %#08x, read %#08x", in ubifs_check_node()
292 ubifs_err(c, "bad node length %d", node_len); in ubifs_check_node()
295 ubifs_err(c, "bad node at LEB %d:%d", lnum, offs); in ubifs_check_node()
296 ubifs_dump_node(c, buf); in ubifs_check_node()
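
ubifs_check_node() validates a node header before it is trusted: the magic number, the node type, and then the length, which for fixed-size node types must equal c->ranges[type].len and for variable-size types must lie within min_len..max_len and must not run past the end of the LEB. A self-contained sketch of that length check; the range values and LEB size below are illustrative, not the real UBIFS constants:

#include <stdio.h>

#define LEB_SIZE 131072              /* illustrative LEB size */

struct node_range {
    int len;                         /* exact length for fixed-size node types */
    int min_len, max_len;            /* bounds for variable-size node types */
};

/* Toy table: type 0 is fixed at 160 bytes, type 1 is 48..4144 bytes. */
static const struct node_range ranges[] = {
    { .len = 160 },
    { .min_len = 48, .max_len = 4144 },
};

static int check_node_len(int type, int node_len, int offs)
{
    if (node_len + offs > LEB_SIZE)
        return -1;                           /* runs past end of LEB */
    if (ranges[type].max_len == 0) {
        if (node_len != ranges[type].len)
            return -1;                       /* fixed-size mismatch */
    } else if (node_len < ranges[type].min_len ||
               node_len > ranges[type].max_len) {
        return -1;                           /* outside allowed range */
    }
    return 0;
}

int main(void)
{
    printf("%d\n", check_node_len(0, 160, 0));   /* 0: ok */
    printf("%d\n", check_node_len(0, 161, 0));   /* -1: wrong size */
    printf("%d\n", check_node_len(1, 8192, 0));  /* -1: too long */
    return 0;
}
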
318 void ubifs_pad(const struct ubifs_info *c, void *buf, int pad) in ubifs_pad() argument
348 static unsigned long long next_sqnum(struct ubifs_info *c) in next_sqnum() argument
352 spin_lock(&c->cnt_lock); in next_sqnum()
353 sqnum = ++c->max_sqnum; in next_sqnum()
354 spin_unlock(&c->cnt_lock); in next_sqnum()
358 ubifs_err(c, "sequence number overflow %llu, end of life", in next_sqnum()
360 ubifs_ro_mode(c, -EINVAL); in next_sqnum()
362 ubifs_warn(c, "running out of sequence numbers, end of life soon"); in next_sqnum()
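
next_sqnum() hands out a strictly increasing 64-bit sequence number under c->cnt_lock, warns when the counter gets close to its end, and reports overflow (and goes read-only) when it is nearly exhausted. A minimal sketch using a pthread mutex; the watermark values are illustrative, not the real UBIFS constants:

#include <stdio.h>
#include <pthread.h>

/* Illustrative watermarks: warn near the end, report overflow just before it. */
#define SQNUM_WARN  (~0ULL - 1000000ULL)
#define SQNUM_STOP  (~0ULL - 1000ULL)

static pthread_mutex_t cnt_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long long max_sqnum;

static unsigned long long next_sqnum(void)
{
    unsigned long long sqnum;

    pthread_mutex_lock(&cnt_lock);
    sqnum = ++max_sqnum;                     /* strictly increasing */
    pthread_mutex_unlock(&cnt_lock);

    if (sqnum >= SQNUM_STOP)
        fprintf(stderr, "sequence number overflow %llu, end of life\n",
                sqnum);                      /* the listing also forces read-only here */
    else if (sqnum >= SQNUM_WARN)
        fprintf(stderr, "running out of sequence numbers, end of life soon\n");
    return sqnum;
}

int main(void)
{
    max_sqnum = SQNUM_WARN - 2;              /* start near the warning zone */
    for (int i = 0; i < 3; i++)
        printf("sqnum %llu\n", next_sqnum());
    return 0;
}
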
379 void ubifs_prepare_node(struct ubifs_info *c, void *node, int len, int pad) in ubifs_prepare_node() argument
383 unsigned long long sqnum = next_sqnum(c); in ubifs_prepare_node()
397 pad = ALIGN(len, c->min_io_size) - len; in ubifs_prepare_node()
398 ubifs_pad(c, node + len, pad); in ubifs_prepare_node()
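
ubifs_prepare_node() pads the node out to the next min_io_size boundary so that the flash write is always a whole number of minimal I/O units: pad = ALIGN(len, c->min_io_size) - len. A small sketch with a simplified version of the ALIGN() macro (valid for power-of-two alignments):

#include <stdio.h>

/* Round x up to the next multiple of a (a must be a power of two). */
#define ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
    int min_io_size = 2048;                  /* e.g. one NAND page */
    int lens[] = { 160, 2048, 2049, 4096 };

    for (int i = 0; i < 4; i++) {
        int len = lens[i];
        int pad = ALIGN(len, min_io_size) - len;
        printf("node len %4d -> pad %4d, write %4d bytes\n",
               len, pad, len + pad);
    }
    return 0;
}
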
412 void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last) in ubifs_prep_grp_node() argument
416 unsigned long long sqnum = next_sqnum(c); in ubifs_prep_grp_node()
444 wbuf->c->need_wbuf_sync = 1; in wbuf_timer_callback_nolock()
445 ubifs_wake_up_bgt(wbuf->c); in wbuf_timer_callback_nolock()
495 struct ubifs_info *c = wbuf->c; in ubifs_wbuf_sync_nolock() local
506 ubifs_assert(wbuf->offs + wbuf->size <= c->leb_size); in ubifs_wbuf_sync_nolock()
507 ubifs_assert(wbuf->size >= c->min_io_size); in ubifs_wbuf_sync_nolock()
508 ubifs_assert(wbuf->size <= c->max_write_size); in ubifs_wbuf_sync_nolock()
509 ubifs_assert(wbuf->size % c->min_io_size == 0); in ubifs_wbuf_sync_nolock()
510 ubifs_assert(!c->ro_media && !c->ro_mount); in ubifs_wbuf_sync_nolock()
511 if (c->leb_size - wbuf->offs >= c->max_write_size) in ubifs_wbuf_sync_nolock()
512 ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size)); in ubifs_wbuf_sync_nolock()
514 if (c->ro_error) in ubifs_wbuf_sync_nolock()
521 sync_len = ALIGN(wbuf->used, c->min_io_size); in ubifs_wbuf_sync_nolock()
524 ubifs_pad(c, wbuf->buf + wbuf->used, dirt); in ubifs_wbuf_sync_nolock()
525 err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, sync_len); in ubifs_wbuf_sync_nolock()
541 if (c->leb_size - wbuf->offs < c->max_write_size) in ubifs_wbuf_sync_nolock()
542 wbuf->size = c->leb_size - wbuf->offs; in ubifs_wbuf_sync_nolock()
543 else if (wbuf->offs & (c->max_write_size - 1)) in ubifs_wbuf_sync_nolock()
544 wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs; in ubifs_wbuf_sync_nolock()
546 wbuf->size = c->max_write_size; in ubifs_wbuf_sync_nolock()
553 err = wbuf->sync_callback(c, wbuf->lnum, in ubifs_wbuf_sync_nolock()
554 c->leb_size - wbuf->offs, dirt); in ubifs_wbuf_sync_nolock()
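
ubifs_wbuf_sync_nolock() pads the used part of the write-buffer up to a min_io_size boundary, writes that out, and then recomputes how large the buffer may grow: only to the end of the LEB if less than max_write_size remains, up to the next max_write_size boundary if the offset is unaligned, and a full max_write_size otherwise. A userspace sketch of just that size arithmetic; field names mirror the wbuf, the sizes are illustrative:

#include <stdio.h>

#define ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))

struct wbuf {
    int offs;   /* LEB offset the buffer currently starts at */
    int used;   /* bytes of real data in the buffer */
    int size;   /* how many bytes the buffer may hold before a flush */
};

/* Recompute wbuf->size after a sync, as in the listing above. */
static void recompute_size(struct wbuf *w, int leb_size, int max_write_size)
{
    if (leb_size - w->offs < max_write_size)
        w->size = leb_size - w->offs;                       /* tail of the LEB */
    else if (w->offs & (max_write_size - 1))
        w->size = ALIGN(w->offs, max_write_size) - w->offs; /* realign */
    else
        w->size = max_write_size;                           /* fully aligned */
}

int main(void)
{
    int leb_size = 131072, min_io = 2048, max_write = 8192;
    struct wbuf w = { .offs = 0, .used = 3000 };

    /* Sync: pad the used data up to a min_io_size boundary and "write" it. */
    int sync_len = ALIGN(w.used, min_io);
    int dirt = sync_len - w.used;
    printf("write %d bytes (%d of them padding) at offset %d\n",
           sync_len, dirt, w.offs);

    w.offs += sync_len;
    w.used = 0;
    recompute_size(&w, leb_size, max_write);
    printf("new offs %d, buffer may grow to %d bytes\n", w.offs, w.size);
    return 0;
}
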
570 const struct ubifs_info *c = wbuf->c; in ubifs_wbuf_seek_nolock() local
573 ubifs_assert(lnum >= 0 && lnum < c->leb_cnt); in ubifs_wbuf_seek_nolock()
574 ubifs_assert(offs >= 0 && offs <= c->leb_size); in ubifs_wbuf_seek_nolock()
575 ubifs_assert(offs % c->min_io_size == 0 && !(offs & 7)); in ubifs_wbuf_seek_nolock()
582 if (c->leb_size - wbuf->offs < c->max_write_size) in ubifs_wbuf_seek_nolock()
583 wbuf->size = c->leb_size - wbuf->offs; in ubifs_wbuf_seek_nolock()
584 else if (wbuf->offs & (c->max_write_size - 1)) in ubifs_wbuf_seek_nolock()
585 wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs; in ubifs_wbuf_seek_nolock()
587 wbuf->size = c->max_write_size; in ubifs_wbuf_seek_nolock()
603 int ubifs_bg_wbufs_sync(struct ubifs_info *c) in ubifs_bg_wbufs_sync() argument
607 ubifs_assert(!c->ro_media && !c->ro_mount); in ubifs_bg_wbufs_sync()
608 if (!c->need_wbuf_sync) in ubifs_bg_wbufs_sync()
610 c->need_wbuf_sync = 0; in ubifs_bg_wbufs_sync()
612 if (c->ro_error) { in ubifs_bg_wbufs_sync()
618 for (i = 0; i < c->jhead_cnt; i++) { in ubifs_bg_wbufs_sync()
619 struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf; in ubifs_bg_wbufs_sync()
639 ubifs_err(c, "cannot sync write-buffer, error %d", err); in ubifs_bg_wbufs_sync()
640 ubifs_ro_mode(c, err); in ubifs_bg_wbufs_sync()
649 for (i = 0; i < c->jhead_cnt; i++) { in ubifs_bg_wbufs_sync()
650 struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf; in ubifs_bg_wbufs_sync()
677 struct ubifs_info *c = wbuf->c; in ubifs_wbuf_write_nolock() local
683 ubifs_assert(len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt); in ubifs_wbuf_write_nolock()
684 ubifs_assert(wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0); in ubifs_wbuf_write_nolock()
685 ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size); in ubifs_wbuf_write_nolock()
687 ubifs_assert(wbuf->size >= c->min_io_size); in ubifs_wbuf_write_nolock()
688 ubifs_assert(wbuf->size <= c->max_write_size); in ubifs_wbuf_write_nolock()
689 ubifs_assert(wbuf->size % c->min_io_size == 0); in ubifs_wbuf_write_nolock()
691 ubifs_assert(!c->ro_media && !c->ro_mount); in ubifs_wbuf_write_nolock()
692 ubifs_assert(!c->space_fixup); in ubifs_wbuf_write_nolock()
693 if (c->leb_size - wbuf->offs >= c->max_write_size) in ubifs_wbuf_write_nolock()
694 ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size)); in ubifs_wbuf_write_nolock()
696 if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) { in ubifs_wbuf_write_nolock()
703 if (c->ro_error) in ubifs_wbuf_write_nolock()
716 err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, in ubifs_wbuf_write_nolock()
723 if (c->leb_size - wbuf->offs >= c->max_write_size) in ubifs_wbuf_write_nolock()
724 wbuf->size = c->max_write_size; in ubifs_wbuf_write_nolock()
726 wbuf->size = c->leb_size - wbuf->offs; in ubifs_wbuf_write_nolock()
752 err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, in ubifs_wbuf_write_nolock()
761 } else if (wbuf->offs & (c->max_write_size - 1)) { in ubifs_wbuf_write_nolock()
771 err = ubifs_leb_write(c, wbuf->lnum, buf, wbuf->offs, in ubifs_wbuf_write_nolock()
788 n = aligned_len >> c->max_write_shift; in ubifs_wbuf_write_nolock()
790 n <<= c->max_write_shift; in ubifs_wbuf_write_nolock()
793 err = ubifs_leb_write(c, wbuf->lnum, buf + written, in ubifs_wbuf_write_nolock()
812 if (c->leb_size - wbuf->offs >= c->max_write_size) in ubifs_wbuf_write_nolock()
813 wbuf->size = c->max_write_size; in ubifs_wbuf_write_nolock()
815 wbuf->size = c->leb_size - wbuf->offs; in ubifs_wbuf_write_nolock()
823 int free = c->leb_size - wbuf->offs - wbuf->used; in ubifs_wbuf_write_nolock()
825 err = wbuf->sync_callback(c, wbuf->lnum, free, 0); in ubifs_wbuf_write_nolock()
836 ubifs_err(c, "cannot write %d bytes to LEB %d:%d, error %d", in ubifs_wbuf_write_nolock()
838 ubifs_dump_node(c, buf); in ubifs_wbuf_write_nolock()
840 ubifs_dump_leb(c, wbuf->lnum); in ubifs_wbuf_write_nolock()
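
ubifs_wbuf_write_nolock() is the interesting path: data that fits is kept in the buffer, the buffer is flushed once it fills, and a large remainder that is already max_write_size-aligned is written straight to the LEB in whole chunks, computed as n = aligned_len >> c->max_write_shift followed by n <<= c->max_write_shift, with only the tail going back into the buffer. A toy sketch of that chunking arithmetic; no real I/O is performed and the sizes are illustrative:

#include <stdio.h>

int main(void)
{
    int max_write_size  = 8192;
    int max_write_shift = 13;        /* log2(max_write_size) */
    int aligned_len     = 20480;     /* length left after the buffer was flushed */

    /*
     * Largest multiple of max_write_size that fits in aligned_len:
     * these whole chunks are written directly, bypassing the write-buffer.
     */
    int n = aligned_len >> max_write_shift;
    n <<= max_write_shift;

    printf("write %d bytes directly, buffer the remaining %d bytes\n",
           n, aligned_len - n);      /* 16384 direct, 4096 buffered */
    return 0;
}
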
858 int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum, in ubifs_write_node() argument
861 int err, buf_len = ALIGN(len, c->min_io_size); in ubifs_write_node()
866 ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0); in ubifs_write_node()
867 ubifs_assert(offs % c->min_io_size == 0 && offs < c->leb_size); in ubifs_write_node()
868 ubifs_assert(!c->ro_media && !c->ro_mount); in ubifs_write_node()
869 ubifs_assert(!c->space_fixup); in ubifs_write_node()
871 if (c->ro_error) in ubifs_write_node()
874 ubifs_prepare_node(c, buf, len, 1); in ubifs_write_node()
875 err = ubifs_leb_write(c, lnum, buf, offs, buf_len); in ubifs_write_node()
877 ubifs_dump_node(c, buf); in ubifs_write_node()
900 const struct ubifs_info *c = wbuf->c; in ubifs_read_node_wbuf() local
906 ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0); in ubifs_read_node_wbuf()
907 ubifs_assert(!(offs & 7) && offs < c->leb_size); in ubifs_read_node_wbuf()
915 return ubifs_read_node(c, buf, type, len, lnum, offs); in ubifs_read_node_wbuf()
929 err = ubifs_leb_read(c, lnum, buf, offs, rlen, 0); in ubifs_read_node_wbuf()
935 ubifs_err(c, "bad node type (%d but expected %d)", in ubifs_read_node_wbuf()
940 err = ubifs_check_node(c, buf, lnum, offs, 0, 0); in ubifs_read_node_wbuf()
942 ubifs_err(c, "expected node type %d", type); in ubifs_read_node_wbuf()
948 ubifs_err(c, "bad node length %d, expected %d", rlen, len); in ubifs_read_node_wbuf()
955 ubifs_err(c, "bad node at LEB %d:%d", lnum, offs); in ubifs_read_node_wbuf()
956 ubifs_dump_node(c, buf); in ubifs_read_node_wbuf()
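
ubifs_read_node_wbuf() has to return data that may still be sitting in a journal head's write-buffer: if the requested range does not overlap the buffer it falls back to ubifs_read_node(), otherwise it reads only the part below the buffered region from the LEB and takes the rest from the buffer. A sketch of that split; read_flash() and read_with_wbuf() are toy stand-ins for the real calls:

#include <stdio.h>
#include <string.h>

#define LEB_SIZE 64

static char flash[LEB_SIZE];          /* toy LEB contents */

static void read_flash(char *dst, int offs, int len)
{
    memcpy(dst, flash + offs, len);
}

/*
 * Read [offs, offs+len) of a LEB when [wbuf_offs, wbuf_offs+wbuf_used)
 * of it still lives only in the write-buffer.
 */
static void read_with_wbuf(char *dst, int offs, int len,
                           const char *wbuf, int wbuf_offs, int wbuf_used)
{
    int overlap = offs + len > wbuf_offs && offs < wbuf_offs + wbuf_used;

    if (!overlap) {
        read_flash(dst, offs, len);            /* nothing buffered in this range */
        return;
    }
    int rlen = wbuf_offs - offs;               /* part already on flash */
    if (rlen < 0)
        rlen = 0;
    if (rlen)
        read_flash(dst, offs, rlen);
    /* The rest comes straight from the write-buffer. */
    memcpy(dst + rlen, wbuf + (offs + rlen - wbuf_offs), len - rlen);
}

int main(void)
{
    char wbuf[16], out[32];

    memset(flash, 'F', sizeof(flash));         /* data already on flash */
    memset(wbuf, 'W', sizeof(wbuf));           /* data still buffered */

    /* Buffer covers LEB offsets 32..47; a read of 24..39 crosses the boundary. */
    read_with_wbuf(out, 24, 16, wbuf, 32, 16);
    printf("%.16s\n", out);                    /* prints FFFFFFFFWWWWWWWW */
    return 0;
}
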
974 int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len, in ubifs_read_node() argument
981 ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0); in ubifs_read_node()
982 ubifs_assert(len >= UBIFS_CH_SZ && offs + len <= c->leb_size); in ubifs_read_node()
983 ubifs_assert(!(offs & 7) && offs < c->leb_size); in ubifs_read_node()
986 err = ubifs_leb_read(c, lnum, buf, offs, len, 0); in ubifs_read_node()
991 ubifs_errc(c, "bad node type (%d but expected %d)", in ubifs_read_node()
996 err = ubifs_check_node(c, buf, lnum, offs, 0, 0); in ubifs_read_node()
998 ubifs_errc(c, "expected node type %d", type); in ubifs_read_node()
1004 ubifs_errc(c, "bad node length %d, expected %d", l, len); in ubifs_read_node()
1011 ubifs_errc(c, "bad node at LEB %d:%d, LEB mapping status %d", lnum, in ubifs_read_node()
1012 offs, ubi_is_mapped(c->ubi, lnum)); in ubifs_read_node()
1013 if (!c->probing) { in ubifs_read_node()
1014 ubifs_dump_node(c, buf); in ubifs_read_node()
1028 int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf) in ubifs_wbuf_init() argument
1032 wbuf->buf = kmalloc(c->max_write_size, GFP_KERNEL); in ubifs_wbuf_init()
1036 size = (c->max_write_size / UBIFS_CH_SZ + 1) * sizeof(ino_t); in ubifs_wbuf_init()
1052 size = c->max_write_size - (c->leb_start % c->max_write_size); in ubifs_wbuf_init()
1057 wbuf->c = c; in ubifs_wbuf_init()
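
ubifs_wbuf_init() sizes the buffer so that the first flush ends on a max_write_size boundary: if the LEB data area (c->leb_start) is not itself aligned, the initial size is only the distance to the next boundary. A tiny sketch of that computation with illustrative offsets:

#include <stdio.h>

int main(void)
{
    int max_write_size = 8192;
    int leb_starts[] = { 0, 2048, 4096 };      /* illustrative data-area offsets */

    for (int i = 0; i < 3; i++) {
        int leb_start = leb_starts[i];
        /* Initial buffer size: reach the next max. write boundary. */
        int size = max_write_size - (leb_start % max_write_size);
        printf("leb_start %4d -> initial wbuf size %4d\n", leb_start, size);
    }
    return 0;
}
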
1120 int ubifs_sync_wbufs_by_inode(struct ubifs_info *c, struct inode *inode) in ubifs_sync_wbufs_by_inode() argument
1124 for (i = 0; i < c->jhead_cnt; i++) { in ubifs_sync_wbufs_by_inode()
1125 struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf; in ubifs_sync_wbufs_by_inode()
1145 ubifs_ro_mode(c, err); in ubifs_sync_wbufs_by_inode()