This source file includes the following definitions:
- jffs2_wbuf_pending_for_ino
- jffs2_clear_wbuf_ino_list
- jffs2_wbuf_dirties_inode
- jffs2_refile_wbuf_blocks
- jffs2_block_refile
- jffs2_incore_replace_raw
- jffs2_verify_write
- jffs2_wbuf_recover
- __jffs2_flush_wbuf
- jffs2_flush_wbuf_gc
- jffs2_flush_wbuf_pad
- jffs2_fill_wbuf
- jffs2_flash_writev
- jffs2_flash_write
- jffs2_flash_read
- jffs2_check_oob_empty
- jffs2_check_nand_cleanmarker
- jffs2_write_nand_cleanmarker
- jffs2_write_nand_badblock
- work_to_sb
- delayed_wbuf_sync
- jffs2_dirty_trigger
- jffs2_nand_flash_setup
- jffs2_nand_flash_cleanup
- jffs2_dataflash_setup
- jffs2_dataflash_cleanup
- jffs2_nor_wbuf_flash_setup
- jffs2_nor_wbuf_flash_cleanup
- jffs2_ubivol_setup
- jffs2_ubivol_cleanup

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/crc32.h>
#include <linux/mtd/rawnand.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/writeback.h>

#include "nodelist.h"

/* For testing write failures */
#undef BREAKME
#undef BREAKMEHEADER

#ifdef BREAKME
static unsigned char *brokenbuf;
#endif

#define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
#define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )

/* Maximum erase failures before we mark a block bad */
#define MAX_ERASE_FAILURES	2

struct jffs2_inodirty {
	uint32_t ino;
	struct jffs2_inodirty *next;
};

static struct jffs2_inodirty inodirty_nomem;

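/*
 * Check whether the write-buffer still holds data belonging to @ino.
 * ino == 0 means "any non-GC write": if anything at all is pending, the
 * answer is yes. If an earlier allocation failure installed the
 * inodirty_nomem marker, every inode is treated as pending.
 */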
static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino)
{
	struct jffs2_inodirty *this = c->wbuf_inodes;

	if (this == &inodirty_nomem)
		return 1;

	if (this && !ino)
		return 1;

	while (this) {
		if (this->ino == ino)
			return 1;
		this = this->next;
	}
	return 0;
}

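/*
 * Free the list of inodes dirtied by the write-buffer. The static
 * inodirty_nomem marker was never kmalloc()ed, so it must not be freed.
 */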
static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c)
{
	struct jffs2_inodirty *this;

	this = c->wbuf_inodes;

	if (this != &inodirty_nomem) {
		while (this) {
			struct jffs2_inodirty *next = this->next;
			kfree(this);
			this = next;
		}
	}
	c->wbuf_inodes = NULL;
}

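/*
 * Note that the write-buffer now holds data for @ino and arm the
 * delayed flush. If the bookkeeping allocation fails, fall back to
 * treating every inode as dirty via the inodirty_nomem marker.
 */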
static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
{
	struct jffs2_inodirty *new;

	jffs2_dirty_trigger(c);

	if (jffs2_wbuf_pending_for_ino(c, ino))
		return;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		jffs2_dbg(1, "No memory to allocate inodirty. Fallback to all considered dirty\n");
		jffs2_clear_wbuf_ino_list(c);
		c->wbuf_inodes = &inodirty_nomem;
		return;
	}
	new->ino = ino;
	new->next = c->wbuf_inodes;
	c->wbuf_inodes = new;
	return;
}

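/*
 * Move eraseblocks that were only waiting for the write-buffer flush
 * from erasable_pending_wbuf_list to their final list. Most of them
 * (roughly 127 in 128) go straight onto erase_pending_list; the rest
 * stay merely erasable, spreading erase activity over time.
 */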
static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
{
	struct list_head *this, *next;
	static int n;

	if (list_empty(&c->erasable_pending_wbuf_list))
		return;

	list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
		struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);

		jffs2_dbg(1, "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n",
			  jeb->offset);
		list_del(this);
		if ((jiffies + (n++)) & 127) {
			jffs2_dbg(1, "...and adding to erase_pending_list\n");
			list_add_tail(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_garbage_collect_trigger(c);
		} else {
			jffs2_dbg(1, "...and adding to erasable_list\n");
			list_add_tail(&jeb->list, &c->erasable_list);
		}
	}
}

#define REFILE_NOTEMPTY 0
#define REFILE_ANYWAY   1

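/*
 * Take an eraseblock that suffered a write failure off its current
 * list. A block that still holds nodes goes to bad_used_list; an empty
 * one (only permitted with REFILE_ANYWAY) is queued for erasure. Any
 * space still free in the block is written off as wasted.
 */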
static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
{
	jffs2_dbg(1, "About to refile bad block at %08x\n", jeb->offset);

	if (c->nextblock == jeb)
		c->nextblock = NULL;
	else
		list_del(&jeb->list);
	if (jeb->first_node) {
		jffs2_dbg(1, "Refiling block at %08x to bad_used_list\n",
			  jeb->offset);
		list_add(&jeb->list, &c->bad_used_list);
	} else {
		BUG_ON(allow_empty == REFILE_NOTEMPTY);

		jffs2_dbg(1, "Refiling block at %08x to erase_pending_list\n",
			  jeb->offset);
		list_add(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;
		jffs2_garbage_collect_trigger(c);
	}

	if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) {
		uint32_t oldfree = jeb->free_size;

		jffs2_link_node_ref(c, jeb,
				    (jeb->offset + c->sector_size - oldfree) | REF_OBSOLETE,
				    oldfree, NULL);
		/* The link made the free space count as dirty; convert it to wasted */
		c->wasted_size += oldfree;
		jeb->wasted_size += oldfree;
		c->dirty_size -= oldfree;
		jeb->dirty_size -= oldfree;
	}

	jffs2_dbg_dump_block_lists_nolock(c);
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
}

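/*
 * Find the in-core pointer (f->metadata, full_dnode or full_dirent)
 * that refers to @raw, so the caller can redirect it to the node's new
 * location after recovery. Returns NULL for node types that have no
 * in-core representation to fix up.
 */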
static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c,
							    struct jffs2_inode_info *f,
							    struct jffs2_raw_node_ref *raw,
							    union jffs2_node_union *node)
{
	struct jffs2_node_frag *frag;
	struct jffs2_full_dirent *fd;

	dbg_noderef("incore_replace_raw: node at %p is {%04x,%04x}\n",
		    node, je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype));

	BUG_ON(je16_to_cpu(node->u.magic) != 0x1985 &&
	       je16_to_cpu(node->u.magic) != 0);

	switch (je16_to_cpu(node->u.nodetype)) {
	case JFFS2_NODETYPE_INODE:
		if (f->metadata && f->metadata->raw == raw) {
			dbg_noderef("Will replace ->raw in f->metadata at %p\n", f->metadata);
			return &f->metadata->raw;
		}
		frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset));
		BUG_ON(!frag);
		/* Find a frag which refers to the full_dnode we want to modify */
		while (!frag->node || frag->node->raw != raw) {
			frag = frag_next(frag);
			BUG_ON(!frag);
		}
		dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node);
		return &frag->node->raw;

	case JFFS2_NODETYPE_DIRENT:
		for (fd = f->dents; fd; fd = fd->next) {
			if (fd->raw == raw) {
				dbg_noderef("Will replace ->raw in full_dirent at %p\n", fd);
				return &fd->raw;
			}
		}
		BUG();

	default:
		dbg_noderef("Don't care about replacing raw for nodetype %x\n",
			    je16_to_cpu(node->u.nodetype));
		break;
	}
	return NULL;
}

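/*
 * Optionally read a just-written page back and compare it against the
 * write-buffer contents, dumping both on mismatch.
 */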
#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
static int jffs2_verify_write(struct jffs2_sb_info *c, unsigned char *buf,
			      uint32_t ofs)
{
	int ret;
	size_t retlen;
	char *eccstr;

	ret = mtd_read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify);
	if (ret && ret != -EUCLEAN && ret != -EBADMSG) {
		pr_warn("%s(): Read back of page at %08x failed: %d\n",
			__func__, c->wbuf_ofs, ret);
		return ret;
	} else if (retlen != c->wbuf_pagesize) {
		pr_warn("%s(): Read back of page at %08x gave short read: %zd not %d\n",
			__func__, ofs, retlen, c->wbuf_pagesize);
		return -EIO;
	}
	if (!memcmp(buf, c->wbuf_verify, c->wbuf_pagesize))
		return 0;

	if (ret == -EUCLEAN)
		eccstr = "corrected";
	else if (ret == -EBADMSG)
		eccstr = "correction failed";
	else
		eccstr = "OK or unused";

	pr_warn("Write verify error (ECC %s) at %08x. Wrote:\n",
		eccstr, c->wbuf_ofs);
	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
		       c->wbuf, c->wbuf_pagesize, 0);

	pr_warn("Read back:\n");
	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
		       c->wbuf_verify, c->wbuf_pagesize, 0);

	return -EIO;
}
#else
#define jffs2_verify_write(c,b,o) (0)
#endif

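/*
 * Recover from a failure to write the wbuf: refile the failing block,
 * rewrite the nodes that had already been committed to it (together
 * with the still-buffered data) at a freshly allocated location, and
 * repoint all in-core references at the new copies.
 */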
static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
{
	struct jffs2_eraseblock *jeb, *new_jeb;
	struct jffs2_raw_node_ref *raw, *next, *first_raw = NULL;
	size_t retlen;
	int ret;
	int nr_refile = 0;
	unsigned char *buf;
	uint32_t start, end, ofs, len;

	jeb = &c->blocks[c->wbuf_ofs / c->sector_size];

	spin_lock(&c->erase_completion_lock);
	if (c->wbuf_ofs % c->mtd->erasesize)
		jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
	else
		jffs2_block_refile(c, jeb, REFILE_ANYWAY);
	spin_unlock(&c->erase_completion_lock);

	BUG_ON(!ref_obsolete(jeb->last_node));

	/* Find the first node to be recovered: skip every node which is
	   obsolete or which ends before the wbuf starts */
	for (next = raw = jeb->first_node; next; raw = next) {
		next = ref_next(raw);

		if (ref_obsolete(raw) ||
		    (next && ref_offset(next) <= c->wbuf_ofs)) {
			dbg_noderef("Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
				    ref_offset(raw), ref_flags(raw),
				    (ref_offset(raw) + ref_totlen(c, jeb, raw)),
				    c->wbuf_ofs);
			continue;
		}
		dbg_noderef("First node to be recovered is at 0x%08x(%d)-0x%08x\n",
			    ref_offset(raw), ref_flags(raw),
			    (ref_offset(raw) + ref_totlen(c, jeb, raw)));

		first_raw = raw;
		break;
	}

	if (!first_raw) {
		/* All nodes were obsolete. Nothing to recover. */
		jffs2_dbg(1, "No non-obsolete nodes to be recovered. Just filing block bad\n");
		c->wbuf_len = 0;
		return;
	}

	start = ref_offset(first_raw);
	end = ref_offset(jeb->last_node);
	nr_refile = 1;

	/* Count the number of refs which need to be copied */
	while ((raw = ref_next(raw)) != jeb->last_node)
		nr_refile++;

	dbg_noderef("wbuf recover %08x-%08x (%d bytes in %d nodes)\n",
		    start, end, end - start, nr_refile);

	buf = NULL;
	if (start < c->wbuf_ofs) {
		/* The first affected node was already partially written to
		   flash; try to read the old data back into a buffer. */
		buf = kmalloc(end - start, GFP_KERNEL);
		if (!buf) {
			pr_crit("Malloc failure in wbuf recovery. Data loss ensues.\n");

			goto read_failed;
		}

		ret = mtd_read(c->mtd, start, c->wbuf_ofs - start, &retlen,
			       buf);

		/* ECC recovered ? */
		if ((ret == -EUCLEAN || ret == -EBADMSG) &&
		    (retlen == c->wbuf_ofs - start))
			ret = 0;

		if (ret || retlen != c->wbuf_ofs - start) {
			pr_crit("Old data are already lost in wbuf recovery. Data loss ensues.\n");

			kfree(buf);
			buf = NULL;
		read_failed:
			/* Drop the unreadable node, plus any obsolete ones after it */
			first_raw = ref_next(first_raw);
			nr_refile--;
			while (first_raw && ref_obsolete(first_raw)) {
				first_raw = ref_next(first_raw);
				nr_refile--;
			}

			/* If this was the only node to be recovered, give up */
			if (!first_raw) {
				c->wbuf_len = 0;
				return;
			}

			/* It wasn't. Go on and recover the nodes still complete in the wbuf */
			start = ref_offset(first_raw);
			dbg_noderef("wbuf now recover %08x-%08x (%d bytes in %d nodes)\n",
				    start, end, end - start, nr_refile);

		} else {
			/* Read succeeded. Copy the remaining data from the write buffer */
			memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
		}
	}

	/* We are to rewrite (end-start) bytes of data from first_raw onwards.
	   Either 'buf' contains the data, or we find it in the wbuf */
	ret = jffs2_reserve_space_gc(c, end - start, &len, JFFS2_SUMMARY_NOSUM_SIZE);
	if (ret) {
		pr_warn("Failed to allocate space for wbuf recovery. Data loss ensues.\n");
		kfree(buf);
		return;
	}

	/* The summary is not recovered, so it must be disabled for this erase block */
	jffs2_sum_disable_collecting(c->summary);

	ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile);
	if (ret) {
		pr_warn("Failed to allocate node refs for wbuf recovery. Data loss ensues.\n");
		kfree(buf);
		return;
	}

	ofs = write_ofs(c);

	if (end - start >= c->wbuf_pagesize) {
		/* Need to do another write immediately. This may be because
		   the wbuf itself was completely full and nothing had to be
		   read back from flash, so 'buf' is not necessarily what we
		   are writing from. */
		unsigned char *rewrite_buf = buf ?: c->wbuf;
		uint32_t towrite = (end - start) - ((end - start) % c->wbuf_pagesize);

		jffs2_dbg(1, "Write 0x%x bytes at 0x%08x in wbuf recover\n",
			  towrite, ofs);

#ifdef BREAKMEHEADER
		static int breakme;
		if (breakme++ == 20) {
			pr_notice("Faking write error at 0x%08x\n", ofs);
			breakme = 0;
			mtd_write(c->mtd, ofs, towrite, &retlen, brokenbuf);
			ret = -EIO;
		} else
#endif
			ret = mtd_write(c->mtd, ofs, towrite, &retlen,
					rewrite_buf);

		if (ret || retlen != towrite || jffs2_verify_write(c, rewrite_buf, ofs)) {
			/* The rewrite itself failed. Nothing more we can do. */
			pr_crit("Recovery of wbuf failed due to a second write error\n");
			kfree(buf);

			if (retlen)
				jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL);

			return;
		}
		pr_notice("Recovery of wbuf succeeded to %08x\n", ofs);

		c->wbuf_len = (end - start) - towrite;
		c->wbuf_ofs = ofs + towrite;
		memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
	} else {
		/* Everything fits in the wbuf again; keep the dregs there */
		if (buf) {
			memcpy(c->wbuf, buf, end - start);
		} else {
			memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
		}
		c->wbuf_ofs = ofs;
		c->wbuf_len = end - start;
	}

	/* Now move the raw node refs from the old block to the new one */
	new_jeb = &c->blocks[ofs / c->sector_size];

	spin_lock(&c->erase_completion_lock);
	for (raw = first_raw; raw != jeb->last_node; raw = ref_next(raw)) {
		uint32_t rawlen = ref_totlen(c, jeb, raw);
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref *new_ref;
		struct jffs2_raw_node_ref **adjust_ref = NULL;
		struct jffs2_inode_info *f = NULL;

		jffs2_dbg(1, "Refiling block of %08x at %08x(%d) to %08x\n",
			  rawlen, ref_offset(raw), ref_flags(raw), ofs);

		ic = jffs2_raw_ref_to_ic(raw);

		if (ic && ic->class == RAWNODE_CLASS_XATTR_DATUM) {
			struct jffs2_xattr_datum *xd = (void *)ic;
			BUG_ON(xd->node != raw);
			adjust_ref = &xd->node;
			raw->next_in_ino = NULL;
			ic = NULL;
		} else if (ic && ic->class == RAWNODE_CLASS_XATTR_REF) {
			struct jffs2_xattr_ref *xr = (void *)ic;
			BUG_ON(xr->node != raw);
			adjust_ref = &xr->node;
			raw->next_in_ino = NULL;
			ic = NULL;
		} else if (ic && ic->class == RAWNODE_CLASS_INODE_CACHE) {
			struct jffs2_raw_node_ref **p = &ic->nodes;

			/* Remove the old node from the per-inode list */
			while (*p && *p != (void *)ic) {
				if (*p == raw) {
					(*p) = (raw->next_in_ino);
					raw->next_in_ino = NULL;
					break;
				}
				p = &((*p)->next_in_ino);
			}

			if (ic->state == INO_STATE_PRESENT && !ref_obsolete(raw)) {
				/* The inode is in core, so any full_dirent or
				   full_dnode pointing at the old node must be
				   adjusted to point at the new one */
				f = jffs2_gc_fetch_inode(c, ic->ino, !ic->pino_nlink);
				if (IS_ERR(f)) {
					/* Should never happen; it _must_ be present */
					JFFS2_ERROR("Failed to iget() ino #%u, err %ld\n",
						    ic->ino, PTR_ERR(f));
					BUG();
				}
				/* We don't lock f->sem: we hold alloc_sem, so nobody
				   else will modify it, and we only change a single
				   ->raw pointer, which readers can tolerate */
				adjust_ref = jffs2_incore_replace_raw(c, f, raw,
								      (void *)(buf ?: c->wbuf) + (ref_offset(raw) - start));
			} else if (unlikely(ic->state != INO_STATE_PRESENT &&
					    ic->state != INO_STATE_CHECKEDABSENT &&
					    ic->state != INO_STATE_GC)) {
				JFFS2_ERROR("Inode #%u is in strange state %d!\n", ic->ino, ic->state);
				BUG();
			}
		}

		new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic);

		if (adjust_ref) {
			BUG_ON(*adjust_ref != raw);
			*adjust_ref = new_ref;
		}
		if (f)
			jffs2_gc_release_inode(c, f);

		if (!ref_obsolete(raw)) {
			jeb->dirty_size += rawlen;
			jeb->used_size -= rawlen;
			c->dirty_size += rawlen;
			c->used_size -= rawlen;
			raw->flash_offset = ref_offset(raw) | REF_OBSOLETE;
			BUG_ON(raw->next_in_ino);
		}
		ofs += rawlen;
	}

	kfree(buf);

	/* Fix up the original jeb now it's on the bad_list */
	if (first_raw == jeb->first_node) {
		jffs2_dbg(1, "Failing block at %08x is now empty. Moving to erase_pending_list\n",
			  jeb->offset);
		list_move(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;
		jffs2_garbage_collect_trigger(c);
	}

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);

	spin_unlock(&c->erase_completion_lock);

	jffs2_dbg(1, "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n",
		  c->wbuf_ofs, c->wbuf_len);
}

/* Meaning of pad argument:
   NOPAD:          Do not pad. Only flush if there is data.
   PAD_NOACCOUNT:  Pad, but do not adjust the eraseblock accounting.
   PAD_ACCOUNTING: Pad, and account the padding as dirty/wasted space.
*/
#define NOPAD		0
#define PAD_NOACCOUNT	1
#define PAD_ACCOUNTING	2

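/*
 * Flush the write-buffer to flash, padding the rest of the page as
 * requested by the pad mode above. Expects c->alloc_sem to be held and
 * the callers to hold wbuf_sem for writing; on a write failure it hands
 * off to jffs2_wbuf_recover().
 */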
static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
{
	struct jffs2_eraseblock *wbuf_jeb;
	int ret;
	size_t retlen;

	/* Nothing to do if we're not write-buffering the flash */
	if (!jffs2_is_writebuffered(c))
		return 0;

	if (!mutex_is_locked(&c->alloc_sem)) {
		pr_crit("jffs2_flush_wbuf() called with alloc_sem not locked!\n");
		BUG();
	}

	if (!c->wbuf_len)
		return 0;

	wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
	if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1))
		return -ENOMEM;

	/* Claim the remaining space on the page if requested: this happens
	   when we move to a new block, or when an fsync forces a flush of a
	   partially filled page. */
	if (pad) {
		c->wbuf_len = PAD(c->wbuf_len);

		/* Pad with a JFFS2_NODETYPE_PADDING node so the scan can skip it */
		memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);

		if (c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
			struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
			padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
			padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
			padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
			padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode) - 4));
		}
	}
	/* else jffs2_flash_writev() has already filled the rest of the
	   buffer and will deal with the node refs itself */

#ifdef BREAKME
	static int breakme;
	if (breakme++ == 20) {
		pr_notice("Faking write error at 0x%08x\n", c->wbuf_ofs);
		breakme = 0;
		mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
			  brokenbuf);
		ret = -EIO;
	} else
#endif

		ret = mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize,
				&retlen, c->wbuf);

	if (ret) {
		pr_warn("jffs2_flush_wbuf(): Write failed with %d\n", ret);
		goto wfail;
	} else if (retlen != c->wbuf_pagesize) {
		pr_warn("jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
			retlen, c->wbuf_pagesize);
		ret = -EIO;
		goto wfail;
	} else if ((ret = jffs2_verify_write(c, c->wbuf, c->wbuf_ofs))) {
wfail:
		jffs2_wbuf_recover(c);

		return ret;
	}

	/* Adjust free size of the block if we padded */
	if (pad) {
		uint32_t waste = c->wbuf_pagesize - c->wbuf_len;

		jffs2_dbg(1, "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
			  (wbuf_jeb == c->nextblock) ? "next" : "",
			  wbuf_jeb->offset);

		/* wbuf_pagesize - wbuf_len is the amount of space that is to
		   be padded; if the block has less free space than that,
		   the accounting is broken. */
		if (wbuf_jeb->free_size < waste) {
			pr_crit("jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
				c->wbuf_ofs, c->wbuf_len, waste);
			pr_crit("jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
				wbuf_jeb->offset, wbuf_jeb->free_size);
			BUG();
		}

		spin_lock(&c->erase_completion_lock);

		jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL);
		/* That made it count as dirty; convert it to wasted */
		wbuf_jeb->dirty_size -= waste;
		c->dirty_size -= waste;
		wbuf_jeb->wasted_size += waste;
		c->wasted_size += waste;
	} else
		spin_lock(&c->erase_completion_lock);

	/* Stick any now-obsoleted blocks on the erase_pending_list */
	jffs2_refile_wbuf_blocks(c);
	jffs2_clear_wbuf_ino_list(c);
	spin_unlock(&c->erase_completion_lock);

	memset(c->wbuf, 0xff, c->wbuf_pagesize);
	/* Advance the buffer offset, else the next write looks non-contiguous */
	c->wbuf_ofs += c->wbuf_pagesize;
	c->wbuf_len = 0;
	return 0;
}

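/*
 * Flush the write-buffer by letting the garbage collector produce
 * enough output to fill it. With ino == 0, any pending non-GC write
 * forces the flush; otherwise only pending data for that inode does.
 * If GC cannot run (or fails), flush with padding instead.
 */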
int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
{
	uint32_t old_wbuf_ofs;
	uint32_t old_wbuf_len;
	int ret = 0;

	jffs2_dbg(1, "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino);

	if (!c->wbuf)
		return 0;

	mutex_lock(&c->alloc_sem);
	if (!jffs2_wbuf_pending_for_ino(c, ino)) {
		jffs2_dbg(1, "Ino #%d not pending in wbuf. Returning\n", ino);
		mutex_unlock(&c->alloc_sem);
		return 0;
	}

	old_wbuf_ofs = c->wbuf_ofs;
	old_wbuf_len = c->wbuf_len;

	if (c->unchecked_size) {
		/* GC won't work until checking has finished; pad instead */
		jffs2_dbg(1, "%s(): padding. Not finished checking\n",
			  __func__);
		down_write(&c->wbuf_sem);
		ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		/* retry in case jffs2_wbuf_recover left data in the wbuf */
		if (ret)
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		up_write(&c->wbuf_sem);
	} else while (old_wbuf_len &&
		      old_wbuf_ofs == c->wbuf_ofs) {

		mutex_unlock(&c->alloc_sem);

		jffs2_dbg(1, "%s(): calls gc pass\n", __func__);

		ret = jffs2_garbage_collect_pass(c);
		if (ret) {
			/* GC failed. Flush it with padding instead */
			mutex_lock(&c->alloc_sem);
			down_write(&c->wbuf_sem);
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			/* retry in case jffs2_wbuf_recover left data in the wbuf */
			if (ret)
				ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			up_write(&c->wbuf_sem);
			break;
		}
		mutex_lock(&c->alloc_sem);
	}

	jffs2_dbg(1, "%s(): ends...\n", __func__);

	mutex_unlock(&c->alloc_sem);
	return ret;
}

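/* Flush the write-buffer immediately, padding out the current page. */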
int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
{
	int ret;

	if (!c->wbuf)
		return 0;

	down_write(&c->wbuf_sem);
	ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
	/* retry in case jffs2_wbuf_recover left data in the wbuf */
	if (ret)
		ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
	up_write(&c->wbuf_sem);

	return ret;
}

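/*
 * Copy as much of @buf into the write-buffer as fits and return the
 * number of bytes consumed. Returns 0 when the buffer is empty and the
 * data is big enough to go to flash directly.
 */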
static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf,
			      size_t len)
{
	if (len && !c->wbuf_len && (len >= c->wbuf_pagesize))
		return 0;

	if (len > (c->wbuf_pagesize - c->wbuf_len))
		len = c->wbuf_pagesize - c->wbuf_len;
	memcpy(c->wbuf + c->wbuf_len, buf, len);
	c->wbuf_len += (uint32_t) len;
	return len;
}

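/*
 * Write an iovec through the write-buffer: page-aligned middles go
 * straight to the MTD device, while leading and trailing fragments are
 * staged in the buffer. Also feeds the eraseblock summary and records
 * which inode owns the buffered tail.
 */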
int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
		       unsigned long count, loff_t to, size_t *retlen,
		       uint32_t ino)
{
	struct jffs2_eraseblock *jeb;
	size_t wbuf_retlen, donelen = 0;
	uint32_t outvec_to = to;
	int ret, invec;

	/* If the flash isn't write-buffered, don't bother */
	if (!jffs2_is_writebuffered(c))
		return jffs2_flash_direct_writev(c, invecs, count, to, retlen);

	down_write(&c->wbuf_sem);

	/* If wbuf_ofs is not initialized, set it to the target address */
	if (c->wbuf_ofs == 0xFFFFFFFF) {
		c->wbuf_ofs = PAGE_DIV(to);
		c->wbuf_len = PAGE_MOD(to);
		memset(c->wbuf, 0xff, c->wbuf_pagesize);
	}

	/*
	 * Sanity checks on the target address. It is permitted to write at
	 * PAD(c->wbuf_ofs + c->wbuf_len), or at the beginning of a new
	 * erase block. Anything else is a bug.
	 */
	if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
		/* It's a write to a new block */
		if (c->wbuf_len) {
			jffs2_dbg(1, "%s(): to 0x%lx causes flush of wbuf at 0x%08x\n",
				  __func__, (unsigned long)to, c->wbuf_ofs);
			ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
			if (ret)
				goto outerr;
		}
		/* set pointer to new block */
		c->wbuf_ofs = PAGE_DIV(to);
		c->wbuf_len = PAGE_MOD(to);
	}

	if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
		/* We're not writing immediately after the write-buffer. Bad. */
		pr_crit("%s(): Non-contiguous write to %08lx\n",
			__func__, (unsigned long)to);
		if (c->wbuf_len)
			pr_crit("wbuf was previously %08x-%08x\n",
				c->wbuf_ofs, c->wbuf_ofs + c->wbuf_len);
		BUG();
	}

	/* adjust alignment offset */
	if (c->wbuf_len != PAGE_MOD(to)) {
		c->wbuf_len = PAGE_MOD(to);
		/* take care of alignment to the next page */
		if (!c->wbuf_len) {
			c->wbuf_len = c->wbuf_pagesize;
			ret = __jffs2_flush_wbuf(c, NOPAD);
			if (ret)
				goto outerr;
		}
	}

	for (invec = 0; invec < count; invec++) {
		int vlen = invecs[invec].iov_len;
		uint8_t *v = invecs[invec].iov_base;

		wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);

		if (c->wbuf_len == c->wbuf_pagesize) {
			ret = __jffs2_flush_wbuf(c, NOPAD);
			if (ret)
				goto outerr;
		}
		vlen -= wbuf_retlen;
		outvec_to += wbuf_retlen;
		donelen += wbuf_retlen;
		v += wbuf_retlen;

		if (vlen >= c->wbuf_pagesize) {
			ret = mtd_write(c->mtd, outvec_to, PAGE_DIV(vlen),
					&wbuf_retlen, v);
			if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen))
				goto outfile;

			vlen -= wbuf_retlen;
			outvec_to += wbuf_retlen;
			c->wbuf_ofs = outvec_to;
			donelen += wbuf_retlen;
			v += wbuf_retlen;
		}

		wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
		if (c->wbuf_len == c->wbuf_pagesize) {
			ret = __jffs2_flush_wbuf(c, NOPAD);
			if (ret)
				goto outerr;
		}

		outvec_to += wbuf_retlen;
		donelen += wbuf_retlen;
	}

	/*
	 * If there's a remainder in the wbuf and it's a non-GC write,
	 * remember that the wbuf affects this ino
	 */
	*retlen = donelen;

	if (jffs2_sum_active()) {
		int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);
		if (res)
			return res;
	}

	if (c->wbuf_len && ino)
		jffs2_wbuf_dirties_inode(c, ino);

	ret = 0;
	up_write(&c->wbuf_sem);
	return ret;

outfile:
	/*
	 * At this point we have no problem, c->wbuf is empty. However,
	 * refile nextblock to avoid writing to the same address again.
	 */

	spin_lock(&c->erase_completion_lock);

	jeb = &c->blocks[outvec_to / c->sector_size];
	jffs2_block_refile(c, jeb, REFILE_ANYWAY);

	spin_unlock(&c->erase_completion_lock);

outerr:
	*retlen = 0;
	up_write(&c->wbuf_sem);
	return ret;
}

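/*
 * Single-buffer write entry point: wrap the buffer in a one-element
 * kvec and go through jffs2_flash_writev(), unless the device needs no
 * write-buffering at all.
 */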
int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
		      size_t *retlen, const u_char *buf)
{
	struct kvec vecs[1];

	if (!jffs2_is_writebuffered(c))
		return jffs2_flash_direct_write(c, ofs, len, retlen, buf);

	vecs[0].iov_base = (unsigned char *) buf;
	vecs[0].iov_len = len;
	return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0);
}

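/*
 * Read from flash, transparently merging in whatever still sits in the
 * write-buffer for the requested range.
 */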
int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf)
{
	loff_t orbf = 0, owbf = 0, lwbf = 0;
	int ret;

	if (!jffs2_is_writebuffered(c))
		return mtd_read(c->mtd, ofs, len, retlen, buf);

	/* Read flash */
	down_read(&c->wbuf_sem);
	ret = mtd_read(c->mtd, ofs, len, retlen, buf);

	if ((ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len)) {
		if (ret == -EBADMSG)
			pr_warn("mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n",
				len, ofs);
		/*
		 * The read may have returned raw data without ECC
		 * correction; with luck it is still intact. The node CRC
		 * checks will catch any real corruption, and the block will
		 * be dealt with when a write or erase on it fails, so treat
		 * this as success.
		 */
		ret = 0;
	}

	/* if no write-buffer is available or it is empty, return */
	if (!c->wbuf_pagesize || !c->wbuf_len)
		goto exit;

	/* if we read from a different block, return */
	if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs))
		goto exit;

	if (ofs >= c->wbuf_ofs) {
		owbf = (ofs - c->wbuf_ofs);	/* offset in write buffer */
		if (owbf > c->wbuf_len)		/* read is beyond the write buffer */
			goto exit;
		lwbf = c->wbuf_len - owbf;	/* number of bytes to copy */
		if (lwbf > len)
			lwbf = len;
	} else {
		orbf = (c->wbuf_ofs - ofs);	/* offset in read buffer */
		if (orbf > len)			/* read ends before the write buffer */
			goto exit;
		lwbf = len - orbf;		/* number of bytes to copy */
		if (lwbf > c->wbuf_len)
			lwbf = c->wbuf_len;
	}
	if (lwbf > 0)
		memcpy(buf + orbf, c->wbuf + owbf, lwbf);

exit:
	up_read(&c->wbuf_sem);
	return ret;
}

#define NR_OOB_SCAN_PAGES 4

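/* Only 8 bytes are used for the OOB cleanmarker, for historical reasons */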
#define OOB_CM_SIZE 8

static const struct jffs2_unknown_node oob_cleanmarker =
{
	.magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
	.nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
	.totlen = constant_cpu_to_je32(8)
};

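/*
 * Check whether the free OOB space of the first NR_OOB_SCAN_PAGES pages
 * of an eraseblock is blank; in cleanmarker mode the cleanmarker bytes
 * themselves are skipped. Returns 0 if empty, 1 if not, or a negative
 * error code.
 */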
int jffs2_check_oob_empty(struct jffs2_sb_info *c,
			  struct jffs2_eraseblock *jeb, int mode)
{
	int i, ret;
	int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
	struct mtd_oob_ops ops;

	ops.mode = MTD_OPS_AUTO_OOB;
	ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail;
	ops.oobbuf = c->oobbuf;
	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
	ops.datbuf = NULL;

	ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
	if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
		pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
		if (!ret || mtd_is_bitflip(ret))
			ret = -EIO;
		return ret;
	}

	for (i = 0; i < ops.ooblen; i++) {
		if (mode && i < cmlen)
			/* These bytes belong to the cleanmarker */
			continue;

		if (ops.oobbuf[i] != 0xFF) {
			jffs2_dbg(2, "Found %02x at %x in OOB for %08x\n",
				  ops.oobbuf[i], i, jeb->offset);
			return 1;
		}
	}

	return 0;
}

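/*
 * Check for a valid cleanmarker in the OOB area of the first page of an
 * eraseblock. Returns 0 if one is found, 1 if not, or a negative error
 * code if the OOB read itself failed.
 */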
int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
				 struct jffs2_eraseblock *jeb)
{
	struct mtd_oob_ops ops;
	int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);

	ops.mode = MTD_OPS_AUTO_OOB;
	ops.ooblen = cmlen;
	ops.oobbuf = c->oobbuf;
	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
	ops.datbuf = NULL;

	ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
	if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
		pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
		if (!ret || mtd_is_bitflip(ret))
			ret = -EIO;
		return ret;
	}

	return !!memcmp(&oob_cleanmarker, c->oobbuf, cmlen);
}

int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c,
				 struct jffs2_eraseblock *jeb)
{
	int ret;
	struct mtd_oob_ops ops;
	int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);

	ops.mode = MTD_OPS_AUTO_OOB;
	ops.ooblen = cmlen;
	ops.oobbuf = (uint8_t *)&oob_cleanmarker;
	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
	ops.datbuf = NULL;

	ret = mtd_write_oob(c->mtd, jeb->offset, &ops);
	if (ret || ops.oobretlen != ops.ooblen) {
		pr_err("cannot write OOB for EB at %08x, requested %zd bytes, wrote %zd bytes, error %d\n",
		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
		if (!ret)
			ret = -EIO;
		return ret;
	}

	return 0;
}

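/*
 * Called after an erase failure. Returns 0 while the block is still
 * below MAX_ERASE_FAILURES (so the caller may retry), 1 once the block
 * has been marked bad, or a negative error if marking it failed.
 */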
int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
{
	int ret;

	if (++jeb->bad_count < MAX_ERASE_FAILURES)
		return 0;

	pr_warn("marking eraseblock at %08x as bad\n", bad_offset);
	ret = mtd_block_markbad(c->mtd, bad_offset);

	if (ret) {
		jffs2_dbg(1, "%s(): Write failed for block at %08x: error %d\n",
			  __func__, jeb->offset, ret);
		return ret;
	}
	return 1;
}

static struct jffs2_sb_info *work_to_sb(struct work_struct *work)
{
	struct delayed_work *dwork;

	dwork = to_delayed_work(work);
	return container_of(dwork, struct jffs2_sb_info, wbuf_dwork);
}

static void delayed_wbuf_sync(struct work_struct *work)
{
	struct jffs2_sb_info *c = work_to_sb(work);
	struct super_block *sb = OFNI_BS_2SFFJ(c);

	if (!sb_rdonly(sb)) {
		jffs2_dbg(1, "%s()\n", __func__);
		jffs2_flush_wbuf_gc(c, 0);
	}
}

void jffs2_dirty_trigger(struct jffs2_sb_info *c)
{
	struct super_block *sb = OFNI_BS_2SFFJ(c);
	unsigned long delay;

	if (sb_rdonly(sb))
		return;

	delay = msecs_to_jiffies(dirty_writeback_interval * 10);
	if (queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay))
		jffs2_dbg(1, "%s()\n", __func__);
}

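/*
 * Set up write-buffering for NAND: cleanmarkers live in OOB (so the
 * in-band cleanmarker size is zero) and the buffer matches the device
 * page (write) size.
 */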
int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
{
	if (!c->mtd->oobsize)
		return 0;

	/* Cleanmarker is out-of-band, so inline size is zero */
	c->cleanmarker_size = 0;

	if (c->mtd->oobavail == 0) {
		pr_err("inconsistent device description\n");
		return -EINVAL;
	}

	jffs2_dbg(1, "using OOB on NAND\n");

	c->oobavail = c->mtd->oobavail;

	/* Initialise write buffer */
	init_rwsem(&c->wbuf_sem);
	INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
	c->wbuf_pagesize = c->mtd->writesize;
	c->wbuf_ofs = 0xFFFFFFFF;

	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

	c->oobbuf = kmalloc_array(NR_OOB_SCAN_PAGES, c->oobavail, GFP_KERNEL);
	if (!c->oobbuf) {
		kfree(c->wbuf);
		return -ENOMEM;
	}

#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
	c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf_verify) {
		kfree(c->oobbuf);
		kfree(c->wbuf);
		return -ENOMEM;
	}
#endif
	return 0;
}

void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
{
#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
	kfree(c->wbuf_verify);
#endif
	kfree(c->wbuf);
	kfree(c->oobbuf);
}

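/*
 * DataFlash has very small erase blocks, so JFFS2 groups several of
 * them into one virtual sector and write-buffers a whole erase unit at
 * a time.
 */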
int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
	c->cleanmarker_size = 0;		/* No cleanmarkers needed */

	/* Initialize write buffer */
	init_rwsem(&c->wbuf_sem);
	INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
	c->wbuf_pagesize = c->mtd->erasesize;

	/* Build a JFFS2 virtual sector from several DataFlash pages:
	   at least eight erase blocks, and at least 8KiB. */
	c->sector_size = 8 * c->mtd->erasesize;

	while (c->sector_size < 8192) {
		c->sector_size *= 2;
	}

	/* It may be necessary to adjust the flash size */
	c->flash_size = c->mtd->size;

	if ((c->flash_size % c->sector_size) != 0) {
		c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
		pr_warn("flash size adjusted to %dKiB\n", c->flash_size);
	}

	c->wbuf_ofs = 0xFFFFFFFF;
	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
	c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf_verify) {
		kfree(c->wbuf);
		return -ENOMEM;
	}
#endif

	pr_info("write-buffering enabled buffer (%d) erasesize (%d)\n",
		c->wbuf_pagesize, c->sector_size);

	return 0;
}

void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
	kfree(c->wbuf_verify);
#endif
	kfree(c->wbuf);
}

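/*
 * NOR flash with a write-buffer cannot reprogram part of a page, so the
 * in-band cleanmarker is sized to cover at least one full write unit
 * (and never less than 16 bytes).
 */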
int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
	c->cleanmarker_size = max(16u, c->mtd->writesize);

	/* Initialize write buffer */
	init_rwsem(&c->wbuf_sem);
	INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);

	c->wbuf_pagesize = c->mtd->writesize;
	c->wbuf_ofs = 0xFFFFFFFF;

	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
	c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf_verify) {
		kfree(c->wbuf);
		return -ENOMEM;
	}
#endif
	return 0;
}

void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
	kfree(c->wbuf_verify);
#endif
	kfree(c->wbuf);
}

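/*
 * UBI volumes need no cleanmarkers, and write-buffering is only needed
 * when the minimal I/O unit is larger than one byte.
 */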
int jffs2_ubivol_setup(struct jffs2_sb_info *c) {
	c->cleanmarker_size = 0;

	if (c->mtd->writesize == 1)
		/* We do not need a write-buffer */
		return 0;

	init_rwsem(&c->wbuf_sem);
	INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);

	c->wbuf_pagesize = c->mtd->writesize;
	c->wbuf_ofs = 0xFFFFFFFF;
	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

	pr_info("write-buffering enabled buffer (%d) erasesize (%d)\n",
		c->wbuf_pagesize, c->sector_size);

	return 0;
}

void jffs2_ubivol_cleanup(struct jffs2_sb_info *c) {
	kfree(c->wbuf);
}