This source file includes following definitions.
- nothing_to_commit
- do_commit
- run_bg_commit
- ubifs_bg_thread
- ubifs_commit_required
- ubifs_request_bg_commit
- wait_for_commit
- ubifs_run_commit
- ubifs_gc_should_commit
- dbg_old_index_check_init
- dbg_check_old_index
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34 #include <linux/freezer.h>
35 #include <linux/kthread.h>
36 #include <linux/slab.h>
37 #include "ubifs.h"
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56 static int nothing_to_commit(struct ubifs_info *c)
57 {
58
59
60
61
62 if (c->mounting || c->remounting_rw)
63 return 0;
64
65
66
67
68
69 if (c->zroot.znode && ubifs_zn_dirty(c->zroot.znode))
70 return 0;
71
72
73
74
75
76
77
78
79 if (c->nroot && test_bit(DIRTY_CNODE, &c->nroot->flags))
80 return 0;
81
82 ubifs_assert(c, atomic_long_read(&c->dirty_zn_cnt) == 0);
83 ubifs_assert(c, c->dirty_pn_cnt == 0);
84 ubifs_assert(c, c->dirty_nn_cnt == 0);
85
86 return 1;
87 }
88
89
90
91
92
93
94
95
96
/**
 * do_commit - commit the journal.
 * @c: UBIFS file-system description object
 *
 * This function implements UBIFS commit.  It has to be called with the commit
 * semaphore (@c->commit_sem) write-locked; it releases the semaphore itself on
 * every exit path.  Returns zero in case of success and a negative error code
 * in case of failure.
 */
static int do_commit(struct ubifs_info *c)
{
	int err, new_ltail_lnum, old_ltail_lnum, i;
	struct ubifs_zbranch zroot;
	struct ubifs_lp_stats lst;

	dbg_cmt("start");
	ubifs_assert(c, !c->ro_media && !c->ro_mount);

	if (c->ro_error) {
		err = -EROFS;
		goto out_up;
	}

	/* Nothing is dirty - the commit can be cancelled altogether */
	if (nothing_to_commit(c)) {
		up_write(&c->commit_sem);
		err = 0;
		goto out_cancel;
	}

	/* Sync all write buffers (necessary for recovery) */
	for (i = 0; i < c->jhead_cnt; i++) {
		err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
		if (err)
			goto out_up;
	}

	c->cmt_no += 1;
	err = ubifs_gc_start_commit(c);
	if (err)
		goto out_up;
	err = dbg_check_lprops(c);
	if (err)
		goto out_up;
	err = ubifs_log_start_commit(c, &new_ltail_lnum);
	if (err)
		goto out_up;
	err = ubifs_tnc_start_commit(c, &zroot);
	if (err)
		goto out_up;
	err = ubifs_lpt_start_commit(c);
	if (err)
		goto out_up;
	err = ubifs_orphan_start_commit(c);
	if (err)
		goto out_up;

	ubifs_get_lp_stats(c, &lst);

	/* The "start" phase is done - journal updates may proceed again */
	up_write(&c->commit_sem);

	err = ubifs_tnc_end_commit(c);
	if (err)
		goto out;
	err = ubifs_lpt_end_commit(c);
	if (err)
		goto out;
	err = ubifs_orphan_end_commit(c);
	if (err)
		goto out;
	err = dbg_check_old_index(c, &zroot);
	if (err)
		goto out;

	/* Fill in the new on-flash master node and the LEB statistics */
	c->mst_node->cmt_no      = cpu_to_le64(c->cmt_no);
	c->mst_node->log_lnum    = cpu_to_le32(new_ltail_lnum);
	c->mst_node->root_lnum   = cpu_to_le32(zroot.lnum);
	c->mst_node->root_offs   = cpu_to_le32(zroot.offs);
	c->mst_node->root_len    = cpu_to_le32(zroot.len);
	c->mst_node->ihead_lnum  = cpu_to_le32(c->ihead_lnum);
	c->mst_node->ihead_offs  = cpu_to_le32(c->ihead_offs);
	c->mst_node->index_size  = cpu_to_le64(c->bi.old_idx_sz);
	c->mst_node->lpt_lnum    = cpu_to_le32(c->lpt_lnum);
	c->mst_node->lpt_offs    = cpu_to_le32(c->lpt_offs);
	c->mst_node->nhead_lnum  = cpu_to_le32(c->nhead_lnum);
	c->mst_node->nhead_offs  = cpu_to_le32(c->nhead_offs);
	c->mst_node->ltab_lnum   = cpu_to_le32(c->ltab_lnum);
	c->mst_node->ltab_offs   = cpu_to_le32(c->ltab_offs);
	c->mst_node->lsave_lnum  = cpu_to_le32(c->lsave_lnum);
	c->mst_node->lsave_offs  = cpu_to_le32(c->lsave_offs);
	c->mst_node->lscan_lnum  = cpu_to_le32(c->lscan_lnum);
	c->mst_node->empty_lebs  = cpu_to_le32(lst.empty_lebs);
	c->mst_node->idx_lebs    = cpu_to_le32(lst.idx_lebs);
	c->mst_node->total_free  = cpu_to_le64(lst.total_free);
	c->mst_node->total_dirty = cpu_to_le64(lst.total_dirty);
	c->mst_node->total_used  = cpu_to_le64(lst.total_used);
	c->mst_node->total_dead  = cpu_to_le64(lst.total_dead);
	c->mst_node->total_dark  = cpu_to_le64(lst.total_dark);
	if (c->no_orphs)
		c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
	else
		c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_NO_ORPHS);

	old_ltail_lnum = c->ltail_lnum;
	err = ubifs_log_end_commit(c, new_ltail_lnum);
	if (err)
		goto out;

	err = ubifs_log_post_commit(c, old_ltail_lnum);
	if (err)
		goto out;
	err = ubifs_gc_end_commit(c);
	if (err)
		goto out;
	err = ubifs_lpt_post_commit(c);
	if (err)
		goto out;

out_cancel:
	/* Success (or cancelled commit) - go back to the resting state */
	spin_lock(&c->cs_lock);
	c->cmt_state = COMMIT_RESTING;
	wake_up(&c->cmt_wq);
	dbg_cmt("commit end");
	spin_unlock(&c->cs_lock);
	return 0;

out_up:
	up_write(&c->commit_sem);
out:
	/* A failed commit is fatal - switch to read-only mode */
	ubifs_err(c, "commit failed, error %d", err);
	spin_lock(&c->cs_lock);
	c->cmt_state = COMMIT_BROKEN;
	wake_up(&c->cmt_wq);
	spin_unlock(&c->cs_lock);
	ubifs_ro_mode(c, err);
	return err;
}
224
225
226
227
228
229
230
231
/**
 * run_bg_commit - run background commit if it is needed.
 * @c: UBIFS file-system description object
 *
 * This function runs background commit if it is needed.  Returns zero in case
 * of success and a negative error code in case of failure.
 */
static int run_bg_commit(struct ubifs_info *c)
{
	spin_lock(&c->cs_lock);
	/*
	 * Run background commit only if background commit was requested or if
	 * commit is required.
	 */
	if (c->cmt_state != COMMIT_BACKGROUND &&
	    c->cmt_state != COMMIT_REQUIRED)
		goto out;
	spin_unlock(&c->cs_lock);

	down_write(&c->commit_sem);
	/*
	 * Re-check the state under the lock - it may have changed while we
	 * were waiting for the commit semaphore.
	 */
	spin_lock(&c->cs_lock);
	if (c->cmt_state == COMMIT_REQUIRED)
		c->cmt_state = COMMIT_RUNNING_REQUIRED;
	else if (c->cmt_state == COMMIT_BACKGROUND)
		c->cmt_state = COMMIT_RUNNING_BACKGROUND;
	else
		goto out_cmt_unlock;
	spin_unlock(&c->cs_lock);

	/* do_commit() releases @c->commit_sem on all paths */
	return do_commit(c);

out_cmt_unlock:
	up_write(&c->commit_sem);
out:
	spin_unlock(&c->cs_lock);
	return 0;
}
262
263
264
265
266
267
268
269
270
271
272
273
274
/**
 * ubifs_bg_thread - UBIFS background thread function.
 * @info: points to the file-system description object
 *
 * This function implements the background thread.  It syncs write-buffers and
 * runs background commits when @c->need_bgt is raised.  Returns zero when the
 * thread is asked to stop.
 */
int ubifs_bg_thread(void *info)
{
	int err;
	struct ubifs_info *c = info;

	ubifs_msg(c, "background thread \"%s\" started, PID %d",
		  c->bgt_name, current->pid);
	set_freezable();

	while (1) {
		if (kthread_should_stop())
			break;

		if (try_to_freeze())
			continue;

		/*
		 * Set the state to interruptible *before* testing
		 * @c->need_bgt, so a wake-up that sets the flag after the
		 * test cannot be lost - it will make schedule() return
		 * immediately instead of sleeping.
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		if (!c->need_bgt) {
			/*
			 * Nothing prevents us from going to sleep, but
			 * re-check the stop condition first so a stop request
			 * issued after the loop-top check is not missed.
			 */
			if (kthread_should_stop())
				break;
			schedule();
			continue;
		} else
			__set_current_state(TASK_RUNNING);

		c->need_bgt = 0;
		err = ubifs_bg_wbufs_sync(c);
		if (err)
			ubifs_ro_mode(c, err);

		run_bg_commit(c);
		cond_resched();
	}

	ubifs_msg(c, "background thread \"%s\" stops", c->bgt_name);
	return 0;
}
318
319
320
321
322
323
324
325
326 void ubifs_commit_required(struct ubifs_info *c)
327 {
328 spin_lock(&c->cs_lock);
329 switch (c->cmt_state) {
330 case COMMIT_RESTING:
331 case COMMIT_BACKGROUND:
332 dbg_cmt("old: %s, new: %s", dbg_cstate(c->cmt_state),
333 dbg_cstate(COMMIT_REQUIRED));
334 c->cmt_state = COMMIT_REQUIRED;
335 break;
336 case COMMIT_RUNNING_BACKGROUND:
337 dbg_cmt("old: %s, new: %s", dbg_cstate(c->cmt_state),
338 dbg_cstate(COMMIT_RUNNING_REQUIRED));
339 c->cmt_state = COMMIT_RUNNING_REQUIRED;
340 break;
341 case COMMIT_REQUIRED:
342 case COMMIT_RUNNING_REQUIRED:
343 case COMMIT_BROKEN:
344 break;
345 }
346 spin_unlock(&c->cs_lock);
347 }
348
349
350
351
352
353
354
355
356 void ubifs_request_bg_commit(struct ubifs_info *c)
357 {
358 spin_lock(&c->cs_lock);
359 if (c->cmt_state == COMMIT_RESTING) {
360 dbg_cmt("old: %s, new: %s", dbg_cstate(c->cmt_state),
361 dbg_cstate(COMMIT_BACKGROUND));
362 c->cmt_state = COMMIT_BACKGROUND;
363 spin_unlock(&c->cs_lock);
364 ubifs_wake_up_bgt(c);
365 } else
366 spin_unlock(&c->cs_lock);
367 }
368
369
370
371
372
373
374
375 static int wait_for_commit(struct ubifs_info *c)
376 {
377 dbg_cmt("pid %d goes sleep", current->pid);
378
379
380
381
382
383
384
385
386 wait_event(c->cmt_wq, c->cmt_state != COMMIT_RUNNING_BACKGROUND &&
387 c->cmt_state != COMMIT_RUNNING_REQUIRED);
388 dbg_cmt("commit finished, pid %d woke up", current->pid);
389 return 0;
390 }
391
392
393
394
395
396
397
398
/**
 * ubifs_run_commit - run or wait for commit.
 * @c: UBIFS file-system description object
 *
 * This function runs commit and returns zero in case of success and a
 * negative error code in case of failure.  If a commit is already in
 * progress, this function upgrades it to "required" and waits for it instead
 * of starting a new one.
 */
int ubifs_run_commit(struct ubifs_info *c)
{
	int err = 0;

	spin_lock(&c->cs_lock);
	if (c->cmt_state == COMMIT_BROKEN) {
		err = -EROFS;
		goto out;
	}

	if (c->cmt_state == COMMIT_RUNNING_BACKGROUND)
		/*
		 * We set the commit state to 'running required' to indicate
		 * that we want it to complete as quickly as possible.
		 */
		c->cmt_state = COMMIT_RUNNING_REQUIRED;

	if (c->cmt_state == COMMIT_RUNNING_REQUIRED) {
		spin_unlock(&c->cs_lock);
		return wait_for_commit(c);
	}
	spin_unlock(&c->cs_lock);

	/* Ok, the commit is indeed needed */

	down_write(&c->commit_sem);
	spin_lock(&c->cs_lock);
	/*
	 * Re-check the commit state - it may have changed while we were
	 * waiting for the commit semaphore (e.g. the background thread may
	 * have started the commit, or it may have failed).
	 */
	if (c->cmt_state == COMMIT_BROKEN) {
		err = -EROFS;
		goto out_cmt_unlock;
	}

	if (c->cmt_state == COMMIT_RUNNING_BACKGROUND)
		c->cmt_state = COMMIT_RUNNING_REQUIRED;

	if (c->cmt_state == COMMIT_RUNNING_REQUIRED) {
		up_write(&c->commit_sem);
		spin_unlock(&c->cs_lock);
		return wait_for_commit(c);
	}
	c->cmt_state = COMMIT_RUNNING_REQUIRED;
	spin_unlock(&c->cs_lock);

	/* do_commit() releases @c->commit_sem on all paths */
	err = do_commit(c);
	return err;

out_cmt_unlock:
	up_write(&c->commit_sem);
out:
	spin_unlock(&c->cs_lock);
	return err;
}
455
456
457
458
459
460
461
462
463
464
465
466
467 int ubifs_gc_should_commit(struct ubifs_info *c)
468 {
469 int ret = 0;
470
471 spin_lock(&c->cs_lock);
472 if (c->cmt_state == COMMIT_BACKGROUND) {
473 dbg_cmt("commit required now");
474 c->cmt_state = COMMIT_REQUIRED;
475 } else
476 dbg_cmt("commit not requested");
477 if (c->cmt_state == COMMIT_REQUIRED)
478 ret = 1;
479 spin_unlock(&c->cs_lock);
480 return ret;
481 }
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
/**
 * struct idx_node - debugging node used by dbg_check_old_index().
 * @list: list head to attach the node to the traversal path list
 * @iip: index in parent - the branch slot this node was reached through
 * @upper_key: all keys in this index node have to be less or equivalent to
 *             this key
 * @idx: index node (8-byte aligned because all node structures must be
 *       8-byte aligned)
 */
struct idx_node {
	struct list_head list;
	int iip;
	union ubifs_key upper_key;
	struct ubifs_idx_node idx __aligned(8);
};
503
504
505
506
507
508
509
510
511
512
513
514 int dbg_old_index_check_init(struct ubifs_info *c, struct ubifs_zbranch *zroot)
515 {
516 struct ubifs_idx_node *idx;
517 int lnum, offs, len, err = 0;
518 struct ubifs_debug_info *d = c->dbg;
519
520 d->old_zroot = *zroot;
521 lnum = d->old_zroot.lnum;
522 offs = d->old_zroot.offs;
523 len = d->old_zroot.len;
524
525 idx = kmalloc(c->max_idx_node_sz, GFP_NOFS);
526 if (!idx)
527 return -ENOMEM;
528
529 err = ubifs_read_node(c, idx, UBIFS_IDX_NODE, len, lnum, offs);
530 if (err)
531 goto out;
532
533 d->old_zroot_level = le16_to_cpu(idx->level);
534 d->old_zroot_sqnum = le64_to_cpu(idx->ch.sqnum);
535 out:
536 kfree(idx);
537 return err;
538 }
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553 int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot)
554 {
555 int lnum, offs, len, err = 0, uninitialized_var(last_level), child_cnt;
556 int first = 1, iip;
557 struct ubifs_debug_info *d = c->dbg;
558 union ubifs_key uninitialized_var(lower_key), upper_key, l_key, u_key;
559 unsigned long long uninitialized_var(last_sqnum);
560 struct ubifs_idx_node *idx;
561 struct list_head list;
562 struct idx_node *i;
563 size_t sz;
564
565 if (!dbg_is_chk_index(c))
566 return 0;
567
568 INIT_LIST_HEAD(&list);
569
570 sz = sizeof(struct idx_node) + ubifs_idx_node_sz(c, c->fanout) -
571 UBIFS_IDX_NODE_SZ;
572
573
574 lnum = d->old_zroot.lnum;
575 offs = d->old_zroot.offs;
576 len = d->old_zroot.len;
577 iip = 0;
578
579
580
581
582
583 while (1) {
584 struct ubifs_branch *br;
585
586
587 i = kmalloc(sz, GFP_NOFS);
588 if (!i) {
589 err = -ENOMEM;
590 goto out_free;
591 }
592 i->iip = iip;
593
594 list_add_tail(&i->list, &list);
595
596 idx = &i->idx;
597 err = ubifs_read_node(c, idx, UBIFS_IDX_NODE, len, lnum, offs);
598 if (err)
599 goto out_free;
600
601 child_cnt = le16_to_cpu(idx->child_cnt);
602 if (child_cnt < 1 || child_cnt > c->fanout) {
603 err = 1;
604 goto out_dump;
605 }
606 if (first) {
607 first = 0;
608
609 if (le16_to_cpu(idx->level) != d->old_zroot_level) {
610 err = 2;
611 goto out_dump;
612 }
613 if (le64_to_cpu(idx->ch.sqnum) != d->old_zroot_sqnum) {
614 err = 3;
615 goto out_dump;
616 }
617
618 last_level = le16_to_cpu(idx->level) + 1;
619 last_sqnum = le64_to_cpu(idx->ch.sqnum) + 1;
620 key_read(c, ubifs_idx_key(c, idx), &lower_key);
621 highest_ino_key(c, &upper_key, INUM_WATERMARK);
622 }
623 key_copy(c, &upper_key, &i->upper_key);
624 if (le16_to_cpu(idx->level) != last_level - 1) {
625 err = 3;
626 goto out_dump;
627 }
628
629
630
631
632 if (le64_to_cpu(idx->ch.sqnum) >= last_sqnum) {
633 err = 4;
634 goto out_dump;
635 }
636
637 key_read(c, ubifs_idx_key(c, idx), &l_key);
638 br = ubifs_idx_branch(c, idx, child_cnt - 1);
639 key_read(c, &br->key, &u_key);
640 if (keys_cmp(c, &lower_key, &l_key) > 0) {
641 err = 5;
642 goto out_dump;
643 }
644 if (keys_cmp(c, &upper_key, &u_key) < 0) {
645 err = 6;
646 goto out_dump;
647 }
648 if (keys_cmp(c, &upper_key, &u_key) == 0)
649 if (!is_hash_key(c, &u_key)) {
650 err = 7;
651 goto out_dump;
652 }
653
654 if (le16_to_cpu(idx->level) == 0) {
655
656 while (1) {
657
658 list_del(&i->list);
659 kfree(i);
660
661 if (list_empty(&list))
662 goto out;
663
664 i = list_entry(list.prev, struct idx_node,
665 list);
666 idx = &i->idx;
667
668 if (iip + 1 < le16_to_cpu(idx->child_cnt)) {
669 iip = iip + 1;
670 break;
671 } else
672
673 iip = i->iip;
674 }
675 } else
676
677 iip = 0;
678
679
680
681
682 last_level = le16_to_cpu(idx->level);
683 last_sqnum = le64_to_cpu(idx->ch.sqnum);
684 br = ubifs_idx_branch(c, idx, iip);
685 lnum = le32_to_cpu(br->lnum);
686 offs = le32_to_cpu(br->offs);
687 len = le32_to_cpu(br->len);
688 key_read(c, &br->key, &lower_key);
689 if (iip + 1 < le16_to_cpu(idx->child_cnt)) {
690 br = ubifs_idx_branch(c, idx, iip + 1);
691 key_read(c, &br->key, &upper_key);
692 } else
693 key_copy(c, &i->upper_key, &upper_key);
694 }
695 out:
696 err = dbg_old_index_check_init(c, zroot);
697 if (err)
698 goto out_free;
699
700 return 0;
701
702 out_dump:
703 ubifs_err(c, "dumping index node (iip=%d)", i->iip);
704 ubifs_dump_node(c, idx);
705 list_del(&i->list);
706 kfree(i);
707 if (!list_empty(&list)) {
708 i = list_entry(list.prev, struct idx_node, list);
709 ubifs_err(c, "dumping parent index node");
710 ubifs_dump_node(c, &i->idx);
711 }
712 out_free:
713 while (!list_empty(&list)) {
714 i = list_entry(list.next, struct idx_node, list);
715 list_del(&i->list);
716 kfree(i);
717 }
718 ubifs_err(c, "failed, error %d", err);
719 if (err > 0)
720 err = -EINVAL;
721 return err;
722 }