/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Authors: Adrian Hunter
 *          Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * This file implements functions needed to recover from unclean un-mounts.
 * When UBIFS is mounted, it checks a flag on the master node to determine if
 * an un-mount was completed successfully. If not, the process of mounting
 * incorporates additional checking and fixing of on-flash data structures.
 * UBIFS always cleans away all remnants of an unclean un-mount, so that
 * errors do not accumulate. However, UBIFS defers recovery if it is mounted
 * read-only, and the flash is not modified in that case.
 *
 * The general UBIFS approach to recovery is that it recovers from
 * corruptions which could be caused by power cuts, but it refuses to recover
 * from corruption caused by other reasons. UBIFS tries to distinguish
 * between these two causes of corruption, silently recovering in the former
 * case and loudly complaining in the latter case.
 *
 * UBIFS writes only to erased LEBs, so it writes only to the flash space
 * containing only 0xFFs. UBIFS also always writes strictly from the beginning
 * of the LEB to the end. And UBIFS assumes that the underlying flash media
 * writes in @c->max_write_size bytes at a time.
 *
 * Hence, if UBIFS finds a corrupted node at offset X, it expects only the min.
 * I/O unit corresponding to offset X to contain corrupted data; all the
 * following min. I/O units have to contain empty space (all 0xFFs). If this is
 * not true, the corruption cannot be the result of a power cut, and UBIFS
 * refuses to mount.
 */

#include <linux/crc32.h>
#include <linux/slab.h>
#include "ubifs.h"

/**
 * is_empty - determine whether a buffer is empty (contains all 0xff).
 * @buf: buffer to check
 * @len: length of buffer
 *
 * This function returns %1 if the buffer is empty (contains all 0xff) otherwise
 * %0 is returned.
 */
static int is_empty(void *buf, int len)
{
	uint8_t *p = buf;
	int i;

	for (i = 0; i < len; i++)
		if (*p++ != 0xff)
			return 0;
	return 1;
}

/**
 * first_non_ff - find offset of the first non-0xff byte.
 * @buf: buffer to search in
 * @len: length of buffer
 *
 * This function returns offset of the first non-0xff byte in @buf or %-1 if
 * the buffer contains only 0xff bytes.
 */
static int first_non_ff(void *buf, int len)
{
	uint8_t *p = buf;
	int i;

	for (i = 0; i < len; i++)
		if (*p++ != 0xff)
			return i;
	return -1;
}
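/*
 * Rough illustration of the master node area (sizes not to scale): the master
 * node is written to two LEBs (%UBIFS_MST_LNUM and %UBIFS_MST_LNUM + 1), one
 * copy of @c->mst_node_alsz bytes at a time, sequentially from the start of
 * each LEB:
 *
 *   | mst copy 0 | mst copy 1 | ... | mst copy N | <corruption?> | 0xFF ... |
 *
 * 'get_master_node()' below walks these slots to find the last valid copy,
 * tolerating at most one corrupted slot after it (an interrupted write).
 */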
/**
 * get_master_node - get the last valid master node allowing for corruption.
 * @c: UBIFS file-system description object
 * @lnum: LEB number
 * @pbuf: buffer containing the LEB read, is returned here
 * @mst: master node, if found, is returned here
 * @cor: corruption, if found, is returned here
 *
 * This function allocates a buffer, reads the LEB into it, and finds and
 * returns the last valid master node allowing for one area of corruption.
 * The corrupt area, if there is one, must be consistent with the assumption
 * that it is the result of an unclean unmount while the master node was being
 * written. Under those circumstances, it is valid to use the previously written
 * master node.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int get_master_node(const struct ubifs_info *c, int lnum, void **pbuf,
			   struct ubifs_mst_node **mst, void **cor)
{
	const int sz = c->mst_node_alsz;
	int err, offs, len;
	void *sbuf, *buf;

	sbuf = vmalloc(c->leb_size);
	if (!sbuf)
		return -ENOMEM;

	err = ubifs_leb_read(c, lnum, sbuf, 0, c->leb_size, 0);
	if (err && err != -EBADMSG)
		goto out_free;

	/* Find the first position that is definitely not a node */
	offs = 0;
	buf = sbuf;
	len = c->leb_size;
	while (offs + UBIFS_MST_NODE_SZ <= c->leb_size) {
		struct ubifs_ch *ch = buf;

		if (le32_to_cpu(ch->magic) != UBIFS_NODE_MAGIC)
			break;
		offs += sz;
		buf += sz;
		len -= sz;
	}
	/* See if there was a valid master node before that */
	if (offs) {
		int ret;

		offs -= sz;
		buf -= sz;
		len += sz;
		ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
		if (ret != SCANNED_A_NODE && offs) {
			/* Could have been corruption so check one place back */
			offs -= sz;
			buf -= sz;
			len += sz;
			ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
			if (ret != SCANNED_A_NODE)
				/*
				 * We accept only one area of corruption because
				 * we are assuming that it was caused while
				 * trying to write a master node.
				 */
				goto out_err;
		}
		if (ret == SCANNED_A_NODE) {
			struct ubifs_ch *ch = buf;

			if (ch->node_type != UBIFS_MST_NODE)
				goto out_err;
			dbg_rcvry("found a master node at %d:%d", lnum, offs);
			*mst = buf;
			offs += sz;
			buf += sz;
			len -= sz;
		}
	}
	/* Check for corruption */
	if (offs < c->leb_size) {
		if (!is_empty(buf, min_t(int, len, sz))) {
			*cor = buf;
			dbg_rcvry("found corruption at %d:%d", lnum, offs);
		}
		offs += sz;
		buf += sz;
		len -= sz;
	}
	/* Check remaining empty space */
	if (offs < c->leb_size)
		if (!is_empty(buf, len))
			goto out_err;
	*pbuf = sbuf;
	return 0;

out_err:
	err = -EINVAL;
out_free:
	vfree(sbuf);
	*mst = NULL;
	*cor = NULL;
	return err;
}
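/*
 * Note: the master node written by 'write_rcvrd_mst_node()' below carries the
 * %UBIFS_MST_RCVRY flag. This is how a later mount can tell (in
 * 'ubifs_recover_master_node()') that a lone master node found at offset 0
 * with no corruption was itself produced by recovery.
 */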
/**
 * write_rcvrd_mst_node - write recovered master node.
 * @c: UBIFS file-system description object
 * @mst: master node
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int write_rcvrd_mst_node(struct ubifs_info *c,
				struct ubifs_mst_node *mst)
{
	int err = 0, lnum = UBIFS_MST_LNUM, sz = c->mst_node_alsz;
	__le32 save_flags;

	dbg_rcvry("recovery");

	save_flags = mst->flags;
	mst->flags |= cpu_to_le32(UBIFS_MST_RCVRY);

	ubifs_prepare_node(c, mst, UBIFS_MST_NODE_SZ, 1);
	err = ubifs_leb_change(c, lnum, mst, sz);
	if (err)
		goto out;
	err = ubifs_leb_change(c, lnum + 1, mst, sz);
	if (err)
		goto out;
out:
	mst->flags = save_flags;
	return err;
}

/**
 * ubifs_recover_master_node - recover the master node.
 * @c: UBIFS file-system description object
 *
 * This function recovers the master node from corruption that may occur due to
 * an unclean unmount.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_recover_master_node(struct ubifs_info *c)
{
	void *buf1 = NULL, *buf2 = NULL, *cor1 = NULL, *cor2 = NULL;
	struct ubifs_mst_node *mst1 = NULL, *mst2 = NULL, *mst;
	const int sz = c->mst_node_alsz;
	int err, offs1, offs2;

	dbg_rcvry("recovery");

	err = get_master_node(c, UBIFS_MST_LNUM, &buf1, &mst1, &cor1);
	if (err)
		goto out_free;

	err = get_master_node(c, UBIFS_MST_LNUM + 1, &buf2, &mst2, &cor2);
	if (err)
		goto out_free;

	if (mst1) {
		offs1 = (void *)mst1 - buf1;
		if ((le32_to_cpu(mst1->flags) & UBIFS_MST_RCVRY) &&
		    (offs1 == 0 && !cor1)) {
			/*
			 * mst1 was written by recovery at offset 0 with no
			 * corruption.
			 */
			dbg_rcvry("recovery recovery");
			mst = mst1;
		} else if (mst2) {
			offs2 = (void *)mst2 - buf2;
			if (offs1 == offs2) {
				/* Same offset, so must be the same */
				if (memcmp((void *)mst1 + UBIFS_CH_SZ,
					   (void *)mst2 + UBIFS_CH_SZ,
					   UBIFS_MST_NODE_SZ - UBIFS_CH_SZ))
					goto out_err;
				mst = mst1;
			} else if (offs2 + sz == offs1) {
				/* 1st LEB was written, 2nd was not */
				if (cor1)
					goto out_err;
				mst = mst1;
			} else if (offs1 == 0 &&
				   c->leb_size - offs2 - sz < sz) {
				/* 1st LEB was unmapped and written, 2nd not */
				if (cor1)
					goto out_err;
				mst = mst1;
			} else
				goto out_err;
		} else {
			/*
			 * 2nd LEB was unmapped and about to be written, so
			 * there must be only one master node in the first LEB
			 * and no corruption.
			 */
			if (offs1 != 0 || cor1)
				goto out_err;
			mst = mst1;
		}
	} else {
		if (!mst2)
			goto out_err;
		/*
		 * 1st LEB was unmapped and about to be written, so there must
		 * be no room left in 2nd LEB.
		 */
		offs2 = (void *)mst2 - buf2;
		if (offs2 + sz + sz <= c->leb_size)
			goto out_err;
		mst = mst2;
	}

	ubifs_msg(c, "recovered master node from LEB %d",
		  (mst == mst1 ? UBIFS_MST_LNUM : UBIFS_MST_LNUM + 1));

	memcpy(c->mst_node, mst, UBIFS_MST_NODE_SZ);

	if (c->ro_mount) {
		/* Read-only mode. Keep a copy for switching to rw mode */
		c->rcvrd_mst_node = kmalloc(sz, GFP_KERNEL);
		if (!c->rcvrd_mst_node) {
			err = -ENOMEM;
			goto out_free;
		}
		memcpy(c->rcvrd_mst_node, c->mst_node, UBIFS_MST_NODE_SZ);

		/*
		 * We had to recover the master node, which means there was an
		 * unclean reboot. However, it is possible that the master node
		 * is clean at this point, i.e., %UBIFS_MST_DIRTY is not set.
		 * E.g., consider the following chain of events:
		 *
		 * 1. UBIFS was cleanly unmounted, so the master node is clean
		 * 2. UBIFS is being mounted R/W and starts changing the master
		 *    node in the first LEB (%UBIFS_MST_LNUM). A power cut
		 *    happens, so this LEB ends up with some amount of garbage
		 *    at the end.
		 * 3. UBIFS is being mounted R/O. We reach this place and
		 *    recover the master node from the second LEB
		 *    (%UBIFS_MST_LNUM + 1). But we cannot update the media
		 *    because we are being mounted R/O. We have to defer the
		 *    operation.
		 * 4. However, this master node (@c->mst_node) is marked as
		 *    clean (since step 1). And if we just return, the mount
		 *    code will be confused and won't recover the master node
		 *    when it is re-mounted R/W later.
		 *
		 * Thus, we force further recovery by marking the master node
		 * as dirty.
		 */
		c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
	} else {
		/* Write the recovered master node */
		c->max_sqnum = le64_to_cpu(mst->ch.sqnum) - 1;
		err = write_rcvrd_mst_node(c, c->mst_node);
		if (err)
			goto out_free;
	}

	vfree(buf2);
	vfree(buf1);

	return 0;

out_err:
	err = -EINVAL;
out_free:
	ubifs_err(c, "failed to recover master node");
	if (mst1) {
		ubifs_err(c, "dumping first master node");
		ubifs_dump_node(c, mst1);
	}
	if (mst2) {
		ubifs_err(c, "dumping second master node");
		ubifs_dump_node(c, mst2);
	}
	vfree(buf2);
	vfree(buf1);
	return err;
}

/**
 * ubifs_write_rcvrd_mst_node - write the recovered master node.
 * @c: UBIFS file-system description object
 *
 * This function writes the master node that was recovered during mounting in
 * read-only mode and must now be written because we are remounting rw.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_write_rcvrd_mst_node(struct ubifs_info *c)
{
	int err;

	if (!c->rcvrd_mst_node)
		return 0;
	c->rcvrd_mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
	c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
	err = write_rcvrd_mst_node(c, c->rcvrd_mst_node);
	if (err)
		return err;
	kfree(c->rcvrd_mst_node);
	c->rcvrd_mst_node = NULL;
	return 0;
}

/**
 * is_last_write - determine if an offset was in the last write to a LEB.
 * @c: UBIFS file-system description object
 * @buf: buffer to check
 * @offs: offset to check
 *
 * This function returns %1 if @offs was in the last write to the LEB whose data
 * is in @buf, otherwise %0 is returned. The determination is made by checking
 * for subsequent empty space starting from the next @c->max_write_size
 * boundary.
 */
static int is_last_write(const struct ubifs_info *c, void *buf, int offs)
{
	int empty_offs, check_len;
	uint8_t *p;

	/*
	 * Round up to the next @c->max_write_size boundary i.e. @offs is in
	 * the last wbuf written. After that should be empty space.
	 */
	empty_offs = ALIGN(offs + 1, c->max_write_size);
	check_len = c->leb_size - empty_offs;
	p = buf + empty_offs - offs;
	return is_empty(p, check_len);
}
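/*
 * Worked example for 'is_last_write()' above (illustrative numbers): with
 * @c->max_write_size of 2048 bytes and corruption found at LEB offset 5000,
 * empty_offs becomes ALIGN(5001, 2048) = 6144. The corruption can only be
 * the tail of an interrupted final write if every byte from offset 6144 to
 * the end of the LEB is still 0xFF.
 */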
/**
 * clean_buf - clean the data from an LEB sitting in a buffer.
 * @c: UBIFS file-system description object
 * @buf: buffer to clean
 * @lnum: LEB number to clean
 * @offs: offset from which to clean
 * @len: length of buffer
 *
 * This function pads up to the next min_io_size boundary (if there is one) and
 * sets empty space to all 0xff. @buf, @offs and @len are updated to the next
 * @c->min_io_size boundary.
 */
static void clean_buf(const struct ubifs_info *c, void **buf, int lnum,
		      int *offs, int *len)
{
	int empty_offs, pad_len;

	lnum = lnum;
	dbg_rcvry("cleaning corruption at %d:%d", lnum, *offs);

	ubifs_assert(!(*offs & 7));
	empty_offs = ALIGN(*offs, c->min_io_size);
	pad_len = empty_offs - *offs;
	ubifs_pad(c, *buf, pad_len);
	*offs += pad_len;
	*buf += pad_len;
	*len -= pad_len;
	memset(*buf, 0xff, c->leb_size - empty_offs);
}

/**
 * no_more_nodes - determine if there are no more nodes in a buffer.
 * @c: UBIFS file-system description object
 * @buf: buffer to check
 * @len: length of buffer
 * @lnum: LEB number of the LEB from which @buf was read
 * @offs: offset from which @buf was read
 *
 * This function ensures that the corrupted node at @offs is the last thing
 * written to a LEB. This function returns %1 if more data is not found and
 * %0 if more data is found.
 */
static int no_more_nodes(const struct ubifs_info *c, void *buf, int len,
			 int lnum, int offs)
{
	struct ubifs_ch *ch = buf;
	int skip, dlen = le32_to_cpu(ch->len);

	/* Check for empty space after the corrupt node's common header */
	skip = ALIGN(offs + UBIFS_CH_SZ, c->max_write_size) - offs;
	if (is_empty(buf + skip, len - skip))
		return 1;
	/*
	 * The area after the common header size is not empty, so the common
	 * header must be intact. Check it.
	 */
	if (ubifs_check_node(c, buf, lnum, offs, 1, 0) != -EUCLEAN) {
		dbg_rcvry("unexpected bad common header at %d:%d", lnum, offs);
		return 0;
	}
	/* Now we know the corrupt node's length, we can skip over it */
	skip = ALIGN(offs + dlen, c->max_write_size) - offs;
	/* After which there should be empty space */
	if (is_empty(buf + skip, len - skip))
		return 1;
	dbg_rcvry("unexpected data at %d:%d", lnum, offs + skip);
	return 0;
}
/**
 * fix_unclean_leb - fix an unclean LEB.
 * @c: UBIFS file-system description object
 * @sleb: scanned LEB information
 * @start: offset where scan started
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int fix_unclean_leb(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
			   int start)
{
	int lnum = sleb->lnum, endpt = start;

	/* Get the end offset of the last node we are keeping */
	if (!list_empty(&sleb->nodes)) {
		struct ubifs_scan_node *snod;

		snod = list_entry(sleb->nodes.prev,
				  struct ubifs_scan_node, list);
		endpt = snod->offs + snod->len;
	}

	if (c->ro_mount && !c->remounting_rw) {
		/* Add to recovery list */
		struct ubifs_unclean_leb *ucleb;

		dbg_rcvry("need to fix LEB %d start %d endpt %d",
			  lnum, start, sleb->endpt);
		ucleb = kzalloc(sizeof(struct ubifs_unclean_leb), GFP_NOFS);
		if (!ucleb)
			return -ENOMEM;
		ucleb->lnum = lnum;
		ucleb->endpt = endpt;
		list_add_tail(&ucleb->list, &c->unclean_leb_list);
	} else {
		/* Write the fixed LEB back to flash */
		int err;

		dbg_rcvry("fixing LEB %d start %d endpt %d",
			  lnum, start, sleb->endpt);
		if (endpt == 0) {
			err = ubifs_leb_unmap(c, lnum);
			if (err)
				return err;
		} else {
			int len = ALIGN(endpt, c->min_io_size);

			if (start) {
				err = ubifs_leb_read(c, lnum, sleb->buf, 0,
						     start, 1);
				if (err)
					return err;
			}
			/* Pad to min_io_size */
			if (len > endpt) {
				int pad_len = len - ALIGN(endpt, 8);

				if (pad_len > 0) {
					void *buf = sleb->buf + len - pad_len;

					ubifs_pad(c, buf, pad_len);
				}
			}
			err = ubifs_leb_change(c, lnum, sleb->buf, len);
			if (err)
				return err;
		}
	}
	return 0;
}

/**
 * drop_last_group - drop the last group of nodes.
 * @sleb: scanned LEB information
 * @offs: offset of dropped nodes is returned here
 *
 * This is a helper function for 'ubifs_recover_leb()' which drops the last
 * group of nodes of the scanned LEB.
 */
static void drop_last_group(struct ubifs_scan_leb *sleb, int *offs)
{
	while (!list_empty(&sleb->nodes)) {
		struct ubifs_scan_node *snod;
		struct ubifs_ch *ch;

		snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node,
				  list);
		ch = snod->node;
		if (ch->group_type != UBIFS_IN_NODE_GROUP)
			break;

		dbg_rcvry("dropping grouped node at %d:%d",
			  sleb->lnum, snod->offs);
		*offs = snod->offs;
		list_del(&snod->list);
		kfree(snod);
		sleb->nodes_cnt -= 1;
	}
}

/**
 * drop_last_node - drop the last node.
 * @sleb: scanned LEB information
 * @offs: offset of dropped nodes is returned here
 *
 * This is a helper function for 'ubifs_recover_leb()' which drops the last
 * node of the scanned LEB.
 */
static void drop_last_node(struct ubifs_scan_leb *sleb, int *offs)
{
	struct ubifs_scan_node *snod;

	if (!list_empty(&sleb->nodes)) {
		snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node,
				  list);

		dbg_rcvry("dropping last node at %d:%d",
			  sleb->lnum, snod->offs);
		*offs = snod->offs;
		list_del(&snod->list);
		kfree(snod);
		sleb->nodes_cnt -= 1;
	}
}
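/*
 * Rough picture of what 'ubifs_recover_leb()' below produces for a journal
 * LEB hit by a power cut (sizes not to scale):
 *
 *   before: | good node | good node | partly written node | garbage | 0xFF |
 *   after:  | good node | good node | padding to min. I/O size | 0xFF ...  |
 *
 * Incomplete node groups (and, for the GC head, all nodes in the last used
 * min. I/O unit) are dropped as well, as explained in the comments inside
 * the function.
 */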
/**
 * ubifs_recover_leb - scan and recover a LEB.
 * @c: UBIFS file-system description object
 * @lnum: LEB number
 * @offs: offset
 * @sbuf: LEB-sized buffer to use
 * @jhead: journal head number this LEB belongs to (%-1 if the LEB does not
 *         belong to any journal head)
 *
 * This function does a scan of a LEB, but caters for errors that might have
 * been caused by the unclean unmount from which we are attempting to recover.
 * Returns the scanned information on success and a negative error code on
 * failure.
 */
struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
					 int offs, void *sbuf, int jhead)
{
	int ret = 0, err, len = c->leb_size - offs, start = offs, min_io_unit;
	int grouped = jhead == -1 ? 0 : c->jheads[jhead].grouped;
	struct ubifs_scan_leb *sleb;
	void *buf = sbuf + offs;

	dbg_rcvry("%d:%d, jhead %d, grouped %d", lnum, offs, jhead, grouped);

	sleb = ubifs_start_scan(c, lnum, offs, sbuf);
	if (IS_ERR(sleb))
		return sleb;

	ubifs_assert(len >= 8);
	while (len >= 8) {
		dbg_scan("look at LEB %d:%d (%d bytes left)",
			 lnum, offs, len);

		cond_resched();

		/*
		 * Scan quietly until there is an error from which we cannot
		 * recover
		 */
		ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
		if (ret == SCANNED_A_NODE) {
			/* A valid node, and not a padding node */
			struct ubifs_ch *ch = buf;
			int node_len;

			err = ubifs_add_snod(c, sleb, buf, offs);
			if (err)
				goto error;
			node_len = ALIGN(le32_to_cpu(ch->len), 8);
			offs += node_len;
			buf += node_len;
			len -= node_len;
		} else if (ret > 0) {
			/* Padding bytes or a valid padding node */
			offs += ret;
			buf += ret;
			len -= ret;
		} else if (ret == SCANNED_EMPTY_SPACE ||
			   ret == SCANNED_GARBAGE ||
			   ret == SCANNED_A_BAD_PAD_NODE ||
			   ret == SCANNED_A_CORRUPT_NODE) {
			dbg_rcvry("found corruption (%d) at %d:%d",
				  ret, lnum, offs);
			break;
		} else {
			ubifs_err(c, "unexpected return value %d", ret);
			err = -EINVAL;
			goto error;
		}
	}

	if (ret == SCANNED_GARBAGE || ret == SCANNED_A_BAD_PAD_NODE) {
		if (!is_last_write(c, buf, offs))
			goto corrupted_rescan;
	} else if (ret == SCANNED_A_CORRUPT_NODE) {
		if (!no_more_nodes(c, buf, len, lnum, offs))
			goto corrupted_rescan;
	} else if (!is_empty(buf, len)) {
		if (!is_last_write(c, buf, offs)) {
			int corruption = first_non_ff(buf, len);

			/*
			 * See header comment for this file for more
			 * explanations about the reasons we have this check.
			 */
			ubifs_err(c, "corrupt empty space LEB %d:%d, corruption starts at %d",
				  lnum, offs, corruption);
			/* Make sure we dump interesting non-0xFF data */
			offs += corruption;
			buf += corruption;
			goto corrupted;
		}
	}

	min_io_unit = round_down(offs, c->min_io_size);
	if (grouped)
		/*
		 * If nodes are grouped, always drop the incomplete group at
		 * the end.
		 */
		drop_last_group(sleb, &offs);

	if (jhead == GCHD) {
		/*
		 * If this LEB belongs to the GC head then while we are in the
		 * middle of the same min. I/O unit keep dropping nodes. So
		 * basically, what we want is to make sure that the last min.
		 * I/O unit where we saw the corruption is dropped completely
		 * with all the uncorrupted nodes which may possibly sit there.
		 *
		 * In other words, let's name the min. I/O unit where the
		 * corruption starts B, and the previous min. I/O unit A. The
		 * below code tries to deal with a situation when half of B
		 * contains valid nodes or the end of a valid node, and the
		 * second half of B contains corrupted data or garbage. This
		 * means that UBIFS had been writing to B just before the power
		 * cut happened. I do not know how realistic this scenario is -
		 * that half of the min. I/O unit had been written successfully
		 * and the other half not - but it is possible in our 'failure
		 * mode emulation' infrastructure at least.
		 *
		 * So what is the problem - why do we need to drop those nodes?
		 * Why can't we just clean up the second half of B by putting a
		 * padding node there? We can, and this works fine with one
		 * exception which was reproduced with power cut emulation
		 * testing and happens extremely rarely.
		 *
		 * Imagine the file-system is full, we run GC which starts
		 * moving valid nodes from LEB X to LEB Y (obviously, LEB Y is
		 * the current GC head LEB). The @c->gc_lnum is -1, which means
		 * that GC will retain LEB X and will try to continue. Imagine
		 * that LEB X is currently the dirtiest LEB, and the amount of
		 * used space in LEB Y is exactly the same as amount of free
		 * space in LEB X.
		 *
		 * And a power cut happens when nodes are moved from LEB X to
		 * LEB Y. We are here trying to recover LEB Y which is the GC
		 * head LEB. We find the min. I/O unit B as described above.
		 * Then we clean up LEB Y by padding min. I/O unit. And later
		 * 'ubifs_rcvry_gc_commit()' function fails, because it cannot
		 * find a dirty LEB which could be GC'd into LEB Y! Even LEB X
		 * does not match because the amount of valid nodes there does
		 * not fit the free space in LEB Y any more! And this is
		 * because of the padding node which we added to LEB Y. The
		 * user-visible effect of this which I once observed and
		 * analysed is that we cannot mount the file-system with
		 * -ENOSPC error.
		 *
		 * So obviously, to make sure that situation does not happen we
		 * should free min. I/O unit B in LEB Y completely and the last
		 * used min. I/O unit in LEB Y should be A. This is basically
		 * what the below code tries to do.
		 */
		while (offs > min_io_unit)
			drop_last_node(sleb, &offs);
	}

	buf = sbuf + offs;
	len = c->leb_size - offs;

	clean_buf(c, &buf, lnum, &offs, &len);
	ubifs_end_scan(c, sleb, lnum, offs);

	err = fix_unclean_leb(c, sleb, start);
	if (err)
		goto error;

	return sleb;

corrupted_rescan:
	/* Re-scan the corrupted data with verbose messages */
	ubifs_err(c, "corruption %d", ret);
	ubifs_scan_a_node(c, buf, len, lnum, offs, 0);
corrupted:
	ubifs_scanned_corruption(c, lnum, offs, buf);
	err = -EUCLEAN;
error:
	ubifs_err(c, "LEB %d scanning failed", lnum);
	ubifs_scan_destroy(sleb);
	return ERR_PTR(err);
}
/**
 * get_cs_sqnum - get commit start sequence number.
 * @c: UBIFS file-system description object
 * @lnum: LEB number of commit start node
 * @offs: offset of commit start node
 * @cs_sqnum: commit start sequence number is returned here
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int get_cs_sqnum(struct ubifs_info *c, int lnum, int offs,
			unsigned long long *cs_sqnum)
{
	struct ubifs_cs_node *cs_node = NULL;
	int err, ret;

	dbg_rcvry("at %d:%d", lnum, offs);
	cs_node = kmalloc(UBIFS_CS_NODE_SZ, GFP_KERNEL);
	if (!cs_node)
		return -ENOMEM;
	if (c->leb_size - offs < UBIFS_CS_NODE_SZ)
		goto out_err;
	err = ubifs_leb_read(c, lnum, (void *)cs_node, offs,
			     UBIFS_CS_NODE_SZ, 0);
	if (err && err != -EBADMSG)
		goto out_free;
	ret = ubifs_scan_a_node(c, cs_node, UBIFS_CS_NODE_SZ, lnum, offs, 0);
	if (ret != SCANNED_A_NODE) {
		ubifs_err(c, "Not a valid node");
		goto out_err;
	}
	if (cs_node->ch.node_type != UBIFS_CS_NODE) {
		ubifs_err(c, "Not a CS node, type is %d", cs_node->ch.node_type);
		goto out_err;
	}
	if (le64_to_cpu(cs_node->cmt_no) != c->cmt_no) {
		ubifs_err(c, "CS node cmt_no %llu != current cmt_no %llu",
			  (unsigned long long)le64_to_cpu(cs_node->cmt_no),
			  c->cmt_no);
		goto out_err;
	}
	*cs_sqnum = le64_to_cpu(cs_node->ch.sqnum);
	dbg_rcvry("commit start sqnum %llu", *cs_sqnum);
	kfree(cs_node);
	return 0;

out_err:
	err = -EINVAL;
out_free:
	ubifs_err(c, "failed to get CS sqnum");
	kfree(cs_node);
	return err;
}

/**
 * ubifs_recover_log_leb - scan and recover a log LEB.
 * @c: UBIFS file-system description object
 * @lnum: LEB number
 * @offs: offset
 * @sbuf: LEB-sized buffer to use
 *
 * This function does a scan of a LEB, but caters for errors that might have
 * been caused by unclean reboots from which we are attempting to recover
 * (assume that only the last log LEB can be corrupted by an unclean reboot).
 *
 * This function returns %0 on success and a negative error code on failure.
 */
struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum,
					     int offs, void *sbuf)
{
	struct ubifs_scan_leb *sleb;
	int next_lnum;

	dbg_rcvry("LEB %d", lnum);
	next_lnum = lnum + 1;
	if (next_lnum >= UBIFS_LOG_LNUM + c->log_lebs)
		next_lnum = UBIFS_LOG_LNUM;
	if (next_lnum != c->ltail_lnum) {
		/*
		 * We can only recover at the end of the log, so check that the
		 * next log LEB is empty or out of date.
		 */
		sleb = ubifs_scan(c, next_lnum, 0, sbuf, 0);
		if (IS_ERR(sleb))
			return sleb;
		if (sleb->nodes_cnt) {
			struct ubifs_scan_node *snod;
			unsigned long long cs_sqnum = c->cs_sqnum;

			snod = list_entry(sleb->nodes.next,
					  struct ubifs_scan_node, list);
			if (cs_sqnum == 0) {
				int err;

				err = get_cs_sqnum(c, lnum, offs, &cs_sqnum);
				if (err) {
					ubifs_scan_destroy(sleb);
					return ERR_PTR(err);
				}
			}
			if (snod->sqnum > cs_sqnum) {
				ubifs_err(c, "unrecoverable log corruption in LEB %d",
					  lnum);
				ubifs_scan_destroy(sleb);
				return ERR_PTR(-EUCLEAN);
			}
		}
		ubifs_scan_destroy(sleb);
	}
	return ubifs_recover_leb(c, lnum, offs, sbuf, -1);
}
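/*
 * Note on the log check above: the log LEBs form a circular buffer starting
 * at %UBIFS_LOG_LNUM, which is why next_lnum wraps around. Recovery is only
 * attempted at the head of the log; if the following LEB already contains
 * nodes newer than the current commit start node, the corruption cannot be
 * the result of a power cut at the head and is treated as unrecoverable.
 */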
/**
 * recover_head - recover a head.
 * @c: UBIFS file-system description object
 * @lnum: LEB number of head to recover
 * @offs: offset of head to recover
 * @sbuf: LEB-sized buffer to use
 *
 * This function ensures that there is no data on the flash at a head location.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int recover_head(struct ubifs_info *c, int lnum, int offs, void *sbuf)
{
	int len = c->max_write_size, err;

	if (offs + len > c->leb_size)
		len = c->leb_size - offs;

	if (!len)
		return 0;

	/* Read at the head location and check it is empty flash */
	err = ubifs_leb_read(c, lnum, sbuf, offs, len, 1);
	if (err || !is_empty(sbuf, len)) {
		dbg_rcvry("cleaning head at %d:%d", lnum, offs);
		if (offs == 0)
			return ubifs_leb_unmap(c, lnum);
		err = ubifs_leb_read(c, lnum, sbuf, 0, offs, 1);
		if (err)
			return err;
		return ubifs_leb_change(c, lnum, sbuf, offs);
	}

	return 0;
}

/**
 * ubifs_recover_inl_heads - recover index and LPT heads.
 * @c: UBIFS file-system description object
 * @sbuf: LEB-sized buffer to use
 *
 * This function ensures that there is no data on the flash at the index and
 * LPT head locations.
 *
 * This deals with the recovery of a half-completed journal commit. UBIFS is
 * careful never to overwrite the last version of the index or the LPT. Because
 * the index and LPT are wandering trees, data from a half-completed commit will
 * not be referenced anywhere in UBIFS. The data will be either in LEBs that are
 * assumed to be empty and will be unmapped anyway before use, or in the index
 * and LPT heads.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_recover_inl_heads(struct ubifs_info *c, void *sbuf)
{
	int err;

	ubifs_assert(!c->ro_mount || c->remounting_rw);

	dbg_rcvry("checking index head at %d:%d", c->ihead_lnum, c->ihead_offs);
	err = recover_head(c, c->ihead_lnum, c->ihead_offs, sbuf);
	if (err)
		return err;

	dbg_rcvry("checking LPT head at %d:%d", c->nhead_lnum, c->nhead_offs);

	return recover_head(c, c->nhead_lnum, c->nhead_offs, sbuf);
}
/**
 * clean_an_unclean_leb - read and write a LEB to remove corruption.
 * @c: UBIFS file-system description object
 * @ucleb: unclean LEB information
 * @sbuf: LEB-sized buffer to use
 *
 * This function reads a LEB up to a point pre-determined by the mount recovery,
 * checks the nodes, and writes the result back to the flash, thereby cleaning
 * off any following corruption, or non-fatal ECC errors.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int clean_an_unclean_leb(struct ubifs_info *c,
				struct ubifs_unclean_leb *ucleb, void *sbuf)
{
	int err, lnum = ucleb->lnum, offs = 0, len = ucleb->endpt, quiet = 1;
	void *buf = sbuf;

	dbg_rcvry("LEB %d len %d", lnum, len);

	if (len == 0) {
		/* Nothing to read, just unmap it */
		return ubifs_leb_unmap(c, lnum);
	}

	err = ubifs_leb_read(c, lnum, buf, offs, len, 0);
	if (err && err != -EBADMSG)
		return err;

	while (len >= 8) {
		int ret;

		cond_resched();

		/* Scan quietly until there is an error */
		ret = ubifs_scan_a_node(c, buf, len, lnum, offs, quiet);

		if (ret == SCANNED_A_NODE) {
			/* A valid node, and not a padding node */
			struct ubifs_ch *ch = buf;
			int node_len;

			node_len = ALIGN(le32_to_cpu(ch->len), 8);
			offs += node_len;
			buf += node_len;
			len -= node_len;
			continue;
		}

		if (ret > 0) {
			/* Padding bytes or a valid padding node */
			offs += ret;
			buf += ret;
			len -= ret;
			continue;
		}

		if (ret == SCANNED_EMPTY_SPACE) {
			ubifs_err(c, "unexpected empty space at %d:%d",
				  lnum, offs);
			return -EUCLEAN;
		}

		if (quiet) {
			/* Redo the last scan but noisily */
			quiet = 0;
			continue;
		}

		ubifs_scanned_corruption(c, lnum, offs, buf);
		return -EUCLEAN;
	}

	/* Pad to min_io_size */
	len = ALIGN(ucleb->endpt, c->min_io_size);
	if (len > ucleb->endpt) {
		int pad_len = len - ALIGN(ucleb->endpt, 8);

		if (pad_len > 0) {
			buf = c->sbuf + len - pad_len;
			ubifs_pad(c, buf, pad_len);
		}
	}

	/* Write back the LEB atomically */
	err = ubifs_leb_change(c, lnum, sbuf, len);
	if (err)
		return err;

	dbg_rcvry("cleaned LEB %d", lnum);

	return 0;
}

/**
 * ubifs_clean_lebs - clean LEBs recovered during read-only mount.
 * @c: UBIFS file-system description object
 * @sbuf: LEB-sized buffer to use
 *
 * This function cleans the LEBs identified during recovery that needed to be
 * written but were not because UBIFS was mounted read-only. This happens when
 * remounting to read-write mode.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_clean_lebs(struct ubifs_info *c, void *sbuf)
{
	dbg_rcvry("recovery");
	while (!list_empty(&c->unclean_leb_list)) {
		struct ubifs_unclean_leb *ucleb;
		int err;

		ucleb = list_entry(c->unclean_leb_list.next,
				   struct ubifs_unclean_leb, list);
		err = clean_an_unclean_leb(c, ucleb, sbuf);
		if (err)
			return err;
		list_del(&ucleb->list);
		kfree(ucleb);
	}
	return 0;
}

/**
 * grab_empty_leb - grab an empty LEB to use as GC LEB and run commit.
 * @c: UBIFS file-system description object
 *
 * This is a helper function for 'ubifs_rcvry_gc_commit()' which grabs an empty
 * LEB to be used as GC LEB (@c->gc_lnum), and then runs the commit. Returns
 * zero in case of success and a negative error code in case of failure.
 */
static int grab_empty_leb(struct ubifs_info *c)
{
	int lnum, err;

	/*
	 * Note, it is very important to first search for an empty LEB and then
	 * run the commit, not vice-versa. The reason is that there might be
	 * only one empty LEB at the moment, the one which has been the
	 * @c->gc_lnum just before the power cut happened. During the regular
	 * UBIFS operation (not now) @c->gc_lnum is marked as "taken", so no
	 * one but GC can grab it. But at this moment this single empty LEB is
	 * not marked as taken, so if we run commit - what happens? Right, the
	 * commit will grab it and write the index there. Remember that the
	 * index always expands as long as there is free space, and it only
	 * starts consolidating when we run out of space.
	 *
	 * IOW, if we run commit now, we might not be able to find a free LEB
	 * after this.
	 */
	lnum = ubifs_find_free_leb_for_idx(c);
	if (lnum < 0) {
		ubifs_err(c, "could not find an empty LEB");
		ubifs_dump_lprops(c);
		ubifs_dump_budg(c, &c->bi);
		return lnum;
	}

	/* Reset the index flag */
	err = ubifs_change_one_lp(c, lnum, LPROPS_NC, LPROPS_NC, 0,
				  LPROPS_INDEX, 0);
	if (err)
		return err;

	c->gc_lnum = lnum;
	dbg_rcvry("found empty LEB %d, run commit", lnum);

	return ubifs_run_commit(c);
}

/**
 * ubifs_rcvry_gc_commit - recover the GC LEB number and run the commit.
 * @c: UBIFS file-system description object
 *
 * Out-of-place garbage collection always requires one empty LEB with which to
 * start garbage collection. The LEB number is recorded in @c->gc_lnum and is
 * written to the master node on unmounting. In the case of an unclean unmount
 * the value of @c->gc_lnum recorded in the master node is out of date and
 * cannot be used. Instead, recovery must allocate an empty LEB for this
 * purpose. However, there may not be enough empty space, in which case it must
 * be possible to GC the dirtiest LEB into the GC head LEB.
 *
 * This function also runs the commit which causes the TNC updates from
 * size-recovery and orphans to be written to the flash. That is important to
 * ensure correct replay order for subsequent mounts.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_rcvry_gc_commit(struct ubifs_info *c)
{
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
	struct ubifs_lprops lp;
	int err;

	dbg_rcvry("GC head LEB %d, offs %d", wbuf->lnum, wbuf->offs);

	c->gc_lnum = -1;
	if (wbuf->lnum == -1 || wbuf->offs == c->leb_size)
		return grab_empty_leb(c);

	err = ubifs_find_dirty_leb(c, &lp, wbuf->offs, 2);
	if (err) {
		if (err != -ENOSPC)
			return err;

		dbg_rcvry("could not find a dirty LEB");
		return grab_empty_leb(c);
	}

	ubifs_assert(!(lp.flags & LPROPS_INDEX));
	ubifs_assert(lp.free + lp.dirty >= wbuf->offs);

	/*
	 * We run the commit before garbage collection otherwise subsequent
	 * mounts will see the GC and orphan deletion in a different order.
	 */
	dbg_rcvry("committing");
	err = ubifs_run_commit(c);
	if (err)
		return err;

	dbg_rcvry("GC'ing LEB %d", lp.lnum);
	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
	err = ubifs_garbage_collect_leb(c, &lp);
	if (err >= 0) {
		int err2 = ubifs_wbuf_sync_nolock(wbuf);

		if (err2)
			err = err2;
	}
	mutex_unlock(&wbuf->io_mutex);
	if (err < 0) {
		ubifs_err(c, "GC failed, error %d", err);
		if (err == -EAGAIN)
			err = -EINVAL;
		return err;
	}

	ubifs_assert(err == LEB_RETAINED);
	if (err != LEB_RETAINED)
		return -EINVAL;

	err = ubifs_leb_unmap(c, c->gc_lnum);
	if (err)
		return err;

	dbg_rcvry("allocated LEB %d for GC", lp.lnum);
	return 0;
}

/**
 * struct size_entry - inode size information for recovery.
 * @rb: link in the RB-tree of sizes
 * @inum: inode number
 * @i_size: size on inode
 * @d_size: maximum size based on data nodes
 * @exists: indicates whether the inode exists
 * @inode: inode if pinned in memory awaiting rw mode to fix it
 */
struct size_entry {
	struct rb_node rb;
	ino_t inum;
	loff_t i_size;
	loff_t d_size;
	int exists;
	struct inode *inode;
};

/**
 * add_ino - add an entry to the size tree.
 * @c: UBIFS file-system description object
 * @inum: inode number
 * @i_size: size on inode
 * @d_size: maximum size based on data nodes
 * @exists: indicates whether the inode exists
 */
static int add_ino(struct ubifs_info *c, ino_t inum, loff_t i_size,
		   loff_t d_size, int exists)
{
	struct rb_node **p = &c->size_tree.rb_node, *parent = NULL;
	struct size_entry *e;

	while (*p) {
		parent = *p;
		e = rb_entry(parent, struct size_entry, rb);
		if (inum < e->inum)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	e = kzalloc(sizeof(struct size_entry), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->inum = inum;
	e->i_size = i_size;
	e->d_size = d_size;
	e->exists = exists;

	rb_link_node(&e->rb, parent, p);
	rb_insert_color(&e->rb, &c->size_tree);

	return 0;
}

/**
 * find_ino - find an entry on the size tree.
 * @c: UBIFS file-system description object
 * @inum: inode number
 */
static struct size_entry *find_ino(struct ubifs_info *c, ino_t inum)
{
	struct rb_node *p = c->size_tree.rb_node;
	struct size_entry *e;

	while (p) {
		e = rb_entry(p, struct size_entry, rb);
		if (inum < e->inum)
			p = p->rb_left;
		else if (inum > e->inum)
			p = p->rb_right;
		else
			return e;
	}
	return NULL;
}

/**
 * remove_ino - remove an entry from the size tree.
 * @c: UBIFS file-system description object
 * @inum: inode number
 */
static void remove_ino(struct ubifs_info *c, ino_t inum)
{
	struct size_entry *e = find_ino(c, inum);

	if (!e)
		return;
	rb_erase(&e->rb, &c->size_tree);
	kfree(e);
}
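/*
 * Illustrative example of how the size tree is used (the numbers are made
 * up): suppose the journal contains an inode node for inode 42 with i_size
 * of 4096 bytes, followed by a data node for the same inode that ends at
 * offset 20480. Replay records i_size = 4096 and d_size = 20480 in the
 * inode's 'struct size_entry' (see 'ubifs_recover_size_accum()'), and
 * 'ubifs_recover_size()' later fixes the discrepancy, because a power cut
 * may have hit after the data was written but before the inode was updated.
 */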
/**
 * ubifs_destroy_size_tree - free resources related to the size tree.
 * @c: UBIFS file-system description object
 */
void ubifs_destroy_size_tree(struct ubifs_info *c)
{
	struct size_entry *e, *n;

	rbtree_postorder_for_each_entry_safe(e, n, &c->size_tree, rb) {
		iput(e->inode);
		kfree(e);
	}

	c->size_tree = RB_ROOT;
}

/**
 * ubifs_recover_size_accum - accumulate inode sizes for recovery.
 * @c: UBIFS file-system description object
 * @key: node key
 * @deletion: node is for a deletion
 * @new_size: inode size
 *
 * This function has two purposes:
 *     1) to ensure there are no data nodes that fall outside the inode size
 *     2) to ensure there are no data nodes for inodes that do not exist
 * To accomplish those purposes, an rb-tree is constructed containing an entry
 * for each inode number in the journal that has not been deleted, and recording
 * the size from the inode node, the maximum size of any data node (also altered
 * by truncations) and a flag indicating an inode number for which no inode node
 * was present in the journal.
 *
 * Note that there is still the possibility that there are data nodes that have
 * been committed that are beyond the inode size; however, the only way to find
 * them would be to scan the entire index. Alternatively, some provision could
 * be made to record the size of inodes at the start of commit, which would seem
 * very cumbersome for a scenario that is quite unlikely and the only negative
 * consequence of which is wasted space.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_recover_size_accum(struct ubifs_info *c, union ubifs_key *key,
			     int deletion, loff_t new_size)
{
	ino_t inum = key_inum(c, key);
	struct size_entry *e;
	int err;

	switch (key_type(c, key)) {
	case UBIFS_INO_KEY:
		if (deletion)
			remove_ino(c, inum);
		else {
			e = find_ino(c, inum);
			if (e) {
				e->i_size = new_size;
				e->exists = 1;
			} else {
				err = add_ino(c, inum, new_size, 0, 1);
				if (err)
					return err;
			}
		}
		break;
	case UBIFS_DATA_KEY:
		e = find_ino(c, inum);
		if (e) {
			if (new_size > e->d_size)
				e->d_size = new_size;
		} else {
			err = add_ino(c, inum, 0, new_size, 0);
			if (err)
				return err;
		}
		break;
	case UBIFS_TRUN_KEY:
		e = find_ino(c, inum);
		if (e)
			e->d_size = new_size;
		break;
	}
	return 0;
}

/**
 * fix_size_in_place - fix inode size in place on flash.
 * @c: UBIFS file-system description object
 * @e: inode size information for recovery
 */
static int fix_size_in_place(struct ubifs_info *c, struct size_entry *e)
{
	struct ubifs_ino_node *ino = c->sbuf;
	unsigned char *p;
	union ubifs_key key;
	int err, lnum, offs, len;
	loff_t i_size;
	uint32_t crc;

	/* Locate the inode node LEB number and offset */
	ino_key_init(c, &key, e->inum);
	err = ubifs_tnc_locate(c, &key, ino, &lnum, &offs);
	if (err)
		goto out;
	/*
	 * If the size recorded on the inode node is greater than the size that
	 * was calculated from nodes in the journal then don't change the inode.
	 */
	i_size = le64_to_cpu(ino->size);
	if (i_size >= e->d_size)
		return 0;
	/* Read the LEB */
	err = ubifs_leb_read(c, lnum, c->sbuf, 0, c->leb_size, 1);
	if (err)
		goto out;
	/* Change the size field and recalculate the CRC */
	ino = c->sbuf + offs;
	ino->size = cpu_to_le64(e->d_size);
	len = le32_to_cpu(ino->ch.len);
	crc = crc32(UBIFS_CRC32_INIT, (void *)ino + 8, len - 8);
	ino->ch.crc = cpu_to_le32(crc);
	/* Work out where data in the LEB ends and free space begins */
	p = c->sbuf;
	len = c->leb_size - 1;
	while (p[len] == 0xff)
		len -= 1;
	len = ALIGN(len + 1, c->min_io_size);
	/* Atomically write the fixed LEB back again */
	err = ubifs_leb_change(c, lnum, c->sbuf, len);
	if (err)
		goto out;
	dbg_rcvry("inode %lu at %d:%d size %lld -> %lld",
		  (unsigned long)e->inum, lnum, offs, i_size, e->d_size);
	return 0;

out:
	ubifs_warn(c, "inode %lu failed to fix size %lld -> %lld error %d",
		   (unsigned long)e->inum, e->i_size, e->d_size, err);
	return err;
}

/**
 * ubifs_recover_size - recover inode size.
 * @c: UBIFS file-system description object
 *
 * This function attempts to fix inode size discrepancies identified by the
 * 'ubifs_recover_size_accum()' function.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_recover_size(struct ubifs_info *c)
{
	struct rb_node *this = rb_first(&c->size_tree);

	while (this) {
		struct size_entry *e;
		int err;

		e = rb_entry(this, struct size_entry, rb);
		if (!e->exists) {
			union ubifs_key key;

			ino_key_init(c, &key, e->inum);
			err = ubifs_tnc_lookup(c, &key, c->sbuf);
			if (err && err != -ENOENT)
				return err;
			if (err == -ENOENT) {
				/* Remove data nodes that have no inode */
				dbg_rcvry("removing ino %lu",
					  (unsigned long)e->inum);
				err = ubifs_tnc_remove_ino(c, e->inum);
				if (err)
					return err;
			} else {
				struct ubifs_ino_node *ino = c->sbuf;

				e->exists = 1;
				e->i_size = le64_to_cpu(ino->size);
			}
		}

		if (e->exists && e->i_size < e->d_size) {
			if (c->ro_mount) {
				/* Fix the inode size and pin it in memory */
				struct inode *inode;
				struct ubifs_inode *ui;

				ubifs_assert(!e->inode);

				inode = ubifs_iget(c->vfs_sb, e->inum);
				if (IS_ERR(inode))
					return PTR_ERR(inode);

				ui = ubifs_inode(inode);
				if (inode->i_size < e->d_size) {
					dbg_rcvry("ino %lu size %lld -> %lld",
						  (unsigned long)e->inum,
						  inode->i_size, e->d_size);
					inode->i_size = e->d_size;
					ui->ui_size = e->d_size;
					ui->synced_i_size = e->d_size;
					e->inode = inode;
					this = rb_next(this);
					continue;
				}
				iput(inode);
			} else {
				/* Fix the size in place */
				err = fix_size_in_place(c, e);
				if (err)
					return err;
				iput(e->inode);
			}
		}

		this = rb_next(this);
		rb_erase(&e->rb, &c->size_tree);
		kfree(e);
	}

	return 0;
}