/*
 * linux/fs/jbd2/revoke.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 2000
 *
 * Copyright 2000 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal revoke routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 *
 * Revoke is the mechanism used to prevent old log records for deleted
 * metadata from being replayed on top of newer data using the same
 * blocks.  The revoke mechanism is used in two separate places:
 *
 * + Commit: during commit we write the entire list of the current
 *   transaction's revoked blocks to the journal
 *
 * + Recovery: during recovery we record the transaction ID of all
 *   revoked blocks.  If there are multiple revoke records in the log
 *   for a single block, only the last one counts, and if there is a log
 *   entry for a block beyond the last revoke, then that log entry still
 *   gets replayed.
 *
 * We can get interactions between revokes and new log data within a
 * single transaction:
 *
 * Block is revoked and then journaled:
 *   The desired end result is the journaling of the new block, so we
 *   cancel the revoke before the transaction commits.
 *
 * Block is journaled and then revoked:
 *   The revoke must take precedence over the write of the block, so we
 *   need either to cancel the journal entry or to write the revoke
 *   later in the log than the log block.  In this case, we choose the
 *   latter: journaling a block cancels any revoke record for that block
 *   in the current transaction, so any revoke for that block in the
 *   transaction must have happened after the block was journaled and so
 *   the revoke must take precedence.
 *
 * Block is revoked and then written as data:
 *   The data write is allowed to succeed, but the revoke is _not_
 *   cancelled.  We still need to prevent old log records from
 *   overwriting the new data.  We don't even need to clear the revoke
 *   bit here.
 *
 * We cache the revoke status of a buffer in the current transaction in its
 * b_state bits.  As the name says, the revokevalid flag indicates that the
 * cached revoke status of a buffer is valid and we can rely on the cached
 * status.
 *
 * Revoke information on buffers is a tri-state value:
 *
 * RevokeValid clear:	no cached revoke status, need to look it up
 * RevokeValid set, Revoked clear:
 *			buffer has not been revoked, and cancel_revoke
 *			need do nothing.
 * RevokeValid set, Revoked set:
 *			buffer has been revoked.
 *
 * Locking rules:
 * We keep two hash tables of revoke records.  One hash table belongs to the
 * running transaction (it is pointed to by journal->j_revoke), the other one
 * belongs to the committing transaction.  Accesses to the second hash table
 * happen only from kjournald and no other thread touches this table.  Also
 * journal_switch_revoke_table(), which switches which hash table belongs to
 * the running and which to the committing transaction, is called only from
 * kjournald.  Therefore we need no locks when accessing the hash table
 * belonging to the committing transaction.
 *
 * All users operating on the hash table belonging to the running transaction
 * have a handle to the transaction.  Therefore they are safe from kjournald
 * switching hash tables under them.  For operations on the lists of entries
 * in the hash table, j_revoke_lock is used.
 *
 * Finally, the replay code also uses the hash tables, but at that point no
 * one else can touch them (the filesystem isn't mounted yet) and hence no
 * locking is needed.
 */
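
/*
 * Illustrative sketch only (not compiled code): how the tri-state cache
 * above is typically consumed when a buffer is handed back to the journal.
 * The authoritative logic lives in jbd2_journal_cancel_revoke() below; "bh"
 * stands for a hypothetical buffer_head that is about to be journaled.
 *
 *	if (!buffer_revokevalid(bh))
 *		search the revoke hash table;	// no cached status
 *	else if (buffer_revoked(bh))
 *		cancel the pending revoke;	// cached: revoked
 *	else
 *		do nothing;			// cached: not revoked
 */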

#ifndef __KERNEL__
#include "jfs_user.h"
#else
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/log2.h>
#include <linux/hash.h>
#endif

static struct kmem_cache *jbd2_revoke_record_cache;
static struct kmem_cache *jbd2_revoke_table_cache;

/* Each revoke record represents one single revoked block.  During
   journal replay, this involves recording the transaction ID of the
   last transaction to revoke this block. */

struct jbd2_revoke_record_s
{
	struct list_head	hash;
	tid_t			sequence;	/* Used for recovery only */
	unsigned long long	blocknr;
};


/* The revoke table is just a simple hash table of revoke records. */
struct jbd2_revoke_table_s
{
	/* It is conceivable that we might want a larger hash table
	 * for recovery.  Must be a power of two. */
	int			hash_size;
	int			hash_shift;
	struct list_head	*hash_table;
};


#ifdef __KERNEL__
static void write_one_revoke_record(journal_t *, transaction_t *,
				    struct list_head *,
				    struct buffer_head **, int *,
				    struct jbd2_revoke_record_s *, int);
static void flush_descriptor(journal_t *, struct buffer_head *, int, int);
#endif

/* Utility functions to maintain the revoke table */

static inline int hash(journal_t *journal, unsigned long long block)
{
	return hash_64(block, journal->j_revoke->hash_shift);
}

static int insert_revoke_hash(journal_t *journal, unsigned long long blocknr,
			      tid_t seq)
{
	struct list_head *hash_list;
	struct jbd2_revoke_record_s *record;
	gfp_t gfp_mask = GFP_NOFS;

	if (journal_oom_retry)
		gfp_mask |= __GFP_NOFAIL;
	record = kmem_cache_alloc(jbd2_revoke_record_cache, gfp_mask);
	if (!record)
		return -ENOMEM;

	record->sequence = seq;
	record->blocknr = blocknr;
	hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)];
	spin_lock(&journal->j_revoke_lock);
	list_add(&record->hash, hash_list);
	spin_unlock(&journal->j_revoke_lock);
	return 0;
}

/* Find a revoke record in the journal's hash table. */

static struct jbd2_revoke_record_s *find_revoke_record(journal_t *journal,
						       unsigned long long blocknr)
{
	struct list_head *hash_list;
	struct jbd2_revoke_record_s *record;

	hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)];

	spin_lock(&journal->j_revoke_lock);
	record = (struct jbd2_revoke_record_s *) hash_list->next;
	while (&(record->hash) != hash_list) {
		if (record->blocknr == blocknr) {
			spin_unlock(&journal->j_revoke_lock);
			return record;
		}
		record = (struct jbd2_revoke_record_s *) record->hash.next;
	}
	spin_unlock(&journal->j_revoke_lock);
	return NULL;
}

void jbd2_journal_destroy_revoke_caches(void)
{
	if (jbd2_revoke_record_cache) {
		kmem_cache_destroy(jbd2_revoke_record_cache);
		jbd2_revoke_record_cache = NULL;
	}
	if (jbd2_revoke_table_cache) {
		kmem_cache_destroy(jbd2_revoke_table_cache);
		jbd2_revoke_table_cache = NULL;
	}
}

int __init jbd2_journal_init_revoke_caches(void)
{
	J_ASSERT(!jbd2_revoke_record_cache);
	J_ASSERT(!jbd2_revoke_table_cache);

	jbd2_revoke_record_cache = KMEM_CACHE(jbd2_revoke_record_s,
					SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY);
	if (!jbd2_revoke_record_cache)
		goto record_cache_failure;

	jbd2_revoke_table_cache = KMEM_CACHE(jbd2_revoke_table_s,
					     SLAB_TEMPORARY);
	if (!jbd2_revoke_table_cache)
		goto table_cache_failure;
	return 0;
table_cache_failure:
	jbd2_journal_destroy_revoke_caches();
record_cache_failure:
	return -ENOMEM;
}

static struct jbd2_revoke_table_s *jbd2_journal_init_revoke_table(int hash_size)
{
	int shift = 0;
	int tmp = hash_size;
	struct jbd2_revoke_table_s *table;

	table = kmem_cache_alloc(jbd2_revoke_table_cache, GFP_KERNEL);
	if (!table)
		goto out;

	while ((tmp >>= 1UL) != 0UL)
		shift++;

	table->hash_size = hash_size;
	table->hash_shift = shift;
	table->hash_table =
		kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
	if (!table->hash_table) {
		kmem_cache_free(jbd2_revoke_table_cache, table);
		table = NULL;
		goto out;
	}

	for (tmp = 0; tmp < hash_size; tmp++)
		INIT_LIST_HEAD(&table->hash_table[tmp]);

out:
	return table;
}

static void jbd2_journal_destroy_revoke_table(struct jbd2_revoke_table_s *table)
{
	int i;
	struct list_head *hash_list;

	for (i = 0; i < table->hash_size; i++) {
		hash_list = &table->hash_table[i];
		J_ASSERT(list_empty(hash_list));
	}

	kfree(table->hash_table);
	kmem_cache_free(jbd2_revoke_table_cache, table);
}

/* Initialise the revoke table for a given journal to a given size. */
int jbd2_journal_init_revoke(journal_t *journal, int hash_size)
{
	J_ASSERT(journal->j_revoke_table[0] == NULL);
	J_ASSERT(is_power_of_2(hash_size));

	journal->j_revoke_table[0] = jbd2_journal_init_revoke_table(hash_size);
	if (!journal->j_revoke_table[0])
		goto fail0;

	journal->j_revoke_table[1] = jbd2_journal_init_revoke_table(hash_size);
	if (!journal->j_revoke_table[1])
		goto fail1;

	journal->j_revoke = journal->j_revoke_table[1];

	spin_lock_init(&journal->j_revoke_lock);

	return 0;

fail1:
	jbd2_journal_destroy_revoke_table(journal->j_revoke_table[0]);
fail0:
	return -ENOMEM;
}
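
/*
 * Usage note (illustrative, not a contract): the journal core is expected to
 * call jbd2_journal_init_revoke() once while setting a journal up, passing a
 * power-of-two hash size (current kernels typically pass
 * JOURNAL_REVOKE_DEFAULT_HASH), and to call jbd2_journal_destroy_revoke() at
 * teardown, once both tables have been emptied by commit or recovery.
 */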

/* Destroy a journal's revoke table.  The table must already be empty! */
void jbd2_journal_destroy_revoke(journal_t *journal)
{
	journal->j_revoke = NULL;
	if (journal->j_revoke_table[0])
		jbd2_journal_destroy_revoke_table(journal->j_revoke_table[0]);
	if (journal->j_revoke_table[1])
		jbd2_journal_destroy_revoke_table(journal->j_revoke_table[1]);
}


#ifdef __KERNEL__

/*
 * jbd2_journal_revoke: revoke a given buffer_head from the journal.  This
 * prevents the block from being replayed during recovery if we take a
 * crash after this current transaction commits.  Any subsequent
 * metadata writes of the buffer in this transaction cancel the
 * revoke.
 *
 * Note that this call may block --- it is up to the caller to make
 * sure that there are no further calls to journal_write_metadata
 * before the revoke is complete.  In ext4, this implies calling the
 * revoke before clearing the block bitmap when we are deleting
 * metadata.
 *
 * Revoke performs a jbd2_journal_forget on any buffer_head passed in as a
 * parameter, but does _not_ forget the buffer_head if the bh was only
 * found implicitly.
 *
 * bh_in may not be a journalled buffer - it may have come off
 * the hash tables without an attached journal_head.
 *
 * If bh_in is non-NULL, jbd2_journal_revoke() will decrement its b_count
 * by one.
 */

int jbd2_journal_revoke(handle_t *handle, unsigned long long blocknr,
			struct buffer_head *bh_in)
{
	struct buffer_head *bh = NULL;
	journal_t *journal;
	struct block_device *bdev;
	int err;

	might_sleep();
	if (bh_in)
		BUFFER_TRACE(bh_in, "enter");

	journal = handle->h_transaction->t_journal;
	if (!jbd2_journal_set_features(journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
		J_ASSERT(!"Cannot set revoke feature!");
		return -EINVAL;
	}

	bdev = journal->j_fs_dev;
	bh = bh_in;

	if (!bh) {
		bh = __find_get_block(bdev, blocknr, journal->j_blocksize);
		if (bh)
			BUFFER_TRACE(bh, "found on hash");
	}
#ifdef JBD2_EXPENSIVE_CHECKING
	else {
		struct buffer_head *bh2;

		/* If there is a different buffer_head lying around in
		 * memory anywhere... */
		bh2 = __find_get_block(bdev, blocknr, journal->j_blocksize);
		if (bh2) {
			/* ... and it has RevokeValid status... */
			if (bh2 != bh && buffer_revokevalid(bh2))
				/* ...then it better be revoked too,
				 * since it's illegal to create a revoke
				 * record against a buffer_head which is
				 * not marked revoked --- that would
				 * risk missing a subsequent revoke
				 * cancel. */
				J_ASSERT_BH(bh2, buffer_revoked(bh2));
			put_bh(bh2);
		}
	}
#endif

	/* We really ought not ever to revoke twice in a row without
	   first having the revoke cancelled: it's illegal to free a
	   block twice without allocating it in between! */
	if (bh) {
		if (!J_EXPECT_BH(bh, !buffer_revoked(bh),
				 "inconsistent data on disk")) {
			if (!bh_in)
				brelse(bh);
			return -EIO;
		}
		set_buffer_revoked(bh);
		set_buffer_revokevalid(bh);
		if (bh_in) {
			BUFFER_TRACE(bh_in, "call jbd2_journal_forget");
			jbd2_journal_forget(handle, bh_in);
		} else {
			BUFFER_TRACE(bh, "call brelse");
			__brelse(bh);
		}
	}

	jbd_debug(2, "insert revoke for block %llu, bh_in=%p\n", blocknr, bh_in);
	err = insert_revoke_hash(journal, blocknr,
				 handle->h_transaction->t_tid);
	BUFFER_TRACE(bh_in, "exit");
	return err;
}
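
/*
 * Caller note (illustrative assumption, not enforced here): in ext4 this is
 * normally reached via ext4_forget() when freed blocks carry metadata, so
 * that a crash after the current transaction commits cannot replay their
 * stale contents on top of whatever reuses those blocks.
 */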

/*
 * Cancel an outstanding revoke.  For use only internally by the
 * journaling code (called from jbd2_journal_get_write_access).
 *
 * We trust buffer_revoked() on the buffer if the buffer is already
 * being journaled: if there is no revoke pending on the buffer, then we
 * don't do anything here.
 *
 * This would break if it were possible for a buffer to be revoked and
 * discarded, and then reallocated within the same transaction.  In such
 * a case we would have lost the revoked bit, but when we arrived here
 * the second time we would still have a pending revoke to cancel.  So,
 * do not trust the Revoked bit on buffers unless RevokeValid is also
 * set.
 */
int jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
{
	struct jbd2_revoke_record_s *record;
	journal_t *journal = handle->h_transaction->t_journal;
	int need_cancel;
	int did_revoke = 0;	/* akpm: debug */
	struct buffer_head *bh = jh2bh(jh);

	jbd_debug(4, "journal_head %p, cancelling revoke\n", jh);

	/* Is the existing Revoke bit valid?  If so, we trust it, and
	 * only perform the full cancel if the revoke bit is set.  If
	 * not, we can't trust the revoke bit, and we need to do the
	 * full search for a revoke record. */
	if (test_set_buffer_revokevalid(bh)) {
		need_cancel = test_clear_buffer_revoked(bh);
	} else {
		need_cancel = 1;
		clear_buffer_revoked(bh);
	}

	if (need_cancel) {
		record = find_revoke_record(journal, bh->b_blocknr);
		if (record) {
			jbd_debug(4, "cancelled existing revoke on "
				  "blocknr %llu\n", (unsigned long long)bh->b_blocknr);
			spin_lock(&journal->j_revoke_lock);
			list_del(&record->hash);
			spin_unlock(&journal->j_revoke_lock);
			kmem_cache_free(jbd2_revoke_record_cache, record);
			did_revoke = 1;
		}
	}

#ifdef JBD2_EXPENSIVE_CHECKING
	/* There better not be one left behind by now! */
	record = find_revoke_record(journal, bh->b_blocknr);
	J_ASSERT_JH(jh, record == NULL);
#endif

	/* Finally, have we just cleared revoke on an unhashed
	 * buffer_head?  If so, we'd better make sure we clear the
	 * revoked status on any hashed alias too, otherwise the revoke
	 * state machine will get very upset later on. */
	if (need_cancel) {
		struct buffer_head *bh2;
		bh2 = __find_get_block(bh->b_bdev, bh->b_blocknr, bh->b_size);
		if (bh2) {
			if (bh2 != bh)
				clear_buffer_revoked(bh2);
			__brelse(bh2);
		}
	}
	return did_revoke;
}
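
/*
 * Worked example (illustrative): if a transaction revokes metadata block B
 * via jbd2_journal_revoke() and the same transaction later re-allocates B as
 * metadata, the jbd2_journal_get_write_access() call for B ends up in
 * jbd2_journal_cancel_revoke() above, which drops the pending revoke record
 * so that the freshly journaled contents of B are what recovery replays.
 */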

/*
 * jbd2_clear_buffer_revoked_flags clears the revoked flag of buffers in the
 * revoke table so that no buffers are still marked revoked when the next
 * transaction is started.
 */
void jbd2_clear_buffer_revoked_flags(journal_t *journal)
{
	struct jbd2_revoke_table_s *revoke = journal->j_revoke;
	int i = 0;

	for (i = 0; i < revoke->hash_size; i++) {
		struct list_head *hash_list;
		struct list_head *list_entry;
		hash_list = &revoke->hash_table[i];

		list_for_each(list_entry, hash_list) {
			struct jbd2_revoke_record_s *record;
			struct buffer_head *bh;
			record = (struct jbd2_revoke_record_s *)list_entry;
			bh = __find_get_block(journal->j_fs_dev,
					      record->blocknr,
					      journal->j_blocksize);
			if (bh) {
				clear_buffer_revoked(bh);
				__brelse(bh);
			}
		}
	}
}

/*
 * jbd2_journal_switch_revoke_table selects j_revoke for the next transaction;
 * we do not want to suspend any processing until all revokes are
 * written -bzzz
 */
void jbd2_journal_switch_revoke_table(journal_t *journal)
{
	int i;

	if (journal->j_revoke == journal->j_revoke_table[0])
		journal->j_revoke = journal->j_revoke_table[1];
	else
		journal->j_revoke = journal->j_revoke_table[0];

	for (i = 0; i < journal->j_revoke->hash_size; i++)
		INIT_LIST_HEAD(&journal->j_revoke->hash_table[i]);
}

/*
 * Write revoke records to the journal for all entries in the current
 * revoke hash, deleting the entries as we go.
 */
void jbd2_journal_write_revoke_records(journal_t *journal,
				       transaction_t *transaction,
				       struct list_head *log_bufs,
				       int write_op)
{
	struct buffer_head *descriptor;
	struct jbd2_revoke_record_s *record;
	struct jbd2_revoke_table_s *revoke;
	struct list_head *hash_list;
	int i, offset, count;

	descriptor = NULL;
	offset = 0;
	count = 0;

	/* select revoke table for committing transaction */
	revoke = journal->j_revoke == journal->j_revoke_table[0] ?
		journal->j_revoke_table[1] : journal->j_revoke_table[0];

	for (i = 0; i < revoke->hash_size; i++) {
		hash_list = &revoke->hash_table[i];

		while (!list_empty(hash_list)) {
			record = (struct jbd2_revoke_record_s *)
				hash_list->next;
			write_one_revoke_record(journal, transaction, log_bufs,
						&descriptor, &offset,
						record, write_op);
			count++;
			list_del(&record->hash);
			kmem_cache_free(jbd2_revoke_record_cache, record);
		}
	}
	if (descriptor)
		flush_descriptor(journal, descriptor, offset, write_op);
	jbd_debug(1, "Wrote %d revoke records\n", count);
}
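
/*
 * Layout sketch of one revoke descriptor block as assembled by
 * write_one_revoke_record() and flush_descriptor() below (on-disk fields are
 * big-endian; the sizes are read off the structures used in this file):
 *
 *	+----------------------------------------+
 *	| jbd2_journal_revoke_header_t            |  common header + r_count
 *	+----------------------------------------+
 *	| revoked block numbers, 4 bytes each     |  8 bytes each when
 *	| (8 with jbd2_has_feature_64bit())       |  the 64bit feature is set
 *	+----------------------------------------+
 *	| jbd2_journal_revoke_tail (optional)     |  only with csum v2/v3
 *	+----------------------------------------+
 */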

/*
 * Write out one revoke record.  We need to create a new descriptor
 * block if the old one is full or if we have not already created one.
 */

static void write_one_revoke_record(journal_t *journal,
				    transaction_t *transaction,
				    struct list_head *log_bufs,
				    struct buffer_head **descriptorp,
				    int *offsetp,
				    struct jbd2_revoke_record_s *record,
				    int write_op)
{
	int csum_size = 0;
	struct buffer_head *descriptor;
	int sz, offset;
	journal_header_t *header;

	/* If we are already aborting, this all becomes a noop.  We
	   still need to go round the loop in
	   jbd2_journal_write_revoke_records in order to free all of the
	   revoke records: only the IO to the journal is omitted. */
	if (is_journal_aborted(journal))
		return;

	descriptor = *descriptorp;
	offset = *offsetp;

	/* Do we need to leave space at the end for a checksum? */
	if (jbd2_journal_has_csum_v2or3(journal))
		csum_size = sizeof(struct jbd2_journal_revoke_tail);

	if (jbd2_has_feature_64bit(journal))
		sz = 8;
	else
		sz = 4;

	/* Make sure we have a descriptor with space left for the record */
	if (descriptor) {
		if (offset + sz > journal->j_blocksize - csum_size) {
			flush_descriptor(journal, descriptor, offset, write_op);
			descriptor = NULL;
		}
	}

	if (!descriptor) {
		descriptor = jbd2_journal_get_descriptor_buffer(journal);
		if (!descriptor)
			return;
		header = (journal_header_t *)descriptor->b_data;
		header->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
		header->h_blocktype = cpu_to_be32(JBD2_REVOKE_BLOCK);
		header->h_sequence = cpu_to_be32(transaction->t_tid);

		/* Record it so that we can wait for IO completion later */
		BUFFER_TRACE(descriptor, "file in log_bufs");
		jbd2_file_log_bh(log_bufs, descriptor);

		offset = sizeof(jbd2_journal_revoke_header_t);
		*descriptorp = descriptor;
	}

	if (jbd2_has_feature_64bit(journal))
		* ((__be64 *)(&descriptor->b_data[offset])) =
			cpu_to_be64(record->blocknr);
	else
		* ((__be32 *)(&descriptor->b_data[offset])) =
			cpu_to_be32(record->blocknr);
	offset += sz;

	*offsetp = offset;
}

static void jbd2_revoke_csum_set(journal_t *j, struct buffer_head *bh)
{
	struct jbd2_journal_revoke_tail *tail;
	__u32 csum;

	if (!jbd2_journal_has_csum_v2or3(j))
		return;

	tail = (struct jbd2_journal_revoke_tail *)(bh->b_data + j->j_blocksize -
			sizeof(struct jbd2_journal_revoke_tail));
	tail->r_checksum = 0;
	csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
	tail->r_checksum = cpu_to_be32(csum);
}

/*
 * Flush a revoke descriptor out to the journal.  If we are aborting,
 * this is a noop; otherwise we are generating a buffer which needs to
 * be waited for during commit, so it has to go onto the appropriate
 * journal buffer list.
 */

static void flush_descriptor(journal_t *journal,
			     struct buffer_head *descriptor,
			     int offset, int write_op)
{
	jbd2_journal_revoke_header_t *header;

	if (is_journal_aborted(journal)) {
		put_bh(descriptor);
		return;
	}

	header = (jbd2_journal_revoke_header_t *)descriptor->b_data;
	header->r_count = cpu_to_be32(offset);
	jbd2_revoke_csum_set(journal, descriptor);

	set_buffer_jwrite(descriptor);
	BUFFER_TRACE(descriptor, "write");
	set_buffer_dirty(descriptor);
	write_dirty_buffer(descriptor, write_op);
}
#endif

/*
 * Revoke support for recovery.
 *
 * Recovery needs to be able to:
 *
 *  record all revoke records, including the tid of the latest instance
 *  of each revoke in the journal
 *
 *  check whether a given block in a given transaction should be replayed
 *  (ie. has not been revoked by a revoke record in that or a subsequent
 *  transaction)
 *
 *  empty the revoke table after recovery.
 */
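
/*
 * Illustrative example (the numbers are made up): suppose block B carries a
 * revoke record from transaction 12 and also appears as a logged block in
 * transactions 10 and 14.  During replay, jbd2_journal_test_revoke(journal,
 * B, 10) below returns 1, so the stale copy from transaction 10 is skipped,
 * while jbd2_journal_test_revoke(journal, B, 14) returns 0 and the newer
 * copy is still replayed.
 */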

/*
 * First, setting revoke records.  We create a new revoke record for
 * every block ever revoked in the log as we scan it for recovery, and
 * we update the existing records if we find multiple revokes for a
 * single block.
 */

int jbd2_journal_set_revoke(journal_t *journal,
			    unsigned long long blocknr,
			    tid_t sequence)
{
	struct jbd2_revoke_record_s *record;

	record = find_revoke_record(journal, blocknr);
	if (record) {
		/* If we have multiple occurrences, only record the
		 * latest sequence number in the hashed record */
		if (tid_gt(sequence, record->sequence))
			record->sequence = sequence;
		return 0;
	}
	return insert_revoke_hash(journal, blocknr, sequence);
}

/*
 * Test revoke records.  For a given block referenced in the log, has
 * that block been revoked?  A revoke record with a given transaction
 * sequence number revokes that block in that transaction and all earlier
 * ones, but later transactions still need to be replayed.
 */

int jbd2_journal_test_revoke(journal_t *journal,
			     unsigned long long blocknr,
			     tid_t sequence)
{
	struct jbd2_revoke_record_s *record;

	record = find_revoke_record(journal, blocknr);
	if (!record)
		return 0;
	if (tid_gt(sequence, record->sequence))
		return 0;
	return 1;
}

/*
 * Finally, once recovery is over, we need to clear the revoke table so
 * that it can be reused by the running filesystem.
 */

void jbd2_journal_clear_revoke(journal_t *journal)
{
	int i;
	struct list_head *hash_list;
	struct jbd2_revoke_record_s *record;
	struct jbd2_revoke_table_s *revoke;

	revoke = journal->j_revoke;

	for (i = 0; i < revoke->hash_size; i++) {
		hash_list = &revoke->hash_table[i];
		while (!list_empty(hash_list)) {
			record = (struct jbd2_revoke_record_s *) hash_list->next;
			list_del(&record->hash);
			kmem_cache_free(jbd2_revoke_record_cache, record);
		}
	}
}