/*
 * linux/fs/jbd/revoke.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 2000
 *
 * Copyright 2000 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal revoke routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 *
 * Revoke is the mechanism used to prevent old log records for deleted
 * metadata from being replayed on top of newer data using the same
 * blocks.  The revoke mechanism is used in two separate places:
 *
 * + Commit: during commit we write the entire list of the current
 *   transaction's revoked blocks to the journal
 *
 * + Recovery: during recovery we record the transaction ID of all
 *   revoked blocks.  If there are multiple revoke records in the log
 *   for a single block, only the last one counts, and if there is a log
 *   entry for a block beyond the last revoke, then that log entry still
 *   gets replayed.
 *
 * We can get interactions between revokes and new log data within a
 * single transaction:
 *
 * Block is revoked and then journaled:
 *   The desired end result is the journaling of the new block, so we
 *   cancel the revoke before the transaction commits.
 *
 * Block is journaled and then revoked:
 *   The revoke must take precedence over the write of the block, so we
 *   need either to cancel the journal entry or to write the revoke
 *   later in the log than the log block.  In this case, we choose the
 *   latter: journaling a block cancels any revoke record for that block
 *   in the current transaction, so any revoke for that block in the
 *   transaction must have happened after the block was journaled and so
 *   the revoke must take precedence.
 *
 * Block is revoked and then written as data:
 *   The data write is allowed to succeed, but the revoke is _not_
 *   cancelled.  We still need to prevent old log records from
 *   overwriting the new data.  We don't even need to clear the revoke
 *   bit here.
 *
 * We cache the revoke status of a buffer in the current transaction in
 * b_states bits.  As the name says, the revokevalid flag indicates that the
 * cached revoke status of a buffer is valid and we can rely on the cached
 * status.
 *
 * Revoke information on buffers is a tri-state value:
 *
 * RevokeValid clear:	no cached revoke status, need to look it up
 * RevokeValid set, Revoked clear:
 *			buffer has not been revoked, and cancel_revoke
 *			need do nothing.
 * RevokeValid set, Revoked set:
 *			buffer has been revoked.
 *
 * Locking rules:
 * We keep two hash tables of revoke records.  One hash table belongs to the
 * running transaction (it is pointed to by journal->j_revoke), the other one
 * belongs to the committing transaction.  Accesses to the second hash table
 * happen only from kjournald and no other thread touches this table.  Also
 * journal_switch_revoke_table(), which switches which hash table belongs to
 * the running and which to the committing transaction, is called only from
 * kjournald.  Therefore we need no locks when accessing the hash table
 * belonging to the committing transaction.
 *
 * All users operating on the hash table belonging to the running transaction
 * have a handle to the transaction.  Therefore they are safe from kjournald
 * switching hash tables under them.  For operations on the lists of entries
 * in the hash table, j_revoke_lock is used.
 *
 * Finally, the replay code also uses the hash tables, but at that moment no
 * one else can touch them (the filesystem isn't mounted yet) and hence no
 * locking is needed.
 */
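
/*
 * A minimal sketch of how the tri-state cache described above is consulted,
 * using the buffer_revokevalid()/buffer_revoked() helpers that this file
 * relies on (see journal_cancel_revoke() below for the real logic):
 *
 *	if (!buffer_revokevalid(bh)) {
 *		// no cached status: fall back to find_revoke_record()
 *	} else if (buffer_revoked(bh)) {
 *		// a revoke for this block is pending in the transaction
 *	} else {
 *		// cached "not revoked": cancel_revoke has nothing to do
 *	}
 */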

#ifndef __KERNEL__
#include "jfs_user.h"
#else
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/bio.h>
#endif
#include <linux/log2.h>
#include <linux/hash.h>

static struct kmem_cache *revoke_record_cache;
static struct kmem_cache *revoke_table_cache;

/* Each revoke record represents one single revoked block.  During
   journal replay, this involves recording the transaction ID of the
   last transaction to revoke this block. */

struct jbd_revoke_record_s
{
	struct list_head  hash;
	tid_t		  sequence;	/* Used for recovery only */
	unsigned int	  blocknr;
};


/* The revoke table is just a simple hash table of revoke records. */
struct jbd_revoke_table_s
{
	/* It is conceivable that we might want a larger hash table
	 * for recovery.  Must be a power of two. */
	int		  hash_size;
	int		  hash_shift;
	struct list_head *hash_table;
};


#ifdef __KERNEL__
static void write_one_revoke_record(journal_t *, transaction_t *,
				    struct journal_head **, int *,
				    struct jbd_revoke_record_s *, int);
static void flush_descriptor(journal_t *, struct journal_head *, int, int);
#endif

/* Utility functions to maintain the revoke table */

static inline int hash(journal_t *journal, unsigned int block)
{
	struct jbd_revoke_table_s *table = journal->j_revoke;

	return hash_32(block, table->hash_shift);
}

static int insert_revoke_hash(journal_t *journal, unsigned int blocknr,
			      tid_t seq)
{
	struct list_head *hash_list;
	struct jbd_revoke_record_s *record;

repeat:
	record = kmem_cache_alloc(revoke_record_cache, GFP_NOFS);
	if (!record)
		goto oom;

	record->sequence = seq;
	record->blocknr = blocknr;
	hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)];
	spin_lock(&journal->j_revoke_lock);
	list_add(&record->hash, hash_list);
	spin_unlock(&journal->j_revoke_lock);
	return 0;

oom:
	if (!journal_oom_retry)
		return -ENOMEM;
	jbd_debug(1, "ENOMEM in %s, retrying\n", __func__);
	yield();
	goto repeat;
}
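
/*
 * Rough picture of what insert_revoke_hash() leaves behind, assuming a
 * block B is revoked under the running transaction T (the sequence field
 * is only consulted again during recovery):
 *
 *	j_revoke->hash_table[hash(journal, B)]
 *		-> { .hash = <list link>, .sequence = T->t_tid, .blocknr = B }
 */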

/* Find a revoke record in the journal's hash table. */

static struct jbd_revoke_record_s *find_revoke_record(journal_t *journal,
						      unsigned int blocknr)
{
	struct list_head *hash_list;
	struct jbd_revoke_record_s *record;

	hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)];

	spin_lock(&journal->j_revoke_lock);
	record = (struct jbd_revoke_record_s *) hash_list->next;
	while (&(record->hash) != hash_list) {
		if (record->blocknr == blocknr) {
			spin_unlock(&journal->j_revoke_lock);
			return record;
		}
		record = (struct jbd_revoke_record_s *) record->hash.next;
	}
	spin_unlock(&journal->j_revoke_lock);
	return NULL;
}

void journal_destroy_revoke_caches(void)
{
	if (revoke_record_cache) {
		kmem_cache_destroy(revoke_record_cache);
		revoke_record_cache = NULL;
	}
	if (revoke_table_cache) {
		kmem_cache_destroy(revoke_table_cache);
		revoke_table_cache = NULL;
	}
}

int __init journal_init_revoke_caches(void)
{
	J_ASSERT(!revoke_record_cache);
	J_ASSERT(!revoke_table_cache);

	revoke_record_cache = kmem_cache_create("revoke_record",
					sizeof(struct jbd_revoke_record_s),
					0,
					SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY,
					NULL);
	if (!revoke_record_cache)
		goto record_cache_failure;

	revoke_table_cache = kmem_cache_create("revoke_table",
					sizeof(struct jbd_revoke_table_s),
					0, SLAB_TEMPORARY, NULL);
	if (!revoke_table_cache)
		goto table_cache_failure;

	return 0;

table_cache_failure:
	journal_destroy_revoke_caches();
record_cache_failure:
	return -ENOMEM;
}

static struct jbd_revoke_table_s *journal_init_revoke_table(int hash_size)
{
	int i;
	struct jbd_revoke_table_s *table;

	table = kmem_cache_alloc(revoke_table_cache, GFP_KERNEL);
	if (!table)
		goto out;

	table->hash_size = hash_size;
	table->hash_shift = ilog2(hash_size);
	table->hash_table =
		kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
	if (!table->hash_table) {
		kmem_cache_free(revoke_table_cache, table);
		table = NULL;
		goto out;
	}

	for (i = 0; i < hash_size; i++)
		INIT_LIST_HEAD(&table->hash_table[i]);

out:
	return table;
}

static void journal_destroy_revoke_table(struct jbd_revoke_table_s *table)
{
	int i;
	struct list_head *hash_list;

	for (i = 0; i < table->hash_size; i++) {
		hash_list = &table->hash_table[i];
		J_ASSERT(list_empty(hash_list));
	}

	kfree(table->hash_table);
	kmem_cache_free(revoke_table_cache, table);
}

/* Initialise the revoke table for a given journal to a given size. */
int journal_init_revoke(journal_t *journal, int hash_size)
{
	J_ASSERT(journal->j_revoke_table[0] == NULL);
	J_ASSERT(is_power_of_2(hash_size));

	journal->j_revoke_table[0] = journal_init_revoke_table(hash_size);
	if (!journal->j_revoke_table[0])
		goto fail0;

	journal->j_revoke_table[1] = journal_init_revoke_table(hash_size);
	if (!journal->j_revoke_table[1])
		goto fail1;

	journal->j_revoke = journal->j_revoke_table[1];

	spin_lock_init(&journal->j_revoke_lock);

	return 0;

fail1:
	journal_destroy_revoke_table(journal->j_revoke_table[0]);
fail0:
	return -ENOMEM;
}
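
/*
 * Minimal usage sketch: journal setup is expected to create the two revoke
 * tables once, with a power-of-two hash size, and tear them down again when
 * the journal is destroyed.  The size 256 below is only an example value.
 *
 *	if (journal_init_revoke(journal, 256))
 *		goto out_err;			// -ENOMEM
 *	...
 *	journal_destroy_revoke(journal);	// tables must be empty here
 */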

/* Destroy a journal's revoke table.  The table must already be empty! */
void journal_destroy_revoke(journal_t *journal)
{
	journal->j_revoke = NULL;
	if (journal->j_revoke_table[0])
		journal_destroy_revoke_table(journal->j_revoke_table[0]);
	if (journal->j_revoke_table[1])
		journal_destroy_revoke_table(journal->j_revoke_table[1]);
}


#ifdef __KERNEL__

/*
 * journal_revoke: revoke a given buffer_head from the journal.  This
 * prevents the block from being replayed during recovery if we take a
 * crash after this current transaction commits.  Any subsequent
 * metadata writes of the buffer in this transaction cancel the
 * revoke.
 *
 * Note that this call may block --- it is up to the caller to make
 * sure that there are no further calls to journal_write_metadata
 * before the revoke is complete.  In ext3, this implies calling the
 * revoke before clearing the block bitmap when we are deleting
 * metadata.
 *
 * Revoke performs a journal_forget on any buffer_head passed in as a
 * parameter, but does _not_ forget the buffer_head if the bh was only
 * found implicitly.
 *
 * bh_in may not be a journalled buffer - it may have come off
 * the hash tables without an attached journal_head.
 *
 * If bh_in is non-zero, journal_revoke() will decrement its b_count
 * by one.
 */

int journal_revoke(handle_t *handle, unsigned int blocknr,
		   struct buffer_head *bh_in)
{
	struct buffer_head *bh = NULL;
	journal_t *journal;
	struct block_device *bdev;
	int err;

	might_sleep();
	if (bh_in)
		BUFFER_TRACE(bh_in, "enter");

	journal = handle->h_transaction->t_journal;
	if (!journal_set_features(journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE)) {
		J_ASSERT(!"Cannot set revoke feature!");
		return -EINVAL;
	}

	bdev = journal->j_fs_dev;
	bh = bh_in;

	if (!bh) {
		bh = __find_get_block(bdev, blocknr, journal->j_blocksize);
		if (bh)
			BUFFER_TRACE(bh, "found on hash");
	}
#ifdef JBD_EXPENSIVE_CHECKING
	else {
		struct buffer_head *bh2;

		/* If there is a different buffer_head lying around in
		 * memory anywhere... */
		bh2 = __find_get_block(bdev, blocknr, journal->j_blocksize);
		if (bh2) {
			/* ... and it has RevokeValid status... */
			if (bh2 != bh && buffer_revokevalid(bh2))
				/* ...then it better be revoked too,
				 * since it's illegal to create a revoke
				 * record against a buffer_head which is
				 * not marked revoked --- that would
				 * risk missing a subsequent revoke
				 * cancel. */
				J_ASSERT_BH(bh2, buffer_revoked(bh2));
			put_bh(bh2);
		}
	}
#endif

	/* We really ought not ever to revoke twice in a row without
	   first having the revoke cancelled: it's illegal to free a
	   block twice without allocating it in between! */
	if (bh) {
		if (!J_EXPECT_BH(bh, !buffer_revoked(bh),
				 "inconsistent data on disk")) {
			if (!bh_in)
				brelse(bh);
			return -EIO;
		}
		set_buffer_revoked(bh);
		set_buffer_revokevalid(bh);
		if (bh_in) {
			BUFFER_TRACE(bh_in, "call journal_forget");
			journal_forget(handle, bh_in);
		} else {
			BUFFER_TRACE(bh, "call brelse");
			__brelse(bh);
		}
	}

	jbd_debug(2, "insert revoke for block %u, bh_in=%p\n", blocknr, bh_in);
	err = insert_revoke_hash(journal, blocknr,
				 handle->h_transaction->t_tid);
	BUFFER_TRACE(bh_in, "exit");
	return err;
}
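
/*
 * Minimal usage sketch, following the rule in the comment above (revoke
 * before the bitmap bit is cleared when deleting metadata); the surrounding
 * delete-path code here is illustrative only:
 *
 *	// bh refers to a metadata block being freed under 'handle'
 *	err = journal_revoke(handle, bh->b_blocknr, bh);
 *	if (err)
 *		goto abort;
 *	// ...only now clear the block's bit in the block bitmap...
 */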

/*
 * Cancel an outstanding revoke.  For use only internally by the
 * journaling code (called from journal_get_write_access).
 *
 * We trust buffer_revoked() on the buffer if the buffer is already
 * being journaled: if there is no revoke pending on the buffer, then we
 * don't do anything here.
 *
 * This would break if it were possible for a buffer to be revoked and
 * discarded, and then reallocated within the same transaction.  In such
 * a case we would have lost the revoked bit, but when we arrived here
 * the second time we would still have a pending revoke to cancel.  So,
 * do not trust the Revoked bit on buffers unless RevokeValid is also
 * set.
 */
int journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
{
	struct jbd_revoke_record_s *record;
	journal_t *journal = handle->h_transaction->t_journal;
	int need_cancel;
	int did_revoke = 0;	/* akpm: debug */
	struct buffer_head *bh = jh2bh(jh);

	jbd_debug(4, "journal_head %p, cancelling revoke\n", jh);

	/* Is the existing Revoke bit valid?  If so, we trust it, and
	 * only perform the full cancel if the revoke bit is set.  If
	 * not, we can't trust the revoke bit, and we need to do the
	 * full search for a revoke record. */
	if (test_set_buffer_revokevalid(bh)) {
		need_cancel = test_clear_buffer_revoked(bh);
	} else {
		need_cancel = 1;
		clear_buffer_revoked(bh);
	}

	if (need_cancel) {
		record = find_revoke_record(journal, bh->b_blocknr);
		if (record) {
			jbd_debug(4, "cancelled existing revoke on "
				  "blocknr %llu\n",
				  (unsigned long long)bh->b_blocknr);
			spin_lock(&journal->j_revoke_lock);
			list_del(&record->hash);
			spin_unlock(&journal->j_revoke_lock);
			kmem_cache_free(revoke_record_cache, record);
			did_revoke = 1;
		}
	}

#ifdef JBD_EXPENSIVE_CHECKING
	/* There better not be one left behind by now! */
	record = find_revoke_record(journal, bh->b_blocknr);
	J_ASSERT_JH(jh, record == NULL);
#endif

	/* Finally, have we just cleared revoke on an unhashed
	 * buffer_head?  If so, we'd better make sure we clear the
	 * revoked status on any hashed alias too, otherwise the revoke
	 * state machine will get very upset later on. */
	if (need_cancel) {
		struct buffer_head *bh2;

		bh2 = __find_get_block(bh->b_bdev, bh->b_blocknr, bh->b_size);
		if (bh2) {
			if (bh2 != bh)
				clear_buffer_revoked(bh2);
			__brelse(bh2);
		}
	}
	return did_revoke;
}
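
/*
 * Sketch of the expected caller pattern: re-journaling a previously revoked
 * buffer goes through the write-access path, which ends up cancelling the
 * pending revoke as described above.
 *
 *	journal_get_write_access(handle, bh);	// cancels any pending revoke
 *	// ...modify the buffer...
 *	journal_dirty_metadata(handle, bh);
 */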

/*
 * journal_clear_buffer_revoked_flags clears the revoked flag of buffers in
 * the revoke table to reflect that there are no revoked buffers in the next
 * transaction which is going to be started.
 */
void journal_clear_buffer_revoked_flags(journal_t *journal)
{
	struct jbd_revoke_table_s *revoke = journal->j_revoke;
	int i = 0;

	for (i = 0; i < revoke->hash_size; i++) {
		struct list_head *hash_list;
		struct list_head *list_entry;

		hash_list = &revoke->hash_table[i];

		list_for_each(list_entry, hash_list) {
			struct jbd_revoke_record_s *record;
			struct buffer_head *bh;

			record = (struct jbd_revoke_record_s *)list_entry;
			bh = __find_get_block(journal->j_fs_dev,
					      record->blocknr,
					      journal->j_blocksize);
			if (bh) {
				clear_buffer_revoked(bh);
				__brelse(bh);
			}
		}
	}
}

/*
 * journal_switch_revoke_table selects j_revoke for the next transaction.
 * We do not want to suspend any processing until all revokes are
 * written -bzzz
 */
void journal_switch_revoke_table(journal_t *journal)
{
	int i;

	if (journal->j_revoke == journal->j_revoke_table[0])
		journal->j_revoke = journal->j_revoke_table[1];
	else
		journal->j_revoke = journal->j_revoke_table[0];

	for (i = 0; i < journal->j_revoke->hash_size; i++)
		INIT_LIST_HEAD(&journal->j_revoke->hash_table[i]);
}

/*
 * Write revoke records to the journal for all entries in the current
 * revoke hash, deleting the entries as we go.
 */
void journal_write_revoke_records(journal_t *journal,
				  transaction_t *transaction, int write_op)
{
	struct journal_head *descriptor;
	struct jbd_revoke_record_s *record;
	struct jbd_revoke_table_s *revoke;
	struct list_head *hash_list;
	int i, offset, count;

	descriptor = NULL;
	offset = 0;
	count = 0;

	/* select revoke table for committing transaction */
	revoke = journal->j_revoke == journal->j_revoke_table[0] ?
		journal->j_revoke_table[1] : journal->j_revoke_table[0];

	for (i = 0; i < revoke->hash_size; i++) {
		hash_list = &revoke->hash_table[i];

		while (!list_empty(hash_list)) {
			record = (struct jbd_revoke_record_s *)
				hash_list->next;
			write_one_revoke_record(journal, transaction,
						&descriptor, &offset,
						record, write_op);
			count++;
			list_del(&record->hash);
			kmem_cache_free(revoke_record_cache, record);
		}
	}
	if (descriptor)
		flush_descriptor(journal, descriptor, offset, write_op);
	jbd_debug(1, "Wrote %d revoke records\n", count);
}
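
/*
 * Rough shape of one revoke descriptor block as produced by the two helpers
 * below (r_count is the number of bytes used, header included); B0, B1, ...
 * stand for the revoked block numbers:
 *
 *	+------------------------------------------------+
 *	| journal_header_t (JFS_MAGIC_NUMBER,             |
 *	|                   JFS_REVOKE_BLOCK, tid)        |
 *	| __be32 r_count                                  |
 *	| __be32 B0 | __be32 B1 | ...                     |
 *	+------------------------------------------------+
 */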

/*
 * Write out one revoke record.  We need to create a new descriptor
 * block if the old one is full or if we have not already created one.
 */

static void write_one_revoke_record(journal_t *journal,
				    transaction_t *transaction,
				    struct journal_head **descriptorp,
				    int *offsetp,
				    struct jbd_revoke_record_s *record,
				    int write_op)
{
	struct journal_head *descriptor;
	int offset;
	journal_header_t *header;

	/* If we are already aborting, this all becomes a noop.  We
	   still need to go round the loop in
	   journal_write_revoke_records in order to free all of the
	   revoke records: only the IO to the journal is omitted. */
	if (is_journal_aborted(journal))
		return;

	descriptor = *descriptorp;
	offset = *offsetp;

	/* Make sure we have a descriptor with space left for the record */
	if (descriptor) {
		if (offset == journal->j_blocksize) {
			flush_descriptor(journal, descriptor, offset, write_op);
			descriptor = NULL;
		}
	}

	if (!descriptor) {
		descriptor = journal_get_descriptor_buffer(journal);
		if (!descriptor)
			return;
		header = (journal_header_t *) &jh2bh(descriptor)->b_data[0];
		header->h_magic     = cpu_to_be32(JFS_MAGIC_NUMBER);
		header->h_blocktype = cpu_to_be32(JFS_REVOKE_BLOCK);
		header->h_sequence  = cpu_to_be32(transaction->t_tid);

		/* Record it so that we can wait for IO completion later */
		JBUFFER_TRACE(descriptor, "file as BJ_LogCtl");
		journal_file_buffer(descriptor, transaction, BJ_LogCtl);

		offset = sizeof(journal_revoke_header_t);
		*descriptorp = descriptor;
	}

	*((__be32 *)(&jh2bh(descriptor)->b_data[offset])) =
		cpu_to_be32(record->blocknr);
	offset += 4;
	*offsetp = offset;
}

/*
 * Flush a revoke descriptor out to the journal.  If we are aborting,
 * this is a noop; otherwise we are generating a buffer which needs to
 * be waited for during commit, so it has to go onto the appropriate
 * journal buffer list.
 */

static void flush_descriptor(journal_t *journal,
			     struct journal_head *descriptor,
			     int offset, int write_op)
{
	journal_revoke_header_t *header;
	struct buffer_head *bh = jh2bh(descriptor);

	if (is_journal_aborted(journal)) {
		put_bh(bh);
		return;
	}

	header = (journal_revoke_header_t *) jh2bh(descriptor)->b_data;
	header->r_count = cpu_to_be32(offset);
	set_buffer_jwrite(bh);
	BUFFER_TRACE(bh, "write");
	set_buffer_dirty(bh);
	write_dirty_buffer(bh, write_op);
}
#endif

/*
 * Revoke support for recovery.
 *
 * Recovery needs to be able to:
 *
 *  record all revoke records, including the tid of the latest instance
 *  of each revoke in the journal
 *
 *  check whether a given block in a given transaction should be replayed
 *  (ie. has not been revoked by a revoke record in that or a subsequent
 *  transaction)
 *
 *  empty the revoke table after recovery.
 */

/*
 * First, setting revoke records.  We create a new revoke record for
 * every block ever revoked in the log as we scan it for recovery, and
 * we update the existing records if we find multiple revokes for a
 * single block.
 */

int journal_set_revoke(journal_t *journal,
		       unsigned int blocknr,
		       tid_t sequence)
{
	struct jbd_revoke_record_s *record;

	record = find_revoke_record(journal, blocknr);
	if (record) {
		/* If we have multiple occurrences, only record the
		 * latest sequence number in the hashed record */
		if (tid_gt(sequence, record->sequence))
			record->sequence = sequence;
		return 0;
	}
	return insert_revoke_hash(journal, blocknr, sequence);
}
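
/*
 * Sketch of the expected caller in the recovery scan pass: for a revoke
 * block read from the log with commit sequence 'seq', each __be32 entry
 * after the journal_revoke_header_t is fed to journal_set_revoke().  The
 * variable names here are illustrative.
 *
 *	offset = sizeof(journal_revoke_header_t);
 *	max = be32_to_cpu(header->r_count);
 *	while (offset < max) {
 *		blocknr = be32_to_cpu(*(__be32 *)(bh->b_data + offset));
 *		offset += 4;
 *		err = journal_set_revoke(journal, blocknr, seq);
 *		if (err)
 *			return err;
 *	}
 */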
695 */ 696 697int journal_test_revoke(journal_t *journal, 698 unsigned int blocknr, 699 tid_t sequence) 700{ 701 struct jbd_revoke_record_s *record; 702 703 record = find_revoke_record(journal, blocknr); 704 if (!record) 705 return 0; 706 if (tid_gt(sequence, record->sequence)) 707 return 0; 708 return 1; 709} 710 711/* 712 * Finally, once recovery is over, we need to clear the revoke table so 713 * that it can be reused by the running filesystem. 714 */ 715 716void journal_clear_revoke(journal_t *journal) 717{ 718 int i; 719 struct list_head *hash_list; 720 struct jbd_revoke_record_s *record; 721 struct jbd_revoke_table_s *revoke; 722 723 revoke = journal->j_revoke; 724 725 for (i = 0; i < revoke->hash_size; i++) { 726 hash_list = &revoke->hash_table[i]; 727 while (!list_empty(hash_list)) { 728 record = (struct jbd_revoke_record_s*) hash_list->next; 729 list_del(&record->hash); 730 kmem_cache_free(revoke_record_cache, record); 731 } 732 } 733} 734