fs/xfs/xfs_trans_buf.c


DEFINITIONS

This source file includes the following definitions:
  1. xfs_trans_buf_item_match
  2. _xfs_trans_bjoin
  3. xfs_trans_bjoin
  4. xfs_trans_get_buf_map
  5. xfs_trans_getsb
  6. xfs_trans_read_buf_map
  7. xfs_trans_buf_is_dirty
  8. xfs_trans_brelse
  9. xfs_trans_bhold
  10. xfs_trans_bhold_release
  11. xfs_trans_dirty_buf
  12. xfs_trans_log_buf
  13. xfs_trans_binval
  14. xfs_trans_inode_buf
  15. xfs_trans_stale_inode_buf
  16. xfs_trans_inode_alloc_buf
  17. xfs_trans_ordered_buf
  18. xfs_trans_buf_set_type
  19. xfs_trans_buf_copy_type
  20. xfs_trans_dquot_buf

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_trace.h"

/*
 * Check to see if a buffer matching the given parameters is already
 * a part of the given transaction.
 */
STATIC struct xfs_buf *
xfs_trans_buf_item_match(
        struct xfs_trans        *tp,
        struct xfs_buftarg      *target,
        struct xfs_buf_map      *map,
        int                     nmaps)
{
        struct xfs_log_item     *lip;
        struct xfs_buf_log_item *blip;
        int                     len = 0;
        int                     i;

        for (i = 0; i < nmaps; i++)
                len += map[i].bm_len;

        list_for_each_entry(lip, &tp->t_items, li_trans) {
                blip = (struct xfs_buf_log_item *)lip;
                if (blip->bli_item.li_type == XFS_LI_BUF &&
                    blip->bli_buf->b_target == target &&
                    XFS_BUF_ADDR(blip->bli_buf) == map[0].bm_bn &&
                    blip->bli_buf->b_length == len) {
                        ASSERT(blip->bli_buf->b_map_count == nmaps);
                        return blip->bli_buf;
                }
        }

        return NULL;
}

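/*
 * Illustrative sketch, not part of the original file: a discontiguous
 * buffer is described by an array of struct xfs_buf_map entries, one per
 * extent (daddr1/daddr2/len1/len2 below are hypothetical placeholders):
 *
 *	struct xfs_buf_map map[2] = {
 *		{ .bm_bn = daddr1, .bm_len = len1 },
 *		{ .bm_bn = daddr2, .bm_len = len2 },
 *	};
 *
 * The match above keys on the first map's block number plus the summed
 * length and only asserts that the map counts agree, so callers must use
 * a consistent mapping for a given buffer.
 */
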
/*
 * Add the locked buffer to the transaction.
 *
 * The buffer must be locked, and it cannot be associated with any
 * transaction.
 *
 * If the buffer does not yet have a buf log item associated with it,
 * then allocate one for it.  Then add the buf item to the transaction.
 */
STATIC void
_xfs_trans_bjoin(
        struct xfs_trans        *tp,
        struct xfs_buf          *bp,
        int                     reset_recur)
{
        struct xfs_buf_log_item *bip;

        ASSERT(bp->b_transp == NULL);

        /*
         * The xfs_buf_log_item pointer is stored in b_log_item.  If
         * it doesn't have one yet, then allocate one and initialize it.
         * The checks to see if one is there are in xfs_buf_item_init().
         */
        xfs_buf_item_init(bp, tp->t_mountp);
        bip = bp->b_log_item;
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
        ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
        ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
        if (reset_recur)
                bip->bli_recur = 0;

        /*
         * Take a reference for this transaction on the buf item.
         */
        atomic_inc(&bip->bli_refcount);

        /*
         * Attach the item to the transaction so we can find it in
         * xfs_trans_get_buf() and friends.
         */
        xfs_trans_add_item(tp, &bip->bli_item);
        bp->b_transp = tp;
}

void
xfs_trans_bjoin(
        struct xfs_trans        *tp,
        struct xfs_buf          *bp)
{
        _xfs_trans_bjoin(tp, bp, 0);
        trace_xfs_trans_bjoin(bp->b_log_item);
}

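/*
 * Illustrative sketch, not part of the original file: xfs_trans_bjoin()
 * is how a caller re-attaches an already locked buffer to a new
 * transaction, typically after rolling.  A hold keeps the buffer locked
 * across the commit, and the rejoin puts it under the new transaction:
 *
 *	xfs_trans_bhold(tp, bp);
 *	error = xfs_trans_roll(&tp);
 *	if (error)
 *		return error;
 *	xfs_trans_bjoin(tp, bp);
 */
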
/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction.  If it is already locked
 * within the transaction, just increment its lock recursion count
 * and return a pointer to it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * get_buf() call.
 */
struct xfs_buf *
xfs_trans_get_buf_map(
        struct xfs_trans        *tp,
        struct xfs_buftarg      *target,
        struct xfs_buf_map      *map,
        int                     nmaps,
        xfs_buf_flags_t         flags)
{
        xfs_buf_t               *bp;
        struct xfs_buf_log_item *bip;

        if (!tp)
                return xfs_buf_get_map(target, map, nmaps, flags);

        /*
         * If we find the buffer in the cache with this transaction
         * pointer in its b_transp field, then we know we already
         * have it locked.  In this case we just increment the lock
         * recursion count and return the buffer to the caller.
         */
        bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
        if (bp != NULL) {
                ASSERT(xfs_buf_islocked(bp));
                if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) {
                        xfs_buf_stale(bp);
                        bp->b_flags |= XBF_DONE;
                }

                ASSERT(bp->b_transp == tp);
                bip = bp->b_log_item;
                ASSERT(bip != NULL);
                ASSERT(atomic_read(&bip->bli_refcount) > 0);
                bip->bli_recur++;
                trace_xfs_trans_get_buf_recur(bip);
                return bp;
        }

        bp = xfs_buf_get_map(target, map, nmaps, flags);
        if (bp == NULL)
                return NULL;

        ASSERT(!bp->b_error);

        _xfs_trans_bjoin(tp, bp, 1);
        trace_xfs_trans_get_buf(bp->b_log_item);
        return bp;
}

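/*
 * Illustrative sketch, not part of this file: the common single-extent
 * case goes through the xfs_trans_get_buf() wrapper in xfs_trans.h, which
 * builds a one-entry map and calls the function above, essentially:
 *
 *	static inline struct xfs_buf *
 *	xfs_trans_get_buf(struct xfs_trans *tp, struct xfs_buftarg *target,
 *			  xfs_daddr_t blkno, int numblks, uint flags)
 *	{
 *		DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *		return xfs_trans_get_buf_map(tp, target, &map, 1, flags);
 *	}
 */
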
/*
 * Get and lock the superblock buffer of this file system for the
 * given transaction.
 *
 * We don't need to use incore_match() here, because the superblock
 * buffer is a private buffer which we keep a pointer to in the
 * mount structure.
 */
xfs_buf_t *
xfs_trans_getsb(
        xfs_trans_t             *tp,
        struct xfs_mount        *mp)
{
        xfs_buf_t               *bp;
        struct xfs_buf_log_item *bip;

        /*
         * Default to just trying to lock the superblock buffer
         * if tp is NULL.
         */
        if (tp == NULL)
                return xfs_getsb(mp);

        /*
         * If the superblock buffer already has this transaction
         * pointer in its b_transp field, then we know we already
         * have it locked.  In this case we just increment the lock
         * recursion count and return the buffer to the caller.
         */
        bp = mp->m_sb_bp;
        if (bp->b_transp == tp) {
                bip = bp->b_log_item;
                ASSERT(bip != NULL);
                ASSERT(atomic_read(&bip->bli_refcount) > 0);
                bip->bli_recur++;
                trace_xfs_trans_getsb_recur(bip);
                return bp;
        }

        bp = xfs_getsb(mp);
        if (bp == NULL)
                return NULL;

        _xfs_trans_bjoin(tp, bp, 1);
        trace_xfs_trans_getsb(bp->b_log_item);
        return bp;
}

/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction.  If it has not yet been
 * read in, read it from disk.  If it is already locked
 * within the transaction and already read in, just increment its
 * lock recursion count and return a pointer to it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * read_buf() call.
 */
int
xfs_trans_read_buf_map(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        struct xfs_buftarg      *target,
        struct xfs_buf_map      *map,
        int                     nmaps,
        xfs_buf_flags_t         flags,
        struct xfs_buf          **bpp,
        const struct xfs_buf_ops *ops)
{
        struct xfs_buf          *bp = NULL;
        struct xfs_buf_log_item *bip;
        int                     error;

        *bpp = NULL;
        /*
         * If we find the buffer in the cache with this transaction
         * pointer in its b_transp field, then we know we already
         * have it locked.  If it is already read in we just increment
         * the lock recursion count and return the buffer to the caller.
         * If the buffer is not yet read in, then we read it in, increment
         * the lock recursion count, and return it to the caller.
         */
        if (tp)
                bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
        if (bp) {
                ASSERT(xfs_buf_islocked(bp));
                ASSERT(bp->b_transp == tp);
                ASSERT(bp->b_log_item != NULL);
                ASSERT(!bp->b_error);
                ASSERT(bp->b_flags & XBF_DONE);

                /*
                 * We never locked this buf ourselves, so we shouldn't
                 * brelse it either.  Just get out.
                 */
                if (XFS_FORCED_SHUTDOWN(mp)) {
                        trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
                        return -EIO;
                }

                /*
                 * Check if the caller is trying to read a buffer that is
                 * already attached to the transaction yet has no buffer ops
                 * assigned.  Ops are usually attached when the buffer is
                 * attached to the transaction, or by the read caller in
                 * special circumstances.  That didn't happen, which is not
                 * how this is supposed to go.
                 *
                 * If the buffer passes verification we'll let this go, but if
                 * not we have to shut down.  Let the transaction cleanup code
                 * release this buffer when it kills the transaction.
                 */
                ASSERT(bp->b_ops != NULL);
                error = xfs_buf_reverify(bp, ops);
                if (error) {
                        xfs_buf_ioerror_alert(bp, __func__);

                        if (tp->t_flags & XFS_TRANS_DIRTY)
                                xfs_force_shutdown(tp->t_mountp,
                                                SHUTDOWN_META_IO_ERROR);

                        /* bad CRC means corrupted metadata */
                        if (error == -EFSBADCRC)
                                error = -EFSCORRUPTED;
                        return error;
                }

                bip = bp->b_log_item;
                bip->bli_recur++;

                ASSERT(atomic_read(&bip->bli_refcount) > 0);
                trace_xfs_trans_read_buf_recur(bip);
                ASSERT(bp->b_ops != NULL || ops == NULL);
                *bpp = bp;
                return 0;
        }

        bp = xfs_buf_read_map(target, map, nmaps, flags, ops);
        if (!bp) {
                if (!(flags & XBF_TRYLOCK))
                        return -ENOMEM;
                return tp ? 0 : -EAGAIN;
        }

        /*
         * If we've had a read error, then the contents of the buffer are
         * invalid and should not be used.  To ensure that a followup read
         * tries to pull the buffer from disk again, we clear the XBF_DONE
         * flag and mark the buffer stale.  This ensures that anyone who has
         * a current reference to the buffer will interpret its contents
         * correctly and future cache lookups will also treat it as an empty,
         * uninitialised buffer.
         */
        if (bp->b_error) {
                error = bp->b_error;
                if (!XFS_FORCED_SHUTDOWN(mp))
                        xfs_buf_ioerror_alert(bp, __func__);
                bp->b_flags &= ~XBF_DONE;
                xfs_buf_stale(bp);

                if (tp && (tp->t_flags & XFS_TRANS_DIRTY))
                        xfs_force_shutdown(tp->t_mountp, SHUTDOWN_META_IO_ERROR);
                xfs_buf_relse(bp);

                /* bad CRC means corrupted metadata */
                if (error == -EFSBADCRC)
                        error = -EFSCORRUPTED;
                return error;
        }

        if (XFS_FORCED_SHUTDOWN(mp)) {
                xfs_buf_relse(bp);
                trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
                return -EIO;
        }

        if (tp) {
                _xfs_trans_bjoin(tp, bp, 1);
                trace_xfs_trans_read_buf(bp->b_log_item);
        }
        ASSERT(bp->b_ops != NULL || ops == NULL);
        *bpp = bp;
        return 0;
}

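/*
 * Illustrative sketch, not part of this file: as with get_buf, most
 * callers use the single-extent xfs_trans_read_buf() wrapper from
 * xfs_trans.h, which is essentially:
 *
 *	static inline int
 *	xfs_trans_read_buf(struct xfs_mount *mp, struct xfs_trans *tp,
 *			   struct xfs_buftarg *target, xfs_daddr_t blkno,
 *			   int numblks, xfs_buf_flags_t flags,
 *			   struct xfs_buf **bpp,
 *			   const struct xfs_buf_ops *ops)
 *	{
 *		DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *		return xfs_trans_read_buf_map(mp, tp, target, &map, 1,
 *					      flags, bpp, ops);
 *	}
 */
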
/* Has this buffer been dirtied by anyone? */
bool
xfs_trans_buf_is_dirty(
        struct xfs_buf          *bp)
{
        struct xfs_buf_log_item *bip = bp->b_log_item;

        if (!bip)
                return false;
        ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
        return test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags);
}

/*
 * Release a buffer previously joined to the transaction.  If the buffer is
 * modified within this transaction, decrement the recursion count but do not
 * release the buffer even if the count goes to 0.  If the buffer is not
 * modified within the transaction, decrement the recursion count and release
 * the buffer if the recursion count goes to 0.
 *
 * If the buffer is to be released and it was not already dirty before this
 * transaction began, then also free the buf_log_item associated with it.
 *
 * If the transaction pointer is NULL, this is a normal xfs_buf_relse() call.
 */
void
xfs_trans_brelse(
        struct xfs_trans        *tp,
        struct xfs_buf          *bp)
{
        struct xfs_buf_log_item *bip = bp->b_log_item;

        ASSERT(bp->b_transp == tp);

        if (!tp) {
                xfs_buf_relse(bp);
                return;
        }

        trace_xfs_trans_brelse(bip);
        ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        /*
         * If the release is for a recursive lookup, then decrement the count
         * and return.
         */
        if (bip->bli_recur > 0) {
                bip->bli_recur--;
                return;
        }

        /*
         * If the buffer is invalidated or dirty in this transaction, we can't
         * release it until we commit.
         */
        if (test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags))
                return;
        if (bip->bli_flags & XFS_BLI_STALE)
                return;

        /*
         * Unlink the log item from the transaction and clear the hold flag,
         * if set.  We wouldn't want the next user of the buffer to get
         * confused.
         */
        ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
        xfs_trans_del_item(&bip->bli_item);
        bip->bli_flags &= ~XFS_BLI_HOLD;

        /* drop the reference to the bli */
        xfs_buf_item_put(bip);

        bp->b_transp = NULL;
        xfs_buf_relse(bp);
}

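/*
 * Illustrative sketch, not part of this file: the read/examine/release
 * pattern for metadata a transaction only needs to look at (blkno,
 * numblks and ops are hypothetical placeholders):
 *
 *	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, blkno,
 *				   numblks, 0, &bp, ops);
 *	if (error)
 *		return error;
 *	...examine bp->b_addr...
 *	xfs_trans_brelse(tp, bp);
 *
 * Because the buffer was never dirtied, brelse detaches it from the
 * transaction and unlocks it immediately.
 */
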
/*
 * Mark the buffer as not needing to be unlocked when the buf item's
 * iop_committing() routine is called.  The buffer must already be locked
 * and associated with the given transaction.
 */
void
xfs_trans_bhold(
        xfs_trans_t             *tp,
        xfs_buf_t               *bp)
{
        struct xfs_buf_log_item *bip = bp->b_log_item;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
        ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        bip->bli_flags |= XFS_BLI_HOLD;
        trace_xfs_trans_bhold(bip);
}

/*
 * Cancel the previous buffer hold request made on this buffer
 * for this transaction.
 */
void
xfs_trans_bhold_release(
        xfs_trans_t             *tp,
        xfs_buf_t               *bp)
{
        struct xfs_buf_log_item *bip = bp->b_log_item;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
        ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        ASSERT(bip->bli_flags & XFS_BLI_HOLD);

        bip->bli_flags &= ~XFS_BLI_HOLD;
        trace_xfs_trans_bhold_release(bip);
}

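/*
 * Illustrative sketch, not part of this file: a caller that took a hold
 * in preparation for a transaction roll, but then bails out before
 * committing, cancels the hold so the buffer is unlocked normally when
 * the transaction is cancelled.  do_more_work() is a hypothetical
 * stand-in:
 *
 *	xfs_trans_bhold(tp, bp);
 *	error = do_more_work(tp);
 *	if (error) {
 *		xfs_trans_bhold_release(tp, bp);
 *		goto out_cancel;
 *	}
 */
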
/*
 * Mark a buffer dirty in the transaction.
 */
void
xfs_trans_dirty_buf(
        struct xfs_trans        *tp,
        struct xfs_buf          *bp)
{
        struct xfs_buf_log_item *bip = bp->b_log_item;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(bp->b_iodone == NULL ||
               bp->b_iodone == xfs_buf_iodone_callbacks);

        /*
         * Mark the buffer as needing to be written out eventually,
         * and set its iodone function to remove the buffer's buf log
         * item from the AIL and free it when the buffer is flushed
         * to disk.  See xfs_buf_attach_iodone() for more details
         * on li_cb and xfs_buf_iodone_callbacks().
         * If we end up aborting this transaction, we trap this buffer
         * inside the b_bdstrat callback so that this won't get written to
         * disk.
         */
        bp->b_flags |= XBF_DONE;

        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        bp->b_iodone = xfs_buf_iodone_callbacks;
        bip->bli_item.li_cb = xfs_buf_iodone;

        /*
         * If we invalidated the buffer within this transaction, then
         * cancel the invalidation now that we're dirtying the buffer
         * again.  There are no races with the code in xfs_buf_item_unpin(),
         * because we have a reference to the buffer this entire time.
         */
        if (bip->bli_flags & XFS_BLI_STALE) {
                bip->bli_flags &= ~XFS_BLI_STALE;
                ASSERT(bp->b_flags & XBF_STALE);
                bp->b_flags &= ~XBF_STALE;
                bip->__bli_format.blf_flags &= ~XFS_BLF_CANCEL;
        }
        bip->bli_flags |= XFS_BLI_DIRTY | XFS_BLI_LOGGED;

        tp->t_flags |= XFS_TRANS_DIRTY;
        set_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags);
}

/*
 * This is called to mark bytes first through last inclusive of the given
 * buffer as needing to be logged when the transaction is committed.
 * The buffer must already be associated with the given transaction.
 *
 * First and last are numbers relative to the beginning of this buffer,
 * so the first byte in the buffer is numbered 0 regardless of the
 * buffer's on-disk block number.
 */
void
xfs_trans_log_buf(
        struct xfs_trans        *tp,
        struct xfs_buf          *bp,
        uint                    first,
        uint                    last)
{
        struct xfs_buf_log_item *bip = bp->b_log_item;

        ASSERT(first <= last && last < BBTOB(bp->b_length));
        ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED));

        xfs_trans_dirty_buf(tp, bp);

        trace_xfs_trans_log_buf(bip);
        xfs_buf_item_log(bip, first, last);
}

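/*
 * Illustrative sketch, not part of this file: ranges are inclusive byte
 * offsets, so logging a single on-disk field of a structure held in the
 * buffer looks like:
 *
 *	xfs_trans_log_buf(tp, bp,
 *			  offsetof(struct xfs_agf, agf_flcount),
 *			  offsetof(struct xfs_agf, agf_flcount) +
 *				sizeof(__be32) - 1);
 *
 * and logging the whole buffer is:
 *
 *	xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1);
 */
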
/*
 * Invalidate a buffer that is being used within a transaction.
 *
 * Typically this is because the blocks in the buffer are being freed, so we
 * need to prevent it from being written out when we're done.  Allowing it
 * to be written again might overwrite data in the free blocks if they are
 * reallocated to a file.
 *
 * We prevent the buffer from being written out by marking it stale.  We can't
 * get rid of the buf log item at this point because the buffer may still be
 * pinned by another transaction.  If that is the case, then we'll wait until
 * the buffer is committed to disk for the last time (we can tell by the ref
 * count) and free it in xfs_buf_item_unpin().  Until that happens we will
 * keep the buffer locked so that the buffer and buf log item are not reused.
 *
 * We also set the XFS_BLF_CANCEL flag in the buf log format structure and log
 * the buf item.  This will be used at recovery time to determine that copies
 * of the buffer in the log before this should not be replayed.
 *
 * We mark the item descriptor and the transaction dirty so that we'll hold
 * the buffer until after the commit.
 *
 * Since we're invalidating the buffer, we also clear the state about which
 * parts of the buffer have been logged.  We also clear the flag indicating
 * that this is an inode buffer since the data in the buffer will no longer
 * be valid.
 *
 * We set the stale bit in the buffer as well since we're getting rid of it.
 */
void
xfs_trans_binval(
        xfs_trans_t             *tp,
        xfs_buf_t               *bp)
{
        struct xfs_buf_log_item *bip = bp->b_log_item;
        int                     i;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        trace_xfs_trans_binval(bip);

        if (bip->bli_flags & XFS_BLI_STALE) {
                /*
                 * If the buffer is already invalidated, then
                 * just return.
                 */
                ASSERT(bp->b_flags & XBF_STALE);
                ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
                ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_INODE_BUF));
                ASSERT(!(bip->__bli_format.blf_flags & XFS_BLFT_MASK));
                ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
                ASSERT(test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags));
                ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
                return;
        }

        xfs_buf_stale(bp);

        bip->bli_flags |= XFS_BLI_STALE;
        bip->bli_flags &= ~(XFS_BLI_INODE_BUF | XFS_BLI_LOGGED | XFS_BLI_DIRTY);
        bip->__bli_format.blf_flags &= ~XFS_BLF_INODE_BUF;
        bip->__bli_format.blf_flags |= XFS_BLF_CANCEL;
        bip->__bli_format.blf_flags &= ~XFS_BLFT_MASK;
        for (i = 0; i < bip->bli_format_count; i++) {
                memset(bip->bli_formats[i].blf_data_map, 0,
                       (bip->bli_formats[i].blf_map_size * sizeof(uint)));
        }
        set_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags);
        tp->t_flags |= XFS_TRANS_DIRTY;
}

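/*
 * Illustrative sketch, not part of this file: when a transaction frees
 * the extent backing a metadata buffer, it looks the buffer up and
 * invalidates it so the stale contents can never hit the disk (daddr and
 * numblks are placeholders for the freed extent's location):
 *
 *	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, daddr, numblks, 0);
 *	if (bp)
 *		xfs_trans_binval(tp, bp);
 */
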
/*
 * This call is used to indicate that the buffer contains on-disk inodes which
 * must be handled specially during recovery.  They require special handling
 * because only the di_next_unlinked from the inodes in the buffer should be
 * recovered.  The rest of the data in the buffer is logged via the inodes
 * themselves.
 *
 * All we do is set the XFS_BLI_INODE_BUF flag in the item's flags so it can
 * be transferred to the buffer's log format structure so that we'll know
 * what to do at recovery time.
 */
void
xfs_trans_inode_buf(
        xfs_trans_t             *tp,
        xfs_buf_t               *bp)
{
        struct xfs_buf_log_item *bip = bp->b_log_item;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        bip->bli_flags |= XFS_BLI_INODE_BUF;
        xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}

/*
 * This call is used to indicate that the buffer is going to
 * be staled and was an inode buffer.  This means it gets
 * special processing during unpin - where any inodes
 * associated with the buffer should be removed from the AIL.
 * There is also special processing during recovery: any
 * replay of the inodes in the buffer needs to be
 * prevented as the buffer may have been reused.
 */
void
xfs_trans_stale_inode_buf(
        xfs_trans_t             *tp,
        xfs_buf_t               *bp)
{
        struct xfs_buf_log_item *bip = bp->b_log_item;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        bip->bli_flags |= XFS_BLI_STALE_INODE;
        bip->bli_item.li_cb = xfs_buf_iodone;
        xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}

/*
 * Mark the buffer as being one which contains newly allocated
 * inodes.  We need to make sure that even if this buffer is
 * relogged as an 'inode buf' we still recover all of the inode
 * images in the face of a crash.  This works in coordination with
 * xfs_buf_item_committed() to ensure that the buffer remains in the
 * AIL at its original location even after it has been relogged.
 */
void
xfs_trans_inode_alloc_buf(
        xfs_trans_t             *tp,
        xfs_buf_t               *bp)
{
        struct xfs_buf_log_item *bip = bp->b_log_item;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
        xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}

/*
 * Mark the buffer as ordered for this transaction.  This means that the
 * contents of the buffer are not recorded in the transaction but the buffer
 * is tracked in the AIL as though it was.  This allows us to record logical
 * changes in transactions rather than the physical changes we make to the
 * buffer without changing writeback ordering constraints of metadata
 * buffers.
 */
bool
xfs_trans_ordered_buf(
        struct xfs_trans        *tp,
        struct xfs_buf          *bp)
{
        struct xfs_buf_log_item *bip = bp->b_log_item;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        if (xfs_buf_item_dirty_format(bip))
                return false;

        bip->bli_flags |= XFS_BLI_ORDERED;
        trace_xfs_buf_item_ordered(bip);

        /*
         * We don't log a dirty range of an ordered buffer, but it still
         * needs to be marked dirty and flagged as logged.
         */
        xfs_trans_dirty_buf(tp, bp);
        return true;
}

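/*
 * Illustrative sketch, not part of this file: ordering fails if the
 * buffer already has a dirty log format, so callers fall back to
 * physically logging the whole buffer:
 *
 *	if (!xfs_trans_ordered_buf(tp, bp))
 *		xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1);
 */
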
/*
 * Set the type of the buffer for log recovery so that recovery can
 * correctly identify the buffer and hence attach the correct buffer ops
 * to it after replay.
 */
void
xfs_trans_buf_set_type(
        struct xfs_trans        *tp,
        struct xfs_buf          *bp,
        enum xfs_blft           type)
{
        struct xfs_buf_log_item *bip = bp->b_log_item;

        if (!tp)
                return;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        xfs_blft_to_flags(&bip->__bli_format, type);
}

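/*
 * Illustrative sketch, not part of this file: code that (re)initialises
 * a buffer's contents tags it with the matching value from enum xfs_blft
 * before logging it, e.g. for an AGF header:
 *
 *	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);
 */
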
void
xfs_trans_buf_copy_type(
        struct xfs_buf          *dst_bp,
        struct xfs_buf          *src_bp)
{
        struct xfs_buf_log_item *sbip = src_bp->b_log_item;
        struct xfs_buf_log_item *dbip = dst_bp->b_log_item;
        enum xfs_blft           type;

        type = xfs_blft_from_flags(&sbip->__bli_format);
        xfs_blft_to_flags(&dbip->__bli_format, type);
}

/*
 * Similar to xfs_trans_inode_buf(), this marks the buffer as a cluster of
 * dquots.  However, unlike in inode buffer recovery, dquot buffers get
 * recovered in their entirety.  (Hence, no XFS_BLI_DQUOT_ALLOC_BUF flag.)
 * The only thing that makes dquot buffers different from regular
 * buffers is that we must not replay dquot bufs when recovering
 * if a _corresponding_ quotaoff has happened.  We also have to distinguish
 * between user dquot bufs and group dquot bufs, because user and group
 * quotas can be turned off independently.
 */
void
xfs_trans_dquot_buf(
        xfs_trans_t             *tp,
        xfs_buf_t               *bp,
        uint                    type)
{
        struct xfs_buf_log_item *bip = bp->b_log_item;

        ASSERT(type == XFS_BLF_UDQUOT_BUF ||
               type == XFS_BLF_PDQUOT_BUF ||
               type == XFS_BLF_GDQUOT_BUF);

        bip->__bli_format.blf_flags |= type;

        switch (type) {
        case XFS_BLF_UDQUOT_BUF:
                type = XFS_BLFT_UDQUOT_BUF;
                break;
        case XFS_BLF_PDQUOT_BUF:
                type = XFS_BLFT_PDQUOT_BUF;
                break;
        case XFS_BLF_GDQUOT_BUF:
                type = XFS_BLFT_GDQUOT_BUF;
                break;
        default:
                type = XFS_BLFT_UNKNOWN_BUF;
                break;
        }

        xfs_trans_buf_set_type(tp, bp, type);
}
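
/*
 * Illustrative sketch, not part of this file: after initialising a newly
 * allocated dquot cluster, quota code tags the buffer and then logs it in
 * full, e.g. for user quota buffers:
 *
 *	xfs_trans_dquot_buf(tp, bp, XFS_BLF_UDQUOT_BUF);
 *	xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1);
 */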
