root/fs/xfs/xfs_trans_ail.c

DEFINITIONS

This source file includes the following definitions:
  1. xfs_ail_check
  2. xfs_ail_max
  3. xfs_ail_next
  4. xfs_ail_min_lsn
  5. xfs_ail_max_lsn
  6. xfs_trans_ail_cursor_init
  7. xfs_trans_ail_cursor_next
  8. xfs_trans_ail_cursor_done
  9. xfs_trans_ail_cursor_clear
  10. xfs_trans_ail_cursor_first
  11. __xfs_trans_ail_cursor_last
  12. xfs_trans_ail_cursor_last
  13. xfs_ail_splice
  14. xfs_ail_delete
  15. xfsaild_push_item
  16. xfsaild_push
  17. xfsaild
  18. xfs_ail_push
  19. xfs_ail_push_all
  20. xfs_ail_push_all_sync
  21. xfs_trans_ail_update_bulk
  22. xfs_ail_delete_one
  23. xfs_trans_ail_delete
  24. xfs_trans_ail_init
  25. xfs_trans_ail_destroy

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2008 Dave Chinner
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_trace.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_log.h"

#ifdef DEBUG
/*
 * Check that the list is sorted as it should be.
 *
 * Called with the ail lock held, but we don't want to assert fail with it
 * held otherwise we'll lock everything up and won't be able to debug the
 * cause. Hence we sample and check the state under the AIL lock and return if
 * everything is fine, otherwise we drop the lock and run the ASSERT checks.
 * Asserts may not be fatal, so pick the lock back up and continue onwards.
 */
STATIC void
xfs_ail_check(
        struct xfs_ail          *ailp,
        struct xfs_log_item     *lip)
{
        struct xfs_log_item     *prev_lip;
        struct xfs_log_item     *next_lip;
        xfs_lsn_t               prev_lsn = NULLCOMMITLSN;
        xfs_lsn_t               next_lsn = NULLCOMMITLSN;
        xfs_lsn_t               lsn;
        bool                    in_ail;

        if (list_empty(&ailp->ail_head))
                return;

        /*
         * Sample then check the next and previous entries are valid.
         */
        in_ail = test_bit(XFS_LI_IN_AIL, &lip->li_flags);
        prev_lip = list_entry(lip->li_ail.prev, struct xfs_log_item, li_ail);
        if (&prev_lip->li_ail != &ailp->ail_head)
                prev_lsn = prev_lip->li_lsn;
        next_lip = list_entry(lip->li_ail.next, struct xfs_log_item, li_ail);
        if (&next_lip->li_ail != &ailp->ail_head)
                next_lsn = next_lip->li_lsn;
        lsn = lip->li_lsn;

        if (in_ail &&
            (prev_lsn == NULLCOMMITLSN || XFS_LSN_CMP(prev_lsn, lsn) <= 0) &&
            (next_lsn == NULLCOMMITLSN || XFS_LSN_CMP(next_lsn, lsn) >= 0))
                return;

        spin_unlock(&ailp->ail_lock);
        ASSERT(in_ail);
        ASSERT(prev_lsn == NULLCOMMITLSN || XFS_LSN_CMP(prev_lsn, lsn) <= 0);
        ASSERT(next_lsn == NULLCOMMITLSN || XFS_LSN_CMP(next_lsn, lsn) >= 0);
        spin_lock(&ailp->ail_lock);
}
#else /* !DEBUG */
#define xfs_ail_check(a,l)
#endif /* DEBUG */

/*
 * Return a pointer to the last item in the AIL.  If the AIL is empty, then
 * return NULL.
 */
static struct xfs_log_item *
xfs_ail_max(
        struct xfs_ail  *ailp)
{
        if (list_empty(&ailp->ail_head))
                return NULL;

        return list_entry(ailp->ail_head.prev, struct xfs_log_item, li_ail);
}

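/*
 * Note: the matching xfs_ail_min() helper used throughout this file is not
 * defined here; it is a static inline in fs/xfs/xfs_trans_priv.h. A sketch
 * of that definition is reproduced below for reference only (see the header
 * for the authoritative version):
 *
 *      static inline struct xfs_log_item *
 *      xfs_ail_min(
 *              struct xfs_ail  *ailp)
 *      {
 *              return list_first_entry_or_null(&ailp->ail_head,
 *                              struct xfs_log_item, li_ail);
 *      }
 */
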
/*
 * Return a pointer to the item which follows the given item in the AIL.  If
 * the given item is the last item in the list, then return NULL.
 */
static struct xfs_log_item *
xfs_ail_next(
        struct xfs_ail          *ailp,
        struct xfs_log_item     *lip)
{
        if (lip->li_ail.next == &ailp->ail_head)
                return NULL;

        return list_first_entry(&lip->li_ail, struct xfs_log_item, li_ail);
}

/*
 * This is called by the log manager code to determine the LSN of the tail of
 * the log.  This is exactly the LSN of the first item in the AIL.  If the AIL
 * is empty, then this function returns 0.
 *
 * We need the AIL lock in order to get a coherent read of the lsn of the
 * first item in the AIL.
 */
xfs_lsn_t
xfs_ail_min_lsn(
        struct xfs_ail          *ailp)
{
        xfs_lsn_t               lsn = 0;
        struct xfs_log_item     *lip;

        spin_lock(&ailp->ail_lock);
        lip = xfs_ail_min(ailp);
        if (lip)
                lsn = lip->li_lsn;
        spin_unlock(&ailp->ail_lock);

        return lsn;
}

/*
 * Return the maximum lsn held in the AIL, or zero if the AIL is empty.
 */
static xfs_lsn_t
xfs_ail_max_lsn(
        struct xfs_ail          *ailp)
{
        xfs_lsn_t               lsn = 0;
        struct xfs_log_item     *lip;

        spin_lock(&ailp->ail_lock);
        lip = xfs_ail_max(ailp);
        if (lip)
                lsn = lip->li_lsn;
        spin_unlock(&ailp->ail_lock);

        return lsn;
}

/*
 * The cursor keeps track of where our current traversal is up to by tracking
 * the next item in the list for us. However, for this to be safe, removing an
 * object from the AIL needs to invalidate any cursor that points to it. Hence
 * the traversal cursor needs to be linked to the struct xfs_ail so that
 * deletion can search all the active cursors for invalidation.
 */
STATIC void
xfs_trans_ail_cursor_init(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur)
{
        cur->item = NULL;
        list_add_tail(&cur->list, &ailp->ail_cursors);
}

/*
 * Get the next item in the traversal and advance the cursor.  If the cursor
 * was invalidated (indicated by the low bit being set in the item pointer),
 * restart the traversal from the start of the AIL.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_next(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur)
{
        struct xfs_log_item     *lip = cur->item;

        if ((uintptr_t)lip & 1)
                lip = xfs_ail_min(ailp);
        if (lip)
                cur->item = xfs_ail_next(ailp, lip);
        return lip;
}

/*
 * When the traversal is complete, we need to remove the cursor from the list
 * of traversing cursors.
 */
void
xfs_trans_ail_cursor_done(
        struct xfs_ail_cursor   *cur)
{
        cur->item = NULL;
        list_del_init(&cur->list);
}

/*
 * Invalidate any cursor that is pointing to this item. This is called when an
 * item is removed from the AIL. Any cursor pointing to this object is now
 * invalid and the traversal needs to be terminated so it doesn't reference a
 * freed object. We set the low bit of the cursor item pointer so we can
 * distinguish between an invalidation and the end of the list when getting the
 * next item from the cursor.
 */
STATIC void
xfs_trans_ail_cursor_clear(
        struct xfs_ail          *ailp,
        struct xfs_log_item     *lip)
{
        struct xfs_ail_cursor   *cur;

        list_for_each_entry(cur, &ailp->ail_cursors, list) {
                if (cur->item == lip)
                        cur->item = (struct xfs_log_item *)
                                        ((uintptr_t)cur->item | 1);
        }
}

/*
 * Find the first item in the AIL with the given @lsn by searching in ascending
 * LSN order and initialise the cursor to point to the next item for an
 * ascending traversal.  Pass a @lsn of zero to initialise the cursor to the
 * first item in the AIL. Returns NULL if the list is empty.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_first(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur,
        xfs_lsn_t               lsn)
{
        struct xfs_log_item     *lip;

        xfs_trans_ail_cursor_init(ailp, cur);

        if (lsn == 0) {
                lip = xfs_ail_min(ailp);
                goto out;
        }

        list_for_each_entry(lip, &ailp->ail_head, li_ail) {
                if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0)
                        goto out;
        }
        return NULL;

out:
        if (lip)
                cur->item = xfs_ail_next(ailp, lip);
        return lip;
}

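/*
 * Illustrative sketch, not part of this file: a typical cursor-based walk of
 * the AIL, in the style of xfsaild_push() below. example_ail_walk() is a
 * hypothetical helper shown only to demonstrate the cursor API. The AIL lock
 * must be held across the traversal; the cursor keeps the walk safe because
 * deleting an item invalidates any cursor pointing at it rather than leaving
 * a dangling pointer.
 *
 *      static void
 *      example_ail_walk(
 *              struct xfs_ail          *ailp)
 *      {
 *              struct xfs_ail_cursor   cur;
 *              struct xfs_log_item     *lip;
 *
 *              spin_lock(&ailp->ail_lock);
 *              for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
 *                   lip != NULL;
 *                   lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
 *                      // examine lip here; if the lock is dropped and
 *                      // reacquired, the cursor still yields a valid next
 *                      // item or restarts from the AIL minimum
 *              }
 *              xfs_trans_ail_cursor_done(&cur);
 *              spin_unlock(&ailp->ail_lock);
 *      }
 */
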
static struct xfs_log_item *
__xfs_trans_ail_cursor_last(
        struct xfs_ail          *ailp,
        xfs_lsn_t               lsn)
{
        struct xfs_log_item     *lip;

        list_for_each_entry_reverse(lip, &ailp->ail_head, li_ail) {
                if (XFS_LSN_CMP(lip->li_lsn, lsn) <= 0)
                        return lip;
        }
        return NULL;
}

/*
 * Find the last item in the AIL with the given @lsn by searching in descending
 * LSN order and initialise the cursor to point to that item.  If there is no
 * item with the value of @lsn, then it sets the cursor to the last item with an
 * LSN lower than @lsn.  Returns NULL if the list is empty.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_last(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur,
        xfs_lsn_t               lsn)
{
        xfs_trans_ail_cursor_init(ailp, cur);
        cur->item = __xfs_trans_ail_cursor_last(ailp, lsn);
        return cur->item;
}

/*
 * Splice the log item list into the AIL at the given LSN. We splice to the
 * tail of the given LSN to maintain insert order for push traversals. The
 * cursor is optional, allowing repeated updates to the same LSN to avoid
 * repeated traversals.  This should not be called with an empty list.
 */
static void
xfs_ail_splice(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur,
        struct list_head        *list,
        xfs_lsn_t               lsn)
{
        struct xfs_log_item     *lip;

        ASSERT(!list_empty(list));

        /*
         * Use the cursor to determine the insertion point if one is
         * provided.  If not, or if the one we got is not valid,
         * find the place in the AIL where the items belong.
         */
        lip = cur ? cur->item : NULL;
        if (!lip || (uintptr_t)lip & 1)
                lip = __xfs_trans_ail_cursor_last(ailp, lsn);

        /*
         * If a cursor is provided, we know we're processing the AIL
         * in lsn order, and future items to be spliced in will
         * follow the last one being inserted now.  Update the
         * cursor to point to that last item, now while we have a
         * reliable pointer to it.
         */
        if (cur)
                cur->item = list_entry(list->prev, struct xfs_log_item, li_ail);

        /*
         * Finally perform the splice.  Unless the AIL was empty,
         * lip points to the item in the AIL _after_ which the new
         * items should go.  If lip is null the AIL was empty, so
         * the new items go at the head of the AIL.
         */
        if (lip)
                list_splice(list, &lip->li_ail);
        else
                list_splice(list, &ailp->ail_head);
}

/*
 * Delete the given item from the AIL, invalidating any cursors that point
 * to it.
 */
static void
xfs_ail_delete(
        struct xfs_ail          *ailp,
        struct xfs_log_item     *lip)
{
        xfs_ail_check(ailp, lip);
        list_del(&lip->li_ail);
        xfs_trans_ail_cursor_clear(ailp, lip);
}

static inline uint
xfsaild_push_item(
        struct xfs_ail          *ailp,
        struct xfs_log_item     *lip)
{
        /*
         * If log item pinning is enabled, skip the push and track the item as
         * pinned. This can help induce head-behind-tail conditions.
         */
        if (XFS_TEST_ERROR(false, ailp->ail_mount, XFS_ERRTAG_LOG_ITEM_PIN))
                return XFS_ITEM_PINNED;

        /*
         * Consider the item pinned if a push callback is not defined so the
         * caller will force the log. This should only happen for intent items
         * as they are unpinned once the associated done item is committed to
         * the on-disk log.
         */
        if (!lip->li_ops->iop_push)
                return XFS_ITEM_PINNED;
        return lip->li_ops->iop_push(lip, &ailp->ail_buf_list);
}

static long
xfsaild_push(
        struct xfs_ail          *ailp)
{
        xfs_mount_t             *mp = ailp->ail_mount;
        struct xfs_ail_cursor   cur;
        struct xfs_log_item     *lip;
        xfs_lsn_t               lsn;
        xfs_lsn_t               target;
        long                    tout;
        int                     stuck = 0;
        int                     flushing = 0;
        int                     count = 0;

        /*
         * If we encountered pinned items or did not finish writing out all
         * buffers the last time we ran, force the log first and wait for it
         * before pushing again.
         */
        if (ailp->ail_log_flush && ailp->ail_last_pushed_lsn == 0 &&
            (!list_empty_careful(&ailp->ail_buf_list) ||
             xfs_ail_min_lsn(ailp))) {
                ailp->ail_log_flush = 0;

                XFS_STATS_INC(mp, xs_push_ail_flush);
                xfs_log_force(mp, XFS_LOG_SYNC);
        }

        spin_lock(&ailp->ail_lock);

        /* barrier matches the ail_target update in xfs_ail_push() */
        smp_rmb();
        target = ailp->ail_target;
        ailp->ail_target_prev = target;

        lip = xfs_trans_ail_cursor_first(ailp, &cur, ailp->ail_last_pushed_lsn);
        if (!lip) {
                /*
                 * If the AIL is empty or our push has reached the end we are
                 * done now.
                 */
                xfs_trans_ail_cursor_done(&cur);
                spin_unlock(&ailp->ail_lock);
                goto out_done;
        }

        XFS_STATS_INC(mp, xs_push_ail);

        lsn = lip->li_lsn;
        while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) {
                int     lock_result;

                /*
                 * Note that iop_push may unlock and reacquire the AIL lock.  We
                 * rely on the AIL cursor implementation to be able to deal with
                 * the dropped lock.
                 */
                lock_result = xfsaild_push_item(ailp, lip);
                switch (lock_result) {
                case XFS_ITEM_SUCCESS:
                        XFS_STATS_INC(mp, xs_push_ail_success);
                        trace_xfs_ail_push(lip);

                        ailp->ail_last_pushed_lsn = lsn;
                        break;

                case XFS_ITEM_FLUSHING:
                        /*
                         * The item or its backing buffer is already being
                         * flushed.  The typical reason for that is that an
                         * inode buffer is locked because we already pushed the
                         * updates to it as part of inode clustering.
                         *
                         * We do not want to stop flushing just because lots
                         * of items are already being flushed, but we need to
                         * re-try the flushing relatively soon if most of the
                         * AIL is being flushed.
                         */
                        XFS_STATS_INC(mp, xs_push_ail_flushing);
                        trace_xfs_ail_flushing(lip);

                        flushing++;
                        ailp->ail_last_pushed_lsn = lsn;
                        break;

                case XFS_ITEM_PINNED:
                        XFS_STATS_INC(mp, xs_push_ail_pinned);
                        trace_xfs_ail_pinned(lip);

                        stuck++;
                        ailp->ail_log_flush++;
                        break;
                case XFS_ITEM_LOCKED:
                        XFS_STATS_INC(mp, xs_push_ail_locked);
                        trace_xfs_ail_locked(lip);

                        stuck++;
                        break;
                default:
                        ASSERT(0);
                        break;
                }

                count++;

                /*
                 * Are there too many items we can't do anything with?
                 *
                 * If we are skipping too many items because we can't flush
                 * them or they are already being flushed, we back off and
                 * give them time to complete whatever operation is being
                 * done. i.e. remove pressure from the AIL while we can't make
                 * progress so traversals don't slow down further inserts and
                 * removals to/from the AIL.
                 *
                 * The value of 100 is an arbitrary magic number based on
                 * observation.
                 */
                if (stuck > 100)
                        break;

                lip = xfs_trans_ail_cursor_next(ailp, &cur);
                if (lip == NULL)
                        break;
                lsn = lip->li_lsn;
        }
        xfs_trans_ail_cursor_done(&cur);
        spin_unlock(&ailp->ail_lock);

        if (xfs_buf_delwri_submit_nowait(&ailp->ail_buf_list))
                ailp->ail_log_flush++;

        if (!count || XFS_LSN_CMP(lsn, target) >= 0) {
out_done:
                /*
                 * We reached the target or the AIL is empty, so wait a bit
                 * longer for I/O to complete and remove pushed items from the
                 * AIL before we start the next scan from the start of the AIL.
                 */
                tout = 50;
                ailp->ail_last_pushed_lsn = 0;
        } else if (((stuck + flushing) * 100) / count > 90) {
                /*
                 * Either there is a lot of contention on the AIL or we are
                 * stuck due to operations in progress. "Stuck" in this case
                 * is defined as >90% of the items we tried to push were stuck.
                 *
                 * Backoff a bit more to allow some I/O to complete before
                 * restarting from the start of the AIL. This prevents us from
                 * spinning on the same items, and if they are pinned will
                 * allow the restart to issue a log force to unpin the stuck
                 * items.
                 */
                tout = 20;
                ailp->ail_last_pushed_lsn = 0;
        } else {
                /*
                 * Assume we have more work to do in a short while.
                 */
                tout = 10;
        }

        return tout;
}

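/*
 * Worked example of the backoff heuristic above, with illustrative numbers:
 * if one scan visits count = 100 items of which stuck = 70 and flushing = 25,
 * then (70 + 25) * 100 / 100 = 95 > 90, so xfsaild backs off for 20ms and
 * restarts from the head of the AIL. With stuck = 40 and flushing = 20 the
 * ratio is only 60, so the next push resumes from ail_last_pushed_lsn after
 * 10ms instead.
 */
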
static int
xfsaild(
        void            *data)
{
        struct xfs_ail  *ailp = data;
        long            tout = 0;       /* milliseconds */
        unsigned int    noreclaim_flag;

        noreclaim_flag = memalloc_noreclaim_save();
        set_freezable();

        while (1) {
                if (tout && tout <= 20)
                        set_current_state(TASK_KILLABLE);
                else
                        set_current_state(TASK_INTERRUPTIBLE);

                /*
                 * Check kthread_should_stop() after we set the task state to
                 * guarantee that we either see the stop bit and exit or the
                 * task state is reset to runnable such that it's not scheduled
                 * out indefinitely and detects the stop bit at next iteration.
                 * A memory barrier is included in above task state set to
                 * serialize against kthread_stop().
                 */
                if (kthread_should_stop()) {
                        __set_current_state(TASK_RUNNING);

                        /*
                         * The caller forces out the AIL before stopping the
                         * thread in the common case, which means the delwri
                         * queue is drained. In the shutdown case, the queue may
                         * still hold relogged buffers that haven't been
                         * submitted because they were pinned since added to the
                         * queue.
                         *
                         * Log I/O error processing stales the underlying buffer
                         * and clears the delwri state, expecting the buf to be
                         * removed on the next submission attempt. That won't
                         * happen if we're shutting down, so this is the last
                         * opportunity to release such buffers from the queue.
                         */
                        ASSERT(list_empty(&ailp->ail_buf_list) ||
                               XFS_FORCED_SHUTDOWN(ailp->ail_mount));
                        xfs_buf_delwri_cancel(&ailp->ail_buf_list);
                        break;
                }

                spin_lock(&ailp->ail_lock);

                /*
                 * Idle if the AIL is empty and we are not racing with a target
                 * update. We check the AIL after we set the task to a sleep
                 * state to guarantee that we either catch an ail_target update
                 * or that a wake_up resets the state to TASK_RUNNING.
                 * Otherwise, we run the risk of sleeping indefinitely.
                 *
                 * The barrier matches the ail_target update in xfs_ail_push().
                 */
                smp_rmb();
                if (!xfs_ail_min(ailp) &&
                    ailp->ail_target == ailp->ail_target_prev) {
                        spin_unlock(&ailp->ail_lock);
                        freezable_schedule();
                        tout = 0;
                        continue;
                }
                spin_unlock(&ailp->ail_lock);

                if (tout)
                        freezable_schedule_timeout(msecs_to_jiffies(tout));

                __set_current_state(TASK_RUNNING);

                try_to_freeze();

                tout = xfsaild_push(ailp);
        }

        memalloc_noreclaim_restore(noreclaim_flag);
        return 0;
}

/*
 * This routine is called to move the tail of the AIL forward.  It does this by
 * trying to flush items in the AIL whose lsns are below the given
 * threshold_lsn.
 *
 * The push is run asynchronously by the xfsaild kthread, which means the
 * caller needs to handle waiting on the async flush for space to become
 * available. We don't interrupt a push that is already in progress; we simply
 * record the new target and wake the thread in case it is idle.
 *
 * We do this unlocked - we only need to know whether there is anything in the
 * AIL at the time we are called. We don't need to access the contents of
 * any of the objects, so the lock is not needed.
 */
void
xfs_ail_push(
        struct xfs_ail          *ailp,
        xfs_lsn_t               threshold_lsn)
{
        struct xfs_log_item     *lip;

        lip = xfs_ail_min(ailp);
        if (!lip || XFS_FORCED_SHUTDOWN(ailp->ail_mount) ||
            XFS_LSN_CMP(threshold_lsn, ailp->ail_target) <= 0)
                return;

        /*
         * Ensure that the new target is noticed by the push code before we
         * wake the xfsaild. The write barrier pairs with the smp_rmb() before
         * the ail_target reads in xfsaild() and xfsaild_push().
         */
        smp_wmb();
        xfs_trans_ail_copy_lsn(ailp, &ailp->ail_target, &threshold_lsn);
        smp_wmb();

        wake_up_process(ailp->ail_task);
}

/*
 * Push out all items in the AIL immediately
 */
void
xfs_ail_push_all(
        struct xfs_ail  *ailp)
{
        xfs_lsn_t       threshold_lsn = xfs_ail_max_lsn(ailp);

        if (threshold_lsn)
                xfs_ail_push(ailp, threshold_lsn);
}

/*
 * Push out all items in the AIL immediately and wait until the AIL is empty.
 */
void
xfs_ail_push_all_sync(
        struct xfs_ail  *ailp)
{
        struct xfs_log_item     *lip;
        DEFINE_WAIT(wait);

        spin_lock(&ailp->ail_lock);
        while ((lip = xfs_ail_max(ailp)) != NULL) {
                prepare_to_wait(&ailp->ail_empty, &wait, TASK_UNINTERRUPTIBLE);
                ailp->ail_target = lip->li_lsn;
                wake_up_process(ailp->ail_task);
                spin_unlock(&ailp->ail_lock);
                schedule();
                spin_lock(&ailp->ail_lock);
        }
        spin_unlock(&ailp->ail_lock);

        finish_wait(&ailp->ail_empty, &wait);
}

/*
 * xfs_trans_ail_update_bulk - bulk AIL insertion operation.
 *
 * @xfs_trans_ail_update_bulk takes an array of log items that all need to be
 * positioned at the same LSN in the AIL. If an item is not in the AIL, it will
 * be added.  Otherwise, it will be repositioned by removing it and re-adding
 * it to the AIL. If we move the first item in the AIL, update the log tail to
 * match the new minimum LSN in the AIL.
 *
 * This function does all its work under a single hold of the AIL lock, and as
 * such must be entered with the AIL lock already held. As a result, once we
 * have the AIL lock, we need to check each log item LSN to confirm it needs
 * to be moved forward in the AIL.
 *
 * To optimise the insert operation, we delete all the items from the AIL in
 * the first pass, moving them into a temporary list, then splice the temporary
 * list into the correct position in the AIL. This avoids needing to do an
 * insert operation on every item.
 *
 * This function must be called with the AIL lock held.  The lock is dropped
 * before returning.
 */
void
xfs_trans_ail_update_bulk(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur,
        struct xfs_log_item     **log_items,
        int                     nr_items,
        xfs_lsn_t               lsn) __releases(ailp->ail_lock)
{
        struct xfs_log_item     *mlip;
        int                     mlip_changed = 0;
        int                     i;
        LIST_HEAD(tmp);

        ASSERT(nr_items > 0);           /* Not required, but true. */
        mlip = xfs_ail_min(ailp);

        for (i = 0; i < nr_items; i++) {
                struct xfs_log_item *lip = log_items[i];
                if (test_and_set_bit(XFS_LI_IN_AIL, &lip->li_flags)) {
                        /* check if we really need to move the item */
                        if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0)
                                continue;

                        trace_xfs_ail_move(lip, lip->li_lsn, lsn);
                        xfs_ail_delete(ailp, lip);
                        if (mlip == lip)
                                mlip_changed = 1;
                } else {
                        trace_xfs_ail_insert(lip, 0, lsn);
                }
                lip->li_lsn = lsn;
                list_add(&lip->li_ail, &tmp);
        }

        if (!list_empty(&tmp))
                xfs_ail_splice(ailp, cur, &tmp, lsn);

        if (mlip_changed) {
                if (!XFS_FORCED_SHUTDOWN(ailp->ail_mount))
                        xlog_assign_tail_lsn_locked(ailp->ail_mount);
                spin_unlock(&ailp->ail_lock);

                xfs_log_space_wake(ailp->ail_mount);
        } else {
                spin_unlock(&ailp->ail_lock);
        }
}

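/*
 * For reference, the common single-item entry point is a static inline
 * wrapper in fs/xfs/xfs_trans_priv.h that funnels into the bulk update above.
 * Sketched here for convenience (see the header for the authoritative
 * definition):
 *
 *      static inline void
 *      xfs_trans_ail_update(
 *              struct xfs_ail          *ailp,
 *              struct xfs_log_item     *lip,
 *              xfs_lsn_t               lsn) __releases(ailp->ail_lock)
 *      {
 *              xfs_trans_ail_update_bulk(ailp, NULL, &lip, 1, lsn);
 *      }
 */
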
bool
xfs_ail_delete_one(
        struct xfs_ail          *ailp,
        struct xfs_log_item     *lip)
{
        struct xfs_log_item     *mlip = xfs_ail_min(ailp);

        trace_xfs_ail_delete(lip, mlip->li_lsn, lip->li_lsn);
        xfs_ail_delete(ailp, lip);
        xfs_clear_li_failed(lip);
        clear_bit(XFS_LI_IN_AIL, &lip->li_flags);
        lip->li_lsn = 0;

        return mlip == lip;
}

/*
 * Remove a log item from the AIL.
 *
 * xfs_trans_ail_delete() takes a log item that needs to be removed from the
 * AIL. The caller is already holding the AIL lock, and has done all the
 * checks necessary to ensure the item passed in is ready for deletion. This
 * includes checking that the item is in the AIL.
 *
 * Unlink the log item from the AIL, clear the IN_AIL flag from the item and
 * reset the item's lsn to 0. If we removed the first item in the AIL, update
 * the log tail to match the new minimum LSN in the AIL.
 *
 * This function must be called with the AIL lock held.  The lock is dropped
 * before returning.
 */
void
xfs_trans_ail_delete(
        struct xfs_ail          *ailp,
        struct xfs_log_item     *lip,
        int                     shutdown_type) __releases(ailp->ail_lock)
{
        struct xfs_mount        *mp = ailp->ail_mount;
        bool                    mlip_changed;

        if (!test_bit(XFS_LI_IN_AIL, &lip->li_flags)) {
                spin_unlock(&ailp->ail_lock);
                if (!XFS_FORCED_SHUTDOWN(mp)) {
                        xfs_alert_tag(mp, XFS_PTAG_AILDELETE,
        "%s: attempting to delete a log item that is not in the AIL",
                                        __func__);
                        xfs_force_shutdown(mp, shutdown_type);
                }
                return;
        }

        mlip_changed = xfs_ail_delete_one(ailp, lip);
        if (mlip_changed) {
                if (!XFS_FORCED_SHUTDOWN(mp))
                        xlog_assign_tail_lsn_locked(mp);
                if (list_empty(&ailp->ail_head))
                        wake_up_all(&ailp->ail_empty);
        }

        spin_unlock(&ailp->ail_lock);
        if (mlip_changed)
                xfs_log_space_wake(ailp->ail_mount);
}

int
xfs_trans_ail_init(
        xfs_mount_t     *mp)
{
        struct xfs_ail  *ailp;

        ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL);
        if (!ailp)
                return -ENOMEM;

        ailp->ail_mount = mp;
        INIT_LIST_HEAD(&ailp->ail_head);
        INIT_LIST_HEAD(&ailp->ail_cursors);
        spin_lock_init(&ailp->ail_lock);
        INIT_LIST_HEAD(&ailp->ail_buf_list);
        init_waitqueue_head(&ailp->ail_empty);

        ailp->ail_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
                        ailp->ail_mount->m_fsname);
        if (IS_ERR(ailp->ail_task))
                goto out_free_ailp;

        mp->m_ail = ailp;
        return 0;

out_free_ailp:
        kmem_free(ailp);
        return -ENOMEM;
}

void
xfs_trans_ail_destroy(
        xfs_mount_t     *mp)
{
        struct xfs_ail  *ailp = mp->m_ail;

        kthread_stop(ailp->ail_task);
        kmem_free(ailp);
}
