root/fs/xfs/xfs_aops.c

DEFINITIONS

This source file includes the following definitions:
  1. xfs_find_bdev_for_inode
  2. xfs_find_daxdev_for_inode
  3. xfs_finish_page_writeback
  4. xfs_destroy_ioend
  5. xfs_ioend_is_append
  6. xfs_setfilesize_trans_alloc
  7. __xfs_setfilesize
  8. xfs_setfilesize
  9. xfs_setfilesize_ioend
  10. xfs_end_ioend
  11. xfs_ioend_can_merge
  12. xfs_ioend_merge_append_transactions
  13. xfs_ioend_try_merge
  14. xfs_ioend_compare
  15. xfs_end_io
  16. xfs_end_bio
  17. xfs_imap_valid
  18. xfs_convert_blocks
  19. xfs_map_blocks
  20. xfs_submit_ioend
  21. xfs_alloc_ioend
  22. xfs_chain_bio
  23. xfs_add_to_ioend
  24. xfs_vm_invalidatepage
  25. xfs_aops_discard_page
  26. xfs_writepage_map
  27. xfs_do_writepage
  28. xfs_vm_writepage
  29. xfs_vm_writepages
  30. xfs_dax_writepages
  31. xfs_vm_releasepage
  32. xfs_vm_bmap
  33. xfs_vm_readpage
  34. xfs_vm_readpages
  35. xfs_iomap_swapfile_activate

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   4  * Copyright (c) 2016-2018 Christoph Hellwig.
   5  * All Rights Reserved.
   6  */
   7 #include "xfs.h"
   8 #include "xfs_shared.h"
   9 #include "xfs_format.h"
  10 #include "xfs_log_format.h"
  11 #include "xfs_trans_resv.h"
  12 #include "xfs_mount.h"
  13 #include "xfs_inode.h"
  14 #include "xfs_trans.h"
  15 #include "xfs_iomap.h"
  16 #include "xfs_trace.h"
  17 #include "xfs_bmap.h"
  18 #include "xfs_bmap_util.h"
  19 #include "xfs_reflink.h"
  20 
  21 /*
  22  * structure owned by writepages passed to individual writepage calls
  23  */
  24 struct xfs_writepage_ctx {
  25         struct xfs_bmbt_irec    imap;
  26         int                     fork;
  27         unsigned int            data_seq;
  28         unsigned int            cow_seq;
  29         struct xfs_ioend        *ioend;
  30 };
  31 
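      /*
       * Return the block device backing the inode's data: the realtime
       * device for realtime inodes, the data device otherwise.
       */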
  32 struct block_device *
  33 xfs_find_bdev_for_inode(
  34         struct inode            *inode)
  35 {
  36         struct xfs_inode        *ip = XFS_I(inode);
  37         struct xfs_mount        *mp = ip->i_mount;
  38 
  39         if (XFS_IS_REALTIME_INODE(ip))
  40                 return mp->m_rtdev_targp->bt_bdev;
  41         else
  42                 return mp->m_ddev_targp->bt_bdev;
  43 }
  44 
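      /*
       * Return the DAX device backing the inode's data, using the same
       * realtime vs. data device selection as xfs_find_bdev_for_inode().
       */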
  45 struct dax_device *
  46 xfs_find_daxdev_for_inode(
  47         struct inode            *inode)
  48 {
  49         struct xfs_inode        *ip = XFS_I(inode);
  50         struct xfs_mount        *mp = ip->i_mount;
  51 
  52         if (XFS_IS_REALTIME_INODE(ip))
  53                 return mp->m_rtdev_targp->bt_daxdev;
  54         else
  55                 return mp->m_ddev_targp->bt_daxdev;
  56 }
  57 
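      /*
       * Finish writeback for a single page of an ioend: record any error on
       * the page and the mapping, and end page writeback once the last
       * outstanding block on the page has completed.
       */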
  58 static void
  59 xfs_finish_page_writeback(
  60         struct inode            *inode,
   61         struct bio_vec          *bvec,
  62         int                     error)
  63 {
  64         struct iomap_page       *iop = to_iomap_page(bvec->bv_page);
  65 
  66         if (error) {
  67                 SetPageError(bvec->bv_page);
  68                 mapping_set_error(inode->i_mapping, -EIO);
  69         }
  70 
  71         ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
  72         ASSERT(!iop || atomic_read(&iop->write_count) > 0);
  73 
  74         if (!iop || atomic_dec_and_test(&iop->write_count))
  75                 end_page_writeback(bvec->bv_page);
  76 }
  77 
  78 /*
  79  * We're now finished for good with this ioend structure.  Update the page
  80  * state, release holds on bios, and finally free up memory.  Do not use the
  81  * ioend after this.
  82  */
  83 STATIC void
  84 xfs_destroy_ioend(
  85         struct xfs_ioend        *ioend,
  86         int                     error)
  87 {
  88         struct inode            *inode = ioend->io_inode;
  89         struct bio              *bio = &ioend->io_inline_bio;
  90         struct bio              *last = ioend->io_bio, *next;
  91         u64                     start = bio->bi_iter.bi_sector;
  92         bool                    quiet = bio_flagged(bio, BIO_QUIET);
  93 
  94         for (bio = &ioend->io_inline_bio; bio; bio = next) {
  95                 struct bio_vec  *bvec;
  96                 struct bvec_iter_all iter_all;
  97 
  98                 /*
  99                  * For the last bio, bi_private points to the ioend, so we
 100                  * need to explicitly end the iteration here.
 101                  */
 102                 if (bio == last)
 103                         next = NULL;
 104                 else
 105                         next = bio->bi_private;
 106 
 107                 /* walk each page on bio, ending page IO on them */
 108                 bio_for_each_segment_all(bvec, bio, iter_all)
 109                         xfs_finish_page_writeback(inode, bvec, error);
 110                 bio_put(bio);
 111         }
 112 
 113         if (unlikely(error && !quiet)) {
 114                 xfs_err_ratelimited(XFS_I(inode)->i_mount,
 115                         "writeback error on sector %llu", start);
 116         }
 117 }
 118 
 119 /*
 120  * Fast and loose check if this write could update the on-disk inode size.
 121  */
 122 static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
 123 {
 124         return ioend->io_offset + ioend->io_size >
 125                 XFS_I(ioend->io_inode)->i_d.di_size;
 126 }
 127 
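      /*
       * Preallocate a transaction at submission time for the on-disk file
       * size update that may be needed at I/O completion.
       */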
 128 STATIC int
 129 xfs_setfilesize_trans_alloc(
 130         struct xfs_ioend        *ioend)
 131 {
 132         struct xfs_mount        *mp = XFS_I(ioend->io_inode)->i_mount;
 133         struct xfs_trans        *tp;
 134         int                     error;
 135 
 136         error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
 137         if (error)
 138                 return error;
 139 
 140         ioend->io_append_trans = tp;
 141 
 142         /*
 143          * We may pass freeze protection with a transaction.  So tell lockdep
 144          * we released it.
 145          */
 146         __sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
 147         /*
 148          * We hand off the transaction to the completion thread now, so
 149          * clear the flag here.
 150          */
 151         current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
 152         return 0;
 153 }
 154 
 155 /*
 156  * Update on-disk file size now that data has been written to disk.
 157  */
 158 STATIC int
 159 __xfs_setfilesize(
 160         struct xfs_inode        *ip,
 161         struct xfs_trans        *tp,
 162         xfs_off_t               offset,
 163         size_t                  size)
 164 {
 165         xfs_fsize_t             isize;
 166 
 167         xfs_ilock(ip, XFS_ILOCK_EXCL);
 168         isize = xfs_new_eof(ip, offset + size);
 169         if (!isize) {
 170                 xfs_iunlock(ip, XFS_ILOCK_EXCL);
 171                 xfs_trans_cancel(tp);
 172                 return 0;
 173         }
 174 
 175         trace_xfs_setfilesize(ip, offset, size);
 176 
 177         ip->i_d.di_size = isize;
 178         xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 179         xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 180 
 181         return xfs_trans_commit(tp);
 182 }
 183 
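      /*
       * Allocate a transaction and update the on-disk file size if the given
       * range extends it.
       */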
 184 int
 185 xfs_setfilesize(
 186         struct xfs_inode        *ip,
 187         xfs_off_t               offset,
 188         size_t                  size)
 189 {
 190         struct xfs_mount        *mp = ip->i_mount;
 191         struct xfs_trans        *tp;
 192         int                     error;
 193 
 194         error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
 195         if (error)
 196                 return error;
 197 
 198         return __xfs_setfilesize(ip, tp, offset, size);
 199 }
 200 
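      /*
       * Update the on-disk file size at ioend completion using the
       * transaction preallocated at submission time, or cancel that
       * transaction if the I/O failed.
       */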
 201 STATIC int
 202 xfs_setfilesize_ioend(
 203         struct xfs_ioend        *ioend,
 204         int                     error)
 205 {
 206         struct xfs_inode        *ip = XFS_I(ioend->io_inode);
 207         struct xfs_trans        *tp = ioend->io_append_trans;
 208 
 209         /*
 210          * The transaction may have been allocated in the I/O submission thread,
 211          * thus we need to mark ourselves as being in a transaction manually.
 212          * Similarly for freeze protection.
 213          */
 214         current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
 215         __sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);
 216 
 217         /* we abort the update if there was an IO error */
 218         if (error) {
 219                 xfs_trans_cancel(tp);
 220                 return error;
 221         }
 222 
 223         return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
 224 }
 225 
 226 /*
 227  * IO write completion.
 228  */
 229 STATIC void
 230 xfs_end_ioend(
 231         struct xfs_ioend        *ioend)
 232 {
 233         struct list_head        ioend_list;
 234         struct xfs_inode        *ip = XFS_I(ioend->io_inode);
 235         xfs_off_t               offset = ioend->io_offset;
 236         size_t                  size = ioend->io_size;
 237         unsigned int            nofs_flag;
 238         int                     error;
 239 
 240         /*
 241          * We can allocate memory here while doing writeback on behalf of
 242          * memory reclaim.  To avoid memory allocation deadlocks set the
 243          * task-wide nofs context for the following operations.
 244          */
 245         nofs_flag = memalloc_nofs_save();
 246 
 247         /*
  248          * Just clean up the in-memory structures if the fs has been shut down.
 249          */
 250         if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
 251                 error = -EIO;
 252                 goto done;
 253         }
 254 
 255         /*
 256          * Clean up any COW blocks on an I/O error.
 257          */
 258         error = blk_status_to_errno(ioend->io_bio->bi_status);
 259         if (unlikely(error)) {
 260                 if (ioend->io_fork == XFS_COW_FORK)
 261                         xfs_reflink_cancel_cow_range(ip, offset, size, true);
 262                 goto done;
 263         }
 264 
 265         /*
 266          * Success: commit the COW or unwritten blocks if needed.
 267          */
 268         if (ioend->io_fork == XFS_COW_FORK)
 269                 error = xfs_reflink_end_cow(ip, offset, size);
 270         else if (ioend->io_state == XFS_EXT_UNWRITTEN)
 271                 error = xfs_iomap_write_unwritten(ip, offset, size, false);
 272         else
 273                 ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
 274 
 275 done:
 276         if (ioend->io_append_trans)
 277                 error = xfs_setfilesize_ioend(ioend, error);
 278         list_replace_init(&ioend->io_list, &ioend_list);
 279         xfs_destroy_ioend(ioend, error);
 280 
 281         while (!list_empty(&ioend_list)) {
 282                 ioend = list_first_entry(&ioend_list, struct xfs_ioend,
 283                                 io_list);
 284                 list_del_init(&ioend->io_list);
 285                 xfs_destroy_ioend(ioend, error);
 286         }
 287 
 288         memalloc_nofs_restore(nofs_flag);
 289 }
 290 
 291 /*
 292  * We can merge two adjacent ioends if they have the same set of work to do.
 293  */
 294 static bool
 295 xfs_ioend_can_merge(
 296         struct xfs_ioend        *ioend,
 297         struct xfs_ioend        *next)
 298 {
 299         if (ioend->io_bio->bi_status != next->io_bio->bi_status)
 300                 return false;
 301         if ((ioend->io_fork == XFS_COW_FORK) ^ (next->io_fork == XFS_COW_FORK))
 302                 return false;
 303         if ((ioend->io_state == XFS_EXT_UNWRITTEN) ^
 304             (next->io_state == XFS_EXT_UNWRITTEN))
 305                 return false;
 306         if (ioend->io_offset + ioend->io_size != next->io_offset)
 307                 return false;
 308         return true;
 309 }
 310 
 311 /*
  312  * If the ioend to be merged has a preallocated transaction for file
  313  * size updates, we need to ensure the ioend it is merged into also
  314  * has one.  If it already has one we can simply cancel the transaction
 315  * as it is guaranteed to be clean.
 316  */
 317 static void
 318 xfs_ioend_merge_append_transactions(
 319         struct xfs_ioend        *ioend,
 320         struct xfs_ioend        *next)
 321 {
 322         if (!ioend->io_append_trans) {
 323                 ioend->io_append_trans = next->io_append_trans;
 324                 next->io_append_trans = NULL;
 325         } else {
 326                 xfs_setfilesize_ioend(next, -ECANCELED);
 327         }
 328 }
 329 
 330 /* Try to merge adjacent completions. */
 331 STATIC void
 332 xfs_ioend_try_merge(
 333         struct xfs_ioend        *ioend,
 334         struct list_head        *more_ioends)
 335 {
 336         struct xfs_ioend        *next_ioend;
 337 
 338         while (!list_empty(more_ioends)) {
 339                 next_ioend = list_first_entry(more_ioends, struct xfs_ioend,
 340                                 io_list);
 341                 if (!xfs_ioend_can_merge(ioend, next_ioend))
 342                         break;
 343                 list_move_tail(&next_ioend->io_list, &ioend->io_list);
 344                 ioend->io_size += next_ioend->io_size;
 345                 if (next_ioend->io_append_trans)
 346                         xfs_ioend_merge_append_transactions(ioend, next_ioend);
 347         }
 348 }
 349 
 350 /* list_sort compare function for ioends */
 351 static int
 352 xfs_ioend_compare(
 353         void                    *priv,
 354         struct list_head        *a,
 355         struct list_head        *b)
 356 {
 357         struct xfs_ioend        *ia;
 358         struct xfs_ioend        *ib;
 359 
 360         ia = container_of(a, struct xfs_ioend, io_list);
 361         ib = container_of(b, struct xfs_ioend, io_list);
 362         if (ia->io_offset < ib->io_offset)
 363                 return -1;
 364         else if (ia->io_offset > ib->io_offset)
 365                 return 1;
 366         return 0;
 367 }
 368 
 369 /* Finish all pending io completions. */
 370 void
 371 xfs_end_io(
 372         struct work_struct      *work)
 373 {
 374         struct xfs_inode        *ip;
 375         struct xfs_ioend        *ioend;
 376         struct list_head        completion_list;
 377         unsigned long           flags;
 378 
 379         ip = container_of(work, struct xfs_inode, i_ioend_work);
 380 
 381         spin_lock_irqsave(&ip->i_ioend_lock, flags);
 382         list_replace_init(&ip->i_ioend_list, &completion_list);
 383         spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
 384 
 385         list_sort(NULL, &completion_list, xfs_ioend_compare);
 386 
 387         while (!list_empty(&completion_list)) {
 388                 ioend = list_first_entry(&completion_list, struct xfs_ioend,
 389                                 io_list);
 390                 list_del_init(&ioend->io_list);
 391                 xfs_ioend_try_merge(ioend, &completion_list);
 392                 xfs_end_ioend(ioend);
 393         }
 394 }
 395 
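      /*
       * Writeback bio completion handler.  Ioends that need transactional
       * completion work (COW remapping, unwritten extent conversion or an
       * on-disk size update) are queued on the per-inode list for the
       * completion workqueue; everything else is torn down right here.
       */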
 396 STATIC void
 397 xfs_end_bio(
 398         struct bio              *bio)
 399 {
 400         struct xfs_ioend        *ioend = bio->bi_private;
 401         struct xfs_inode        *ip = XFS_I(ioend->io_inode);
 402         struct xfs_mount        *mp = ip->i_mount;
 403         unsigned long           flags;
 404 
 405         if (ioend->io_fork == XFS_COW_FORK ||
 406             ioend->io_state == XFS_EXT_UNWRITTEN ||
 407             ioend->io_append_trans != NULL) {
 408                 spin_lock_irqsave(&ip->i_ioend_lock, flags);
 409                 if (list_empty(&ip->i_ioend_list))
 410                         WARN_ON_ONCE(!queue_work(mp->m_unwritten_workqueue,
 411                                                  &ip->i_ioend_work));
 412                 list_add_tail(&ioend->io_list, &ip->i_ioend_list);
 413                 spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
 414         } else
 415                 xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
 416 }
 417 
 418 /*
 419  * Fast revalidation of the cached writeback mapping. Return true if the current
 420  * mapping is valid, false otherwise.
 421  */
 422 static bool
 423 xfs_imap_valid(
 424         struct xfs_writepage_ctx        *wpc,
 425         struct xfs_inode                *ip,
 426         xfs_fileoff_t                   offset_fsb)
 427 {
 428         if (offset_fsb < wpc->imap.br_startoff ||
 429             offset_fsb >= wpc->imap.br_startoff + wpc->imap.br_blockcount)
 430                 return false;
 431         /*
 432          * If this is a COW mapping, it is sufficient to check that the mapping
 433          * covers the offset. Be careful to check this first because the caller
 434          * can revalidate a COW mapping without updating the data seqno.
 435          */
 436         if (wpc->fork == XFS_COW_FORK)
 437                 return true;
 438 
 439         /*
 440          * This is not a COW mapping. Check the sequence number of the data fork
 441          * because concurrent changes could have invalidated the extent. Check
 442          * the COW fork because concurrent changes since the last time we
 443          * checked (and found nothing at this offset) could have added
 444          * overlapping blocks.
 445          */
 446         if (wpc->data_seq != READ_ONCE(ip->i_df.if_seq))
 447                 return false;
 448         if (xfs_inode_has_cow_data(ip) &&
 449             wpc->cow_seq != READ_ONCE(ip->i_cowfp->if_seq))
 450                 return false;
 451         return true;
 452 }
 453 
 454 /*
  455  * Pass in a delalloc extent and convert it to real extents; return the real
 456  * extent that maps offset_fsb in wpc->imap.
 457  *
 458  * The current page is held locked so nothing could have removed the block
 459  * backing offset_fsb, although it could have moved from the COW to the data
 460  * fork by another thread.
 461  */
 462 static int
 463 xfs_convert_blocks(
 464         struct xfs_writepage_ctx *wpc,
 465         struct xfs_inode        *ip,
 466         xfs_fileoff_t           offset_fsb)
 467 {
 468         int                     error;
 469 
 470         /*
 471          * Attempt to allocate whatever delalloc extent currently backs
 472          * offset_fsb and put the result into wpc->imap.  Allocate in a loop
 473          * because it may take several attempts to allocate real blocks for a
 474          * contiguous delalloc extent if free space is sufficiently fragmented.
 475          */
 476         do {
 477                 error = xfs_bmapi_convert_delalloc(ip, wpc->fork, offset_fsb,
 478                                 &wpc->imap, wpc->fork == XFS_COW_FORK ?
 479                                         &wpc->cow_seq : &wpc->data_seq);
 480                 if (error)
 481                         return error;
 482         } while (wpc->imap.br_startoff + wpc->imap.br_blockcount <= offset_fsb);
 483 
 484         return 0;
 485 }
 486 
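      /*
       * Find the extent backing the block at @offset, converting delalloc
       * extents to real ones as needed, and cache the result in @wpc so
       * that subsequent blocks can reuse it.
       */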
 487 STATIC int
 488 xfs_map_blocks(
 489         struct xfs_writepage_ctx *wpc,
 490         struct inode            *inode,
 491         loff_t                  offset)
 492 {
 493         struct xfs_inode        *ip = XFS_I(inode);
 494         struct xfs_mount        *mp = ip->i_mount;
 495         ssize_t                 count = i_blocksize(inode);
 496         xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
 497         xfs_fileoff_t           end_fsb = XFS_B_TO_FSB(mp, offset + count);
 498         xfs_fileoff_t           cow_fsb = NULLFILEOFF;
 499         struct xfs_bmbt_irec    imap;
 500         struct xfs_iext_cursor  icur;
 501         int                     retries = 0;
 502         int                     error = 0;
 503 
 504         if (XFS_FORCED_SHUTDOWN(mp))
 505                 return -EIO;
 506 
 507         /*
 508          * COW fork blocks can overlap data fork blocks even if the blocks
 509          * aren't shared.  COW I/O always takes precedent, so we must always
 510          * check for overlap on reflink inodes unless the mapping is already a
 511          * COW one, or the COW fork hasn't changed from the last time we looked
 512          * at it.
 513          *
 514          * It's safe to check the COW fork if_seq here without the ILOCK because
 515          * we've indirectly protected against concurrent updates: writeback has
 516          * the page locked, which prevents concurrent invalidations by reflink
 517          * and directio and prevents concurrent buffered writes to the same
 518          * page.  Changes to if_seq always happen under i_lock, which protects
 519          * against concurrent updates and provides a memory barrier on the way
 520          * out that ensures that we always see the current value.
 521          */
 522         if (xfs_imap_valid(wpc, ip, offset_fsb))
 523                 return 0;
 524 
 525         /*
 526          * If we don't have a valid map, now it's time to get a new one for this
 527          * offset.  This will convert delayed allocations (including COW ones)
 528          * into real extents.  If we return without a valid map, it means we
 529          * landed in a hole and we skip the block.
 530          */
 531 retry:
 532         xfs_ilock(ip, XFS_ILOCK_SHARED);
 533         ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
 534                (ip->i_df.if_flags & XFS_IFEXTENTS));
 535 
 536         /*
  537          * Check if this offset is covered by a COW extent, and if so use
 538          * it directly instead of looking up anything in the data fork.
 539          */
 540         if (xfs_inode_has_cow_data(ip) &&
 541             xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
 542                 cow_fsb = imap.br_startoff;
 543         if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
 544                 wpc->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
 545                 xfs_iunlock(ip, XFS_ILOCK_SHARED);
 546 
 547                 wpc->fork = XFS_COW_FORK;
 548                 goto allocate_blocks;
 549         }
 550 
 551         /*
 552          * No COW extent overlap. Revalidate now that we may have updated
 553          * ->cow_seq. If the data mapping is still valid, we're done.
 554          */
 555         if (xfs_imap_valid(wpc, ip, offset_fsb)) {
 556                 xfs_iunlock(ip, XFS_ILOCK_SHARED);
 557                 return 0;
 558         }
 559 
 560         /*
 561          * If we don't have a valid map, now it's time to get a new one for this
 562          * offset.  This will convert delayed allocations (including COW ones)
 563          * into real extents.
 564          */
 565         if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
 566                 imap.br_startoff = end_fsb;     /* fake a hole past EOF */
 567         wpc->data_seq = READ_ONCE(ip->i_df.if_seq);
 568         xfs_iunlock(ip, XFS_ILOCK_SHARED);
 569 
 570         wpc->fork = XFS_DATA_FORK;
 571 
 572         /* landed in a hole or beyond EOF? */
 573         if (imap.br_startoff > offset_fsb) {
 574                 imap.br_blockcount = imap.br_startoff - offset_fsb;
 575                 imap.br_startoff = offset_fsb;
 576                 imap.br_startblock = HOLESTARTBLOCK;
 577                 imap.br_state = XFS_EXT_NORM;
 578         }
 579 
 580         /*
 581          * Truncate to the next COW extent if there is one.  This is the only
 582          * opportunity to do this because we can skip COW fork lookups for the
 583          * subsequent blocks in the mapping; however, the requirement to treat
 584          * the COW range separately remains.
 585          */
 586         if (cow_fsb != NULLFILEOFF &&
 587             cow_fsb < imap.br_startoff + imap.br_blockcount)
 588                 imap.br_blockcount = cow_fsb - imap.br_startoff;
 589 
 590         /* got a delalloc extent? */
 591         if (imap.br_startblock != HOLESTARTBLOCK &&
 592             isnullstartblock(imap.br_startblock))
 593                 goto allocate_blocks;
 594 
 595         wpc->imap = imap;
 596         trace_xfs_map_blocks_found(ip, offset, count, wpc->fork, &imap);
 597         return 0;
 598 allocate_blocks:
 599         error = xfs_convert_blocks(wpc, ip, offset_fsb);
 600         if (error) {
 601                 /*
 602                  * If we failed to find the extent in the COW fork we might have
 603                  * raced with a COW to data fork conversion or truncate.
 604                  * Restart the lookup to catch the extent in the data fork for
 605                  * the former case, but prevent additional retries to avoid
 606                  * looping forever for the latter case.
 607                  */
 608                 if (error == -EAGAIN && wpc->fork == XFS_COW_FORK && !retries++)
 609                         goto retry;
 610                 ASSERT(error != -EAGAIN);
 611                 return error;
 612         }
 613 
 614         /*
  615          * Due to merging, the returned real extent might be larger than the
  616          * original delalloc one.  Trim the returned extent to the next COW
 617          * boundary again to force a re-lookup.
 618          */
 619         if (wpc->fork != XFS_COW_FORK && cow_fsb != NULLFILEOFF &&
 620             cow_fsb < wpc->imap.br_startoff + wpc->imap.br_blockcount)
 621                 wpc->imap.br_blockcount = cow_fsb - wpc->imap.br_startoff;
 622 
 623         ASSERT(wpc->imap.br_startoff <= offset_fsb);
 624         ASSERT(wpc->imap.br_startoff + wpc->imap.br_blockcount > offset_fsb);
 625         trace_xfs_map_blocks_alloc(ip, offset, count, wpc->fork, &imap);
 626         return 0;
 627 }
 628 
 629 /*
 630  * Submit the bio for an ioend. We are passed an ioend with a bio attached to
 631  * it, and we submit that bio. The ioend may be used for multiple bio
 632  * submissions, so we only want to allocate an append transaction for the ioend
 633  * once. In the case of multiple bio submission, each bio will take an IO
 634  * reference to the ioend to ensure that the ioend completion is only done once
 635  * all bios have been submitted and the ioend is really done.
 636  *
 637  * If @status is non-zero, it means that we have a situation where some part of
  638  * the submission process has failed after we have marked pages for writeback
 639  * and unlocked them. In this situation, we need to fail the bio and ioend
 640  * rather than submit it to IO. This typically only happens on a filesystem
 641  * shutdown.
 642  */
 643 STATIC int
 644 xfs_submit_ioend(
 645         struct writeback_control *wbc,
 646         struct xfs_ioend        *ioend,
 647         int                     status)
 648 {
 649         unsigned int            nofs_flag;
 650 
 651         /*
 652          * We can allocate memory here while doing writeback on behalf of
 653          * memory reclaim.  To avoid memory allocation deadlocks set the
 654          * task-wide nofs context for the following operations.
 655          */
 656         nofs_flag = memalloc_nofs_save();
 657 
 658         /* Convert CoW extents to regular */
 659         if (!status && ioend->io_fork == XFS_COW_FORK) {
 660                 status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
 661                                 ioend->io_offset, ioend->io_size);
 662         }
 663 
 664         /* Reserve log space if we might write beyond the on-disk inode size. */
 665         if (!status &&
 666             (ioend->io_fork == XFS_COW_FORK ||
 667              ioend->io_state != XFS_EXT_UNWRITTEN) &&
 668             xfs_ioend_is_append(ioend) &&
 669             !ioend->io_append_trans)
 670                 status = xfs_setfilesize_trans_alloc(ioend);
 671 
 672         memalloc_nofs_restore(nofs_flag);
 673 
 674         ioend->io_bio->bi_private = ioend;
 675         ioend->io_bio->bi_end_io = xfs_end_bio;
 676 
 677         /*
 678          * If we are failing the IO now, just mark the ioend with an
 679          * error and finish it. This will run IO completion immediately
 680          * as there is only one reference to the ioend at this point in
 681          * time.
 682          */
 683         if (status) {
 684                 ioend->io_bio->bi_status = errno_to_blk_status(status);
 685                 bio_endio(ioend->io_bio);
 686                 return status;
 687         }
 688 
 689         submit_bio(ioend->io_bio);
 690         return 0;
 691 }
 692 
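      /*
       * Allocate a new ioend, embedded in the initial bio taken from the
       * ioend bioset, and initialise it for the given fork, extent state
       * and file offset.
       */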
 693 static struct xfs_ioend *
 694 xfs_alloc_ioend(
 695         struct inode            *inode,
 696         int                     fork,
 697         xfs_exntst_t            state,
 698         xfs_off_t               offset,
 699         struct block_device     *bdev,
 700         sector_t                sector,
 701         struct writeback_control *wbc)
 702 {
 703         struct xfs_ioend        *ioend;
 704         struct bio              *bio;
 705 
 706         bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &xfs_ioend_bioset);
 707         bio_set_dev(bio, bdev);
 708         bio->bi_iter.bi_sector = sector;
 709         bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
 710         bio->bi_write_hint = inode->i_write_hint;
 711         wbc_init_bio(wbc, bio);
 712 
 713         ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
 714         INIT_LIST_HEAD(&ioend->io_list);
 715         ioend->io_fork = fork;
 716         ioend->io_state = state;
 717         ioend->io_inode = inode;
 718         ioend->io_size = 0;
 719         ioend->io_offset = offset;
 720         ioend->io_append_trans = NULL;
 721         ioend->io_bio = bio;
 722         return ioend;
 723 }
 724 
 725 /*
 726  * Allocate a new bio, and chain the old bio to the new one.
 727  *
  728  * Note that we have to perform the chaining in this unintuitive order
 729  * so that the bi_private linkage is set up in the right direction for the
 730  * traversal in xfs_destroy_ioend().
 731  */
 732 static struct bio *
 733 xfs_chain_bio(
 734         struct bio              *prev)
 735 {
 736         struct bio *new;
 737 
 738         new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
 739         bio_copy_dev(new, prev);/* also copies over blkcg information */
 740         new->bi_iter.bi_sector = bio_end_sector(prev);
 741         new->bi_opf = prev->bi_opf;
 742         new->bi_write_hint = prev->bi_write_hint;
 743 
 744         bio_chain(prev, new);
 745         bio_get(prev);          /* for xfs_destroy_ioend */
 746         submit_bio(prev);
 747         return new;
 748 }
 749 
 750 /*
 751  * Test to see if we have an existing ioend structure that we could append to
 752  * first, otherwise finish off the current ioend and start another.
 753  */
 754 STATIC void
 755 xfs_add_to_ioend(
 756         struct inode            *inode,
 757         xfs_off_t               offset,
 758         struct page             *page,
 759         struct iomap_page       *iop,
 760         struct xfs_writepage_ctx *wpc,
 761         struct writeback_control *wbc,
 762         struct list_head        *iolist)
 763 {
 764         struct xfs_inode        *ip = XFS_I(inode);
 765         struct xfs_mount        *mp = ip->i_mount;
 766         struct block_device     *bdev = xfs_find_bdev_for_inode(inode);
 767         unsigned                len = i_blocksize(inode);
 768         unsigned                poff = offset & (PAGE_SIZE - 1);
 769         bool                    merged, same_page = false;
 770         sector_t                sector;
 771 
 772         sector = xfs_fsb_to_db(ip, wpc->imap.br_startblock) +
 773                 ((offset - XFS_FSB_TO_B(mp, wpc->imap.br_startoff)) >> 9);
 774 
 775         if (!wpc->ioend ||
 776             wpc->fork != wpc->ioend->io_fork ||
 777             wpc->imap.br_state != wpc->ioend->io_state ||
 778             sector != bio_end_sector(wpc->ioend->io_bio) ||
 779             offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
 780                 if (wpc->ioend)
 781                         list_add(&wpc->ioend->io_list, iolist);
 782                 wpc->ioend = xfs_alloc_ioend(inode, wpc->fork,
 783                                 wpc->imap.br_state, offset, bdev, sector, wbc);
 784         }
 785 
 786         merged = __bio_try_merge_page(wpc->ioend->io_bio, page, len, poff,
 787                         &same_page);
 788 
 789         if (iop && !same_page)
 790                 atomic_inc(&iop->write_count);
 791 
 792         if (!merged) {
 793                 if (bio_full(wpc->ioend->io_bio, len))
 794                         wpc->ioend->io_bio = xfs_chain_bio(wpc->ioend->io_bio);
 795                 bio_add_page(wpc->ioend->io_bio, page, len, poff);
 796         }
 797 
 798         wpc->ioend->io_size += len;
 799         wbc_account_cgroup_owner(wbc, page, len);
 800 }
 801 
 802 STATIC void
 803 xfs_vm_invalidatepage(
 804         struct page             *page,
 805         unsigned int            offset,
 806         unsigned int            length)
 807 {
 808         trace_xfs_invalidatepage(page->mapping->host, page, offset, length);
 809         iomap_invalidatepage(page, offset, length);
 810 }
 811 
 812 /*
 813  * If the page has delalloc blocks on it, we need to punch them out before we
 814  * invalidate the page.  If we don't, we leave a stale delalloc mapping on the
 815  * inode that can trip up a later direct I/O read operation on the same region.
 816  *
 817  * We prevent this by truncating away the delalloc regions on the page.  Because
 818  * they are delalloc, we can do this without needing a transaction. Indeed - if
 819  * we get ENOSPC errors, we have to be able to do this truncation without a
 820  * transaction as there is no space left for block reservation (typically why we
 821  * see a ENOSPC in writeback).
  822  * see an ENOSPC in writeback).
 823 STATIC void
 824 xfs_aops_discard_page(
 825         struct page             *page)
 826 {
 827         struct inode            *inode = page->mapping->host;
 828         struct xfs_inode        *ip = XFS_I(inode);
 829         struct xfs_mount        *mp = ip->i_mount;
 830         loff_t                  offset = page_offset(page);
 831         xfs_fileoff_t           start_fsb = XFS_B_TO_FSBT(mp, offset);
 832         int                     error;
 833 
 834         if (XFS_FORCED_SHUTDOWN(mp))
 835                 goto out_invalidate;
 836 
 837         xfs_alert(mp,
 838                 "page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
 839                         page, ip->i_ino, offset);
 840 
 841         error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
 842                         PAGE_SIZE / i_blocksize(inode));
 843         if (error && !XFS_FORCED_SHUTDOWN(mp))
 844                 xfs_alert(mp, "page discard unable to remove delalloc mapping.");
 845 out_invalidate:
 846         xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
 847 }
 848 
 849 /*
 850  * We implement an immediate ioend submission policy here to avoid needing to
 851  * chain multiple ioends and hence nest mempool allocations which can violate
 852  * forward progress guarantees we need to provide. The current ioend we are
 853  * adding blocks to is cached on the writepage context, and if the new block
 854  * does not append to the cached ioend it will create a new ioend and cache that
 855  * instead.
 856  *
 857  * If a new ioend is created and cached, the old ioend is returned and queued
 858  * locally for submission once the entire page is processed or an error has been
 859  * detected.  While ioends are submitted immediately after they are completed,
 860  * batching optimisations are provided by higher level block plugging.
 861  *
 862  * At the end of a writeback pass, there will be a cached ioend remaining on the
 863  * writepage context that the caller will need to submit.
 864  */
 865 static int
 866 xfs_writepage_map(
 867         struct xfs_writepage_ctx *wpc,
 868         struct writeback_control *wbc,
 869         struct inode            *inode,
 870         struct page             *page,
 871         uint64_t                end_offset)
 872 {
 873         LIST_HEAD(submit_list);
 874         struct iomap_page       *iop = to_iomap_page(page);
 875         unsigned                len = i_blocksize(inode);
 876         struct xfs_ioend        *ioend, *next;
 877         uint64_t                file_offset;    /* file offset of page */
 878         int                     error = 0, count = 0, i;
 879 
 880         ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
 881         ASSERT(!iop || atomic_read(&iop->write_count) == 0);
 882 
 883         /*
 884          * Walk through the page to find areas to write back. If we run off the
 885          * end of the current map or find the current map invalid, grab a new
 886          * one.
 887          */
 888         for (i = 0, file_offset = page_offset(page);
 889              i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
 890              i++, file_offset += len) {
 891                 if (iop && !test_bit(i, iop->uptodate))
 892                         continue;
 893 
 894                 error = xfs_map_blocks(wpc, inode, file_offset);
 895                 if (error)
 896                         break;
 897                 if (wpc->imap.br_startblock == HOLESTARTBLOCK)
 898                         continue;
 899                 xfs_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
 900                                  &submit_list);
 901                 count++;
 902         }
 903 
 904         ASSERT(wpc->ioend || list_empty(&submit_list));
 905         ASSERT(PageLocked(page));
 906         ASSERT(!PageWriteback(page));
 907 
 908         /*
 909          * On error, we have to fail the ioend here because we may have set
  910          * pages under writeback.  We have to make sure we run IO completion to
 911          * mark the error state of the IO appropriately, so we can't cancel the
 912          * ioend directly here.  That means we have to mark this page as under
 913          * writeback if we included any blocks from it in the ioend chain so
 914          * that completion treats it correctly.
 915          *
  916          * If we didn't include the page in the ioend, then on error we can
 917          * simply discard and unlock it as there are no other users of the page
 918          * now.  The caller will still need to trigger submission of outstanding
 919          * ioends on the writepage context so they are treated correctly on
 920          * error.
 921          */
 922         if (unlikely(error)) {
 923                 if (!count) {
 924                         xfs_aops_discard_page(page);
 925                         ClearPageUptodate(page);
 926                         unlock_page(page);
 927                         goto done;
 928                 }
 929 
 930                 /*
 931                  * If the page was not fully cleaned, we need to ensure that the
 932                  * higher layers come back to it correctly.  That means we need
 933                  * to keep the page dirty, and for WB_SYNC_ALL writeback we need
 934                  * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed
 935                  * so another attempt to write this page in this writeback sweep
 936                  * will be made.
 937                  */
 938                 set_page_writeback_keepwrite(page);
 939         } else {
 940                 clear_page_dirty_for_io(page);
 941                 set_page_writeback(page);
 942         }
 943 
 944         unlock_page(page);
 945 
 946         /*
 947          * Preserve the original error if there was one, otherwise catch
 948          * submission errors here and propagate into subsequent ioend
 949          * submissions.
 950          */
 951         list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
 952                 int error2;
 953 
 954                 list_del_init(&ioend->io_list);
 955                 error2 = xfs_submit_ioend(wbc, ioend, error);
 956                 if (error2 && !error)
 957                         error = error2;
 958         }
 959 
 960         /*
 961          * We can end up here with no error and nothing to write only if we race
 962          * with a partial page truncate on a sub-page block sized filesystem.
 963          */
 964         if (!count)
 965                 end_page_writeback(page);
 966 done:
 967         mapping_set_error(page->mapping, error);
 968         return error;
 969 }
 970 
 971 /*
 972  * Write out a dirty page.
 973  *
 974  * For delalloc space on the page we need to allocate space and flush it.
 975  * For unwritten space on the page we need to start the conversion to
 976  * regular allocated space.
 977  */
 978 STATIC int
 979 xfs_do_writepage(
 980         struct page             *page,
 981         struct writeback_control *wbc,
 982         void                    *data)
 983 {
 984         struct xfs_writepage_ctx *wpc = data;
 985         struct inode            *inode = page->mapping->host;
 986         loff_t                  offset;
  987         uint64_t                end_offset;
 988         pgoff_t                 end_index;
 989 
 990         trace_xfs_writepage(inode, page, 0, 0);
 991 
 992         /*
 993          * Refuse to write the page out if we are called from reclaim context.
 994          *
 995          * This avoids stack overflows when called from deeply used stacks in
 996          * random callers for direct reclaim or memcg reclaim.  We explicitly
 997          * allow reclaim from kswapd as the stack usage there is relatively low.
 998          *
 999          * This should never happen except in the case of a VM regression so
1000          * warn about it.
1001          */
1002         if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
1003                         PF_MEMALLOC))
1004                 goto redirty;
1005 
1006         /*
1007          * Given that we do not allow direct reclaim to call us, we should
1008          * never be called while in a filesystem transaction.
1009          */
1010         if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
1011                 goto redirty;
1012 
1013         /*
1014          * Is this page beyond the end of the file?
1015          *
 1016          * If the page index is less than the end_index, adjust the end_offset
1017          * to the highest offset that this page should represent.
1018          * -----------------------------------------------------
1019          * |                    file mapping           | <EOF> |
1020          * -----------------------------------------------------
1021          * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
1022          * ^--------------------------------^----------|--------
1023          * |     desired writeback range    |      see else    |
1024          * ---------------------------------^------------------|
1025          */
1026         offset = i_size_read(inode);
1027         end_index = offset >> PAGE_SHIFT;
1028         if (page->index < end_index)
1029                 end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
1030         else {
1031                 /*
1032                  * Check whether the page to write out is beyond or straddles
1033                  * i_size or not.
1034                  * -------------------------------------------------------
1035                  * |            file mapping                    | <EOF>  |
1036                  * -------------------------------------------------------
1037                  * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
1038                  * ^--------------------------------^-----------|---------
1039                  * |                                |      Straddles     |
1040                  * ---------------------------------^-----------|--------|
1041                  */
1042                 unsigned offset_into_page = offset & (PAGE_SIZE - 1);
1043 
1044                 /*
1045                  * Skip the page if it is fully outside i_size, e.g. due to a
1046                  * truncate operation that is in progress. We must redirty the
1047                  * page so that reclaim stops reclaiming it. Otherwise
1048                  * xfs_vm_releasepage() is called on it and gets confused.
1049                  *
 1050                  * Note that the end_index is unsigned long; it would overflow
 1051                  * if the given offset is greater than 16TB on a 32-bit system,
 1052                  * and if we checked whether the page is fully outside i_size
 1053                  * via "if (page->index >= end_index + 1)", "end_index + 1"
 1054                  * would evaluate to 0.  In that case the page would be redirtied
 1055                  * and written out repeatedly, which would result in an
 1056                  * infinite loop; the user program that performs this operation
1057                  * will hang.  Instead, we can verify this situation by checking
 1058                  * if the page to write is totally beyond the i_size or if its
1059                  * offset is just equal to the EOF.
1060                  */
1061                 if (page->index > end_index ||
1062                     (page->index == end_index && offset_into_page == 0))
1063                         goto redirty;
1064 
1065                 /*
1066                  * The page straddles i_size.  It must be zeroed out on each
1067                  * and every writepage invocation because it may be mmapped.
1068                  * "A file is mapped in multiples of the page size.  For a file
1069                  * that is not a multiple of the page size, the remaining
1070                  * memory is zeroed when mapped, and writes to that region are
1071                  * not written out to the file."
1072                  */
1073                 zero_user_segment(page, offset_into_page, PAGE_SIZE);
1074 
1075                 /* Adjust the end_offset to the end of file */
1076                 end_offset = offset;
1077         }
1078 
1079         return xfs_writepage_map(wpc, wbc, inode, page, end_offset);
1080 
1081 redirty:
1082         redirty_page_for_writepage(wbc, page);
1083         unlock_page(page);
1084         return 0;
1085 }
1086 
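      /*
       * ->writepage handler: write out a single dirty page and submit the
       * ioend cached on the writepage context, if any.
       */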
1087 STATIC int
1088 xfs_vm_writepage(
1089         struct page             *page,
1090         struct writeback_control *wbc)
1091 {
1092         struct xfs_writepage_ctx wpc = { };
1093         int                     ret;
1094 
1095         ret = xfs_do_writepage(page, wbc, &wpc);
1096         if (wpc.ioend)
1097                 ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
1098         return ret;
1099 }
1100 
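      /*
       * ->writepages handler: write back dirty pages of the mapping and
       * submit the ioend left cached on the writepage context.
       */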
1101 STATIC int
1102 xfs_vm_writepages(
1103         struct address_space    *mapping,
1104         struct writeback_control *wbc)
1105 {
1106         struct xfs_writepage_ctx wpc = { };
1107         int                     ret;
1108 
1109         xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
1110         ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
1111         if (wpc.ioend)
1112                 ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
1113         return ret;
1114 }
1115 
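      /* ->writepages handler for DAX inodes: flush out dirty DAX pages. */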
1116 STATIC int
1117 xfs_dax_writepages(
1118         struct address_space    *mapping,
1119         struct writeback_control *wbc)
1120 {
1121         xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
1122         return dax_writeback_mapping_range(mapping,
1123                         xfs_find_bdev_for_inode(mapping->host), wbc);
1124 }
1125 
1126 STATIC int
1127 xfs_vm_releasepage(
1128         struct page             *page,
1129         gfp_t                   gfp_mask)
1130 {
1131         trace_xfs_releasepage(page->mapping->host, page, 0, 0);
1132         return iomap_releasepage(page, gfp_mask);
1133 }
1134 
1135 STATIC sector_t
1136 xfs_vm_bmap(
1137         struct address_space    *mapping,
1138         sector_t                block)
1139 {
1140         struct xfs_inode        *ip = XFS_I(mapping->host);
1141 
1142         trace_xfs_vm_bmap(ip);
1143 
1144         /*
1145          * The swap code (ab-)uses ->bmap to get a block mapping and then
1146          * bypasses the file system for actual I/O.  We really can't allow
 1147  * that on reflink inodes, so we have to skip out here.  And yes,
1148          * 0 is the magic code for a bmap error.
1149          *
1150          * Since we don't pass back blockdev info, we can't return bmap
1151          * information for rt files either.
1152          */
1153         if (xfs_is_cow_inode(ip) || XFS_IS_REALTIME_INODE(ip))
1154                 return 0;
1155         return iomap_bmap(mapping, block, &xfs_iomap_ops);
1156 }
1157 
1158 STATIC int
1159 xfs_vm_readpage(
1160         struct file             *unused,
1161         struct page             *page)
1162 {
1163         trace_xfs_vm_readpage(page->mapping->host, 1);
1164         return iomap_readpage(page, &xfs_iomap_ops);
1165 }
1166 
1167 STATIC int
1168 xfs_vm_readpages(
1169         struct file             *unused,
1170         struct address_space    *mapping,
1171         struct list_head        *pages,
1172         unsigned                nr_pages)
1173 {
1174         trace_xfs_vm_readpages(mapping->host, nr_pages);
1175         return iomap_readpages(mapping, pages, nr_pages, &xfs_iomap_ops);
1176 }
1177 
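      /*
       * Activate a swap file: point the swap code at the backing block
       * device and map the file's extents through the iomap machinery.
       */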
1178 static int
1179 xfs_iomap_swapfile_activate(
1180         struct swap_info_struct         *sis,
1181         struct file                     *swap_file,
1182         sector_t                        *span)
1183 {
1184         sis->bdev = xfs_find_bdev_for_inode(file_inode(swap_file));
1185         return iomap_swapfile_activate(sis, swap_file, span, &xfs_iomap_ops);
1186 }
1187 
1188 const struct address_space_operations xfs_address_space_operations = {
1189         .readpage               = xfs_vm_readpage,
1190         .readpages              = xfs_vm_readpages,
1191         .writepage              = xfs_vm_writepage,
1192         .writepages             = xfs_vm_writepages,
1193         .set_page_dirty         = iomap_set_page_dirty,
1194         .releasepage            = xfs_vm_releasepage,
1195         .invalidatepage         = xfs_vm_invalidatepage,
1196         .bmap                   = xfs_vm_bmap,
1197         .direct_IO              = noop_direct_IO,
1198         .migratepage            = iomap_migrate_page,
1199         .is_partially_uptodate  = iomap_is_partially_uptodate,
1200         .error_remove_page      = generic_error_remove_page,
1201         .swap_activate          = xfs_iomap_swapfile_activate,
1202 };
1203 
1204 const struct address_space_operations xfs_dax_aops = {
1205         .writepages             = xfs_dax_writepages,
1206         .direct_IO              = noop_direct_IO,
1207         .set_page_dirty         = noop_set_page_dirty,
1208         .invalidatepage         = noop_invalidatepage,
1209         .swap_activate          = xfs_iomap_swapfile_activate,
1210 };
