root/fs/ntfs/aops.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes following definitions.
  1. ntfs_end_buffer_async_read
  2. ntfs_read_block
  3. ntfs_readpage
  4. ntfs_write_block
  5. ntfs_write_mst_block
  6. ntfs_writepage
  7. ntfs_bmap
  8. mark_ntfs_record_dirty

   1 // SPDX-License-Identifier: GPL-2.0-or-later
   2 /**
   3  * aops.c - NTFS kernel address space operations and page cache handling.
   4  *
   5  * Copyright (c) 2001-2014 Anton Altaparmakov and Tuxera Inc.
   6  * Copyright (c) 2002 Richard Russon
   7  */
   8 
   9 #include <linux/errno.h>
  10 #include <linux/fs.h>
  11 #include <linux/gfp.h>
  12 #include <linux/mm.h>
  13 #include <linux/pagemap.h>
  14 #include <linux/swap.h>
  15 #include <linux/buffer_head.h>
  16 #include <linux/writeback.h>
  17 #include <linux/bit_spinlock.h>
  18 #include <linux/bio.h>
  19 
  20 #include "aops.h"
  21 #include "attrib.h"
  22 #include "debug.h"
  23 #include "inode.h"
  24 #include "mft.h"
  25 #include "runlist.h"
  26 #include "types.h"
  27 #include "ntfs.h"
  28 
  29 /**
  30  * ntfs_end_buffer_async_read - async io completion for reading attributes
  31  * @bh:         buffer head on which io is completed
  32  * @uptodate:   whether @bh is now uptodate or not
  33  *
  34  * Asynchronous I/O completion handler for reading pages belonging to the
  35  * attribute address space of an inode.  The inodes can either be files or
  36  * directories or they can be fake inodes describing some attribute.
  37  *
  38  * If NInoMstProtected(), perform the post read mst fixups when all IO on the
  39  * page has been completed and mark the page uptodate or set the error bit on
  40  * the page.  To determine the size of the records that need fixing up, we
  41  * cheat a little bit by setting the index_block_size in ntfs_inode to the ntfs
  42  * record size, and index_block_size_bits, to the log(base 2) of the ntfs
  43  * record size.
  44  */
  45 static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
  46 {
  47         unsigned long flags;
  48         struct buffer_head *first, *tmp;
  49         struct page *page;
  50         struct inode *vi;
  51         ntfs_inode *ni;
  52         int page_uptodate = 1;
  53 
  54         page = bh->b_page;
  55         vi = page->mapping->host;
  56         ni = NTFS_I(vi);
  57 
  58         if (likely(uptodate)) {
  59                 loff_t i_size;
  60                 s64 file_ofs, init_size;
  61 
  62                 set_buffer_uptodate(bh);
  63 
  64                 file_ofs = ((s64)page->index << PAGE_SHIFT) +
  65                                 bh_offset(bh);
  66                 read_lock_irqsave(&ni->size_lock, flags);
  67                 init_size = ni->initialized_size;
  68                 i_size = i_size_read(vi);
  69                 read_unlock_irqrestore(&ni->size_lock, flags);
  70                 if (unlikely(init_size > i_size)) {
  71                         /* Race with shrinking truncate. */
  72                         init_size = i_size;
  73                 }
  74                 /* Check for the current buffer head overflowing. */
  75                 if (unlikely(file_ofs + bh->b_size > init_size)) {
  76                         int ofs;
  77                         void *kaddr;
  78 
  79                         ofs = 0;
  80                         if (file_ofs < init_size)
  81                                 ofs = init_size - file_ofs;
  82                         kaddr = kmap_atomic(page);
  83                         memset(kaddr + bh_offset(bh) + ofs, 0,
  84                                         bh->b_size - ofs);
  85                         flush_dcache_page(page);
  86                         kunmap_atomic(kaddr);
  87                 }
  88         } else {
  89                 clear_buffer_uptodate(bh);
  90                 SetPageError(page);
  91                 ntfs_error(ni->vol->sb, "Buffer I/O error, logical block "
  92                                 "0x%llx.", (unsigned long long)bh->b_blocknr);
  93         }
  94         first = page_buffers(page);
  95         local_irq_save(flags);
  96         bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
  97         clear_buffer_async_read(bh);
  98         unlock_buffer(bh);
  99         tmp = bh;
 100         do {
 101                 if (!buffer_uptodate(tmp))
 102                         page_uptodate = 0;
 103                 if (buffer_async_read(tmp)) {
 104                         if (likely(buffer_locked(tmp)))
 105                                 goto still_busy;
 106                         /* Async buffers must be locked. */
 107                         BUG();
 108                 }
 109                 tmp = tmp->b_this_page;
 110         } while (tmp != bh);
 111         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
 112         local_irq_restore(flags);
 113         /*
 114          * If none of the buffers had errors then we can set the page uptodate,
 115          * but we first have to perform the post read mst fixups, if the
 116          * attribute is mst protected, i.e. if NInoMstProteced(ni) is true.
 117          * Note we ignore fixup errors as those are detected when
 118          * map_mft_record() is called which gives us per record granularity
 119          * rather than per page granularity.
 120          */
 121         if (!NInoMstProtected(ni)) {
 122                 if (likely(page_uptodate && !PageError(page)))
 123                         SetPageUptodate(page);
 124         } else {
 125                 u8 *kaddr;
 126                 unsigned int i, recs;
 127                 u32 rec_size;
 128 
 129                 rec_size = ni->itype.index.block_size;
 130                 recs = PAGE_SIZE / rec_size;
 131                 /* Should have been verified before we got here... */
 132                 BUG_ON(!recs);
 133                 kaddr = kmap_atomic(page);
 134                 for (i = 0; i < recs; i++)
 135                         post_read_mst_fixup((NTFS_RECORD*)(kaddr +
 136                                         i * rec_size), rec_size);
 137                 kunmap_atomic(kaddr);
 138                 flush_dcache_page(page);
 139                 if (likely(page_uptodate && !PageError(page)))
 140                         SetPageUptodate(page);
 141         }
 142         unlock_page(page);
 143         return;
 144 still_busy:
 145         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
 146         local_irq_restore(flags);
 147         return;
 148 }
 149 
 150 /**
 151  * ntfs_read_block - fill a @page of an address space with data
 152  * @page:       page cache page to fill with data
 153  *
 154  * Fill the page @page of the address space belonging to the @page->host inode.
 155  * We read each buffer asynchronously and when all buffers are read in, our io
 156  * completion handler ntfs_end_buffer_read_async(), if required, automatically
 157  * applies the mst fixups to the page before finally marking it uptodate and
 158  * unlocking it.
 159  *
 160  * We only enforce allocated_size limit because i_size is checked for in
 161  * generic_file_read().
 162  *
 163  * Return 0 on success and -errno on error.
 164  *
 165  * Contains an adapted version of fs/buffer.c::block_read_full_page().
 166  */
 167 static int ntfs_read_block(struct page *page)
 168 {
 169         loff_t i_size;
 170         VCN vcn;
 171         LCN lcn;
 172         s64 init_size;
 173         struct inode *vi;
 174         ntfs_inode *ni;
 175         ntfs_volume *vol;
 176         runlist_element *rl;
 177         struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
 178         sector_t iblock, lblock, zblock;
 179         unsigned long flags;
 180         unsigned int blocksize, vcn_ofs;
 181         int i, nr;
 182         unsigned char blocksize_bits;
 183 
 184         vi = page->mapping->host;
 185         ni = NTFS_I(vi);
 186         vol = ni->vol;
 187 
 188         /* $MFT/$DATA must have its complete runlist in memory at all times. */
 189         BUG_ON(!ni->runlist.rl && !ni->mft_no && !NInoAttr(ni));
 190 
 191         blocksize = vol->sb->s_blocksize;
 192         blocksize_bits = vol->sb->s_blocksize_bits;
 193 
 194         if (!page_has_buffers(page)) {
 195                 create_empty_buffers(page, blocksize, 0);
 196                 if (unlikely(!page_has_buffers(page))) {
 197                         unlock_page(page);
 198                         return -ENOMEM;
 199                 }
 200         }
 201         bh = head = page_buffers(page);
 202         BUG_ON(!bh);
 203 
 204         /*
 205          * We may be racing with truncate.  To avoid some of the problems we
 206          * now take a snapshot of the various sizes and use those for the whole
 207          * of the function.  In case of an extending truncate it just means we
 208          * may leave some buffers unmapped which are now allocated.  This is
 209          * not a problem since these buffers will just get mapped when a write
 210          * occurs.  In case of a shrinking truncate, we will detect this later
 211          * on due to the runlist being incomplete and if the page is being
 212          * fully truncated, truncate will throw it away as soon as we unlock
 213          * it so no need to worry what we do with it.
 214          */
 215         iblock = (s64)page->index << (PAGE_SHIFT - blocksize_bits);
 216         read_lock_irqsave(&ni->size_lock, flags);
 217         lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
 218         init_size = ni->initialized_size;
 219         i_size = i_size_read(vi);
 220         read_unlock_irqrestore(&ni->size_lock, flags);
 221         if (unlikely(init_size > i_size)) {
 222                 /* Race with shrinking truncate. */
 223                 init_size = i_size;
 224         }
 225         zblock = (init_size + blocksize - 1) >> blocksize_bits;
 226 
 227         /* Loop through all the buffers in the page. */
 228         rl = NULL;
 229         nr = i = 0;
 230         do {
 231                 int err = 0;
 232 
 233                 if (unlikely(buffer_uptodate(bh)))
 234                         continue;
 235                 if (unlikely(buffer_mapped(bh))) {
 236                         arr[nr++] = bh;
 237                         continue;
 238                 }
 239                 bh->b_bdev = vol->sb->s_bdev;
 240                 /* Is the block within the allowed limits? */
 241                 if (iblock < lblock) {
 242                         bool is_retry = false;
 243 
 244                         /* Convert iblock into corresponding vcn and offset. */
 245                         vcn = (VCN)iblock << blocksize_bits >>
 246                                         vol->cluster_size_bits;
 247                         vcn_ofs = ((VCN)iblock << blocksize_bits) &
 248                                         vol->cluster_size_mask;
 249                         if (!rl) {
 250 lock_retry_remap:
 251                                 down_read(&ni->runlist.lock);
 252                                 rl = ni->runlist.rl;
 253                         }
 254                         if (likely(rl != NULL)) {
 255                                 /* Seek to element containing target vcn. */
 256                                 while (rl->length && rl[1].vcn <= vcn)
 257                                         rl++;
 258                                 lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
 259                         } else
 260                                 lcn = LCN_RL_NOT_MAPPED;
 261                         /* Successful remap. */
 262                         if (lcn >= 0) {
 263                                 /* Setup buffer head to correct block. */
 264                                 bh->b_blocknr = ((lcn << vol->cluster_size_bits)
 265                                                 + vcn_ofs) >> blocksize_bits;
 266                                 set_buffer_mapped(bh);
 267                                 /* Only read initialized data blocks. */
 268                                 if (iblock < zblock) {
 269                                         arr[nr++] = bh;
 270                                         continue;
 271                                 }
 272                                 /* Fully non-initialized data block, zero it. */
 273                                 goto handle_zblock;
 274                         }
 275                         /* It is a hole, need to zero it. */
 276                         if (lcn == LCN_HOLE)
 277                                 goto handle_hole;
 278                         /* If first try and runlist unmapped, map and retry. */
 279                         if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
 280                                 is_retry = true;
 281                                 /*
 282                                  * Attempt to map runlist, dropping lock for
 283                                  * the duration.
 284                                  */
 285                                 up_read(&ni->runlist.lock);
 286                                 err = ntfs_map_runlist(ni, vcn);
 287                                 if (likely(!err))
 288                                         goto lock_retry_remap;
 289                                 rl = NULL;
 290                         } else if (!rl)
 291                                 up_read(&ni->runlist.lock);
 292                         /*
 293                          * If buffer is outside the runlist, treat it as a
 294                          * hole.  This can happen due to concurrent truncate
 295                          * for example.
 296                          */
 297                         if (err == -ENOENT || lcn == LCN_ENOENT) {
 298                                 err = 0;
 299                                 goto handle_hole;
 300                         }
 301                         /* Hard error, zero out region. */
 302                         if (!err)
 303                                 err = -EIO;
 304                         bh->b_blocknr = -1;
 305                         SetPageError(page);
 306                         ntfs_error(vol->sb, "Failed to read from inode 0x%lx, "
 307                                         "attribute type 0x%x, vcn 0x%llx, "
 308                                         "offset 0x%x because its location on "
 309                                         "disk could not be determined%s "
 310                                         "(error code %i).", ni->mft_no,
 311                                         ni->type, (unsigned long long)vcn,
 312                                         vcn_ofs, is_retry ? " even after "
 313                                         "retrying" : "", err);
 314                 }
 315                 /*
 316                  * Either iblock was outside lblock limits or
 317                  * ntfs_rl_vcn_to_lcn() returned error.  Just zero that portion
 318                  * of the page and set the buffer uptodate.
 319                  */
 320 handle_hole:
 321                 bh->b_blocknr = -1UL;
 322                 clear_buffer_mapped(bh);
 323 handle_zblock:
 324                 zero_user(page, i * blocksize, blocksize);
 325                 if (likely(!err))
 326                         set_buffer_uptodate(bh);
 327         } while (i++, iblock++, (bh = bh->b_this_page) != head);
 328 
 329         /* Release the lock if we took it. */
 330         if (rl)
 331                 up_read(&ni->runlist.lock);
 332 
 333         /* Check we have at least one buffer ready for i/o. */
 334         if (nr) {
 335                 struct buffer_head *tbh;
 336 
 337                 /* Lock the buffers. */
 338                 for (i = 0; i < nr; i++) {
 339                         tbh = arr[i];
 340                         lock_buffer(tbh);
 341                         tbh->b_end_io = ntfs_end_buffer_async_read;
 342                         set_buffer_async_read(tbh);
 343                 }
 344                 /* Finally, start i/o on the buffers. */
 345                 for (i = 0; i < nr; i++) {
 346                         tbh = arr[i];
 347                         if (likely(!buffer_uptodate(tbh)))
 348                                 submit_bh(REQ_OP_READ, 0, tbh);
 349                         else
 350                                 ntfs_end_buffer_async_read(tbh, 1);
 351                 }
 352                 return 0;
 353         }
 354         /* No i/o was scheduled on any of the buffers. */
 355         if (likely(!PageError(page)))
 356                 SetPageUptodate(page);
 357         else /* Signal synchronous i/o error. */
 358                 nr = -EIO;
 359         unlock_page(page);
 360         return nr;
 361 }
 362 
 363 /**
 364  * ntfs_readpage - fill a @page of a @file with data from the device
 365  * @file:       open file to which the page @page belongs or NULL
 366  * @page:       page cache page to fill with data
 367  *
 368  * For non-resident attributes, ntfs_readpage() fills the @page of the open
 369  * file @file by calling the ntfs version of the generic block_read_full_page()
 370  * function, ntfs_read_block(), which in turn creates and reads in the buffers
 371  * associated with the page asynchronously.
 372  *
 373  * For resident attributes, OTOH, ntfs_readpage() fills @page by copying the
 374  * data from the mft record (which at this stage is most likely in memory) and
 375  * fills the remainder with zeroes. Thus, in this case, I/O is synchronous, as
 376  * even if the mft record is not cached at this point in time, we need to wait
 377  * for it to be read in before we can do the copy.
 378  *
 379  * Return 0 on success and -errno on error.
 380  */
 381 static int ntfs_readpage(struct file *file, struct page *page)
 382 {
 383         loff_t i_size;
 384         struct inode *vi;
 385         ntfs_inode *ni, *base_ni;
 386         u8 *addr;
 387         ntfs_attr_search_ctx *ctx;
 388         MFT_RECORD *mrec;
 389         unsigned long flags;
 390         u32 attr_len;
 391         int err = 0;
 392 
 393 retry_readpage:
 394         BUG_ON(!PageLocked(page));
 395         vi = page->mapping->host;
 396         i_size = i_size_read(vi);
 397         /* Is the page fully outside i_size? (truncate in progress) */
 398         if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >>
 399                         PAGE_SHIFT)) {
 400                 zero_user(page, 0, PAGE_SIZE);
 401                 ntfs_debug("Read outside i_size - truncated?");
 402                 goto done;
 403         }
 404         /*
 405          * This can potentially happen because we clear PageUptodate() during
 406          * ntfs_writepage() of MstProtected() attributes.
 407          */
 408         if (PageUptodate(page)) {
 409                 unlock_page(page);
 410                 return 0;
 411         }
 412         ni = NTFS_I(vi);
 413         /*
 414          * Only $DATA attributes can be encrypted and only unnamed $DATA
 415          * attributes can be compressed.  Index root can have the flags set but
 416          * this means to create compressed/encrypted files, not that the
 417          * attribute is compressed/encrypted.  Note we need to check for
 418          * AT_INDEX_ALLOCATION since this is the type of both directory and
 419          * index inodes.
 420          */
 421         if (ni->type != AT_INDEX_ALLOCATION) {
 422                 /* If attribute is encrypted, deny access, just like NT4. */
 423                 if (NInoEncrypted(ni)) {
 424                         BUG_ON(ni->type != AT_DATA);
 425                         err = -EACCES;
 426                         goto err_out;
 427                 }
 428                 /* Compressed data streams are handled in compress.c. */
 429                 if (NInoNonResident(ni) && NInoCompressed(ni)) {
 430                         BUG_ON(ni->type != AT_DATA);
 431                         BUG_ON(ni->name_len);
 432                         return ntfs_read_compressed_block(page);
 433                 }
 434         }
 435         /* NInoNonResident() == NInoIndexAllocPresent() */
 436         if (NInoNonResident(ni)) {
 437                 /* Normal, non-resident data stream. */
 438                 return ntfs_read_block(page);
 439         }
 440         /*
 441          * Attribute is resident, implying it is not compressed or encrypted.
 442          * This also means the attribute is smaller than an mft record and
 443          * hence smaller than a page, so can simply zero out any pages with
 444          * index above 0.  Note the attribute can actually be marked compressed
 445          * but if it is resident the actual data is not compressed so we are
 446          * ok to ignore the compressed flag here.
 447          */
 448         if (unlikely(page->index > 0)) {
 449                 zero_user(page, 0, PAGE_SIZE);
 450                 goto done;
 451         }
 452         if (!NInoAttr(ni))
 453                 base_ni = ni;
 454         else
 455                 base_ni = ni->ext.base_ntfs_ino;
 456         /* Map, pin, and lock the mft record. */
 457         mrec = map_mft_record(base_ni);
 458         if (IS_ERR(mrec)) {
 459                 err = PTR_ERR(mrec);
 460                 goto err_out;
 461         }
 462         /*
 463          * If a parallel write made the attribute non-resident, drop the mft
 464          * record and retry the readpage.
 465          */
 466         if (unlikely(NInoNonResident(ni))) {
 467                 unmap_mft_record(base_ni);
 468                 goto retry_readpage;
 469         }
 470         ctx = ntfs_attr_get_search_ctx(base_ni, mrec);
 471         if (unlikely(!ctx)) {
 472                 err = -ENOMEM;
 473                 goto unm_err_out;
 474         }
 475         err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
 476                         CASE_SENSITIVE, 0, NULL, 0, ctx);
 477         if (unlikely(err))
 478                 goto put_unm_err_out;
 479         attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
 480         read_lock_irqsave(&ni->size_lock, flags);
 481         if (unlikely(attr_len > ni->initialized_size))
 482                 attr_len = ni->initialized_size;
 483         i_size = i_size_read(vi);
 484         read_unlock_irqrestore(&ni->size_lock, flags);
 485         if (unlikely(attr_len > i_size)) {
 486                 /* Race with shrinking truncate. */
 487                 attr_len = i_size;
 488         }
 489         addr = kmap_atomic(page);
 490         /* Copy the data to the page. */
 491         memcpy(addr, (u8*)ctx->attr +
 492                         le16_to_cpu(ctx->attr->data.resident.value_offset),
 493                         attr_len);
 494         /* Zero the remainder of the page. */
 495         memset(addr + attr_len, 0, PAGE_SIZE - attr_len);
 496         flush_dcache_page(page);
 497         kunmap_atomic(addr);
 498 put_unm_err_out:
 499         ntfs_attr_put_search_ctx(ctx);
 500 unm_err_out:
 501         unmap_mft_record(base_ni);
 502 done:
 503         SetPageUptodate(page);
 504 err_out:
 505         unlock_page(page);
 506         return err;
 507 }
 508 
 509 #ifdef NTFS_RW
 510 
 511 /**
 512  * ntfs_write_block - write a @page to the backing store
 513  * @page:       page cache page to write out
 514  * @wbc:        writeback control structure
 515  *
 516  * This function is for writing pages belonging to non-resident, non-mst
 517  * protected attributes to their backing store.
 518  *
 519  * For a page with buffers, map and write the dirty buffers asynchronously
 520  * under page writeback. For a page without buffers, create buffers for the
 521  * page, then proceed as above.
 522  *
 523  * If a page doesn't have buffers the page dirty state is definitive. If a page
 524  * does have buffers, the page dirty state is just a hint, and the buffer dirty
 525  * state is definitive. (A hint which has rules: dirty buffers against a clean
 526  * page is illegal. Other combinations are legal and need to be handled. In
 527  * particular a dirty page containing clean buffers for example.)
 528  *
 529  * Return 0 on success and -errno on error.
 530  *
 531  * Based on ntfs_read_block() and __block_write_full_page().
 532  */
 533 static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
 534 {
 535         VCN vcn;
 536         LCN lcn;
 537         s64 initialized_size;
 538         loff_t i_size;
 539         sector_t block, dblock, iblock;
 540         struct inode *vi;
 541         ntfs_inode *ni;
 542         ntfs_volume *vol;
 543         runlist_element *rl;
 544         struct buffer_head *bh, *head;
 545         unsigned long flags;
 546         unsigned int blocksize, vcn_ofs;
 547         int err;
 548         bool need_end_writeback;
 549         unsigned char blocksize_bits;
 550 
 551         vi = page->mapping->host;
 552         ni = NTFS_I(vi);
 553         vol = ni->vol;
 554 
 555         ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
 556                         "0x%lx.", ni->mft_no, ni->type, page->index);
 557 
 558         BUG_ON(!NInoNonResident(ni));
 559         BUG_ON(NInoMstProtected(ni));
 560         blocksize = vol->sb->s_blocksize;
 561         blocksize_bits = vol->sb->s_blocksize_bits;
 562         if (!page_has_buffers(page)) {
 563                 BUG_ON(!PageUptodate(page));
 564                 create_empty_buffers(page, blocksize,
 565                                 (1 << BH_Uptodate) | (1 << BH_Dirty));
 566                 if (unlikely(!page_has_buffers(page))) {
 567                         ntfs_warning(vol->sb, "Error allocating page "
 568                                         "buffers.  Redirtying page so we try "
 569                                         "again later.");
 570                         /*
 571                          * Put the page back on mapping->dirty_pages, but leave
 572                          * its buffers' dirty state as-is.
 573                          */
 574                         redirty_page_for_writepage(wbc, page);
 575                         unlock_page(page);
 576                         return 0;
 577                 }
 578         }
 579         bh = head = page_buffers(page);
 580         BUG_ON(!bh);
 581 
 582         /* NOTE: Different naming scheme to ntfs_read_block()! */
 583 
 584         /* The first block in the page. */
 585         block = (s64)page->index << (PAGE_SHIFT - blocksize_bits);
 586 
 587         read_lock_irqsave(&ni->size_lock, flags);
 588         i_size = i_size_read(vi);
 589         initialized_size = ni->initialized_size;
 590         read_unlock_irqrestore(&ni->size_lock, flags);
 591 
 592         /* The first out of bounds block for the data size. */
 593         dblock = (i_size + blocksize - 1) >> blocksize_bits;
 594 
 595         /* The last (fully or partially) initialized block. */
 596         iblock = initialized_size >> blocksize_bits;
 597 
 598         /*
 599          * Be very careful.  We have no exclusion from __set_page_dirty_buffers
 600          * here, and the (potentially unmapped) buffers may become dirty at
 601          * any time.  If a buffer becomes dirty here after we've inspected it
 602          * then we just miss that fact, and the page stays dirty.
 603          *
 604          * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
 605          * handle that here by just cleaning them.
 606          */
 607 
 608         /*
 609          * Loop through all the buffers in the page, mapping all the dirty
 610          * buffers to disk addresses and handling any aliases from the
 611          * underlying block device's mapping.
 612          */
 613         rl = NULL;
 614         err = 0;
 615         do {
 616                 bool is_retry = false;
 617 
 618                 if (unlikely(block >= dblock)) {
 619                         /*
 620                          * Mapped buffers outside i_size will occur, because
 621                          * this page can be outside i_size when there is a
 622                          * truncate in progress. The contents of such buffers
 623                          * were zeroed by ntfs_writepage().
 624                          *
 625                          * FIXME: What about the small race window where
 626                          * ntfs_writepage() has not done any clearing because
 627                          * the page was within i_size but before we get here,
 628                          * vmtruncate() modifies i_size?
 629                          */
 630                         clear_buffer_dirty(bh);
 631                         set_buffer_uptodate(bh);
 632                         continue;
 633                 }
 634 
 635                 /* Clean buffers are not written out, so no need to map them. */
 636                 if (!buffer_dirty(bh))
 637                         continue;
 638 
 639                 /* Make sure we have enough initialized size. */
 640                 if (unlikely((block >= iblock) &&
 641                                 (initialized_size < i_size))) {
 642                         /*
 643                          * If this page is fully outside initialized size, zero
 644                          * out all pages between the current initialized size
 645                          * and the current page. Just use ntfs_readpage() to do
 646                          * the zeroing transparently.
 647                          */
 648                         if (block > iblock) {
 649                                 // TODO:
 650                                 // For each page do:
 651                                 // - read_cache_page()
 652                                 // Again for each page do:
 653                                 // - wait_on_page_locked()
 654                                 // - Check (PageUptodate(page) &&
 655                                 //                      !PageError(page))
 656                                 // Update initialized size in the attribute and
 657                                 // in the inode.
 658                                 // Again, for each page do:
 659                                 //      __set_page_dirty_buffers();
 660                                 // put_page()
 661                                 // We don't need to wait on the writes.
 662                                 // Update iblock.
 663                         }
 664                         /*
 665                          * The current page straddles initialized size. Zero
 666                          * all non-uptodate buffers and set them uptodate (and
 667                          * dirty?). Note, there aren't any non-uptodate buffers
 668                          * if the page is uptodate.
 669                          * FIXME: For an uptodate page, the buffers may need to
 670                          * be written out because they were not initialized on
 671                          * disk before.
 672                          */
 673                         if (!PageUptodate(page)) {
 674                                 // TODO:
 675                                 // Zero any non-uptodate buffers up to i_size.
 676                                 // Set them uptodate and dirty.
 677                         }
 678                         // TODO:
 679                         // Update initialized size in the attribute and in the
 680                         // inode (up to i_size).
 681                         // Update iblock.
 682                         // FIXME: This is inefficient. Try to batch the two
 683                         // size changes to happen in one go.
 684                         ntfs_error(vol->sb, "Writing beyond initialized size "
 685                                         "is not supported yet. Sorry.");
 686                         err = -EOPNOTSUPP;
 687                         break;
 688                         // Do NOT set_buffer_new() BUT DO clear buffer range
 689                         // outside write request range.
 690                         // set_buffer_uptodate() on complete buffers as well as
 691                         // set_buffer_dirty().
 692                 }
 693 
 694                 /* No need to map buffers that are already mapped. */
 695                 if (buffer_mapped(bh))
 696                         continue;
 697 
 698                 /* Unmapped, dirty buffer. Need to map it. */
 699                 bh->b_bdev = vol->sb->s_bdev;
 700 
 701                 /* Convert block into corresponding vcn and offset. */
 702                 vcn = (VCN)block << blocksize_bits;
 703                 vcn_ofs = vcn & vol->cluster_size_mask;
 704                 vcn >>= vol->cluster_size_bits;
 705                 if (!rl) {
 706 lock_retry_remap:
 707                         down_read(&ni->runlist.lock);
 708                         rl = ni->runlist.rl;
 709                 }
 710                 if (likely(rl != NULL)) {
 711                         /* Seek to element containing target vcn. */
 712                         while (rl->length && rl[1].vcn <= vcn)
 713                                 rl++;
 714                         lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
 715                 } else
 716                         lcn = LCN_RL_NOT_MAPPED;
 717                 /* Successful remap. */
 718                 if (lcn >= 0) {
 719                         /* Setup buffer head to point to correct block. */
 720                         bh->b_blocknr = ((lcn << vol->cluster_size_bits) +
 721                                         vcn_ofs) >> blocksize_bits;
 722                         set_buffer_mapped(bh);
 723                         continue;
 724                 }
 725                 /* It is a hole, need to instantiate it. */
 726                 if (lcn == LCN_HOLE) {
 727                         u8 *kaddr;
 728                         unsigned long *bpos, *bend;
 729 
 730                         /* Check if the buffer is zero. */
 731                         kaddr = kmap_atomic(page);
 732                         bpos = (unsigned long *)(kaddr + bh_offset(bh));
 733                         bend = (unsigned long *)((u8*)bpos + blocksize);
 734                         do {
 735                                 if (unlikely(*bpos))
 736                                         break;
 737                         } while (likely(++bpos < bend));
 738                         kunmap_atomic(kaddr);
 739                         if (bpos == bend) {
 740                                 /*
 741                                  * Buffer is zero and sparse, no need to write
 742                                  * it.
 743                                  */
 744                                 bh->b_blocknr = -1;
 745                                 clear_buffer_dirty(bh);
 746                                 continue;
 747                         }
 748                         // TODO: Instantiate the hole.
 749                         // clear_buffer_new(bh);
 750                         // clean_bdev_bh_alias(bh);
 751                         ntfs_error(vol->sb, "Writing into sparse regions is "
 752                                         "not supported yet. Sorry.");
 753                         err = -EOPNOTSUPP;
 754                         break;
 755                 }
 756                 /* If first try and runlist unmapped, map and retry. */
 757                 if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
 758                         is_retry = true;
 759                         /*
 760                          * Attempt to map runlist, dropping lock for
 761                          * the duration.
 762                          */
 763                         up_read(&ni->runlist.lock);
 764                         err = ntfs_map_runlist(ni, vcn);
 765                         if (likely(!err))
 766                                 goto lock_retry_remap;
 767                         rl = NULL;
 768                 } else if (!rl)
 769                         up_read(&ni->runlist.lock);
 770                 /*
 771                  * If buffer is outside the runlist, truncate has cut it out
 772                  * of the runlist.  Just clean and clear the buffer and set it
 773                  * uptodate so it can get discarded by the VM.
 774                  */
 775                 if (err == -ENOENT || lcn == LCN_ENOENT) {
 776                         bh->b_blocknr = -1;
 777                         clear_buffer_dirty(bh);
 778                         zero_user(page, bh_offset(bh), blocksize);
 779                         set_buffer_uptodate(bh);
 780                         err = 0;
 781                         continue;
 782                 }
 783                 /* Failed to map the buffer, even after retrying. */
 784                 if (!err)
 785                         err = -EIO;
 786                 bh->b_blocknr = -1;
 787                 ntfs_error(vol->sb, "Failed to write to inode 0x%lx, "
 788                                 "attribute type 0x%x, vcn 0x%llx, offset 0x%x "
 789                                 "because its location on disk could not be "
 790                                 "determined%s (error code %i).", ni->mft_no,
 791                                 ni->type, (unsigned long long)vcn,
 792                                 vcn_ofs, is_retry ? " even after "
 793                                 "retrying" : "", err);
 794                 break;
 795         } while (block++, (bh = bh->b_this_page) != head);
 796 
 797         /* Release the lock if we took it. */
 798         if (rl)
 799                 up_read(&ni->runlist.lock);
 800 
 801         /* For the error case, need to reset bh to the beginning. */
 802         bh = head;
 803 
 804         /* Just an optimization, so ->readpage() is not called later. */
 805         if (unlikely(!PageUptodate(page))) {
 806                 int uptodate = 1;
 807                 do {
 808                         if (!buffer_uptodate(bh)) {
 809                                 uptodate = 0;
 810                                 bh = head;
 811                                 break;
 812                         }
 813                 } while ((bh = bh->b_this_page) != head);
 814                 if (uptodate)
 815                         SetPageUptodate(page);
 816         }
 817 
 818         /* Setup all mapped, dirty buffers for async write i/o. */
 819         do {
 820                 if (buffer_mapped(bh) && buffer_dirty(bh)) {
 821                         lock_buffer(bh);
 822                         if (test_clear_buffer_dirty(bh)) {
 823                                 BUG_ON(!buffer_uptodate(bh));
 824                                 mark_buffer_async_write(bh);
 825                         } else
 826                                 unlock_buffer(bh);
 827                 } else if (unlikely(err)) {
 828                         /*
 829                          * For the error case. The buffer may have been set
 830                          * dirty during attachment to a dirty page.
 831                          */
 832                         if (err != -ENOMEM)
 833                                 clear_buffer_dirty(bh);
 834                 }
 835         } while ((bh = bh->b_this_page) != head);
 836 
 837         if (unlikely(err)) {
 838                 // TODO: Remove the -EOPNOTSUPP check later on...
 839                 if (unlikely(err == -EOPNOTSUPP))
 840                         err = 0;
 841                 else if (err == -ENOMEM) {
 842                         ntfs_warning(vol->sb, "Error allocating memory. "
 843                                         "Redirtying page so we try again "
 844                                         "later.");
 845                         /*
 846                          * Put the page back on mapping->dirty_pages, but
 847                          * leave its buffer's dirty state as-is.
 848                          */
 849                         redirty_page_for_writepage(wbc, page);
 850                         err = 0;
 851                 } else
 852                         SetPageError(page);
 853         }
 854 
 855         BUG_ON(PageWriteback(page));
 856         set_page_writeback(page);       /* Keeps try_to_free_buffers() away. */
 857 
 858         /* Submit the prepared buffers for i/o. */
 859         need_end_writeback = true;
 860         do {
 861                 struct buffer_head *next = bh->b_this_page;
 862                 if (buffer_async_write(bh)) {
 863                         submit_bh(REQ_OP_WRITE, 0, bh);
 864                         need_end_writeback = false;
 865                 }
 866                 bh = next;
 867         } while (bh != head);
 868         unlock_page(page);
 869 
 870         /* If no i/o was started, need to end_page_writeback(). */
 871         if (unlikely(need_end_writeback))
 872                 end_page_writeback(page);
 873 
 874         ntfs_debug("Done.");
 875         return err;
 876 }
 877 
 878 /**
 879  * ntfs_write_mst_block - write a @page to the backing store
 880  * @page:       page cache page to write out
 881  * @wbc:        writeback control structure
 882  *
 883  * This function is for writing pages belonging to non-resident, mst protected
 884  * attributes to their backing store.  The only supported attributes are index
 885  * allocation and $MFT/$DATA.  Both directory inodes and index inodes are
 886  * supported for the index allocation case.
 887  *
 888  * The page must remain locked for the duration of the write because we apply
 889  * the mst fixups, write, and then undo the fixups, so if we were to unlock the
 890  * page before undoing the fixups, any other user of the page will see the
 891  * page contents as corrupt.
 892  *
 893  * We clear the page uptodate flag for the duration of the function to ensure
 894  * exclusion for the $MFT/$DATA case against someone mapping an mft record we
 895  * are about to apply the mst fixups to.
 896  *
 897  * Return 0 on success and -errno on error.
 898  *
 899  * Based on ntfs_write_block(), ntfs_mft_writepage(), and
 900  * write_mft_record_nolock().
 901  */
 902 static int ntfs_write_mst_block(struct page *page,
 903                 struct writeback_control *wbc)
 904 {
 905         sector_t block, dblock, rec_block;
 906         struct inode *vi = page->mapping->host;
 907         ntfs_inode *ni = NTFS_I(vi);
 908         ntfs_volume *vol = ni->vol;
 909         u8 *kaddr;
 910         unsigned int rec_size = ni->itype.index.block_size;
 911         ntfs_inode *locked_nis[PAGE_SIZE / NTFS_BLOCK_SIZE];
 912         struct buffer_head *bh, *head, *tbh, *rec_start_bh;
 913         struct buffer_head *bhs[MAX_BUF_PER_PAGE];
 914         runlist_element *rl;
 915         int i, nr_locked_nis, nr_recs, nr_bhs, max_bhs, bhs_per_rec, err, err2;
 916         unsigned bh_size, rec_size_bits;
 917         bool sync, is_mft, page_is_dirty, rec_is_dirty;
 918         unsigned char bh_size_bits;
 919 
 920         if (WARN_ON(rec_size < NTFS_BLOCK_SIZE))
 921                 return -EINVAL;
 922 
 923         ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
 924                         "0x%lx.", vi->i_ino, ni->type, page->index);
 925         BUG_ON(!NInoNonResident(ni));
 926         BUG_ON(!NInoMstProtected(ni));
 927         is_mft = (S_ISREG(vi->i_mode) && !vi->i_ino);
 928         /*
 929          * NOTE: ntfs_write_mst_block() would be called for $MFTMirr if a page
 930          * in its page cache were to be marked dirty.  However this should
 931          * never happen with the current driver and considering we do not
 932          * handle this case here we do want to BUG(), at least for now.
 933          */
 934         BUG_ON(!(is_mft || S_ISDIR(vi->i_mode) ||
 935                         (NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION)));
 936         bh_size = vol->sb->s_blocksize;
 937         bh_size_bits = vol->sb->s_blocksize_bits;
 938         max_bhs = PAGE_SIZE / bh_size;
 939         BUG_ON(!max_bhs);
 940         BUG_ON(max_bhs > MAX_BUF_PER_PAGE);
 941 
 942         /* Were we called for sync purposes? */
 943         sync = (wbc->sync_mode == WB_SYNC_ALL);
 944 
 945         /* Make sure we have mapped buffers. */
 946         bh = head = page_buffers(page);
 947         BUG_ON(!bh);
 948 
 949         rec_size_bits = ni->itype.index.block_size_bits;
 950         BUG_ON(!(PAGE_SIZE >> rec_size_bits));
 951         bhs_per_rec = rec_size >> bh_size_bits;
 952         BUG_ON(!bhs_per_rec);
 953 
 954         /* The first block in the page. */
 955         rec_block = block = (sector_t)page->index <<
 956                         (PAGE_SHIFT - bh_size_bits);
 957 
 958         /* The first out of bounds block for the data size. */
 959         dblock = (i_size_read(vi) + bh_size - 1) >> bh_size_bits;
 960 
 961         rl = NULL;
 962         err = err2 = nr_bhs = nr_recs = nr_locked_nis = 0;
 963         page_is_dirty = rec_is_dirty = false;
 964         rec_start_bh = NULL;
 965         do {
 966                 bool is_retry = false;
 967 
 968                 if (likely(block < rec_block)) {
 969                         if (unlikely(block >= dblock)) {
 970                                 clear_buffer_dirty(bh);
 971                                 set_buffer_uptodate(bh);
 972                                 continue;
 973                         }
 974                         /*
 975                          * This block is not the first one in the record.  We
 976                          * ignore the buffer's dirty state because we could
 977                          * have raced with a parallel mark_ntfs_record_dirty().
 978                          */
 979                         if (!rec_is_dirty)
 980                                 continue;
 981                         if (unlikely(err2)) {
 982                                 if (err2 != -ENOMEM)
 983                                         clear_buffer_dirty(bh);
 984                                 continue;
 985                         }
 986                 } else /* if (block == rec_block) */ {
 987                         BUG_ON(block > rec_block);
 988                         /* This block is the first one in the record. */
 989                         rec_block += bhs_per_rec;
 990                         err2 = 0;
 991                         if (unlikely(block >= dblock)) {
 992                                 clear_buffer_dirty(bh);
 993                                 continue;
 994                         }
 995                         if (!buffer_dirty(bh)) {
 996                                 /* Clean records are not written out. */
 997                                 rec_is_dirty = false;
 998                                 continue;
 999                         }
1000                         rec_is_dirty = true;
1001                         rec_start_bh = bh;
1002                 }
1003                 /* Need to map the buffer if it is not mapped already. */
1004                 if (unlikely(!buffer_mapped(bh))) {
1005                         VCN vcn;
1006                         LCN lcn;
1007                         unsigned int vcn_ofs;
1008 
1009                         bh->b_bdev = vol->sb->s_bdev;
1010                         /* Obtain the vcn and offset of the current block. */
1011                         vcn = (VCN)block << bh_size_bits;
1012                         vcn_ofs = vcn & vol->cluster_size_mask;
1013                         vcn >>= vol->cluster_size_bits;
1014                         if (!rl) {
1015 lock_retry_remap:
1016                                 down_read(&ni->runlist.lock);
1017                                 rl = ni->runlist.rl;
1018                         }
1019                         if (likely(rl != NULL)) {
1020                                 /* Seek to element containing target vcn. */
1021                                 while (rl->length && rl[1].vcn <= vcn)
1022                                         rl++;
1023                                 lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
1024                         } else
1025                                 lcn = LCN_RL_NOT_MAPPED;
1026                         /* Successful remap. */
1027                         if (likely(lcn >= 0)) {
1028                                 /* Setup buffer head to correct block. */
1029                                 bh->b_blocknr = ((lcn <<
1030                                                 vol->cluster_size_bits) +
1031                                                 vcn_ofs) >> bh_size_bits;
1032                                 set_buffer_mapped(bh);
1033                         } else {
1034                                 /*
1035                                  * Remap failed.  Retry to map the runlist once
1036                                  * unless we are working on $MFT which always
1037                                  * has the whole of its runlist in memory.
1038                                  */
1039                                 if (!is_mft && !is_retry &&
1040                                                 lcn == LCN_RL_NOT_MAPPED) {
1041                                         is_retry = true;
1042                                         /*
1043                                          * Attempt to map runlist, dropping
1044                                          * lock for the duration.
1045                                          */
1046                                         up_read(&ni->runlist.lock);
1047                                         err2 = ntfs_map_runlist(ni, vcn);
1048                                         if (likely(!err2))
1049                                                 goto lock_retry_remap;
1050                                         if (err2 == -ENOMEM)
1051                                                 page_is_dirty = true;
1052                                         lcn = err2;
1053                                 } else {
1054                                         err2 = -EIO;
1055                                         if (!rl)
1056                                                 up_read(&ni->runlist.lock);
1057                                 }
1058                                 /* Hard error.  Abort writing this record. */
1059                                 if (!err || err == -ENOMEM)
1060                                         err = err2;
1061                                 bh->b_blocknr = -1;
1062                                 ntfs_error(vol->sb, "Cannot write ntfs record "
1063                                                 "0x%llx (inode 0x%lx, "
1064                                                 "attribute type 0x%x) because "
1065                                                 "its location on disk could "
1066                                                 "not be determined (error "
1067                                                 "code %lli).",
1068                                                 (long long)block <<
1069                                                 bh_size_bits >>
1070                                                 vol->mft_record_size_bits,
1071                                                 ni->mft_no, ni->type,
1072                                                 (long long)lcn);
1073                                 /*
1074                                  * If this is not the first buffer, remove the
1075                                  * buffers in this record from the list of
1076                                  * buffers to write and clear their dirty bit
1077                                  * if not error -ENOMEM.
1078                                  */
1079                                 if (rec_start_bh != bh) {
1080                                         while (bhs[--nr_bhs] != rec_start_bh)
1081                                                 ;
1082                                         if (err2 != -ENOMEM) {
1083                                                 do {
1084                                                         clear_buffer_dirty(
1085                                                                 rec_start_bh);
1086                                                 } while ((rec_start_bh =
1087                                                                 rec_start_bh->
1088                                                                 b_this_page) !=
1089                                                                 bh);
1090                                         }
1091                                 }
1092                                 continue;
1093                         }
1094                 }
1095                 BUG_ON(!buffer_uptodate(bh));
1096                 BUG_ON(nr_bhs >= max_bhs);
1097                 bhs[nr_bhs++] = bh;
1098         } while (block++, (bh = bh->b_this_page) != head);
1099         if (unlikely(rl))
1100                 up_read(&ni->runlist.lock);
1101         /* If there were no dirty buffers, we are done. */
1102         if (!nr_bhs)
1103                 goto done;
1104         /* Map the page so we can access its contents. */
1105         kaddr = kmap(page);
1106         /* Clear the page uptodate flag whilst the mst fixups are applied. */
1107         BUG_ON(!PageUptodate(page));
1108         ClearPageUptodate(page);
1109         for (i = 0; i < nr_bhs; i++) {
1110                 unsigned int ofs;
1111 
1112                 /* Skip buffers which are not at the beginning of records. */
1113                 if (i % bhs_per_rec)
1114                         continue;
1115                 tbh = bhs[i];
1116                 ofs = bh_offset(tbh);
1117                 if (is_mft) {
1118                         ntfs_inode *tni;
1119                         unsigned long mft_no;
1120 
1121                         /* Get the mft record number. */
1122                         mft_no = (((s64)page->index << PAGE_SHIFT) + ofs)
1123                                         >> rec_size_bits;
1124                         /* Check whether to write this mft record. */
1125                         tni = NULL;
1126                         if (!ntfs_may_write_mft_record(vol, mft_no,
1127                                         (MFT_RECORD*)(kaddr + ofs), &tni)) {
1128                                 /*
1129                                  * The record should not be written.  This
1130                                  * means we need to redirty the page before
1131                                  * returning.
1132                                  */
1133                                 page_is_dirty = true;
1134                                 /*
1135                                  * Remove the buffers in this mft record from
1136                                  * the list of buffers to write.
1137                                  */
1138                                 do {
1139                                         bhs[i] = NULL;
1140                                 } while (++i % bhs_per_rec);
1141                                 continue;
1142                         }
1143                         /*
1144                          * The record should be written.  If a locked ntfs
1145                          * inode was returned, add it to the array of locked
1146                          * ntfs inodes.
1147                          */
1148                         if (tni)
1149                                 locked_nis[nr_locked_nis++] = tni;
1150                 }
1151                 /* Apply the mst protection fixups. */
1152                 err2 = pre_write_mst_fixup((NTFS_RECORD*)(kaddr + ofs),
1153                                 rec_size);
1154                 if (unlikely(err2)) {
1155                         if (!err || err == -ENOMEM)
1156                                 err = -EIO;
1157                         ntfs_error(vol->sb, "Failed to apply mst fixups "
1158                                         "(inode 0x%lx, attribute type 0x%x, "
1159                                         "page index 0x%lx, page offset 0x%x)!"
1160                                         "  Unmount and run chkdsk.", vi->i_ino,
1161                                         ni->type, page->index, ofs);
1162                         /*
1163                          * Mark all the buffers in this record clean as we do
1164                          * not want to write corrupt data to disk.
1165                          */
1166                         do {
1167                                 clear_buffer_dirty(bhs[i]);
1168                                 bhs[i] = NULL;
1169                         } while (++i % bhs_per_rec);
1170                         continue;
1171                 }
1172                 nr_recs++;
1173         }
1174         /* If no records are to be written out, we are done. */
1175         if (!nr_recs)
1176                 goto unm_done;
1177         flush_dcache_page(page);
1178         /* Lock buffers and start synchronous write i/o on them. */
1179         for (i = 0; i < nr_bhs; i++) {
1180                 tbh = bhs[i];
1181                 if (!tbh)
1182                         continue;
1183                 if (!trylock_buffer(tbh))
1184                         BUG();
1185                 /* The buffer dirty state is now irrelevant, just clean it. */
1186                 clear_buffer_dirty(tbh);
1187                 BUG_ON(!buffer_uptodate(tbh));
1188                 BUG_ON(!buffer_mapped(tbh));
1189                 get_bh(tbh);
1190                 tbh->b_end_io = end_buffer_write_sync;
1191                 submit_bh(REQ_OP_WRITE, 0, tbh);
1192         }
1193         /* Synchronize the mft mirror now if not @sync. */
1194         if (is_mft && !sync)
1195                 goto do_mirror;
1196 do_wait:
1197         /* Wait on i/o completion of buffers. */
1198         for (i = 0; i < nr_bhs; i++) {
1199                 tbh = bhs[i];
1200                 if (!tbh)
1201                         continue;
1202                 wait_on_buffer(tbh);
1203                 if (unlikely(!buffer_uptodate(tbh))) {
1204                         ntfs_error(vol->sb, "I/O error while writing ntfs "
1205                                         "record buffer (inode 0x%lx, "
1206                                         "attribute type 0x%x, page index "
1207                                         "0x%lx, page offset 0x%lx)!  Unmount "
1208                                         "and run chkdsk.", vi->i_ino, ni->type,
1209                                         page->index, bh_offset(tbh));
1210                         if (!err || err == -ENOMEM)
1211                                 err = -EIO;
1212                         /*
1213                          * Set the buffer uptodate so the page and buffer
1214                          * states do not become out of sync.
1215                          */
1216                         set_buffer_uptodate(tbh);
1217                 }
1218         }
1219         /* If @sync, now synchronize the mft mirror. */
1220         if (is_mft && sync) {
1221 do_mirror:
1222                 for (i = 0; i < nr_bhs; i++) {
1223                         unsigned long mft_no;
1224                         unsigned int ofs;
1225 
1226                         /*
1227                          * Skip buffers which are not at the beginning of
1228                          * records.
1229                          */
1230                         if (i % bhs_per_rec)
1231                                 continue;
1232                         tbh = bhs[i];
1233                         /* Skip removed buffers (and hence records). */
1234                         if (!tbh)
1235                                 continue;
1236                         ofs = bh_offset(tbh);
1237                         /* Get the mft record number. */
1238                         mft_no = (((s64)page->index << PAGE_SHIFT) + ofs)
1239                                         >> rec_size_bits;
1240                         if (mft_no < vol->mftmirr_size)
1241                                 ntfs_sync_mft_mirror(vol, mft_no,
1242                                                 (MFT_RECORD*)(kaddr + ofs),
1243                                                 sync);
1244                 }
1245                 if (!sync)
1246                         goto do_wait;
1247         }
1248         /* Remove the mst protection fixups again. */
1249         for (i = 0; i < nr_bhs; i++) {
1250                 if (!(i % bhs_per_rec)) {
1251                         tbh = bhs[i];
1252                         if (!tbh)
1253                                 continue;
1254                         post_write_mst_fixup((NTFS_RECORD*)(kaddr +
1255                                         bh_offset(tbh)));
1256                 }
1257         }
1258         flush_dcache_page(page);
1259 unm_done:
1260         /* Unlock any locked inodes. */
1261         while (nr_locked_nis-- > 0) {
1262                 ntfs_inode *tni, *base_tni;
1263                 
1264                 tni = locked_nis[nr_locked_nis];
1265                 /* Get the base inode. */
1266                 mutex_lock(&tni->extent_lock);
1267                 if (tni->nr_extents >= 0)
1268                         base_tni = tni;
1269                 else {
1270                         base_tni = tni->ext.base_ntfs_ino;
1271                         BUG_ON(!base_tni);
1272                 }
1273                 mutex_unlock(&tni->extent_lock);
1274                 ntfs_debug("Unlocking %s inode 0x%lx.",
1275                                 tni == base_tni ? "base" : "extent",
1276                                 tni->mft_no);
1277                 mutex_unlock(&tni->mrec_lock);
1278                 atomic_dec(&tni->count);
1279                 iput(VFS_I(base_tni));
1280         }
1281         SetPageUptodate(page);
1282         kunmap(page);
1283 done:
1284         if (unlikely(err && err != -ENOMEM)) {
1285                 /*
1286                  * Set page error if there is only one ntfs record in the page.
1287                  * Otherwise we would loose per-record granularity.
1288                  */
1289                 if (ni->itype.index.block_size == PAGE_SIZE)
1290                         SetPageError(page);
1291                 NVolSetErrors(vol);
1292         }
1293         if (page_is_dirty) {
1294                 ntfs_debug("Page still contains one or more dirty ntfs "
1295                                 "records.  Redirtying the page starting at "
1296                                 "record 0x%lx.", page->index <<
1297                                 (PAGE_SHIFT - rec_size_bits));
1298                 redirty_page_for_writepage(wbc, page);
1299                 unlock_page(page);
1300         } else {
1301                 /*
1302                  * Keep the VM happy.  This must be done otherwise the
1303                  * radix-tree tag PAGECACHE_TAG_DIRTY remains set even though
1304                  * the page is clean.
1305                  */
1306                 BUG_ON(PageWriteback(page));
1307                 set_page_writeback(page);
1308                 unlock_page(page);
1309                 end_page_writeback(page);
1310         }
1311         if (likely(!err))
1312                 ntfs_debug("Done.");
1313         return err;
1314 }
1315 
1316 /**
1317  * ntfs_writepage - write a @page to the backing store
1318  * @page:       page cache page to write out
1319  * @wbc:        writeback control structure
1320  *
1321  * This is called from the VM when it wants to have a dirty ntfs page cache
1322  * page cleaned.  The VM has already locked the page and marked it clean.
1323  *
1324  * For non-resident attributes, ntfs_writepage() writes the @page by calling
1325  * the ntfs version of the generic block_write_full_page() function,
1326  * ntfs_write_block(), which in turn if necessary creates and writes the
1327  * buffers associated with the page asynchronously.
1328  *
1329  * For resident attributes, OTOH, ntfs_writepage() writes the @page by copying
1330  * the data to the mft record (which at this stage is most likely in memory).
1331  * The mft record is then marked dirty and written out asynchronously via the
1332  * vfs inode dirty code path for the inode the mft record belongs to or via the
1333  * vm page dirty code path for the page the mft record is in.
1334  *
1335  * Based on ntfs_readpage() and fs/buffer.c::block_write_full_page().
1336  *
1337  * Return 0 on success and -errno on error.
1338  */
1339 static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
1340 {
1341         loff_t i_size;
1342         struct inode *vi = page->mapping->host;
1343         ntfs_inode *base_ni = NULL, *ni = NTFS_I(vi);
1344         char *addr;
1345         ntfs_attr_search_ctx *ctx = NULL;
1346         MFT_RECORD *m = NULL;
1347         u32 attr_len;
1348         int err;
1349 
1350 retry_writepage:
1351         BUG_ON(!PageLocked(page));
1352         i_size = i_size_read(vi);
1353         /* Is the page fully outside i_size? (truncate in progress) */
1354         if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >>
1355                         PAGE_SHIFT)) {
1356                 /*
1357                  * The page may have dirty, unmapped buffers.  Make them
1358                  * freeable here, so the page does not leak.
1359                  */
1360                 block_invalidatepage(page, 0, PAGE_SIZE);
1361                 unlock_page(page);
1362                 ntfs_debug("Write outside i_size - truncated?");
1363                 return 0;
1364         }
1365         /*
1366          * Only $DATA attributes can be encrypted and only unnamed $DATA
1367          * attributes can be compressed.  Index root can have the flags set but
1368          * this means to create compressed/encrypted files, not that the
1369          * attribute is compressed/encrypted.  Note we need to check for
1370          * AT_INDEX_ALLOCATION since this is the type of both directory and
1371          * index inodes.
1372          */
1373         if (ni->type != AT_INDEX_ALLOCATION) {
1374                 /* If file is encrypted, deny access, just like NT4. */
1375                 if (NInoEncrypted(ni)) {
1376                         unlock_page(page);
1377                         BUG_ON(ni->type != AT_DATA);
1378                         ntfs_debug("Denying write access to encrypted file.");
1379                         return -EACCES;
1380                 }
1381                 /* Compressed data streams are handled in compress.c. */
1382                 if (NInoNonResident(ni) && NInoCompressed(ni)) {
1383                         BUG_ON(ni->type != AT_DATA);
1384                         BUG_ON(ni->name_len);
1385                         // TODO: Implement and replace this with
1386                         // return ntfs_write_compressed_block(page);
1387                         unlock_page(page);
1388                         ntfs_error(vi->i_sb, "Writing to compressed files is "
1389                                         "not supported yet.  Sorry.");
1390                         return -EOPNOTSUPP;
1391                 }
1392                 // TODO: Implement and remove this check.
1393                 if (NInoNonResident(ni) && NInoSparse(ni)) {
1394                         unlock_page(page);
1395                         ntfs_error(vi->i_sb, "Writing to sparse files is not "
1396                                         "supported yet.  Sorry.");
1397                         return -EOPNOTSUPP;
1398                 }
1399         }
1400         /* NInoNonResident() == NInoIndexAllocPresent() */
1401         if (NInoNonResident(ni)) {
1402                 /* We have to zero every time due to mmap-at-end-of-file. */
1403                 if (page->index >= (i_size >> PAGE_SHIFT)) {
1404                         /* The page straddles i_size. */
1405                         unsigned int ofs = i_size & ~PAGE_MASK;
1406                         zero_user_segment(page, ofs, PAGE_SIZE);
1407                 }
1408                 /* Handle mst protected attributes. */
1409                 if (NInoMstProtected(ni))
1410                         return ntfs_write_mst_block(page, wbc);
1411                 /* Normal, non-resident data stream. */
1412                 return ntfs_write_block(page, wbc);
1413         }
1414         /*
1415          * Attribute is resident, implying it is not compressed, encrypted, or
1416          * mst protected.  This also means the attribute is smaller than an mft
1417          * record and hence smaller than a page, so can simply return error on
1418          * any pages with index above 0.  Note the attribute can actually be
1419          * marked compressed but if it is resident the actual data is not
1420          * compressed so we are ok to ignore the compressed flag here.
1421          */
1422         BUG_ON(page_has_buffers(page));
1423         BUG_ON(!PageUptodate(page));
1424         if (unlikely(page->index > 0)) {
1425                 ntfs_error(vi->i_sb, "BUG()! page->index (0x%lx) > 0.  "
1426                                 "Aborting write.", page->index);
1427                 BUG_ON(PageWriteback(page));
1428                 set_page_writeback(page);
1429                 unlock_page(page);
1430                 end_page_writeback(page);
1431                 return -EIO;
1432         }
1433         if (!NInoAttr(ni))
1434                 base_ni = ni;
1435         else
1436                 base_ni = ni->ext.base_ntfs_ino;
1437         /* Map, pin, and lock the mft record. */
1438         m = map_mft_record(base_ni);
1439         if (IS_ERR(m)) {
1440                 err = PTR_ERR(m);
1441                 m = NULL;
1442                 ctx = NULL;
1443                 goto err_out;
1444         }
1445         /*
1446          * If a parallel write made the attribute non-resident, drop the mft
1447          * record and retry the writepage.
1448          */
1449         if (unlikely(NInoNonResident(ni))) {
1450                 unmap_mft_record(base_ni);
1451                 goto retry_writepage;
1452         }
1453         ctx = ntfs_attr_get_search_ctx(base_ni, m);
1454         if (unlikely(!ctx)) {
1455                 err = -ENOMEM;
1456                 goto err_out;
1457         }
1458         err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
1459                         CASE_SENSITIVE, 0, NULL, 0, ctx);
1460         if (unlikely(err))
1461                 goto err_out;
1462         /*
1463          * Keep the VM happy.  This must be done otherwise the radix-tree tag
1464          * PAGECACHE_TAG_DIRTY remains set even though the page is clean.
1465          */
1466         BUG_ON(PageWriteback(page));
1467         set_page_writeback(page);
1468         unlock_page(page);
1469         attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
1470         i_size = i_size_read(vi);
1471         if (unlikely(attr_len > i_size)) {
1472                 /* Race with shrinking truncate or a failed truncate. */
1473                 attr_len = i_size;
1474                 /*
1475                  * If the truncate failed, fix it up now.  If a concurrent
1476                  * truncate, we do its job, so it does not have to do anything.
1477                  */
1478                 err = ntfs_resident_attr_value_resize(ctx->mrec, ctx->attr,
1479                                 attr_len);
1480                 /* Shrinking cannot fail. */
1481                 BUG_ON(err);
1482         }
1483         addr = kmap_atomic(page);
1484         /* Copy the data from the page to the mft record. */
1485         memcpy((u8*)ctx->attr +
1486                         le16_to_cpu(ctx->attr->data.resident.value_offset),
1487                         addr, attr_len);
1488         /* Zero out of bounds area in the page cache page. */
1489         memset(addr + attr_len, 0, PAGE_SIZE - attr_len);
1490         kunmap_atomic(addr);
1491         flush_dcache_page(page);
1492         flush_dcache_mft_record_page(ctx->ntfs_ino);
1493         /* We are done with the page. */
1494         end_page_writeback(page);
1495         /* Finally, mark the mft record dirty, so it gets written back. */
1496         mark_mft_record_dirty(ctx->ntfs_ino);
1497         ntfs_attr_put_search_ctx(ctx);
1498         unmap_mft_record(base_ni);
1499         return 0;
1500 err_out:
1501         if (err == -ENOMEM) {
1502                 ntfs_warning(vi->i_sb, "Error allocating memory. Redirtying "
1503                                 "page so we try again later.");
1504                 /*
1505                  * Put the page back on mapping->dirty_pages, but leave its
1506                  * buffers' dirty state as-is.
1507                  */
1508                 redirty_page_for_writepage(wbc, page);
1509                 err = 0;
1510         } else {
1511                 ntfs_error(vi->i_sb, "Resident attribute write failed with "
1512                                 "error %i.", err);
1513                 SetPageError(page);
1514                 NVolSetErrors(ni->vol);
1515         }
1516         unlock_page(page);
1517         if (ctx)
1518                 ntfs_attr_put_search_ctx(ctx);
1519         if (m)
1520                 unmap_mft_record(base_ni);
1521         return err;
1522 }
1523 
1524 #endif  /* NTFS_RW */
1525 
1526 /**
1527  * ntfs_bmap - map logical file block to physical device block
1528  * @mapping:    address space mapping to which the block to be mapped belongs
1529  * @block:      logical block to map to its physical device block
1530  *
1531  * For regular, non-resident files (i.e. not compressed and not encrypted), map
1532  * the logical @block belonging to the file described by the address space
1533  * mapping @mapping to its physical device block.
1534  *
1535  * The size of the block is equal to the @s_blocksize field of the super block
1536  * of the mounted file system which is guaranteed to be smaller than or equal
1537  * to the cluster size thus the block is guaranteed to fit entirely inside the
1538  * cluster which means we do not need to care how many contiguous bytes are
1539  * available after the beginning of the block.
1540  *
1541  * Return the physical device block if the mapping succeeded or 0 if the block
1542  * is sparse or there was an error.
1543  *
1544  * Note: This is a problem if someone tries to run bmap() on $Boot system file
1545  * as that really is in block zero but there is nothing we can do.  bmap() is
1546  * just broken in that respect (just like it cannot distinguish sparse from
1547  * not available or error).
1548  */
1549 static sector_t ntfs_bmap(struct address_space *mapping, sector_t block)
1550 {
1551         s64 ofs, size;
1552         loff_t i_size;
1553         LCN lcn;
1554         unsigned long blocksize, flags;
1555         ntfs_inode *ni = NTFS_I(mapping->host);
1556         ntfs_volume *vol = ni->vol;
1557         unsigned delta;
1558         unsigned char blocksize_bits, cluster_size_shift;
1559 
1560         ntfs_debug("Entering for mft_no 0x%lx, logical block 0x%llx.",
1561                         ni->mft_no, (unsigned long long)block);
1562         if (ni->type != AT_DATA || !NInoNonResident(ni) || NInoEncrypted(ni)) {
1563                 ntfs_error(vol->sb, "BMAP does not make sense for %s "
1564                                 "attributes, returning 0.",
1565                                 (ni->type != AT_DATA) ? "non-data" :
1566                                 (!NInoNonResident(ni) ? "resident" :
1567                                 "encrypted"));
1568                 return 0;
1569         }
1570         /* None of these can happen. */
1571         BUG_ON(NInoCompressed(ni));
1572         BUG_ON(NInoMstProtected(ni));
1573         blocksize = vol->sb->s_blocksize;
1574         blocksize_bits = vol->sb->s_blocksize_bits;
1575         ofs = (s64)block << blocksize_bits;
1576         read_lock_irqsave(&ni->size_lock, flags);
1577         size = ni->initialized_size;
1578         i_size = i_size_read(VFS_I(ni));
1579         read_unlock_irqrestore(&ni->size_lock, flags);
1580         /*
1581          * If the offset is outside the initialized size or the block straddles
1582          * the initialized size then pretend it is a hole unless the
1583          * initialized size equals the file size.
1584          */
1585         if (unlikely(ofs >= size || (ofs + blocksize > size && size < i_size)))
1586                 goto hole;
1587         cluster_size_shift = vol->cluster_size_bits;
1588         down_read(&ni->runlist.lock);
1589         lcn = ntfs_attr_vcn_to_lcn_nolock(ni, ofs >> cluster_size_shift, false);
1590         up_read(&ni->runlist.lock);
1591         if (unlikely(lcn < LCN_HOLE)) {
1592                 /*
1593                  * Step down to an integer to avoid gcc doing a long long
1594                  * comparision in the switch when we know @lcn is between
1595                  * LCN_HOLE and LCN_EIO (i.e. -1 to -5).
1596                  *
1597                  * Otherwise older gcc (at least on some architectures) will
1598                  * try to use __cmpdi2() which is of course not available in
1599                  * the kernel.
1600                  */
1601                 switch ((int)lcn) {
1602                 case LCN_ENOENT:
1603                         /*
1604                          * If the offset is out of bounds then pretend it is a
1605                          * hole.
1606                          */
1607                         goto hole;
1608                 case LCN_ENOMEM:
1609                         ntfs_error(vol->sb, "Not enough memory to complete "
1610                                         "mapping for inode 0x%lx.  "
1611                                         "Returning 0.", ni->mft_no);
1612                         break;
1613                 default:
1614                         ntfs_error(vol->sb, "Failed to complete mapping for "
1615                                         "inode 0x%lx.  Run chkdsk.  "
1616                                         "Returning 0.", ni->mft_no);
1617                         break;
1618                 }
1619                 return 0;
1620         }
1621         if (lcn < 0) {
1622                 /* It is a hole. */
1623 hole:
1624                 ntfs_debug("Done (returning hole).");
1625                 return 0;
1626         }
1627         /*
1628          * The block is really allocated and fullfils all our criteria.
1629          * Convert the cluster to units of block size and return the result.
1630          */
1631         delta = ofs & vol->cluster_size_mask;
1632         if (unlikely(sizeof(block) < sizeof(lcn))) {
1633                 block = lcn = ((lcn << cluster_size_shift) + delta) >>
1634                                 blocksize_bits;
1635                 /* If the block number was truncated return 0. */
1636                 if (unlikely(block != lcn)) {
1637                         ntfs_error(vol->sb, "Physical block 0x%llx is too "
1638                                         "large to be returned, returning 0.",
1639                                         (long long)lcn);
1640                         return 0;
1641                 }
1642         } else
1643                 block = ((lcn << cluster_size_shift) + delta) >>
1644                                 blocksize_bits;
1645         ntfs_debug("Done (returning block 0x%llx).", (unsigned long long)lcn);
1646         return block;
1647 }
1648 
1649 /**
1650  * ntfs_normal_aops - address space operations for normal inodes and attributes
1651  *
1652  * Note these are not used for compressed or mst protected inodes and
1653  * attributes.
1654  */
1655 const struct address_space_operations ntfs_normal_aops = {
1656         .readpage       = ntfs_readpage,
1657 #ifdef NTFS_RW
1658         .writepage      = ntfs_writepage,
1659         .set_page_dirty = __set_page_dirty_buffers,
1660 #endif /* NTFS_RW */
1661         .bmap           = ntfs_bmap,
1662         .migratepage    = buffer_migrate_page,
1663         .is_partially_uptodate = block_is_partially_uptodate,
1664         .error_remove_page = generic_error_remove_page,
1665 };
1666 
1667 /**
1668  * ntfs_compressed_aops - address space operations for compressed inodes
1669  */
1670 const struct address_space_operations ntfs_compressed_aops = {
1671         .readpage       = ntfs_readpage,
1672 #ifdef NTFS_RW
1673         .writepage      = ntfs_writepage,
1674         .set_page_dirty = __set_page_dirty_buffers,
1675 #endif /* NTFS_RW */
1676         .migratepage    = buffer_migrate_page,
1677         .is_partially_uptodate = block_is_partially_uptodate,
1678         .error_remove_page = generic_error_remove_page,
1679 };
1680 
1681 /**
1682  * ntfs_mst_aops - general address space operations for mst protecteed inodes
1683  *                 and attributes
1684  */
1685 const struct address_space_operations ntfs_mst_aops = {
1686         .readpage       = ntfs_readpage,        /* Fill page with data. */
1687 #ifdef NTFS_RW
1688         .writepage      = ntfs_writepage,       /* Write dirty page to disk. */
1689         .set_page_dirty = __set_page_dirty_nobuffers,   /* Set the page dirty
1690                                                    without touching the buffers
1691                                                    belonging to the page. */
1692 #endif /* NTFS_RW */
1693         .migratepage    = buffer_migrate_page,
1694         .is_partially_uptodate  = block_is_partially_uptodate,
1695         .error_remove_page = generic_error_remove_page,
1696 };
1697 
1698 #ifdef NTFS_RW
1699 
1700 /**
1701  * mark_ntfs_record_dirty - mark an ntfs record dirty
1702  * @page:       page containing the ntfs record to mark dirty
1703  * @ofs:        byte offset within @page at which the ntfs record begins
1704  *
1705  * Set the buffers and the page in which the ntfs record is located dirty.
1706  *
1707  * The latter also marks the vfs inode the ntfs record belongs to dirty
1708  * (I_DIRTY_PAGES only).
1709  *
1710  * If the page does not have buffers, we create them and set them uptodate.
1711  * The page may not be locked which is why we need to handle the buffers under
1712  * the mapping->private_lock.  Once the buffers are marked dirty we no longer
1713  * need the lock since try_to_free_buffers() does not free dirty buffers.
1714  */
1715 void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) {
1716         struct address_space *mapping = page->mapping;
1717         ntfs_inode *ni = NTFS_I(mapping->host);
1718         struct buffer_head *bh, *head, *buffers_to_free = NULL;
1719         unsigned int end, bh_size, bh_ofs;
1720 
1721         BUG_ON(!PageUptodate(page));
1722         end = ofs + ni->itype.index.block_size;
1723         bh_size = VFS_I(ni)->i_sb->s_blocksize;
1724         spin_lock(&mapping->private_lock);
1725         if (unlikely(!page_has_buffers(page))) {
1726                 spin_unlock(&mapping->private_lock);
1727                 bh = head = alloc_page_buffers(page, bh_size, true);
1728                 spin_lock(&mapping->private_lock);
1729                 if (likely(!page_has_buffers(page))) {
1730                         struct buffer_head *tail;
1731 
1732                         do {
1733                                 set_buffer_uptodate(bh);
1734                                 tail = bh;
1735                                 bh = bh->b_this_page;
1736                         } while (bh);
1737                         tail->b_this_page = head;
1738                         attach_page_buffers(page, head);
1739                 } else
1740                         buffers_to_free = bh;
1741         }
1742         bh = head = page_buffers(page);
1743         BUG_ON(!bh);
1744         do {
1745                 bh_ofs = bh_offset(bh);
1746                 if (bh_ofs + bh_size <= ofs)
1747                         continue;
1748                 if (unlikely(bh_ofs >= end))
1749                         break;
1750                 set_buffer_dirty(bh);
1751         } while ((bh = bh->b_this_page) != head);
1752         spin_unlock(&mapping->private_lock);
1753         __set_page_dirty_nobuffers(page);
1754         if (unlikely(buffers_to_free)) {
1755                 do {
1756                         bh = buffers_to_free->b_this_page;
1757                         free_buffer_head(buffers_to_free);
1758                         buffers_to_free = bh;
1759                 } while (buffers_to_free);
1760         }
1761 }
1762 
1763 #endif /* NTFS_RW */

/* [<][>][^][v][top][bottom][index][help] */