fs/afs/write.c


DEFINITIONS

This source file includes the following definitions:
  1. afs_set_page_dirty
  2. afs_fill_page
  3. afs_write_begin
  4. afs_write_end
  5. afs_kill_pages
  6. afs_redirty_pages
  7. afs_pages_written_back
  8. afs_store_data
  9. afs_write_back_from_locked_page
  10. afs_writepage
  11. afs_writepages_region
  12. afs_writepages
  13. afs_file_write
  14. afs_fsync
  15. afs_page_mkwrite
  16. afs_prune_wb_keys
  17. afs_launder_page

// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
        _enter("");
        return __set_page_dirty_nobuffers(page);
}

/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
                         loff_t pos, unsigned int len, struct page *page)
{
        struct afs_read *req;
        size_t p;
        void *data;
        int ret;

        _enter(",,%llu", (unsigned long long)pos);

        if (pos >= vnode->vfs_inode.i_size) {
                p = pos & ~PAGE_MASK;
                ASSERTCMP(p + len, <=, PAGE_SIZE);
                data = kmap(page);
                memset(data + p, 0, len);
                kunmap(page);
                return 0;
        }

        req = kzalloc(struct_size(req, array, 1), GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        refcount_set(&req->usage, 1);
        req->pos = pos;
        req->len = len;
        req->nr_pages = 1;
        req->pages = req->array;
        req->pages[0] = page;
        get_page(page);

        ret = afs_fetch_data(vnode, key, req);
        afs_put_read(req);
        if (ret < 0) {
                if (ret == -ENOENT) {
                        _debug("got NOENT from server"
                               " - marking file deleted and stale");
                        set_bit(AFS_VNODE_DELETED, &vnode->flags);
                        ret = -ESTALE;
                }
        }

        _leave(" = %d", ret);
        return ret;
}
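
afs_fill_page() above takes one of two paths: a region at or beyond the
current EOF has nothing on the server behind it, so it is zero-filled in
place; anything else is fetched with a one-page afs_read request.  A minimal
userspace sketch of the same decision follows; a plain file descriptor and
buffer stand in for the vnode and page, and fill_buf() is a hypothetical
name, not a kernel API.

/* Sketch: zero-fill past EOF, otherwise read from the backing store,
 * mirroring the structure of afs_fill_page() above.
 */
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

static int fill_buf(int fd, char *buf, off_t pos, size_t len)
{
        struct stat st;

        if (fstat(fd, &st) < 0)
                return -1;

        if (pos >= st.st_size) {
                memset(buf, 0, len);    /* beyond EOF: nothing to fetch */
                return 0;
        }

        /* Fetch the data; afs_fetch_data()'s unmarshaller similarly clears
         * any bytes beyond the file end (short reads are ignored here for
         * brevity).
         */
        return pread(fd, buf, len, pos) < 0 ? -1 : 0;
}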

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
                    loff_t pos, unsigned len, unsigned flags,
                    struct page **pagep, void **fsdata)
{
        struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
        struct page *page;
        struct key *key = afs_file_key(file);
        unsigned long priv;
        unsigned f, from = pos & (PAGE_SIZE - 1);
        unsigned t, to = from + len;
        pgoff_t index = pos >> PAGE_SHIFT;
        int ret;

        _enter("{%llx:%llu},{%lx},%u,%u",
               vnode->fid.vid, vnode->fid.vnode, index, from, to);

        /* We want to store information about how much of a page is altered in
         * page->private.
         */
        BUILD_BUG_ON(PAGE_SIZE > 32768 && sizeof(page->private) < 8);

        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
                return -ENOMEM;

        if (!PageUptodate(page) && len != PAGE_SIZE) {
                ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
                if (ret < 0) {
                        unlock_page(page);
                        put_page(page);
                        _leave(" = %d [prep]", ret);
                        return ret;
                }
                SetPageUptodate(page);
        }

        /* page won't leak in error case: it eventually gets cleaned off LRU */
        *pagep = page;

try_again:
        /* See if this page is already partially written in a way that we can
         * merge the new write with.
         */
        t = f = 0;
        if (PagePrivate(page)) {
                priv = page_private(page);
                f = priv & AFS_PRIV_MAX;
                t = priv >> AFS_PRIV_SHIFT;
                ASSERTCMP(f, <=, t);
        }

        if (f != t) {
                if (PageWriteback(page)) {
                        trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
                                             page->index, priv);
                        goto flush_conflicting_write;
                }
                /* If the file is being filled locally, allow inter-write
                 * spaces to be merged into writes.  If it's not, only write
                 * back what the user gives us.
                 */
                if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
                    (to < f || from > t))
                        goto flush_conflicting_write;
                if (from < f)
                        f = from;
                if (to > t)
                        t = to;
        } else {
                f = from;
                t = to;
        }

        priv = (unsigned long)t << AFS_PRIV_SHIFT;
        priv |= f;
        trace_afs_page_dirty(vnode, tracepoint_string("begin"),
                             page->index, priv);
        SetPagePrivate(page);
        set_page_private(page, priv);
        _leave(" = 0");
        return 0;

        /* The previous write and this write aren't adjacent or overlapping, so
         * flush the page out.
         */
flush_conflicting_write:
        _debug("flush conflict");
        ret = write_one_page(page);
        if (ret < 0) {
                _leave(" = %d", ret);
                return ret;
        }

        ret = lock_page_killable(page);
        if (ret < 0) {
                _leave(" = %d", ret);
                return ret;
        }
        goto try_again;
}
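
The bookkeeping above packs the dirty byte range of the page into the single
page->private word: the "to" offset in the high bits (AFS_PRIV_SHIFT) and the
"from" offset masked by AFS_PRIV_MAX, merging a new write into the recorded
range when the two touch or overlap.  Below is a self-contained sketch of
that encoding and merge test; PRIV_SHIFT and PRIV_MAX are illustrative
stand-ins for the kernel's constants, which live in fs/afs/internal.h.

/* Sketch of the page->private dirty-range encoding used above: the range
 * [f, t) is packed into one unsigned long, "to" in the high half.
 */
#include <assert.h>

#define PRIV_SHIFT      (8 * sizeof(unsigned long) / 2)
#define PRIV_MAX        ((1UL << PRIV_SHIFT) - 1)

static unsigned long priv_encode(unsigned long f, unsigned long t)
{
        return (t << PRIV_SHIFT) | (f & PRIV_MAX);
}

static void priv_decode(unsigned long priv, unsigned long *f, unsigned long *t)
{
        *f = priv & PRIV_MAX;
        *t = priv >> PRIV_SHIFT;
}

int main(void)
{
        unsigned long priv, f, t, from = 900, to = 2048;

        priv = priv_encode(512, 1024);  /* first write dirtied [512, 1024) */
        priv_decode(priv, &f, &t);

        /* The new write [900, 2048) overlaps [f, t), so merge as
         * afs_write_begin() does; a disjoint write would instead flush the
         * old range first.
         */
        assert(!(to < f || from > t));
        if (from < f)
                f = from;
        if (to > t)
                t = to;
        priv = priv_encode(f, t);
        priv_decode(priv, &f, &t);
        assert(f == 512 && t == 2048);
        return 0;
}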

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
                  loff_t pos, unsigned len, unsigned copied,
                  struct page *page, void *fsdata)
{
        struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
        struct key *key = afs_file_key(file);
        loff_t i_size, maybe_i_size;
        int ret;

        _enter("{%llx:%llu},{%lx}",
               vnode->fid.vid, vnode->fid.vnode, page->index);

        maybe_i_size = pos + copied;

        i_size = i_size_read(&vnode->vfs_inode);
        if (maybe_i_size > i_size) {
                spin_lock(&vnode->wb_lock);
                i_size = i_size_read(&vnode->vfs_inode);
                if (maybe_i_size > i_size)
                        i_size_write(&vnode->vfs_inode, maybe_i_size);
                spin_unlock(&vnode->wb_lock);
        }

        if (!PageUptodate(page)) {
                if (copied < len) {
                        /* Try and load any missing data from the server.  The
                         * unmarshalling routine will take care of clearing any
                         * bits that are beyond the EOF.
                         */
                        ret = afs_fill_page(vnode, key, pos + copied,
                                            len - copied, page);
                        if (ret < 0)
                                goto out;
                }
                SetPageUptodate(page);
        }

        set_page_dirty(page);
        if (PageDirty(page))
                _debug("dirtied");
        ret = copied;

out:
        unlock_page(page);
        put_page(page);
        return ret;
}
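
Note the shape of the i_size update above: a cheap unlocked comparison first,
then a recheck under wb_lock so that two racing writers cannot move the file
size backwards.  A sketch of that double-checked pattern, with a pthread
mutex standing in for the spinlock and update_size() as a hypothetical
helper (the kernel additionally uses i_size_read()/i_size_write() to make
the unlocked load tear-free):

/* Sketch of the double-checked size update in afs_write_end(). */
#include <pthread.h>

static long i_size;
static pthread_mutex_t wb_lock = PTHREAD_MUTEX_INITIALIZER;

static void update_size(long maybe_i_size)
{
        if (maybe_i_size <= i_size)     /* cheap unlocked fast path */
                return;

        pthread_mutex_lock(&wb_lock);
        if (maybe_i_size > i_size)      /* recheck: another writer may have won */
                i_size = maybe_i_size;
        pthread_mutex_unlock(&wb_lock);
}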

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
                           pgoff_t first, pgoff_t last)
{
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
        struct pagevec pv;
        unsigned count, loop;

        _enter("{%llx:%llu},%lx-%lx",
               vnode->fid.vid, vnode->fid.vnode, first, last);

        pagevec_init(&pv);

        do {
                _debug("kill %lx-%lx", first, last);

                count = last - first + 1;
                if (count > PAGEVEC_SIZE)
                        count = PAGEVEC_SIZE;
                pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
                ASSERTCMP(pv.nr, ==, count);

                for (loop = 0; loop < count; loop++) {
                        struct page *page = pv.pages[loop];
                        ClearPageUptodate(page);
                        SetPageError(page);
                        end_page_writeback(page);
                        if (page->index >= first)
                                first = page->index + 1;
                        lock_page(page);
                        generic_error_remove_page(mapping, page);
                        unlock_page(page);
                }

                __pagevec_release(&pv);
        } while (first <= last);

        _leave("");
}
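
afs_kill_pages() above and afs_redirty_pages() below share the same skeleton:
walk an arbitrarily long page range in PAGEVEC_SIZE batches, advancing first
past each page handled.  Reduced to plain C (BATCH standing in for
PAGEVEC_SIZE, which is 15 in this era of the kernel, and process() a
hypothetical per-item callback), the loop looks like this:

/* Sketch of the pagevec-style batching loop used by afs_kill_pages() and
 * afs_redirty_pages(): consume [first, last] at most BATCH items at a time.
 */
#define BATCH 15

static void for_each_in_range(unsigned long first, unsigned long last,
                              void (*process)(unsigned long))
{
        unsigned long count, i;

        do {
                count = last - first + 1;
                if (count > BATCH)
                        count = BATCH;
                for (i = 0; i < count; i++)
                        process(first + i);
                first += count;
        } while (first <= last);
}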

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
                              struct address_space *mapping,
                              pgoff_t first, pgoff_t last)
{
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
        struct pagevec pv;
        unsigned count, loop;

        _enter("{%llx:%llu},%lx-%lx",
               vnode->fid.vid, vnode->fid.vnode, first, last);

        pagevec_init(&pv);

        do {
                _debug("redirty %lx-%lx", first, last);

                count = last - first + 1;
                if (count > PAGEVEC_SIZE)
                        count = PAGEVEC_SIZE;
                pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
                ASSERTCMP(pv.nr, ==, count);

                for (loop = 0; loop < count; loop++) {
                        struct page *page = pv.pages[loop];

                        redirty_page_for_writepage(wbc, page);
                        end_page_writeback(page);
                        if (page->index >= first)
                                first = page->index + 1;
                }

                __pagevec_release(&pv);
        } while (first <= last);

        _leave("");
}

/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode,
                                   pgoff_t first, pgoff_t last)
{
        struct pagevec pv;
        unsigned long priv;
        unsigned count, loop;

        _enter("{%llx:%llu},{%lx-%lx}",
               vnode->fid.vid, vnode->fid.vnode, first, last);

        pagevec_init(&pv);

        do {
                _debug("done %lx-%lx", first, last);

                count = last - first + 1;
                if (count > PAGEVEC_SIZE)
                        count = PAGEVEC_SIZE;
                pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
                                              first, count, pv.pages);
                ASSERTCMP(pv.nr, ==, count);

                for (loop = 0; loop < count; loop++) {
                        priv = page_private(pv.pages[loop]);
                        trace_afs_page_dirty(vnode, tracepoint_string("clear"),
                                             pv.pages[loop]->index, priv);
                        set_page_private(pv.pages[loop], 0);
                        end_page_writeback(pv.pages[loop]);
                }
                first += count;
                __pagevec_release(&pv);
        } while (first <= last);

        afs_prune_wb_keys(vnode);
        _leave("");
}

/*
 * write to a file
 */
static int afs_store_data(struct address_space *mapping,
                          pgoff_t first, pgoff_t last,
                          unsigned offset, unsigned to)
{
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
        struct afs_fs_cursor fc;
        struct afs_status_cb *scb;
        struct afs_wb_key *wbk = NULL;
        struct list_head *p;
        int ret = -ENOKEY, ret2;

        _enter("%s{%llx:%llu.%u},%lx,%lx,%x,%x",
               vnode->volume->name,
               vnode->fid.vid,
               vnode->fid.vnode,
               vnode->fid.unique,
               first, last, offset, to);

        scb = kzalloc(sizeof(struct afs_status_cb), GFP_NOFS);
        if (!scb)
                return -ENOMEM;

        spin_lock(&vnode->wb_lock);
        p = vnode->wb_keys.next;

        /* Iterate through the list looking for a valid key to use. */
try_next_key:
        while (p != &vnode->wb_keys) {
                wbk = list_entry(p, struct afs_wb_key, vnode_link);
                _debug("wbk %u", key_serial(wbk->key));
                ret2 = key_validate(wbk->key);
                if (ret2 == 0)
                        goto found_key;
                if (ret == -ENOKEY)
                        ret = ret2;
                p = p->next;
        }

        spin_unlock(&vnode->wb_lock);
        afs_put_wb_key(wbk);
        kfree(scb);
        _leave(" = %d [no keys]", ret);
        return ret;

found_key:
        refcount_inc(&wbk->usage);
        spin_unlock(&vnode->wb_lock);

        _debug("USE WB KEY %u", key_serial(wbk->key));

        ret = -ERESTARTSYS;
        if (afs_begin_vnode_operation(&fc, vnode, wbk->key, false)) {
                afs_dataversion_t data_version = vnode->status.data_version + 1;

                while (afs_select_fileserver(&fc)) {
                        fc.cb_break = afs_calc_vnode_cb_break(vnode);
                        afs_fs_store_data(&fc, mapping, first, last, offset, to, scb);
                }

                afs_check_for_remote_deletion(&fc, vnode);
                afs_vnode_commit_status(&fc, vnode, fc.cb_break,
                                        &data_version, scb);
                if (fc.ac.error == 0)
                        afs_pages_written_back(vnode, first, last);
                ret = afs_end_vnode_operation(&fc);
        }

        switch (ret) {
        case 0:
                afs_stat_v(vnode, n_stores);
                atomic_long_add((last * PAGE_SIZE + to) -
                                (first * PAGE_SIZE + offset),
                                &afs_v2net(vnode)->n_store_bytes);
                break;
        case -EACCES:
        case -EPERM:
        case -ENOKEY:
        case -EKEYEXPIRED:
        case -EKEYREJECTED:
        case -EKEYREVOKED:
                _debug("next");
                spin_lock(&vnode->wb_lock);
                p = wbk->vnode_link.next;
                afs_put_wb_key(wbk);
                goto try_next_key;
        }

        afs_put_wb_key(wbk);
        kfree(scb);
        _leave(" = %d", ret);
        return ret;
}
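
The key handling above amounts to a retry protocol: walk vnode->wb_keys under
the spinlock, drop the lock once a key validates, attempt the store, and on a
permission-class failure resume the walk from the next key.  A userspace
sketch of that cycle over a simple array (try_store() is a hypothetical
callback standing in for the whole store operation):

/* Sketch of the key-cycling retry in afs_store_data(): try each cached
 * credential in turn, skipping past ones the server rejects.
 */
#include <errno.h>

static int store_with_any_key(int keys[], int nkeys,
                              int (*try_store)(int key))
{
        int i, ret = -ENOKEY;   /* returned if no key works at all */

        for (i = 0; i < nkeys; i++) {
                ret = try_store(keys[i]);
                switch (ret) {
                case -EACCES:
                case -EPERM:
                case -ENOKEY:
                case -EKEYEXPIRED:
                case -EKEYREJECTED:
                case -EKEYREVOKED:
                        continue;       /* permission-class: try the next key */
                default:
                        return ret;     /* success (0) or a hard error */
                }
        }
        return ret;
}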

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static int afs_write_back_from_locked_page(struct address_space *mapping,
                                           struct writeback_control *wbc,
                                           struct page *primary_page,
                                           pgoff_t final_page)
{
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
        struct page *pages[8], *page;
        unsigned long count, priv;
        unsigned n, offset, to, f, t;
        pgoff_t start, first, last;
        int loop, ret;

        _enter(",%lx", primary_page->index);

        count = 1;
        if (test_set_page_writeback(primary_page))
                BUG();

        /* Find all consecutive lockable dirty pages that have contiguous
         * written regions, stopping when we find a page that is not
         * immediately lockable, is not dirty or is missing, or we reach the
         * end of the range.
         */
        start = primary_page->index;
        priv = page_private(primary_page);
        offset = priv & AFS_PRIV_MAX;
        to = priv >> AFS_PRIV_SHIFT;
        trace_afs_page_dirty(vnode, tracepoint_string("store"),
                             primary_page->index, priv);

        WARN_ON(offset == to);
        if (offset == to)
                trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
                                     primary_page->index, priv);

        if (start >= final_page ||
            (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
                goto no_more;

        start++;
        do {
                _debug("more %lx [%lx]", start, count);
                n = final_page - start + 1;
                if (n > ARRAY_SIZE(pages))
                        n = ARRAY_SIZE(pages);
                n = find_get_pages_contig(mapping, start, ARRAY_SIZE(pages), pages);
                _debug("fgpc %u", n);
                if (n == 0)
                        goto no_more;
                if (pages[0]->index != start) {
                        do {
                                put_page(pages[--n]);
                        } while (n > 0);
                        goto no_more;
                }

                for (loop = 0; loop < n; loop++) {
                        page = pages[loop];
                        if (to != PAGE_SIZE &&
                            !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
                                break;
                        if (page->index > final_page)
                                break;
                        if (!trylock_page(page))
                                break;
                        if (!PageDirty(page) || PageWriteback(page)) {
                                unlock_page(page);
                                break;
                        }

                        priv = page_private(page);
                        f = priv & AFS_PRIV_MAX;
                        t = priv >> AFS_PRIV_SHIFT;
                        if (f != 0 &&
                            !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
                                unlock_page(page);
                                break;
                        }
                        to = t;

                        trace_afs_page_dirty(vnode, tracepoint_string("store+"),
                                             page->index, priv);

                        if (!clear_page_dirty_for_io(page))
                                BUG();
                        if (test_set_page_writeback(page))
                                BUG();
                        unlock_page(page);
                        put_page(page);
                }
                count += loop;
                if (loop < n) {
                        for (; loop < n; loop++)
                                put_page(pages[loop]);
                        goto no_more;
                }

                start += loop;
        } while (start <= final_page && count < 65536);

no_more:
        /* We now have a contiguous set of dirty pages, each with writeback
         * set; the first page is still locked at this point, but all the rest
         * have been unlocked.
         */
        unlock_page(primary_page);

        first = primary_page->index;
        last = first + count - 1;

        _debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);

        ret = afs_store_data(mapping, first, last, offset, to);
        switch (ret) {
        case 0:
                ret = count;
                break;

        default:
                pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
                /* Fall through */
        case -EACCES:
        case -EPERM:
        case -ENOKEY:
        case -EKEYEXPIRED:
        case -EKEYREJECTED:
        case -EKEYREVOKED:
                afs_redirty_pages(wbc, mapping, first, last);
                mapping_set_error(mapping, ret);
                break;

        case -EDQUOT:
        case -ENOSPC:
                afs_redirty_pages(wbc, mapping, first, last);
                mapping_set_error(mapping, -ENOSPC);
                break;

        case -EROFS:
        case -EIO:
        case -EREMOTEIO:
        case -EFBIG:
        case -ENOENT:
        case -ENOMEDIUM:
        case -ENXIO:
                trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
                afs_kill_pages(mapping, first, last);
                mapping_set_error(mapping, ret);
                break;
        }

        _leave(" = %d", ret);
        return ret;
}
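
Stripped of the page flags, the gathering loop above is an opportunistic
range extension: starting from the one page already claimed, keep absorbing
the next page while it can be taken without blocking, and stop at the first
one that cannot.  A condensed sketch (try_claim() is a hypothetical
non-blocking test-and-take playing the role of trylock_page() plus the
PageDirty/PageWriteback checks):

/* Sketch of the contiguous-range gathering in
 * afs_write_back_from_locked_page().
 */
static unsigned long gather_contiguous(unsigned long start,
                                       unsigned long final,
                                       int (*try_claim)(unsigned long))
{
        unsigned long count = 1;        /* the primary page is already ours */

        while (start + count <= final && count < 65536) {
                if (!try_claim(start + count))
                        break;          /* not immediately claimable: stop */
                count++;
        }
        return count;                   /* length of the contiguous span */
}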

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
        int ret;

        _enter("{%lx},", page->index);

        ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
                                              wbc->range_end >> PAGE_SHIFT);
        if (ret < 0) {
                _leave(" = %d", ret);
                return 0;
        }

        wbc->nr_to_write -= ret;

        _leave(" = 0");
        return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
                                 struct writeback_control *wbc,
                                 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
        struct page *page;
        int ret, n;

        _enter(",,%lx,%lx,", index, end);

        do {
                n = find_get_pages_range_tag(mapping, &index, end,
                                        PAGECACHE_TAG_DIRTY, 1, &page);
                if (!n)
                        break;

                _debug("wback %lx", page->index);

                /*
                 * at this point we hold neither the i_pages lock nor the
                 * page lock: the page may be truncated or invalidated
                 * (changing page->mapping to NULL), or even swizzled
                 * back from swapper_space to tmpfs file mapping
                 */
                ret = lock_page_killable(page);
                if (ret < 0) {
                        put_page(page);
                        _leave(" = %d", ret);
                        return ret;
                }

                if (page->mapping != mapping || !PageDirty(page)) {
                        unlock_page(page);
                        put_page(page);
                        continue;
                }

                if (PageWriteback(page)) {
                        unlock_page(page);
                        if (wbc->sync_mode != WB_SYNC_NONE)
                                wait_on_page_writeback(page);
                        put_page(page);
                        continue;
                }

                if (!clear_page_dirty_for_io(page))
                        BUG();
                ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
                put_page(page);
                if (ret < 0) {
                        _leave(" = %d", ret);
                        return ret;
                }

                wbc->nr_to_write -= ret;

                cond_resched();
        } while (index < end && wbc->nr_to_write > 0);

        *_next = index;
        _leave(" = 0 [%lx]", *_next);
        return 0;
}
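
The comment in the middle of afs_writepages_region() is the crux: between
finding a dirty page and locking it, the page may be truncated or
invalidated, so every assumption must be rechecked once the lock is held.
The same lookup/lock/recheck pattern in miniature, with a pthread mutex for
the page lock and a dirty flag standing in for the PageDirty()/mapping
recheck:

/* Sketch of the lookup/lock/recheck pattern in afs_writepages_region():
 * state observed before locking proves nothing; re-validate under the lock
 * and skip the item if it changed underneath us.
 */
#include <pthread.h>
#include <stdbool.h>

struct item {
        pthread_mutex_t lock;
        bool            dirty;          /* may change before we lock */
};

static bool process_if_still_dirty(struct item *it,
                                   void (*work)(struct item *))
{
        pthread_mutex_lock(&it->lock);
        if (!it->dirty) {               /* recheck under the lock */
                pthread_mutex_unlock(&it->lock);
                return false;           /* raced: someone else cleaned it */
        }
        it->dirty = false;
        work(it);
        pthread_mutex_unlock(&it->lock);
        return true;
}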

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
                   struct writeback_control *wbc)
{
        pgoff_t start, end, next;
        int ret;

        _enter("");

        if (wbc->range_cyclic) {
                start = mapping->writeback_index;
                end = -1;
                ret = afs_writepages_region(mapping, wbc, start, end, &next);
                if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
                        ret = afs_writepages_region(mapping, wbc, 0, start,
                                                    &next);
                mapping->writeback_index = next;
        } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
                end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
                ret = afs_writepages_region(mapping, wbc, 0, end, &next);
                if (wbc->nr_to_write > 0)
                        mapping->writeback_index = next;
        } else {
                start = wbc->range_start >> PAGE_SHIFT;
                end = wbc->range_end >> PAGE_SHIFT;
                ret = afs_writepages_region(mapping, wbc, start, end, &next);
        }

        _leave(" = %d", ret);
        return ret;
}
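
For the range_cyclic case above, writeback resumes from where the previous
pass stopped, sweeps to the end of the file, wraps around to cover the pages
that were skipped, and finally records the stopping point for next time.  A
sketch of that wrap-around sweep; write_region() is a hypothetical helper
mirroring afs_writepages_region(), and the wbc->nr_to_write budget check is
omitted for brevity:

/* Sketch of the range_cyclic handling in afs_writepages(). */
static unsigned long writeback_index;

static int writepages_cyclic(unsigned long end_of_file,
                             int (*write_region)(unsigned long from,
                                                 unsigned long to,
                                                 unsigned long *next))
{
        unsigned long start = writeback_index, next = 0;
        int ret;

        ret = write_region(start, end_of_file, &next);
        if (start > 0 && ret == 0)
                ret = write_region(0, start, &next);    /* wrap around */
        writeback_index = next;         /* resume point for the next pass */
        return ret;
}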

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
        struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
        ssize_t result;
        size_t count = iov_iter_count(from);

        _enter("{%llx:%llu},{%zu},",
               vnode->fid.vid, vnode->fid.vnode, count);

        if (IS_SWAPFILE(&vnode->vfs_inode)) {
                printk(KERN_INFO
                       "AFS: Attempt to write to active swap file!\n");
                return -EBUSY;
        }

        if (!count)
                return 0;

        result = generic_file_write_iter(iocb, from);

        _leave(" = %zd", result);
        return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct inode *inode = file_inode(file);
        struct afs_vnode *vnode = AFS_FS_I(inode);

        _enter("{%llx:%llu},{n=%pD},%d",
               vnode->fid.vid, vnode->fid.vnode, file,
               datasync);

        return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
        struct file *file = vmf->vma->vm_file;
        struct inode *inode = file_inode(file);
        struct afs_vnode *vnode = AFS_FS_I(inode);
        unsigned long priv;

        _enter("{{%llx:%llu}},{%lx}",
               vnode->fid.vid, vnode->fid.vnode, vmf->page->index);

        sb_start_pagefault(inode->i_sb);

        /* Wait for the page to be written to the cache before we allow it to
         * be modified.  We then assume the entire page will need writing back.
         */
#ifdef CONFIG_AFS_FSCACHE
        fscache_wait_on_page_write(vnode->cache, vmf->page);
#endif

        if (PageWriteback(vmf->page) &&
            wait_on_page_bit_killable(vmf->page, PG_writeback) < 0)
                return VM_FAULT_RETRY;

        if (lock_page_killable(vmf->page) < 0)
                return VM_FAULT_RETRY;

        /* We mustn't change page->private until writeback is complete as that
         * details the portion of the page we need to write back and we might
         * need to redirty the page if there's a problem.
         */
        wait_on_page_writeback(vmf->page);

        priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */
        priv |= 0; /* From */
        trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
                             vmf->page->index, priv);
        SetPagePrivate(vmf->page);
        set_page_private(vmf->page, priv);

        sb_end_pagefault(inode->i_sb);
        return VM_FAULT_LOCKED;
}
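
The ordering in afs_page_mkwrite() matters: any store to the cache and any
in-flight writeback must complete before the whole page is stamped dirty in
page->private, because writeback completion clears that word and would wipe
the new range.  A condensed sketch of the wait-then-stamp sequence, with a
condition variable standing in for the PG_writeback wait and an illustrative
shift in place of AFS_PRIV_SHIFT:

/* Sketch of the ordering in afs_page_mkwrite(): wait for writeback to
 * finish before installing the new dirty range.
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wb_done = PTHREAD_COND_INITIALIZER;
static bool writeback;                  /* analogue of PageWriteback() */
static unsigned long priv;              /* analogue of page->private */

static void mkwrite(unsigned long page_size)
{
        pthread_mutex_lock(&lock);
        while (writeback)               /* wait_on_page_writeback() */
                pthread_cond_wait(&wb_done, &lock);
        priv = page_size << 16;         /* to = PAGE_SIZE, from = 0 */
        pthread_mutex_unlock(&lock);
}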

/*
 * Prune the keys cached for writeback.  This takes and releases the vnode's
 * wb_lock itself, so the caller must not hold it.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
        LIST_HEAD(graveyard);
        struct afs_wb_key *wbk, *tmp;

        /* Discard unused keys */
        spin_lock(&vnode->wb_lock);

        if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
            !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
                list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
                        if (refcount_read(&wbk->usage) == 1)
                                list_move(&wbk->vnode_link, &graveyard);
                }
        }

        spin_unlock(&vnode->wb_lock);

        while (!list_empty(&graveyard)) {
                wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
                list_del(&wbk->vnode_link);
                afs_put_wb_key(wbk);
        }
}
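
afs_prune_wb_keys() is a textbook graveyard pattern: while the spinlock is
held it only unlinks the unreferenced keys onto a private list, and the
actual freeing happens after the lock is dropped, keeping the critical
section short and avoiding running destructors under a spinlock.  The same
idiom over a singly linked list, with userspace stand-ins for struct
afs_wb_key and wb_lock:

/* Sketch of the graveyard idiom in afs_prune_wb_keys(): unlink under the
 * lock, free outside it.
 */
#include <pthread.h>
#include <stdlib.h>

struct node {
        struct node     *next;
        int             refs;
};

static struct node *keys;
static pthread_mutex_t wb_lock = PTHREAD_MUTEX_INITIALIZER;

static void prune(void)
{
        struct node **pp, *n, *graveyard = NULL;

        pthread_mutex_lock(&wb_lock);
        for (pp = &keys; (n = *pp); ) {
                if (n->refs == 1) {             /* only the list holds it */
                        *pp = n->next;          /* unlink under the lock */
                        n->next = graveyard;
                        graveyard = n;
                } else {
                        pp = &n->next;
                }
        }
        pthread_mutex_unlock(&wb_lock);

        while ((n = graveyard)) {               /* free outside the lock */
                graveyard = n->next;
                free(n);
        }
}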

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
        unsigned long priv;
        unsigned int f, t;
        int ret = 0;

        _enter("{%lx}", page->index);

        priv = page_private(page);
        if (clear_page_dirty_for_io(page)) {
                f = 0;
                t = PAGE_SIZE;
                if (PagePrivate(page)) {
                        f = priv & AFS_PRIV_MAX;
                        t = priv >> AFS_PRIV_SHIFT;
                }

                trace_afs_page_dirty(vnode, tracepoint_string("launder"),
                                     page->index, priv);
                ret = afs_store_data(mapping, page->index, page->index, f, t);
        }

        trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
                             page->index, priv);
        set_page_private(page, 0);
        ClearPagePrivate(page);

#ifdef CONFIG_AFS_FSCACHE
        if (PageFsCache(page)) {
                fscache_wait_on_page_write(vnode->cache, page);
                fscache_uncache_page(vnode->cache, page);
        }
#endif
        return ret;
}
