root/mm/readahead.c

DEFINITIONS

This source file includes the following definitions.
  1. file_ra_state_init
  2. read_cache_pages_invalidate_page
  3. read_cache_pages_invalidate_pages
  4. read_cache_pages
  5. read_pages
  6. __do_page_cache_readahead
  7. force_page_cache_readahead
  8. get_init_ra_size
  9. get_next_ra_size
  10. count_history_pages
  11. try_context_readahead
  12. ondemand_readahead
  13. page_cache_sync_readahead
  14. page_cache_async_readahead
  15. ksys_readahead
  16. SYSCALL_DEFINE3

// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002    Andrew Morton
 *              Initial version.
 */

#include <linux/kernel.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/mm_inline.h>
#include <linux/blk-cgroup.h>
#include <linux/fadvise.h>

#include "internal.h"

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
        ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
        ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

/*
 * see if a page needs releasing upon read_cache_pages() failure
 * - the caller of read_cache_pages() may have set PG_private or PG_fscache
 *   before calling, such as the NFS fs marking pages that are cached locally
 *   on disk, thus we need to give the fs a chance to clean up in the event of
 *   an error
 */
static void read_cache_pages_invalidate_page(struct address_space *mapping,
                                             struct page *page)
{
        if (page_has_private(page)) {
                if (!trylock_page(page))
                        BUG();
                page->mapping = mapping;
                do_invalidatepage(page, 0, PAGE_SIZE);
                page->mapping = NULL;
                unlock_page(page);
        }
        put_page(page);
}

/*
 * release a list of pages, invalidating them first if need be
 */
static void read_cache_pages_invalidate_pages(struct address_space *mapping,
                                              struct list_head *pages)
{
        struct page *victim;

        while (!list_empty(pages)) {
                victim = lru_to_page(pages);
                list_del(&victim->lru);
                read_cache_pages_invalidate_page(mapping, victim);
        }
}

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 *
 * Returns: %0 on success, or the error returned by @filler otherwise
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
                        int (*filler)(void *, struct page *), void *data)
{
        struct page *page;
        int ret = 0;

        while (!list_empty(pages)) {
                page = lru_to_page(pages);
                list_del(&page->lru);
                if (add_to_page_cache_lru(page, mapping, page->index,
                                readahead_gfp_mask(mapping))) {
                        read_cache_pages_invalidate_page(mapping, page);
                        continue;
                }
                put_page(page);

                ret = filler(data, page);
                if (unlikely(ret)) {
                        read_cache_pages_invalidate_pages(mapping, pages);
                        break;
                }
                task_io_account_read(PAGE_SIZE);
        }
        return ret;
}

EXPORT_SYMBOL(read_cache_pages);

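/*
 * Illustrative sketch (not in-tree code): a filesystem's ->readpages()
 * can delegate the list handling to read_cache_pages().  The "myfs_*"
 * names are hypothetical.  By the time the filler runs, the page is
 * locked and already in the page cache and LRU; the filler must read
 * it and arrange for it to be unlocked when the read completes.
 */
static int myfs_readpage_filler(void *data, struct page *page)
{
        struct file *file = data;

        /* Read one page; unlock happens on I/O completion or error. */
        return myfs_read_one_page(file, page);
}

static int myfs_readpages(struct file *file, struct address_space *mapping,
                          struct list_head *pages, unsigned nr_pages)
{
        return read_cache_pages(mapping, pages, myfs_readpage_filler, file);
}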
static int read_pages(struct address_space *mapping, struct file *filp,
                struct list_head *pages, unsigned int nr_pages, gfp_t gfp)
{
        struct blk_plug plug;
        unsigned page_idx;
        int ret;

        blk_start_plug(&plug);

        if (mapping->a_ops->readpages) {
                ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
                /* Clean up the remaining pages */
                put_pages_list(pages);
                goto out;
        }

        for (page_idx = 0; page_idx < nr_pages; page_idx++) {
                struct page *page = lru_to_page(pages);
                list_del(&page->lru);
                if (!add_to_page_cache_lru(page, mapping, page->index, gfp))
                        mapping->a_ops->readpage(filp, page);
                put_page(page);
        }
        ret = 0;

out:
        blk_finish_plug(&plug);

        return ret;
}

/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
 * the pages first, then submits them for I/O. This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages requested, or the maximum amount of I/O allowed.
 */
unsigned int __do_page_cache_readahead(struct address_space *mapping,
                struct file *filp, pgoff_t offset, unsigned long nr_to_read,
                unsigned long lookahead_size)
{
        struct inode *inode = mapping->host;
        struct page *page;
        unsigned long end_index;        /* The last page we want to read */
        LIST_HEAD(page_pool);
        int page_idx;
        unsigned int nr_pages = 0;
        loff_t isize = i_size_read(inode);
        gfp_t gfp_mask = readahead_gfp_mask(mapping);

        if (isize == 0)
                goto out;

        end_index = ((isize - 1) >> PAGE_SHIFT);

        /*
         * Preallocate as many pages as we will need.
         */
        for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
                pgoff_t page_offset = offset + page_idx;

                if (page_offset > end_index)
                        break;

                page = xa_load(&mapping->i_pages, page_offset);
                if (page && !xa_is_value(page)) {
                        /*
                         * Page already present?  Kick off the current batch of
                         * contiguous pages before continuing with the next
                         * batch.
                         */
                        if (nr_pages)
                                read_pages(mapping, filp, &page_pool, nr_pages,
                                                gfp_mask);
                        nr_pages = 0;
                        continue;
                }

                page = __page_cache_alloc(gfp_mask);
                if (!page)
                        break;
                page->index = page_offset;
                list_add(&page->lru, &page_pool);
                if (page_idx == nr_to_read - lookahead_size)
                        SetPageReadahead(page);
                nr_pages++;
        }

        /*
         * Now start the IO.  We ignore I/O errors - if the page is not
         * uptodate then the caller will launch readpage again, and
         * will then handle the error.
         */
        if (nr_pages)
                read_pages(mapping, filp, &page_pool, nr_pages, gfp_mask);
        BUG_ON(!list_empty(&page_pool));
out:
        return nr_pages;
}

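/*
 * For reference: ra_submit(), called from ondemand_readahead() below, is
 * a thin helper defined in mm/internal.h.  A sketch, believed to match
 * the definition in this kernel era, shown here only for readability:
 */
static inline unsigned long ra_submit(struct file_ra_state *ra,
                struct address_space *mapping, struct file *filp)
{
        /* Feed the current readahead window straight into the reader. */
        return __do_page_cache_readahead(mapping, filp,
                                        ra->start, ra->size, ra->async_size);
}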
/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
                               pgoff_t offset, unsigned long nr_to_read)
{
        struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
        struct file_ra_state *ra = &filp->f_ra;
        unsigned long max_pages;

        if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
                return -EINVAL;

        /*
         * If the request exceeds the readahead window, allow the read to
         * be up to the optimal hardware IO size
         */
        max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
        nr_to_read = min(nr_to_read, max_pages);
        while (nr_to_read) {
                unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;

                if (this_chunk > nr_to_read)
                        this_chunk = nr_to_read;
                __do_page_cache_readahead(mapping, filp, offset, this_chunk, 0);

                offset += this_chunk;
                nr_to_read -= this_chunk;
        }
        return 0;
}

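/*
 * Worked example: with 4K pages, this_chunk is 2M / 4K = 512 pages.  A
 * 5M (1280-page) request that survives the max_pages clamp is therefore
 * issued as three __do_page_cache_readahead() calls of 512, 512 and 256
 * pages, so at most 2M of not-yet-consumed pages is pinned at a time.
 */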
/*
 * Set the initial readahead window size.  Round the request up to the
 * next power of two, then scale it against max: quadruple tiny requests
 * (<= max/32), double small ones (<= max/4), and give anything larger
 * the full max window.
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
        unsigned long newsize = roundup_pow_of_two(size);

        if (newsize <= max / 32)
                newsize = newsize * 4;
        else if (newsize <= max / 4)
                newsize = newsize * 2;
        else
                newsize = max;

        return newsize;
}

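/*
 * Worked example with the default 128K (32-page) max readahead:
 * a 1-page request rounds to 1 <= 32/32 and quadruples to 4 pages (16K);
 * a 3-page request rounds to 4 <= 32/4 and doubles to 8 pages (32K);
 * an 8-page request doubles to 16 pages (64K); anything larger gets the
 * full 32-page (128K) window.
 */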
/*
 *  Get the previous window size, ramp it up, and
 *  return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
                                      unsigned long max)
{
        unsigned long cur = ra->size;

        if (cur < max / 16)
                return 4 * cur;
        if (cur <= max / 2)
                return 2 * cur;
        return max;
}

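/*
 * Worked example: with max = 512 pages, a window that starts at 4 pages
 * ramps 4 -> 16 -> 64 (quadrupling while below max/16 = 32), then
 * 64 -> 128 -> 256 -> 512 (doubling while at or below max/2 = 256),
 * and then stays pinned at max.
 */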
/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application has consumed
 * all readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window. Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * invalidate each other's readahead state. So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as a
 * readahead indicator. The flag won't be set on already cached pages, to
 * avoid the readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads. Note that the readahead algorithm checks loosely
 * for sequential patterns. Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows
 * down as it approaches max_readahead.
 */

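/*
 * Worked example of the pipelining above (4K pages, 32-page max): a
 * first read of 4 pages at offset 0 gets an initial window of start=0,
 * size=8, async_size=4, with PG_readahead set on page 4.  When the
 * application reaches page 4 it hits the marker, and the window is
 * pushed forward and ramped: start=8, size=16, async_size=16, marker on
 * page 8.  The next hit yields start=24, size=32, and so on, so I/O for
 * the next window is in flight while the current one is consumed.
 */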
/*
 * Count contiguously cached pages from @offset-1 to @offset-@max.  This
 * count is a conservative estimate of
 *      - the length of the sequential read sequence, or
 *      - the thrashing threshold in memory tight systems
 */
static pgoff_t count_history_pages(struct address_space *mapping,
                                   pgoff_t offset, unsigned long max)
{
        pgoff_t head;

        rcu_read_lock();
        head = page_cache_prev_miss(mapping, offset - 1, max);
        rcu_read_unlock();

        return offset - 1 - head;
}

/*
 * page cache context based read-ahead
 */
static int try_context_readahead(struct address_space *mapping,
                                 struct file_ra_state *ra,
                                 pgoff_t offset,
                                 unsigned long req_size,
                                 unsigned long max)
{
        pgoff_t size;

        size = count_history_pages(mapping, offset, max);

        /*
         * not enough history pages:
         * it could be a random read
         */
        if (size <= req_size)
                return 0;

        /*
         * starts from beginning of file:
         * it is a strong indication of long-run stream (or whole-file-read)
         */
        if (size >= offset)
                size *= 2;

        ra->start = offset;
        ra->size = min(size + req_size, max);
        ra->async_size = 1;

        return 1;
}

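/*
 * Worked example: a read of req_size=2 pages at offset=10, with pages
 * 4..9 cached and page 3 absent.  count_history_pages() finds the gap
 * at index 3, so size = 10 - 1 - 3 = 6 > req_size, and a readahead
 * window of min(6 + 2, max) = 8 pages is set up starting at offset 10.
 * Since size < offset, the history does not reach the start of the
 * file, so the doubling for whole-file streams is skipped.
 */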
/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static unsigned long
ondemand_readahead(struct address_space *mapping,
                   struct file_ra_state *ra, struct file *filp,
                   bool hit_readahead_marker, pgoff_t offset,
                   unsigned long req_size)
{
        struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
        unsigned long max_pages = ra->ra_pages;
        unsigned long add_pages;
        pgoff_t prev_offset;

        /*
         * If the request exceeds the readahead window, allow the read to
         * be up to the optimal hardware IO size
         */
        if (req_size > max_pages && bdi->io_pages > max_pages)
                max_pages = min(req_size, bdi->io_pages);

        /*
         * start of file
         */
        if (!offset)
                goto initial_readahead;

        /*
         * It's the expected callback offset, assume sequential access.
         * Ramp up sizes, and push forward the readahead window.
         */
        if ((offset == (ra->start + ra->size - ra->async_size) ||
             offset == (ra->start + ra->size))) {
                ra->start += ra->size;
                ra->size = get_next_ra_size(ra, max_pages);
                ra->async_size = ra->size;
                goto readit;
        }

        /*
         * Hit a marked page without valid readahead state.
         * E.g. interleaved reads.
         * Query the pagecache for async_size, which normally equals the
         * readahead size. Ramp it up and use it as the new readahead size.
         */
        if (hit_readahead_marker) {
                pgoff_t start;

                rcu_read_lock();
                start = page_cache_next_miss(mapping, offset + 1, max_pages);
                rcu_read_unlock();

                if (!start || start - offset > max_pages)
                        return 0;

                ra->start = start;
                ra->size = start - offset;      /* old async_size */
                ra->size += req_size;
                ra->size = get_next_ra_size(ra, max_pages);
                ra->async_size = ra->size;
                goto readit;
        }

        /*
         * oversize read
         */
        if (req_size > max_pages)
                goto initial_readahead;

        /*
         * sequential cache miss
         * trivial case: (offset - prev_offset) == 1
         * unaligned reads: (offset - prev_offset) == 0
         */
        prev_offset = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
        if (offset - prev_offset <= 1UL)
                goto initial_readahead;

        /*
         * Query the page cache and look for the traces (cached history
         * pages) that a sequential stream would leave behind.
         */
        if (try_context_readahead(mapping, ra, offset, req_size, max_pages))
                goto readit;

        /*
         * standalone, small random read
         * Read as is, and do not pollute the readahead state.
         */
        return __do_page_cache_readahead(mapping, filp, offset, req_size, 0);

initial_readahead:
        ra->start = offset;
        ra->size = get_init_ra_size(req_size, max_pages);
        ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
        /*
         * Will this read hit the readahead marker made by itself?
         * If so, trigger the readahead marker hit now, and merge
         * the resulting next readahead window into the current one.
         * Take care of maximum IO pages as above.
         */
        if (offset == ra->start && ra->size == ra->async_size) {
                add_pages = get_next_ra_size(ra, max_pages);
                if (ra->size + add_pages <= max_pages) {
                        ra->async_size = add_pages;
                        ra->size += add_pages;
                } else {
                        ra->size = max_pages;
                        ra->async_size = max_pages >> 1;
                }
        }

        return ra_submit(ra, mapping, filp);
}

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
void page_cache_sync_readahead(struct address_space *mapping,
                               struct file_ra_state *ra, struct file *filp,
                               pgoff_t offset, unsigned long req_size)
{
        /* no read-ahead */
        if (!ra->ra_pages)
                return;

        if (blk_cgroup_congested())
                return;

        /* be dumb */
        if (filp && (filp->f_mode & FMODE_RANDOM)) {
                force_page_cache_readahead(mapping, filp, offset, req_size);
                return;
        }

        /* do read-ahead */
        ondemand_readahead(mapping, ra, filp, false, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_sync_readahead);

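/*
 * Typical call site (sketch, modelled on the buffered read path in
 * mm/filemap.c; the function name is hypothetical): on a page cache
 * miss, kick off synchronous readahead covering the remainder of the
 * request, then retry the lookup.
 */
static struct page *example_find_page(struct address_space *mapping,
                struct file *filp, pgoff_t index, pgoff_t last_index)
{
        struct page *page;

        page = find_get_page(mapping, index);
        if (!page) {
                page_cache_sync_readahead(mapping, &filp->f_ra, filp,
                                index, last_index - index);
                page = find_get_page(mapping, index);
        }
        return page;
}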
/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @page: the page at @offset which has the PG_readahead flag set
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_async_readahead() should be called when a page is used which
 * has the PG_readahead flag; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
void
page_cache_async_readahead(struct address_space *mapping,
                           struct file_ra_state *ra, struct file *filp,
                           struct page *page, pgoff_t offset,
                           unsigned long req_size)
{
        /* no read-ahead */
        if (!ra->ra_pages)
                return;

        /*
         * Same bit is used for PG_readahead and PG_reclaim.
         */
        if (PageWriteback(page))
                return;

        ClearPageReadahead(page);

        /*
         * Defer asynchronous read-ahead on IO congestion.
         */
        if (inode_read_congested(mapping->host))
                return;

        if (blk_cgroup_congested())
                return;

        /* do read-ahead */
        ondemand_readahead(mapping, ra, filp, true, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);

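/*
 * Typical call site (sketch, again modelled on mm/filemap.c; the
 * function name is hypothetical): when the reader lands on the
 * PG_readahead marker page, it triggers the next asynchronous window
 * while the current pages are still being consumed.
 */
static void example_check_marker(struct address_space *mapping,
                struct file *filp, struct page *page,
                pgoff_t index, pgoff_t last_index)
{
        if (PageReadahead(page))
                page_cache_async_readahead(mapping, &filp->f_ra, filp,
                                page, index, last_index - index);
}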
ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
{
        ssize_t ret;
        struct fd f;

        ret = -EBADF;
        f = fdget(fd);
        if (!f.file || !(f.file->f_mode & FMODE_READ))
                goto out;

        /*
         * The readahead() syscall is intended to run only on files
         * that can execute readahead. If readahead is not possible
         * on this file, then we must return -EINVAL.
         */
        ret = -EINVAL;
        if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
            !S_ISREG(file_inode(f.file)->i_mode))
                goto out;

        ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
out:
        fdput(f);
        return ret;
}

SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
{
        return ksys_readahead(fd, offset, count);
}
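/*
 * Userspace view (illustrative, not kernel code): since ksys_readahead()
 * resolves to vfs_fadvise(..., POSIX_FADV_WILLNEED), a program can ask
 * for the same prefetch by calling the readahead(2) syscall directly:
 *
 *      #define _GNU_SOURCE
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *
 *      int main(void)
 *      {
 *              int fd = open("data.bin", O_RDONLY);
 *
 *              if (fd < 0)
 *                      return 1;
 *              // Hint the kernel to pull the first 1M into the page cache.
 *              readahead(fd, 0, 1024 * 1024);
 *              // Subsequent read() calls will likely hit the page cache.
 *              close(fd);
 *              return 0;
 *      }
 */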
