/linux-4.1.27/mm/ |
readahead.c | 2 * mm/readahead.c - address_space-level file readahead. 24 * Initialise a struct file's readahead state. Assumes that the caller has 207 * Chunk the readahead into 2 megabyte units, so that we don't pin too much 237 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a 284 * On-demand readahead design. 287 * readahead attempt: 295 * `readahead pipelining': Do not wait until the application has consumed all 296 * readahead pages and stalled on the missing page at readahead_index; 297 * Instead, submit an asynchronous readahead I/O as soon as there are 298 * only async_size pages left in the readahead window. Normally async_size 302 * be invalidating each other's readahead state. So we flag the new readahead 303 * page at (start+size-async_size) with PG_readahead, and use it as readahead 305 * readahead-for-nothing fuss, saving pointless page cache lookups. 309 * small random reads. Note that the readahead algorithm checks loosely 318 * The code ramps up the readahead size aggressively at first, but slows down as 375 * A minimal readahead algorithm for trivial sequential/random reads. 394 * Ramp up sizes, and push forward the readahead window. ondemand_readahead() 405 * Hit a marked page without valid readahead state. ondemand_readahead() 408 * readahead size. Ramp it up and use it as the new readahead size. ondemand_readahead() 452 * Read as is, and do not pollute the readahead state. ondemand_readahead() 463 * Will this read hit the readahead marker made by itself? ondemand_readahead() 464 * If so, trigger the readahead marker hit now, and merge ondemand_readahead() 465 * the resulting next readahead window into the current one. ondemand_readahead() 476 * page_cache_sync_readahead - generic file readahead 478 * @ra: file_ra_state which holds the readahead state 485 * it will submit the read. The readahead logic may decide to piggyback more 509 * page_cache_async_readahead - file readahead for marked pages 511 * @ra: file_ra_state which holds the readahead state 520 * has used up enough of the readahead window that we should start pulling in 562 SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
|
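The pipelining and marker logic described in the readahead.c comments reduces to a few lines. A minimal sketch of the marker-hit path, simplified from ondemand_readahead() (the real ramp-up helper, get_next_ra_size(), quadruples small windows; the submission step is elided here):

    /*
     * Sketch: the reader just reached the PG_readahead-flagged page,
     * so only ra->async_size pages of the current window remain.
     * Push the window forward and submit the next chunk now, before
     * the application can stall on a missing page.
     */
    static void readahead_marker_hit(struct file_ra_state *ra,
                                     unsigned long max /* ra->ra_pages */)
    {
        ra->start += ra->size;          /* new window starts at the old end */
        ra->size = min_t(unsigned long, ra->size * 2, max);  /* ramp up */
        ra->async_size = ra->size;      /* re-arm the async trigger */
        /*
         * ...submit ra->size pages from ra->start, flagging the page at
         * ra->start + ra->size - ra->async_size with PG_readahead so
         * the next hit lands back here...
         */
    }

The readahead(2) syscall matched at line 562 feeds this same machinery explicitly from userspace.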
Makefile | 19 readahead.o swap.o truncate.o vmscan.o shmem.o \
|
swap_state.c | 405 * We can have no readahead hits to judge by: but must not get swapin_nr_pages() 422 /* Don't shrink readahead too fast */ swapin_nr_pages() 440 * Primitive swap readahead code. We simply read an aligned block of 443 * the 'original' request together with the readahead ones... 446 * the readahead.
|
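The swapin_nr_pages() snippets describe a feedback loop: grow the swap readahead cluster while previous readahead pages are actually getting hit, and shrink it when they are not, but never abruptly. A rough sketch of that shape (the names and exact factors here are illustrative, not the kernel's):

    /*
     * Illustrative sketch of hit-driven swap readahead sizing:
     * double on hits, halve on misses ("don't shrink readahead
     * too fast"), and always fetch at least the faulting page.
     */
    static unsigned int next_swapin_pages(unsigned int last_pages,
                                          bool had_hits,
                                          unsigned int max_pages)
    {
        unsigned int pages = had_hits ? last_pages * 2
                                      : last_pages / 2;

        if (pages < 1)
            pages = 1;
        if (pages > max_pages)
            pages = max_pages;
        return pages;
    }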
filemap.c | 1434 * read(R) => miss => readahead(R...B) => media error => frustrating retries 1436 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) => 1437 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) => 1438 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ...... 1440 * It is going insane. Fix it by quickly scaling down the readahead size. 1775 * Synchronous readahead happens when we don't even find 1820 * Asynchronous readahead happens when we find the page and PG_readahead, 1821 * so we want to possibly extend the readahead further.. 1887 * We found the page, so try async readahead before filemap_fault() 2375 * cached by non-direct readahead, or faulted in by get_user_pages() generic_file_direct_write()
|
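The "quickly scaling down" fix named in the filemap.c excerpt is deliberately blunt: once a media error surfaces inside a readahead window, the maximum window is slashed so the retry loop stops dragging the bad sectors back in at full size. A sketch of the idea (filemap.c implements this as a tiny helper, shrink_readahead_size_eio()):

    /*
     * Sketch: after an I/O error in a readahead window, cut the
     * maximum window hard so the read(2) retry loop shown above
     * stops re-requesting the failing region at full width.
     */
    static void shrink_readahead_on_eio(struct file_ra_state *ra)
    {
        ra->ra_pages /= 4;
    }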
page-writeback.c | 2222 * readahead/lru_deactivate_page could remain set_page_dirty() 2224 * About readahead, if the page is written, the flags would be set_page_dirty() 2227 * will be reset. So no problem. but if the page is used by readahead set_page_dirty() 2228 * it will confuse readahead and make it restart the size rampup set_page_dirty()
|
swap.c | 782 * It can make readahead confusing. But race window lru_deactivate_file_fn()
|
memcontrol.c | 1234 * Swapcache readahead pages are added to the LRU - and mem_cgroup_page_lruvec() 1872 * (like optional page cache readahead) and so an OOM killer mem_cgroup_oom() 5753 * Swapcache readahead pages can get migrated before being mem_cgroup_migrate()
|
migrate.c | 779 * For example, during page readahead pages are added locked __unmap_and_move()
|
mempolicy.c | 57 fix mmap readahead to honour policy and enable policy for any page cache
|
/linux-4.1.27/drivers/ata/ |
pata_rz1000.c | 71 printk(KERN_INFO DRV_NAME ": disabled chipset readahead.\n"); rz1000_fifo_disable() 81 * handling except that we *MUST* kill the chipset readahead or the
|
/linux-4.1.27/drivers/md/ |
dm-zero.c | 43 /* readahead of null bytes only wastes buffer cache */ zero_map()
|
dm-snap-persistent.c | 507 * Setup for one current buffer + desired readahead buffers. read_exceptions()
|
raid0.c | 464 * readahead at least twice a whole stripe. i.e. number of devices
|
dm-thin.c | 2325 * Note that we defer readahead too. thin_bio_map()
|
raid10.c | 3748 * We need to readahead at least twice a whole stripe....
|
/linux-4.1.27/fs/xfs/libxfs/ |
xfs_inode_buf.c | 61 * If we are doing readahead on an inode buffer, we might be in log recovery 63 * has not had the inode cores stamped into it. Hence for readahead, the buffer 66 * If the readahead buffer is invalid, we need to mark it with an error and 70 * because all we want to do is say readahead failed; there is no-one to report 72 * Changes to this readahead error behaviour also need to be reflected in 76 xfs_inode_buf_verify(struct xfs_buf *bp, bool readahead) xfs_inode_buf_verify() argument 99 if (readahead) { xfs_inode_buf_verify()
|
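The rule in those xfs_inode_buf.c comments, that readahead verification failures stay silent and leave the buffer !done, comes down to a small branch in the verifier. A sketch of that branch, assuming the usual xfs_buf flag (XBF_DONE) and error helper (xfs_buf_ioerror()):

    /* Inside an inode-buffer verifier, on detecting a bad buffer: */
    if (readahead) {
        /*
         * Nobody to report the error to: clear XBF_DONE so a later
         * real read retries, attach -EIO, and return without
         * logging any corruption report.
         */
        bp->b_flags &= ~XBF_DONE;
        xfs_buf_ioerror(bp, -EIO);
        return;
    }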
xfs_dquot_buf.c | 267 * readahead errors are silent and simply leave the buffer as !done so a real
|
xfs_btree.h | 205 __uint8_t bc_ra[XFS_BTREE_MAXLEVELS]; /* readahead bits */
|
xfs_btree.c | 829 * No readahead needed if we are at the root level and the xfs_btree_readahead() 3963 * also do readahead on the sibling pointers to get IO moving more quickly, 3987 /* do right sibling readahead */ xfs_btree_block_change_owner() 4044 /* readahead the left most block for the next level down */ xfs_btree_change_owner()
|
xfs_dir2_data.c | 239 * format buffer or a data format buffer on readahead.
|
/linux-4.1.27/fs/jbd/ |
recovery.c | 48 /* Release readahead buffers after use */ journal_brelse_array() 58 * layer directly and so there is no readahead being done for us. We 59 * need to implement any readahead ourselves if we want it to happen at 64 * the readahead size, though. 128K is a purely arbitrary, good-enough 78 /* Do up to 128K of readahead */ do_readahead() 83 /* Do the readahead itself. We'll submit MAXBUF buffer_heads at do_readahead() 158 /* If this is a brand new buffer, start readahead. jread()
|
/linux-4.1.27/arch/mips/include/asm/ |
bmips.h | 135 /* Flush stale data out of the readahead cache */ bmips_post_dma_flush()
|
/linux-4.1.27/fs/jbd2/ |
recovery.c | 49 /* Release readahead buffers after use */ journal_brelse_array() 59 * layer directly and so there is no readahead being done for us. We 60 * need to implement any readahead ourselves if we want it to happen at 65 * the readahead size, though. 128K is a purely arbitrary, good-enough 79 /* Do up to 128K of readahead */ do_readahead() 84 /* Do the readahead itself. We'll submit MAXBUF buffer_heads at do_readahead() 159 /* If this is a brand new buffer, start readahead. jread()
|
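Both journal recovery files above (jbd and jbd2) carry the same pattern: the journal talks to the block layer directly, so recovery must issue its own readahead, batching buffer_heads and submitting them as low-priority READA requests. A condensed sketch of that do_readahead()-style loop (journal_bmap_sketch() is a stand-in for the real journal-to-disk block mapping; MAXBUF is the batch size named in the comments):

    #define MAXBUF 8

    /* Sketch of batched journal-recovery readahead. */
    static void journal_readahead_sketch(journal_t *journal,
                                         unsigned int start,
                                         unsigned int end)
    {
        struct buffer_head *bufs[MAXBUF];
        int nbufs = 0;
        unsigned int next;

        for (next = start; next < end; next++) {
            unsigned long long blocknr;
            struct buffer_head *bh;

            if (journal_bmap_sketch(journal, next, &blocknr))
                break;      /* can't map the block: stop reading ahead */

            bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
            if (!bh)
                break;

            if (!buffer_uptodate(bh) && !buffer_locked(bh)) {
                bufs[nbufs++] = bh;     /* worth reading ahead */
                if (nbufs == MAXBUF) {
                    ll_rw_block(READA, nbufs, bufs);
                    while (nbufs--)
                        brelse(bufs[nbufs]);
                    nbufs = 0;
                }
            } else {
                brelse(bh);             /* cached or already in flight */
            }
        }
        if (nbufs > 0)
            ll_rw_block(READA, nbufs, bufs);
        while (nbufs-- > 0)
            brelse(bufs[nbufs]);        /* release after submission */
    }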
/linux-4.1.27/drivers/staging/lustre/lustre/llite/ |
rw.c | 291 * Get readahead pages from the filesystem readahead pool of the client for a 294 * \param sbi superblock for filesystem readahead state ll_ra_info 295 * \param ria per-thread readahead state 296 * \param pages number of pages requested for readahead for the thread. 301 * these readahead pages. 304 * to get an ra budget that is larger than the remaining readahead pages 325 /* If the non-strided (ria_pages == 0) readahead window ll_ra_count_get() 327 * readahead size by the amount beyond the RPC so it ends on an ll_ra_count_get() 328 * RPC boundary. If the readahead window is already ending on ll_ra_count_get() 330 * RPC (beyond_rpc < ret) the readahead size is unchanged. ll_ra_count_get() 552 * and max_read_ahead_per_file_mb otherwise the readahead budget can be used 1082 /* Since stride readahead is sensitive to the offset ras_update()
|
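The RPC-boundary logic in ll_ra_count_get() is plain modular arithmetic: beyond_rpc is how far the window would stick past the last RPC boundary, and the window is trimmed by that amount unless it is already aligned or the trim would consume the whole grant. A minimal sketch under that reading of the excerpted comment (names are illustrative):

    /*
     * Sketch: end a non-strided readahead window on an RPC boundary.
     * rpc_pages is pages per RPC; ret is the granted readahead
     * budget starting at page index 'start'.
     */
    static unsigned long trim_to_rpc_boundary(unsigned long start,
                                              unsigned long ret,
                                              unsigned long rpc_pages)
    {
        unsigned long beyond_rpc = (start + ret) % rpc_pages;

        if (beyond_rpc != 0 && beyond_rpc < ret)
            ret -= beyond_rpc;      /* drop the overhang past the boundary */
        return ret;
    }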
llite_mmap.c | 91 * \param ra_flags - vma readahead flags. 137 * filemap_nopage. we do our readahead in ll_readpage. ll_fault_io_init()
|
lproc_llite.c | 250 CERROR("can't set file readahead more than %lu MB\n", ll_max_readahead_mb_seq_write() 293 CERROR("can't set file readahead more than max_read_ahead_mb %lu MB\n", ll_max_readahead_per_file_mb_seq_write() 334 /* Cap this at the current max readahead window size, the readahead ll_max_read_ahead_whole_mb_seq_write()
|
llite_internal.h | 184 * cleanup the dir readahead. */ 315 /* default to about 40meg of readahead on a given system. That much tied 316 * up in 512k readahead requests serviced at 40ms each is about 1GB/s. */
|
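The 40 MB default in llite_internal.h is back-of-envelope capacity math: a budget kept full of in-flight requests sustains roughly budget / service_time of bandwidth. With the numbers in the comment:

    in-flight requests = 40 MB / 512 KB = 80
    throughput ~= 40 MB / 40 ms = 1000 MB/s ~= 1 GB/s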
/linux-4.1.27/include/uapi/linux/ |
blktrace_api.h | 22 BLK_TC_AHEAD = 1 << 11, /* readahead */
|
capability.h | 245 /* Allow setting readahead and flushing buffers on block devices */
|
/linux-4.1.27/drivers/md/bcache/ |
sysfs.c | 95 rw_attribute(readahead); variable 173 var_hprint(readahead); SHOW() 219 d_strtoi_h(readahead); SHOW_LOCKED()
|
bcache.h | 366 unsigned readahead; member in struct:cached_dev
|
request.c | 793 reada = min_t(sector_t, dc->readahead >> 9, cached_dev_cache_miss()
|
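The "dc->readahead >> 9" in the request.c match is the standard byte-to-sector conversion (a sector is 512 bytes, so a right shift by 9 divides by 512), and min_t() clamps the readahead so it cannot extend past the end of the backing device. A sketch of the sizing step around that line, with the guard conditions simplified:

    /*
     * Sketch: on a cache miss, extend the read by the sysfs
     * "readahead" amount (stored in bytes, converted here to
     * 512-byte sectors), but never past the end of the backing
     * device, and never for requests that are themselves
     * readahead or metadata.
     */
    unsigned int reada = 0;

    if (!(bio->bi_rw & REQ_RAHEAD) && !(bio->bi_rw & REQ_META))
        reada = min_t(sector_t, dc->readahead >> 9,
                      bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));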
/linux-4.1.27/fs/nilfs2/ |
mdt.c | 173 nilfs_mdt_read_block(struct inode *inode, unsigned long block, int readahead, struct buffer_head **out_bh) nilfs_mdt_read_block() argument 188 if (readahead) { nilfs_mdt_read_block() 196 /* abort readahead if bmap lookup failed */ nilfs_mdt_read_block()
|
btree.c | 535 nilfs_btree_do_lookup(const struct nilfs_bmap *btree, struct nilfs_btree_path *path, __u64 key, __u64 *ptrp, int minlevel, int readahead) nilfs_btree_do_lookup() argument 560 if (level == NILFS_BTREE_LEVEL_NODE_MIN && readahead) { nilfs_btree_do_lookup()
|
/linux-4.1.27/fs/xfs/ |
xfs_dir2_readdir.c | 263 int ra_want; /* readahead count wanted */ 315 * Recalculate the readahead blocks wanted. xfs_dir2_leaf_readbuf() 405 * Do we need more readahead? xfs_dir2_leaf_readbuf() 522 * Get more blocks and readahead as necessary. xfs_dir2_leaf_getdents()
|
xfs_itable.c | 173 * record. Do a readahead if there are any allocated inodes in that cluster.
|
xfs_buf.c | 675 * If we are not low on memory then do the readahead in a deadlock 1226 * left over from previous use of the buffer (e.g. failed readahead). _xfs_buf_ioapply()
|
xfs_file.c | 1054 * readahead window and size the buffers used for mapping to xfs_file_readdir()
|
xfs_log_recover.c | 2355 * Note that we have to be extremely careful of readahead here. xlog_recover_buffer_pass2() 2357 * actually do any replay after readahead because of the LSN we found xlog_recover_buffer_pass2()
|
/linux-4.1.27/fs/ocfs2/ |
buffer_head_io.c | 235 * readahead. ocfs2_buffer_uptodate() will return ocfs2_read_blocks() 240 * 3) The current request is readahead (and so must ocfs2_read_blocks()
|
extent_map.c | 972 * from a previous readahead call to this function. Thus, ocfs2_read_virt_blocks()
|
dir.c | 700 int ra_max = 0; /* Number of bh's in the readahead ocfs2_find_entry_el() 702 int ra_ptr = 0; /* Current index into readahead ocfs2_find_entry_el() 721 /* Refill the readahead buffer */ ocfs2_find_entry_el()
|
/linux-4.1.27/fs/f2fs/ |
node.h | 17 /* # of pages to perform readahead before building free nids */ 20 /* maximum readahead size for node during getting data blocks */
|
node.c | 1072 * And, readahead MAX_RA_NODE number of node pages. 1099 /* Then, try readahead for siblings of the desired node */ get_node_page_ra() 1517 /* readahead nat pages to be scanned */ build_free_nids() 1757 /* readahead node pages */ restore_node_summary()
|
checkpoint.c | 175 bool readahead = false; ra_meta_pages_cond() local 179 readahead = true; ra_meta_pages_cond() 182 if (readahead) ra_meta_pages_cond()
|
f2fs.h | 123 * For CP/NAT/SIT/SSA readahead 276 LOOKUP_NODE, /* look up a node without readahead */ 278 * look up a node with readahead called
|
dir.c | 774 /* readahead for multi pages of dir */ f2fs_readdir()
|
gc.c | 717 /* readahead multiple SSA blocks that have contiguous addresses */ f2fs_gc()
|
data.c | 1211 * get_data_block() now supports readahead/bmap/rw direct_IO with mapped bh. 1301 /* Give more consecutive addresses for the readahead */ __get_data_block()
|
/linux-4.1.27/fs/nfsd/ |
vfs.c | 50 * This is a cache of readahead params that help us choose the proper 51 * readahead strategy. Initially, we set all readahead parameters to 0 774 * Obtain the readahead parameters for the file 1024 /* Get readahead parameters */ nfsd_get_tmp_read_open() 1034 /* Write back readahead params */ nfsd_put_tmp_read_open() 2102 dprintk("nfsd: freeing readahead buffers.\n"); nfsd_racache_shutdown() 2115 * Initialize readahead param cache 2132 dprintk("nfsd: allocating %d readahead buffers.\n", cache_size); nfsd_racache_init() 2151 dprintk("nfsd: kmalloc failed, freeing readahead buffers\n"); nfsd_racache_init()
|
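The nfsd cache above exists because every NFS READ runs on a transient struct file, so the VFS readahead state would otherwise restart from zero on each call; the matched helpers save the parameters before the file is dropped and restore them on the next read of the same file. A minimal sketch of that save/restore pattern (nfsd_ra_lookup() is a hypothetical stand-in for the real hash-chain lookup):

    /* Persist per-file readahead state across transient opens. */
    struct raparms_sketch {
        struct file_ra_state p_ra;  /* saved readahead state */
        /* ...dev/inode key and hash chaining omitted... */
    };

    static ssize_t nfsd_read_sketch(struct file *file, loff_t offset,
                                    char __user *buf, size_t len)
    {
        struct raparms_sketch *ra = nfsd_ra_lookup(file); /* hypothetical */
        ssize_t err;

        if (ra)
            file->f_ra = ra->p_ra;      /* restore the ramped-up window */

        err = vfs_read(file, buf, len, &offset);

        if (ra)
            ra->p_ra = file->f_ra;      /* write back the advanced state */
        return err;
    }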
/linux-4.1.27/arch/powerpc/include/asm/ |
systbl.h | 198 COMPAT_SYS_SPU(readahead)
|
/linux-4.1.27/fs/btrfs/ |
reada.c | 37 * To trigger a readahead, btrfs_reada_add must be called. It will start 39 * handle can either be used to wait on the readahead to finish 151 * trigger more readahead depending on the content, e.g. __readahead_hook() 367 "readahead: more than %d copies not supported", reada_find_extent()
|
extent_io.h | 37 #define EXTENT_BUFFER_READAHEAD 4 /* this got triggered by readahead */
|
volumes.h | 138 /* readahead state */
|
ctree.h | 196 /* ioprio of readahead is set to idle */ 1753 /* readahead tree */ 3876 /* This forces readahead on a given range of bytes in an inode */ btrfs_force_ra()
|
scrub.c | 3044 * trigger the readahead for extent tree csum tree and wait for scrub_stripe() 3045 * completion. During readahead, the scrub is officially paused scrub_stripe() 3061 /* FIXME it might be better to start readahead at commit root */ scrub_stripe()
|
disk-io.c | 2291 btrfs_alloc_workqueue("readahead", flags, max_active, 2); btrfs_init_workqueues() 2545 /* readahead state */ open_ctree()
|
ctree.c | 2231 * readahead one full node of leaves, finding things that are close
|
ioctl.c | 1282 * if we were not given a file, allocate a readahead btrfs_defrag_file()
|
send.c | 4395 /* initial readahead */ fill_read_buf()
|
extent-tree.c | 7732 /* We don't care about errors in readahead. */ reada_walk_down()
|
inode.c | 6727 * readahead btrfs_get_extent()
|
/linux-4.1.27/include/linux/ |
backing-dev.h | 64 unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */
|
page-flags.h | 431 * The PG_reclaim bit would have to be used for reclaim or readahead
|
fs.h | 807 * Track a single file's readahead state 810 pgoff_t start; /* where readahead started */ 811 unsigned int size; /* # of readahead pages */ 812 unsigned int async_size; /* do asynchronous readahead when 815 unsigned int ra_pages; /* Maximum readahead window */ 821 * Check if @index falls in the readahead windows.
|
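The fs.h matches above spell out the core fields, which is enough to reconstruct the heart of the structure. A sketch of struct file_ra_state as those comments describe it (the 4.1 struct carries further bookkeeping, e.g. mmap miss tracking, omitted here), together with the one-line interval test behind the "@index falls in the readahead windows" check at line 821:

    struct file_ra_state {
        pgoff_t start;              /* where readahead started */
        unsigned int size;          /* # of readahead pages */
        unsigned int async_size;    /* do asynchronous readahead when
                                       only this many pages are left
                                       in the window */
        unsigned int ra_pages;      /* maximum readahead window */
        /* ...mmap_miss, prev_pos omitted... */
    };

    /* Check if @index falls in the readahead window. */
    static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
    {
        return (index >= ra->start &&
                index <  ra->start + ra->size);
    }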
fscache.h | 644 * readahead failed we need to clean up the pagelist (unmark and uncache).
|
mm.h | 1946 /* readahead.c */
|
/linux-4.1.27/tools/vm/ |
page-types.c | 148 [KPF_READAHEAD] = "I:readahead", 875 /* turn off readahead */ walk_file()
|
/linux-4.1.27/drivers/ide/ |
trm290.c | 89 * bits7-0 bits7-0 of readahead count
|
cmd640.c | 82 * change "readahead" to "prefetch" to avoid confusion
|
/linux-4.1.27/fs/isofs/ |
compress.c | 295 * cache as a form of readahead.
|
/linux-4.1.27/fs/ext2/ |
namei.c | 16 * TODO: get rid of kmap() use, add readahead.
|
/linux-4.1.27/fs/gfs2/ |
meta_io.c | 355 * gfs2_meta_ra - start readahead on an extent of a file
|
aops.c | 593 * 1. This is only for readahead, so we can simply ignore any things
|
/linux-4.1.27/arch/parisc/kernel/ |
syscall_table.S | 305 ENTRY_OURS(readahead)
|
/linux-4.1.27/fs/fat/ |
fatent.c | 669 /* readahead of fat blocks */ fat_count_free_clusters()
|
/linux-4.1.27/fs/ceph/ |
super.h | 48 #define CEPH_RASIZE_DEFAULT (8192*1024) /* readahead */ 59 int rasize; /* max readahead */
|
super.c | 886 * construct our own bdi so we can control readahead, etc.
|
/linux-4.1.27/drivers/staging/lustre/lustre/osc/ |
osc_cl_internal.h | 393 * read-ahead: for a readahead page, we hold its covering lock to
|
/linux-4.1.27/fs/nfs/ |
internal.h | 15 /* Maximum number of readahead requests
|
/linux-4.1.27/fs/ext3/ |
namei.c | 851 int ra_max = 0; /* Number of bh's in the readahead ext3_find_entry() 853 int ra_ptr = 0; /* Current index into readahead ext3_find_entry() 896 /* Refill the readahead buffer */ ext3_find_entry()
|
/linux-4.1.27/fs/ubifs/ |
file.c | 48 * ondemand_readahead -> readpage"). In case of readahead, @I_SYNC flag is not 49 * set as well. However, UBIFS disables readahead.
|
/linux-4.1.27/drivers/block/ |
sunvdc.c | 899 * a readahead I/O first, and once that fails it will try to read a vdc_port_probe()
|
cpqarray.c | 1142 * setting readahead and submitting commands from userspace to the controller.
|
/linux-4.1.27/arch/ia64/kernel/ |
fsys.S | 744 data8 0 // readahead
|
/linux-4.1.27/fs/fscache/ |
page.c | 740 * Unmark pages allocated in the readahead code path (via:
|
/linux-4.1.27/fs/ext4/ |
namei.c | 1367 int ra_max = 0; /* Number of bh's in the readahead ext4_find_entry() 1369 int ra_ptr = 0; /* Current index into readahead ext4_find_entry() 1425 /* Refill the readahead buffer */ ext4_find_entry()
|
inode.c | 3965 * If we need to do any I/O, try to pre-readahead extra __ext4_get_inode_loc()
|
/linux-4.1.27/fs/ |
splice.c | 343 * readahead/allocate the rest and fill in the holes. __generic_file_splice_read()
|
buffer.c | 3063 * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
|
/linux-4.1.27/drivers/staging/lustre/lustre/ldlm/ |
ldlm_request.c | 1406 * readahead requests, ...)
|
/linux-4.1.27/drivers/net/wireless/brcm80211/brcmfmac/ |
sdio.c | 2032 brcmf_err("readahead on control packet %d?\n", brcmf_sdio_readframes()
|
/linux-4.1.27/fs/cifs/ |
connect.c | 3521 /* tune readahead according to rsize */ cifs_mount()
|
file.c | 3337 * The VFS will not try to do readahead past the cifs_readpages_read_into_pages()
|