Searched refs:pagevec_count (Results 1 – 17 of 17) sorted by relevance
427  for (i = 0; i < pagevec_count(pvec); i++) {  in pagevec_lru_move_fn()
527  if (pagevec_count(pvec))  in activate_page_drain()
533  return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;  in need_activate_page_drain()
583  for (i = pagevec_count(pvec) - 1; i >= 0; i--) {  in __lru_cache_activate_page()
811  if (pagevec_count(pvec))  in lru_add_drain_cpu()
815  if (pagevec_count(pvec)) {  in lru_add_drain_cpu()
825  if (pagevec_count(pvec))  in lru_add_drain_cpu()
883  if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||  in lru_add_drain_all()
884  pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||  in lru_add_drain_all()
885  pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||  in lru_add_drain_all()
[all …]
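The hits at 527 and 533 show the per-CPU drain idiom: a batch is flushed only when pagevec_count() says it is non-empty, and the emptiness test doubles as a cheap "do we need to drain this CPU?" check. A minimal sketch of that idiom follows; my_pvecs, my_drain() and my_need_drain() are hypothetical names, and __pagevec_lru_add() stands in for whichever flush routine the real caller uses:

#include <linux/pagevec.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU batch, mirroring activate_page_pvecs above. */
static DEFINE_PER_CPU(struct pagevec, my_pvecs);

static void my_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(my_pvecs, cpu);

	if (pagevec_count(pvec))	/* flush only non-empty batches */
		__pagevec_lru_add(pvec);
}

static bool my_need_drain(int cpu)
{
	/* cheap emptiness test, as in need_activate_page_drain() */
	return pagevec_count(&per_cpu(my_pvecs, cpu)) != 0;
}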
260  for (i = 0; i < pagevec_count(&pvec); i++) {  in truncate_inode_pages_range()
346  for (i = 0; i < pagevec_count(&pvec); i++) {  in truncate_inode_pages_range()
468  for (i = 0; i < pagevec_count(&pvec); i++) {  in invalidate_mapping_pages()
580  for (i = 0; i < pagevec_count(&pvec); i++) {  in invalidate_inode_pages2_range()
257  count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec));  in __putback_lru_fast()
279  int nr = pagevec_count(pvec);  in __munlock_pagevec()
311  delta_munlocked = -nr + pagevec_count(&pvec_putback);  in __munlock_pagevec()
342  if (pagevec_count(&pvec_putback))  in __munlock_pagevec()
422  for (i = 0; i < pagevec_count(&pvec); i++) {  in shmem_undo_range()
496  for (i = 0; i < pagevec_count(&pvec); i++) {  in shmem_undo_range()
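The truncate, munlock and shmem hits above all share one batched-walk idiom: fill a pagevec, iterate over pagevec_count() entries, release the batch, repeat. A minimal sketch, assuming the older two-argument pagevec_init() and offset-based pagevec_lookup() signatures these call sites used (both changed in later kernels); walk_mapping_pages() is a hypothetical name and the loop body is a placeholder:

#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/sched.h>

/* Hypothetical walker over every page cached in a mapping. */
static void walk_mapping_pages(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t index = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = page->index + 1;
			if (!trylock_page(page))
				continue;
			/* per-page work (truncate, invalidate, ...) */
			unlock_page(page);
		}
		pagevec_release(&pvec);	/* drop the batch's references */
		cond_resched();
	}
}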
47  static inline unsigned pagevec_count(struct pagevec *pvec)  in pagevec_count() function
68  if (pagevec_count(pvec))  in pagevec_release()
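These two header hits come from the helpers below, reproduced as they read in pre-folio kernels (paraphrased from include/linux/pagevec.h): pagevec_count() simply reports the vector's fill level nr, and pagevec_release() calls the real release path only when the vector actually holds pages:

static inline unsigned pagevec_count(struct pagevec *pvec)
{
	return pvec->nr;		/* pages currently in the vector */
}

static inline void pagevec_release(struct pagevec *pvec)
{
	if (pagevec_count(pvec))	/* the line-68 hit above */
		__pagevec_release(pvec);
}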
269  for (i = 0; i < pagevec_count(&pvec); i++) {  in nilfs_copy_dirty_pages()
325  for (i = 0; i < pagevec_count(&pvec); i++) {  in nilfs_copy_back_pages()
387  for (i = 0; i < pagevec_count(&pvec); i++) {  in nilfs_clear_dirty_pages()
569  } while (++i < pagevec_count(&pvec));  in nilfs_find_uncommitted_extent()
713  for (i = 0; i < pagevec_count(&pvec); i++) {  in nilfs_lookup_dirty_data_buffers()
758  for (i = 0; i < pagevec_count(&pvec); i++) {  in nilfs_lookup_dirty_node_buffers()
2152 for (i = 0; i < pagevec_count(&pvec); i++) { in nilfs_btree_lookup_dirty_buffers()
278 PAGEVEC_SIZE - pagevec_count(&pvec)); in nfs_fscache_inode_now_uncached()
310 PAGEVEC_SIZE - pagevec_count(&pvec)); in cifs_fscache_inode_now_uncached()
160 PAGEVEC_SIZE - pagevec_count(&pvec)); in ceph_fscache_inode_now_uncached()
167 PAGEVEC_SIZE - pagevec_count(&pvec)); in v9fs_cache_inode_now_uncached()
756  if (pagevec_count(&pagevec) > 0)  in cachefiles_read_or_alloc_pages()
854  if (pagevec_count(&pagevec) > 0)  in cachefiles_allocate_pages()
387 PAGEVEC_SIZE - pagevec_count(&pvec)); in afs_vnode_cache_now_uncached()
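The five *_inode_now_uncached() hits (nfs, cifs, ceph, 9p, afs) all size their lookup as PAGEVEC_SIZE - pagevec_count(&pvec), i.e. they ask for only as many pages as the pagevec has free slots. A sketch of that loop under the same older-signature assumption as above; sketch_now_uncached() is a hypothetical name and the per-page uncache step is elided:

#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/sched.h>

/* Hypothetical stand-in for the *_inode_now_uncached() callbacks. */
static void sketch_now_uncached(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t first = 0;
	unsigned nr_pages;
	int i;

	pagevec_init(&pvec, 0);
	for (;;) {
		/* request only the slots still free in the vector */
		nr_pages = pagevec_lookup(&pvec, mapping, first,
					  PAGEVEC_SIZE - pagevec_count(&pvec));
		if (!nr_pages)
			break;

		for (i = 0; i < nr_pages; i++)	/* per-page uncache step */
			first = pvec.pages[i]->index + 1;

		pagevec_release(&pvec);		/* drop the batch's refs */
		cond_resched();
	}
}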
115  for (i = 0; i < pagevec_count(pvec); ++i)  in huge_pagevec_release()
380  for (i = 0; i < pagevec_count(&pvec); ++i) {  in remove_inode_hugepages()
1181 for (i = 0; i < pagevec_count(&pvec); i++) { in __fscache_uncache_all_inode_pages()
846 for (i = 0; i < pagevec_count(&pvec); i++) { in xfs_cluster_write()