pvec 2251 arch/x86/kvm/mmu.c static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
pvec 2257 arch/x86/kvm/mmu.c for (i=0; i < pvec->nr; i++)
pvec 2258 arch/x86/kvm/mmu.c if (pvec->page[i].sp == sp)
pvec 2261 arch/x86/kvm/mmu.c pvec->page[pvec->nr].sp = sp;
pvec 2262 arch/x86/kvm/mmu.c pvec->page[pvec->nr].idx = idx;
pvec 2263 arch/x86/kvm/mmu.c pvec->nr++;
pvec 2264 arch/x86/kvm/mmu.c return (pvec->nr == KVM_PAGE_ARRAY_NR);
pvec 2275 arch/x86/kvm/mmu.c struct kvm_mmu_pages *pvec)
pvec 2291 arch/x86/kvm/mmu.c if (mmu_pages_add(pvec, child, i))
pvec 2294 arch/x86/kvm/mmu.c ret = __mmu_unsync_walk(child, pvec);
pvec 2304 arch/x86/kvm/mmu.c if (mmu_pages_add(pvec, child, i))
pvec 2316 arch/x86/kvm/mmu.c struct kvm_mmu_pages *pvec)
pvec 2318 arch/x86/kvm/mmu.c pvec->nr = 0;
pvec 2322 arch/x86/kvm/mmu.c mmu_pages_add(pvec, sp, INVALID_INDEX);
pvec 2323 arch/x86/kvm/mmu.c return __mmu_unsync_walk(sp, pvec);
pvec 2436 arch/x86/kvm/mmu.c #define for_each_sp(pvec, sp, parents, i) \
pvec 2437 arch/x86/kvm/mmu.c for (i = mmu_pages_first(&pvec, &parents); \
pvec 2438 arch/x86/kvm/mmu.c i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \
pvec 2439 arch/x86/kvm/mmu.c i = mmu_pages_next(&pvec, &parents, i))
pvec 2441 arch/x86/kvm/mmu.c static int mmu_pages_next(struct kvm_mmu_pages *pvec,
pvec 2447 arch/x86/kvm/mmu.c for (n = i+1; n < pvec->nr; n++) {
pvec 2448 arch/x86/kvm/mmu.c struct kvm_mmu_page *sp = pvec->page[n].sp;
pvec 2449 arch/x86/kvm/mmu.c unsigned idx = pvec->page[n].idx;
pvec 2462 arch/x86/kvm/mmu.c static int mmu_pages_first(struct kvm_mmu_pages *pvec,
pvec 2468 arch/x86/kvm/mmu.c if (pvec->nr == 0)
pvec 2471 arch/x86/kvm/mmu.c WARN_ON(pvec->page[0].idx != INVALID_INDEX);
pvec 2473 arch/x86/kvm/mmu.c sp = pvec->page[0].sp;
pvec 2483 arch/x86/kvm/mmu.c return mmu_pages_next(pvec, parents, 0);
pvec 525 drivers/gpu/drm/drm_gem.c static void drm_gem_check_release_pagevec(struct pagevec *pvec)
pvec 527 drivers/gpu/drm/drm_gem.c check_move_unevictable_pages(pvec);
pvec 528 drivers/gpu/drm/drm_gem.c __pagevec_release(pvec);
pvec 557 drivers/gpu/drm/drm_gem.c struct pagevec pvec;
pvec 596 drivers/gpu/drm/drm_gem.c pagevec_init(&pvec);
pvec 598 drivers/gpu/drm/drm_gem.c if (!pagevec_add(&pvec, pages[i]))
pvec 599 drivers/gpu/drm/drm_gem.c drm_gem_check_release_pagevec(&pvec);
pvec 601 drivers/gpu/drm/drm_gem.c if (pagevec_count(&pvec))
pvec 602 drivers/gpu/drm/drm_gem.c drm_gem_check_release_pagevec(&pvec);
pvec 621 drivers/gpu/drm/drm_gem.c struct pagevec pvec;
pvec 634 drivers/gpu/drm/drm_gem.c pagevec_init(&pvec);
pvec 646 drivers/gpu/drm/drm_gem.c if (!pagevec_add(&pvec, pages[i]))
pvec 647 drivers/gpu/drm/drm_gem.c drm_gem_check_release_pagevec(&pvec);
pvec 649 drivers/gpu/drm/drm_gem.c if (pagevec_count(&pvec))
pvec 650 drivers/gpu/drm/drm_gem.c drm_gem_check_release_pagevec(&pvec);
pvec 660 drivers/gpu/drm/etnaviv/etnaviv_gem.c struct page **pvec = NULL;
pvec 669 drivers/gpu/drm/etnaviv/etnaviv_gem.c pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
pvec 670 drivers/gpu/drm/etnaviv/etnaviv_gem.c if (!pvec)
pvec 676 drivers/gpu/drm/etnaviv/etnaviv_gem.c struct page **pages = pvec + pinned;
pvec 681 drivers/gpu/drm/etnaviv/etnaviv_gem.c release_pages(pvec, pinned);
pvec 682 drivers/gpu/drm/etnaviv/etnaviv_gem.c kvfree(pvec);
pvec 690 drivers/gpu/drm/etnaviv/etnaviv_gem.c etnaviv_obj->pages = pvec;
pvec 19 drivers/gpu/drm/i915/gem/i915_gem_shmem.c static void check_release_pagevec(struct pagevec *pvec)
pvec 21 drivers/gpu/drm/i915/gem/i915_gem_shmem.c check_move_unevictable_pages(pvec);
pvec 22 drivers/gpu/drm/i915/gem/i915_gem_shmem.c __pagevec_release(pvec);
pvec 39 drivers/gpu/drm/i915/gem/i915_gem_shmem.c struct pagevec pvec;
pvec 191 drivers/gpu/drm/i915/gem/i915_gem_shmem.c pagevec_init(&pvec);
pvec 193 drivers/gpu/drm/i915/gem/i915_gem_shmem.c if (!pagevec_add(&pvec, page))
pvec 194 drivers/gpu/drm/i915/gem/i915_gem_shmem.c check_release_pagevec(&pvec);
pvec 196 drivers/gpu/drm/i915/gem/i915_gem_shmem.c if (pagevec_count(&pvec))
pvec 197 drivers/gpu/drm/i915/gem/i915_gem_shmem.c check_release_pagevec(&pvec);
pvec 297 drivers/gpu/drm/i915/gem/i915_gem_shmem.c struct pagevec pvec;
pvec 309 drivers/gpu/drm/i915/gem/i915_gem_shmem.c pagevec_init(&pvec);
pvec 317 drivers/gpu/drm/i915/gem/i915_gem_shmem.c if (!pagevec_add(&pvec, page))
pvec 318 drivers/gpu/drm/i915/gem/i915_gem_shmem.c check_release_pagevec(&pvec);
pvec 320 drivers/gpu/drm/i915/gem/i915_gem_shmem.c if (pagevec_count(&pvec))
pvec 321 drivers/gpu/drm/i915/gem/i915_gem_shmem.c check_release_pagevec(&pvec);
pvec 430 drivers/gpu/drm/i915/gem/i915_gem_userptr.c struct page **pvec, unsigned long num_pages)
pvec 442 drivers/gpu/drm/i915/gem/i915_gem_userptr.c ret = __sg_alloc_table_from_pages(st, pvec, num_pages,
pvec 478 drivers/gpu/drm/i915/gem/i915_gem_userptr.c struct page **pvec;
pvec 484 drivers/gpu/drm/i915/gem/i915_gem_userptr.c pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
pvec 485 drivers/gpu/drm/i915/gem/i915_gem_userptr.c if (pvec != NULL) {
pvec 501 drivers/gpu/drm/i915/gem/i915_gem_userptr.c pvec + pinned, NULL, NULL);
pvec 517 drivers/gpu/drm/i915/gem/i915_gem_userptr.c pages = __i915_gem_userptr_alloc_pages(obj, pvec,
pvec 531 drivers/gpu/drm/i915/gem/i915_gem_userptr.c release_pages(pvec, pinned);
pvec 532 drivers/gpu/drm/i915/gem/i915_gem_userptr.c kvfree(pvec);
pvec 584 drivers/gpu/drm/i915/gem/i915_gem_userptr.c struct page **pvec;
pvec 614 drivers/gpu/drm/i915/gem/i915_gem_userptr.c pvec = NULL;
pvec 618 drivers/gpu/drm/i915/gem/i915_gem_userptr.c pvec = kvmalloc_array(num_pages, sizeof(struct page *),
pvec 630 drivers/gpu/drm/i915/gem/i915_gem_userptr.c if (pvec) /* defer to worker if malloc fails */
pvec 634 drivers/gpu/drm/i915/gem/i915_gem_userptr.c pvec);
pvec 645 drivers/gpu/drm/i915/gem/i915_gem_userptr.c pages = __i915_gem_userptr_alloc_pages(obj, pvec, num_pages);
pvec 652 drivers/gpu/drm/i915/gem/i915_gem_userptr.c release_pages(pvec, pinned);
pvec 653 drivers/gpu/drm/i915/gem/i915_gem_userptr.c kvfree(pvec);
pvec 332 drivers/gpu/drm/i915/i915_gem_gtt.c pagevec_init(&stash->pvec);
pvec 341 drivers/gpu/drm/i915/i915_gem_gtt.c if (likely(stash->pvec.nr))
pvec 342 drivers/gpu/drm/i915/i915_gem_gtt.c page = stash->pvec.pages[--stash->pvec.nr];
pvec 348 drivers/gpu/drm/i915/i915_gem_gtt.c static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
pvec 354 drivers/gpu/drm/i915/i915_gem_gtt.c nr = min_t(typeof(nr), pvec->nr, pagevec_space(&stash->pvec));
pvec 355 drivers/gpu/drm/i915/i915_gem_gtt.c memcpy(stash->pvec.pages + stash->pvec.nr,
pvec 356 drivers/gpu/drm/i915/i915_gem_gtt.c pvec->pages + pvec->nr - nr,
pvec 357 drivers/gpu/drm/i915/i915_gem_gtt.c sizeof(pvec->pages[0]) * nr);
pvec 358 drivers/gpu/drm/i915/i915_gem_gtt.c stash->pvec.nr += nr;
pvec 362 drivers/gpu/drm/i915/i915_gem_gtt.c pvec->nr -= nr;
pvec 428 drivers/gpu/drm/i915/i915_gem_gtt.c struct pagevec *pvec = &vm->free_pages.pvec;
pvec 432 drivers/gpu/drm/i915/i915_gem_gtt.c GEM_BUG_ON(!pagevec_count(pvec));
pvec 439 drivers/gpu/drm/i915/i915_gem_gtt.c stash_push_pagevec(&vm->i915->mm.wc_stash, pvec);
pvec 447 drivers/gpu/drm/i915/i915_gem_gtt.c if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1))
pvec 455 drivers/gpu/drm/i915/i915_gem_gtt.c stack = *pvec;
pvec 456 drivers/gpu/drm/i915/i915_gem_gtt.c pagevec_reinit(pvec);
pvec 459 drivers/gpu/drm/i915/i915_gem_gtt.c pvec = &stack;
pvec 460 drivers/gpu/drm/i915/i915_gem_gtt.c set_pages_array_wb(pvec->pages, pvec->nr);
pvec 465 drivers/gpu/drm/i915/i915_gem_gtt.c __pagevec_release(pvec);
pvec 479 drivers/gpu/drm/i915/i915_gem_gtt.c while (!pagevec_space(&vm->free_pages.pvec))
pvec 481 drivers/gpu/drm/i915/i915_gem_gtt.c GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec) >= PAGEVEC_SIZE);
pvec 482 drivers/gpu/drm/i915/i915_gem_gtt.c pagevec_add(&vm->free_pages.pvec, page);
pvec 489 drivers/gpu/drm/i915/i915_gem_gtt.c if (pagevec_count(&vm->free_pages.pvec))
pvec 491 drivers/gpu/drm/i915/i915_gem_gtt.c GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
pvec 2788 drivers/gpu/drm/i915/i915_gem_gtt.c struct pagevec *pvec;
pvec 2794 drivers/gpu/drm/i915/i915_gem_gtt.c pvec = &i915->mm.wc_stash.pvec;
pvec 2795 drivers/gpu/drm/i915/i915_gem_gtt.c if (pvec->nr) {
pvec 2796 drivers/gpu/drm/i915/i915_gem_gtt.c set_pages_array_wb(pvec->pages, pvec->nr);
pvec 2797 drivers/gpu/drm/i915/i915_gem_gtt.c __pagevec_release(pvec);
pvec 285 drivers/gpu/drm/i915/i915_gem_gtt.h struct pagevec pvec;
pvec 1703 drivers/mtd/chips/cfi_cmdset_0001.c unsigned long adr, const struct kvec **pvec,
pvec 1789 drivers/mtd/chips/cfi_cmdset_0001.c vec = *pvec;
pvec 1819 drivers/mtd/chips/cfi_cmdset_0001.c *pvec = vec;
pvec 377 drivers/mtd/lpddr/lpddr_cmds.c unsigned long adr, const struct kvec **pvec,
pvec 409 drivers/mtd/lpddr/lpddr_cmds.c vec = *pvec;
pvec 439 drivers/mtd/lpddr/lpddr_cmds.c *pvec = vec;
pvec 3935 fs/btrfs/extent_io.c struct pagevec pvec;
pvec 3942 fs/btrfs/extent_io.c pagevec_init(&pvec);
pvec 3964 fs/btrfs/extent_io.c (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
pvec 3969 fs/btrfs/extent_io.c struct page *page = pvec.pages[i];
pvec 4028 fs/btrfs/extent_io.c pagevec_release(&pvec);
pvec 4103 fs/btrfs/extent_io.c struct pagevec pvec;
pvec 4124 fs/btrfs/extent_io.c pagevec_init(&pvec);
pvec 4162 fs/btrfs/extent_io.c (nr_pages = pagevec_lookup_range_tag(&pvec, mapping,
pvec 4167 fs/btrfs/extent_io.c struct page *page = pvec.pages[i];
pvec 4215 fs/btrfs/extent_io.c pagevec_release(&pvec);
pvec 1578 fs/buffer.c struct pagevec pvec;
pvec 1586 fs/buffer.c pagevec_init(&pvec);
pvec 1587 fs/buffer.c while (pagevec_lookup_range(&pvec, bd_mapping, &index, end)) {
pvec 1588 fs/buffer.c count = pagevec_count(&pvec);
pvec 1590 fs/buffer.c struct page *page = pvec.pages[i];
pvec 1619 fs/buffer.c pagevec_release(&pvec);
pvec 788 fs/ceph/addr.c struct pagevec pvec;
pvec 812 fs/ceph/addr.c pagevec_init(&pvec);
pvec 870 fs/ceph/addr.c pvec_pages = pagevec_lookup_range_nr_tag(&pvec, mapping, &index,
pvec 877 fs/ceph/addr.c page = pvec.pages[i];
pvec 997 fs/ceph/addr.c pvec.pages[i] = NULL;
pvec 1009 fs/ceph/addr.c if (!pvec.pages[j])
pvec 1012 fs/ceph/addr.c pvec.pages[n] = pvec.pages[j];
pvec 1015 fs/ceph/addr.c pvec.nr = n;
pvec 1020 fs/ceph/addr.c pagevec_release(&pvec);
pvec 1145 fs/ceph/addr.c dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr,
pvec 1146 fs/ceph/addr.c pvec.nr ? pvec.pages[0] : NULL);
pvec 1147 fs/ceph/addr.c pagevec_release(&pvec);
pvec 1164 fs/ceph/addr.c (nr = pagevec_lookup_tag(&pvec, mapping, &index,
pvec 1167 fs/ceph/addr.c page = pvec.pages[i];
pvec 1172 fs/ceph/addr.c pagevec_release(&pvec);
pvec 1694 fs/ext4/inode.c struct pagevec pvec;
pvec 1711 fs/ext4/inode.c pagevec_init(&pvec);
pvec 1713 fs/ext4/inode.c nr_pages = pagevec_lookup_range(&pvec, mapping, &index, end);
pvec 1717 fs/ext4/inode.c struct page *page = pvec.pages[i];
pvec 1729 fs/ext4/inode.c pagevec_release(&pvec);
pvec 2368 fs/ext4/inode.c struct pagevec pvec;
pvec 2383 fs/ext4/inode.c pagevec_init(&pvec);
pvec 2385 fs/ext4/inode.c nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping,
pvec 2390 fs/ext4/inode.c struct page *page = pvec.pages[i];
pvec 2412 fs/ext4/inode.c pagevec_release(&pvec);
pvec 2433 fs/ext4/inode.c pagevec_release(&pvec);
pvec 2437 fs/ext4/inode.c pagevec_release(&pvec);
pvec 2634 fs/ext4/inode.c struct pagevec pvec;
pvec 2650 fs/ext4/inode.c pagevec_init(&pvec);
pvec 2654 fs/ext4/inode.c nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
pvec 2660 fs/ext4/inode.c struct page *page = pvec.pages[i];
pvec 2709 fs/ext4/inode.c pagevec_release(&pvec);
pvec 2714 fs/ext4/inode.c pagevec_release(&pvec);
pvec 367 fs/f2fs/checkpoint.c struct pagevec pvec;
pvec 375 fs/f2fs/checkpoint.c pagevec_init(&pvec);
pvec 379 fs/f2fs/checkpoint.c while ((nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
pvec 384 fs/f2fs/checkpoint.c struct page *page = pvec.pages[i];
pvec 389 fs/f2fs/checkpoint.c pagevec_release(&pvec);
pvec 419 fs/f2fs/checkpoint.c pagevec_release(&pvec);
pvec 2252 fs/f2fs/data.c struct pagevec pvec;
pvec 2266 fs/f2fs/data.c pagevec_init(&pvec);
pvec 2300 fs/f2fs/data.c nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
pvec 2306 fs/f2fs/data.c struct page *page = pvec.pages[i];
pvec 2378 fs/f2fs/data.c pagevec_release(&pvec);
pvec 1445 fs/f2fs/node.c struct pagevec pvec;
pvec 1449 fs/f2fs/node.c pagevec_init(&pvec);
pvec 1452 fs/f2fs/node.c while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
pvec 1457 fs/f2fs/node.c struct page *page = pvec.pages[i];
pvec 1461 fs/f2fs/node.c pagevec_release(&pvec);
pvec 1492 fs/f2fs/node.c pagevec_release(&pvec);
pvec 1654 fs/f2fs/node.c struct pagevec pvec;
pvec 1668 fs/f2fs/node.c pagevec_init(&pvec);
pvec 1671 fs/f2fs/node.c while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
pvec 1676 fs/f2fs/node.c struct page *page = pvec.pages[i];
pvec 1681 fs/f2fs/node.c pagevec_release(&pvec);
pvec 1746 fs/f2fs/node.c pagevec_release(&pvec);
pvec 1813 fs/f2fs/node.c struct pagevec pvec;
pvec 1819 fs/f2fs/node.c pagevec_init(&pvec);
pvec 1824 fs/f2fs/node.c while (!done && (nr_pages = pagevec_lookup_tag(&pvec,
pvec 1829 fs/f2fs/node.c struct page *page = pvec.pages[i];
pvec 1904 fs/f2fs/node.c pagevec_release(&pvec);
pvec 1219 fs/fscache/page.c struct pagevec pvec;
pvec 1230 fs/fscache/page.c pagevec_init(&pvec);
pvec 1233 fs/fscache/page.c if (!pagevec_lookup(&pvec, mapping, &next))
pvec 1235 fs/fscache/page.c for (i = 0; i < pagevec_count(&pvec); i++) {
pvec 1236 fs/fscache/page.c struct page *page = pvec.pages[i];
pvec 1242 fs/fscache/page.c pagevec_release(&pvec);
pvec 240 fs/gfs2/aops.c struct pagevec *pvec,
pvec 255 fs/gfs2/aops.c struct page *page = pvec->pages[i];
pvec 338 fs/gfs2/aops.c struct pagevec pvec;
pvec 348 fs/gfs2/aops.c pagevec_init(&pvec);
pvec 374 fs/gfs2/aops.c nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
pvec 379 fs/gfs2/aops.c ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
pvec 384 fs/gfs2/aops.c pagevec_release(&pvec);
pvec 115 fs/hugetlbfs/inode.c static void huge_pagevec_release(struct pagevec *pvec)
pvec 119 fs/hugetlbfs/inode.c for (i = 0; i < pagevec_count(pvec); ++i)
pvec 120 fs/hugetlbfs/inode.c put_page(pvec->pages[i]);
pvec 122 fs/hugetlbfs/inode.c pagevec_reinit(pvec);
pvec 422 fs/hugetlbfs/inode.c struct pagevec pvec;
pvec 429 fs/hugetlbfs/inode.c pagevec_init(&pvec);
pvec 435 fs/hugetlbfs/inode.c if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
pvec 438 fs/hugetlbfs/inode.c for (i = 0; i < pagevec_count(&pvec); ++i) {
pvec 439 fs/hugetlbfs/inode.c struct page *page = pvec.pages[i];
pvec 487 fs/hugetlbfs/inode.c huge_pagevec_release(&pvec);
pvec 80 fs/iomap/seek.c struct pagevec pvec;
pvec 85 fs/iomap/seek.c pagevec_init(&pvec);
pvec 90 fs/iomap/seek.c nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, &index,
pvec 96 fs/iomap/seek.c struct page *page = pvec.pages[i];
pvec 102 fs/iomap/seek.c pagevec_release(&pvec);
pvec 115 fs/iomap/seek.c pagevec_release(&pvec);
pvec 2140 fs/nilfs2/btree.c struct pagevec pvec;
pvec 2150 fs/nilfs2/btree.c pagevec_init(&pvec);
pvec 2152 fs/nilfs2/btree.c while (pagevec_lookup_tag(&pvec, btcache, &index,
pvec 2154 fs/nilfs2/btree.c for (i = 0; i < pagevec_count(&pvec); i++) {
pvec 2155 fs/nilfs2/btree.c bh = head = page_buffers(pvec.pages[i]);
pvec 2162 fs/nilfs2/btree.c pagevec_release(&pvec);
pvec 244 fs/nilfs2/page.c struct pagevec pvec;
pvec 249 fs/nilfs2/page.c pagevec_init(&pvec);
pvec 251 fs/nilfs2/page.c if (!pagevec_lookup_tag(&pvec, smap, &index, PAGECACHE_TAG_DIRTY))
pvec 254 fs/nilfs2/page.c for (i = 0; i < pagevec_count(&pvec); i++) {
pvec 255 fs/nilfs2/page.c struct page *page = pvec.pages[i], *dpage;
pvec 279 fs/nilfs2/page.c pagevec_release(&pvec);
pvec 298 fs/nilfs2/page.c struct pagevec pvec;
pvec 302 fs/nilfs2/page.c pagevec_init(&pvec);
pvec 304 fs/nilfs2/page.c n = pagevec_lookup(&pvec, smap, &index);
pvec 308 fs/nilfs2/page.c for (i = 0; i < pagevec_count(&pvec); i++) {
pvec 309 fs/nilfs2/page.c struct page *page = pvec.pages[i], *dpage;
pvec 348 fs/nilfs2/page.c pagevec_release(&pvec);
pvec 361 fs/nilfs2/page.c struct pagevec pvec;
pvec 365 fs/nilfs2/page.c pagevec_init(&pvec);
pvec 367 fs/nilfs2/page.c while (pagevec_lookup_tag(&pvec, mapping, &index,
pvec 369 fs/nilfs2/page.c for (i = 0; i < pagevec_count(&pvec); i++) {
pvec 370 fs/nilfs2/page.c struct page *page = pvec.pages[i];
pvec 376 fs/nilfs2/page.c pagevec_release(&pvec);
pvec 500 fs/nilfs2/page.c struct pagevec pvec;
pvec 509 fs/nilfs2/page.c pagevec_init(&pvec);
pvec 512 fs/nilfs2/page.c pvec.nr = find_get_pages_contig(inode->i_mapping, index, PAGEVEC_SIZE,
pvec 513 fs/nilfs2/page.c pvec.pages);
pvec 514 fs/nilfs2/page.c if (pvec.nr == 0)
pvec 517 fs/nilfs2/page.c if (length > 0 && pvec.pages[0]->index > index)
pvec 520 fs/nilfs2/page.c b = pvec.pages[0]->index << (PAGE_SHIFT - inode->i_blkbits);
pvec 523 fs/nilfs2/page.c page = pvec.pages[i];
pvec 549 fs/nilfs2/page.c } while (++i < pagevec_count(&pvec));
pvec 552 fs/nilfs2/page.c pagevec_release(&pvec);
pvec 559 fs/nilfs2/page.c pagevec_release(&pvec);
pvec 688 fs/nilfs2/segment.c struct pagevec pvec;
pvec 702 fs/nilfs2/segment.c pagevec_init(&pvec);
pvec 705 fs/nilfs2/segment.c !pagevec_lookup_range_tag(&pvec, mapping, &index, last,
pvec 709 fs/nilfs2/segment.c for (i = 0; i < pagevec_count(&pvec); i++) {
pvec 711 fs/nilfs2/segment.c struct page *page = pvec.pages[i];
pvec 726 fs/nilfs2/segment.c pagevec_release(&pvec);
pvec 732 fs/nilfs2/segment.c pagevec_release(&pvec);
pvec 742 fs/nilfs2/segment.c struct pagevec pvec;
pvec 747 fs/nilfs2/segment.c pagevec_init(&pvec);
pvec 749 fs/nilfs2/segment.c while (pagevec_lookup_tag(&pvec, mapping, &index,
pvec 751 fs/nilfs2/segment.c for (i = 0; i < pagevec_count(&pvec); i++) {
pvec 752 fs/nilfs2/segment.c bh = head = page_buffers(pvec.pages[i]);
pvec 763 fs/nilfs2/segment.c pagevec_release(&pvec);
pvec 615 include/linux/pagemap.h struct pagevec *pvec);
pvec 26 include/linux/pagevec.h void __pagevec_release(struct pagevec *pvec);
pvec 27 include/linux/pagevec.h void __pagevec_lru_add(struct pagevec *pvec);
pvec 28 include/linux/pagevec.h unsigned pagevec_lookup_entries(struct pagevec *pvec,
pvec 32 include/linux/pagevec.h void pagevec_remove_exceptionals(struct pagevec *pvec);
pvec 33 include/linux/pagevec.h unsigned pagevec_lookup_range(struct pagevec *pvec,
pvec 36 include/linux/pagevec.h static inline unsigned pagevec_lookup(struct pagevec *pvec,
pvec 40 include/linux/pagevec.h return pagevec_lookup_range(pvec, mapping, start, (pgoff_t)-1);
pvec 43 include/linux/pagevec.h unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
pvec 46 include/linux/pagevec.h unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec,
pvec 49 include/linux/pagevec.h static inline unsigned pagevec_lookup_tag(struct pagevec *pvec,
pvec 52 include/linux/pagevec.h return pagevec_lookup_range_tag(pvec, mapping, index, (pgoff_t)-1, tag);
pvec 55 include/linux/pagevec.h static inline void pagevec_init(struct pagevec *pvec)
pvec 57 include/linux/pagevec.h pvec->nr = 0;
pvec 58 include/linux/pagevec.h pvec->percpu_pvec_drained = false;
pvec 61 include/linux/pagevec.h static inline void pagevec_reinit(struct pagevec *pvec)
pvec 63 include/linux/pagevec.h pvec->nr = 0;
pvec 66 include/linux/pagevec.h static inline unsigned pagevec_count(struct pagevec *pvec)
pvec 68 include/linux/pagevec.h return pvec->nr;
pvec 71 include/linux/pagevec.h static inline unsigned pagevec_space(struct pagevec *pvec)
pvec 73 include/linux/pagevec.h return PAGEVEC_SIZE - pvec->nr;
pvec 79 include/linux/pagevec.h static inline unsigned pagevec_add(struct pagevec *pvec, struct page *page)
pvec 81 include/linux/pagevec.h pvec->pages[pvec->nr++] = page;
pvec 82 include/linux/pagevec.h return pagevec_space(pvec);
pvec 85 include/linux/pagevec.h static inline void pagevec_release(struct pagevec *pvec)
pvec 87 include/linux/pagevec.h if (pagevec_count(pvec))
pvec 88 include/linux/pagevec.h __pagevec_release(pvec);
pvec 378 include/linux/swap.h extern void check_move_unevictable_pages(struct pagevec *pvec);
pvec 295 mm/filemap.c struct pagevec *pvec)
pvec 297 mm/filemap.c XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index);
pvec 304 mm/filemap.c if (i >= pagevec_count(pvec))
pvec 317 mm/filemap.c if (page != pvec->pages[i]) {
pvec 318 mm/filemap.c VM_BUG_ON_PAGE(page->index > pvec->pages[i]->index,
pvec 343 mm/filemap.c struct pagevec *pvec)
pvec 348 mm/filemap.c if (!pagevec_count(pvec))
pvec 352 mm/filemap.c for (i = 0; i < pagevec_count(pvec); i++) {
pvec 353 mm/filemap.c trace_mm_filemap_delete_from_page_cache(pvec->pages[i]);
pvec 355 mm/filemap.c unaccount_page_cache_page(mapping, pvec->pages[i]);
pvec 357 mm/filemap.c page_cache_delete_batch(mapping, pvec);
pvec 360 mm/filemap.c for (i = 0; i < pagevec_count(pvec); i++)
pvec 361 mm/filemap.c page_cache_free_page(mapping, pvec->pages[i]);
pvec 508 mm/filemap.c struct pagevec pvec;
pvec 514 mm/filemap.c pagevec_init(&pvec);
pvec 518 mm/filemap.c nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
pvec 524 mm/filemap.c struct page *page = pvec.pages[i];
pvec 529 mm/filemap.c pagevec_release(&pvec);
pvec 246 mm/mlock.c static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec,
pvec 253 mm/mlock.c pagevec_add(pvec, page);
pvec 269 mm/mlock.c static void __putback_lru_fast(struct pagevec *pvec, int pgrescued)
pvec 271 mm/mlock.c count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec));
pvec 276 mm/mlock.c __pagevec_lru_add(pvec);
pvec 290 mm/mlock.c static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
pvec 293 mm/mlock.c int nr = pagevec_count(pvec);
pvec 303 mm/mlock.c struct page *page = pvec->pages[i];
pvec 324 mm/mlock.c pagevec_add(&pvec_putback, pvec->pages[i]);
pvec 325 mm/mlock.c pvec->pages[i] = NULL;
pvec 335 mm/mlock.c struct page *page = pvec->pages[i];
pvec 374 mm/mlock.c static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
pvec 420 mm/mlock.c if (pagevec_add(pvec, page) == 0)
pvec 454 mm/mlock.c struct pagevec pvec;
pvec 457 mm/mlock.c pagevec_init(&pvec);
pvec 488 mm/mlock.c pagevec_add(&pvec, page);
pvec 497 mm/mlock.c start = __munlock_pagevec_fill(&pvec, vma,
pvec 499 mm/mlock.c __munlock_pagevec(&pvec, zone);
pvec 2165 mm/page-writeback.c struct pagevec pvec;
pvec 2174 mm/page-writeback.c pagevec_init(&pvec);
pvec 2195 mm/page-writeback.c nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
pvec 2201 mm/page-writeback.c struct page *page = pvec.pages[i];
pvec 2277 mm/page-writeback.c pagevec_release(&pvec);
pvec 766 mm/shmem.c struct pagevec pvec;
pvec 770 mm/shmem.c pagevec_init(&pvec);
pvec 779 mm/shmem.c pvec.nr = find_get_entries(mapping, index,
pvec 780 mm/shmem.c PAGEVEC_SIZE, pvec.pages, indices);
pvec 781 mm/shmem.c if (!pvec.nr)
pvec 783 mm/shmem.c index = indices[pvec.nr - 1] + 1;
pvec 784 mm/shmem.c pagevec_remove_exceptionals(&pvec);
pvec 785 mm/shmem.c check_move_unevictable_pages(&pvec);
pvec 786 mm/shmem.c pagevec_release(&pvec);
pvec 804 mm/shmem.c struct pagevec pvec;
pvec 813 mm/shmem.c pagevec_init(&pvec);
pvec 816 mm/shmem.c pvec.nr = find_get_entries(mapping, index,
pvec 818 mm/shmem.c pvec.pages, indices);
pvec 819 mm/shmem.c if (!pvec.nr)
pvec 821 mm/shmem.c for (i = 0; i < pagevec_count(&pvec); i++) {
pvec 822 mm/shmem.c struct page *page = pvec.pages[i];
pvec 869 mm/shmem.c pagevec_remove_exceptionals(&pvec);
pvec 870 mm/shmem.c pagevec_release(&pvec);
pvec 907 mm/shmem.c pvec.nr = find_get_entries(mapping, index,
pvec 909 mm/shmem.c pvec.pages, indices);
pvec 910 mm/shmem.c if (!pvec.nr) {
pvec 918 mm/shmem.c for (i = 0; i < pagevec_count(&pvec); i++) {
pvec 919 mm/shmem.c struct page *page = pvec.pages[i];
pvec 979 mm/shmem.c pagevec_remove_exceptionals(&pvec);
pvec 980 mm/shmem.c pagevec_release(&pvec);
pvec 1168 mm/shmem.c static int shmem_unuse_swap_entries(struct inode *inode, struct pagevec pvec,
pvec 1176 mm/shmem.c for (i = 0; i < pvec.nr; i++) {
pvec 1177 mm/shmem.c struct page *page = pvec.pages[i];
pvec 1205 mm/shmem.c struct pagevec pvec;
pvec 1210 mm/shmem.c pagevec_init(&pvec);
pvec 1217 mm/shmem.c pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries,
pvec 1218 mm/shmem.c pvec.pages, indices,
pvec 1220 mm/shmem.c if (pvec.nr == 0) {
pvec 1225 mm/shmem.c ret = shmem_unuse_swap_entries(inode, pvec, indices);
pvec 1237 mm/shmem.c start = indices[pvec.nr - 1];
pvec 2646 mm/shmem.c struct pagevec pvec;
pvec 2651 mm/shmem.c pagevec_init(&pvec);
pvec 2652 mm/shmem.c pvec.nr = 1; /* start small: we may be there already */
pvec 2654 mm/shmem.c pvec.nr = find_get_entries(mapping, index,
pvec 2655 mm/shmem.c pvec.nr, pvec.pages, indices);
pvec 2656 mm/shmem.c if (!pvec.nr) {
pvec 2661 mm/shmem.c for (i = 0; i < pvec.nr; i++, index++) {
pvec 2669 mm/shmem.c page = pvec.pages[i];
pvec 2681 mm/shmem.c pagevec_remove_exceptionals(&pvec);
pvec 2682 mm/shmem.c pagevec_release(&pvec);
pvec 2683 mm/shmem.c pvec.nr = PAGEVEC_SIZE;
pvec 190 mm/swap.c static void pagevec_lru_move_fn(struct pagevec *pvec,
pvec 199 mm/swap.c for (i = 0; i < pagevec_count(pvec); i++) {
pvec 200 mm/swap.c struct page *page = pvec->pages[i];
pvec 215 mm/swap.c release_pages(pvec->pages, pvec->nr);
pvec 216 mm/swap.c pagevec_reinit(pvec);
pvec 236 mm/swap.c static void pagevec_move_tail(struct pagevec *pvec)
pvec 240 mm/swap.c pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
pvec 253 mm/swap.c struct pagevec *pvec;
pvec 258 mm/swap.c pvec = this_cpu_ptr(&lru_rotate_pvecs);
pvec 259 mm/swap.c if (!pagevec_add(pvec, page) || PageCompound(page))
pvec 260 mm/swap.c pagevec_move_tail(pvec);
pvec 296 mm/swap.c struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);
pvec 298 mm/swap.c if (pagevec_count(pvec))
pvec 299 mm/swap.c pagevec_lru_move_fn(pvec, __activate_page, NULL);
pvec 311 mm/swap.c struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
pvec 314 mm/swap.c if (!pagevec_add(pvec, page) || PageCompound(page))
pvec 315 mm/swap.c pagevec_lru_move_fn(pvec, __activate_page, NULL);
pvec 338 mm/swap.c struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
pvec 351 mm/swap.c for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
pvec 352 mm/swap.c struct page *pagevec_page = pvec->pages[i];
pvec 402 mm/swap.c struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
pvec 405 mm/swap.c if (!pagevec_add(pvec, page) || PageCompound(page))
pvec 406 mm/swap.c __pagevec_lru_add(pvec);
pvec 591 mm/swap.c struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);
pvec 593 mm/swap.c if (pagevec_count(pvec))
pvec 594 mm/swap.c __pagevec_lru_add(pvec);
pvec 596 mm/swap.c pvec = &per_cpu(lru_rotate_pvecs, cpu);
pvec 597 mm/swap.c if (pagevec_count(pvec)) {
pvec 602 mm/swap.c pagevec_move_tail(pvec);
pvec 606 mm/swap.c pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
pvec 607 mm/swap.c if (pagevec_count(pvec))
pvec 608 mm/swap.c pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
pvec 610 mm/swap.c pvec = &per_cpu(lru_deactivate_pvecs, cpu);
pvec 611 mm/swap.c if (pagevec_count(pvec))
pvec 612 mm/swap.c pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
pvec 614 mm/swap.c pvec = &per_cpu(lru_lazyfree_pvecs, cpu);
pvec 615 mm/swap.c if (pagevec_count(pvec))
pvec 616 mm/swap.c pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
pvec 639 mm/swap.c struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
pvec 641 mm/swap.c if (!pagevec_add(pvec, page) || PageCompound(page))
pvec 642 mm/swap.c pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
pvec 658 mm/swap.c struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
pvec 661 mm/swap.c if (!pagevec_add(pvec, page) || PageCompound(page))
pvec 662 mm/swap.c pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
pvec 678 mm/swap.c struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);
pvec 681 mm/swap.c if (!pagevec_add(pvec, page) || PageCompound(page))
pvec 682 mm/swap.c pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
pvec 856 mm/swap.c void __pagevec_release(struct pagevec *pvec)
pvec 858 mm/swap.c if (!pvec->percpu_pvec_drained) {
pvec 860 mm/swap.c pvec->percpu_pvec_drained = true;
pvec 862 mm/swap.c release_pages(pvec->pages, pagevec_count(pvec));
pvec 863 mm/swap.c pagevec_reinit(pvec);
pvec 964 mm/swap.c void __pagevec_lru_add(struct pagevec *pvec)
pvec 966 mm/swap.c pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
pvec 990 mm/swap.c unsigned pagevec_lookup_entries(struct pagevec *pvec,
pvec 995 mm/swap.c pvec->nr = find_get_entries(mapping, start, nr_entries,
pvec 996 mm/swap.c pvec->pages, indices);
pvec 997 mm/swap.c return pagevec_count(pvec);
pvec 1009 mm/swap.c void pagevec_remove_exceptionals(struct pagevec *pvec)
pvec 1013 mm/swap.c for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
pvec 1014 mm/swap.c struct page *page = pvec->pages[i];
pvec 1016 mm/swap.c pvec->pages[j++] = page;
pvec 1018 mm/swap.c pvec->nr = j;
pvec 1041 mm/swap.c unsigned pagevec_lookup_range(struct pagevec *pvec,
pvec 1044 mm/swap.c pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE,
pvec 1045 mm/swap.c pvec->pages);
pvec 1046 mm/swap.c return pagevec_count(pvec);
pvec 1050 mm/swap.c unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
pvec 1054 mm/swap.c pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
pvec 1055 mm/swap.c PAGEVEC_SIZE, pvec->pages);
pvec 1056 mm/swap.c return pagevec_count(pvec);
pvec 1060 mm/swap.c unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec,
pvec 1064 mm/swap.c pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
pvec 1065 mm/swap.c min_t(unsigned int, max_pages, PAGEVEC_SIZE), pvec->pages);
pvec 1066 mm/swap.c return pagevec_count(pvec);
pvec 60 mm/truncate.c struct pagevec *pvec, pgoff_t *indices,
pvec 70 mm/truncate.c for (j = 0; j < pagevec_count(pvec); j++)
pvec 71 mm/truncate.c if (xa_is_value(pvec->pages[j]))
pvec 74 mm/truncate.c if (j == pagevec_count(pvec))
pvec 82 mm/truncate.c for (i = j; i < pagevec_count(pvec); i++) {
pvec 83 mm/truncate.c struct page *page = pvec->pages[i];
pvec 87 mm/truncate.c pvec->pages[j++] = page;
pvec 104 mm/truncate.c pvec->nr = j;
pvec 298 mm/truncate.c struct pagevec pvec;
pvec 327 mm/truncate.c pagevec_init(&pvec);
pvec 329 mm/truncate.c while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
pvec 340 mm/truncate.c for (i = 0; i < pagevec_count(&pvec); i++) {
pvec 341 mm/truncate.c struct page *page = pvec.pages[i];
pvec 369 mm/truncate.c truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
pvec 370 mm/truncate.c pagevec_release(&pvec);
pvec 416 mm/truncate.c if (!pagevec_lookup_entries(&pvec, mapping, index,
pvec 427 mm/truncate.c pagevec_remove_exceptionals(&pvec);
pvec 428 mm/truncate.c pagevec_release(&pvec);
pvec 432 mm/truncate.c for (i = 0; i < pagevec_count(&pvec); i++) {
pvec 433 mm/truncate.c struct page *page = pvec.pages[i];
pvec 452 mm/truncate.c truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
pvec 453 mm/truncate.c pagevec_release(&pvec);
pvec 550 mm/truncate.c struct pagevec pvec;
pvec 556 mm/truncate.c pagevec_init(&pvec);
pvec 557 mm/truncate.c while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
pvec 560 mm/truncate.c for (i = 0; i < pagevec_count(&pvec); i++) {
pvec 561 mm/truncate.c struct page *page = pvec.pages[i];
pvec 603 mm/truncate.c pagevec_remove_exceptionals(&pvec);
pvec 604 mm/truncate.c pagevec_release(&pvec);
pvec 619 mm/truncate.c pagevec_remove_exceptionals(&pvec);
pvec 620 mm/truncate.c pagevec_release(&pvec);
pvec 688 mm/truncate.c struct pagevec pvec;
pvec 698 mm/truncate.c pagevec_init(&pvec);
pvec 700 mm/truncate.c while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
pvec 703 mm/truncate.c for (i = 0; i < pagevec_count(&pvec); i++) {
pvec 704 mm/truncate.c struct page *page = pvec.pages[i];
pvec 751 mm/truncate.c pagevec_remove_exceptionals(&pvec);
pvec 752 mm/truncate.c pagevec_release(&pvec);
pvec 4338 mm/vmscan.c void check_move_unevictable_pages(struct pagevec *pvec)
pvec 4346 mm/vmscan.c for (i = 0; i < pvec->nr; i++) {
pvec 4347 mm/vmscan.c struct page *page = pvec->pages[i];
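
Nearly every struct pagevec hit above follows one of two idioms: a batched tagged lookup that processes and releases up to PAGEVEC_SIZE referenced pages per iteration (mm/page-writeback.c, fs/ext4/inode.c, fs/btrfs/extent_io.c, ...), or an add-until-full put loop (drivers/gpu/drm/drm_gem.c). Below is a minimal sketch of both, written against the include/linux/pagevec.h API listed above; example_walk_dirty_pages(), example_process_page() and example_put_pages() are hypothetical names, not functions in the tree.

#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>

/* Hypothetical per-page work; stands in for the lock/writeback/unlock
 * bodies of the filesystem callers listed above. */
static void example_process_page(struct page *page)
{
}

/* Idiom 1: batched tagged lookup. pagevec_lookup_range_tag() fills at most
 * PAGEVEC_SIZE pages, each with a reference held, and advances index past
 * the last hit; pagevec_release() drops those references before the next
 * batch. */
static void example_walk_dirty_pages(struct address_space *mapping,
				     pgoff_t index, pgoff_t end)
{
	struct pagevec pvec;
	unsigned nr_pages;
	unsigned i;

	pagevec_init(&pvec);
	while ((nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
						    end, PAGECACHE_TAG_DIRTY))) {
		for (i = 0; i < nr_pages; i++)
			example_process_page(pvec.pages[i]);
		pagevec_release(&pvec);
		cond_resched();
	}
}

/* Idiom 2: batched put. pagevec_add() returns the space remaining, so a
 * zero return means the pagevec is full and must be flushed; the final
 * flush catches the last partial batch. */
static void example_put_pages(struct page **pages, int npages)
{
	struct pagevec pvec;
	int i;

	pagevec_init(&pvec);
	for (i = 0; i < npages; i++) {
		if (!pagevec_add(&pvec, pages[i]))
			pagevec_release(&pvec);
	}
	if (pagevec_count(&pvec))
		pagevec_release(&pvec);
}

Batching through a pagevec amortizes the LRU-lock and reference-count work across up to PAGEVEC_SIZE pages at a time, which is why the same loop shape recurs across the filesystems, DRM drivers and mm/ code indexed here.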