page_list 163 arch/arm/kernel/machine_kexec.c unsigned long page_list, reboot_entry_phys;
page_list 175 arch/arm/kernel/machine_kexec.c page_list = image->head & PAGE_MASK;
page_list 182 arch/arm/kernel/machine_kexec.c kexec_indirection_page = page_list;
page_list 32 arch/powerpc/kernel/machine_kexec_32.c unsigned long page_list;
page_list 43 arch/powerpc/kernel/machine_kexec_32.c page_list = image->head;
page_list 59 arch/powerpc/kernel/machine_kexec_32.c relocate_new_kernel(page_list, reboot_code_buffer_phys, image->start);
page_list 63 arch/powerpc/kernel/machine_kexec_32.c (*rnk)(page_list, reboot_code_buffer_phys, image->start);
page_list 73 arch/sh/kernel/machine_kexec.c unsigned long page_list;
page_list 103 arch/sh/kernel/machine_kexec.c page_list = image->head;
page_list 120 arch/sh/kernel/machine_kexec.c (*rnk)(page_list, reboot_code_buffer,
page_list 133 arch/x86/include/asm/kexec.h unsigned long page_list,
page_list 175 arch/x86/kernel/machine_kexec_32.c unsigned long page_list[PAGES_NR];
page_list 213 arch/x86/kernel/machine_kexec_32.c page_list[PA_CONTROL_PAGE] = __pa(control_page);
page_list 214 arch/x86/kernel/machine_kexec_32.c page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
page_list 215 arch/x86/kernel/machine_kexec_32.c page_list[PA_PGD] = __pa(image->arch.pgd);
page_list 218 arch/x86/kernel/machine_kexec_32.c page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
page_list 241 arch/x86/kernel/machine_kexec_32.c (unsigned long)page_list,
page_list 375 arch/x86/kernel/machine_kexec_64.c unsigned long page_list[PAGES_NR];
page_list 406 arch/x86/kernel/machine_kexec_64.c page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
page_list 407 arch/x86/kernel/machine_kexec_64.c page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
page_list 408 arch/x86/kernel/machine_kexec_64.c page_list[PA_TABLE_PAGE] =
page_list 412 arch/x86/kernel/machine_kexec_64.c page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
page_list 435 arch/x86/kernel/machine_kexec_64.c (unsigned long)page_list,
page_list 21 block/blk-mq-tag.h struct list_head page_list;
page_list 2078 block/blk-mq.c while (!list_empty(&tags->page_list)) {
page_list 2079 block/blk-mq.c page = list_first_entry(&tags->page_list, struct page, lru);
page_list 2168 block/blk-mq.c INIT_LIST_HEAD(&tags->page_list);
page_list 2203 block/blk-mq.c list_add_tail(&page->lru, &tags->page_list);
page_list 3376 drivers/dma/ste_dma40.c unsigned long *page_list;
page_list 3385 drivers/dma/ste_dma40.c page_list = kmalloc_array(MAX_LCLA_ALLOC_ATTEMPTS,
page_list 3386 drivers/dma/ste_dma40.c sizeof(*page_list),
page_list 3388 drivers/dma/ste_dma40.c if (!page_list)
page_list 3395 drivers/dma/ste_dma40.c page_list[i] = __get_free_pages(GFP_KERNEL,
page_list 3397 drivers/dma/ste_dma40.c if (!page_list[i]) {
page_list 3404 drivers/dma/ste_dma40.c free_pages(page_list[j], base->lcla_pool.pages);
page_list 3408 drivers/dma/ste_dma40.c if ((virt_to_phys((void *)page_list[i]) &
page_list 3414 drivers/dma/ste_dma40.c free_pages(page_list[j], base->lcla_pool.pages);
page_list 3417 drivers/dma/ste_dma40.c base->lcla_pool.base = (void *)page_list[i];
page_list 3452 drivers/dma/ste_dma40.c kfree(page_list);
page_list 123 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c struct list_head page_list;
page_list 366 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c list_del(&d_page->page_list);
page_list 376 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c list_for_each_entry_safe(d_page, tmp, d_pages, page_list)
page_list 388 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
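Two recurring shapes are already visible in the entries above: the kexec paths pass page_list as a scalar or a small fixed array of control-page addresses into the relocation stub, while blk-mq keeps page_list as a struct list_head and threads whole pages onto it through page->lru. A minimal sketch of the latter idiom, with hypothetical demo_* names standing in for the blk-mq fields:

    #include <linux/gfp.h>
    #include <linux/list.h>
    #include <linux/mm.h>

    static LIST_HEAD(demo_page_list);       /* stand-in for tags->page_list */

    static struct page *demo_grab_page(void)
    {
            struct page *page = alloc_page(GFP_KERNEL);

            if (page)
                    list_add_tail(&page->lru, &demo_page_list);
            return page;
    }

    static void demo_drop_pages(void)
    {
            struct page *page;

            while (!list_empty(&demo_page_list)) {
                    page = list_first_entry(&demo_page_list, struct page, lru);
                    list_del_init(&page->lru);
                    __free_page(page);
            }
    }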
page_list 389 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c list_del(&d_page->page_list);
page_list 436 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c page_list) {
page_list 441 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c list_move(&dma_p->page_list, &d_pages);
page_list 679 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
page_list 683 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c list_del(&d_page->page_list);
page_list 745 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c list_add(&dma_p->page_list, d_pages);
page_list 818 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c list_for_each_entry(d_page, &d_pages, page_list) {
page_list 846 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
page_list 849 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
page_list 1010 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c page_list) {
page_list 1040 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c page_list) {
page_list 128 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c list_add_tail(&page->lru, &ctx->page_list);
page_list 151 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c list_for_each_entry_safe(entry, next, &ctx->page_list, lru) {
page_list 80 drivers/gpu/drm/vmwgfx/vmwgfx_validation.h struct list_head page_list;
page_list 112 drivers/gpu/drm/vmwgfx/vmwgfx_validation.h .page_list = LIST_HEAD_INIT((_name).page_list), \
page_list 116 drivers/infiniband/core/fmr_pool.c u64 *page_list,
page_list 126 drivers/infiniband/core/fmr_pool.c bucket = pool->cache_bucket + ib_fmr_hash(*page_list);
page_list 131 drivers/infiniband/core/fmr_pool.c !memcmp(page_list, fmr->page_list,
page_list 132 drivers/infiniband/core/fmr_pool.c page_list_len * sizeof *page_list))
page_list 393 drivers/infiniband/core/fmr_pool.c u64 *page_list,
page_list 407 drivers/infiniband/core/fmr_pool.c page_list,
page_list 432 drivers/infiniband/core/fmr_pool.c result = ib_map_phys_fmr(fmr->fmr, page_list, list_len,
page_list 451 drivers/infiniband/core/fmr_pool.c memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list));
page_list 455 drivers/infiniband/core/fmr_pool.c pool->cache_bucket + ib_fmr_hash(fmr->page_list[0]));
page_list 74 drivers/infiniband/core/umem.c struct page **page_list,
page_list 88 drivers/infiniband/core/umem.c page_to_pfn(page_list[0])))
page_list 93 drivers/infiniband/core/umem.c struct page *first_page = page_list[i];
page_list 101 drivers/infiniband/core/umem.c first_pfn + len == page_to_pfn(page_list[i]) &&
page_list 198 drivers/infiniband/core/umem.c struct page **page_list;
page_list 244 drivers/infiniband/core/umem.c page_list = (struct page **) __get_free_page(GFP_KERNEL);
page_list 245 drivers/infiniband/core/umem.c if (!page_list) {
page_list 282 drivers/infiniband/core/umem.c page_list, NULL);
page_list 291 drivers/infiniband/core/umem.c sg = ib_umem_add_sg_table(sg, page_list, ret,
page_list 319 drivers/infiniband/core/umem.c free_page((unsigned long) page_list);
page_list 242 drivers/infiniband/core/umem_odp.c umem_odp->page_list = kvcalloc(
page_list 243 drivers/infiniband/core/umem_odp.c pages, sizeof(*umem_odp->page_list), GFP_KERNEL);
page_list 244 drivers/infiniband/core/umem_odp.c if (!umem_odp->page_list)
page_list 279 drivers/infiniband/core/umem_odp.c kvfree(umem_odp->page_list);
page_list 459 drivers/infiniband/core/umem_odp.c kvfree(umem_odp->page_list);
page_list 532 drivers/infiniband/core/umem_odp.c umem_odp->page_list[page_index] = page;
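The ib_umem entries above show the usual pinning pattern: a single page obtained with __get_free_page() serves as a scratch array of struct page * so user memory can be pinned in bounded batches. A sketch under that assumption; demo_pin_user_range is hypothetical and error handling is abbreviated:

    #include <linux/kernel.h>
    #include <linux/mm.h>

    static int demo_pin_user_range(unsigned long addr, int npages)
    {
            struct page **page_list;
            int got;

            /* one free page holds PAGE_SIZE / sizeof(struct page *) slots */
            page_list = (struct page **)__get_free_page(GFP_KERNEL);
            if (!page_list)
                    return -ENOMEM;

            while (npages) {
                    int chunk = min_t(int, npages,
                                      PAGE_SIZE / sizeof(struct page *));

                    got = get_user_pages_fast(addr, chunk, FOLL_WRITE,
                                              page_list);
                    if (got <= 0)
                            break;
                    /* ... add the pinned pages to an sg table here ... */
                    addr   += (unsigned long)got << PAGE_SHIFT;
                    npages -= got;
            }

            free_page((unsigned long)page_list);
            return npages ? -EFAULT : 0;
    }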
page_list 534 drivers/infiniband/core/umem_odp.c } else if (umem_odp->page_list[page_index] == page) {
page_list 538 drivers/infiniband/core/umem_odp.c umem_odp->page_list[page_index], page);
page_list 735 drivers/infiniband/core/umem_odp.c if (umem_odp->page_list[idx]) {
page_list 736 drivers/infiniband/core/umem_odp.c struct page *page = umem_odp->page_list[idx];
page_list 758 drivers/infiniband/core/umem_odp.c umem_odp->page_list[idx] = NULL;
page_list 2181 drivers/infiniband/hw/bnxt_re/ib_verbs.c wqe->frmr.page_list = mr->pages;
page_list 122 drivers/infiniband/hw/bnxt_re/ib_verbs.h u64 *page_list;
page_list 1724 drivers/infiniband/hw/bnxt_re/qplib_fp.c wqe->frmr.page_list[i] |
page_list 206 drivers/infiniband/hw/bnxt_re/qplib_fp.h u64 *page_list;
page_list 362 drivers/infiniband/hw/cxgb3/iwch_provider.c __be64 *page_list;
page_list 387 drivers/infiniband/hw/cxgb3/iwch_provider.c page_list = kmalloc_array(npages, sizeof(u64), GFP_KERNEL);
page_list 388 drivers/infiniband/hw/cxgb3/iwch_provider.c if (!page_list) {
page_list 394 drivers/infiniband/hw/cxgb3/iwch_provider.c page_list[i] = cpu_to_be64((u64)i << shift);
page_list 401 drivers/infiniband/hw/cxgb3/iwch_provider.c kfree(page_list);
page_list 405 drivers/infiniband/hw/cxgb3/iwch_provider.c ret = iwch_write_pbl(mhp, page_list, npages, 0);
page_list 406 drivers/infiniband/hw/cxgb3/iwch_provider.c kfree(page_list);
page_list 1047 drivers/infiniband/hw/efa/efa_verbs.c u64 *page_list,
page_list 1060 drivers/infiniband/hw/efa/efa_verbs.c page_list[hp_idx++] = rdma_block_iter_dma_address(&biter);
page_list 170 drivers/infiniband/hw/hns/hns_roce_alloc.c if (buf->page_list[i].buf)
page_list 172 drivers/infiniband/hw/hns/hns_roce_alloc.c buf->page_list[i].buf,
page_list 173 drivers/infiniband/hw/hns/hns_roce_alloc.c buf->page_list[i].map);
page_list 174 drivers/infiniband/hw/hns/hns_roce_alloc.c kfree(buf->page_list);
page_list 214 drivers/infiniband/hw/hns/hns_roce_alloc.c buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
page_list 217 drivers/infiniband/hw/hns/hns_roce_alloc.c if (!buf->page_list)
page_list 221 drivers/infiniband/hw/hns/hns_roce_alloc.c buf->page_list[i].buf = dma_alloc_coherent(dev,
page_list 226 drivers/infiniband/hw/hns/hns_roce_alloc.c if (!buf->page_list[i].buf)
page_list 229 drivers/infiniband/hw/hns/hns_roce_alloc.c buf->page_list[i].map = t;
page_list 260 drivers/infiniband/hw/hns/hns_roce_alloc.c bufs[total++] = buf->page_list[i].map;
page_list 22 drivers/infiniband/hw/hns/hns_roce_db.c list_for_each_entry(page, &context->page_list, list)
page_list 41 drivers/infiniband/hw/hns/hns_roce_db.c list_add(&page->list, &context->page_list);
page_list 285 drivers/infiniband/hw/hns/hns_roce_device.h struct list_head page_list;
page_list 451 drivers/infiniband/hw/hns/hns_roce_device.h struct hns_roce_buf_list *page_list;
page_list 1117 drivers/infiniband/hw/hns/hns_roce_device.h return (char *)(buf->page_list[offset >> buf->page_shift].buf) +
page_list 328 drivers/infiniband/hw/hns/hns_roce_main.c INIT_LIST_HEAD(&context->page_list);
page_list 788 drivers/infiniband/hw/hns/hns_roce_mr.c u32 npages, u64 *page_list)
page_list 835 drivers/infiniband/hw/hns/hns_roce_mr.c mtts[i] = cpu_to_le64(page_list[i] >> PAGE_ADDR_SHIFT);
page_list 837 drivers/infiniband/hw/hns/hns_roce_mr.c mtts[i] = cpu_to_le64(page_list[i]);
page_list 845 drivers/infiniband/hw/hns/hns_roce_mr.c u32 npages, u64 *page_list)
page_list 878 drivers/infiniband/hw/hns/hns_roce_mr.c page_list);
page_list 884 drivers/infiniband/hw/hns/hns_roce_mr.c page_list += chunk;
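The hns_roce_alloc.c entries show the multi-page buffer pattern: when a buffer need not be physically contiguous, the driver keeps an array of page-sized coherent chunks together with their DMA handles. A sketch; demo_buf_list stands in for hns_roce_buf_list:

    #include <linux/dma-mapping.h>
    #include <linux/slab.h>

    struct demo_buf_list {                  /* stand-in for hns_roce_buf_list */
            void       *buf;                /* CPU address of one chunk */
            dma_addr_t  map;                /* its DMA handle */
    };

    static struct demo_buf_list *demo_alloc_paged(struct device *dev, int nbufs)
    {
            struct demo_buf_list *pl;
            int i;

            pl = kcalloc(nbufs, sizeof(*pl), GFP_KERNEL);
            if (!pl)
                    return NULL;

            for (i = 0; i < nbufs; i++) {
                    pl[i].buf = dma_alloc_coherent(dev, PAGE_SIZE, &pl[i].map,
                                                   GFP_KERNEL);
                    if (!pl[i].buf)
                            goto err_free;
            }
            return pl;

    err_free:
            while (--i >= 0)
                    dma_free_coherent(dev, PAGE_SIZE, pl[i].buf, pl[i].map);
            kfree(pl);
            return NULL;
    }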
page_list 893 drivers/infiniband/hw/hns/hns_roce_mr.c u64 *page_list;
page_list 897 drivers/infiniband/hw/hns/hns_roce_mr.c page_list = kmalloc_array(buf->npages, sizeof(*page_list), GFP_KERNEL);
page_list 898 drivers/infiniband/hw/hns/hns_roce_mr.c if (!page_list)
page_list 903 drivers/infiniband/hw/hns/hns_roce_mr.c page_list[i] = buf->direct.map + (i << buf->page_shift);
page_list 905 drivers/infiniband/hw/hns/hns_roce_mr.c page_list[i] = buf->page_list[i].map;
page_list 908 drivers/infiniband/hw/hns/hns_roce_mr.c ret = hns_roce_write_mtt(hr_dev, mtt, 0, buf->npages, page_list);
page_list 910 drivers/infiniband/hw/hns/hns_roce_mr.c kfree(page_list);
page_list 799 drivers/infiniband/hw/mlx4/mlx4_ib.h int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int npages,
page_list 735 drivers/infiniband/hw/mlx4/mr.c int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
page_list 741 drivers/infiniband/hw/mlx4/mr.c return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova,
page_list 122 drivers/infiniband/hw/mthca/mthca_allocator.c if (array->page_list[p].page)
page_list 123 drivers/infiniband/hw/mthca/mthca_allocator.c return array->page_list[p].page[index & MTHCA_ARRAY_MASK];
page_list 133 drivers/infiniband/hw/mthca/mthca_allocator.c if (!array->page_list[p].page)
page_list 134 drivers/infiniband/hw/mthca/mthca_allocator.c array->page_list[p].page = (void **) get_zeroed_page(GFP_ATOMIC);
page_list 136 drivers/infiniband/hw/mthca/mthca_allocator.c if (!array->page_list[p].page)
page_list 139 drivers/infiniband/hw/mthca/mthca_allocator.c array->page_list[p].page[index & MTHCA_ARRAY_MASK] = value;
page_list 140 drivers/infiniband/hw/mthca/mthca_allocator.c ++array->page_list[p].used;
page_list 149 drivers/infiniband/hw/mthca/mthca_allocator.c if (--array->page_list[p].used == 0) {
page_list 150 drivers/infiniband/hw/mthca/mthca_allocator.c free_page((unsigned long) array->page_list[p].page);
page_list 151 drivers/infiniband/hw/mthca/mthca_allocator.c array->page_list[p].page = NULL;
page_list 153 drivers/infiniband/hw/mthca/mthca_allocator.c array->page_list[p].page[index & MTHCA_ARRAY_MASK] = NULL;
page_list 155 drivers/infiniband/hw/mthca/mthca_allocator.c if (array->page_list[p].used < 0)
page_list 157 drivers/infiniband/hw/mthca/mthca_allocator.c array, index, p, array->page_list[p].used);
page_list 165 drivers/infiniband/hw/mthca/mthca_allocator.c array->page_list = kmalloc_array(npage, sizeof(*array->page_list),
page_list 167 drivers/infiniband/hw/mthca/mthca_allocator.c if (!array->page_list)
page_list 171 drivers/infiniband/hw/mthca/mthca_allocator.c array->page_list[i].page = NULL;
page_list 172 drivers/infiniband/hw/mthca/mthca_allocator.c array->page_list[i].used = 0;
page_list 183 drivers/infiniband/hw/mthca/mthca_allocator.c free_page((unsigned long) array->page_list[i].page);
page_list 185 drivers/infiniband/hw/mthca/mthca_allocator.c kfree(array->page_list);
page_list 239 drivers/infiniband/hw/mthca/mthca_allocator.c buf->page_list = kmalloc_array(npages,
page_list 240 drivers/infiniband/hw/mthca/mthca_allocator.c sizeof(*buf->page_list),
page_list 242 drivers/infiniband/hw/mthca/mthca_allocator.c if (!buf->page_list)
page_list 246 drivers/infiniband/hw/mthca/mthca_allocator.c buf->page_list[i].buf = NULL;
page_list 249 drivers/infiniband/hw/mthca/mthca_allocator.c buf->page_list[i].buf =
page_list 252 drivers/infiniband/hw/mthca/mthca_allocator.c if (!buf->page_list[i].buf)
page_list 256 drivers/infiniband/hw/mthca/mthca_allocator.c dma_unmap_addr_set(&buf->page_list[i], mapping, t);
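mthca_allocator.c uses page_list for a two-level sparse array: leaf pages come from get_zeroed_page() and carry a used count so they can be released when the last slot clears. A sketch of the insertion side; the demo_* names are illustrative and the constants mirror the MTHCA_ARRAY_MASK scheme:

    #include <linux/errno.h>
    #include <linux/gfp.h>

    #define DEMO_SLOTS_PER_PAGE     (PAGE_SIZE / sizeof(void *))
    #define DEMO_MASK               (DEMO_SLOTS_PER_PAGE - 1)

    struct demo_array_page {
            void **page;            /* one zeroed page of slots, or NULL */
            int    used;            /* live slots; page freed when it hits 0 */
    };

    static int demo_array_set(struct demo_array_page *page_list, int index,
                              void *value)
    {
            int p = index / DEMO_SLOTS_PER_PAGE;

            if (!page_list[p].page)
                    page_list[p].page = (void **)get_zeroed_page(GFP_ATOMIC);
            if (!page_list[p].page)
                    return -ENOMEM;

            page_list[p].page[index & DEMO_MASK] = value;
            ++page_list[p].used;
            return 0;
    }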
page_list 258 drivers/infiniband/hw/mthca/mthca_allocator.c clear_page(buf->page_list[i].buf);
page_list 298 drivers/infiniband/hw/mthca/mthca_allocator.c buf->page_list[i].buf,
page_list 299 drivers/infiniband/hw/mthca/mthca_allocator.c dma_unmap_addr(&buf->page_list[i],
page_list 301 drivers/infiniband/hw/mthca/mthca_allocator.c kfree(buf->page_list);
page_list 165 drivers/infiniband/hw/mthca/mthca_cq.c return buf->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].buf
page_list 191 drivers/infiniband/hw/mthca/mthca_dev.h } *page_list;
page_list 483 drivers/infiniband/hw/mthca/mthca_dev.h int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
page_list 486 drivers/infiniband/hw/mthca/mthca_dev.h int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
page_list 231 drivers/infiniband/hw/mthca/mthca_eq.c return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
page_list 482 drivers/infiniband/hw/mthca/mthca_eq.c eq->page_list = kmalloc_array(npages, sizeof(*eq->page_list),
page_list 484 drivers/infiniband/hw/mthca/mthca_eq.c if (!eq->page_list)
page_list 488 drivers/infiniband/hw/mthca/mthca_eq.c eq->page_list[i].buf = NULL;
page_list 500 drivers/infiniband/hw/mthca/mthca_eq.c eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
page_list 502 drivers/infiniband/hw/mthca/mthca_eq.c if (!eq->page_list[i].buf)
page_list 506 drivers/infiniband/hw/mthca/mthca_eq.c dma_unmap_addr_set(&eq->page_list[i], mapping, t);
page_list 508 drivers/infiniband/hw/mthca/mthca_eq.c clear_page(eq->page_list[i].buf);
page_list 572 drivers/infiniband/hw/mthca/mthca_eq.c if (eq->page_list[i].buf)
page_list 574 drivers/infiniband/hw/mthca/mthca_eq.c eq->page_list[i].buf,
page_list 575 drivers/infiniband/hw/mthca/mthca_eq.c dma_unmap_addr(&eq->page_list[i],
page_list 581 drivers/infiniband/hw/mthca/mthca_eq.c kfree(eq->page_list);
page_list 621 drivers/infiniband/hw/mthca/mthca_eq.c eq->page_list[i].buf,
page_list 622 drivers/infiniband/hw/mthca/mthca_eq.c dma_unmap_addr(&eq->page_list[i], mapping));
page_list 624 drivers/infiniband/hw/mthca/mthca_eq.c kfree(eq->page_list);
page_list 689 drivers/infiniband/hw/mthca/mthca_mr.c static inline int mthca_check_fmr(struct mthca_fmr *fmr, u64 *page_list,
page_list 706 drivers/infiniband/hw/mthca/mthca_mr.c if (page_list[i] & ~page_mask)
page_list 717 drivers/infiniband/hw/mthca/mthca_mr.c int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
page_list 726 drivers/infiniband/hw/mthca/mthca_mr.c err = mthca_check_fmr(fmr, page_list, list_len, iova);
page_list 739 drivers/infiniband/hw/mthca/mthca_mr.c __be64 mtt_entry = cpu_to_be64(page_list[i] |
page_list 758 drivers/infiniband/hw/mthca/mthca_mr.c int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
page_list 766 drivers/infiniband/hw/mthca/mthca_mr.c err = mthca_check_fmr(fmr, page_list, list_len, iova);
page_list 787 drivers/infiniband/hw/mthca/mthca_mr.c fmr->mem.arbel.mtts[i] = cpu_to_be64(page_list[i] |
page_list 54 drivers/infiniband/hw/mthca/mthca_provider.h struct mthca_buf_list *page_list;
page_list 114 drivers/infiniband/hw/mthca/mthca_provider.h struct mthca_buf_list *page_list;
page_list 213 drivers/infiniband/hw/mthca/mthca_qp.c return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
page_list 223 drivers/infiniband/hw/mthca/mthca_qp.c return qp->queue.page_list[(qp->send_wqe_offset +
page_list 79 drivers/infiniband/hw/mthca/mthca_srq.c return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
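The mthca_cq/mthca_qp/mthca_srq entries all compute the same address split: entry n lives at byte offset (n << shift), whose high bits select a page from page_list and whose low bits offset into it. A sketch of that calculation; demo_queue_buf is a stand-in for mthca_buf_list:

    #include <linux/types.h>

    struct demo_queue_buf {                 /* stand-in for mthca_buf_list */
            void       *buf;
            dma_addr_t  mapping;
    };

    static void *demo_get_wqe(struct demo_queue_buf *page_list, int n,
                              int wqe_shift)
    {
            unsigned long off = (unsigned long)n << wqe_shift;

            /* high bits pick the page, low bits the offset within it */
            return page_list[off >> PAGE_SHIFT].buf + (off & (PAGE_SIZE - 1));
    }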
page_list 51 drivers/infiniband/hw/usnic/usnic_uiom.c ((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list)) /\
page_list 52 drivers/infiniband/hw/usnic/usnic_uiom.c ((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] - \
page_list 53 drivers/infiniband/hw/usnic/usnic_uiom.c (void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))
page_list 75 drivers/infiniband/hw/usnic/usnic_uiom.c for_each_sg(chunk->page_list, sg, chunk->nents, i) {
page_list 89 drivers/infiniband/hw/usnic/usnic_uiom.c struct page **page_list;
page_list 119 drivers/infiniband/hw/usnic/usnic_uiom.c page_list = (struct page **) __get_free_page(GFP_KERNEL);
page_list 120 drivers/infiniband/hw/usnic/usnic_uiom.c if (!page_list)
page_list 148 drivers/infiniband/hw/usnic/usnic_uiom.c page_list, NULL);
page_list 157 drivers/infiniband/hw/usnic/usnic_uiom.c chunk = kmalloc(struct_size(chunk, page_list,
page_list 166 drivers/infiniband/hw/usnic/usnic_uiom.c sg_init_table(chunk->page_list, chunk->nents);
page_list 167 drivers/infiniband/hw/usnic/usnic_uiom.c for_each_sg(chunk->page_list, sg, chunk->nents, i) {
page_list 168 drivers/infiniband/hw/usnic/usnic_uiom.c sg_set_page(sg, page_list[i + off],
page_list 191 drivers/infiniband/hw/usnic/usnic_uiom.c free_page((unsigned long) page_list);
page_list 265 drivers/infiniband/hw/usnic/usnic_uiom.c pa = sg_phys(&chunk->page_list[i]);
page_list 80 drivers/infiniband/hw/usnic/usnic_uiom.h struct scatterlist page_list[0];
page_list 546 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h u64 *page_list, int num_pages);
page_list 207 drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c u64 *page_list,
page_list 217 drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c ret = pvrdma_page_dir_insert_dma(pdir, i, page_list[i]);
page_list 784 drivers/infiniband/sw/rdmavt/mr.c int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
page_list 811 drivers/infiniband/sw/rdmavt/mr.c fmr->mr.map[m]->segs[n].vaddr = (void *)page_list[i];
page_list 813 drivers/infiniband/sw/rdmavt/mr.c trace_rvt_mr_fmr_seg(&fmr->mr, m, n, (void *)page_list[i], ps);
page_list 88 drivers/infiniband/sw/rdmavt/mr.h int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
page_list 165 drivers/md/dm-integrity.c struct page_list *journal;
page_list 166 drivers/md/dm-integrity.c struct page_list *journal_io;
page_list 167 drivers/md/dm-integrity.c struct page_list *journal_xor;
page_list 168 drivers/md/dm-integrity.c struct page_list *recalc_bitmap;
page_list 169 drivers/md/dm-integrity.c struct page_list *may_write_bitmap;
page_list 500 drivers/md/dm-integrity.c static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
page_list 605 drivers/md/dm-integrity.c static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
page_list 654 drivers/md/dm-integrity.c static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
page_list 788 drivers/md/dm-integrity.c struct page_list *source_pl, *target_pl;
page_list 3145 drivers/md/dm-integrity.c static void dm_integrity_free_page_list(struct page_list *pl)
page_list 3156 drivers/md/dm-integrity.c static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
page_list 3158 drivers/md/dm-integrity.c struct page_list *pl;
page_list 3161 drivers/md/dm-integrity.c pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
page_list 3189 drivers/md/dm-integrity.c struct page_list *pl)
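In the device-mapper entries, page_list is not a variable but a type: include/linux/dm-io.h defines a singly linked chain of pages that dm-io, dm-kcopyd and dm-integrity all share. A sketch of building and tearing down such a chain; the struct mirrors dm-io.h, the demo_* helpers are illustrative:

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/slab.h>

    struct page_list {                      /* mirrors include/linux/dm-io.h */
            struct page_list *next;
            struct page *page;
    };

    static struct page_list *demo_alloc_pl_chain(unsigned int nr)
    {
            struct page_list *pl = NULL, *entry;

            while (nr--) {
                    entry = kmalloc(sizeof(*entry), GFP_KERNEL);
                    if (!entry)
                            break;
                    entry->page = alloc_page(GFP_KERNEL);
                    if (!entry->page) {
                            kfree(entry);
                            break;
                    }
                    entry->next = pl;       /* push onto the chain */
                    pl = entry;
            }
            return pl;                      /* caller frees partial chains */
    }

    static void demo_free_pl_chain(struct page_list *pl)
    {
            struct page_list *next;

            for (; pl; pl = next) {
                    next = pl->next;
                    __free_page(pl->page);
                    kfree(pl);
            }
    }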
page_list 3322 drivers/md/dm-integrity.c journal_desc_size = journal_pages * sizeof(struct page_list);
page_list 183 drivers/md/dm-io.c struct page_list *pl = (struct page_list *) dp->context_ptr;
page_list 192 drivers/md/dm-io.c struct page_list *pl = (struct page_list *) dp->context_ptr;
page_list 197 drivers/md/dm-io.c static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
page_list 58 drivers/md/dm-kcopyd.c struct page_list *pages;
page_list 93 drivers/md/dm-kcopyd.c static struct page_list zero_page_list;
page_list 214 drivers/md/dm-kcopyd.c static struct page_list *alloc_pl(gfp_t gfp)
page_list 216 drivers/md/dm-kcopyd.c struct page_list *pl;
page_list 231 drivers/md/dm-kcopyd.c static void free_pl(struct page_list *pl)
page_list 241 drivers/md/dm-kcopyd.c static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
page_list 243 drivers/md/dm-kcopyd.c struct page_list *next;
page_list 261 drivers/md/dm-kcopyd.c unsigned int nr, struct page_list **pages)
page_list 263 drivers/md/dm-kcopyd.c struct page_list *pl;
page_list 292 drivers/md/dm-kcopyd.c static void drop_pages(struct page_list *pl)
page_list 294 drivers/md/dm-kcopyd.c struct page_list *next;
page_list 309 drivers/md/dm-kcopyd.c struct page_list *pl = NULL, *next;
page_list 364 drivers/md/dm-kcopyd.c struct page_list *pages;
page_list 170 drivers/misc/genwqe/card_base.h struct page **page_list; /* list of pages used by user buff */
page_list 240 drivers/misc/genwqe/card_utils.c struct page **page_list, int num_pages,
page_list 251 drivers/misc/genwqe/card_utils.c daddr = pci_map_page(pci_dev, page_list[i],
page_list 526 drivers/misc/genwqe/card_utils.c static int genwqe_free_user_pages(struct page **page_list,
page_list 532 drivers/misc/genwqe/card_utils.c if (page_list[i] != NULL) {
page_list 534 drivers/misc/genwqe/card_utils.c set_page_dirty_lock(page_list[i]);
page_list 535 drivers/misc/genwqe/card_utils.c put_page(page_list[i]);
page_list 587 drivers/misc/genwqe/card_utils.c m->page_list = kcalloc(m->nr_pages,
page_list 590 drivers/misc/genwqe/card_utils.c if (!m->page_list) {
page_list 597 drivers/misc/genwqe/card_utils.c m->dma_list = (dma_addr_t *)(m->page_list + m->nr_pages);
page_list 603 drivers/misc/genwqe/card_utils.c m->page_list); /* ptrs to pages */
page_list 609 drivers/misc/genwqe/card_utils.c genwqe_free_user_pages(m->page_list, rc, m->write);
page_list 614 drivers/misc/genwqe/card_utils.c rc = genwqe_map_pages(cd, m->page_list, m->nr_pages, m->dma_list);
page_list 621 drivers/misc/genwqe/card_utils.c genwqe_free_user_pages(m->page_list, m->nr_pages, m->write);
page_list 624 drivers/misc/genwqe/card_utils.c kfree(m->page_list);
page_list 625 drivers/misc/genwqe/card_utils.c m->page_list = NULL;
page_list 652 drivers/misc/genwqe/card_utils.c if (m->page_list) {
page_list 653 drivers/misc/genwqe/card_utils.c genwqe_free_user_pages(m->page_list, m->nr_pages, m->write);
page_list 655 drivers/misc/genwqe/card_utils.c kfree(m->page_list);
page_list 656 drivers/misc/genwqe/card_utils.c m->page_list = NULL;
page_list 924 drivers/misc/vmw_balloon.c static void vmballoon_release_page_list(struct list_head *page_list,
page_list 930 drivers/misc/vmw_balloon.c list_for_each_entry_safe(page, tmp, page_list, lru) {
page_list 338 drivers/net/ethernet/google/gve/gve_adminq.c __be64 *page_list;
page_list 343 drivers/net/ethernet/google/gve/gve_adminq.c page_list = dma_alloc_coherent(hdev, size, &page_list_bus, GFP_KERNEL);
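The genwqe entries show a co-allocation trick: a single kcalloc() holds the struct page * array and the dma_addr_t array back to back, so m->dma_list is just a pointer past the end of m->page_list. A sketch of the same layout with hypothetical demo_* names:

    #include <linux/slab.h>

    struct demo_mapping {
            int           nr_pages;
            struct page **page_list;        /* pinned user pages */
            dma_addr_t   *dma_list;         /* aliases the same allocation */
    };

    static int demo_map_alloc(struct demo_mapping *m, int nr_pages)
    {
            m->nr_pages  = nr_pages;
            m->page_list = kcalloc(nr_pages,
                                   sizeof(struct page *) + sizeof(dma_addr_t),
                                   GFP_KERNEL);
            if (!m->page_list)
                    return -ENOMEM;

            /* the DMA list starts right after the last page pointer */
            m->dma_list = (dma_addr_t *)(m->page_list + nr_pages);
            return 0;
    }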
page_list 344 drivers/net/ethernet/google/gve/gve_adminq.c if (!page_list)
page_list 348 drivers/net/ethernet/google/gve/gve_adminq.c page_list[i] = cpu_to_be64(qpl->page_buses[i]);
page_list 358 drivers/net/ethernet/google/gve/gve_adminq.c dma_free_coherent(hdev, size, page_list, page_list_bus);
page_list 619 drivers/net/ethernet/mellanox/mlx4/alloc.c buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
page_list 621 drivers/net/ethernet/mellanox/mlx4/alloc.c if (!buf->page_list)
page_list 625 drivers/net/ethernet/mellanox/mlx4/alloc.c buf->page_list[i].buf =
page_list 628 drivers/net/ethernet/mellanox/mlx4/alloc.c if (!buf->page_list[i].buf)
page_list 631 drivers/net/ethernet/mellanox/mlx4/alloc.c buf->page_list[i].map = t;
page_list 653 drivers/net/ethernet/mellanox/mlx4/alloc.c if (buf->page_list[i].buf)
page_list 656 drivers/net/ethernet/mellanox/mlx4/alloc.c buf->page_list[i].buf,
page_list 657 drivers/net/ethernet/mellanox/mlx4/alloc.c buf->page_list[i].map);
page_list 658 drivers/net/ethernet/mellanox/mlx4/alloc.c kfree(buf->page_list);
page_list 337 drivers/net/ethernet/mellanox/mlx4/cq.c memset(buf->page_list[i].buf, 0xcc,
page_list 118 drivers/net/ethernet/mellanox/mlx4/eq.c return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE;
page_list 989 drivers/net/ethernet/mellanox/mlx4/eq.c eq->page_list = kmalloc_array(npages, sizeof(*eq->page_list),
page_list 991 drivers/net/ethernet/mellanox/mlx4/eq.c if (!eq->page_list)
page_list 995 drivers/net/ethernet/mellanox/mlx4/eq.c eq->page_list[i].buf = NULL;
page_list 1007 drivers/net/ethernet/mellanox/mlx4/eq.c eq->page_list[i].buf = dma_alloc_coherent(&dev->persist->
page_list 1011 drivers/net/ethernet/mellanox/mlx4/eq.c if (!eq->page_list[i].buf)
page_list 1015 drivers/net/ethernet/mellanox/mlx4/eq.c eq->page_list[i].map = t;
page_list 1073 drivers/net/ethernet/mellanox/mlx4/eq.c if (eq->page_list[i].buf)
page_list 1075 drivers/net/ethernet/mellanox/mlx4/eq.c eq->page_list[i].buf,
page_list 1076 drivers/net/ethernet/mellanox/mlx4/eq.c eq->page_list[i].map);
page_list 1081 drivers/net/ethernet/mellanox/mlx4/eq.c kfree(eq->page_list);
page_list 1109 drivers/net/ethernet/mellanox/mlx4/eq.c eq->page_list[i].buf,
page_list 1110 drivers/net/ethernet/mellanox/mlx4/eq.c eq->page_list[i].map);
page_list 1112 drivers/net/ethernet/mellanox/mlx4/eq.c kfree(eq->page_list);
page_list 399 drivers/net/ethernet/mellanox/mlx4/mlx4.h struct mlx4_buf_list *page_list;
page_list 1032 drivers/net/ethernet/mellanox/mlx4/mlx4.h int start_index, int npages, u64 *page_list);
page_list 693 drivers/net/ethernet/mellanox/mlx4/mr.c int start_index, int npages, u64 *page_list)
page_list 710 drivers/net/ethernet/mellanox/mlx4/mr.c mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
page_list 719 drivers/net/ethernet/mellanox/mlx4/mr.c int start_index, int npages, u64 *page_list)
page_list 734 drivers/net/ethernet/mellanox/mlx4/mr.c err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
page_list 739 drivers/net/ethernet/mellanox/mlx4/mr.c page_list += chunk;
page_list 747 drivers/net/ethernet/mellanox/mlx4/mr.c int start_index, int npages, u64 *page_list)
page_list 770 drivers/net/ethernet/mellanox/mlx4/mr.c inbox[i + 2] = cpu_to_be64(page_list[i] |
page_list 780 drivers/net/ethernet/mellanox/mlx4/mr.c page_list += chunk;
page_list 786 drivers/net/ethernet/mellanox/mlx4/mr.c return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
page_list 793 drivers/net/ethernet/mellanox/mlx4/mr.c u64 *page_list;
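mlx4_write_mtt() above walks a long page_list in bounded chunks, advancing start_index, npages and page_list together each round. A sketch of that loop; demo_write_chunk and DEMO_MAX_CHUNK are hypothetical stand-ins for mlx4_write_mtt_chunk and its firmware mailbox limit:

    #include <linux/kernel.h>
    #include <linux/types.h>

    #define DEMO_MAX_CHUNK 64               /* assumed per-command limit */

    static int demo_write_chunk(int start_index, int npages, u64 *page_list);

    static int demo_write_mtt(int start_index, int npages, u64 *page_list)
    {
            int chunk, err;

            while (npages > 0) {
                    chunk = min_t(int, DEMO_MAX_CHUNK, npages);

                    err = demo_write_chunk(start_index, chunk, page_list);
                    if (err)
                            return err;

                    /* advance the cursor trio in lockstep */
                    npages      -= chunk;
                    start_index += chunk;
                    page_list   += chunk;
            }
            return 0;
    }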
page_list 797 drivers/net/ethernet/mellanox/mlx4/mr.c page_list = kcalloc(buf->npages, sizeof(*page_list), GFP_KERNEL);
page_list 798 drivers/net/ethernet/mellanox/mlx4/mr.c if (!page_list)
page_list 803 drivers/net/ethernet/mellanox/mlx4/mr.c page_list[i] = buf->direct.map + (i << buf->page_shift);
page_list 805 drivers/net/ethernet/mellanox/mlx4/mr.c page_list[i] = buf->page_list[i].map;
page_list 807 drivers/net/ethernet/mellanox/mlx4/mr.c err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);
page_list 809 drivers/net/ethernet/mellanox/mlx4/mr.c kfree(page_list);
page_list 969 drivers/net/ethernet/mellanox/mlx4/mr.c static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,
page_list 986 drivers/net/ethernet/mellanox/mlx4/mr.c if (page_list[i] & ~page_mask)
page_list 996 drivers/net/ethernet/mellanox/mlx4/mr.c int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
page_list 1002 drivers/net/ethernet/mellanox/mlx4/mr.c err = mlx4_check_fmr(fmr, page_list, npages, iova);
page_list 1021 drivers/net/ethernet/mellanox/mlx4/mr.c fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
page_list 3269 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c __be64 *page_list = inbox->buf;
page_list 3270 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c u64 *pg_list = (u64 *)page_list;
page_list 3273 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c int start = be64_to_cpu(page_list[0]);
page_list 3289 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
page_list 3291 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
page_list 3292 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c ((u64 *)page_list + 2));
page_list 541 drivers/rapidio/devices/rio_mport_cdev.c struct page **page_list;
page_list 580 drivers/rapidio/devices/rio_mport_cdev.c if (req->page_list) {
page_list 582 drivers/rapidio/devices/rio_mport_cdev.c put_page(req->page_list[i]);
page_list 583 drivers/rapidio/devices/rio_mport_cdev.c kfree(req->page_list);
page_list 814 drivers/rapidio/devices/rio_mport_cdev.c struct page **page_list = NULL;
page_list 858 drivers/rapidio/devices/rio_mport_cdev.c page_list = kmalloc_array(nr_pages,
page_list 859 drivers/rapidio/devices/rio_mport_cdev.c sizeof(*page_list), GFP_KERNEL);
page_list 860 drivers/rapidio/devices/rio_mport_cdev.c if (page_list == NULL) {
page_list 869 drivers/rapidio/devices/rio_mport_cdev.c page_list);
page_list 888 drivers/rapidio/devices/rio_mport_cdev.c ret = sg_alloc_table_from_pages(&req->sgt, page_list, nr_pages,
page_list 895 drivers/rapidio/devices/rio_mport_cdev.c req->page_list = page_list;
page_list 953 drivers/rapidio/devices/rio_mport_cdev.c if (!req->page_list) {
page_list 955 drivers/rapidio/devices/rio_mport_cdev.c put_page(page_list[i]);
page_list 956 drivers/rapidio/devices/rio_mport_cdev.c kfree(page_list);
page_list 14481 drivers/scsi/lpfc/lpfc_sli.c while (!list_empty(&queue->page_list)) {
page_list 14482 drivers/scsi/lpfc/lpfc_sli.c list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
page_list 14539 drivers/scsi/lpfc/lpfc_sli.c INIT_LIST_HEAD(&queue->page_list);
page_list 14567 drivers/scsi/lpfc/lpfc_sli.c list_add_tail(&dmabuf->list, &queue->page_list);
page_list 14825 drivers/scsi/lpfc/lpfc_sli.c list_for_each_entry(dmabuf, &eq->page_list, list) {
page_list 14959 drivers/scsi/lpfc/lpfc_sli.c list_for_each_entry(dmabuf, &cq->page_list, list) {
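rio_mport_cdev pins user pages into a page_list and then hands the whole array to sg_alloc_table_from_pages(), which merges contiguous pages into fewer scatterlist entries. A sketch with abbreviated cleanup; real code must put_page() whatever was pinned on the failure paths:

    #include <linux/mm.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    static int demo_user_sgt(struct sg_table *sgt, unsigned long addr,
                             int nr_pages, unsigned long offset, size_t length)
    {
            struct page **page_list;
            int pinned, ret;

            page_list = kmalloc_array(nr_pages, sizeof(*page_list), GFP_KERNEL);
            if (!page_list)
                    return -ENOMEM;

            pinned = get_user_pages_fast(addr, nr_pages, FOLL_WRITE, page_list);
            if (pinned != nr_pages) {
                    ret = -EFAULT;  /* abbreviated: release pinned pages here */
                    goto out;
            }

            ret = sg_alloc_table_from_pages(sgt, page_list, nr_pages,
                                            offset, length, GFP_KERNEL);
    out:
            kfree(page_list);       /* sg entries keep their own page refs */
            return ret;
    }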
page_list 15211 drivers/scsi/lpfc/lpfc_sli.c list_for_each_entry(dmabuf, &cq->page_list, list) {
page_list 15305 drivers/scsi/lpfc/lpfc_sli.c list_for_each_entry(dmabuf, &mq->page_list, list) {
page_list 15416 drivers/scsi/lpfc/lpfc_sli.c list_for_each_entry(dmabuf, &mq->page_list, list) {
page_list 15578 drivers/scsi/lpfc/lpfc_sli.c list_for_each_entry(dmabuf, &wq->page_list, list) {
page_list 15841 drivers/scsi/lpfc/lpfc_sli.c list_for_each_entry(dmabuf, &hrq->page_list, list) {
page_list 15984 drivers/scsi/lpfc/lpfc_sli.c list_for_each_entry(dmabuf, &drq->page_list, list) {
page_list 16139 drivers/scsi/lpfc/lpfc_sli.c list_for_each_entry(dmabuf, &hrq->page_list, list) {
page_list 16151 drivers/scsi/lpfc/lpfc_sli.c list_for_each_entry(dmabuf, &drq->page_list, list) {
page_list 158 drivers/scsi/lpfc/lpfc_sli4.h struct list_head page_list;
page_list 29 drivers/staging/comedi/comedi_buf.c if (bm->page_list) {
page_list 35 drivers/staging/comedi/comedi_buf.c buf = &bm->page_list[0];
page_list 41 drivers/staging/comedi/comedi_buf.c buf = &bm->page_list[i];
page_list 46 drivers/staging/comedi/comedi_buf.c vfree(bm->page_list);
page_list 93 drivers/staging/comedi/comedi_buf.c bm->page_list = vzalloc(sizeof(*buf) * n_pages);
page_list 94 drivers/staging/comedi/comedi_buf.c if (!bm->page_list)
page_list 112 drivers/staging/comedi/comedi_buf.c buf = &bm->page_list[i];
page_list 120 drivers/staging/comedi/comedi_buf.c buf = &bm->page_list[i];
page_list 170 drivers/staging/comedi/comedi_buf.c buf = &bm->page_list[0];
page_list 178 drivers/staging/comedi/comedi_buf.c buf = &bm->page_list[i];
page_list 213 drivers/staging/comedi/comedi_buf.c void *b = bm->page_list[pg].virt_addr + pgoff;
page_list 2370 drivers/staging/comedi/comedi_fops.c buf = &bm->page_list[0];
page_list 2377 drivers/staging/comedi/comedi_fops.c buf = &bm->page_list[i];
page_list 251 drivers/staging/comedi/comedidev.h struct comedi_buf_page *page_list;
page_list 673 drivers/staging/comedi/drivers/mite.c desc->addr = cpu_to_le32(async->buf_map->page_list[i].dma_addr);
page_list 683 drivers/staging/comedi/drivers/mite.c desc->addr = cpu_to_le32(async->buf_map->page_list[i].dma_addr);
page_list 350 drivers/virt/vboxguest/vboxguest_utils.c dst_parm->u.page_list.size = len;
page_list 351 drivers/virt/vboxguest/vboxguest_utils.c dst_parm->u.page_list.offset = *off_extra;
page_list 569 drivers/virt/vboxguest/vboxguest_utils.c dst_parm->u.page_list.size = src_parm->u.page_list.size;
page_list 308 fs/ceph/addr.c struct list_head *page_list, int max)
page_list 313 fs/ceph/addr.c struct page *page = lru_to_page(page_list);
page_list 340 fs/ceph/addr.c while (!list_empty(page_list)) {
page_list 341 fs/ceph/addr.c page = lru_to_page(page_list);
page_list 353 fs/ceph/addr.c list_for_each_entry_reverse(page, page_list, lru) {
page_list 383 fs/ceph/addr.c page = list_entry(page_list->prev, struct page, lru);
page_list 442 fs/ceph/addr.c struct list_head *page_list, unsigned nr_pages)
page_list 454 fs/ceph/addr.c rc = ceph_readpages_from_fscache(mapping->host, mapping, page_list,
page_list 464 fs/ceph/addr.c while (!list_empty(page_list)) {
page_list 465 fs/ceph/addr.c rc = start_read(inode, rw_ctx, page_list, max);
page_list 470 fs/ceph/addr.c ceph_fscache_readpages_cancel(inode, page_list);
page_list 4226 fs/cifs/file.c readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
page_list 4237 fs/cifs/file.c page = lru_to_page(page_list);
page_list 4262 fs/cifs/file.c list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
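The ceph and cifs entries use page_list in the old ->readpages() sense: the VFS supplies a private list of pages not yet in the page cache, and the filesystem peels them off with lru_to_page() as it builds read requests. An illustrative skeleton only; the real implementations batch pages per RPC and handle fscache:

    #include <linux/mm.h>
    #include <linux/pagemap.h>

    static int demo_readpages(struct address_space *mapping,
                              struct list_head *page_list)
    {
            struct page *page;

            while (!list_empty(page_list)) {
                    page = lru_to_page(page_list);
                    list_del(&page->lru);

                    if (add_to_page_cache_lru(page, mapping, page->index,
                                              GFP_KERNEL)) {
                            put_page(page);
                            continue;
                    }
                    /* ... queue the locked page for read I/O here ... */
            }
            return 0;
    }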
page_list 4285 fs/cifs/file.c struct list_head *page_list, unsigned num_pages)
page_list 4303 fs/cifs/file.c rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
page_list 4332 fs/cifs/file.c while (!list_empty(page_list)) {
page_list 4365 fs/cifs/file.c rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
page_list 4431 fs/cifs/file.c cifs_fscache_readpages_cancel(mapping->host, page_list);
page_list 495 fs/nfs/internal.h void nfs_retry_commit(struct list_head *page_list,
page_list 1777 fs/nfs/write.c void nfs_retry_commit(struct list_head *page_list,
page_list 1784 fs/nfs/write.c while (!list_empty(page_list)) {
page_list 1785 fs/nfs/write.c req = nfs_list_entry(page_list->next);
page_list 24 include/linux/dm-io.h struct page_list *next;
page_list 43 include/linux/dm-io.h struct page_list *pl;
page_list 386 include/linux/memcontrol.h void mem_cgroup_uncharge_list(struct list_head *page_list);
page_list 897 include/linux/memcontrol.h static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
page_list 644 include/linux/mlx4/device.h struct mlx4_buf_list *page_list;
page_list 1098 include/linux/mlx4/device.h return buf->page_list[offset >> PAGE_SHIFT].buf +
page_list 1126 include/linux/mlx4/device.h int start_index, int npages, u64 *page_list);
page_list 1415 include/linux/mlx4/device.h int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
page_list 368 include/linux/swap.h extern unsigned long reclaim_pages(struct list_head *page_list);
page_list 76 include/rdma/ib_fmr_pool.h u64 page_list[0];
page_list 87 include/rdma/ib_fmr_pool.h u64 *page_list,
page_list 49 include/rdma/ib_umem_odp.h struct page **page_list;
page_list 2420 include/rdma/ib_verbs.h int (*map_phys_fmr)(struct ib_fmr *fmr, u64 *page_list, int list_len,
page_list 4192 include/rdma/ib_verbs.h u64 *page_list, int list_len,
page_list 4195 include/rdma/ib_verbs.h return fmr->device->ops.map_phys_fmr(fmr, page_list, list_len, iova);
page_list 235 include/uapi/linux/vbox_vmmdev_types.h } page_list;
page_list 258 include/uapi/linux/vbox_vmmdev_types.h } page_list;
page_list 43 mm/dmapool.c struct list_head page_list;
page_list 54 mm/dmapool.c struct list_head page_list;
page_list 86 mm/dmapool.c list_for_each_entry(page, &pool->page_list, page_list) {
page_list 165 mm/dmapool.c INIT_LIST_HEAD(&retval->page_list);
page_list 257 mm/dmapool.c list_del(&page->page_list);
page_list 286 mm/dmapool.c while (!list_empty(&pool->page_list)) {
page_list 288 mm/dmapool.c page = list_entry(pool->page_list.next,
page_list 289 mm/dmapool.c struct dma_page, page_list);
page_list 299 mm/dmapool.c list_del(&page->page_list);
page_list 330 mm/dmapool.c list_for_each_entry(page, &pool->page_list, page_list) {
page_list 344 mm/dmapool.c list_add(&page->page_list, &pool->page_list);
page_list 392 mm/dmapool.c list_for_each_entry(page, &pool->page_list, page_list) {
page_list 501 mm/internal.h struct list_head *page_list);
page_list 312 mm/madvise.c LIST_HEAD(page_list);
page_list 372 mm/madvise.c list_add(&page->lru, &page_list);
page_list 379 mm/madvise.c reclaim_pages(&page_list);
page_list 458 mm/madvise.c list_add(&page->lru, &page_list);
page_list 467 mm/madvise.c reclaim_pages(&page_list);
page_list 6742 mm/memcontrol.c static void uncharge_list(struct list_head *page_list)
page_list 6753 mm/memcontrol.c next = page_list->next;
page_list 6761 mm/memcontrol.c } while (next != page_list);
page_list 6797 mm/memcontrol.c void mem_cgroup_uncharge_list(struct list_head *page_list)
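mm/dmapool.c names both the list head in the pool and the link member in each dma_page page_list, which is why the iterator reads list_for_each_entry(page, &pool->page_list, page_list). A minimal sketch of that double naming with hypothetical demo_* types:

    #include <linux/list.h>

    struct demo_pool {
            struct list_head page_list;     /* list head in the pool */
    };

    struct demo_dma_page {
            struct list_head page_list;     /* link member, same name */
            void *vaddr;
    };

    static int demo_count_pages(struct demo_pool *pool)
    {
            struct demo_dma_page *page;
            int n = 0;

            /* head and member share the identifier page_list */
            list_for_each_entry(page, &pool->page_list, page_list)
                    n++;
            return n;
    }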
page_list 6802 mm/memcontrol.c if (!list_empty(page_list))
page_list 6803 mm/memcontrol.c uncharge_list(page_list);
page_list 1119 mm/vmscan.c static unsigned long shrink_page_list(struct list_head *page_list,
page_list 1134 mm/vmscan.c while (!list_empty(page_list)) {
page_list 1144 mm/vmscan.c page = lru_to_page(page_list);
page_list 1264 mm/vmscan.c list_add_tail(&page->lru, page_list);
page_list 1303 mm/vmscan.c page_list))
page_list 1311 mm/vmscan.c page_list))
page_list 1327 mm/vmscan.c if (split_huge_page_to_list(page, page_list))
page_list 1530 mm/vmscan.c list_splice(&ret_pages, page_list);
page_list 1537 mm/vmscan.c struct list_head *page_list)
page_list 1549 mm/vmscan.c list_for_each_entry_safe(page, next, page_list, lru) {
page_list 1559 mm/vmscan.c list_splice(&clean_pages, page_list);
page_list 1951 mm/vmscan.c LIST_HEAD(page_list);
page_list 1979 mm/vmscan.c nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
page_list 1994 mm/vmscan.c nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, 0,
page_list 2006 mm/vmscan.c move_pages_to_lru(lruvec, &page_list);
page_list 2012 mm/vmscan.c mem_cgroup_uncharge_list(&page_list);
page_list 2013 mm/vmscan.c free_unref_page_list(&page_list);
page_list 2146 mm/vmscan.c unsigned long reclaim_pages(struct list_head *page_list)
page_list 2161 mm/vmscan.c while (!list_empty(page_list)) {
page_list 2162 mm/vmscan.c page = lru_to_page(page_list);
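Finally, mm/vmscan.c shows the reclaim shape: pages are isolated onto a private LIST_HEAD(page_list), consumed with lru_to_page(), and survivors are spliced back for return to the LRU. A skeleton under that assumption; demo_can_reclaim is a hypothetical predicate standing in for the real page checks, and all locking and statistics are omitted:

    #include <linux/list.h>
    #include <linux/mm.h>

    static bool demo_can_reclaim(struct page *page);    /* hypothetical */

    static unsigned long demo_shrink(struct list_head *page_list)
    {
            LIST_HEAD(ret_pages);
            unsigned long nr_reclaimed = 0;
            struct page *page;

            while (!list_empty(page_list)) {
                    page = lru_to_page(page_list);
                    list_del(&page->lru);

                    if (demo_can_reclaim(page)) {
                            nr_reclaimed++;     /* freed via pageout elsewhere */
                            continue;
                    }
                    list_add(&page->lru, &ret_pages);
            }
            /* survivors go back to the caller's list for LRU putback */
            list_splice(&ret_pages, page_list);
            return nr_reclaimed;
    }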