Searched refs: page_list (Results 1 - 77 of 77) sorted by relevance

/linux-4.4.14/drivers/infiniband/hw/mthca/
mthca_allocator.c
122 if (array->page_list[p].page) mthca_array_get()
123 return array->page_list[p].page[index & MTHCA_ARRAY_MASK]; mthca_array_get()
133 if (!array->page_list[p].page) mthca_array_set()
134 array->page_list[p].page = (void **) get_zeroed_page(GFP_ATOMIC); mthca_array_set()
136 if (!array->page_list[p].page) mthca_array_set()
139 array->page_list[p].page[index & MTHCA_ARRAY_MASK] = value; mthca_array_set()
140 ++array->page_list[p].used; mthca_array_set()
149 if (--array->page_list[p].used == 0) { mthca_array_clear()
150 free_page((unsigned long) array->page_list[p].page); mthca_array_clear()
151 array->page_list[p].page = NULL; mthca_array_clear()
153 array->page_list[p].page[index & MTHCA_ARRAY_MASK] = NULL; mthca_array_clear()
155 if (array->page_list[p].used < 0) mthca_array_clear()
157 array, index, p, array->page_list[p].used); mthca_array_clear()
165 array->page_list = kmalloc(npage * sizeof *array->page_list, GFP_KERNEL); mthca_array_init()
166 if (!array->page_list) mthca_array_init()
170 array->page_list[i].page = NULL; mthca_array_init()
171 array->page_list[i].used = 0; mthca_array_init()
182 free_page((unsigned long) array->page_list[i].page); mthca_array_cleanup()
184 kfree(array->page_list); mthca_array_cleanup()
238 buf->page_list = kmalloc(npages * sizeof *buf->page_list, mthca_buf_alloc()
240 if (!buf->page_list) mthca_buf_alloc()
244 buf->page_list[i].buf = NULL; mthca_buf_alloc()
247 buf->page_list[i].buf = mthca_buf_alloc()
250 if (!buf->page_list[i].buf) mthca_buf_alloc()
254 dma_unmap_addr_set(&buf->page_list[i], mapping, t); mthca_buf_alloc()
256 clear_page(buf->page_list[i].buf); mthca_buf_alloc()
296 buf->page_list[i].buf, mthca_buf_free()
297 dma_unmap_addr(&buf->page_list[i], mthca_buf_free()
299 kfree(buf->page_list); mthca_buf_free()
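
The mthca_allocator.c hits above show a sparse two-level lookup: page_list[] holds lazily allocated pages of pointers, and an entry is reached through a page index plus a low-bit mask. A minimal userspace sketch of the same idea, assuming illustrative names and an illustrative entries-per-page constant (the driver derives its mask from PAGE_SIZE):

#include <stdlib.h>

#define ENTRIES_PER_PAGE 512            /* illustrative; not the driver's value */

struct sparse_array {
        struct {
                void **page;            /* lazily allocated page of pointers */
                int used;               /* live entries on this page */
        } *page_list;
};

static void *sparse_get(struct sparse_array *a, int index)
{
        int p = index / ENTRIES_PER_PAGE;

        if (!a->page_list[p].page)
                return NULL;
        return a->page_list[p].page[index % ENTRIES_PER_PAGE];
}

static int sparse_set(struct sparse_array *a, int index, void *value)
{
        int p = index / ENTRIES_PER_PAGE;

        if (!a->page_list[p].page) {
                a->page_list[p].page = calloc(ENTRIES_PER_PAGE, sizeof(void *));
                if (!a->page_list[p].page)
                        return -1;
        }
        a->page_list[p].page[index % ENTRIES_PER_PAGE] = value;
        ++a->page_list[p].used;
        return 0;
}
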
mthca_eq.c
231 return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE; get_eqe()
482 eq->page_list = kmalloc(npages * sizeof *eq->page_list, mthca_create_eq()
484 if (!eq->page_list) mthca_create_eq()
488 eq->page_list[i].buf = NULL; mthca_create_eq()
500 eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev, mthca_create_eq()
502 if (!eq->page_list[i].buf) mthca_create_eq()
506 dma_unmap_addr_set(&eq->page_list[i], mapping, t); mthca_create_eq()
508 clear_page(eq->page_list[i].buf); mthca_create_eq()
572 if (eq->page_list[i].buf) mthca_create_eq()
574 eq->page_list[i].buf, mthca_create_eq()
575 dma_unmap_addr(&eq->page_list[i], mthca_create_eq()
581 kfree(eq->page_list); mthca_create_eq()
621 eq->page_list[i].buf, mthca_free_eq()
622 dma_unmap_addr(&eq->page_list[i], mapping)); mthca_free_eq()
624 kfree(eq->page_list); mthca_free_eq()
mthca_mr.c
689 static inline int mthca_check_fmr(struct mthca_fmr *fmr, u64 *page_list, mthca_check_fmr() argument
703 /* Trust the user not to pass misaligned data in page_list */ mthca_check_fmr()
706 if (page_list[i] & ~page_mask) mthca_check_fmr()
717 int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, mthca_tavor_map_phys_fmr() argument
726 err = mthca_check_fmr(fmr, page_list, list_len, iova); mthca_tavor_map_phys_fmr()
739 __be64 mtt_entry = cpu_to_be64(page_list[i] | mthca_tavor_map_phys_fmr()
758 int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, mthca_arbel_map_phys_fmr() argument
766 err = mthca_check_fmr(fmr, page_list, list_len, iova); mthca_arbel_map_phys_fmr()
787 fmr->mem.arbel.mtts[i] = cpu_to_be64(page_list[i] | mthca_arbel_map_phys_fmr()
mthca_provider.c
908 u64 *page_list; mthca_reg_phys_mr() local
946 page_list = kmalloc(npages * sizeof *page_list, GFP_KERNEL); mthca_reg_phys_mr()
947 if (!page_list) { mthca_reg_phys_mr()
957 page_list[n++] = buffer_list[i].addr + ((u64) j << shift); mthca_reg_phys_mr()
968 page_list, shift, npages, mthca_reg_phys_mr()
973 kfree(page_list); mthca_reg_phys_mr()
978 kfree(page_list); mthca_reg_phys_mr()
mthca_provider.h
54 struct mthca_buf_list *page_list; member in union:mthca_buf
114 struct mthca_buf_list *page_list; member in struct:mthca_eq
mthca_dev.h
191 } *page_list; member in struct:mthca_array
483 int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
486 int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
mthca_srq.c
77 return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf + get_wqe()
mthca_qp.c
212 return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf + get_recv_wqe()
222 return qp->queue.page_list[(qp->send_wqe_offset + get_send_wqe()
mthca_cq.c
165 return buf->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].buf get_cqe_from_buf()
/linux-4.4.14/include/linux/
dm-io.h
23 struct page_list { struct
24 struct page_list *next;
43 struct page_list *pl;
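
dm-io.h defines page_list as a plain singly linked chain (a next pointer plus a struct page), which dm-io and dm-kcopyd below walk and splice by hand. A hedged sketch of walking such a chain; the helper name is made up for illustration:

/* count the pages on a dm-io style page_list chain (illustrative helper) */
static unsigned int count_page_list(struct page_list *pl)
{
        unsigned int n = 0;

        for (; pl; pl = pl->next)
                n++;
        return n;
}
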
memcontrol.h
302 void mem_cgroup_uncharge_list(struct list_head *page_list);
537 static inline void mem_cgroup_uncharge_list(struct list_head *page_list) mem_cgroup_uncharge_list() argument
/linux-4.4.14/arch/powerpc/kernel/
machine_kexec_32.c
35 unsigned long page_list; default_machine_kexec() local
46 page_list = image->head; default_machine_kexec()
63 (*rnk)(page_list, reboot_code_buffer_phys, image->start); default_machine_kexec()
misc_32.S
716 /* r3 = page_list */
/linux-4.4.14/mm/
dmapool.c
19 * allocated pages. Each page in the page_list is split into blocks of at
46 struct list_head page_list; member in struct:dma_pool
57 struct list_head page_list; member in struct:dma_page
89 list_for_each_entry(page, &pool->page_list, page_list) { show_pools()
166 INIT_LIST_HEAD(&retval->page_list); dma_pool_create()
258 list_del(&page->page_list); pool_free_page()
287 while (!list_empty(&pool->page_list)) { dma_pool_destroy()
289 page = list_entry(pool->page_list.next, dma_pool_destroy()
290 struct dma_page, page_list); dma_pool_destroy()
301 list_del(&page->page_list); dma_pool_destroy()
332 list_for_each_entry(page, &pool->page_list, page_list) { dma_pool_alloc()
346 list_add(&page->page_list, &pool->page_list); dma_pool_alloc()
394 list_for_each_entry(page, &pool->page_list, page_list) { pool_find_page()
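
In dmapool.c the name does double duty: the pool keeps a page_list head, and every dma_page is linked into it through its own page_list member, so lookups are a plain list walk. A hedged sketch of the pool_find_page() pattern visible above; the dma and allocation fields are assumed from the real structures:

static struct dma_page *find_page_sketch(struct dma_pool *pool, dma_addr_t dma)
{
        struct dma_page *page;

        list_for_each_entry(page, &pool->page_list, page_list) {
                if (dma < page->dma)
                        continue;
                if (dma - page->dma < pool->allocation)  /* assumed field */
                        return page;
        }
        return NULL;
}
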
vmscan.c
880 static unsigned long shrink_page_list(struct list_head *page_list, shrink_page_list() argument
903 while (!list_empty(page_list)) { shrink_page_list()
912 page = lru_to_page(page_list); shrink_page_list()
1025 list_add_tail(&page->lru, page_list); shrink_page_list()
1050 if (!add_to_swap(page, page_list)) shrink_page_list()
1223 list_splice(&ret_pages, page_list); shrink_page_list()
1235 struct list_head *page_list) reclaim_clean_pages_from_list()
1246 list_for_each_entry_safe(page, next, page_list, lru) { list_for_each_entry_safe()
1257 list_splice(&clean_pages, page_list);
1486 putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list) putback_inactive_pages() argument
1495 while (!list_empty(page_list)) { putback_inactive_pages()
1496 struct page *page = lru_to_page(page_list); putback_inactive_pages()
1537 list_splice(&pages_to_free, page_list); putback_inactive_pages()
1561 LIST_HEAD(page_list); shrink_inactive_list()
1592 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list, shrink_inactive_list()
1610 nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP, shrink_inactive_list()
1628 putback_inactive_pages(lruvec, &page_list); shrink_inactive_list()
1634 mem_cgroup_uncharge_list(&page_list); shrink_inactive_list()
1635 free_hot_cold_page_list(&page_list, true); shrink_inactive_list()
1234 reclaim_clean_pages_from_list(struct zone *zone, struct list_head *page_list) reclaim_clean_pages_from_list() argument
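
In mm/vmscan.c, page_list is a list_head threaded through page->lru: shrink_page_list() pops pages off the list one by one and splices the survivors back, as the hits above show. A hedged, heavily trimmed sketch of that loop shape; try_to_reclaim() is a stand-in for the real page checks, not a kernel function:

static unsigned long shrink_sketch(struct list_head *page_list)
{
        LIST_HEAD(ret_pages);
        unsigned long nr_reclaimed = 0;

        while (!list_empty(page_list)) {
                struct page *page = lru_to_page(page_list);

                list_del(&page->lru);
                if (try_to_reclaim(page))       /* stand-in for the real checks */
                        nr_reclaimed++;
                else
                        list_add(&page->lru, &ret_pages);
        }
        list_splice(&ret_pages, page_list);     /* give back what we kept */
        return nr_reclaimed;
}
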
internal.h
433 struct list_head *page_list);
memcontrol.c
5489 static void uncharge_list(struct list_head *page_list) uncharge_list() argument
5499 next = page_list->next; uncharge_list()
5541 } while (next != page_list); uncharge_list()
5570 * @page_list: list of pages to uncharge
5575 void mem_cgroup_uncharge_list(struct list_head *page_list) mem_cgroup_uncharge_list() argument
5580 if (!list_empty(page_list)) mem_cgroup_uncharge_list()
5581 uncharge_list(page_list); mem_cgroup_uncharge_list()
/linux-4.4.14/drivers/infiniband/core/
fmr_pool.c
115 u64 *page_list, ib_fmr_cache_lookup()
125 bucket = pool->cache_bucket + ib_fmr_hash(*page_list); ib_fmr_cache_lookup()
130 !memcmp(page_list, fmr->page_list, ib_fmr_cache_lookup()
131 page_list_len * sizeof *page_list)) ib_fmr_cache_lookup()
428 * @page_list:List of pages to map
429 * @list_len:Number of pages in @page_list
435 u64 *page_list, ib_fmr_pool_map_phys()
449 page_list, ib_fmr_pool_map_phys()
474 result = ib_map_phys_fmr(fmr->fmr, page_list, list_len, ib_fmr_pool_map_phys()
493 memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list)); ib_fmr_pool_map_phys()
497 pool->cache_bucket + ib_fmr_hash(fmr->page_list[0])); ib_fmr_pool_map_phys()
114 ib_fmr_cache_lookup(struct ib_fmr_pool *pool, u64 *page_list, int page_list_len, u64 io_virtual_address) ib_fmr_cache_lookup() argument
434 ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle, u64 *page_list, int list_len, u64 io_virtual_address) ib_fmr_pool_map_phys() argument
umem.c
87 struct page **page_list; ib_umem_get() local
150 page_list = (struct page **) __get_free_page(GFP_KERNEL); ib_umem_get()
151 if (!page_list) { ib_umem_get()
194 1, !umem->writable, page_list, vma_list); ib_umem_get()
207 sg_set_page(sg, page_list[i], PAGE_SIZE, 0); for_each_sg()
239 free_page((unsigned long) page_list);
umem_odp.c
273 umem->odp_data->page_list = vzalloc(ib_umem_num_pages(umem) * ib_umem_odp_get()
274 sizeof(*umem->odp_data->page_list)); ib_umem_odp_get()
275 if (!umem->odp_data->page_list) { ib_umem_odp_get()
342 vfree(umem->odp_data->page_list); ib_umem_odp_get()
412 vfree(umem->odp_data->page_list); ib_umem_odp_release()
468 umem->odp_data->page_list[page_index] = page; ib_umem_odp_map_dma_single_page()
470 } else if (umem->odp_data->page_list[page_index] == page) { ib_umem_odp_map_dma_single_page()
474 umem->odp_data->page_list[page_index], page); ib_umem_odp_map_dma_single_page()
638 if (umem->odp_data->page_list[idx]) { ib_umem_odp_unmap_dma_pages()
639 struct page *page = umem->odp_data->page_list[idx]; ib_umem_odp_unmap_dma_pages()
663 umem->odp_data->page_list[idx] = NULL; ib_umem_odp_unmap_dma_pages()
/linux-4.4.14/drivers/infiniband/hw/usnic/
usnic_uiom.c
52 ((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list)) /\
53 ((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] - \
54 (void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))
88 for_each_sg(chunk->page_list, sg, chunk->nents, i) { list_for_each_entry_safe()
103 struct page **page_list; usnic_uiom_get_pages() local
125 page_list = (struct page **) __get_free_page(GFP_KERNEL); usnic_uiom_get_pages()
126 if (!page_list) usnic_uiom_get_pages()
150 1, !writable, page_list, NULL); usnic_uiom_get_pages()
169 sg_init_table(chunk->page_list, chunk->nents); usnic_uiom_get_pages()
170 for_each_sg(chunk->page_list, sg, chunk->nents, i) { usnic_uiom_get_pages()
171 sg_set_page(sg, page_list[i + off], usnic_uiom_get_pages()
193 free_page((unsigned long) page_list); usnic_uiom_get_pages()
267 pa = sg_phys(&chunk->page_list[i]); list_for_each_entry()
usnic_uiom.h
79 struct scatterlist page_list[0]; member in struct:usnic_uiom_chunk
/linux-4.4.14/drivers/infiniband/hw/cxgb3/
iwch_mem.c
140 __be64 **page_list) build_phys_page_list()
184 *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL); build_phys_page_list()
185 if (!*page_list) build_phys_page_list()
193 (*page_list)[n++] = cpu_to_be64(buffer_list[i].addr + build_phys_page_list()
134 build_phys_page_list(struct ib_phys_buf *buffer_list, int num_phys_buf, u64 *iova_start, u64 *total_size, int *npages, int *shift, __be64 **page_list) build_phys_page_list() argument
iwch_provider.c
488 __be64 *page_list; iwch_register_phys_mem() local
520 &total_size, &npages, &shift, &page_list); iwch_register_phys_mem()
526 kfree(page_list); iwch_register_phys_mem()
530 ret = iwch_write_pbl(mhp, page_list, npages, 0); iwch_register_phys_mem()
531 kfree(page_list); iwch_register_phys_mem()
570 __be64 *page_list = NULL; iwch_reregister_phys_mem() local
600 &shift, &page_list); iwch_reregister_phys_mem()
606 kfree(page_list); iwch_reregister_phys_mem()
iwch_provider.h
357 __be64 **page_list);
/linux-4.4.14/include/rdma/
ib_umem_odp.h
51 struct page **page_list; member in struct:ib_umem_odp
53 * An array of the same size as page_list, with DMA addresses mapped
54 * for pages the pages in page_list. The lower two bits designate
60 * The umem_mutex protects the page_list and dma_list fields of an ODP
ib_fmr_pool.h
76 u64 page_list[0]; member in struct:ib_pool_fmr
87 u64 *page_list,
ib_verbs.h
1770 u64 *page_list, int list_len,
2936 * @page_list: An array of physical pages to map to the fast memory region.
2937 * @list_len: The number of pages in page_list.
2941 u64 *page_list, int list_len, ib_map_phys_fmr()
2944 return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova); ib_map_phys_fmr()
2940 ib_map_phys_fmr(struct ib_fmr *fmr, u64 *page_list, int list_len, u64 iova) ib_map_phys_fmr() argument
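
Across the RDMA core and the HCA drivers above, page_list is just a u64 array of page-aligned addresses that the caller builds and hands to ib_map_phys_fmr() (signature shown above), which forwards it to the device's map_phys_fmr hook. A hedged sketch of such a caller; the function and variable names are illustrative:

static int map_region_sketch(struct ib_fmr *fmr, u64 base, int npages, u64 iova)
{
        u64 *page_list;
        int i, err;

        page_list = kmalloc_array(npages, sizeof(*page_list), GFP_KERNEL);
        if (!page_list)
                return -ENOMEM;

        for (i = 0; i < npages; i++)
                page_list[i] = base + ((u64)i << PAGE_SHIFT);   /* page-aligned addresses */

        err = ib_map_phys_fmr(fmr, page_list, npages, iova);
        kfree(page_list);
        return err;
}
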
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx4/
mr.c
695 int start_index, int npages, u64 *page_list) mlx4_write_mtt_chunk()
712 mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); mlx4_write_mtt_chunk()
721 int start_index, int npages, u64 *page_list) __mlx4_write_mtt()
736 err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list); __mlx4_write_mtt()
741 page_list += chunk; __mlx4_write_mtt()
749 int start_index, int npages, u64 *page_list) mlx4_write_mtt()
772 inbox[i + 2] = cpu_to_be64(page_list[i] | mlx4_write_mtt()
782 page_list += chunk; mlx4_write_mtt()
788 return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list); mlx4_write_mtt()
795 u64 *page_list; mlx4_buf_write_mtt() local
799 page_list = kmalloc(buf->npages * sizeof *page_list, mlx4_buf_write_mtt()
801 if (!page_list) mlx4_buf_write_mtt()
806 page_list[i] = buf->direct.map + (i << buf->page_shift); mlx4_buf_write_mtt()
808 page_list[i] = buf->page_list[i].map; mlx4_buf_write_mtt()
810 err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list); mlx4_buf_write_mtt()
812 kfree(page_list); mlx4_buf_write_mtt()
972 static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list, mlx4_check_fmr() argument
986 /* Trust the user not to pass misaligned data in page_list */ mlx4_check_fmr()
989 if (page_list[i] & ~page_mask) mlx4_check_fmr()
999 int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, mlx4_map_phys_fmr() argument
1005 err = mlx4_check_fmr(fmr, page_list, npages, iova); mlx4_map_phys_fmr()
1024 fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); mlx4_map_phys_fmr()
694 mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list) mlx4_write_mtt_chunk() argument
720 __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list) __mlx4_write_mtt() argument
748 mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list) mlx4_write_mtt() argument
alloc.c
615 buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list), mlx4_buf_alloc()
617 if (!buf->page_list) mlx4_buf_alloc()
621 buf->page_list[i].buf = mlx4_buf_alloc()
625 if (!buf->page_list[i].buf) mlx4_buf_alloc()
628 buf->page_list[i].map = t; mlx4_buf_alloc()
630 memset(buf->page_list[i].buf, 0, PAGE_SIZE); mlx4_buf_alloc()
639 pages[i] = virt_to_page(buf->page_list[i].buf); mlx4_buf_alloc()
669 if (buf->page_list[i].buf) mlx4_buf_free()
672 buf->page_list[i].buf, mlx4_buf_free()
673 buf->page_list[i].map); mlx4_buf_free()
674 kfree(buf->page_list); mlx4_buf_free()
eq.c
118 return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE; get_eqe()
972 eq->page_list = kmalloc(npages * sizeof *eq->page_list, mlx4_create_eq()
974 if (!eq->page_list) mlx4_create_eq()
978 eq->page_list[i].buf = NULL; mlx4_create_eq()
990 eq->page_list[i].buf = dma_alloc_coherent(&dev->persist-> mlx4_create_eq()
994 if (!eq->page_list[i].buf) mlx4_create_eq()
998 eq->page_list[i].map = t; mlx4_create_eq()
1000 memset(eq->page_list[i].buf, 0, PAGE_SIZE); mlx4_create_eq()
1058 if (eq->page_list[i].buf) mlx4_create_eq()
1060 eq->page_list[i].buf, mlx4_create_eq()
1061 eq->page_list[i].map); mlx4_create_eq()
1066 kfree(eq->page_list); mlx4_create_eq()
1094 eq->page_list[i].buf, mlx4_free_eq()
1095 eq->page_list[i].map); mlx4_free_eq()
1097 kfree(eq->page_list); mlx4_free_eq()
en_resources.c
122 pages[i] = virt_to_page(buf->page_list[i].buf); mlx4_en_map_buffer()
mlx4.h
399 struct mlx4_buf_list *page_list; member in struct:mlx4_eq
1013 int start_index, int npages, u64 *page_list);
resource_tracker.c
3189 __be64 *page_list = inbox->buf; mlx4_WRITE_MTT_wrapper() local
3190 u64 *pg_list = (u64 *)page_list; mlx4_WRITE_MTT_wrapper()
3193 int start = be64_to_cpu(page_list[0]); mlx4_WRITE_MTT_wrapper()
3209 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL); mlx4_WRITE_MTT_wrapper()
3211 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages, mlx4_WRITE_MTT_wrapper()
3212 ((u64 *)page_list + 2)); mlx4_WRITE_MTT_wrapper()
/linux-4.4.14/arch/x86/kernel/
machine_kexec_32.c
184 unsigned long page_list[PAGES_NR]; machine_kexec() local
222 page_list[PA_CONTROL_PAGE] = __pa(control_page); machine_kexec()
223 page_list[VA_CONTROL_PAGE] = (unsigned long)control_page; machine_kexec()
224 page_list[PA_PGD] = __pa(image->arch.pgd); machine_kexec()
227 page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page) machine_kexec()
250 (unsigned long)page_list, machine_kexec()
machine_kexec_64.c
257 unsigned long page_list[PAGES_NR]; machine_kexec() local
288 page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page); machine_kexec()
289 page_list[VA_CONTROL_PAGE] = (unsigned long)control_page; machine_kexec()
290 page_list[PA_TABLE_PAGE] = machine_kexec()
294 page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page) machine_kexec()
317 (unsigned long)page_list, machine_kexec()
relocate_kernel_64.S
47 * %rsi page_list
214 movq %rdi, %rcx /* Put the page_list in %rcx */
relocate_kernel_32.S
61 movl 20+4(%esp), %ebx /* page_list */
/linux-4.4.14/drivers/misc/genwqe/
card_utils.c
250 struct page **page_list, int num_pages, genwqe_map_pages()
261 daddr = pci_map_page(pci_dev, page_list[i], genwqe_map_pages()
520 static int free_user_pages(struct page **page_list, unsigned int nr_pages, free_user_pages() argument
526 if (page_list[i] != NULL) { free_user_pages()
528 set_page_dirty_lock(page_list[i]); free_user_pages()
529 put_page(page_list[i]); free_user_pages()
550 * page_list and pci_alloc_consistent for the sg_list.
552 * be fixed with some effort. The page_list must be split into
572 /* determine space needed for page_list. */ genwqe_user_vmap()
577 m->page_list = kcalloc(m->nr_pages, genwqe_user_vmap()
580 if (!m->page_list) { genwqe_user_vmap()
581 dev_err(&pci_dev->dev, "err: alloc page_list failed\n"); genwqe_user_vmap()
587 m->dma_list = (dma_addr_t *)(m->page_list + m->nr_pages); genwqe_user_vmap()
593 m->page_list); /* ptrs to pages */ genwqe_user_vmap()
599 free_user_pages(m->page_list, rc, 0); genwqe_user_vmap()
604 rc = genwqe_map_pages(cd, m->page_list, m->nr_pages, m->dma_list); genwqe_user_vmap()
611 free_user_pages(m->page_list, m->nr_pages, 0); genwqe_user_vmap()
614 kfree(m->page_list); genwqe_user_vmap()
615 m->page_list = NULL; genwqe_user_vmap()
643 if (m->page_list) { genwqe_user_vunmap()
644 free_user_pages(m->page_list, m->nr_pages, 1); genwqe_user_vunmap()
646 kfree(m->page_list); genwqe_user_vunmap()
647 m->page_list = NULL; genwqe_user_vunmap()
249 genwqe_map_pages(struct genwqe_dev *cd, struct page **page_list, int num_pages, dma_addr_t *dma_list) genwqe_map_pages() argument
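
Here page_list is an array of struct page pointers filled by pinning a user buffer, then handed to pci_map_page() one page at a time. A hedged sketch of the pinning step; the writable flag and error handling are simplified relative to the real driver:

/* pin nr_pages of a user buffer starting at 'addr' into page_list */
static int pin_user_buffer(unsigned long addr, int nr_pages,
                           struct page **page_list)
{
        int rc;

        rc = get_user_pages_fast(addr & PAGE_MASK, nr_pages,
                                 1 /* write */, page_list);
        if (rc < nr_pages)
                return -EFAULT; /* simplified: real code releases any partial pin */
        return 0;
}
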
card_base.h
179 struct page **page_list; /* list of pages used by user buff */ member in struct:dma_mapping
/linux-4.4.14/drivers/md/
dm-kcopyd.c
41 struct page_list *pages;
72 static struct page_list zero_page_list;
193 static struct page_list *alloc_pl(gfp_t gfp) alloc_pl()
195 struct page_list *pl; alloc_pl()
210 static void free_pl(struct page_list *pl) free_pl()
220 static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl) kcopyd_put_pages()
222 struct page_list *next; kcopyd_put_pages()
240 unsigned int nr, struct page_list **pages) kcopyd_get_pages()
242 struct page_list *pl; kcopyd_get_pages()
271 static void drop_pages(struct page_list *pl) drop_pages()
273 struct page_list *next; drop_pages()
288 struct page_list *pl = NULL, *next; client_reserve_pages()
343 struct page_list *pages;
dm-io.c
179 struct page_list *pl = (struct page_list *) dp->context_ptr; list_get_page()
188 struct page_list *pl = (struct page_list *) dp->context_ptr; list_next_page()
193 static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset) list_dp_init()
/linux-4.4.14/arch/sh/kernel/
machine_kexec.c
75 unsigned long page_list; machine_kexec() local
105 page_list = image->head; machine_kexec()
122 (*rnk)(page_list, reboot_code_buffer, machine_kexec()
/linux-4.4.14/arch/arm/kernel/
machine_kexec.c
146 unsigned long page_list; machine_kexec() local
160 page_list = image->head & PAGE_MASK; machine_kexec()
170 kexec_indirection_page = page_list; machine_kexec()
/linux-4.4.14/drivers/gpu/drm/ttm/
ttm_page_alloc_dma.c
123 * @page_list: The link to the 'page_list' in 'struct dma_pool'.
129 struct list_head page_list; member in struct:dma_page
392 list_for_each_entry_safe(d_page, tmp, d_pages, page_list) { list_for_each_entry_safe()
393 list_del(&d_page->page_list); list_for_each_entry_safe()
405 list_del(&d_page->page_list); ttm_dma_page_put()
456 page_list) { ttm_dma_page_pool_free()
461 list_move(&dma_p->page_list, &d_pages); ttm_dma_page_pool_free()
696 list_for_each_entry_safe(d_page, tmp, d_pages, page_list) { list_for_each_entry_safe()
700 list_del(&d_page->page_list); list_for_each_entry_safe()
783 list_add(&dma_p->page_list, d_pages); ttm_dma_pool_alloc_new_pages()
831 list_for_each_entry(d_page, &d_pages, page_list) { ttm_dma_page_pool_fill_locked()
859 d_page = list_first_entry(&pool->free_list, struct dma_page, page_list); ttm_dma_pool_get_pages()
863 list_move_tail(&d_page->page_list, &ttm_dma->pages_list); ttm_dma_pool_get_pages()
954 list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) { ttm_dma_unpopulate()
977 list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) { ttm_dma_unpopulate()
/linux-4.4.14/drivers/staging/rdma/amso1100/
c2_provider.c
345 u64 *page_list; c2_reg_phys_mr() local
378 page_list = vmalloc(sizeof(u64) * pbl_depth); c2_reg_phys_mr()
379 if (!page_list) { c2_reg_phys_mr()
380 pr_debug("couldn't vmalloc page_list of size %zd\n", c2_reg_phys_mr()
392 page_list[j++] = (buffer_list[i].addr + c2_reg_phys_mr()
398 vfree(page_list); c2_reg_phys_mr()
408 (unsigned long long) page_list[0], c2_reg_phys_mr()
409 (unsigned long long) page_list[pbl_depth-1]); c2_reg_phys_mr()
410 err = c2_nsmr_register_phys_kern(to_c2dev(ib_pd->device), page_list, c2_reg_phys_mr()
414 vfree(page_list); c2_reg_phys_mr()
c2.h
241 } *page_list; member in struct:c2_array
/linux-4.4.14/drivers/infiniband/hw/cxgb4/
mem.c
437 int *shift, __be64 **page_list) build_phys_page_list()
481 *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL); build_phys_page_list()
482 if (!*page_list) build_phys_page_list()
490 (*page_list)[n++] = cpu_to_be64(buffer_list[i].addr + build_phys_page_list()
510 __be64 *page_list = NULL; c4iw_reregister_phys_mem() local
543 &shift, &page_list); c4iw_reregister_phys_mem()
549 kfree(page_list); c4iw_reregister_phys_mem()
554 kfree(page_list); c4iw_reregister_phys_mem()
576 __be64 *page_list; c4iw_register_phys_mem() local
609 &page_list); c4iw_register_phys_mem()
614 kfree(page_list); c4iw_register_phys_mem()
621 kfree(page_list); c4iw_register_phys_mem()
625 ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr, c4iw_register_phys_mem()
627 kfree(page_list); c4iw_register_phys_mem()
434 build_phys_page_list(struct ib_phys_buf *buffer_list, int num_phys_buf, u64 *iova_start, u64 *total_size, int *npages, int *shift, __be64 **page_list) build_phys_page_list() argument
/linux-4.4.14/block/
blk-mq-tag.h
44 struct list_head page_list; member in struct:blk_mq_tags
blk-mq.c
1437 while (!list_empty(&tags->page_list)) { blk_mq_free_rq_map()
1438 page = list_first_entry(&tags->page_list, struct page, lru); blk_mq_free_rq_map()
1471 INIT_LIST_HEAD(&tags->page_list); blk_mq_init_rq_map()
1514 list_add_tail(&page->lru, &tags->page_list); blk_mq_init_rq_map()
/linux-4.4.14/drivers/staging/comedi/
comedi_buf.c
38 if (bm->page_list) { comedi_buf_map_kref_release()
40 buf = &bm->page_list[i]; comedi_buf_map_kref_release()
54 vfree(bm->page_list); comedi_buf_map_kref_release()
111 bm->page_list = vzalloc(sizeof(*buf) * n_pages); __comedi_buf_alloc()
112 if (bm->page_list) __comedi_buf_alloc()
119 buf = &bm->page_list[i]; __comedi_buf_alloc()
comedidev.h
234 * @page_list: Pointer to array of &struct comedi_buf_page, one for each
256 struct comedi_buf_page *page_list; member in struct:comedi_buf_map
comedi_fops.c
2237 struct comedi_buf_page *buf = &bm->page_list[i]; comedi_mmap()
/linux-4.4.14/drivers/staging/rdma/ipath/
ipath_mr.c
340 * @page_list: the list of pages to associate with the fast memory region
347 int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 * page_list, ipath_map_phys_fmr() argument
371 fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i]; ipath_map_phys_fmr()
ipath_verbs.h
844 int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 * page_list,
/linux-4.4.14/drivers/infiniband/hw/qib/
qib_keys.c
351 u64 *page_list; qib_reg_mr() local
376 page_list = mr->pages; qib_reg_mr()
380 mrg->map[m]->segs[n].vaddr = (void *) page_list[i]; qib_reg_mr()
qib_mr.c
436 * @page_list: the list of pages to associate with the fast memory region
443 int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, qib_map_phys_fmr() argument
470 fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i]; qib_map_phys_fmr()
qib_verbs.h
1058 int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
/linux-4.4.14/drivers/staging/rdma/ehca/
ehca_mrmw.h
107 u64 *page_list,
ehca_mrmw.c
856 u64 *page_list, ehca_map_phys_fmr()
874 ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len); ehca_map_phys_fmr()
897 pginfo.u.fmr.page_list = page_list; ehca_map_phys_fmr()
916 ehca_err(fmr->device, "ret=%i fmr=%p page_list=%p list_len=%x " ehca_map_phys_fmr()
917 "iova=%llx", ret, fmr, page_list, list_len, iova); ehca_map_phys_fmr()
1822 u64 *page_list, ehca_fmr_check_page_list()
1836 page = page_list; ehca_fmr_check_page_list()
2066 /* loop over desired page_list entries */ ehca_set_pagebuf_fmr()
2067 fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem; ehca_set_pagebuf_fmr()
855 ehca_map_phys_fmr(struct ib_fmr *fmr, u64 *page_list, int list_len, u64 iova) ehca_map_phys_fmr() argument
1821 ehca_fmr_check_page_list(struct ehca_mr *e_fmr, u64 *page_list, int list_len) ehca_fmr_check_page_list() argument
ehca_iverbs.h
114 u64 *page_list, int list_len, u64 iova);
ehca_classes.h
330 u64 *page_list; member in struct:ehca_mr_pginfo::__anon10620::__anon10623
/linux-4.4.14/fs/ceph/
addr.c
311 static int start_read(struct inode *inode, struct list_head *page_list, int max) start_read() argument
316 struct page *page = list_entry(page_list->prev, struct page, lru); start_read()
331 list_for_each_entry_reverse(page, page_list, lru) { list_for_each_entry_reverse()
358 page = list_entry(page_list->prev, struct page, lru);
398 * Read multiple pages. Leave pages we don't read + unlock in page_list;
402 struct list_head *page_list, unsigned nr_pages) ceph_readpages()
412 rc = ceph_readpages_from_fscache(mapping->host, mapping, page_list, ceph_readpages()
425 while (!list_empty(page_list)) { ceph_readpages()
426 rc = start_read(inode, page_list, max); ceph_readpages()
432 ceph_fscache_readpages_cancel(inode, page_list); ceph_readpages()
401 ceph_readpages(struct file *file, struct address_space *mapping, struct list_head *page_list, unsigned nr_pages) ceph_readpages() argument
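
For ->readpages() implementations such as ceph above and cifs below, page_list arrives from the readahead code with pages in reverse file order, so both take entries from the tail (page_list->prev) and add them to the page cache before issuing reads. A hedged sketch of just that consumption step; the actual read submission is omitted:

static void consume_readpages_sketch(struct address_space *mapping,
                                     struct list_head *page_list)
{
        while (!list_empty(page_list)) {
                struct page *page = list_entry(page_list->prev, struct page, lru);

                list_del(&page->lru);
                if (add_to_page_cache_lru(page, mapping, page->index,
                                          GFP_KERNEL)) {
                        /* already cached or out of memory: drop our reference */
                        put_page(page);
                        continue;
                }
                /* ... queue 'page' for reading here ... */
        }
}
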
/linux-4.4.14/drivers/staging/rdma/hfi1/
mr.c
426 * @page_list: the list of pages to associate with the fast memory region
433 int hfi1_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, hfi1_map_phys_fmr() argument
460 fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i]; hfi1_map_phys_fmr()
verbs.h
1032 int hfi1_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
/linux-4.4.14/drivers/infiniband/hw/mlx4/
mr.c
480 int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, mlx4_ib_map_phys_fmr() argument
486 return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova, mlx4_ib_map_phys_fmr()
mlx4_ib.h
769 int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int npages,
/linux-4.4.14/arch/x86/include/asm/
kexec.h
148 unsigned long page_list,
/linux-4.4.14/include/linux/mlx4/
device.h
615 struct mlx4_buf_list *page_list; member in struct:mlx4_buf
1053 return buf->page_list[offset >> PAGE_SHIFT].buf + mlx4_buf_offset()
1081 int start_index, int npages, u64 *page_list);
1360 int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
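
device.h above also shows how a fragmented mlx4_buf is addressed: for multi-page buffers, a byte offset is split into an index into page_list[] and a remainder within that page. A hedged sketch of that helper, with the single-page 'direct' case omitted:

static void *buf_offset_sketch(struct mlx4_buf *buf, int offset)
{
        /* pick the page covering 'offset', then the offset inside it */
        return buf->page_list[offset >> PAGE_SHIFT].buf +
               (offset & (PAGE_SIZE - 1));
}
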
/linux-4.4.14/arch/ia64/kernel/
relocate_kernel.S
132 mov r30=in0 // in0 is page_list
/linux-4.4.14/drivers/dma/
ste_dma40.c
3407 unsigned long *page_list; d40_lcla_allocate() local
3416 page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS, d40_lcla_allocate()
3419 if (!page_list) { d40_lcla_allocate()
3428 page_list[i] = __get_free_pages(GFP_KERNEL, d40_lcla_allocate()
3430 if (!page_list[i]) { d40_lcla_allocate()
3437 free_pages(page_list[j], base->lcla_pool.pages); d40_lcla_allocate()
3441 if ((virt_to_phys((void *)page_list[i]) & d40_lcla_allocate()
3447 free_pages(page_list[j], base->lcla_pool.pages); d40_lcla_allocate()
3450 base->lcla_pool.base = (void *)page_list[i]; d40_lcla_allocate()
3484 kfree(page_list); d40_lcla_allocate()
/linux-4.4.14/drivers/staging/comedi/drivers/
mite.c
332 cpu_to_le32(async->buf_map->page_list[i].dma_addr); mite_buf_change()
/linux-4.4.14/fs/cifs/
file.c
3376 readpages_get_pages(struct address_space *mapping, struct list_head *page_list, readpages_get_pages() argument
3387 page = list_entry(page_list->prev, struct page, lru); readpages_get_pages()
3412 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) { list_for_each_entry_safe_reverse()
3435 struct list_head *page_list, unsigned num_pages) cifs_readpages()
3451 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list, cifs_readpages()
3474 * Note that list order is important. The page_list is in cifs_readpages()
3478 while (!list_empty(page_list)) { cifs_readpages()
3501 rc = readpages_get_pages(mapping, page_list, rsize, &tmplist, cifs_readpages()
3559 cifs_fscache_readpages_cancel(mapping->host, page_list); cifs_readpages()
3434 cifs_readpages(struct file *file, struct address_space *mapping, struct list_head *page_list, unsigned num_pages) cifs_readpages() argument
/linux-4.4.14/fs/nfs/
write.c
1651 void nfs_retry_commit(struct list_head *page_list, nfs_retry_commit() argument
1658 while (!list_empty(page_list)) { nfs_retry_commit()
1659 req = nfs_list_entry(page_list->next); nfs_retry_commit()
internal.h
481 void nfs_retry_commit(struct list_head *page_list,
/linux-4.4.14/drivers/scsi/lpfc/
lpfc_sli4.h
140 struct list_head page_list; member in struct:lpfc_queue
lpfc_sli.c
12737 while (!list_empty(&queue->page_list)) { lpfc_sli4_queue_free()
12738 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf, lpfc_sli4_queue_free()
12778 INIT_LIST_HEAD(&queue->page_list); lpfc_sli4_queue_alloc()
12792 list_add_tail(&dmabuf->list, &queue->page_list); lpfc_sli4_queue_alloc()
13015 list_for_each_entry(dmabuf, &eq->page_list, list) { lpfc_eq_create()
13135 list_for_each_entry(dmabuf, &cq->page_list, list) { lpfc_cq_create()
13226 list_for_each_entry(dmabuf, &mq->page_list, list) { lpfc_mq_create_fb_init()
13337 list_for_each_entry(dmabuf, &mq->page_list, list) { lpfc_mq_create()
13513 list_for_each_entry(dmabuf, &wq->page_list, list) { lpfc_wq_create()
13731 list_for_each_entry(dmabuf, &hrq->page_list, list) { lpfc_rq_create()
13862 list_for_each_entry(dmabuf, &drq->page_list, list) { lpfc_rq_create()

Completed in 2448 milliseconds