
Searched refs: page_list (Results 1 – 94 of 94) sorted by relevance

/linux-4.1.27/drivers/infiniband/hw/mthca/
Dmthca_allocator.c122 if (array->page_list[p].page) in mthca_array_get()
123 return array->page_list[p].page[index & MTHCA_ARRAY_MASK]; in mthca_array_get()
133 if (!array->page_list[p].page) in mthca_array_set()
134 array->page_list[p].page = (void **) get_zeroed_page(GFP_ATOMIC); in mthca_array_set()
136 if (!array->page_list[p].page) in mthca_array_set()
139 array->page_list[p].page[index & MTHCA_ARRAY_MASK] = value; in mthca_array_set()
140 ++array->page_list[p].used; in mthca_array_set()
149 if (--array->page_list[p].used == 0) { in mthca_array_clear()
150 free_page((unsigned long) array->page_list[p].page); in mthca_array_clear()
151 array->page_list[p].page = NULL; in mthca_array_clear()
[all …]
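
The mthca_allocator.c hits above implement a lazily populated two-level lookup table: an index splits into a page number p and a slot (index & MTHCA_ARRAY_MASK), pages come from get_zeroed_page() only on first use, and a per-page used counter lets mthca_array_clear() free a page once it drains. A minimal userspace sketch of the same pattern, assuming illustrative names and sizes (SLOTS_PER_PAGE stands in for MTHCA_ARRAY_MASK + 1, calloc/free for get_zeroed_page/free_page):

#include <stdlib.h>

#define SLOTS_PER_PAGE 512              /* stand-in for MTHCA_ARRAY_MASK + 1 */
#define SLOT_MASK      (SLOTS_PER_PAGE - 1)

struct page_slot { void **page; int used; };
struct two_level { struct page_slot *page_list; int npages; };

static void *tl_get(struct two_level *a, int index)
{
        int p = index / SLOTS_PER_PAGE;

        if (p >= a->npages || !a->page_list[p].page)
                return NULL;
        return a->page_list[p].page[index & SLOT_MASK];
}

static int tl_set(struct two_level *a, int index, void *value)
{
        int p = index / SLOTS_PER_PAGE;

        if (p >= a->npages)
                return -1;
        if (!a->page_list[p].page)      /* allocate the page lazily */
                a->page_list[p].page = calloc(SLOTS_PER_PAGE, sizeof(void *));
        if (!a->page_list[p].page)
                return -1;
        a->page_list[p].page[index & SLOT_MASK] = value;
        ++a->page_list[p].used;
        return 0;
}

static void tl_clear(struct two_level *a, int index)
{
        int p = index / SLOTS_PER_PAGE; /* assumes a prior tl_set() on p */

        a->page_list[p].page[index & SLOT_MASK] = NULL;
        if (--a->page_list[p].used == 0) {      /* last occupant: drop page */
                free(a->page_list[p].page);
                a->page_list[p].page = NULL;
        }
}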
Dmthca_eq.c231 return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE; in get_eqe()
482 eq->page_list = kmalloc(npages * sizeof *eq->page_list, in mthca_create_eq()
484 if (!eq->page_list) in mthca_create_eq()
488 eq->page_list[i].buf = NULL; in mthca_create_eq()
500 eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev, in mthca_create_eq()
502 if (!eq->page_list[i].buf) in mthca_create_eq()
506 dma_unmap_addr_set(&eq->page_list[i], mapping, t); in mthca_create_eq()
508 clear_page(eq->page_list[i].buf); in mthca_create_eq()
572 if (eq->page_list[i].buf) in mthca_create_eq()
574 eq->page_list[i].buf, in mthca_create_eq()
[all …]
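
get_eqe() in the mthca_eq.c hits resolves an entry in a ring built from PAGE_SIZE DMA-coherent chunks by splitting a flat byte offset into a chunk index and an offset within the chunk. A hedged sketch of that address computation (PAGE_SZ and the struct layout are illustrative; the kernel's chunk also carries a DMA mapping handle):

#include <stddef.h>

#define PAGE_SZ 4096

struct buf_chunk { void *buf; };

struct eq_buf {
        struct buf_chunk *page_list;    /* PAGE_SZ-sized coherent chunks */
        int nent;                       /* entries in the ring */
        int entry_size;
};

static void *eq_entry(struct eq_buf *eq, int entry)
{
        unsigned long off = (unsigned long)(entry % eq->nent) * eq->entry_size;

        /* chunk index from the high part, byte offset from the low part */
        return (char *)eq->page_list[off / PAGE_SZ].buf + off % PAGE_SZ;
}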
Dmthca_provider.h54 struct mthca_buf_list *page_list; member
114 struct mthca_buf_list *page_list; member
Dmthca_mr.c689 static inline int mthca_check_fmr(struct mthca_fmr *fmr, u64 *page_list, in mthca_check_fmr() argument
706 if (page_list[i] & ~page_mask) in mthca_check_fmr()
717 int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, in mthca_tavor_map_phys_fmr() argument
726 err = mthca_check_fmr(fmr, page_list, list_len, iova); in mthca_tavor_map_phys_fmr()
739 __be64 mtt_entry = cpu_to_be64(page_list[i] | in mthca_tavor_map_phys_fmr()
758 int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, in mthca_arbel_map_phys_fmr() argument
766 err = mthca_check_fmr(fmr, page_list, list_len, iova); in mthca_arbel_map_phys_fmr()
787 fmr->mem.arbel.mtts[i] = cpu_to_be64(page_list[i] | in mthca_arbel_map_phys_fmr()
Dmthca_provider.c900 u64 *page_list; in mthca_reg_phys_mr() local
938 page_list = kmalloc(npages * sizeof *page_list, GFP_KERNEL); in mthca_reg_phys_mr()
939 if (!page_list) { in mthca_reg_phys_mr()
949 page_list[n++] = buffer_list[i].addr + ((u64) j << shift); in mthca_reg_phys_mr()
960 page_list, shift, npages, in mthca_reg_phys_mr()
965 kfree(page_list); in mthca_reg_phys_mr()
970 kfree(page_list); in mthca_reg_phys_mr()
Dmthca_dev.h191 } *page_list; member
483 int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
486 int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
Dmthca_srq.c77 return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf + in get_wqe()
Dmthca_cq.c165 return buf->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].buf in get_cqe_from_buf()
Dmthca_qp.c212 return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf + in get_recv_wqe()
222 return qp->queue.page_list[(qp->send_wqe_offset + in get_send_wqe()
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx5/core/
Dalloc.c78 buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list), in mlx5_buf_alloc()
80 if (!buf->page_list) in mlx5_buf_alloc()
84 buf->page_list[i].buf = in mlx5_buf_alloc()
87 if (!buf->page_list[i].buf) in mlx5_buf_alloc()
90 buf->page_list[i].map = t; in mlx5_buf_alloc()
99 pages[i] = virt_to_page(buf->page_list[i].buf); in mlx5_buf_alloc()
128 if (buf->page_list[i].buf) in mlx5_buf_free()
130 buf->page_list[i].buf, in mlx5_buf_free()
131 buf->page_list[i].map); in mlx5_buf_free()
132 kfree(buf->page_list); in mlx5_buf_free()
[all …]
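
The mlx5 alloc.c hits follow the usual allocate-then-unwind shape: kcalloc() a page_list of fragment descriptors, fill each fragment with dma_alloc_coherent(), and on failure or teardown release only the fragments that exist. A self-contained userspace sketch under those assumptions, with malloc/free standing in for the DMA-coherent allocator:

#include <stdlib.h>

struct frag { void *buf; };

struct frag_buf { struct frag *page_list; int nbufs; };

static int frag_buf_alloc(struct frag_buf *buf, int nbufs, size_t frag_sz)
{
        int i;

        buf->nbufs = nbufs;
        buf->page_list = calloc(nbufs, sizeof(*buf->page_list));
        if (!buf->page_list)
                return -1;

        for (i = 0; i < nbufs; i++) {
                buf->page_list[i].buf = calloc(1, frag_sz);
                if (!buf->page_list[i].buf)
                        goto err_unwind;
        }
        return 0;

err_unwind:                             /* free only what was allocated */
        while (--i >= 0)
                free(buf->page_list[i].buf);
        free(buf->page_list);
        buf->page_list = NULL;
        return -1;
}

static void frag_buf_free(struct frag_buf *buf)
{
        int i;

        for (i = 0; i < buf->nbufs; i++)
                if (buf->page_list[i].buf)      /* tolerate partial alloc */
                        free(buf->page_list[i].buf);
        free(buf->page_list);
}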
/linux-4.1.27/mm/
Ddmapool.c46 struct list_head page_list; member
57 struct list_head page_list; member
89 list_for_each_entry(page, &pool->page_list, page_list) { in show_pools()
166 INIT_LIST_HEAD(&retval->page_list); in dma_pool_create()
258 list_del(&page->page_list); in pool_free_page()
284 while (!list_empty(&pool->page_list)) { in dma_pool_destroy()
286 page = list_entry(pool->page_list.next, in dma_pool_destroy()
287 struct dma_page, page_list); in dma_pool_destroy()
298 list_del(&page->page_list); in dma_pool_destroy()
329 list_for_each_entry(page, &pool->page_list, page_list) { in dma_pool_alloc()
[all …]
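
The dmapool.c hits show the kernel's intrusive-list idiom: both struct dma_pool and struct dma_page embed a struct list_head named page_list, the pool's copy serving as the head and each page's copy as its link, which is what makes list_for_each_entry(page, &pool->page_list, page_list) read naturally. A minimal self-contained userspace sketch of that embedding (the list helpers and field names are illustrative, not the kernel's <linux/list.h>):

#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
        n->next = h->next; n->prev = h;
        h->next->prev = n; h->next = n;
}

static void list_del(struct list_head *n)
{
        n->prev->next = n->next; n->next->prev = n->prev;
}

/* container_of-style accessor, as the kernel's list_entry() */
#define list_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct dma_page {
        struct list_head page_list;     /* this page's link in the pool */
        void *vaddr;
        unsigned int in_use;
};

struct dma_pool {
        struct list_head page_list;     /* head of all pages in the pool */
        size_t size;
};

static void pool_init(struct dma_pool *pool) { list_init(&pool->page_list); }

static void pool_add_page(struct dma_pool *pool, struct dma_page *page)
{
        list_add(&page->page_list, &pool->page_list);
}

static void pool_free_page(struct dma_page *page) { list_del(&page->page_list); }

static unsigned int pool_blocks_in_use(struct dma_pool *pool)
{
        unsigned int blocks = 0;
        struct list_head *pos;

        for (pos = pool->page_list.next; pos != &pool->page_list; pos = pos->next)
                blocks += list_entry(pos, struct dma_page, page_list)->in_use;
        return blocks;
}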
Dvmscan.c843 static unsigned long shrink_page_list(struct list_head *page_list, in shrink_page_list() argument
866 while (!list_empty(page_list)) { in shrink_page_list()
875 page = lru_to_page(page_list); in shrink_page_list()
1010 if (!add_to_swap(page, page_list)) in shrink_page_list()
1176 list_splice(&ret_pages, page_list); in shrink_page_list()
1188 struct list_head *page_list) in reclaim_clean_pages_from_list() argument
1199 list_for_each_entry_safe(page, next, page_list, lru) { in reclaim_clean_pages_from_list()
1210 list_splice(&clean_pages, page_list); in reclaim_clean_pages_from_list()
1438 putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list) in putback_inactive_pages() argument
1447 while (!list_empty(page_list)) { in putback_inactive_pages()
[all …]
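
shrink_page_list() and putback_inactive_pages() in the vmscan.c hits share one shape: pop entries off page_list until it is empty, collect survivors on a private list, and splice the survivors back for the caller. A compact self-contained sketch of that drain-and-splice loop (the reclaimable flag and list helpers are illustrative stand-ins for the kernel's reclaim logic and <linux/list.h>):

#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }
static int  list_empty(const struct list_head *h) { return h->next == h; }

static void list_del(struct list_head *n)
{
        n->prev->next = n->next; n->next->prev = n->prev;
        n->next = n->prev = n;
}

static void list_add_tail(struct list_head *n, struct list_head *h)
{
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
}

struct page { struct list_head lru; int reclaimable; };

/* the kernel's lru_to_page() also takes from the tail ((head)->prev) */
#define lru_to_page(head) \
        ((struct page *)((char *)(head)->prev - offsetof(struct page, lru)))

static unsigned long shrink_list(struct list_head *page_list)
{
        struct list_head ret_pages;
        unsigned long nr_reclaimed = 0;

        list_init(&ret_pages);
        while (!list_empty(page_list)) {
                struct page *page = lru_to_page(page_list);

                list_del(&page->lru);
                if (page->reclaimable)
                        nr_reclaimed++;                 /* page goes away */
                else
                        list_add_tail(&page->lru, &ret_pages);
        }
        /* splice survivors back for the caller, as list_splice() does */
        while (!list_empty(&ret_pages)) {
                struct list_head *n = ret_pages.next;

                list_del(n);
                list_add_tail(n, page_list);
        }
        return nr_reclaimed;
}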
Dinternal.h421 struct list_head *page_list);
Dmemcontrol.c5626 static void uncharge_list(struct list_head *page_list) in uncharge_list() argument
5636 next = page_list->next; in uncharge_list()
5678 } while (next != page_list); in uncharge_list()
5712 void mem_cgroup_uncharge_list(struct list_head *page_list) in mem_cgroup_uncharge_list() argument
5717 if (!list_empty(page_list)) in mem_cgroup_uncharge_list()
5718 uncharge_list(page_list); in mem_cgroup_uncharge_list()
/linux-4.1.27/drivers/infiniband/core/
Dfmr_pool.c115 u64 *page_list, in ib_fmr_cache_lookup() argument
125 bucket = pool->cache_bucket + ib_fmr_hash(*page_list); in ib_fmr_cache_lookup()
130 !memcmp(page_list, fmr->page_list, in ib_fmr_cache_lookup()
131 page_list_len * sizeof *page_list)) in ib_fmr_cache_lookup()
435 u64 *page_list, in ib_fmr_pool_map_phys() argument
449 page_list, in ib_fmr_pool_map_phys()
474 result = ib_map_phys_fmr(fmr->fmr, page_list, list_len, in ib_fmr_pool_map_phys()
493 memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list)); in ib_fmr_pool_map_phys()
497 pool->cache_bucket + ib_fmr_hash(fmr->page_list[0])); in ib_fmr_pool_map_phys()
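
ib_fmr_cache_lookup() in the fmr_pool.c hits picks a bucket by hashing the first page address and then confirms candidates with memcmp() over the whole page_list, so an identical mapping can be reused instead of re-registered. A sketch of that probe, assuming an illustrative hash and bucket count:

#include <stdint.h>
#include <string.h>

#define FMR_HASH_BUCKETS 1024

struct cached_fmr {
        struct cached_fmr *next;        /* bucket chain */
        uint64_t iova;
        int page_list_len;
        uint64_t page_list[];           /* flexible array, as in ib_fmr_pool.h */
};

static unsigned fmr_hash(uint64_t first_page)
{
        return (unsigned)((first_page >> 12) % FMR_HASH_BUCKETS);
}

static struct cached_fmr *fmr_cache_lookup(struct cached_fmr **buckets,
                                           const uint64_t *page_list,
                                           int len, uint64_t iova)
{
        struct cached_fmr *fmr;

        for (fmr = buckets[fmr_hash(page_list[0])]; fmr; fmr = fmr->next)
                if (fmr->iova == iova &&
                    fmr->page_list_len == len &&
                    !memcmp(page_list, fmr->page_list,
                            len * sizeof(*page_list)))
                        return fmr;     /* reuse the existing mapping */
        return NULL;
}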
Dumem_odp.c273 umem->odp_data->page_list = vzalloc(ib_umem_num_pages(umem) * in ib_umem_odp_get()
274 sizeof(*umem->odp_data->page_list)); in ib_umem_odp_get()
275 if (!umem->odp_data->page_list) { in ib_umem_odp_get()
342 vfree(umem->odp_data->page_list); in ib_umem_odp_get()
412 vfree(umem->odp_data->page_list); in ib_umem_odp_release()
468 umem->odp_data->page_list[page_index] = page; in ib_umem_odp_map_dma_single_page()
470 } else if (umem->odp_data->page_list[page_index] == page) { in ib_umem_odp_map_dma_single_page()
474 umem->odp_data->page_list[page_index], page); in ib_umem_odp_map_dma_single_page()
638 if (umem->odp_data->page_list[idx]) { in ib_umem_odp_unmap_dma_pages()
639 struct page *page = umem->odp_data->page_list[idx]; in ib_umem_odp_unmap_dma_pages()
[all …]
Dumem.c87 struct page **page_list; in ib_umem_get() local
150 page_list = (struct page **) __get_free_page(GFP_KERNEL); in ib_umem_get()
151 if (!page_list) { in ib_umem_get()
194 1, !umem->writable, page_list, vma_list); in ib_umem_get()
207 sg_set_page(sg, page_list[i], PAGE_SIZE, 0); in ib_umem_get()
239 free_page((unsigned long) page_list); in ib_umem_get()
Dverbs.c1236 struct ib_fast_reg_page_list *page_list; in ib_alloc_fast_reg_page_list() local
1241 page_list = device->alloc_fast_reg_page_list(device, max_page_list_len); in ib_alloc_fast_reg_page_list()
1243 if (!IS_ERR(page_list)) { in ib_alloc_fast_reg_page_list()
1244 page_list->device = device; in ib_alloc_fast_reg_page_list()
1245 page_list->max_page_list_len = max_page_list_len; in ib_alloc_fast_reg_page_list()
1248 return page_list; in ib_alloc_fast_reg_page_list()
1252 void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list) in ib_free_fast_reg_page_list() argument
1254 page_list->device->free_fast_reg_page_list(page_list); in ib_free_fast_reg_page_list()
/linux-4.1.27/arch/powerpc/kernel/
Dmachine_kexec_32.c35 unsigned long page_list; in default_machine_kexec() local
46 page_list = image->head; in default_machine_kexec()
63 (*rnk)(page_list, reboot_code_buffer_phys, image->start); in default_machine_kexec()
/linux-4.1.27/include/linux/
Ddm-io.h23 struct page_list { struct
24 struct page_list *next; argument
43 struct page_list *pl; argument
Dmemcontrol.h82 void mem_cgroup_uncharge_list(struct list_head *page_list);
233 static inline void mem_cgroup_uncharge_list(struct list_head *page_list) in mem_cgroup_uncharge_list() argument
/linux-4.1.27/arch/x86/kernel/
Dmachine_kexec_32.c184 unsigned long page_list[PAGES_NR]; in machine_kexec() local
222 page_list[PA_CONTROL_PAGE] = __pa(control_page); in machine_kexec()
223 page_list[VA_CONTROL_PAGE] = (unsigned long)control_page; in machine_kexec()
224 page_list[PA_PGD] = __pa(image->arch.pgd); in machine_kexec()
227 page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page) in machine_kexec()
250 (unsigned long)page_list, in machine_kexec()
Dmachine_kexec_64.c255 unsigned long page_list[PAGES_NR]; in machine_kexec() local
286 page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page); in machine_kexec()
287 page_list[VA_CONTROL_PAGE] = (unsigned long)control_page; in machine_kexec()
288 page_list[PA_TABLE_PAGE] = in machine_kexec()
292 page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page) in machine_kexec()
315 (unsigned long)page_list, in machine_kexec()
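
In the x86 machine_kexec_32/64.c hits, page_list is not a list at all but a fixed array indexed by symbolic slots (control page, page tables, swap page) that is handed as a single argument to the relocation trampoline. A hedged sketch of that handoff; the slot names and trampoline signature here are illustrative, not the kernel's exact ABI:

#include <stdint.h>

enum { PA_CONTROL_PAGE, VA_CONTROL_PAGE, PA_TABLE_PAGE, PA_SWAP_PAGE, PAGES_NR };

typedef unsigned long (*relocate_fn)(unsigned long indirection_page,
                                     unsigned long page_list,
                                     unsigned long start_address);

static unsigned long kexec_handoff(relocate_fn trampoline,
                                   unsigned long head, unsigned long start,
                                   uintptr_t control_pa, uintptr_t control_va,
                                   uintptr_t table_pa, uintptr_t swap_pa)
{
        unsigned long page_list[PAGES_NR];

        page_list[PA_CONTROL_PAGE] = control_pa;
        page_list[VA_CONTROL_PAGE] = control_va;
        page_list[PA_TABLE_PAGE]   = table_pa;
        page_list[PA_SWAP_PAGE]    = swap_pa;

        /* one pointer-sized argument carries all the relocation pages */
        return trampoline(head, (unsigned long)page_list, start);
}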
/linux-4.1.27/drivers/misc/genwqe/
Dcard_utils.c249 struct page **page_list, int num_pages, in genwqe_map_pages() argument
260 daddr = pci_map_page(pci_dev, page_list[i], in genwqe_map_pages()
519 static int free_user_pages(struct page **page_list, unsigned int nr_pages, in free_user_pages() argument
525 if (page_list[i] != NULL) { in free_user_pages()
527 set_page_dirty_lock(page_list[i]); in free_user_pages()
528 put_page(page_list[i]); in free_user_pages()
576 m->page_list = kcalloc(m->nr_pages, in genwqe_user_vmap()
579 if (!m->page_list) { in genwqe_user_vmap()
586 m->dma_list = (dma_addr_t *)(m->page_list + m->nr_pages); in genwqe_user_vmap()
592 m->page_list); /* ptrs to pages */ in genwqe_user_vmap()
[all …]
Dcard_base.h179 struct page **page_list; /* list of pages used by user buff */ member
/linux-4.1.27/drivers/infiniband/hw/usnic/
Dusnic_uiom.c52 ((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list)) /\
53 ((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] - \
54 (void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))
88 for_each_sg(chunk->page_list, sg, chunk->nents, i) { in usnic_uiom_put_pages()
103 struct page **page_list; in usnic_uiom_get_pages() local
125 page_list = (struct page **) __get_free_page(GFP_KERNEL); in usnic_uiom_get_pages()
126 if (!page_list) in usnic_uiom_get_pages()
150 1, !writable, page_list, NULL); in usnic_uiom_get_pages()
169 sg_init_table(chunk->page_list, chunk->nents); in usnic_uiom_get_pages()
170 for_each_sg(chunk->page_list, sg, chunk->nents, i) { in usnic_uiom_get_pages()
[all …]
Dusnic_uiom.h64 struct scatterlist page_list[0]; member
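
The usnic_uiom.c macro above computes how many scatterlist entries fit in one page after the chunk header, using offsetof() against the flexible page_list[] member (spelled page_list[0] in usnic_uiom.h) and the per-element stride. The same arithmetic in a standalone form, with illustrative types:

#include <stddef.h>
#include <stdio.h>

#define PAGE_SZ 4096

struct sg_ent { unsigned long addr; unsigned int len; };

struct uiom_chunk {
        struct uiom_chunk *next;
        int nents;
        struct sg_ent page_list[];      /* flexible array fills rest of page */
};

#define ENTS_PER_CHUNK \
        ((PAGE_SZ - offsetof(struct uiom_chunk, page_list)) / sizeof(struct sg_ent))

int main(void)
{
        printf("%zu scatter entries per one-page chunk\n",
               (size_t)ENTS_PER_CHUNK);
        return 0;
}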
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx4/
Dmr.c695 int start_index, int npages, u64 *page_list) in mlx4_write_mtt_chunk() argument
712 mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); in mlx4_write_mtt_chunk()
721 int start_index, int npages, u64 *page_list) in __mlx4_write_mtt() argument
736 err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list); in __mlx4_write_mtt()
741 page_list += chunk; in __mlx4_write_mtt()
749 int start_index, int npages, u64 *page_list) in mlx4_write_mtt() argument
772 inbox[i + 2] = cpu_to_be64(page_list[i] | in mlx4_write_mtt()
782 page_list += chunk; in mlx4_write_mtt()
788 return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list); in mlx4_write_mtt()
795 u64 *page_list; in mlx4_buf_write_mtt() local
[all …]
Dalloc.c615 buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list), in mlx4_buf_alloc()
617 if (!buf->page_list) in mlx4_buf_alloc()
621 buf->page_list[i].buf = in mlx4_buf_alloc()
625 if (!buf->page_list[i].buf) in mlx4_buf_alloc()
628 buf->page_list[i].map = t; in mlx4_buf_alloc()
630 memset(buf->page_list[i].buf, 0, PAGE_SIZE); in mlx4_buf_alloc()
639 pages[i] = virt_to_page(buf->page_list[i].buf); in mlx4_buf_alloc()
669 if (buf->page_list[i].buf) in mlx4_buf_free()
672 buf->page_list[i].buf, in mlx4_buf_free()
673 buf->page_list[i].map); in mlx4_buf_free()
[all …]
Deq.c118 …return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % … in get_eqe()
940 eq->page_list = kmalloc(npages * sizeof *eq->page_list, in mlx4_create_eq()
942 if (!eq->page_list) in mlx4_create_eq()
946 eq->page_list[i].buf = NULL; in mlx4_create_eq()
958 eq->page_list[i].buf = dma_alloc_coherent(&dev->persist-> in mlx4_create_eq()
962 if (!eq->page_list[i].buf) in mlx4_create_eq()
966 eq->page_list[i].map = t; in mlx4_create_eq()
968 memset(eq->page_list[i].buf, 0, PAGE_SIZE); in mlx4_create_eq()
1026 if (eq->page_list[i].buf) in mlx4_create_eq()
1028 eq->page_list[i].buf, in mlx4_create_eq()
[all …]
Den_resources.c97 pages[i] = virt_to_page(buf->page_list[i].buf); in mlx4_en_map_buffer()
Dmlx4.h391 struct mlx4_buf_list *page_list; member
997 int start_index, int npages, u64 *page_list);
Dresource_tracker.c3016 __be64 *page_list = inbox->buf; in mlx4_WRITE_MTT_wrapper() local
3017 u64 *pg_list = (u64 *)page_list; in mlx4_WRITE_MTT_wrapper()
3020 int start = be64_to_cpu(page_list[0]); in mlx4_WRITE_MTT_wrapper()
3036 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL); in mlx4_WRITE_MTT_wrapper()
3038 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages, in mlx4_WRITE_MTT_wrapper()
3039 ((u64 *)page_list + 2)); in mlx4_WRITE_MTT_wrapper()
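
__mlx4_write_mtt() in the mr.c hits writes npages MTT entries in bounded slices, advancing both the table start index and the page_list pointer by the chunk size each pass. A sketch of that chunking loop; MAX_CHUNK and write_chunk() are illustrative stand-ins for the mailbox limit and mlx4_write_mtt_chunk():

#include <stdint.h>

#define MAX_CHUNK 64    /* illustrative per-command entry limit */

static int write_chunk(int start_index, int n, const uint64_t *page_list)
{
        /* would post one firmware command covering entries
         * [start_index, start_index + n) here */
        (void)start_index; (void)n; (void)page_list;
        return 0;
}

static int write_mtt(int start_index, int npages, const uint64_t *page_list)
{
        int err;

        while (npages > 0) {
                int chunk = npages < MAX_CHUNK ? npages : MAX_CHUNK;

                err = write_chunk(start_index, chunk, page_list);
                if (err)
                        return err;

                npages      -= chunk;
                start_index += chunk;
                page_list   += chunk;   /* same advance as the kernel loop */
        }
        return 0;
}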
/linux-4.1.27/drivers/md/
Ddm-kcopyd.c41 struct page_list *pages;
72 static struct page_list zero_page_list;
193 static struct page_list *alloc_pl(gfp_t gfp) in alloc_pl()
195 struct page_list *pl; in alloc_pl()
210 static void free_pl(struct page_list *pl) in free_pl()
220 static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl) in kcopyd_put_pages()
222 struct page_list *next; in kcopyd_put_pages()
240 unsigned int nr, struct page_list **pages) in kcopyd_get_pages()
242 struct page_list *pl; in kcopyd_get_pages()
271 static void drop_pages(struct page_list *pl) in drop_pages()
[all …]
Ddm-io.c178 struct page_list *pl = (struct page_list *) dp->context_ptr; in list_get_page()
187 struct page_list *pl = (struct page_list *) dp->context_ptr; in list_next_page()
192 static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset) in list_dp_init()
/linux-4.1.27/drivers/infiniband/hw/mlx4/
Dmr.c400 mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL); in mlx4_ib_alloc_fast_reg_page_list()
401 if (!mfrpl->ibfrpl.page_list) in mlx4_ib_alloc_fast_reg_page_list()
416 kfree(mfrpl->ibfrpl.page_list); in mlx4_ib_alloc_fast_reg_page_list()
421 void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list) in mlx4_ib_free_fast_reg_page_list() argument
423 struct mlx4_ib_dev *dev = to_mdev(page_list->device); in mlx4_ib_free_fast_reg_page_list()
424 struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list); in mlx4_ib_free_fast_reg_page_list()
425 int size = page_list->max_page_list_len * sizeof (u64); in mlx4_ib_free_fast_reg_page_list()
430 kfree(mfrpl->ibfrpl.page_list); in mlx4_ib_free_fast_reg_page_list()
468 int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, in mlx4_ib_map_phys_fmr() argument
474 return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova, in mlx4_ib_map_phys_fmr()
Dmlx4_ib.h667 void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
719 int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int npages,
Dqp.c2405 struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list); in set_fmr_seg()
2410 cpu_to_be64(wr->wr.fast_reg.page_list->page_list[i] | in set_fmr_seg()
/linux-4.1.27/net/rds/
Diw_rdma.c50 struct ib_fast_reg_page_list *page_list; member
666 struct ib_fast_reg_page_list *page_list = NULL; local
681 page_list = ib_alloc_fast_reg_page_list(rds_iwdev->dev, pool->max_message_size);
682 if (IS_ERR(page_list)) {
683 err = PTR_ERR(page_list);
690 ibmr->page_list = page_list;
715 f_wr.wr.fast_reg.page_list = ibmr->page_list;
783 ibmr->page_list->page_list[i] = dma_pages[i];
871 if (ibmr->page_list)
872 ib_free_fast_reg_page_list(ibmr->page_list);
Diw_send.c779 send->s_wr.wr.fast_reg.page_list = send->s_page_list; in rds_iw_build_send_fastreg()
901 send->s_page_list->page_list[j] = ib_sg_dma_address(ic->i_cm_id->device, scat); in rds_iw_xmit_rdma()
/linux-4.1.27/drivers/infiniband/hw/cxgb4/
Dmem.c437 int *shift, __be64 **page_list) in build_phys_page_list() argument
481 *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL); in build_phys_page_list()
482 if (!*page_list) in build_phys_page_list()
490 (*page_list)[n++] = cpu_to_be64(buffer_list[i].addr + in build_phys_page_list()
510 __be64 *page_list = NULL; in c4iw_reregister_phys_mem() local
543 &shift, &page_list); in c4iw_reregister_phys_mem()
549 kfree(page_list); in c4iw_reregister_phys_mem()
554 kfree(page_list); in c4iw_reregister_phys_mem()
576 __be64 *page_list; in c4iw_register_phys_mem() local
609 &page_list); in c4iw_register_phys_mem()
[all …]
Dqp.c615 to_c4iw_fr_page_list(wr->wr.fast_reg.page_list); in build_fastreg()
619 wr->wr.fast_reg.page_list->page_list[i] = (__force u64) in build_fastreg()
621 wr->wr.fast_reg.page_list->page_list[i]); in build_fastreg()
642 (u64)wr->wr.fast_reg.page_list->page_list[i]); in build_fastreg()
Diw_cxgb4.h971 void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list);
/linux-4.1.27/drivers/gpu/drm/ttm/
Dttm_page_alloc_dma.c129 struct list_head page_list; member
389 list_for_each_entry_safe(d_page, tmp, d_pages, page_list) { in ttm_dma_pages_put()
390 list_del(&d_page->page_list); in ttm_dma_pages_put()
402 list_del(&d_page->page_list); in ttm_dma_page_put()
453 page_list) { in ttm_dma_page_pool_free()
458 list_move(&dma_p->page_list, &d_pages); in ttm_dma_page_pool_free()
693 list_for_each_entry_safe(d_page, tmp, d_pages, page_list) { in ttm_dma_handle_caching_state_failure()
697 list_del(&d_page->page_list); in ttm_dma_handle_caching_state_failure()
780 list_add(&dma_p->page_list, d_pages); in ttm_dma_pool_alloc_new_pages()
828 list_for_each_entry(d_page, &d_pages, page_list) { in ttm_dma_page_pool_fill_locked()
[all …]
/linux-4.1.27/include/rdma/
Dib_fmr_pool.h76 u64 page_list[0]; member
87 u64 *page_list,
Dib_umem_odp.h51 struct page **page_list; member
Dib_verbs.h1007 u64 *page_list; member
1064 struct ib_fast_reg_page_list *page_list; member
1609 void (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
1627 u64 *page_list, int list_len,
2505 void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
2587 u64 *page_list, int list_len, in ib_map_phys_fmr() argument
2590 return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova); in ib_map_phys_fmr()
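
The ib_verbs.h hits show why drivers such as mthca, mlx4, qib, and ipath can each supply their own map_phys_fmr: the inline ib_map_phys_fmr() is pure indirection through the device's ops table. A minimal sketch of that dispatch pattern, with illustrative type names:

#include <stdint.h>

struct fmr;

struct device_ops {
        int (*map_phys_fmr)(struct fmr *fmr, uint64_t *page_list,
                            int list_len, uint64_t iova);
};

struct fmr { const struct device_ops *device; };

/* the inline wrapper adds nothing but the indirection */
static inline int map_phys_fmr(struct fmr *fmr, uint64_t *page_list,
                               int list_len, uint64_t iova)
{
        return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}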
/linux-4.1.27/arch/arm/kernel/
Dmachine_kexec.c146 unsigned long page_list; in machine_kexec() local
160 page_list = image->head & PAGE_MASK; in machine_kexec()
170 kexec_indirection_page = page_list; in machine_kexec()
/linux-4.1.27/drivers/infiniband/hw/cxgb3/
Diwch_mem.c140 __be64 **page_list) in build_phys_page_list() argument
184 *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL); in build_phys_page_list()
185 if (!*page_list) in build_phys_page_list()
193 (*page_list)[n++] = cpu_to_be64(buffer_list[i].addr + in build_phys_page_list()
Diwch_provider.c478 __be64 *page_list; in iwch_register_phys_mem() local
510 &total_size, &npages, &shift, &page_list); in iwch_register_phys_mem()
516 kfree(page_list); in iwch_register_phys_mem()
520 ret = iwch_write_pbl(mhp, page_list, npages, 0); in iwch_register_phys_mem()
521 kfree(page_list); in iwch_register_phys_mem()
560 __be64 *page_list = NULL; in iwch_reregister_phys_mem() local
590 &shift, &page_list); in iwch_reregister_phys_mem()
596 kfree(page_list); in iwch_reregister_phys_mem()
840 struct ib_fast_reg_page_list *page_list; in iwch_alloc_fastreg_pbl() local
842 page_list = kmalloc(sizeof *page_list + page_list_len * sizeof(u64), in iwch_alloc_fastreg_pbl()
[all …]
Diwch_provider.h355 __be64 **page_list);
Diwch_qp.c183 *p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]); in build_fastreg()
/linux-4.1.27/arch/sh/kernel/
Dmachine_kexec.c75 unsigned long page_list; in machine_kexec() local
105 page_list = image->head; in machine_kexec()
122 (*rnk)(page_list, reboot_code_buffer, in machine_kexec()
/linux-4.1.27/drivers/staging/comedi/
Dcomedi_buf.c38 if (bm->page_list) { in comedi_buf_map_kref_release()
40 buf = &bm->page_list[i]; in comedi_buf_map_kref_release()
54 vfree(bm->page_list); in comedi_buf_map_kref_release()
111 bm->page_list = vzalloc(sizeof(*buf) * n_pages); in __comedi_buf_alloc()
112 if (bm->page_list) in __comedi_buf_alloc()
119 buf = &bm->page_list[i]; in __comedi_buf_alloc()
Dcomedidev.h102 struct comedi_buf_page *page_list; member
Dcomedi_fops.c2203 struct comedi_buf_page *buf = &bm->page_list[i]; in comedi_mmap()
/linux-4.1.27/Documentation/device-mapper/
Ddm-io.txt24 struct page_list {
25 struct page_list *next;
30 struct page_list *pl, unsigned int offset,
33 struct page_list *pl, unsigned int offset,
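
The dm-io.txt excerpt above defines the client-visible struct page_list: a bare singly linked chain, one page per node, used to describe an I/O memory region. A tiny standalone sketch of that shape and a walk over it (the struct page stand-in is a placeholder for the kernel's):

#include <stddef.h>

struct page { char data[4096]; };       /* stand-in for struct page */

struct page_list {
        struct page_list *next;
        struct page *page;
};

static size_t page_list_len(const struct page_list *pl)
{
        size_t n = 0;

        for (; pl; pl = pl->next)       /* plain next-pointer walk */
                n++;
        return n;
}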
/linux-4.1.27/drivers/infiniband/hw/qib/
Dqib_keys.c350 u64 *page_list; in qib_fast_reg_mr() local
375 page_list = wr->wr.fast_reg.page_list->page_list; in qib_fast_reg_mr()
379 mr->map[m]->segs[n].vaddr = (void *) page_list[i]; in qib_fast_reg_mr()
Dqib_mr.c354 pl->page_list = kzalloc(size, GFP_KERNEL); in qib_alloc_fast_reg_page_list()
355 if (!pl->page_list) in qib_alloc_fast_reg_page_list()
367 kfree(pl->page_list); in qib_free_fast_reg_page_list()
436 int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, in qib_map_phys_fmr() argument
463 fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i]; in qib_map_phys_fmr()
Dqib_verbs.h1046 int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
/linux-4.1.27/drivers/infiniband/hw/amso1100/
Dc2_provider.c337 u64 *page_list; in c2_reg_phys_mr() local
370 page_list = vmalloc(sizeof(u64) * pbl_depth); in c2_reg_phys_mr()
371 if (!page_list) { in c2_reg_phys_mr()
384 page_list[j++] = (buffer_list[i].addr + in c2_reg_phys_mr()
390 vfree(page_list); in c2_reg_phys_mr()
400 (unsigned long long) page_list[0], in c2_reg_phys_mr()
401 (unsigned long long) page_list[pbl_depth-1]); in c2_reg_phys_mr()
402 err = c2_nsmr_register_phys_kern(to_c2dev(ib_pd->device), page_list, in c2_reg_phys_mr()
406 vfree(page_list); in c2_reg_phys_mr()
Dc2.h241 } *page_list; member
/linux-4.1.27/net/sunrpc/xprtrdma/
Dsvc_rdma_recvfrom.c268 frmr->page_list->page_list[pno] = in rdma_read_chunk_frmr()
273 frmr->page_list->page_list[pno]); in rdma_read_chunk_frmr()
307 fastreg_wr.wr.fast_reg.page_list = frmr->page_list; in rdma_read_chunk_frmr()
Dsvc_rdma_transport.c764 frmr->page_list = pl; in rdma_alloc_frmr()
785 ib_free_fast_reg_page_list(frmr->page_list); in rdma_dealloc_frmr_q()
814 dma_addr_t addr = frmr->page_list->page_list[page_no]; in frmr_unmap_dma()
1250 fastreg_wr.wr.fast_reg.page_list = frmr->page_list; in svc_rdma_fastreg()
Dfrwr_ops.c205 frmr->fr_pgl->page_list[page_no++] = pa; in frwr_op_map()
225 fastreg_wr.wr.fast_reg.page_list = frmr->fr_pgl; in frwr_op_map()
/linux-4.1.27/block/
Dblk-mq-tag.h44 struct list_head page_list; member
Dblk-mq.c1418 while (!list_empty(&tags->page_list)) { in blk_mq_free_rq_map()
1419 page = list_first_entry(&tags->page_list, struct page, lru); in blk_mq_free_rq_map()
1447 INIT_LIST_HEAD(&tags->page_list); in blk_mq_init_rq_map()
1490 list_add_tail(&page->lru, &tags->page_list); in blk_mq_init_rq_map()
/linux-4.1.27/fs/ceph/
Daddr.c307 static int start_read(struct inode *inode, struct list_head *page_list, int max) in start_read() argument
312 struct page *page = list_entry(page_list->prev, struct page, lru); in start_read()
327 list_for_each_entry_reverse(page, page_list, lru) { in start_read()
354 page = list_entry(page_list->prev, struct page, lru); in start_read()
398 struct list_head *page_list, unsigned nr_pages) in ceph_readpages() argument
408 rc = ceph_readpages_from_fscache(mapping->host, mapping, page_list, in ceph_readpages()
421 while (!list_empty(page_list)) { in ceph_readpages()
422 rc = start_read(inode, page_list, max); in ceph_readpages()
428 ceph_fscache_readpages_cancel(inode, page_list); in ceph_readpages()
/linux-4.1.27/drivers/infiniband/hw/ehca/
Dehca_mrmw.h107 u64 *page_list,
Dehca_mrmw.c856 u64 *page_list, in ehca_map_phys_fmr() argument
874 ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len); in ehca_map_phys_fmr()
897 pginfo.u.fmr.page_list = page_list; in ehca_map_phys_fmr()
917 "iova=%llx", ret, fmr, page_list, list_len, iova); in ehca_map_phys_fmr()
1822 u64 *page_list, in ehca_fmr_check_page_list() argument
1836 page = page_list; in ehca_fmr_check_page_list()
2067 fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem; in ehca_set_pagebuf_fmr()
Dehca_iverbs.h110 u64 *page_list, int list_len, u64 iova);
Dehca_classes.h330 u64 *page_list; member
/linux-4.1.27/drivers/infiniband/hw/mlx5/
Dmr.c1396 mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL); in mlx5_ib_alloc_fast_reg_page_list()
1397 if (!mfrpl->ibfrpl.page_list) in mlx5_ib_alloc_fast_reg_page_list()
1411 kfree(mfrpl->ibfrpl.page_list); in mlx5_ib_alloc_fast_reg_page_list()
1416 void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list) in mlx5_ib_free_fast_reg_page_list() argument
1418 struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list); in mlx5_ib_free_fast_reg_page_list()
1419 struct mlx5_ib_dev *dev = to_mdev(page_list->device); in mlx5_ib_free_fast_reg_page_list()
1420 int size = page_list->max_page_list_len * sizeof(u64); in mlx5_ib_free_fast_reg_page_list()
1424 kfree(mfrpl->ibfrpl.page_list); in mlx5_ib_free_fast_reg_page_list()
Dmlx5_ib.h375 struct ib_fast_reg_page_list page_list; member
581 void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
584 int mlx5_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
Dqp.c2049 struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list); in set_frwr_pages()
2050 u64 *page_list = wr->wr.fast_reg.page_list->page_list; in set_frwr_pages() local
2055 mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm); in set_frwr_pages()
2470 wr->wr.fast_reg.page_list->max_page_list_len)) in set_frwr_li_wr()
/linux-4.1.27/drivers/infiniband/hw/ocrdma/
Docrdma_verbs.h97 void ocrdma_free_frmr_page_list(struct ib_fast_reg_page_list *page_list);
Docrdma_verbs.c2082 buf_addr = wr->wr.fast_reg.page_list->page_list[i]; in build_frmr_pbes()
2139 (wr->wr.fast_reg.page_list->page_list[0] & PAGE_MASK); in ocrdma_build_fr()
3024 frmr_list->page_list = (u64 *)(frmr_list + 1); in ocrdma_alloc_frmr_page_list()
3028 void ocrdma_free_frmr_page_list(struct ib_fast_reg_page_list *page_list) in ocrdma_free_frmr_page_list() argument
3030 kfree(page_list); in ocrdma_free_frmr_page_list()
/linux-4.1.27/arch/x86/include/asm/
Dkexec.h148 unsigned long page_list,
/linux-4.1.27/drivers/infiniband/hw/ipath/
Dipath_mr.c347 int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 * page_list, in ipath_map_phys_fmr() argument
371 fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i]; in ipath_map_phys_fmr()
Dipath_verbs.h835 int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 * page_list,
/linux-4.1.27/drivers/infiniband/ulp/iser/
Diser_memory.c738 plen = iser_sg_to_page_vec(mem, device->ib_device, frpl->page_list, in iser_fast_reg_mr()
754 fastreg_wr.wr.fast_reg.iova_start = frpl->page_list[0] + offset; in iser_fast_reg_mr()
755 fastreg_wr.wr.fast_reg.page_list = frpl; in iser_fast_reg_mr()
778 reg->sge.addr = frpl->page_list[0] + offset; in iser_fast_reg_mr()
/linux-4.1.27/include/linux/sunrpc/
Dsvc_rdma.h109 struct ib_fast_reg_page_list *page_list; member
/linux-4.1.27/include/linux/mlx4/
Ddevice.h598 struct mlx4_buf_list *page_list; member
1020 return buf->page_list[offset >> PAGE_SHIFT].buf + in mlx4_buf_offset()
1048 int start_index, int npages, u64 *page_list);
1325 int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
/linux-4.1.27/drivers/dma/
Dste_dma40.c3407 unsigned long *page_list; in d40_lcla_allocate() local
3416 page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS, in d40_lcla_allocate()
3419 if (!page_list) { in d40_lcla_allocate()
3428 page_list[i] = __get_free_pages(GFP_KERNEL, in d40_lcla_allocate()
3430 if (!page_list[i]) { in d40_lcla_allocate()
3437 free_pages(page_list[j], base->lcla_pool.pages); in d40_lcla_allocate()
3441 if ((virt_to_phys((void *)page_list[i]) & in d40_lcla_allocate()
3447 free_pages(page_list[j], base->lcla_pool.pages); in d40_lcla_allocate()
3450 base->lcla_pool.base = (void *)page_list[i]; in d40_lcla_allocate()
3484 kfree(page_list); in d40_lcla_allocate()
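
d40_lcla_allocate() in the ste_dma40.c hit collects allocation attempts into page_list until one satisfies an alignment constraint on its physical address, then frees every other attempt. A userspace sketch of that retry-for-alignment idea, assuming an illustrative alignment mask and testing the virtual address where the kernel tests virt_to_phys():

#include <stdint.h>
#include <stdlib.h>

#define MAX_ATTEMPTS 256
#define ALIGN_MASK   (0x40000 - 1)      /* illustrative 256 KiB alignment */

static void *alloc_aligned_by_retry(size_t sz)
{
        void *page_list[MAX_ATTEMPTS];
        void *winner = NULL;
        int i, j;

        for (i = 0; i < MAX_ATTEMPTS; i++) {
                page_list[i] = malloc(sz);
                if (!page_list[i])      /* allocator exhausted: give up */
                        break;
                if (((uintptr_t)page_list[i] & ALIGN_MASK) == 0) {
                        winner = page_list[i];
                        break;
                }
        }
        /* free every attempt except the winner */
        for (j = 0; j < i; j++)
                free(page_list[j]);
        if (i < MAX_ATTEMPTS && page_list[i] && page_list[i] != winner)
                free(page_list[i]);
        return winner;
}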
/linux-4.1.27/include/linux/mlx5/
Ddriver.h337 struct mlx5_buf_list *page_list; member
592 return buf->page_list[offset >> PAGE_SHIFT].buf + in mlx5_buf_offset()
/linux-4.1.27/fs/cifs/
Dfile.c3376 readpages_get_pages(struct address_space *mapping, struct list_head *page_list, in readpages_get_pages() argument
3386 page = list_entry(page_list->prev, struct page, lru); in readpages_get_pages()
3411 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) { in readpages_get_pages()
3435 struct list_head *page_list, unsigned num_pages) in cifs_readpages() argument
3451 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list, in cifs_readpages()
3478 while (!list_empty(page_list)) { in cifs_readpages()
3501 rc = readpages_get_pages(mapping, page_list, rsize, &tmplist, in cifs_readpages()
3559 cifs_fscache_readpages_cancel(mapping->host, page_list); in cifs_readpages()
/linux-4.1.27/fs/nfs/
Dwrite.c1629 void nfs_retry_commit(struct list_head *page_list, in nfs_retry_commit() argument
1636 while (!list_empty(page_list)) { in nfs_retry_commit()
1637 req = nfs_list_entry(page_list->next); in nfs_retry_commit()
Dinternal.h465 void nfs_retry_commit(struct list_head *page_list,
/linux-4.1.27/drivers/staging/comedi/drivers/
Dmite.c332 cpu_to_le32(async->buf_map->page_list[i].dma_addr); in mite_buf_change()
/linux-4.1.27/drivers/scsi/lpfc/
Dlpfc_sli4.h140 struct list_head page_list; member
Dlpfc_sli.c12757 while (!list_empty(&queue->page_list)) { in lpfc_sli4_queue_free()
12758 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf, in lpfc_sli4_queue_free()
12798 INIT_LIST_HEAD(&queue->page_list); in lpfc_sli4_queue_alloc()
12812 list_add_tail(&dmabuf->list, &queue->page_list); in lpfc_sli4_queue_alloc()
13039 list_for_each_entry(dmabuf, &eq->page_list, list) { in lpfc_eq_create()
13159 list_for_each_entry(dmabuf, &cq->page_list, list) { in lpfc_cq_create()
13250 list_for_each_entry(dmabuf, &mq->page_list, list) { in lpfc_mq_create_fb_init()
13361 list_for_each_entry(dmabuf, &mq->page_list, list) { in lpfc_mq_create()
13537 list_for_each_entry(dmabuf, &wq->page_list, list) { in lpfc_wq_create()
13755 list_for_each_entry(dmabuf, &hrq->page_list, list) { in lpfc_rq_create()
[all …]
/linux-4.1.27/drivers/infiniband/ulp/isert/
Dib_isert.c2613 &frpl->page_list[0]); in isert_fast_reg_mr()
2624 fr_wr.wr.fast_reg.iova_start = frpl->page_list[0] + page_off; in isert_fast_reg_mr()
2625 fr_wr.wr.fast_reg.page_list = frpl; in isert_fast_reg_mr()
2645 sge->addr = frpl->page_list[0] + page_off; in isert_fast_reg_mr()
/linux-4.1.27/drivers/infiniband/hw/nes/
Dnes_verbs.c467 pifrpl->page_list = &pnesfrpl->pbl; in nes_alloc_fast_reg_page_list()
483 pnesfrpl->ibfrpl.page_list, pnesfrpl->nes_wqe_pbl.kva, in nes_alloc_fast_reg_page_list()
3414 container_of(ib_wr->wr.fast_reg.page_list, in nes_post_send()
3417 u64 *src_page_list = pnesfrpl->ibfrpl.page_list; in nes_post_send()
/linux-4.1.27/drivers/infiniband/ulp/srp/
Dib_srp.c1309 memcpy(desc->frpl->page_list, state->pages, in srp_map_finish_fr()
1316 wr.wr.fast_reg.page_list = desc->frpl; in srp_map_finish_fr()