Home
last modified time | relevance | path

Searched refs: page_list (Results 1 – 74 of 74) sorted by relevance

/linux-4.4.14/drivers/infiniband/hw/mthca/
Dmthca_allocator.c122 if (array->page_list[p].page) in mthca_array_get()
123 return array->page_list[p].page[index & MTHCA_ARRAY_MASK]; in mthca_array_get()
133 if (!array->page_list[p].page) in mthca_array_set()
134 array->page_list[p].page = (void **) get_zeroed_page(GFP_ATOMIC); in mthca_array_set()
136 if (!array->page_list[p].page) in mthca_array_set()
139 array->page_list[p].page[index & MTHCA_ARRAY_MASK] = value; in mthca_array_set()
140 ++array->page_list[p].used; in mthca_array_set()
149 if (--array->page_list[p].used == 0) { in mthca_array_clear()
150 free_page((unsigned long) array->page_list[p].page); in mthca_array_clear()
151 array->page_list[p].page = NULL; in mthca_array_clear()
[all …]
Dmthca_eq.c231 return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE; in get_eqe()
482 eq->page_list = kmalloc(npages * sizeof *eq->page_list, in mthca_create_eq()
484 if (!eq->page_list) in mthca_create_eq()
488 eq->page_list[i].buf = NULL; in mthca_create_eq()
500 eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev, in mthca_create_eq()
502 if (!eq->page_list[i].buf) in mthca_create_eq()
506 dma_unmap_addr_set(&eq->page_list[i], mapping, t); in mthca_create_eq()
508 clear_page(eq->page_list[i].buf); in mthca_create_eq()
572 if (eq->page_list[i].buf) in mthca_create_eq()
574 eq->page_list[i].buf, in mthca_create_eq()
[all …]
Dmthca_provider.h54 struct mthca_buf_list *page_list; member
114 struct mthca_buf_list *page_list; member
Dmthca_mr.c689 static inline int mthca_check_fmr(struct mthca_fmr *fmr, u64 *page_list, in mthca_check_fmr() argument
706 if (page_list[i] & ~page_mask) in mthca_check_fmr()
717 int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, in mthca_tavor_map_phys_fmr() argument
726 err = mthca_check_fmr(fmr, page_list, list_len, iova); in mthca_tavor_map_phys_fmr()
739 __be64 mtt_entry = cpu_to_be64(page_list[i] | in mthca_tavor_map_phys_fmr()
758 int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, in mthca_arbel_map_phys_fmr() argument
766 err = mthca_check_fmr(fmr, page_list, list_len, iova); in mthca_arbel_map_phys_fmr()
787 fmr->mem.arbel.mtts[i] = cpu_to_be64(page_list[i] | in mthca_arbel_map_phys_fmr()
Dmthca_provider.c908 u64 *page_list; in mthca_reg_phys_mr() local
946 page_list = kmalloc(npages * sizeof *page_list, GFP_KERNEL); in mthca_reg_phys_mr()
947 if (!page_list) { in mthca_reg_phys_mr()
957 page_list[n++] = buffer_list[i].addr + ((u64) j << shift); in mthca_reg_phys_mr()
968 page_list, shift, npages, in mthca_reg_phys_mr()
973 kfree(page_list); in mthca_reg_phys_mr()
978 kfree(page_list); in mthca_reg_phys_mr()
Dmthca_dev.h191 } *page_list; member
483 int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
486 int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
Dmthca_srq.c77 return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf + in get_wqe()
Dmthca_cq.c165 return buf->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].buf in get_cqe_from_buf()
Dmthca_qp.c212 return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf + in get_recv_wqe()
222 return qp->queue.page_list[(qp->send_wqe_offset + in get_send_wqe()
/linux-4.4.14/mm/
Ddmapool.c46 struct list_head page_list; member
57 struct list_head page_list; member
89 list_for_each_entry(page, &pool->page_list, page_list) { in show_pools()
166 INIT_LIST_HEAD(&retval->page_list); in dma_pool_create()
258 list_del(&page->page_list); in pool_free_page()
287 while (!list_empty(&pool->page_list)) { in dma_pool_destroy()
289 page = list_entry(pool->page_list.next, in dma_pool_destroy()
290 struct dma_page, page_list); in dma_pool_destroy()
301 list_del(&page->page_list); in dma_pool_destroy()
332 list_for_each_entry(page, &pool->page_list, page_list) { in dma_pool_alloc()
[all …]
Dvmscan.c880 static unsigned long shrink_page_list(struct list_head *page_list, in shrink_page_list() argument
903 while (!list_empty(page_list)) { in shrink_page_list()
912 page = lru_to_page(page_list); in shrink_page_list()
1025 list_add_tail(&page->lru, page_list); in shrink_page_list()
1050 if (!add_to_swap(page, page_list)) in shrink_page_list()
1223 list_splice(&ret_pages, page_list); in shrink_page_list()
1235 struct list_head *page_list) in reclaim_clean_pages_from_list() argument
1246 list_for_each_entry_safe(page, next, page_list, lru) { in reclaim_clean_pages_from_list()
1257 list_splice(&clean_pages, page_list); in reclaim_clean_pages_from_list()
1486 putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list) in putback_inactive_pages() argument
[all …]
Dinternal.h433 struct list_head *page_list);
Dmemcontrol.c5489 static void uncharge_list(struct list_head *page_list) in uncharge_list() argument
5499 next = page_list->next; in uncharge_list()
5541 } while (next != page_list); in uncharge_list()
5575 void mem_cgroup_uncharge_list(struct list_head *page_list) in mem_cgroup_uncharge_list() argument
5580 if (!list_empty(page_list)) in mem_cgroup_uncharge_list()
5581 uncharge_list(page_list); in mem_cgroup_uncharge_list()
/linux-4.4.14/drivers/infiniband/core/
Dfmr_pool.c115 u64 *page_list, in ib_fmr_cache_lookup() argument
125 bucket = pool->cache_bucket + ib_fmr_hash(*page_list); in ib_fmr_cache_lookup()
130 !memcmp(page_list, fmr->page_list, in ib_fmr_cache_lookup()
131 page_list_len * sizeof *page_list)) in ib_fmr_cache_lookup()
435 u64 *page_list, in ib_fmr_pool_map_phys() argument
449 page_list, in ib_fmr_pool_map_phys()
474 result = ib_map_phys_fmr(fmr->fmr, page_list, list_len, in ib_fmr_pool_map_phys()
493 memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list)); in ib_fmr_pool_map_phys()
497 pool->cache_bucket + ib_fmr_hash(fmr->page_list[0])); in ib_fmr_pool_map_phys()
Dumem_odp.c273 umem->odp_data->page_list = vzalloc(ib_umem_num_pages(umem) * in ib_umem_odp_get()
274 sizeof(*umem->odp_data->page_list)); in ib_umem_odp_get()
275 if (!umem->odp_data->page_list) { in ib_umem_odp_get()
342 vfree(umem->odp_data->page_list); in ib_umem_odp_get()
412 vfree(umem->odp_data->page_list); in ib_umem_odp_release()
468 umem->odp_data->page_list[page_index] = page; in ib_umem_odp_map_dma_single_page()
470 } else if (umem->odp_data->page_list[page_index] == page) { in ib_umem_odp_map_dma_single_page()
474 umem->odp_data->page_list[page_index], page); in ib_umem_odp_map_dma_single_page()
638 if (umem->odp_data->page_list[idx]) { in ib_umem_odp_unmap_dma_pages()
639 struct page *page = umem->odp_data->page_list[idx]; in ib_umem_odp_unmap_dma_pages()
[all …]
Dumem.c87 struct page **page_list; in ib_umem_get() local
150 page_list = (struct page **) __get_free_page(GFP_KERNEL); in ib_umem_get()
151 if (!page_list) { in ib_umem_get()
194 1, !umem->writable, page_list, vma_list); in ib_umem_get()
207 sg_set_page(sg, page_list[i], PAGE_SIZE, 0); in ib_umem_get()
239 free_page((unsigned long) page_list); in ib_umem_get()
/linux-4.4.14/drivers/infiniband/hw/usnic/
Dusnic_uiom.c52 ((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list)) /\
53 ((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] - \
54 (void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))
88 for_each_sg(chunk->page_list, sg, chunk->nents, i) { in usnic_uiom_put_pages()
103 struct page **page_list; in usnic_uiom_get_pages() local
125 page_list = (struct page **) __get_free_page(GFP_KERNEL); in usnic_uiom_get_pages()
126 if (!page_list) in usnic_uiom_get_pages()
150 1, !writable, page_list, NULL); in usnic_uiom_get_pages()
169 sg_init_table(chunk->page_list, chunk->nents); in usnic_uiom_get_pages()
170 for_each_sg(chunk->page_list, sg, chunk->nents, i) { in usnic_uiom_get_pages()
[all …]
Dusnic_uiom.h79 struct scatterlist page_list[0]; member
/linux-4.4.14/arch/powerpc/kernel/
Dmachine_kexec_32.c35 unsigned long page_list; in default_machine_kexec() local
46 page_list = image->head; in default_machine_kexec()
63 (*rnk)(page_list, reboot_code_buffer_phys, image->start); in default_machine_kexec()
/linux-4.4.14/include/linux/
Ddm-io.h23 struct page_list { struct
24 struct page_list *next; argument
43 struct page_list *pl; argument
Dmemcontrol.h302 void mem_cgroup_uncharge_list(struct list_head *page_list);
537 static inline void mem_cgroup_uncharge_list(struct list_head *page_list) in mem_cgroup_uncharge_list() argument
/linux-4.4.14/arch/x86/kernel/
Dmachine_kexec_32.c184 unsigned long page_list[PAGES_NR]; in machine_kexec() local
222 page_list[PA_CONTROL_PAGE] = __pa(control_page); in machine_kexec()
223 page_list[VA_CONTROL_PAGE] = (unsigned long)control_page; in machine_kexec()
224 page_list[PA_PGD] = __pa(image->arch.pgd); in machine_kexec()
227 page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page) in machine_kexec()
250 (unsigned long)page_list, in machine_kexec()
Dmachine_kexec_64.c257 unsigned long page_list[PAGES_NR]; in machine_kexec() local
288 page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page); in machine_kexec()
289 page_list[VA_CONTROL_PAGE] = (unsigned long)control_page; in machine_kexec()
290 page_list[PA_TABLE_PAGE] = in machine_kexec()
294 page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page) in machine_kexec()
317 (unsigned long)page_list, in machine_kexec()
/linux-4.4.14/drivers/misc/genwqe/
Dcard_utils.c250 struct page **page_list, int num_pages, in genwqe_map_pages() argument
261 daddr = pci_map_page(pci_dev, page_list[i], in genwqe_map_pages()
520 static int free_user_pages(struct page **page_list, unsigned int nr_pages, in free_user_pages() argument
526 if (page_list[i] != NULL) { in free_user_pages()
528 set_page_dirty_lock(page_list[i]); in free_user_pages()
529 put_page(page_list[i]); in free_user_pages()
577 m->page_list = kcalloc(m->nr_pages, in genwqe_user_vmap()
580 if (!m->page_list) { in genwqe_user_vmap()
587 m->dma_list = (dma_addr_t *)(m->page_list + m->nr_pages); in genwqe_user_vmap()
593 m->page_list); /* ptrs to pages */ in genwqe_user_vmap()
[all …]
Dcard_base.h179 struct page **page_list; /* list of pages used by user buff */ member
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx4/
Dmr.c695 int start_index, int npages, u64 *page_list) in mlx4_write_mtt_chunk() argument
712 mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); in mlx4_write_mtt_chunk()
721 int start_index, int npages, u64 *page_list) in __mlx4_write_mtt() argument
736 err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list); in __mlx4_write_mtt()
741 page_list += chunk; in __mlx4_write_mtt()
749 int start_index, int npages, u64 *page_list) in mlx4_write_mtt() argument
772 inbox[i + 2] = cpu_to_be64(page_list[i] | in mlx4_write_mtt()
782 page_list += chunk; in mlx4_write_mtt()
788 return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list); in mlx4_write_mtt()
795 u64 *page_list; in mlx4_buf_write_mtt() local
[all …]
Dalloc.c615 buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list), in mlx4_buf_alloc()
617 if (!buf->page_list) in mlx4_buf_alloc()
621 buf->page_list[i].buf = in mlx4_buf_alloc()
625 if (!buf->page_list[i].buf) in mlx4_buf_alloc()
628 buf->page_list[i].map = t; in mlx4_buf_alloc()
630 memset(buf->page_list[i].buf, 0, PAGE_SIZE); in mlx4_buf_alloc()
639 pages[i] = virt_to_page(buf->page_list[i].buf); in mlx4_buf_alloc()
669 if (buf->page_list[i].buf) in mlx4_buf_free()
672 buf->page_list[i].buf, in mlx4_buf_free()
673 buf->page_list[i].map); in mlx4_buf_free()
[all …]
Deq.c118 …return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % … in get_eqe()
972 eq->page_list = kmalloc(npages * sizeof *eq->page_list, in mlx4_create_eq()
974 if (!eq->page_list) in mlx4_create_eq()
978 eq->page_list[i].buf = NULL; in mlx4_create_eq()
990 eq->page_list[i].buf = dma_alloc_coherent(&dev->persist-> in mlx4_create_eq()
994 if (!eq->page_list[i].buf) in mlx4_create_eq()
998 eq->page_list[i].map = t; in mlx4_create_eq()
1000 memset(eq->page_list[i].buf, 0, PAGE_SIZE); in mlx4_create_eq()
1058 if (eq->page_list[i].buf) in mlx4_create_eq()
1060 eq->page_list[i].buf, in mlx4_create_eq()
[all …]
Den_resources.c122 pages[i] = virt_to_page(buf->page_list[i].buf); in mlx4_en_map_buffer()
Dmlx4.h399 struct mlx4_buf_list *page_list; member
1013 int start_index, int npages, u64 *page_list);
Dresource_tracker.c3189 __be64 *page_list = inbox->buf; in mlx4_WRITE_MTT_wrapper() local
3190 u64 *pg_list = (u64 *)page_list; in mlx4_WRITE_MTT_wrapper()
3193 int start = be64_to_cpu(page_list[0]); in mlx4_WRITE_MTT_wrapper()
3209 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL); in mlx4_WRITE_MTT_wrapper()
3211 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages, in mlx4_WRITE_MTT_wrapper()
3212 ((u64 *)page_list + 2)); in mlx4_WRITE_MTT_wrapper()
/linux-4.4.14/drivers/md/
Ddm-kcopyd.c41 struct page_list *pages;
72 static struct page_list zero_page_list;
193 static struct page_list *alloc_pl(gfp_t gfp) in alloc_pl()
195 struct page_list *pl; in alloc_pl()
210 static void free_pl(struct page_list *pl) in free_pl()
220 static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl) in kcopyd_put_pages()
222 struct page_list *next; in kcopyd_put_pages()
240 unsigned int nr, struct page_list **pages) in kcopyd_get_pages()
242 struct page_list *pl; in kcopyd_get_pages()
271 static void drop_pages(struct page_list *pl) in drop_pages()
[all …]
Ddm-io.c179 struct page_list *pl = (struct page_list *) dp->context_ptr; in list_get_page()
188 struct page_list *pl = (struct page_list *) dp->context_ptr; in list_next_page()
193 static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset) in list_dp_init()
/linux-4.4.14/drivers/gpu/drm/ttm/
Dttm_page_alloc_dma.c129 struct list_head page_list; member
392 list_for_each_entry_safe(d_page, tmp, d_pages, page_list) { in ttm_dma_pages_put()
393 list_del(&d_page->page_list); in ttm_dma_pages_put()
405 list_del(&d_page->page_list); in ttm_dma_page_put()
456 page_list) { in ttm_dma_page_pool_free()
461 list_move(&dma_p->page_list, &d_pages); in ttm_dma_page_pool_free()
696 list_for_each_entry_safe(d_page, tmp, d_pages, page_list) { in ttm_dma_handle_caching_state_failure()
700 list_del(&d_page->page_list); in ttm_dma_handle_caching_state_failure()
783 list_add(&dma_p->page_list, d_pages); in ttm_dma_pool_alloc_new_pages()
831 list_for_each_entry(d_page, &d_pages, page_list) { in ttm_dma_page_pool_fill_locked()
[all …]
/linux-4.4.14/include/rdma/
Dib_fmr_pool.h76 u64 page_list[0]; member
87 u64 *page_list,
Dib_umem_odp.h51 struct page **page_list; member
Dib_verbs.h1770 u64 *page_list, int list_len,
2941 u64 *page_list, int list_len, in ib_map_phys_fmr() argument
2944 return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova); in ib_map_phys_fmr()
/linux-4.4.14/arch/arm/kernel/
Dmachine_kexec.c146 unsigned long page_list; in machine_kexec() local
160 page_list = image->head & PAGE_MASK; in machine_kexec()
170 kexec_indirection_page = page_list; in machine_kexec()
/linux-4.4.14/drivers/infiniband/hw/cxgb3/
Diwch_mem.c140 __be64 **page_list) in build_phys_page_list() argument
184 *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL); in build_phys_page_list()
185 if (!*page_list) in build_phys_page_list()
193 (*page_list)[n++] = cpu_to_be64(buffer_list[i].addr + in build_phys_page_list()
Diwch_provider.c488 __be64 *page_list; in iwch_register_phys_mem() local
520 &total_size, &npages, &shift, &page_list); in iwch_register_phys_mem()
526 kfree(page_list); in iwch_register_phys_mem()
530 ret = iwch_write_pbl(mhp, page_list, npages, 0); in iwch_register_phys_mem()
531 kfree(page_list); in iwch_register_phys_mem()
570 __be64 *page_list = NULL; in iwch_reregister_phys_mem() local
600 &shift, &page_list); in iwch_reregister_phys_mem()
606 kfree(page_list); in iwch_reregister_phys_mem()
Diwch_provider.h357 __be64 **page_list);
/linux-4.4.14/arch/sh/kernel/
Dmachine_kexec.c75 unsigned long page_list; in machine_kexec() local
105 page_list = image->head; in machine_kexec()
122 (*rnk)(page_list, reboot_code_buffer, in machine_kexec()
/linux-4.4.14/drivers/staging/comedi/
Dcomedi_buf.c38 if (bm->page_list) { in comedi_buf_map_kref_release()
40 buf = &bm->page_list[i]; in comedi_buf_map_kref_release()
54 vfree(bm->page_list); in comedi_buf_map_kref_release()
111 bm->page_list = vzalloc(sizeof(*buf) * n_pages); in __comedi_buf_alloc()
112 if (bm->page_list) in __comedi_buf_alloc()
119 buf = &bm->page_list[i]; in __comedi_buf_alloc()
Dcomedidev.h256 struct comedi_buf_page *page_list; member
Dcomedi_fops.c2237 struct comedi_buf_page *buf = &bm->page_list[i]; in comedi_mmap()
/linux-4.4.14/drivers/infiniband/hw/cxgb4/
Dmem.c437 int *shift, __be64 **page_list) in build_phys_page_list() argument
481 *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL); in build_phys_page_list()
482 if (!*page_list) in build_phys_page_list()
490 (*page_list)[n++] = cpu_to_be64(buffer_list[i].addr + in build_phys_page_list()
510 __be64 *page_list = NULL; in c4iw_reregister_phys_mem() local
543 &shift, &page_list); in c4iw_reregister_phys_mem()
549 kfree(page_list); in c4iw_reregister_phys_mem()
554 kfree(page_list); in c4iw_reregister_phys_mem()
576 __be64 *page_list; in c4iw_register_phys_mem() local
609 &page_list); in c4iw_register_phys_mem()
[all …]
/linux-4.4.14/Documentation/device-mapper/
Ddm-io.txt24 struct page_list {
25 struct page_list *next;
30 struct page_list *pl, unsigned int offset,
33 struct page_list *pl, unsigned int offset,
/linux-4.4.14/drivers/staging/rdma/amso1100/
Dc2_provider.c345 u64 *page_list; in c2_reg_phys_mr() local
378 page_list = vmalloc(sizeof(u64) * pbl_depth); in c2_reg_phys_mr()
379 if (!page_list) { in c2_reg_phys_mr()
392 page_list[j++] = (buffer_list[i].addr + in c2_reg_phys_mr()
398 vfree(page_list); in c2_reg_phys_mr()
408 (unsigned long long) page_list[0], in c2_reg_phys_mr()
409 (unsigned long long) page_list[pbl_depth-1]); in c2_reg_phys_mr()
410 err = c2_nsmr_register_phys_kern(to_c2dev(ib_pd->device), page_list, in c2_reg_phys_mr()
414 vfree(page_list); in c2_reg_phys_mr()
Dc2.h241 } *page_list; member
/linux-4.4.14/drivers/infiniband/hw/qib/
Dqib_keys.c351 u64 *page_list; in qib_reg_mr() local
376 page_list = mr->pages; in qib_reg_mr()
380 mrg->map[m]->segs[n].vaddr = (void *) page_list[i]; in qib_reg_mr()
Dqib_mr.c443 int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, in qib_map_phys_fmr() argument
470 fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i]; in qib_map_phys_fmr()
Dqib_verbs.h1058 int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
/linux-4.4.14/block/
Dblk-mq-tag.h44 struct list_head page_list; member
Dblk-mq.c1437 while (!list_empty(&tags->page_list)) { in blk_mq_free_rq_map()
1438 page = list_first_entry(&tags->page_list, struct page, lru); in blk_mq_free_rq_map()
1471 INIT_LIST_HEAD(&tags->page_list); in blk_mq_init_rq_map()
1514 list_add_tail(&page->lru, &tags->page_list); in blk_mq_init_rq_map()
/linux-4.4.14/drivers/staging/rdma/ehca/
Dehca_mrmw.h107 u64 *page_list,
Dehca_mrmw.c856 u64 *page_list, in ehca_map_phys_fmr() argument
874 ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len); in ehca_map_phys_fmr()
897 pginfo.u.fmr.page_list = page_list; in ehca_map_phys_fmr()
917 "iova=%llx", ret, fmr, page_list, list_len, iova); in ehca_map_phys_fmr()
1822 u64 *page_list, in ehca_fmr_check_page_list() argument
1836 page = page_list; in ehca_fmr_check_page_list()
2067 fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem; in ehca_set_pagebuf_fmr()
Dehca_iverbs.h114 u64 *page_list, int list_len, u64 iova);
Dehca_classes.h330 u64 *page_list; member
/linux-4.4.14/fs/ceph/
Daddr.c311 static int start_read(struct inode *inode, struct list_head *page_list, int max) in start_read() argument
316 struct page *page = list_entry(page_list->prev, struct page, lru); in start_read()
331 list_for_each_entry_reverse(page, page_list, lru) { in start_read()
358 page = list_entry(page_list->prev, struct page, lru); in start_read()
402 struct list_head *page_list, unsigned nr_pages) in ceph_readpages() argument
412 rc = ceph_readpages_from_fscache(mapping->host, mapping, page_list, in ceph_readpages()
425 while (!list_empty(page_list)) { in ceph_readpages()
426 rc = start_read(inode, page_list, max); in ceph_readpages()
432 ceph_fscache_readpages_cancel(inode, page_list); in ceph_readpages()
/linux-4.4.14/arch/x86/include/asm/
Dkexec.h148 unsigned long page_list,
/linux-4.4.14/drivers/staging/rdma/ipath/
Dipath_mr.c347 int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 * page_list, in ipath_map_phys_fmr() argument
371 fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i]; in ipath_map_phys_fmr()
Dipath_verbs.h844 int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 * page_list,
/linux-4.4.14/drivers/staging/rdma/hfi1/
Dmr.c433 int hfi1_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, in hfi1_map_phys_fmr() argument
460 fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i]; in hfi1_map_phys_fmr()
Dverbs.h1032 int hfi1_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
/linux-4.4.14/include/linux/mlx4/
Ddevice.h615 struct mlx4_buf_list *page_list; member
1053 return buf->page_list[offset >> PAGE_SHIFT].buf + in mlx4_buf_offset()
1081 int start_index, int npages, u64 *page_list);
1360 int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
/linux-4.4.14/drivers/dma/
Dste_dma40.c3407 unsigned long *page_list; in d40_lcla_allocate() local
3416 page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS, in d40_lcla_allocate()
3419 if (!page_list) { in d40_lcla_allocate()
3428 page_list[i] = __get_free_pages(GFP_KERNEL, in d40_lcla_allocate()
3430 if (!page_list[i]) { in d40_lcla_allocate()
3437 free_pages(page_list[j], base->lcla_pool.pages); in d40_lcla_allocate()
3441 if ((virt_to_phys((void *)page_list[i]) & in d40_lcla_allocate()
3447 free_pages(page_list[j], base->lcla_pool.pages); in d40_lcla_allocate()
3450 base->lcla_pool.base = (void *)page_list[i]; in d40_lcla_allocate()
3484 kfree(page_list); in d40_lcla_allocate()
/linux-4.4.14/drivers/infiniband/hw/mlx4/
Dmr.c480 int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, in mlx4_ib_map_phys_fmr() argument
486 return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova, in mlx4_ib_map_phys_fmr()
Dmlx4_ib.h769 int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int npages,
/linux-4.4.14/fs/cifs/
Dfile.c3376 readpages_get_pages(struct address_space *mapping, struct list_head *page_list, in readpages_get_pages() argument
3387 page = list_entry(page_list->prev, struct page, lru); in readpages_get_pages()
3412 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) { in readpages_get_pages()
3435 struct list_head *page_list, unsigned num_pages) in cifs_readpages() argument
3451 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list, in cifs_readpages()
3478 while (!list_empty(page_list)) { in cifs_readpages()
3501 rc = readpages_get_pages(mapping, page_list, rsize, &tmplist, in cifs_readpages()
3559 cifs_fscache_readpages_cancel(mapping->host, page_list); in cifs_readpages()
/linux-4.4.14/fs/nfs/
Dwrite.c1651 void nfs_retry_commit(struct list_head *page_list, in nfs_retry_commit() argument
1658 while (!list_empty(page_list)) { in nfs_retry_commit()
1659 req = nfs_list_entry(page_list->next); in nfs_retry_commit()
Dinternal.h481 void nfs_retry_commit(struct list_head *page_list,
/linux-4.4.14/drivers/staging/comedi/drivers/
Dmite.c332 cpu_to_le32(async->buf_map->page_list[i].dma_addr); in mite_buf_change()
/linux-4.4.14/drivers/scsi/lpfc/
Dlpfc_sli4.h140 struct list_head page_list; member
Dlpfc_sli.c12737 while (!list_empty(&queue->page_list)) { in lpfc_sli4_queue_free()
12738 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf, in lpfc_sli4_queue_free()
12778 INIT_LIST_HEAD(&queue->page_list); in lpfc_sli4_queue_alloc()
12792 list_add_tail(&dmabuf->list, &queue->page_list); in lpfc_sli4_queue_alloc()
13015 list_for_each_entry(dmabuf, &eq->page_list, list) { in lpfc_eq_create()
13135 list_for_each_entry(dmabuf, &cq->page_list, list) { in lpfc_cq_create()
13226 list_for_each_entry(dmabuf, &mq->page_list, list) { in lpfc_mq_create_fb_init()
13337 list_for_each_entry(dmabuf, &mq->page_list, list) { in lpfc_mq_create()
13513 list_for_each_entry(dmabuf, &wq->page_list, list) { in lpfc_wq_create()
13731 list_for_each_entry(dmabuf, &hrq->page_list, list) { in lpfc_rq_create()
[all …]