/linux-4.4.14/drivers/infiniband/core/ |
D | umem.c |
      47  static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)  in __ib_umem_release() argument
      53  if (umem->nmap > 0)  in __ib_umem_release()
      54  ib_dma_unmap_sg(dev, umem->sg_head.sgl,  in __ib_umem_release()
      55  umem->nmap,  in __ib_umem_release()
      58  for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {  in __ib_umem_release()
      61  if (umem->writable && dirty)  in __ib_umem_release()
      66  sg_free_table(&umem->sg_head);  in __ib_umem_release()
      86  struct ib_umem *umem;  in ib_umem_get() local
     116  umem = kzalloc(sizeof *umem, GFP_KERNEL);  in ib_umem_get()
     117  if (!umem)  in ib_umem_get()
    [all …]
|
D | umem_odp.c |
     242  int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem)  in ib_umem_odp_get() argument
     261  umem->hugetlb = 0;  in ib_umem_odp_get()
     262  umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL);  in ib_umem_odp_get()
     263  if (!umem->odp_data) {  in ib_umem_odp_get()
     267  umem->odp_data->umem = umem;  in ib_umem_odp_get()
     269  mutex_init(&umem->odp_data->umem_mutex);  in ib_umem_odp_get()
     271  init_completion(&umem->odp_data->notifier_completion);  in ib_umem_odp_get()
     273  umem->odp_data->page_list = vzalloc(ib_umem_num_pages(umem) *  in ib_umem_odp_get()
     274  sizeof(*umem->odp_data->page_list));  in ib_umem_odp_get()
     275  if (!umem->odp_data->page_list) {  in ib_umem_odp_get()
    [all …]
|
D | umem_rbtree.c |
      53  return ib_umem_start(umem_odp->umem);  in node_start()
      66  return ib_umem_end(umem_odp->umem) - 1;  in node_last()
      82  struct ib_umem_odp *umem;  in rbt_ib_umem_for_each_in_range() local
      89  umem = container_of(node, struct ib_umem_odp, interval_tree);  in rbt_ib_umem_for_each_in_range()
      90  ret_val = cb(umem->umem, start, last, cookie) || ret_val;  in rbt_ib_umem_for_each_in_range()
|
D | Makefile | 14 ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
|
/linux-4.4.14/include/rdma/ |
D | ib_umem.h |
      61  static inline int ib_umem_offset(struct ib_umem *umem)  in ib_umem_offset() argument
      63  return umem->address & ((unsigned long)umem->page_size - 1);  in ib_umem_offset()
      67  static inline unsigned long ib_umem_start(struct ib_umem *umem)  in ib_umem_start() argument
      69  return umem->address - ib_umem_offset(umem);  in ib_umem_start()
      73  static inline unsigned long ib_umem_end(struct ib_umem *umem)  in ib_umem_end() argument
      75  return PAGE_ALIGN(umem->address + umem->length);  in ib_umem_end()
      78  static inline size_t ib_umem_num_pages(struct ib_umem *umem)  in ib_umem_num_pages() argument
      80  return (ib_umem_end(umem) - ib_umem_start(umem)) >> PAGE_SHIFT;  in ib_umem_num_pages()
      87  void ib_umem_release(struct ib_umem *umem);
      88  int ib_umem_page_count(struct ib_umem *umem);
    [all …]
|
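Aside: the inline helpers above are essentially the whole ib_umem API a driver consumes. The sketch below shows how they typically fit together, following the reg_user_mr() pattern visible in the mlx4, qib and cxgb entries later in this listing: pin the user range with ib_umem_get(), walk the DMA-mapped scatterlist to collect per-page bus addresses, then drop the pin with ib_umem_release(). This is an illustrative sketch against the 4.4-era API only; the function name and the caller-supplied pages[] array are not part of the kernel.

    /*
     * Illustrative only: gather the DMA address of every hardware page
     * backing a pinned user buffer, in the style of the reg_user_mr()
     * implementations indexed below.  A real driver keeps the umem pinned
     * (stored in its MR structure) until deregistration; it is released
     * here only so the sketch is self-contained.
     */
    #include <linux/err.h>
    #include <linux/log2.h>
    #include <linux/scatterlist.h>
    #include <rdma/ib_umem.h>
    #include <rdma/ib_verbs.h>

    static int example_collect_dma_pages(struct ib_ucontext *context,
                                         unsigned long start, size_t length,
                                         int access, u64 *pages, int npages)
    {
            struct ib_umem *umem;
            struct scatterlist *sg;
            unsigned int len, i;
            int shift, entry, k = 0;

            /* Pin and DMA-map the user range [start, start + length). */
            umem = ib_umem_get(context, start, length, access, 0);
            if (IS_ERR(umem))
                    return PTR_ERR(umem);

            if (ib_umem_page_count(umem) > npages) {
                    ib_umem_release(umem);
                    return -EINVAL;
            }

            shift = ilog2(umem->page_size);

            /* Each scatterlist entry may cover several pages of page_size. */
            for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                    len = sg_dma_len(sg) >> shift;
                    for (i = 0; i < len; i++)
                            pages[k++] = sg_dma_address(sg) +
                                         umem->page_size * i;
            }

            ib_umem_release(umem);
            return k;            /* number of page addresses collected */
    }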
D | ib_umem_odp.h |
      75  struct ib_umem *umem;  member
      86  int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem);
      88  void ib_umem_odp_release(struct ib_umem *umem);
     103  int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 start_offset, u64 bcnt,
     106  void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 start_offset,
     151  struct ib_umem *umem)  in ib_umem_odp_get() argument
     156  static inline void ib_umem_odp_release(struct ib_umem *umem) {}  in ib_umem_odp_release() argument
|
D | ib_verbs.h | 1255 void (*invalidate_range)(struct ib_umem *umem,
|
/linux-4.4.14/drivers/infiniband/hw/mlx5/ |
D | mem.c |
      45  void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,  in mlx5_ib_cont_pages() argument
      59  unsigned long page_shift = ilog2(umem->page_size);  in mlx5_ib_cont_pages()
      62  if (umem->odp_data) {  in mlx5_ib_cont_pages()
      63  *count = ib_umem_page_count(umem);  in mlx5_ib_cont_pages()
      78  for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {  in mlx5_ib_cont_pages()
     150  void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,  in __mlx5_ib_populate_pas() argument
     154  unsigned long umem_page_shift = ilog2(umem->page_size);  in __mlx5_ib_populate_pas()
     164  const bool odp = umem->odp_data != NULL;  in __mlx5_ib_populate_pas()
     171  dma_addr_t pa = umem->odp_data->dma_list[offset + i];  in __mlx5_ib_populate_pas()
     180  for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {  in __mlx5_ib_populate_pas()
    [all …]
|
D | doorbell.c |
      41  struct ib_umem *umem;  member
      66  page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,  in mlx5_ib_db_map_user()
      68  if (IS_ERR(page->umem)) {  in mlx5_ib_db_map_user()
      69  err = PTR_ERR(page->umem);  in mlx5_ib_db_map_user()
      77  db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);  in mlx5_ib_db_map_user()
      93  ib_umem_release(db->u.user_page->umem);  in mlx5_ib_db_unmap_user()
|
D | odp.c |
      46  void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,  in mlx5_ib_invalidate_range() argument
      55  if (!umem || !umem->odp_data) {  in mlx5_ib_invalidate_range()
      60  mr = umem->odp_data->private;  in mlx5_ib_invalidate_range()
      65  start = max_t(u64, ib_umem_start(umem), start);  in mlx5_ib_invalidate_range()
      66  end = min_t(u64, ib_umem_end(umem), end);  in mlx5_ib_invalidate_range()
      75  for (addr = start; addr < end; addr += (u64)umem->page_size) {  in mlx5_ib_invalidate_range()
      76  idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;  in mlx5_ib_invalidate_range()
      83  if (umem->odp_data->dma_list[idx] &  in mlx5_ib_invalidate_range()
     109  ib_umem_odp_unmap_dma_pages(umem, start, end);  in mlx5_ib_invalidate_range()
     204  if (!mr->umem->odp_data) {  in pagefault_single_data_segment()
    [all …]
|
D | mr.c |
     668  mr->umem = NULL;  in mlx5_ib_get_dma_mr()
     759  static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,  in reg_umr() argument
     804  mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT);  in reg_umr()
     863  struct ib_umem *umem = mr->umem;  in mlx5_ib_update_mtt() local
     919  ib_umem_num_pages(umem) - start_page_index);  in mlx5_ib_update_mtt()
     922  __mlx5_ib_populate_pas(dev, umem, PAGE_SHIFT,  in mlx5_ib_update_mtt()
     978  u64 length, struct ib_umem *umem,  in reg_create() argument
     999  mlx5_ib_populate_pas(dev, umem, page_shift, in->pas,  in reg_create()
    1022  mr->umem = umem;  in reg_create()
    1046  struct ib_umem *umem;  in mlx5_ib_reg_user_mr() local
    [all …]
|
D | mlx5_ib.h |
     189  struct ib_umem *umem;  member
     237  struct ib_umem *umem;  member
     268  struct ib_umem *umem;  member
     301  struct ib_umem *umem;  member
     328  struct ib_umem *umem;  member
     592  void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
     594  void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
     597  void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
     603  int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
     621  void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
|
D | srq.c |
     105  srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,  in create_srq_user()
     107  if (IS_ERR(srq->umem)) {  in create_srq_user()
     109  err = PTR_ERR(srq->umem);  in create_srq_user()
     113  mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages,  in create_srq_user()
     129  mlx5_ib_populate_pas(dev, srq->umem, page_shift, (*in)->pas, 0);  in create_srq_user()
     147  ib_umem_release(srq->umem);  in create_srq_user()
     223  ib_umem_release(srq->umem);  in destroy_srq_user()
     402  ib_umem_release(msrq->umem);  in mlx5_ib_destroy_srq()
|
D | cq.c |
     640  cq->buf.umem = ib_umem_get(context, ucmd.buf_addr,  in create_cq_user()
     643  if (IS_ERR(cq->buf.umem)) {  in create_cq_user()
     644  err = PTR_ERR(cq->buf.umem);  in create_cq_user()
     653  mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift,  in create_cq_user()
     664  mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, 0);  in create_cq_user()
     675  ib_umem_release(cq->buf.umem);  in create_cq_user()
     682  ib_umem_release(cq->buf.umem);  in destroy_cq_user()
     963  struct ib_umem *umem;  in resize_user() local
     966  struct ib_ucontext *context = cq->buf.umem->context;  in resize_user()
     975  umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,  in resize_user()
    [all …]
|
D | qp.c |
     124  struct ib_umem *umem = qp->umem;  in mlx5_ib_read_user_wqe() local
     141  if (offset > umem->length ||  in mlx5_ib_read_user_wqe()
     142  (send && offset + sizeof(struct mlx5_wqe_ctrl_seg) > umem->length))  in mlx5_ib_read_user_wqe()
     146  ret = ib_umem_copy_from(buffer, umem, offset, first_copy_length);  in mlx5_ib_read_user_wqe()
     162  ret = ib_umem_copy_from(buffer + first_copy_length, umem, wq->offset,  in mlx5_ib_read_user_wqe()
     646  qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,  in create_user_qp()
     648  if (IS_ERR(qp->umem)) {  in create_user_qp()
     650  err = PTR_ERR(qp->umem);  in create_user_qp()
     654  qp->umem = NULL;  in create_user_qp()
     657  if (qp->umem) {  in create_user_qp()
    [all …]
|
/linux-4.4.14/drivers/infiniband/hw/mlx4/ |
D | mr.c |
      76  mr->umem = NULL;  in mlx4_ib_get_dma_mr()
      90  struct ib_umem *umem)  in mlx4_ib_umem_write_mtt() argument
     105  for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {  in mlx4_ib_umem_write_mtt()
     109  umem->page_size * k;  in mlx4_ib_umem_write_mtt()
     149  mr->umem = ib_umem_get(pd->uobject->context, start, length,  in mlx4_ib_reg_user_mr()
     151  if (IS_ERR(mr->umem)) {  in mlx4_ib_reg_user_mr()
     152  err = PTR_ERR(mr->umem);  in mlx4_ib_reg_user_mr()
     156  n = ib_umem_page_count(mr->umem);  in mlx4_ib_reg_user_mr()
     157  shift = ilog2(mr->umem->page_size);  in mlx4_ib_reg_user_mr()
     164  err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);  in mlx4_ib_reg_user_mr()
    [all …]
|
D | doorbell.c |
      39  struct ib_umem *umem;  member
      64  page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,  in mlx4_ib_db_map_user()
      66  if (IS_ERR(page->umem)) {  in mlx4_ib_db_map_user()
      67  err = PTR_ERR(page->umem);  in mlx4_ib_db_map_user()
      75  db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);  in mlx4_ib_db_map_user()
      91  ib_umem_release(db->u.user_page->umem);  in mlx4_ib_db_unmap_user()
|
D | srq.c |
     117  srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,  in mlx4_ib_create_srq()
     119  if (IS_ERR(srq->umem)) {  in mlx4_ib_create_srq()
     120  err = PTR_ERR(srq->umem);  in mlx4_ib_create_srq()
     124  err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),  in mlx4_ib_create_srq()
     125  ilog2(srq->umem->page_size), &srq->mtt);  in mlx4_ib_create_srq()
     129  err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem);  in mlx4_ib_create_srq()
     219  ib_umem_release(srq->umem);  in mlx4_ib_create_srq()
     287  ib_umem_release(msrq->umem);  in mlx4_ib_destroy_srq()
|
D | cq.c |
     138  struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,  in mlx4_ib_get_cq_umem() argument
     144  *umem = ib_umem_get(context, buf_addr, cqe * cqe_size,  in mlx4_ib_get_cq_umem()
     146  if (IS_ERR(*umem))  in mlx4_ib_get_cq_umem()
     147  return PTR_ERR(*umem);  in mlx4_ib_get_cq_umem()
     149  err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),  in mlx4_ib_get_cq_umem()
     150  ilog2((*umem)->page_size), &buf->mtt);  in mlx4_ib_get_cq_umem()
     154  err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);  in mlx4_ib_get_cq_umem()
     164  ib_umem_release(*umem);  in mlx4_ib_get_cq_umem()
     210  err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,  in mlx4_ib_create_cq()
     269  ib_umem_release(cq->umem);  in mlx4_ib_create_cq()
    [all …]
|
D | mlx4_ib.h |
     124  struct ib_umem *umem;  member
     141  struct ib_umem *umem;  member
     302  struct ib_umem *umem;  member
     337  struct ib_umem *umem;  member
     701  struct ib_umem *umem);
|
D | qp.c |
     737  qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,  in create_qp_common()
     739  if (IS_ERR(qp->umem)) {  in create_qp_common()
     740  err = PTR_ERR(qp->umem);  in create_qp_common()
     744  err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem),  in create_qp_common()
     745  ilog2(qp->umem->page_size), &qp->mtt);  in create_qp_common()
     749  err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);  in create_qp_common()
     905  ib_umem_release(qp->umem);  in create_qp_common()
    1070  ib_umem_release(qp->umem);  in destroy_qp_common()
|
/linux-4.4.14/drivers/staging/rdma/ipath/ |
D | ipath_mr.c |
     153  mr->umem = NULL;  in ipath_reg_phys_mr()
     190  struct ib_umem *umem;  in ipath_reg_user_mr() local
     200  umem = ib_umem_get(pd->uobject->context, start, length,  in ipath_reg_user_mr()
     202  if (IS_ERR(umem))  in ipath_reg_user_mr()
     203  return (void *) umem;  in ipath_reg_user_mr()
     205  n = umem->nmap;  in ipath_reg_user_mr()
     209  ib_umem_release(umem);  in ipath_reg_user_mr()
     217  mr->mr.offset = ib_umem_offset(umem);  in ipath_reg_user_mr()
     220  mr->umem = umem;  in ipath_reg_user_mr()
     224  for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {  in ipath_reg_user_mr()
    [all …]
|
D | ipath_verbs.h | 270 struct ib_umem *umem; member
|
/linux-4.4.14/drivers/staging/rdma/hfi1/ |
D | mr.c |
     251  struct ib_umem *umem;  in hfi1_reg_user_mr() local
     261  umem = ib_umem_get(pd->uobject->context, start, length,  in hfi1_reg_user_mr()
     263  if (IS_ERR(umem))  in hfi1_reg_user_mr()
     264  return (void *) umem;  in hfi1_reg_user_mr()
     266  n = umem->nmap;  in hfi1_reg_user_mr()
     271  ib_umem_release(umem);  in hfi1_reg_user_mr()
     278  mr->mr.offset = ib_umem_offset(umem);  in hfi1_reg_user_mr()
     280  mr->umem = umem;  in hfi1_reg_user_mr()
     282  if (is_power_of_2(umem->page_size))  in hfi1_reg_user_mr()
     283  mr->mr.page_shift = ilog2(umem->page_size);  in hfi1_reg_user_mr()
    [all …]
|
D | verbs.h | 341 struct ib_umem *umem; member
|
/linux-4.4.14/drivers/infiniband/hw/qib/ |
D | qib_mr.c |
     234  struct ib_umem *umem;  in qib_reg_user_mr() local
     244  umem = ib_umem_get(pd->uobject->context, start, length,  in qib_reg_user_mr()
     246  if (IS_ERR(umem))  in qib_reg_user_mr()
     247  return (void *) umem;  in qib_reg_user_mr()
     249  n = umem->nmap;  in qib_reg_user_mr()
     254  ib_umem_release(umem);  in qib_reg_user_mr()
     261  mr->mr.offset = ib_umem_offset(umem);  in qib_reg_user_mr()
     263  mr->umem = umem;  in qib_reg_user_mr()
     265  if (is_power_of_2(umem->page_size))  in qib_reg_user_mr()
     266  mr->mr.page_shift = ilog2(umem->page_size);  in qib_reg_user_mr()
    [all …]
|
D | qib_verbs.h | 331 struct ib_umem *umem; member
|
/linux-4.4.14/drivers/staging/rdma/amso1100/ |
D | c2_provider.c |
     403  mr->umem = NULL;  in c2_reg_phys_mr()
     455  c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);  in c2_reg_user_mr()
     456  if (IS_ERR(c2mr->umem)) {  in c2_reg_user_mr()
     457  err = PTR_ERR(c2mr->umem);  in c2_reg_user_mr()
     462  shift = ffs(c2mr->umem->page_size) - 1;  in c2_reg_user_mr()
     463  n = c2mr->umem->nmap;  in c2_reg_user_mr()
     472  for_each_sg(c2mr->umem->sg_head.sgl, sg, c2mr->umem->nmap, entry) {  in c2_reg_user_mr()
     477  (c2mr->umem->page_size * k);  in c2_reg_user_mr()
     484  c2mr->umem->page_size,  in c2_reg_user_mr()
     487  ib_umem_offset(c2mr->umem),  in c2_reg_user_mr()
    [all …]
|
D | c2_provider.h | 76 struct ib_umem *umem; member
|
/linux-4.4.14/drivers/infiniband/hw/usnic/ |
D | usnic_uiom.c |
      58  struct usnic_uiom_reg *umem = container_of(work,  in usnic_uiom_reg_account() local
      61  down_write(&umem->mm->mmap_sem);  in usnic_uiom_reg_account()
      62  umem->mm->locked_vm -= umem->diff;  in usnic_uiom_reg_account()
      63  up_write(&umem->mm->mmap_sem);  in usnic_uiom_reg_account()
      64  mmput(umem->mm);  in usnic_uiom_reg_account()
      65  kfree(umem);  in usnic_uiom_reg_account()
|
D | usnic_ib_verbs.c |
     631  mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length,  in usnic_ib_reg_mr()
     633  if (IS_ERR_OR_NULL(mr->umem)) {  in usnic_ib_reg_mr()
     634  err = mr->umem ? PTR_ERR(mr->umem) : -EFAULT;  in usnic_ib_reg_mr()
     650  usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length);  in usnic_ib_dereg_mr()
     652  usnic_uiom_reg_release(mr->umem, ibmr->pd->uobject->context->closing);  in usnic_ib_dereg_mr()
|
D | usnic_ib.h | 66 struct usnic_uiom_reg *umem; member
|
/linux-4.4.14/drivers/infiniband/hw/cxgb4/ |
D | mem.c |
     732  mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);  in c4iw_reg_user_mr()
     733  if (IS_ERR(mhp->umem)) {  in c4iw_reg_user_mr()
     734  err = PTR_ERR(mhp->umem);  in c4iw_reg_user_mr()
     739  shift = ffs(mhp->umem->page_size) - 1;  in c4iw_reg_user_mr()
     741  n = mhp->umem->nmap;  in c4iw_reg_user_mr()
     754  for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {  in c4iw_reg_user_mr()
     758  mhp->umem->page_size * k);  in c4iw_reg_user_mr()
     798  ib_umem_release(mhp->umem);  in c4iw_reg_user_mr()
     973  if (mhp->umem)  in c4iw_dereg_mr()
     974  ib_umem_release(mhp->umem);  in c4iw_dereg_mr()
|
D | iw_cxgb4.h | 385 struct ib_umem *umem; member
|
/linux-4.4.14/drivers/infiniband/hw/mthca/ |
D | mthca_provider.c |
     896  mr->umem = NULL;  in mthca_get_dma_mr()
     979  mr->umem = NULL;  in mthca_reg_phys_mr()
    1012  mr->umem = ib_umem_get(pd->uobject->context, start, length, acc,  in mthca_reg_user_mr()
    1015  if (IS_ERR(mr->umem)) {  in mthca_reg_user_mr()
    1016  err = PTR_ERR(mr->umem);  in mthca_reg_user_mr()
    1020  shift = ffs(mr->umem->page_size) - 1;  in mthca_reg_user_mr()
    1021  n = mr->umem->nmap;  in mthca_reg_user_mr()
    1039  for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {  in mthca_reg_user_mr()
    1043  mr->umem->page_size * k;  in mthca_reg_user_mr()
    1077  ib_umem_release(mr->umem);  in mthca_reg_user_mr()
    [all …]
|
D | mthca_provider.h | 75 struct ib_umem *umem; member
|
/linux-4.4.14/Documentation/devicetree/bindings/remoteproc/ |
D | wkup_m3_rproc.txt |
      26  regions. These should be named "umem" & "dmem".
      45  reg-names = "umem", "dmem";
|
/linux-4.4.14/drivers/infiniband/hw/cxgb3/ |
D | iwch_provider.c |
     475  if (mhp->umem)  in iwch_dereg_mr()
     476  ib_umem_release(mhp->umem);  in iwch_dereg_mr()
     648  mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);  in iwch_reg_user_mr()
     649  if (IS_ERR(mhp->umem)) {  in iwch_reg_user_mr()
     650  err = PTR_ERR(mhp->umem);  in iwch_reg_user_mr()
     655  shift = ffs(mhp->umem->page_size) - 1;  in iwch_reg_user_mr()
     657  n = mhp->umem->nmap;  in iwch_reg_user_mr()
     671  for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {  in iwch_reg_user_mr()
     675  mhp->umem->page_size * k);  in iwch_reg_user_mr()
     724  ib_umem_release(mhp->umem);  in iwch_reg_user_mr()
|
D | iwch_provider.h | 76 struct ib_umem *umem; member
|
/linux-4.4.14/drivers/block/ |
D | Makefile | 28 obj-$(CONFIG_BLK_DEV_UMEM) += umem.o
|
D | Kconfig |
     167  <http://www.umem.com/>
     173  module will be called umem.
     175  The umem driver has not yet been allocated a MAJOR number, so
|
/linux-4.4.14/drivers/staging/rdma/ehca/ |
D | ehca_mrmw.c |
     361  e_mr->umem = ib_umem_get(pd->uobject->context, start, length,  in ehca_reg_user_mr()
     363  if (IS_ERR(e_mr->umem)) {  in ehca_reg_user_mr()
     364  ib_mr = (void *)e_mr->umem;  in ehca_reg_user_mr()
     368  if (e_mr->umem->page_size != PAGE_SIZE) {  in ehca_reg_user_mr()
     370  "e_mr->umem->page_size=%x", e_mr->umem->page_size);  in ehca_reg_user_mr()
     379  if (e_mr->umem->hugetlb) {  in ehca_reg_user_mr()
     401  pginfo.u.usr.region = e_mr->umem;  in ehca_reg_user_mr()
     402  pginfo.next_hwpage = ib_umem_offset(e_mr->umem) / hwpage_size;  in ehca_reg_user_mr()
     428  ib_umem_release(e_mr->umem);  in ehca_reg_user_mr()
     674  if (e_mr->umem)  in ehca_dereg_mr()
    [all …]
|
D | ehca_classes.h | 271 struct ib_umem *umem; member
|
/linux-4.4.14/drivers/infiniband/hw/ocrdma/ |
D | ocrdma_verbs.c |
     907  struct ib_umem *umem = mr->umem;  in build_user_pbes() local
     916  shift = ilog2(umem->page_size);  in build_user_pbes()
     918  for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {  in build_user_pbes()
     925  (umem->page_size * pg_cnt));  in build_user_pbes()
     930  umem->page_size * pg_cnt)));  in build_user_pbes()
     970  mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);  in ocrdma_reg_user_mr()
     971  if (IS_ERR(mr->umem)) {  in ocrdma_reg_user_mr()
     975  num_pbes = ib_umem_page_count(mr->umem);  in ocrdma_reg_user_mr()
     980  mr->hwmr.pbe_size = mr->umem->page_size;  in ocrdma_reg_user_mr()
     981  mr->hwmr.fbo = ib_umem_offset(mr->umem);  in ocrdma_reg_user_mr()
    [all …]
|
D | ocrdma.h | 194 struct ib_umem *umem; member
|
/linux-4.4.14/arch/arm/boot/dts/ |
D | am33xx.dtsi | 110 reg-names = "umem", "dmem";
|
D | am4372.dtsi | 116 reg-names = "umem", "dmem";
|
/linux-4.4.14/Documentation/ |
D | devices.txt |
    2005    0 = /dev/umem/d0       Whole of first board
    2006    1 = /dev/umem/d0p1     First partition of first board
    2007    2 = /dev/umem/d0p2     Second partition of first board
    2008   15 = /dev/umem/d0p15    15th partition of first board
    2010   16 = /dev/umem/d1       Whole of second board
    2011   17 = /dev/umem/d1p1     First partition of second board
    2013  255 = /dev/umem/d15p15   15th partition of 16th board.
|
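For reference, the /dev/umem minor numbers excerpted above follow a fixed 16-minors-per-board layout, so the whole table reduces to one line of arithmetic (a sketch only; the macro name is illustrative and not taken from the kernel):

    /* Minor number of partition p (0 = whole device) on board b, per the table above. */
    #define UMEM_MINOR(b, p)   ((b) * 16 + (p))   /* e.g. UMEM_MINOR(15, 15) == 255 */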