
Searched refs:umem (Results 1 – 43 of 43) sorted by relevance

/linux-4.1.27/drivers/infiniband/core/
umem.c
47 static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty) in __ib_umem_release() argument
53 if (umem->nmap > 0) in __ib_umem_release()
54 ib_dma_unmap_sg(dev, umem->sg_head.sgl, in __ib_umem_release()
55 umem->nmap, in __ib_umem_release()
58 for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) { in __ib_umem_release()
61 if (umem->writable && dirty) in __ib_umem_release()
66 sg_free_table(&umem->sg_head); in __ib_umem_release()
86 struct ib_umem *umem; in ib_umem_get() local
116 umem = kzalloc(sizeof *umem, GFP_KERNEL); in ib_umem_get()
117 if (!umem) in ib_umem_get()
[all …]
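
The umem.c hits above are the core pin/unpin path that every verbs driver in the results below goes through. What follows is a minimal sketch of that lifecycle under the 4.1-era API shown here (ib_umem_get() still takes a dmasync flag); example_pin_user_buf() is a hypothetical helper, not a function from this tree.

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>

static int example_pin_user_buf(struct ib_ucontext *context,
				unsigned long addr, size_t len, int access)
{
	struct ib_umem *umem;
	struct scatterlist *sg;
	int entry;

	/* Pin and DMA-map the user range; dmasync is 0 in most callers. */
	umem = ib_umem_get(context, addr, len, access, 0);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	/* umem->nmap scatterlist entries are valid after DMA mapping. */
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry)
		pr_debug("umem chunk: dma 0x%llx len %u\n",
			 (unsigned long long)sg_dma_address(sg),
			 sg_dma_len(sg));

	/* Unmaps, dirties (if writable) and unpins the pages. */
	ib_umem_release(umem);
	return 0;
}
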
umem_odp.c
242 int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem) in ib_umem_odp_get() argument
261 umem->hugetlb = 0; in ib_umem_odp_get()
262 umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL); in ib_umem_odp_get()
263 if (!umem->odp_data) { in ib_umem_odp_get()
267 umem->odp_data->umem = umem; in ib_umem_odp_get()
269 mutex_init(&umem->odp_data->umem_mutex); in ib_umem_odp_get()
271 init_completion(&umem->odp_data->notifier_completion); in ib_umem_odp_get()
273 umem->odp_data->page_list = vzalloc(ib_umem_num_pages(umem) * in ib_umem_odp_get()
274 sizeof(*umem->odp_data->page_list)); in ib_umem_odp_get()
275 if (!umem->odp_data->page_list) { in ib_umem_odp_get()
[all …]
umem_rbtree.c
53 return ib_umem_start(umem_odp->umem); in node_start()
66 return ib_umem_end(umem_odp->umem) - 1; in node_last()
82 struct ib_umem_odp *umem; in rbt_ib_umem_for_each_in_range() local
89 umem = container_of(node, struct ib_umem_odp, interval_tree); in rbt_ib_umem_for_each_in_range()
90 ret_val = cb(umem->umem, start, last, cookie) || ret_val; in rbt_ib_umem_for_each_in_range()
Makefile
13 ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
/linux-4.1.27/include/rdma/
ib_umem.h
61 static inline int ib_umem_offset(struct ib_umem *umem) in ib_umem_offset() argument
63 return umem->address & ((unsigned long)umem->page_size - 1); in ib_umem_offset()
67 static inline unsigned long ib_umem_start(struct ib_umem *umem) in ib_umem_start() argument
69 return umem->address - ib_umem_offset(umem); in ib_umem_start()
73 static inline unsigned long ib_umem_end(struct ib_umem *umem) in ib_umem_end() argument
75 return PAGE_ALIGN(umem->address + umem->length); in ib_umem_end()
78 static inline size_t ib_umem_num_pages(struct ib_umem *umem) in ib_umem_num_pages() argument
80 return (ib_umem_end(umem) - ib_umem_start(umem)) >> PAGE_SHIFT; in ib_umem_num_pages()
87 void ib_umem_release(struct ib_umem *umem);
88 int ib_umem_page_count(struct ib_umem *umem);
[all …]
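
The inline helpers above are plain page arithmetic on umem->address and umem->length. A worked example with illustrative values (4 KiB pages assumed; the numbers are not taken from the tree):

/*
 * umem->address = 0x10002345, umem->length = 0x6000, page_size = 0x1000
 *
 * ib_umem_offset()    = 0x10002345 & 0xfff              = 0x345
 * ib_umem_start()     = 0x10002345 - 0x345              = 0x10002000
 * ib_umem_end()       = PAGE_ALIGN(0x10002345 + 0x6000) = 0x10009000
 * ib_umem_num_pages() = (0x10009000 - 0x10002000) >> 12 = 7
 */
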
ib_umem_odp.h
75 struct ib_umem *umem; member
86 int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem);
88 void ib_umem_odp_release(struct ib_umem *umem);
103 int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 start_offset, u64 bcnt,
106 void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 start_offset,
151 struct ib_umem *umem) in ib_umem_odp_get() argument
156 static inline void ib_umem_odp_release(struct ib_umem *umem) {} in ib_umem_odp_release() argument
ib_verbs.h
1166 void (*invalidate_range)(struct ib_umem *umem,
/linux-4.1.27/drivers/infiniband/hw/mlx5/
mem.c
45 void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift, in mlx5_ib_cont_pages() argument
59 unsigned long page_shift = ilog2(umem->page_size); in mlx5_ib_cont_pages()
62 if (umem->odp_data) { in mlx5_ib_cont_pages()
63 *count = ib_umem_page_count(umem); in mlx5_ib_cont_pages()
78 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { in mlx5_ib_cont_pages()
150 void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, in __mlx5_ib_populate_pas() argument
154 unsigned long umem_page_shift = ilog2(umem->page_size); in __mlx5_ib_populate_pas()
164 const bool odp = umem->odp_data != NULL; in __mlx5_ib_populate_pas()
171 dma_addr_t pa = umem->odp_data->dma_list[offset + i]; in __mlx5_ib_populate_pas()
180 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { in __mlx5_ib_populate_pas()
[all …]
doorbell.c
41 struct ib_umem *umem; member
66 page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK, in mlx5_ib_db_map_user()
68 if (IS_ERR(page->umem)) { in mlx5_ib_db_map_user()
69 err = PTR_ERR(page->umem); in mlx5_ib_db_map_user()
77 db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK); in mlx5_ib_db_map_user()
93 ib_umem_release(db->u.user_page->umem); in mlx5_ib_db_unmap_user()
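
The doorbell.c hits here (and the nearly identical mlx4 ones further down) follow one pattern: pin the single page containing the user doorbell record, then add the in-page offset to that page's DMA address. A sketch of that step, under the same assumptions as the earlier umem sketch; example_db_map_user() is a made-up name.

#include <linux/err.h>
#include <linux/mm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>

static struct ib_umem *example_db_map_user(struct ib_ucontext *context,
					   unsigned long virt,
					   dma_addr_t *dma_out)
{
	struct ib_umem *umem;

	/* Pin exactly one page: the one that contains virt. */
	umem = ib_umem_get(context, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
	if (IS_ERR(umem))
		return umem;

	/* Bus address of that page plus the offset of virt within it. */
	*dma_out = sg_dma_address(umem->sg_head.sgl) + (virt & ~PAGE_MASK);
	return umem;	/* released later with ib_umem_release() */
}
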
odp.c
46 void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start, in mlx5_ib_invalidate_range() argument
55 if (!umem || !umem->odp_data) { in mlx5_ib_invalidate_range()
60 mr = umem->odp_data->private; in mlx5_ib_invalidate_range()
65 start = max_t(u64, ib_umem_start(umem), start); in mlx5_ib_invalidate_range()
66 end = min_t(u64, ib_umem_end(umem), end); in mlx5_ib_invalidate_range()
75 for (addr = start; addr < end; addr += (u64)umem->page_size) { in mlx5_ib_invalidate_range()
76 idx = (addr - ib_umem_start(umem)) / PAGE_SIZE; in mlx5_ib_invalidate_range()
83 if (umem->odp_data->dma_list[idx] & in mlx5_ib_invalidate_range()
109 ib_umem_odp_unmap_dma_pages(umem, start, end); in mlx5_ib_invalidate_range()
211 if (!mr->umem->odp_data) { in pagefault_single_data_segment()
[all …]
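
The invalidate path above first clips the MMU-notifier range to the umem's own [ib_umem_start(), ib_umem_end()) window and then turns each page address into an index into the ODP dma_list. A reduced sketch of just that clamp-and-index step; example_clip_range() is a made-up name, and the real function also tears down MTT entries and device state.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <rdma/ib_umem.h>

static void example_clip_range(struct ib_umem *umem, u64 start, u64 end)
{
	u64 addr, idx;

	/* Never touch pages outside the registered region. */
	start = max_t(u64, ib_umem_start(umem), start);
	end = min_t(u64, ib_umem_end(umem), end);

	for (addr = start; addr < end; addr += (u64)umem->page_size) {
		idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
		pr_debug("would invalidate ODP page index %llu\n", idx);
	}
}
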
mr.c
659 mr->umem = NULL; in mlx5_ib_get_dma_mr()
751 static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem, in reg_umr() argument
795 mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT); in reg_umr()
854 struct ib_umem *umem = mr->umem; in mlx5_ib_update_mtt() local
910 ib_umem_num_pages(umem) - start_page_index); in mlx5_ib_update_mtt()
913 __mlx5_ib_populate_pas(dev, umem, PAGE_SHIFT, in mlx5_ib_update_mtt()
969 u64 length, struct ib_umem *umem, in reg_create() argument
991 mlx5_ib_populate_pas(dev, umem, page_shift, in->pas, in reg_create()
1014 mr->umem = umem; in reg_create()
1038 struct ib_umem *umem; in mlx5_ib_reg_user_mr() local
[all …]
mlx5_ib.h
190 struct ib_umem *umem; member
239 struct ib_umem *umem; member
264 struct ib_umem *umem; member
297 struct ib_umem *umem; member
319 struct ib_umem *umem; member
601 void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
603 void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
606 void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
612 int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
630 void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
srq.c
105 srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size, in create_srq_user()
107 if (IS_ERR(srq->umem)) { in create_srq_user()
109 err = PTR_ERR(srq->umem); in create_srq_user()
113 mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages, in create_srq_user()
129 mlx5_ib_populate_pas(dev, srq->umem, page_shift, (*in)->pas, 0); in create_srq_user()
147 ib_umem_release(srq->umem); in create_srq_user()
223 ib_umem_release(srq->umem); in destroy_srq_user()
403 ib_umem_release(msrq->umem); in mlx5_ib_destroy_srq()
cq.c
633 cq->buf.umem = ib_umem_get(context, ucmd.buf_addr, in create_cq_user()
636 if (IS_ERR(cq->buf.umem)) { in create_cq_user()
637 err = PTR_ERR(cq->buf.umem); in create_cq_user()
646 mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift, in create_cq_user()
657 mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, 0); in create_cq_user()
668 ib_umem_release(cq->buf.umem); in create_cq_user()
675 ib_umem_release(cq->buf.umem); in destroy_cq_user()
950 struct ib_umem *umem; in resize_user() local
953 struct ib_ucontext *context = cq->buf.umem->context; in resize_user()
962 umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size, in resize_user()
[all …]
qp.c
129 struct ib_umem *umem = qp->umem; in mlx5_ib_read_user_wqe() local
146 if (offset > umem->length || in mlx5_ib_read_user_wqe()
147 (send && offset + sizeof(struct mlx5_wqe_ctrl_seg) > umem->length)) in mlx5_ib_read_user_wqe()
151 ret = ib_umem_copy_from(buffer, umem, offset, first_copy_length); in mlx5_ib_read_user_wqe()
167 ret = ib_umem_copy_from(buffer + first_copy_length, umem, wq->offset, in mlx5_ib_read_user_wqe()
654 qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, in create_user_qp()
656 if (IS_ERR(qp->umem)) { in create_user_qp()
658 err = PTR_ERR(qp->umem); in create_user_qp()
662 qp->umem = NULL; in create_user_qp()
665 if (qp->umem) { in create_user_qp()
[all …]
/linux-4.1.27/drivers/infiniband/hw/mlx4/
mr.c
76 mr->umem = NULL; in mlx4_ib_get_dma_mr()
90 struct ib_umem *umem) in mlx4_ib_umem_write_mtt() argument
105 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { in mlx4_ib_umem_write_mtt()
109 umem->page_size * k; in mlx4_ib_umem_write_mtt()
149 mr->umem = ib_umem_get(pd->uobject->context, start, length, in mlx4_ib_reg_user_mr()
151 if (IS_ERR(mr->umem)) { in mlx4_ib_reg_user_mr()
152 err = PTR_ERR(mr->umem); in mlx4_ib_reg_user_mr()
156 n = ib_umem_page_count(mr->umem); in mlx4_ib_reg_user_mr()
157 shift = ilog2(mr->umem->page_size); in mlx4_ib_reg_user_mr()
164 err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem); in mlx4_ib_reg_user_mr()
[all …]
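
mlx4_ib_umem_write_mtt() above, like the similar loops in the mthca, cxgb3/cxgb4, amso1100 and ocrdma results, expands every DMA-mapped scatterlist entry into page_size-sized chunks and records each chunk's bus address for the HCA's translation table. A generic sketch of that expansion; example_collect_page_addrs() is a hypothetical name.

#include <linux/errno.h>
#include <linux/log2.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
#include <rdma/ib_umem.h>

static int example_collect_page_addrs(struct ib_umem *umem, u64 *pages,
				      int max_pages)
{
	struct scatterlist *sg;
	int entry, chunk_pages, i, k = 0;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		/* Each mapped entry may cover several umem pages. */
		chunk_pages = sg_dma_len(sg) >> ilog2(umem->page_size);
		for (i = 0; i < chunk_pages; i++) {
			if (k >= max_pages)
				return -ENOSPC;
			pages[k++] = sg_dma_address(sg) +
				     (u64)umem->page_size * i;
		}
	}
	return k;	/* number of page-sized chunks recorded */
}
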
doorbell.c
39 struct ib_umem *umem; member
64 page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK, in mlx4_ib_db_map_user()
66 if (IS_ERR(page->umem)) { in mlx4_ib_db_map_user()
67 err = PTR_ERR(page->umem); in mlx4_ib_db_map_user()
75 db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK); in mlx4_ib_db_map_user()
91 ib_umem_release(db->u.user_page->umem); in mlx4_ib_db_unmap_user()
srq.c
116 srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, in mlx4_ib_create_srq()
118 if (IS_ERR(srq->umem)) { in mlx4_ib_create_srq()
119 err = PTR_ERR(srq->umem); in mlx4_ib_create_srq()
123 err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem), in mlx4_ib_create_srq()
124 ilog2(srq->umem->page_size), &srq->mtt); in mlx4_ib_create_srq()
128 err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem); in mlx4_ib_create_srq()
214 ib_umem_release(srq->umem); in mlx4_ib_create_srq()
282 ib_umem_release(msrq->umem); in mlx4_ib_destroy_srq()
cq.c
138 struct mlx4_ib_cq_buf *buf, struct ib_umem **umem, in mlx4_ib_get_cq_umem() argument
144 *umem = ib_umem_get(context, buf_addr, cqe * cqe_size, in mlx4_ib_get_cq_umem()
146 if (IS_ERR(*umem)) in mlx4_ib_get_cq_umem()
147 return PTR_ERR(*umem); in mlx4_ib_get_cq_umem()
149 err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem), in mlx4_ib_get_cq_umem()
150 ilog2((*umem)->page_size), &buf->mtt); in mlx4_ib_get_cq_umem()
154 err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem); in mlx4_ib_get_cq_umem()
164 ib_umem_release(*umem); in mlx4_ib_get_cq_umem()
202 err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem, in mlx4_ib_create_cq()
260 ib_umem_release(cq->umem); in mlx4_ib_create_cq()
[all …]
mlx4_ib.h
111 struct ib_umem *umem; member
121 struct ib_umem *umem; member
287 struct ib_umem *umem; member
321 struct ib_umem *umem; member
654 struct ib_umem *umem);
qp.c
724 qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, in create_qp_common()
726 if (IS_ERR(qp->umem)) { in create_qp_common()
727 err = PTR_ERR(qp->umem); in create_qp_common()
731 err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem), in create_qp_common()
732 ilog2(qp->umem->page_size), &qp->mtt); in create_qp_common()
736 err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem); in create_qp_common()
886 ib_umem_release(qp->umem); in create_qp_common()
1051 ib_umem_release(qp->umem); in destroy_qp_common()
/linux-4.1.27/drivers/infiniband/hw/ipath/
ipath_mr.c
153 mr->umem = NULL; in ipath_reg_phys_mr()
190 struct ib_umem *umem; in ipath_reg_user_mr() local
200 umem = ib_umem_get(pd->uobject->context, start, length, in ipath_reg_user_mr()
202 if (IS_ERR(umem)) in ipath_reg_user_mr()
203 return (void *) umem; in ipath_reg_user_mr()
205 n = umem->nmap; in ipath_reg_user_mr()
209 ib_umem_release(umem); in ipath_reg_user_mr()
217 mr->mr.offset = ib_umem_offset(umem); in ipath_reg_user_mr()
220 mr->umem = umem; in ipath_reg_user_mr()
224 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { in ipath_reg_user_mr()
[all …]
ipath_verbs.h
270 struct ib_umem *umem; member
/linux-4.1.27/drivers/infiniband/hw/qib/
qib_mr.c
234 struct ib_umem *umem; in qib_reg_user_mr() local
244 umem = ib_umem_get(pd->uobject->context, start, length, in qib_reg_user_mr()
246 if (IS_ERR(umem)) in qib_reg_user_mr()
247 return (void *) umem; in qib_reg_user_mr()
249 n = umem->nmap; in qib_reg_user_mr()
254 ib_umem_release(umem); in qib_reg_user_mr()
261 mr->mr.offset = ib_umem_offset(umem); in qib_reg_user_mr()
263 mr->umem = umem; in qib_reg_user_mr()
265 if (is_power_of_2(umem->page_size)) in qib_reg_user_mr()
266 mr->mr.page_shift = ilog2(umem->page_size); in qib_reg_user_mr()
[all …]
qib_verbs.h
331 struct ib_umem *umem; member
/linux-4.1.27/drivers/infiniband/hw/amso1100/
c2_provider.c
395 mr->umem = NULL; in c2_reg_phys_mr()
447 c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0); in c2_reg_user_mr()
448 if (IS_ERR(c2mr->umem)) { in c2_reg_user_mr()
449 err = PTR_ERR(c2mr->umem); in c2_reg_user_mr()
454 shift = ffs(c2mr->umem->page_size) - 1; in c2_reg_user_mr()
455 n = c2mr->umem->nmap; in c2_reg_user_mr()
464 for_each_sg(c2mr->umem->sg_head.sgl, sg, c2mr->umem->nmap, entry) { in c2_reg_user_mr()
469 (c2mr->umem->page_size * k); in c2_reg_user_mr()
476 c2mr->umem->page_size, in c2_reg_user_mr()
479 ib_umem_offset(c2mr->umem), in c2_reg_user_mr()
[all …]
c2_provider.h
76 struct ib_umem *umem; member
/linux-4.1.27/drivers/infiniband/hw/usnic/
usnic_uiom.c
58 struct usnic_uiom_reg *umem = container_of(work, in usnic_uiom_reg_account() local
61 down_write(&umem->mm->mmap_sem); in usnic_uiom_reg_account()
62 umem->mm->locked_vm -= umem->diff; in usnic_uiom_reg_account()
63 up_write(&umem->mm->mmap_sem); in usnic_uiom_reg_account()
64 mmput(umem->mm); in usnic_uiom_reg_account()
65 kfree(umem); in usnic_uiom_reg_account()
usnic_ib_verbs.c
608 mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length, in usnic_ib_reg_mr()
610 if (IS_ERR_OR_NULL(mr->umem)) { in usnic_ib_reg_mr()
611 err = mr->umem ? PTR_ERR(mr->umem) : -EFAULT; in usnic_ib_reg_mr()
627 usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length); in usnic_ib_dereg_mr()
629 usnic_uiom_reg_release(mr->umem, ibmr->pd->uobject->context->closing); in usnic_ib_dereg_mr()
usnic_ib.h
51 struct usnic_uiom_reg *umem; member
/linux-4.1.27/drivers/infiniband/hw/cxgb4/
mem.c
732 mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0); in c4iw_reg_user_mr()
733 if (IS_ERR(mhp->umem)) { in c4iw_reg_user_mr()
734 err = PTR_ERR(mhp->umem); in c4iw_reg_user_mr()
739 shift = ffs(mhp->umem->page_size) - 1; in c4iw_reg_user_mr()
741 n = mhp->umem->nmap; in c4iw_reg_user_mr()
754 for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) { in c4iw_reg_user_mr()
758 mhp->umem->page_size * k); in c4iw_reg_user_mr()
798 ib_umem_release(mhp->umem); in c4iw_reg_user_mr()
974 if (mhp->umem) in c4iw_dereg_mr()
975 ib_umem_release(mhp->umem); in c4iw_dereg_mr()
iw_cxgb4.h
387 struct ib_umem *umem; member
/linux-4.1.27/drivers/infiniband/hw/mthca/
mthca_provider.c
888 mr->umem = NULL; in mthca_get_dma_mr()
971 mr->umem = NULL; in mthca_reg_phys_mr()
1004 mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, in mthca_reg_user_mr()
1007 if (IS_ERR(mr->umem)) { in mthca_reg_user_mr()
1008 err = PTR_ERR(mr->umem); in mthca_reg_user_mr()
1012 shift = ffs(mr->umem->page_size) - 1; in mthca_reg_user_mr()
1013 n = mr->umem->nmap; in mthca_reg_user_mr()
1031 for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) { in mthca_reg_user_mr()
1035 mr->umem->page_size * k; in mthca_reg_user_mr()
1069 ib_umem_release(mr->umem); in mthca_reg_user_mr()
[all …]
mthca_provider.h
75 struct ib_umem *umem; member
/linux-4.1.27/drivers/infiniband/hw/cxgb3/
iwch_provider.c
465 if (mhp->umem) in iwch_dereg_mr()
466 ib_umem_release(mhp->umem); in iwch_dereg_mr()
638 mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0); in iwch_reg_user_mr()
639 if (IS_ERR(mhp->umem)) { in iwch_reg_user_mr()
640 err = PTR_ERR(mhp->umem); in iwch_reg_user_mr()
645 shift = ffs(mhp->umem->page_size) - 1; in iwch_reg_user_mr()
647 n = mhp->umem->nmap; in iwch_reg_user_mr()
661 for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) { in iwch_reg_user_mr()
665 mhp->umem->page_size * k); in iwch_reg_user_mr()
714 ib_umem_release(mhp->umem); in iwch_reg_user_mr()
iwch_provider.h
76 struct ib_umem *umem; member
/linux-4.1.27/drivers/block/
Makefile
30 obj-$(CONFIG_BLK_DEV_UMEM) += umem.o
Kconfig
167 <http://www.umem.com/>
173 module will be called umem.
175 The umem driver has not yet been allocated a MAJOR number, so
/linux-4.1.27/drivers/infiniband/hw/ehca/
ehca_mrmw.c
361 e_mr->umem = ib_umem_get(pd->uobject->context, start, length, in ehca_reg_user_mr()
363 if (IS_ERR(e_mr->umem)) { in ehca_reg_user_mr()
364 ib_mr = (void *)e_mr->umem; in ehca_reg_user_mr()
368 if (e_mr->umem->page_size != PAGE_SIZE) { in ehca_reg_user_mr()
370 "e_mr->umem->page_size=%x", e_mr->umem->page_size); in ehca_reg_user_mr()
379 if (e_mr->umem->hugetlb) { in ehca_reg_user_mr()
401 pginfo.u.usr.region = e_mr->umem; in ehca_reg_user_mr()
402 pginfo.next_hwpage = ib_umem_offset(e_mr->umem) / hwpage_size; in ehca_reg_user_mr()
428 ib_umem_release(e_mr->umem); in ehca_reg_user_mr()
674 if (e_mr->umem) in ehca_dereg_mr()
[all …]
ehca_classes.h
271 struct ib_umem *umem; member
/linux-4.1.27/drivers/infiniband/hw/ocrdma/
ocrdma_verbs.c
842 struct ib_umem *umem = mr->umem; in build_user_pbes() local
851 shift = ilog2(umem->page_size); in build_user_pbes()
853 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { in build_user_pbes()
860 (umem->page_size * pg_cnt)); in build_user_pbes()
865 umem->page_size * pg_cnt))); in build_user_pbes()
905 mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0); in ocrdma_reg_user_mr()
906 if (IS_ERR(mr->umem)) { in ocrdma_reg_user_mr()
910 num_pbes = ib_umem_page_count(mr->umem); in ocrdma_reg_user_mr()
915 mr->hwmr.pbe_size = mr->umem->page_size; in ocrdma_reg_user_mr()
916 mr->hwmr.fbo = ib_umem_offset(mr->umem); in ocrdma_reg_user_mr()
[all …]
ocrdma.h
179 struct ib_umem *umem; member
/linux-4.1.27/Documentation/
devices.txt
2005 0 = /dev/umem/d0 Whole of first board
2006 1 = /dev/umem/d0p1 First partition of first board
2007 2 = /dev/umem/d0p2 Second partition of first board
2008 15 = /dev/umem/d0p15 15th partition of first board
2010 16 = /dev/umem/d1 Whole of second board
2011 17 = /dev/umem/d1p1 First partition of second board
2013 255 = /dev/umem/d15p15 15th partition of 16th board.