/linux-4.1.27/drivers/infiniband/core/ |
D | umem.c
      47  static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)  in __ib_umem_release() argument
      53  if (umem->nmap > 0)  in __ib_umem_release()
      54  ib_dma_unmap_sg(dev, umem->sg_head.sgl,  in __ib_umem_release()
      55  umem->nmap,  in __ib_umem_release()
      58  for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {  in __ib_umem_release()
      61  if (umem->writable && dirty)  in __ib_umem_release()
      66  sg_free_table(&umem->sg_head);  in __ib_umem_release()
      86  struct ib_umem *umem;  in ib_umem_get() local
     116  umem = kzalloc(sizeof *umem, GFP_KERNEL);  in ib_umem_get()
     117  if (!umem)  in ib_umem_get()
     [all …]
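The excerpts above show the core pin/unpin pair: ib_umem_get() pins a user buffer and DMA-maps it into a scatterlist, and __ib_umem_release() unmaps it, dirties the pages when the region was writable, and frees the sg_table. A minimal sketch of how a consumer drives that pair, assuming the 4.1-era ib_umem_get() signature shown above (mydev_pin_user_buf is a hypothetical name, error handling trimmed):

    #include <linux/err.h>
    #include <linux/kernel.h>
    #include <linux/scatterlist.h>
    #include <rdma/ib_umem.h>

    /* Hypothetical consumer of the pin/unpin pair shown above
     * (4.1-era signature: context, addr, size, access, dmasync). */
    static int mydev_pin_user_buf(struct ib_ucontext *ucontext,
                                  unsigned long addr, size_t len, int access)
    {
            struct ib_umem *umem;
            struct scatterlist *sg;
            int i;

            umem = ib_umem_get(ucontext, addr, len, access, 0);
            if (IS_ERR(umem))
                    return PTR_ERR(umem);

            /* One scatterlist entry per DMA-mapped chunk of the pinned buffer. */
            for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
                    pr_debug("dma 0x%llx len %u\n",
                             (unsigned long long)sg_dma_address(sg),
                             sg_dma_len(sg));

            /* Unmaps the scatterlist, dirties writable pages, unpins them. */
            ib_umem_release(umem);
            return 0;
    }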
|
D | umem_odp.c
     242  int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem)  in ib_umem_odp_get() argument
     261  umem->hugetlb = 0;  in ib_umem_odp_get()
     262  umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL);  in ib_umem_odp_get()
     263  if (!umem->odp_data) {  in ib_umem_odp_get()
     267  umem->odp_data->umem = umem;  in ib_umem_odp_get()
     269  mutex_init(&umem->odp_data->umem_mutex);  in ib_umem_odp_get()
     271  init_completion(&umem->odp_data->notifier_completion);  in ib_umem_odp_get()
     273  umem->odp_data->page_list = vzalloc(ib_umem_num_pages(umem) *  in ib_umem_odp_get()
     274  sizeof(*umem->odp_data->page_list));  in ib_umem_odp_get()
     275  if (!umem->odp_data->page_list) {  in ib_umem_odp_get()
     [all …]
|
D | umem_rbtree.c
      53  return ib_umem_start(umem_odp->umem);  in node_start()
      66  return ib_umem_end(umem_odp->umem) - 1;  in node_last()
      82  struct ib_umem_odp *umem;  in rbt_ib_umem_for_each_in_range() local
      89  umem = container_of(node, struct ib_umem_odp, interval_tree);  in rbt_ib_umem_for_each_in_range()
      90  ret_val = cb(umem->umem, start, last, cookie) || ret_val;  in rbt_ib_umem_for_each_in_range()
|
D | Makefile | 13 ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
|
/linux-4.1.27/include/rdma/ |
D | ib_umem.h
      61  static inline int ib_umem_offset(struct ib_umem *umem)  in ib_umem_offset() argument
      63  return umem->address & ((unsigned long)umem->page_size - 1);  in ib_umem_offset()
      67  static inline unsigned long ib_umem_start(struct ib_umem *umem)  in ib_umem_start() argument
      69  return umem->address - ib_umem_offset(umem);  in ib_umem_start()
      73  static inline unsigned long ib_umem_end(struct ib_umem *umem)  in ib_umem_end() argument
      75  return PAGE_ALIGN(umem->address + umem->length);  in ib_umem_end()
      78  static inline size_t ib_umem_num_pages(struct ib_umem *umem)  in ib_umem_num_pages() argument
      80  return (ib_umem_end(umem) - ib_umem_start(umem)) >> PAGE_SHIFT;  in ib_umem_num_pages()
      87  void ib_umem_release(struct ib_umem *umem);
      88  int ib_umem_page_count(struct ib_umem *umem);
      [all …]
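The inline helpers above derive page-aligned pinning bounds from the (possibly unaligned) user address. A worked example with illustrative values, assuming 4 KiB pages: for a region at address 0x10000ff0 of length 0x2000, ib_umem_offset() is 0xff0, ib_umem_start() is 0x10000000, ib_umem_end() is 0x10003000, and ib_umem_num_pages() is 3 — the pinned mapping always spans whole pages even when the user buffer itself is not page-aligned.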
|
D | ib_umem_odp.h
      75  struct ib_umem *umem;  member
      86  int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem);
      88  void ib_umem_odp_release(struct ib_umem *umem);
     103  int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 start_offset, u64 bcnt,
     106  void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 start_offset,
     151  struct ib_umem *umem)  in ib_umem_odp_get() argument
     156  static inline void ib_umem_odp_release(struct ib_umem *umem) {}  in ib_umem_odp_release() argument
|
D | ib_verbs.h | 1166 void (*invalidate_range)(struct ib_umem *umem,
|
/linux-4.1.27/drivers/infiniband/hw/mlx5/ |
D | mem.c
      45  void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,  in mlx5_ib_cont_pages() argument
      59  unsigned long page_shift = ilog2(umem->page_size);  in mlx5_ib_cont_pages()
      62  if (umem->odp_data) {  in mlx5_ib_cont_pages()
      63  *count = ib_umem_page_count(umem);  in mlx5_ib_cont_pages()
      78  for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {  in mlx5_ib_cont_pages()
     150  void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,  in __mlx5_ib_populate_pas() argument
     154  unsigned long umem_page_shift = ilog2(umem->page_size);  in __mlx5_ib_populate_pas()
     164  const bool odp = umem->odp_data != NULL;  in __mlx5_ib_populate_pas()
     171  dma_addr_t pa = umem->odp_data->dma_list[offset + i];  in __mlx5_ib_populate_pas()
     180  for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {  in __mlx5_ib_populate_pas()
     [all …]
|
D | doorbell.c
      41  struct ib_umem *umem;  member
      66  page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,  in mlx5_ib_db_map_user()
      68  if (IS_ERR(page->umem)) {  in mlx5_ib_db_map_user()
      69  err = PTR_ERR(page->umem);  in mlx5_ib_db_map_user()
      77  db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);  in mlx5_ib_db_map_user()
      93  ib_umem_release(db->u.user_page->umem);  in mlx5_ib_db_unmap_user()
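Both doorbell.c files (mlx5 here, mlx4 further down) use the same pattern: pin only the page that contains the user doorbell record, then recover the record's DMA address from the page offset. A minimal sketch of that pattern, assuming the excerpts above; mydev_map_db_page is a hypothetical name:

    /* Hypothetical helper mirroring the db_map_user excerpts above. */
    static int mydev_map_db_page(struct ib_ucontext *ucontext, unsigned long virt,
                                 struct ib_umem **umem, dma_addr_t *db_dma)
    {
            /* Pin just the single page holding the doorbell record. */
            *umem = ib_umem_get(ucontext, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
            if (IS_ERR(*umem))
                    return PTR_ERR(*umem);

            /* DMA address of that page plus the record's offset inside it. */
            *db_dma = sg_dma_address((*umem)->sg_head.sgl) + (virt & ~PAGE_MASK);
            return 0;
    }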
|
D | odp.c
      46  void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,  in mlx5_ib_invalidate_range() argument
      55  if (!umem || !umem->odp_data) {  in mlx5_ib_invalidate_range()
      60  mr = umem->odp_data->private;  in mlx5_ib_invalidate_range()
      65  start = max_t(u64, ib_umem_start(umem), start);  in mlx5_ib_invalidate_range()
      66  end = min_t(u64, ib_umem_end(umem), end);  in mlx5_ib_invalidate_range()
      75  for (addr = start; addr < end; addr += (u64)umem->page_size) {  in mlx5_ib_invalidate_range()
      76  idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;  in mlx5_ib_invalidate_range()
      83  if (umem->odp_data->dma_list[idx] &  in mlx5_ib_invalidate_range()
     109  ib_umem_odp_unmap_dma_pages(umem, start, end);  in mlx5_ib_invalidate_range()
     211  if (!mr->umem->odp_data) {  in pagefault_single_data_segment()
     [all …]
|
D | mr.c
     659  mr->umem = NULL;  in mlx5_ib_get_dma_mr()
     751  static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,  in reg_umr() argument
     795  mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT);  in reg_umr()
     854  struct ib_umem *umem = mr->umem;  in mlx5_ib_update_mtt() local
     910  ib_umem_num_pages(umem) - start_page_index);  in mlx5_ib_update_mtt()
     913  __mlx5_ib_populate_pas(dev, umem, PAGE_SHIFT,  in mlx5_ib_update_mtt()
     969  u64 length, struct ib_umem *umem,  in reg_create() argument
     991  mlx5_ib_populate_pas(dev, umem, page_shift, in->pas,  in reg_create()
    1014  mr->umem = umem;  in reg_create()
    1038  struct ib_umem *umem;  in mlx5_ib_reg_user_mr() local
    [all …]
|
D | mlx5_ib.h
     190  struct ib_umem *umem;  member
     239  struct ib_umem *umem;  member
     264  struct ib_umem *umem;  member
     297  struct ib_umem *umem;  member
     319  struct ib_umem *umem;  member
     601  void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
     603  void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
     606  void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
     612  int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
     630  void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
|
D | srq.c
     105  srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,  in create_srq_user()
     107  if (IS_ERR(srq->umem)) {  in create_srq_user()
     109  err = PTR_ERR(srq->umem);  in create_srq_user()
     113  mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages,  in create_srq_user()
     129  mlx5_ib_populate_pas(dev, srq->umem, page_shift, (*in)->pas, 0);  in create_srq_user()
     147  ib_umem_release(srq->umem);  in create_srq_user()
     223  ib_umem_release(srq->umem);  in destroy_srq_user()
     403  ib_umem_release(msrq->umem);  in mlx5_ib_destroy_srq()
|
D | cq.c
     633  cq->buf.umem = ib_umem_get(context, ucmd.buf_addr,  in create_cq_user()
     636  if (IS_ERR(cq->buf.umem)) {  in create_cq_user()
     637  err = PTR_ERR(cq->buf.umem);  in create_cq_user()
     646  mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift,  in create_cq_user()
     657  mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, 0);  in create_cq_user()
     668  ib_umem_release(cq->buf.umem);  in create_cq_user()
     675  ib_umem_release(cq->buf.umem);  in destroy_cq_user()
     950  struct ib_umem *umem;  in resize_user() local
     953  struct ib_ucontext *context = cq->buf.umem->context;  in resize_user()
     962  umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,  in resize_user()
     [all …]
|
D | qp.c
     129  struct ib_umem *umem = qp->umem;  in mlx5_ib_read_user_wqe() local
     146  if (offset > umem->length ||  in mlx5_ib_read_user_wqe()
     147  (send && offset + sizeof(struct mlx5_wqe_ctrl_seg) > umem->length))  in mlx5_ib_read_user_wqe()
     151  ret = ib_umem_copy_from(buffer, umem, offset, first_copy_length);  in mlx5_ib_read_user_wqe()
     167  ret = ib_umem_copy_from(buffer + first_copy_length, umem, wq->offset,  in mlx5_ib_read_user_wqe()
     654  qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,  in create_user_qp()
     656  if (IS_ERR(qp->umem)) {  in create_user_qp()
     658  err = PTR_ERR(qp->umem);  in create_user_qp()
     662  qp->umem = NULL;  in create_user_qp()
     665  if (qp->umem) {  in create_user_qp()
     [all …]
|
/linux-4.1.27/drivers/infiniband/hw/mlx4/ |
D | mr.c
      76  mr->umem = NULL;  in mlx4_ib_get_dma_mr()
      90  struct ib_umem *umem)  in mlx4_ib_umem_write_mtt() argument
     105  for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {  in mlx4_ib_umem_write_mtt()
     109  umem->page_size * k;  in mlx4_ib_umem_write_mtt()
     149  mr->umem = ib_umem_get(pd->uobject->context, start, length,  in mlx4_ib_reg_user_mr()
     151  if (IS_ERR(mr->umem)) {  in mlx4_ib_reg_user_mr()
     152  err = PTR_ERR(mr->umem);  in mlx4_ib_reg_user_mr()
     156  n = ib_umem_page_count(mr->umem);  in mlx4_ib_reg_user_mr()
     157  shift = ilog2(mr->umem->page_size);  in mlx4_ib_reg_user_mr()
     164  err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);  in mlx4_ib_reg_user_mr()
     [all …]
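mlx4_ib_umem_write_mtt() above, like the reg_user_mr paths in the older drivers below (amso1100, cxgb3/4, mthca), expands each DMA-mapped scatterlist entry into per-page DMA addresses before handing them to the hardware translation tables. A minimal sketch of that expansion, assuming the per_sg loop shape shown in these excerpts (mydev_fill_page_array is a hypothetical name; hardware writes and buffer management omitted):

    /* Expand each DMA-mapped SG entry into page-sized addresses, as the
     * write_mtt / reg_user_mr excerpts do. Returns the page count or -EINVAL. */
    static int mydev_fill_page_array(struct ib_umem *umem, u64 *pages, int max_pages)
    {
            unsigned long page_size = umem->page_size;
            struct scatterlist *sg;
            int entry, k, n = 0;

            for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                    unsigned int npages = sg_dma_len(sg) >> ilog2(page_size);

                    for (k = 0; k < npages; ++k) {
                            if (n >= max_pages)
                                    return -EINVAL;
                            /* One hardware entry per page within this SG chunk. */
                            pages[n++] = sg_dma_address(sg) + page_size * k;
                    }
            }
            return n;
    }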
|
D | doorbell.c
      39  struct ib_umem *umem;  member
      64  page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,  in mlx4_ib_db_map_user()
      66  if (IS_ERR(page->umem)) {  in mlx4_ib_db_map_user()
      67  err = PTR_ERR(page->umem);  in mlx4_ib_db_map_user()
      75  db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);  in mlx4_ib_db_map_user()
      91  ib_umem_release(db->u.user_page->umem);  in mlx4_ib_db_unmap_user()
|
D | srq.c
     116  srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,  in mlx4_ib_create_srq()
     118  if (IS_ERR(srq->umem)) {  in mlx4_ib_create_srq()
     119  err = PTR_ERR(srq->umem);  in mlx4_ib_create_srq()
     123  err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),  in mlx4_ib_create_srq()
     124  ilog2(srq->umem->page_size), &srq->mtt);  in mlx4_ib_create_srq()
     128  err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem);  in mlx4_ib_create_srq()
     214  ib_umem_release(srq->umem);  in mlx4_ib_create_srq()
     282  ib_umem_release(msrq->umem);  in mlx4_ib_destroy_srq()
|
D | cq.c
     138  struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,  in mlx4_ib_get_cq_umem() argument
     144  *umem = ib_umem_get(context, buf_addr, cqe * cqe_size,  in mlx4_ib_get_cq_umem()
     146  if (IS_ERR(*umem))  in mlx4_ib_get_cq_umem()
     147  return PTR_ERR(*umem);  in mlx4_ib_get_cq_umem()
     149  err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),  in mlx4_ib_get_cq_umem()
     150  ilog2((*umem)->page_size), &buf->mtt);  in mlx4_ib_get_cq_umem()
     154  err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);  in mlx4_ib_get_cq_umem()
     164  ib_umem_release(*umem);  in mlx4_ib_get_cq_umem()
     202  err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,  in mlx4_ib_create_cq()
     260  ib_umem_release(cq->umem);  in mlx4_ib_create_cq()
     [all …]
|
D | mlx4_ib.h
     111  struct ib_umem *umem;  member
     121  struct ib_umem *umem;  member
     287  struct ib_umem *umem;  member
     321  struct ib_umem *umem;  member
     654  struct ib_umem *umem);
|
D | qp.c
     724  qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,  in create_qp_common()
     726  if (IS_ERR(qp->umem)) {  in create_qp_common()
     727  err = PTR_ERR(qp->umem);  in create_qp_common()
     731  err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem),  in create_qp_common()
     732  ilog2(qp->umem->page_size), &qp->mtt);  in create_qp_common()
     736  err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);  in create_qp_common()
     886  ib_umem_release(qp->umem);  in create_qp_common()
    1051  ib_umem_release(qp->umem);  in destroy_qp_common()
|
/linux-4.1.27/drivers/infiniband/hw/ipath/ |
D | ipath_mr.c
     153  mr->umem = NULL;  in ipath_reg_phys_mr()
     190  struct ib_umem *umem;  in ipath_reg_user_mr() local
     200  umem = ib_umem_get(pd->uobject->context, start, length,  in ipath_reg_user_mr()
     202  if (IS_ERR(umem))  in ipath_reg_user_mr()
     203  return (void *) umem;  in ipath_reg_user_mr()
     205  n = umem->nmap;  in ipath_reg_user_mr()
     209  ib_umem_release(umem);  in ipath_reg_user_mr()
     217  mr->mr.offset = ib_umem_offset(umem);  in ipath_reg_user_mr()
     220  mr->umem = umem;  in ipath_reg_user_mr()
     224  for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {  in ipath_reg_user_mr()
     [all …]
|
D | ipath_verbs.h | 270 struct ib_umem *umem; member
|
/linux-4.1.27/drivers/infiniband/hw/qib/ |
D | qib_mr.c
     234  struct ib_umem *umem;  in qib_reg_user_mr() local
     244  umem = ib_umem_get(pd->uobject->context, start, length,  in qib_reg_user_mr()
     246  if (IS_ERR(umem))  in qib_reg_user_mr()
     247  return (void *) umem;  in qib_reg_user_mr()
     249  n = umem->nmap;  in qib_reg_user_mr()
     254  ib_umem_release(umem);  in qib_reg_user_mr()
     261  mr->mr.offset = ib_umem_offset(umem);  in qib_reg_user_mr()
     263  mr->umem = umem;  in qib_reg_user_mr()
     265  if (is_power_of_2(umem->page_size))  in qib_reg_user_mr()
     266  mr->mr.page_shift = ilog2(umem->page_size);  in qib_reg_user_mr()
     [all …]
|
D | qib_verbs.h | 331 struct ib_umem *umem; member
|
/linux-4.1.27/drivers/infiniband/hw/amso1100/ |
D | c2_provider.c
     395  mr->umem = NULL;  in c2_reg_phys_mr()
     447  c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);  in c2_reg_user_mr()
     448  if (IS_ERR(c2mr->umem)) {  in c2_reg_user_mr()
     449  err = PTR_ERR(c2mr->umem);  in c2_reg_user_mr()
     454  shift = ffs(c2mr->umem->page_size) - 1;  in c2_reg_user_mr()
     455  n = c2mr->umem->nmap;  in c2_reg_user_mr()
     464  for_each_sg(c2mr->umem->sg_head.sgl, sg, c2mr->umem->nmap, entry) {  in c2_reg_user_mr()
     469  (c2mr->umem->page_size * k);  in c2_reg_user_mr()
     476  c2mr->umem->page_size,  in c2_reg_user_mr()
     479  ib_umem_offset(c2mr->umem),  in c2_reg_user_mr()
     [all …]
|
D | c2_provider.h | 76 struct ib_umem *umem; member
|
/linux-4.1.27/drivers/infiniband/hw/usnic/ |
D | usnic_uiom.c
      58  struct usnic_uiom_reg *umem = container_of(work,  in usnic_uiom_reg_account() local
      61  down_write(&umem->mm->mmap_sem);  in usnic_uiom_reg_account()
      62  umem->mm->locked_vm -= umem->diff;  in usnic_uiom_reg_account()
      63  up_write(&umem->mm->mmap_sem);  in usnic_uiom_reg_account()
      64  mmput(umem->mm);  in usnic_uiom_reg_account()
      65  kfree(umem);  in usnic_uiom_reg_account()
|
D | usnic_ib_verbs.c
     608  mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length,  in usnic_ib_reg_mr()
     610  if (IS_ERR_OR_NULL(mr->umem)) {  in usnic_ib_reg_mr()
     611  err = mr->umem ? PTR_ERR(mr->umem) : -EFAULT;  in usnic_ib_reg_mr()
     627  usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length);  in usnic_ib_dereg_mr()
     629  usnic_uiom_reg_release(mr->umem, ibmr->pd->uobject->context->closing);  in usnic_ib_dereg_mr()
|
D | usnic_ib.h | 51 struct usnic_uiom_reg *umem; member
|
/linux-4.1.27/drivers/infiniband/hw/cxgb4/ |
D | mem.c
     732  mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);  in c4iw_reg_user_mr()
     733  if (IS_ERR(mhp->umem)) {  in c4iw_reg_user_mr()
     734  err = PTR_ERR(mhp->umem);  in c4iw_reg_user_mr()
     739  shift = ffs(mhp->umem->page_size) - 1;  in c4iw_reg_user_mr()
     741  n = mhp->umem->nmap;  in c4iw_reg_user_mr()
     754  for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {  in c4iw_reg_user_mr()
     758  mhp->umem->page_size * k);  in c4iw_reg_user_mr()
     798  ib_umem_release(mhp->umem);  in c4iw_reg_user_mr()
     974  if (mhp->umem)  in c4iw_dereg_mr()
     975  ib_umem_release(mhp->umem);  in c4iw_dereg_mr()
|
D | iw_cxgb4.h | 387 struct ib_umem *umem; member
|
/linux-4.1.27/drivers/infiniband/hw/mthca/ |
D | mthca_provider.c
     888  mr->umem = NULL;  in mthca_get_dma_mr()
     971  mr->umem = NULL;  in mthca_reg_phys_mr()
    1004  mr->umem = ib_umem_get(pd->uobject->context, start, length, acc,  in mthca_reg_user_mr()
    1007  if (IS_ERR(mr->umem)) {  in mthca_reg_user_mr()
    1008  err = PTR_ERR(mr->umem);  in mthca_reg_user_mr()
    1012  shift = ffs(mr->umem->page_size) - 1;  in mthca_reg_user_mr()
    1013  n = mr->umem->nmap;  in mthca_reg_user_mr()
    1031  for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {  in mthca_reg_user_mr()
    1035  mr->umem->page_size * k;  in mthca_reg_user_mr()
    1069  ib_umem_release(mr->umem);  in mthca_reg_user_mr()
    [all …]
|
D | mthca_provider.h | 75 struct ib_umem *umem; member
|
/linux-4.1.27/drivers/infiniband/hw/cxgb3/ |
D | iwch_provider.c
     465  if (mhp->umem)  in iwch_dereg_mr()
     466  ib_umem_release(mhp->umem);  in iwch_dereg_mr()
     638  mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);  in iwch_reg_user_mr()
     639  if (IS_ERR(mhp->umem)) {  in iwch_reg_user_mr()
     640  err = PTR_ERR(mhp->umem);  in iwch_reg_user_mr()
     645  shift = ffs(mhp->umem->page_size) - 1;  in iwch_reg_user_mr()
     647  n = mhp->umem->nmap;  in iwch_reg_user_mr()
     661  for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {  in iwch_reg_user_mr()
     665  mhp->umem->page_size * k);  in iwch_reg_user_mr()
     714  ib_umem_release(mhp->umem);  in iwch_reg_user_mr()
|
D | iwch_provider.h | 76 struct ib_umem *umem; member
|
/linux-4.1.27/drivers/block/ |
D | Makefile | 30 obj-$(CONFIG_BLK_DEV_UMEM) += umem.o
|
D | Kconfig
     167  <http://www.umem.com/>
     173  module will be called umem.
     175  The umem driver has not yet been allocated a MAJOR number, so
|
/linux-4.1.27/drivers/infiniband/hw/ehca/ |
D | ehca_mrmw.c
     361  e_mr->umem = ib_umem_get(pd->uobject->context, start, length,  in ehca_reg_user_mr()
     363  if (IS_ERR(e_mr->umem)) {  in ehca_reg_user_mr()
     364  ib_mr = (void *)e_mr->umem;  in ehca_reg_user_mr()
     368  if (e_mr->umem->page_size != PAGE_SIZE) {  in ehca_reg_user_mr()
     370  "e_mr->umem->page_size=%x", e_mr->umem->page_size);  in ehca_reg_user_mr()
     379  if (e_mr->umem->hugetlb) {  in ehca_reg_user_mr()
     401  pginfo.u.usr.region = e_mr->umem;  in ehca_reg_user_mr()
     402  pginfo.next_hwpage = ib_umem_offset(e_mr->umem) / hwpage_size;  in ehca_reg_user_mr()
     428  ib_umem_release(e_mr->umem);  in ehca_reg_user_mr()
     674  if (e_mr->umem)  in ehca_dereg_mr()
     [all …]
|
D | ehca_classes.h | 271 struct ib_umem *umem; member
|
/linux-4.1.27/drivers/infiniband/hw/ocrdma/ |
D | ocrdma_verbs.c
     842  struct ib_umem *umem = mr->umem;  in build_user_pbes() local
     851  shift = ilog2(umem->page_size);  in build_user_pbes()
     853  for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {  in build_user_pbes()
     860  (umem->page_size * pg_cnt));  in build_user_pbes()
     865  umem->page_size * pg_cnt)));  in build_user_pbes()
     905  mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);  in ocrdma_reg_user_mr()
     906  if (IS_ERR(mr->umem)) {  in ocrdma_reg_user_mr()
     910  num_pbes = ib_umem_page_count(mr->umem);  in ocrdma_reg_user_mr()
     915  mr->hwmr.pbe_size = mr->umem->page_size;  in ocrdma_reg_user_mr()
     916  mr->hwmr.fbo = ib_umem_offset(mr->umem);  in ocrdma_reg_user_mr()
     [all …]
|
D | ocrdma.h | 179 struct ib_umem *umem; member
|
/linux-4.1.27/Documentation/ |
D | devices.txt
    2005    0 = /dev/umem/d0       Whole of first board
    2006    1 = /dev/umem/d0p1     First partition of first board
    2007    2 = /dev/umem/d0p2     Second partition of first board
    2008   15 = /dev/umem/d0p15    15th partition of first board
    2010   16 = /dev/umem/d1       Whole of second board
    2011   17 = /dev/umem/d1p1     First partition of second board
    2013  255 = /dev/umem/d15p15   15th partition of 16th board.
|