Searched refs:umem (Results 1 - 47 of 47) sorted by relevance

/linux-4.4.14/drivers/infiniband/core/
umem.c
47 static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty) __ib_umem_release() argument
53 if (umem->nmap > 0) __ib_umem_release()
54 ib_dma_unmap_sg(dev, umem->sg_head.sgl, __ib_umem_release()
55 umem->nmap, __ib_umem_release()
58 for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) { __ib_umem_release()
61 if (umem->writable && dirty) __ib_umem_release()
66 sg_free_table(&umem->sg_head); __ib_umem_release()
86 struct ib_umem *umem; ib_umem_get() local
116 umem = kzalloc(sizeof *umem, GFP_KERNEL); ib_umem_get()
117 if (!umem) ib_umem_get()
120 umem->context = context; ib_umem_get()
121 umem->length = size; ib_umem_get()
122 umem->address = addr; ib_umem_get()
123 umem->page_size = PAGE_SIZE; ib_umem_get()
124 umem->pid = get_task_pid(current, PIDTYPE_PID); ib_umem_get()
132 umem->writable = !!(access & ib_umem_get()
137 ret = ib_umem_odp_get(context, umem); ib_umem_get()
139 kfree(umem); ib_umem_get()
142 return umem; ib_umem_get()
145 umem->odp_data = NULL; ib_umem_get()
148 umem->hugetlb = 1; ib_umem_get()
152 kfree(umem); ib_umem_get()
162 umem->hugetlb = 0; ib_umem_get()
164 npages = ib_umem_num_pages(umem); ib_umem_get()
183 ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL); ib_umem_get()
188 sg_list_start = umem->sg_head.sgl; ib_umem_get()
194 1, !umem->writable, page_list, vma_list); ib_umem_get()
199 umem->npages += ret; ib_umem_get()
205 umem->hugetlb = 0; for_each_sg()
214 umem->nmap = ib_dma_map_sg_attrs(context->device,
215 umem->sg_head.sgl,
216 umem->npages,
220 if (umem->nmap <= 0) {
230 __ib_umem_release(context->device, umem, 0);
231 put_pid(umem->pid);
232 kfree(umem);
241 return ret < 0 ? ERR_PTR(ret) : umem;
247 struct ib_umem *umem = container_of(work, struct ib_umem, work); ib_umem_account() local
249 down_write(&umem->mm->mmap_sem); ib_umem_account()
250 umem->mm->pinned_vm -= umem->diff; ib_umem_account()
251 up_write(&umem->mm->mmap_sem); ib_umem_account()
252 mmput(umem->mm); ib_umem_account()
253 kfree(umem); ib_umem_account()
258 * @umem: umem struct to release
260 void ib_umem_release(struct ib_umem *umem) ib_umem_release() argument
262 struct ib_ucontext *context = umem->context; ib_umem_release()
267 if (umem->odp_data) { ib_umem_release()
268 ib_umem_odp_release(umem); ib_umem_release()
272 __ib_umem_release(umem->context->device, umem, 1); ib_umem_release()
274 task = get_pid_task(umem->pid, PIDTYPE_PID); ib_umem_release()
275 put_pid(umem->pid); ib_umem_release()
283 diff = ib_umem_num_pages(umem); ib_umem_release()
295 INIT_WORK(&umem->work, ib_umem_account); ib_umem_release()
296 umem->mm = mm; ib_umem_release()
297 umem->diff = diff; ib_umem_release()
299 queue_work(ib_wq, &umem->work); ib_umem_release()
309 kfree(umem); ib_umem_release()
313 int ib_umem_page_count(struct ib_umem *umem) ib_umem_page_count() argument
320 if (umem->odp_data) ib_umem_page_count()
321 return ib_umem_num_pages(umem); ib_umem_page_count()
323 shift = ilog2(umem->page_size); ib_umem_page_count()
326 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) ib_umem_page_count()
336 * umem - the umem to copy from
343 int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset, ib_umem_copy_from() argument
349 if (offset > umem->length || length > umem->length - offset) { ib_umem_copy_from()
350 pr_err("ib_umem_copy_from not in range. offset: %zd umem length: %zd end: %zd\n", ib_umem_copy_from()
351 offset, umem->length, end); ib_umem_copy_from()
355 ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->nmap, dst, length, ib_umem_copy_from()
356 offset + ib_umem_offset(umem)); ib_umem_copy_from()
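
Note: the hits above cover the core pin/unpin path. As a hedged illustration of how a consumer drives it (the helper names and the reduced error handling are mine, not from this tree), the pattern that recurs in the driver results further down boils down to:

#include <linux/err.h>
#include <rdma/ib_umem.h>

/* Hypothetical helper: pin a user buffer for DMA and report how many
 * pages it spans.  ib_umem_get() pins the pages, builds the scatterlist
 * in umem->sg_head and DMA-maps it; ib_umem_release() undoes all of
 * that and adjusts the owning mm's pinned_vm accounting. */
static struct ib_umem *pin_user_buffer(struct ib_ucontext *context,
				       unsigned long addr, size_t size,
				       int access, int *npages)
{
	struct ib_umem *umem;

	umem = ib_umem_get(context, addr, size, access, 0 /* dmasync */);
	if (IS_ERR(umem))
		return umem;		/* caller checks with IS_ERR() */

	*npages = ib_umem_page_count(umem);
	return umem;
}

static void unpin_user_buffer(struct ib_umem *umem)
{
	ib_umem_release(umem);
}
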
umem_odp.c
48 /* Only update private counters for this umem if it has them. ib_umem_notifier_start_account()
49 * Otherwise skip it. All page faults will be delayed for this umem. */ ib_umem_notifier_start_account()
66 /* Only update private counters for this umem if it has them. ib_umem_notifier_end_account()
67 * Otherwise skip it. All page faults will be delayed for this umem. */ ib_umem_notifier_end_account()
131 /* Make sure that the fact the umem is dying is out before we release ib_umem_notifier_release_trampoline()
242 int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem) ib_umem_odp_get() argument
261 umem->hugetlb = 0; ib_umem_odp_get()
262 umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL); ib_umem_odp_get()
263 if (!umem->odp_data) { ib_umem_odp_get()
267 umem->odp_data->umem = umem; ib_umem_odp_get()
269 mutex_init(&umem->odp_data->umem_mutex); ib_umem_odp_get()
271 init_completion(&umem->odp_data->notifier_completion); ib_umem_odp_get()
273 umem->odp_data->page_list = vzalloc(ib_umem_num_pages(umem) * ib_umem_odp_get()
274 sizeof(*umem->odp_data->page_list)); ib_umem_odp_get()
275 if (!umem->odp_data->page_list) { ib_umem_odp_get()
280 umem->odp_data->dma_list = vzalloc(ib_umem_num_pages(umem) * ib_umem_odp_get()
281 sizeof(*umem->odp_data->dma_list)); ib_umem_odp_get()
282 if (!umem->odp_data->dma_list) { ib_umem_odp_get()
294 if (likely(ib_umem_start(umem) != ib_umem_end(umem))) ib_umem_odp_get()
295 rbt_ib_umem_insert(&umem->odp_data->interval_tree, ib_umem_odp_get()
299 umem->odp_data->mn_counters_active = true; ib_umem_odp_get()
301 list_add(&umem->odp_data->no_private_counters, ib_umem_odp_get()
340 vfree(umem->odp_data->dma_list); ib_umem_odp_get()
342 vfree(umem->odp_data->page_list); ib_umem_odp_get()
344 kfree(umem->odp_data); ib_umem_odp_get()
350 void ib_umem_odp_release(struct ib_umem *umem) ib_umem_odp_release() argument
352 struct ib_ucontext *context = umem->context; ib_umem_odp_release()
355 * Ensure that no more pages are mapped in the umem. ib_umem_odp_release()
360 ib_umem_odp_unmap_dma_pages(umem, ib_umem_start(umem), ib_umem_odp_release()
361 ib_umem_end(umem)); ib_umem_odp_release()
364 if (likely(ib_umem_start(umem) != ib_umem_end(umem))) ib_umem_odp_release()
365 rbt_ib_umem_remove(&umem->odp_data->interval_tree, ib_umem_odp_release()
368 if (!umem->odp_data->mn_counters_active) { ib_umem_odp_release()
369 list_del(&umem->odp_data->no_private_counters); ib_umem_odp_release()
370 complete_all(&umem->odp_data->notifier_completion); ib_umem_odp_release()
411 vfree(umem->odp_data->dma_list); ib_umem_odp_release()
412 vfree(umem->odp_data->page_list); ib_umem_odp_release()
413 kfree(umem->odp_data); ib_umem_odp_release()
414 kfree(umem); ib_umem_odp_release()
420 * @umem: the umem to insert the page to.
421 * @page_index: index in the umem to add the page to.
426 * umem->odp_data->notifiers_seq.
433 * umem.
436 struct ib_umem *umem, ib_umem_odp_map_dma_single_page()
443 struct ib_device *dev = umem->context->device; ib_umem_odp_map_dma_single_page()
454 if (ib_umem_mmu_notifier_retry(umem, current_seq)) { ib_umem_odp_map_dma_single_page()
458 if (!(umem->odp_data->dma_list[page_index])) { ib_umem_odp_map_dma_single_page()
467 umem->odp_data->dma_list[page_index] = dma_addr | access_mask; ib_umem_odp_map_dma_single_page()
468 umem->odp_data->page_list[page_index] = page; ib_umem_odp_map_dma_single_page()
470 } else if (umem->odp_data->page_list[page_index] == page) { ib_umem_odp_map_dma_single_page()
471 umem->odp_data->dma_list[page_index] |= access_mask; ib_umem_odp_map_dma_single_page()
474 umem->odp_data->page_list[page_index], page); ib_umem_odp_map_dma_single_page()
482 if (umem->context->invalidate_range || !stored_page) ib_umem_odp_map_dma_single_page()
485 if (remove_existing_mapping && umem->context->invalidate_range) { ib_umem_odp_map_dma_single_page()
487 umem, ib_umem_odp_map_dma_single_page()
502 * umem->odp_data->dma_list.
509 * @umem: the umem to map and pin
519 * umem->odp_data->notifiers_seq before calling this function
521 int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt, ib_umem_odp_map_dma_pages() argument
534 if (user_virt < ib_umem_start(umem) || ib_umem_odp_map_dma_pages()
535 user_virt + bcnt > ib_umem_end(umem)) ib_umem_odp_map_dma_pages()
547 owning_process = get_pid_task(umem->context->tgid, PIDTYPE_PID); ib_umem_odp_map_dma_pages()
559 start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT; ib_umem_odp_map_dma_pages()
586 mutex_lock(&umem->odp_data->umem_mutex); ib_umem_odp_map_dma_pages()
589 umem, k, base_virt_addr, local_page_list[j], ib_umem_odp_map_dma_pages()
595 mutex_unlock(&umem->odp_data->umem_mutex); ib_umem_odp_map_dma_pages()
621 void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt, ib_umem_odp_unmap_dma_pages() argument
626 struct ib_device *dev = umem->context->device; ib_umem_odp_unmap_dma_pages()
628 virt = max_t(u64, virt, ib_umem_start(umem)); ib_umem_odp_unmap_dma_pages()
629 bound = min_t(u64, bound, ib_umem_end(umem)); ib_umem_odp_unmap_dma_pages()
635 mutex_lock(&umem->odp_data->umem_mutex); ib_umem_odp_unmap_dma_pages()
636 for (addr = virt; addr < bound; addr += (u64)umem->page_size) { ib_umem_odp_unmap_dma_pages()
637 idx = (addr - ib_umem_start(umem)) / PAGE_SIZE; ib_umem_odp_unmap_dma_pages()
638 if (umem->odp_data->page_list[idx]) { ib_umem_odp_unmap_dma_pages()
639 struct page *page = umem->odp_data->page_list[idx]; ib_umem_odp_unmap_dma_pages()
640 dma_addr_t dma = umem->odp_data->dma_list[idx]; ib_umem_odp_unmap_dma_pages()
661 if (!umem->context->invalidate_range) ib_umem_odp_unmap_dma_pages()
663 umem->odp_data->page_list[idx] = NULL; ib_umem_odp_unmap_dma_pages()
664 umem->odp_data->dma_list[idx] = 0; ib_umem_odp_unmap_dma_pages()
667 mutex_unlock(&umem->odp_data->umem_mutex); ib_umem_odp_unmap_dma_pages()
435 ib_umem_odp_map_dma_single_page( struct ib_umem *umem, int page_index, u64 base_virt_addr, struct page *page, u64 access_mask, unsigned long current_seq) ib_umem_odp_map_dma_single_page() argument
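
Note: as a hedged sketch of the ODP bookkeeping these hits revolve around (the function name is mine; the field names follow struct ib_umem_odp in ib_umem_odp.h), ib_umem_odp_get() essentially sizes two parallel per-page arrays:

#include <linux/vmalloc.h>
#include <rdma/ib_umem_odp.h>

/* Hypothetical extract of the allocations visible above: one struct
 * page pointer and one DMA address per page of the registered range.
 * Both arrays are indexed by (addr - ib_umem_start(umem)) >> PAGE_SHIFT,
 * which is how the map/unmap paths above compute 'idx'. */
static int odp_alloc_page_tables(struct ib_umem *umem)
{
	size_t npages = ib_umem_num_pages(umem);

	umem->odp_data->page_list =
		vzalloc(npages * sizeof(*umem->odp_data->page_list));
	if (!umem->odp_data->page_list)
		return -ENOMEM;

	umem->odp_data->dma_list =
		vzalloc(npages * sizeof(*umem->odp_data->dma_list));
	if (!umem->odp_data->dma_list) {
		vfree(umem->odp_data->page_list);
		return -ENOMEM;
	}
	return 0;
}
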
umem_rbtree.c
53 return ib_umem_start(umem_odp->umem); node_start()
59 * in the umem.
66 return ib_umem_end(umem_odp->umem) - 1; node_last()
82 struct ib_umem_odp *umem; rbt_ib_umem_for_each_in_range() local
89 umem = container_of(node, struct ib_umem_odp, interval_tree); rbt_ib_umem_for_each_in_range()
90 ret_val = cb(umem->umem, start, last, cookie) || ret_val; rbt_ib_umem_for_each_in_range()
Makefile
14 ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
device.c
986 /* Make sure that any pending umem accounting work is done. */ ib_core_cleanup()
/linux-4.4.14/include/rdma/
ib_umem.h
60 /* Returns the offset of the umem start relative to the first page. */ ib_umem_offset()
61 static inline int ib_umem_offset(struct ib_umem *umem) ib_umem_offset() argument
63 return umem->address & ((unsigned long)umem->page_size - 1); ib_umem_offset()
66 /* Returns the first page of an ODP umem. */ ib_umem_start()
67 static inline unsigned long ib_umem_start(struct ib_umem *umem) ib_umem_start() argument
69 return umem->address - ib_umem_offset(umem); ib_umem_start()
72 /* Returns the address of the page after the last one of an ODP umem. */ ib_umem_end()
73 static inline unsigned long ib_umem_end(struct ib_umem *umem) ib_umem_end() argument
75 return PAGE_ALIGN(umem->address + umem->length); ib_umem_end()
78 static inline size_t ib_umem_num_pages(struct ib_umem *umem) ib_umem_num_pages() argument
80 return (ib_umem_end(umem) - ib_umem_start(umem)) >> PAGE_SHIFT; ib_umem_num_pages()
87 void ib_umem_release(struct ib_umem *umem);
88 int ib_umem_page_count(struct ib_umem *umem);
89 int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
101 static inline void ib_umem_release(struct ib_umem *umem) { } ib_umem_page_count() argument
102 static inline int ib_umem_page_count(struct ib_umem *umem) { return 0; } ib_umem_copy_from() argument
103 static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset, ib_umem_copy_from() argument
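
For reference, the four inline helpers matched above read as follows when reassembled from these fragments (include/rdma/ib_umem.h in linux-4.4.14); the comments are lightly expanded:

/* Offset of the umem start relative to the first page. */
static inline int ib_umem_offset(struct ib_umem *umem)
{
	return umem->address & ((unsigned long)umem->page_size - 1);
}

/* First page of an ODP umem. */
static inline unsigned long ib_umem_start(struct ib_umem *umem)
{
	return umem->address - ib_umem_offset(umem);
}

/* Address of the page after the last one of an ODP umem. */
static inline unsigned long ib_umem_end(struct ib_umem *umem)
{
	return PAGE_ALIGN(umem->address + umem->length);
}

/* Number of pages spanned by the registration. */
static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
	return (ib_umem_end(umem) - ib_umem_start(umem)) >> PAGE_SHIFT;
}

For example, with 4 KiB pages, address 0x12345678 and length 0x100 give ib_umem_offset() == 0x678, ib_umem_start() == 0x12345000, ib_umem_end() == 0x12346000 and ib_umem_num_pages() == 1.
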
ib_umem_odp.h
47 * An array of the pages included in the on-demand paging umem.
61 * umem, allowing only a single thread to map/unmap pages. The mutex
75 struct ib_umem *umem; member in struct:ib_umem_odp
86 int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem);
88 void ib_umem_odp_release(struct ib_umem *umem);
103 int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 start_offset, u64 bcnt,
106 void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 start_offset,
151 struct ib_umem *umem) ib_umem_odp_get()
156 static inline void ib_umem_odp_release(struct ib_umem *umem) {} argument
150 ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem) ib_umem_odp_get() argument
ib_verbs.h
1255 void (*invalidate_range)(struct ib_umem *umem,
/linux-4.4.14/drivers/infiniband/hw/mlx5/
mem.c
38 /* @umem: umem object to scan
40 * @count: number of PAGE_SIZE pages covered by umem
45 void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift, mlx5_ib_cont_pages() argument
59 unsigned long page_shift = ilog2(umem->page_size); mlx5_ib_cont_pages()
62 if (umem->odp_data) { mlx5_ib_cont_pages()
63 *count = ib_umem_page_count(umem); mlx5_ib_cont_pages()
78 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { mlx5_ib_cont_pages()
138 * Populate the given array with bus addresses from the umem.
141 * umem - umem to use to fill the pages
143 * offset - offset into the umem to start from,
150 void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, __mlx5_ib_populate_pas() argument
154 unsigned long umem_page_shift = ilog2(umem->page_size); __mlx5_ib_populate_pas()
164 const bool odp = umem->odp_data != NULL; __mlx5_ib_populate_pas()
171 dma_addr_t pa = umem->odp_data->dma_list[offset + i]; __mlx5_ib_populate_pas()
180 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { __mlx5_ib_populate_pas()
199 void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, mlx5_ib_populate_pas() argument
202 return __mlx5_ib_populate_pas(dev, umem, page_shift, 0, mlx5_ib_populate_pas()
203 ib_umem_num_pages(umem), pas, mlx5_ib_populate_pas()
doorbell.c
41 struct ib_umem *umem; member in struct:mlx5_ib_user_db_page
66 page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK, mlx5_ib_db_map_user()
68 if (IS_ERR(page->umem)) { mlx5_ib_db_map_user()
69 err = PTR_ERR(page->umem); mlx5_ib_db_map_user()
77 db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK); mlx5_ib_db_map_user()
93 ib_umem_release(db->u.user_page->umem); mlx5_ib_db_unmap_user()
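
Note: mlx4 and mlx5 share the doorbell-page idiom visible here; as a hedged condensation (the helper name is mine) of what mlx5_ib_db_map_user() does with the umem:

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <rdma/ib_umem.h>

/* Hypothetical condensation of the doorbell mapping above: pin the
 * single page that contains the user's doorbell record, then recover
 * its bus address from the first scatterlist entry plus the offset of
 * 'virt' within the page. */
static int map_user_doorbell(struct ib_ucontext *context, unsigned long virt,
			     struct ib_umem **umem, dma_addr_t *dma)
{
	*umem = ib_umem_get(context, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	*dma = sg_dma_address((*umem)->sg_head.sgl) + (virt & ~PAGE_MASK);
	return 0;
}
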
mr.c
668 mr->umem = NULL; mlx5_ib_get_dma_mr()
759 static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem, reg_umr() argument
804 mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT); reg_umr()
863 struct ib_umem *umem = mr->umem; mlx5_ib_update_mtt() local
919 ib_umem_num_pages(umem) - start_page_index); mlx5_ib_update_mtt()
922 __mlx5_ib_populate_pas(dev, umem, PAGE_SHIFT, mlx5_ib_update_mtt()
926 * umem. */ mlx5_ib_update_mtt()
978 u64 length, struct ib_umem *umem, reg_create()
999 mlx5_ib_populate_pas(dev, umem, page_shift, in->pas, reg_create()
1022 mr->umem = umem; reg_create()
1046 struct ib_umem *umem; mlx5_ib_reg_user_mr() local
1055 umem = ib_umem_get(pd->uobject->context, start, length, access_flags, mlx5_ib_reg_user_mr()
1057 if (IS_ERR(umem)) { mlx5_ib_reg_user_mr()
1058 mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(umem)); mlx5_ib_reg_user_mr()
1059 return (void *)umem; mlx5_ib_reg_user_mr()
1062 mlx5_ib_cont_pages(umem, start, &npages, &page_shift, &ncont, &order); mlx5_ib_reg_user_mr()
1073 mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift, mlx5_ib_reg_user_mr()
1086 mr = reg_create(pd, virt_addr, length, umem, ncont, page_shift, mlx5_ib_reg_user_mr()
1096 mr->umem = umem; mlx5_ib_reg_user_mr()
1103 if (umem->odp_data) { mlx5_ib_reg_user_mr()
1106 * setting of umem->odp_data->private to point to our mlx5_ib_reg_user_mr()
1112 mr->umem->odp_data->private = mr; mlx5_ib_reg_user_mr()
1115 * umem->odp_data->private value in the invalidation mlx5_ib_reg_user_mr()
1120 * before umem->odp_data->private == mr is visible to mlx5_ib_reg_user_mr()
1130 ib_umem_release(umem); mlx5_ib_reg_user_mr()
1262 struct ib_umem *umem = mr->umem; mlx5_ib_dereg_mr() local
1265 if (umem && umem->odp_data) { mlx5_ib_dereg_mr()
1271 mlx5_ib_invalidate_range(umem, ib_umem_start(umem), mlx5_ib_dereg_mr()
1272 ib_umem_end(umem)); mlx5_ib_dereg_mr()
1274 * We kill the umem before the MR for ODP, mlx5_ib_dereg_mr()
1278 ib_umem_release(umem); mlx5_ib_dereg_mr()
1281 /* Avoid double-freeing the umem. */ mlx5_ib_dereg_mr()
1282 umem = NULL; mlx5_ib_dereg_mr()
1288 if (umem) { mlx5_ib_dereg_mr()
1289 ib_umem_release(umem); mlx5_ib_dereg_mr()
1372 mr->umem = NULL; mlx5_ib_alloc_mr()
977 reg_create(struct ib_pd *pd, u64 virt_addr, u64 length, struct ib_umem *umem, int npages, int page_shift, int access_flags) reg_create() argument
odp.c
46 void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start, mlx5_ib_invalidate_range() argument
55 if (!umem || !umem->odp_data) { mlx5_ib_invalidate_range()
56 pr_err("invalidation called on NULL umem or non-ODP umem\n"); mlx5_ib_invalidate_range()
60 mr = umem->odp_data->private; mlx5_ib_invalidate_range()
65 start = max_t(u64, ib_umem_start(umem), start); mlx5_ib_invalidate_range()
66 end = min_t(u64, ib_umem_end(umem), end); mlx5_ib_invalidate_range()
75 for (addr = start; addr < end; addr += (u64)umem->page_size) { mlx5_ib_invalidate_range()
76 idx = (addr - ib_umem_start(umem)) / PAGE_SIZE; mlx5_ib_invalidate_range()
83 if (umem->odp_data->dma_list[idx] & mlx5_ib_invalidate_range()
109 ib_umem_odp_unmap_dma_pages(umem, start, end); mlx5_ib_invalidate_range()
204 if (!mr->umem->odp_data) { pagefault_single_data_segment()
218 current_seq = ACCESS_ONCE(mr->umem->odp_data->notifiers_seq); pagefault_single_data_segment()
235 if (mr->umem->writable) pagefault_single_data_segment()
237 npages = ib_umem_odp_map_dma_pages(mr->umem, io_virt, bcnt, pagefault_single_data_segment()
245 mutex_lock(&mr->umem->odp_data->umem_mutex); pagefault_single_data_segment()
246 if (!ib_umem_mmu_notifier_retry(mr->umem, current_seq)) { pagefault_single_data_segment()
256 mutex_unlock(&mr->umem->odp_data->umem_mutex); pagefault_single_data_segment()
272 if (!mr->umem->odp_data->dying) { pagefault_single_data_segment()
273 struct ib_umem_odp *odp_data = mr->umem->odp_data; pagefault_single_data_segment()
srq.c
105 srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size, create_srq_user()
107 if (IS_ERR(srq->umem)) { create_srq_user()
108 mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size); create_srq_user()
109 err = PTR_ERR(srq->umem); create_srq_user()
113 mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages, create_srq_user()
129 mlx5_ib_populate_pas(dev, srq->umem, page_shift, (*in)->pas, 0); create_srq_user()
147 ib_umem_release(srq->umem); create_srq_user()
223 ib_umem_release(srq->umem); destroy_srq_user()
402 ib_umem_release(msrq->umem); mlx5_ib_destroy_srq()
cq.c
640 cq->buf.umem = ib_umem_get(context, ucmd.buf_addr, create_cq_user()
643 if (IS_ERR(cq->buf.umem)) { create_cq_user()
644 err = PTR_ERR(cq->buf.umem); create_cq_user()
653 mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift, create_cq_user()
664 mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, 0); create_cq_user()
675 ib_umem_release(cq->buf.umem); create_cq_user()
682 ib_umem_release(cq->buf.umem); destroy_cq_user()
963 struct ib_umem *umem; resize_user() local
966 struct ib_ucontext *context = cq->buf.umem->context; resize_user()
975 umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size, resize_user()
977 if (IS_ERR(umem)) { resize_user()
978 err = PTR_ERR(umem); resize_user()
982 mlx5_ib_cont_pages(umem, ucmd.buf_addr, &npages, page_shift, resize_user()
985 cq->resize_umem = umem; resize_user()
1152 ib_umem_release(cq->buf.umem); mlx5_ib_resize_cq()
1153 cq->buf.umem = cq->resize_umem; mlx5_ib_resize_cq()
mlx5_ib.h
189 struct ib_umem *umem; member in struct:mlx5_ib_qp
237 struct ib_umem *umem; member in struct:mlx5_ib_cq_buf
268 struct ib_umem *umem; member in struct:mlx5_shared_mr_info
301 struct ib_umem *umem; member in struct:mlx5_ib_srq
328 struct ib_umem *umem; member in struct:mlx5_ib_mr
592 void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
594 void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
597 void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
603 int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
621 void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
qp.c
124 struct ib_umem *umem = qp->umem; mlx5_ib_read_user_wqe() local
141 if (offset > umem->length || mlx5_ib_read_user_wqe()
142 (send && offset + sizeof(struct mlx5_wqe_ctrl_seg) > umem->length)) mlx5_ib_read_user_wqe()
146 ret = ib_umem_copy_from(buffer, umem, offset, first_copy_length); mlx5_ib_read_user_wqe()
162 ret = ib_umem_copy_from(buffer + first_copy_length, umem, wq->offset, mlx5_ib_read_user_wqe()
646 qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, create_user_qp()
648 if (IS_ERR(qp->umem)) { create_user_qp()
650 err = PTR_ERR(qp->umem); create_user_qp()
654 qp->umem = NULL; create_user_qp()
657 if (qp->umem) { create_user_qp()
658 mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift, create_user_qp()
675 if (qp->umem) create_user_qp()
676 mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0); create_user_qp()
707 if (qp->umem) create_user_qp()
708 ib_umem_release(qp->umem); create_user_qp()
721 if (qp->umem) destroy_qp_user()
722 ib_umem_release(qp->umem); destroy_qp_user()
/linux-4.4.14/drivers/infiniband/hw/mlx4/
mr.c
76 mr->umem = NULL; mlx4_ib_get_dma_mr()
90 struct ib_umem *umem) mlx4_ib_umem_write_mtt()
105 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { mlx4_ib_umem_write_mtt()
109 umem->page_size * k; mlx4_ib_umem_write_mtt()
149 mr->umem = ib_umem_get(pd->uobject->context, start, length, mlx4_ib_reg_user_mr()
151 if (IS_ERR(mr->umem)) { mlx4_ib_reg_user_mr()
152 err = PTR_ERR(mr->umem); mlx4_ib_reg_user_mr()
156 n = ib_umem_page_count(mr->umem); mlx4_ib_reg_user_mr()
157 shift = ilog2(mr->umem->page_size); mlx4_ib_reg_user_mr()
164 err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem); mlx4_ib_reg_user_mr()
180 ib_umem_release(mr->umem); mlx4_ib_reg_user_mr()
229 ib_umem_release(mmr->umem); mlx4_ib_rereg_user_mr()
230 mmr->umem = ib_umem_get(mr->uobject->context, start, length, mlx4_ib_rereg_user_mr()
234 if (IS_ERR(mmr->umem)) { mlx4_ib_rereg_user_mr()
235 err = PTR_ERR(mmr->umem); mlx4_ib_rereg_user_mr()
237 mmr->umem = NULL; mlx4_ib_rereg_user_mr()
240 n = ib_umem_page_count(mmr->umem); mlx4_ib_rereg_user_mr()
241 shift = ilog2(mmr->umem->page_size); mlx4_ib_rereg_user_mr()
247 ib_umem_release(mmr->umem); mlx4_ib_rereg_user_mr()
253 err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem); mlx4_ib_rereg_user_mr()
256 ib_umem_release(mmr->umem); mlx4_ib_rereg_user_mr()
330 if (mr->umem) mlx4_ib_dereg_mr()
331 ib_umem_release(mr->umem); mlx4_ib_dereg_mr()
433 mr->umem = NULL; mlx4_ib_alloc_mr()
89 mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt, struct ib_umem *umem) mlx4_ib_umem_write_mtt() argument
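
Note: the per-page walk in mlx4_ib_umem_write_mtt() has the same shape as the reg_user_mr() loops in mthca, qib, hfi1, ipath, cxgb3/cxgb4 and amso1100 further down. A hedged generalization (walk_umem_pages() and the callback are mine, not kernel APIs):

#include <linux/log2.h>
#include <linux/scatterlist.h>
#include <rdma/ib_umem.h>

/* Hypothetical generalization of the loops above: each DMA-mapped
 * scatterlist entry covers a whole number of umem->page_size pages, so
 * the per-page bus address is sg_dma_address() plus a page-sized
 * stride.  collect_page() stands in for the driver-specific write
 * (MTT entry, rkey table segment, PBL entry, ...). */
static void walk_umem_pages(struct ib_umem *umem,
			    void (*collect_page)(u64 dma_addr, void *ctx),
			    void *ctx)
{
	unsigned long page_shift = ilog2(umem->page_size);
	struct scatterlist *sg;
	int entry, k;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		unsigned int pages_in_sg = sg_dma_len(sg) >> page_shift;

		for (k = 0; k < pages_in_sg; k++)
			collect_page(sg_dma_address(sg) +
				     ((u64)umem->page_size * k), ctx);
	}
}
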
doorbell.c
39 struct ib_umem *umem; member in struct:mlx4_ib_user_db_page
64 page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK, mlx4_ib_db_map_user()
66 if (IS_ERR(page->umem)) { mlx4_ib_db_map_user()
67 err = PTR_ERR(page->umem); mlx4_ib_db_map_user()
75 db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK); mlx4_ib_db_map_user()
91 ib_umem_release(db->u.user_page->umem); mlx4_ib_db_unmap_user()
srq.c
117 srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, mlx4_ib_create_srq()
119 if (IS_ERR(srq->umem)) { mlx4_ib_create_srq()
120 err = PTR_ERR(srq->umem); mlx4_ib_create_srq()
124 err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem), mlx4_ib_create_srq()
125 ilog2(srq->umem->page_size), &srq->mtt); mlx4_ib_create_srq()
129 err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem); mlx4_ib_create_srq()
219 ib_umem_release(srq->umem); mlx4_ib_create_srq()
287 ib_umem_release(msrq->umem); mlx4_ib_destroy_srq()
cq.c
138 struct mlx4_ib_cq_buf *buf, struct ib_umem **umem, mlx4_ib_get_cq_umem()
144 *umem = ib_umem_get(context, buf_addr, cqe * cqe_size, mlx4_ib_get_cq_umem()
146 if (IS_ERR(*umem)) mlx4_ib_get_cq_umem()
147 return PTR_ERR(*umem); mlx4_ib_get_cq_umem()
149 err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem), mlx4_ib_get_cq_umem()
150 ilog2((*umem)->page_size), &buf->mtt); mlx4_ib_get_cq_umem()
154 err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem); mlx4_ib_get_cq_umem()
164 ib_umem_release(*umem); mlx4_ib_get_cq_umem()
210 err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem, mlx4_ib_create_cq()
269 ib_umem_release(cq->umem); mlx4_ib_create_cq()
323 err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf, mlx4_alloc_resize_umem()
424 ib_umem_release(cq->umem); mlx4_ib_resize_cq()
425 cq->umem = cq->resize_umem; mlx4_ib_resize_cq()
483 ib_umem_release(mcq->umem); mlx4_ib_destroy_cq()
137 mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context, struct mlx4_ib_cq_buf *buf, struct ib_umem **umem, u64 buf_addr, int cqe) mlx4_ib_get_cq_umem() argument
mlx4_ib.h
124 struct ib_umem *umem; member in struct:mlx4_ib_cq
141 struct ib_umem *umem; member in struct:mlx4_ib_mr
302 struct ib_umem *umem; member in struct:mlx4_ib_qp
337 struct ib_umem *umem; member in struct:mlx4_ib_srq
701 struct ib_umem *umem);
qp.c
737 qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, create_qp_common()
739 if (IS_ERR(qp->umem)) { create_qp_common()
740 err = PTR_ERR(qp->umem); create_qp_common()
744 err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem), create_qp_common()
745 ilog2(qp->umem->page_size), &qp->mtt); create_qp_common()
749 err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem); create_qp_common()
905 ib_umem_release(qp->umem); create_qp_common()
1070 ib_umem_release(qp->umem); destroy_qp_common()
/linux-4.4.14/drivers/staging/rdma/ipath/
ipath_mr.c
153 mr->umem = NULL; ipath_reg_phys_mr()
190 struct ib_umem *umem; ipath_reg_user_mr() local
200 umem = ib_umem_get(pd->uobject->context, start, length, ipath_reg_user_mr()
202 if (IS_ERR(umem)) ipath_reg_user_mr()
203 return (void *) umem; ipath_reg_user_mr()
205 n = umem->nmap; ipath_reg_user_mr()
209 ib_umem_release(umem); ipath_reg_user_mr()
217 mr->mr.offset = ib_umem_offset(umem); ipath_reg_user_mr()
220 mr->umem = umem; ipath_reg_user_mr()
224 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { ipath_reg_user_mr()
233 mr->mr.map[m]->segs[n].length = umem->page_size; ipath_reg_user_mr()
267 if (mr->umem) ipath_dereg_mr()
268 ib_umem_release(mr->umem); ipath_dereg_mr()
ipath_verbs.h
270 struct ib_umem *umem; member in struct:ipath_mr
/linux-4.4.14/drivers/staging/rdma/hfi1/
mr.c
251 struct ib_umem *umem; hfi1_reg_user_mr() local
261 umem = ib_umem_get(pd->uobject->context, start, length, hfi1_reg_user_mr()
263 if (IS_ERR(umem)) hfi1_reg_user_mr()
264 return (void *) umem; hfi1_reg_user_mr()
266 n = umem->nmap; hfi1_reg_user_mr()
271 ib_umem_release(umem); hfi1_reg_user_mr()
278 mr->mr.offset = ib_umem_offset(umem); hfi1_reg_user_mr()
280 mr->umem = umem; hfi1_reg_user_mr()
282 if (is_power_of_2(umem->page_size)) hfi1_reg_user_mr()
283 mr->mr.page_shift = ilog2(umem->page_size); hfi1_reg_user_mr()
286 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { hfi1_reg_user_mr()
295 mr->mr.map[m]->segs[n].length = umem->page_size; hfi1_reg_user_mr()
338 if (mr->umem) hfi1_dereg_mr()
339 ib_umem_release(mr->umem); hfi1_dereg_mr()
verbs.h
341 struct ib_umem *umem; member in struct:hfi1_mr
/linux-4.4.14/drivers/infiniband/hw/qib/
qib_mr.c
234 struct ib_umem *umem; qib_reg_user_mr() local
244 umem = ib_umem_get(pd->uobject->context, start, length, qib_reg_user_mr()
246 if (IS_ERR(umem)) qib_reg_user_mr()
247 return (void *) umem; qib_reg_user_mr()
249 n = umem->nmap; qib_reg_user_mr()
254 ib_umem_release(umem); qib_reg_user_mr()
261 mr->mr.offset = ib_umem_offset(umem); qib_reg_user_mr()
263 mr->umem = umem; qib_reg_user_mr()
265 if (is_power_of_2(umem->page_size)) qib_reg_user_mr()
266 mr->mr.page_shift = ilog2(umem->page_size); qib_reg_user_mr()
269 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { qib_reg_user_mr()
278 mr->mr.map[m]->segs[n].length = umem->page_size; qib_reg_user_mr()
318 if (mr->umem) qib_dereg_mr()
319 ib_umem_release(mr->umem); qib_dereg_mr()
qib_verbs.h
331 struct ib_umem *umem; member in struct:qib_mr
/linux-4.4.14/drivers/block/
Makefile
28 obj-$(CONFIG_BLK_DEV_UMEM) += umem.o
umem.c
21 * 08nov2001:NeilBrown - change driver name from "mm" to "umem"
27 * - unregister umem from devfs at mod unload
55 #include "umem.h"
70 #define DRIVER_NAME "umem"
1093 sprintf(disk->disk_name, "umem%c", 'a'+i); mm_init()
/linux-4.4.14/drivers/staging/rdma/amso1100/
c2_provider.c
403 mr->umem = NULL; c2_reg_phys_mr()
455 c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0); c2_reg_user_mr()
456 if (IS_ERR(c2mr->umem)) { c2_reg_user_mr()
457 err = PTR_ERR(c2mr->umem); c2_reg_user_mr()
462 shift = ffs(c2mr->umem->page_size) - 1; c2_reg_user_mr()
463 n = c2mr->umem->nmap; c2_reg_user_mr()
472 for_each_sg(c2mr->umem->sg_head.sgl, sg, c2mr->umem->nmap, entry) { c2_reg_user_mr()
477 (c2mr->umem->page_size * k); c2_reg_user_mr()
484 c2mr->umem->page_size, c2_reg_user_mr()
487 ib_umem_offset(c2mr->umem), c2_reg_user_mr()
497 ib_umem_release(c2mr->umem); c2_reg_user_mr()
513 if (mr->umem) c2_dereg_mr()
514 ib_umem_release(mr->umem); c2_dereg_mr()
c2_provider.h
76 struct ib_umem *umem; member in struct:c2_mr
/linux-4.4.14/drivers/remoteproc/
wkup_m3_rproc.c
130 /* umem always needs to be processed first */ wkup_m3_rproc_probe()
131 const char *mem_names[WKUPM3_MEM_MAX] = { "umem", "dmem" }; wkup_m3_rproc_probe()
187 * The wkupm3 has umem at address 0 in its view, so the device wkup_m3_rproc_probe()
189 * offset of the bus address for umem, and therefore needs to be wkup_m3_rproc_probe()
192 if (!strcmp(mem_names[i], "umem")) wkup_m3_rproc_probe()
/linux-4.4.14/drivers/infiniband/hw/usnic/
usnic_uiom.c
58 struct usnic_uiom_reg *umem = container_of(work, usnic_uiom_reg_account() local
61 down_write(&umem->mm->mmap_sem); usnic_uiom_reg_account()
62 umem->mm->locked_vm -= umem->diff; usnic_uiom_reg_account()
63 up_write(&umem->mm->mmap_sem); usnic_uiom_reg_account()
64 mmput(umem->mm); usnic_uiom_reg_account()
65 kfree(umem); usnic_uiom_reg_account()
usnic_ib_verbs.c
631 mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length, usnic_ib_reg_mr()
633 if (IS_ERR_OR_NULL(mr->umem)) { usnic_ib_reg_mr()
634 err = mr->umem ? PTR_ERR(mr->umem) : -EFAULT; usnic_ib_reg_mr()
650 usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length); usnic_ib_dereg_mr()
652 usnic_uiom_reg_release(mr->umem, ibmr->pd->uobject->context->closing); usnic_ib_dereg_mr()
usnic_ib.h
66 struct usnic_uiom_reg *umem; member in struct:usnic_ib_mr
usnic_ib_main.c
647 usnic_err("Unable to initalize umem with err %d\n", err); usnic_ib_init()
/linux-4.4.14/drivers/infiniband/hw/mthca/
mthca_provider.c
896 mr->umem = NULL; mthca_get_dma_mr()
979 mr->umem = NULL; mthca_reg_phys_mr()
1012 mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, mthca_reg_user_mr()
1015 if (IS_ERR(mr->umem)) { mthca_reg_user_mr()
1016 err = PTR_ERR(mr->umem); mthca_reg_user_mr()
1020 shift = ffs(mr->umem->page_size) - 1; mthca_reg_user_mr()
1021 n = mr->umem->nmap; mthca_reg_user_mr()
1039 for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) { mthca_reg_user_mr()
1043 mr->umem->page_size * k; mthca_reg_user_mr()
1077 ib_umem_release(mr->umem); mthca_reg_user_mr()
1089 if (mmr->umem) mthca_dereg_mr()
1090 ib_umem_release(mmr->umem); mthca_dereg_mr()
mthca_provider.h
75 struct ib_umem *umem; member in struct:mthca_mr
/linux-4.4.14/drivers/infiniband/hw/cxgb4/
mem.c
732 mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0); c4iw_reg_user_mr()
733 if (IS_ERR(mhp->umem)) { c4iw_reg_user_mr()
734 err = PTR_ERR(mhp->umem); c4iw_reg_user_mr()
739 shift = ffs(mhp->umem->page_size) - 1; c4iw_reg_user_mr()
741 n = mhp->umem->nmap; c4iw_reg_user_mr()
754 for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) { c4iw_reg_user_mr()
758 mhp->umem->page_size * k); c4iw_reg_user_mr()
798 ib_umem_release(mhp->umem); c4iw_reg_user_mr()
973 if (mhp->umem) c4iw_dereg_mr()
974 ib_umem_release(mhp->umem); c4iw_dereg_mr()
iw_cxgb4.h
385 struct ib_umem *umem; member in struct:c4iw_mr
/linux-4.4.14/drivers/infiniband/hw/cxgb3/
iwch_provider.c
475 if (mhp->umem) iwch_dereg_mr()
476 ib_umem_release(mhp->umem); iwch_dereg_mr()
648 mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0); iwch_reg_user_mr()
649 if (IS_ERR(mhp->umem)) { iwch_reg_user_mr()
650 err = PTR_ERR(mhp->umem); iwch_reg_user_mr()
655 shift = ffs(mhp->umem->page_size) - 1; iwch_reg_user_mr()
657 n = mhp->umem->nmap; iwch_reg_user_mr()
671 for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) { iwch_reg_user_mr()
675 mhp->umem->page_size * k); iwch_reg_user_mr()
724 ib_umem_release(mhp->umem); iwch_reg_user_mr()
iwch_provider.h
76 struct ib_umem *umem; member in struct:iwch_mr
/linux-4.4.14/drivers/staging/rdma/ehca/
ehca_mrmw.c
361 e_mr->umem = ib_umem_get(pd->uobject->context, start, length, ehca_reg_user_mr()
363 if (IS_ERR(e_mr->umem)) { ehca_reg_user_mr()
364 ib_mr = (void *)e_mr->umem; ehca_reg_user_mr()
368 if (e_mr->umem->page_size != PAGE_SIZE) { ehca_reg_user_mr()
370 "e_mr->umem->page_size=%x", e_mr->umem->page_size); ehca_reg_user_mr()
379 if (e_mr->umem->hugetlb) { ehca_reg_user_mr()
401 pginfo.u.usr.region = e_mr->umem; ehca_reg_user_mr()
402 pginfo.next_hwpage = ib_umem_offset(e_mr->umem) / hwpage_size; ehca_reg_user_mr()
428 ib_umem_release(e_mr->umem); ehca_reg_user_mr()
674 if (e_mr->umem) ehca_dereg_mr()
675 ib_umem_release(e_mr->umem); ehca_dereg_mr()
ehca_classes.h
271 struct ib_umem *umem; member in struct:ehca_mr
/linux-4.4.14/drivers/infiniband/hw/ocrdma/
ocrdma_verbs.c
907 struct ib_umem *umem = mr->umem; build_user_pbes() local
916 shift = ilog2(umem->page_size); build_user_pbes()
918 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { build_user_pbes()
925 (umem->page_size * pg_cnt)); build_user_pbes()
930 umem->page_size * pg_cnt))); build_user_pbes()
970 mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0); ocrdma_reg_user_mr()
971 if (IS_ERR(mr->umem)) { ocrdma_reg_user_mr()
975 num_pbes = ib_umem_page_count(mr->umem); ocrdma_reg_user_mr()
980 mr->hwmr.pbe_size = mr->umem->page_size; ocrdma_reg_user_mr()
981 mr->hwmr.fbo = ib_umem_offset(mr->umem); ocrdma_reg_user_mr()
1020 if (mr->umem) ocrdma_dereg_mr()
1021 ib_umem_release(mr->umem); ocrdma_dereg_mr()
ocrdma.h
194 struct ib_umem *umem; member in struct:ocrdma_mr
