Lines matching refs:umem (all hits below are in drivers/infiniband/core/umem.c):
__ib_umem_release():

  47  static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
  53      if (umem->nmap > 0)
  54          ib_dma_unmap_sg(dev, umem->sg_head.sgl,
  55                          umem->nmap,
  58      for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
  61          if (umem->writable && dirty)
  66      sg_free_table(&umem->sg_head);
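These hits are the teardown helper: undo the DMA mapping, unpin every page (marking it dirty first if the mapping was writable and the caller requested it), then free the scatterlist. A minimal sketch of the pattern the elided lines complete, assuming the pre-4.15 pinning API (sg_page()/set_page_dirty_lock()/put_page()); the function name here is illustrative, not the kernel's:

    static void umem_unpin_pages(struct ib_device *dev, struct ib_umem *umem,
                                 int dirty)
    {
            struct scatterlist *sg;
            struct page *page;
            int i;

            /* undo the mapping created by ib_dma_map_sg_attrs() */
            if (umem->nmap > 0)
                    ib_dma_unmap_sg(dev, umem->sg_head.sgl, umem->nmap,
                                    DMA_BIDIRECTIONAL);

            /* drop the reference taken when each page was pinned; pages the
             * device may have written are marked dirty so the VM writes
             * them back */
            for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
                    page = sg_page(sg);
                    if (umem->writable && dirty)
                            set_page_dirty_lock(page);
                    put_page(page);
            }

            sg_free_table(&umem->sg_head);
    }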
ib_umem_get():

  86      struct ib_umem *umem;
 116      umem = kzalloc(sizeof *umem, GFP_KERNEL);
 117      if (!umem)
 120      umem->context = context;
 121      umem->length = size;
 122      umem->address = addr;
 123      umem->page_size = PAGE_SIZE;
 124      umem->pid = get_task_pid(current, PIDTYPE_PID);
 132      umem->writable = !!(access &
 137          ret = ib_umem_odp_get(context, umem);
 139              kfree(umem);
 142          return umem;
 145      umem->odp_data = NULL;
 148          umem->hugetlb = 1;
 152          kfree(umem);
 162      umem->hugetlb = 0;
 164      npages = ib_umem_num_pages(umem);
 183      ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
 188      sg_list_start = umem->sg_head.sgl;
 194                           1, !umem->writable, page_list, vma_list);
 199          umem->npages += ret;
 205              umem->hugetlb = 0;
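Up to this point ib_umem_get() allocates the ib_umem, records the length, address, page size, and owning pid, takes the early exit into ib_umem_odp_get() for on-demand-paging registrations, and then pins the user range in page_list-sized batches, feeding each batch into the scatterlist. A hedged sketch of that batching loop, assuming the pre-4.6 eight-argument get_user_pages() (the tail at line 194 shows write = 1, force = !umem->writable); the bookkeeping around the hits is reconstructed, not verbatim:

    while (npages) {
            ret = get_user_pages(current, current->mm, cur_base,
                                 min_t(unsigned long, npages,
                                       PAGE_SIZE / sizeof(struct page *)),
                                 1, !umem->writable, page_list, vma_list);
            if (ret < 0)
                    goto out;

            umem->npages += ret;
            cur_base += ret * PAGE_SIZE;
            npages -= ret;

            for_each_sg(sg_list_start, sg, ret, i) {
                    /* any non-hugetlb VMA in the range clears the flag */
                    if (vma_list && !is_vm_hugetlb_page(vma_list[i]))
                            umem->hugetlb = 0;

                    sg_set_page(sg, page_list[i], PAGE_SIZE, 0);
            }

            /* continue filling the table where this batch stopped */
            sg_list_start = sg;
    }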
ib_umem_get(), continued:

 214      umem->nmap = ib_dma_map_sg_attrs(context->device,
 215                                       umem->sg_head.sgl,
 216                                       umem->npages,
 220      if (umem->nmap <= 0) {
 230      __ib_umem_release(context->device, umem, 0);
 231      put_pid(umem->pid);
 232      kfree(umem);
 241      return ret < 0 ? ERR_PTR(ret) : umem;
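The tail DMA-maps the whole table in one ib_dma_map_sg_attrs() call and treats a result <= 0 as failure, releasing everything it pinned. Note the return convention at line 241: callers get either a valid ib_umem or an ERR_PTR(), never NULL. A hypothetical driver-side caller (program_hw_page() and the surrounding names are illustrative, not from any real driver):

    struct ib_umem *umem;
    struct scatterlist *sg;
    int i;

    umem = ib_umem_get(pd->uobject->context, start, length,
                       access_flags, 0 /* dmasync */);
    if (IS_ERR(umem))
            return ERR_CAST(umem);

    /* hand each mapped DMA segment to the HCA */
    for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
            program_hw_page(mr, sg_dma_address(sg), sg_dma_len(sg));

    /* ... and on deregistration, drop the pin: */
    ib_umem_release(umem);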
ib_umem_account():

 247      struct ib_umem *umem = container_of(work, struct ib_umem, work);
 249      down_write(&umem->mm->mmap_sem);
 250      umem->mm->pinned_vm -= umem->diff;
 251      up_write(&umem->mm->mmap_sem);
 252      mmput(umem->mm);
 253      kfree(umem);
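This work item lets the pinned-page accounting happen in process context: it takes the saved mm's mmap_sem, subtracts the pages the umem had pinned from pinned_vm, and drops the mm reference taken before the work was queued. The hits cover nearly the whole body; filled in, it is essentially (sketch name, not the kernel's):

    static void umem_account_sketch(struct work_struct *work)
    {
            struct ib_umem *umem = container_of(work, struct ib_umem, work);

            down_write(&umem->mm->mmap_sem);
            umem->mm->pinned_vm -= umem->diff;
            up_write(&umem->mm->mmap_sem);
            mmput(umem->mm);        /* pairs with the grab made before queue_work() */
            kfree(umem);
    }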
ib_umem_release():

 260  void ib_umem_release(struct ib_umem *umem)
 262      struct ib_ucontext *context = umem->context;
 267      if (umem->odp_data) {
 268          ib_umem_odp_release(umem);
 272      __ib_umem_release(umem->context->device, umem, 1);
 274      task = get_pid_task(umem->pid, PIDTYPE_PID);
 275      put_pid(umem->pid);
 283      diff = ib_umem_num_pages(umem);
 295          INIT_WORK(&umem->work, ib_umem_account);
 296          umem->mm = mm;
 297          umem->diff = diff;
 299          queue_work(ib_wq, &umem->work);
 309      kfree(umem);
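Release first short-circuits ODP registrations into ib_umem_odp_release(), then unpins via __ib_umem_release() with dirty = 1 and settles the pinned_vm accounting. The INIT_WORK/queue_work hits are the fallback path: when the ucontext is being torn down, sleeping on mmap_sem here could deadlock, so the subtraction is deferred to the ib_umem_account() work item above. A hedged reconstruction of that decision, assuming the era's context->closing flag:

    if (context->closing) {
            /* cannot sleep on mmap_sem during teardown; defer if contended */
            if (!down_write_trylock(&mm->mmap_sem)) {
                    INIT_WORK(&umem->work, ib_umem_account);
                    umem->mm = mm;
                    umem->diff = diff;
                    queue_work(ib_wq, &umem->work);
                    return;
            }
    } else {
            down_write(&mm->mmap_sem);
    }

    mm->pinned_vm -= diff;
    up_write(&mm->mmap_sem);
    mmput(mm);
    kfree(umem);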
ib_umem_page_count():

 313  int ib_umem_page_count(struct ib_umem *umem)
 320      if (umem->odp_data)
 321          return ib_umem_num_pages(umem);
 323      shift = ilog2(umem->page_size);
 326      for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
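The loop at line 326 is truncated; the non-ODP path just sums the DMA segment lengths in units of the umem page size. With the missing body restored:

    /* sketch of the non-ODP count: segments may be merged by the IOMMU,
     * so walk the nmap mapped entries rather than npages */
    shift = ilog2(umem->page_size);

    n = 0;
    for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
            n += sg_dma_len(sg) >> shift;

    return n;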
ib_umem_copy_from():

 343  int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
 349      if (offset > umem->length || length > umem->length - offset) {
 351                 offset, umem->length, end);
 355      ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->nmap, dst, length,
 356                               offset + ib_umem_offset(umem));
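The bounds check at line 349 is written overflow-safely: "length > umem->length - offset" cannot wrap, unlike the naive "offset + length > umem->length". The copy itself is delegated to sg_pcopy_to_buffer(), with ib_umem_offset() added so the sub-page start of the user buffer is skipped. A hypothetical caller pulling a small header out of a registered region:

    u8 hdr[64];
    int ret;

    /* copy sizeof(hdr) bytes starting at byte 0 of the user buffer */
    ret = ib_umem_copy_from(hdr, umem, 0, sizeof(hdr));
    if (ret)
            pr_err("umem header copy failed: %d\n", ret);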