Lines matching refs: umem

Judging by the function names, the matched lines come from the InfiniBand on-demand paging (ODP) umem code and fall into five functions: ib_umem_odp_get(), ib_umem_odp_release(), ib_umem_odp_map_dma_single_page(), ib_umem_odp_map_dma_pages() and ib_umem_odp_unmap_dma_pages().

242  int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem)
261  umem->hugetlb = 0;
262  umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL);
263  if (!umem->odp_data) {
267  umem->odp_data->umem = umem;
269  mutex_init(&umem->odp_data->umem_mutex);
271  init_completion(&umem->odp_data->notifier_completion);
273  umem->odp_data->page_list = vzalloc(ib_umem_num_pages(umem) *
274  sizeof(*umem->odp_data->page_list));
275  if (!umem->odp_data->page_list) {
280  umem->odp_data->dma_list = vzalloc(ib_umem_num_pages(umem) *
281  sizeof(*umem->odp_data->dma_list));
282  if (!umem->odp_data->dma_list) {
294  if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
295  rbt_ib_umem_insert(&umem->odp_data->interval_tree,
299  umem->odp_data->mn_counters_active = true;
301  list_add(&umem->odp_data->no_private_counters,
340  vfree(umem->odp_data->dma_list);
342  vfree(umem->odp_data->page_list);
344  kfree(umem->odp_data);
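
Taken together, these matches cover the setup path of ib_umem_odp_get(): the per-umem ODP bookkeeping structure is allocated, its page and DMA shadow arrays are sized from ib_umem_num_pages(), the range is registered in the per-context interval tree, and the tail of the listing (lines 340-344) is the error path unwinding those allocations in reverse order. A condensed sketch of that flow, pieced together from the matched lines; error labels, the umem_rwsem locking and the notifier-counter corner cases are paraphrased rather than verbatim:

	/*
	 * Condensed sketch of ib_umem_odp_get(), assembled from the
	 * matched lines above.  Error labels and the locking around the
	 * interval-tree insert are paraphrased, not copied.
	 */
	int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem)
	{
		umem->hugetlb = 0;
		umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL);
		if (!umem->odp_data)
			return -ENOMEM;
		umem->odp_data->umem = umem;

		mutex_init(&umem->odp_data->umem_mutex);
		init_completion(&umem->odp_data->notifier_completion);

		/* One slot per page: page_list holds the faulted struct page *,
		 * dma_list the DMA address with the access bits ORed in. */
		umem->odp_data->page_list = vzalloc(ib_umem_num_pages(umem) *
						    sizeof(*umem->odp_data->page_list));
		if (!umem->odp_data->page_list)
			goto out_odp_data;

		umem->odp_data->dma_list = vzalloc(ib_umem_num_pages(umem) *
						   sizeof(*umem->odp_data->dma_list));
		if (!umem->odp_data->dma_list)
			goto out_page_list;

		/* Non-empty ranges go into the per-context interval tree so
		 * MMU-notifier callbacks can find overlapping umems; the
		 * mn_counters_active / no_private_counters handling (lines
		 * 299 and 301) defers this when a notifier is already
		 * running, which is glossed over here. */
		if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
			rbt_ib_umem_insert(&umem->odp_data->interval_tree,
					   &context->umem_tree);
		umem->odp_data->mn_counters_active = true;
		return 0;

	out_page_list:
		vfree(umem->odp_data->page_list);
	out_odp_data:
		kfree(umem->odp_data);
		return -ENOMEM;
	}

The two vzalloc'd arrays are deliberately parallel: a page index computed once addresses both the struct page pointer and its DMA address, which is what the mapping helpers below rely on.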

350  void ib_umem_odp_release(struct ib_umem *umem)
352  struct ib_ucontext *context = umem->context;
360  ib_umem_odp_unmap_dma_pages(umem, ib_umem_start(umem),
361  ib_umem_end(umem));
364  if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
365  rbt_ib_umem_remove(&umem->odp_data->interval_tree,
368  if (!umem->odp_data->mn_counters_active) {
369  list_del(&umem->odp_data->no_private_counters);
370  complete_all(&umem->odp_data->notifier_completion);
411  vfree(umem->odp_data->dma_list);
412  vfree(umem->odp_data->page_list);
413  kfree(umem->odp_data);
414  kfree(umem);
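
The release path mirrors the setup: ib_umem_odp_release() unmaps whatever is still mapped across the whole range, removes the umem from the interval tree, finishes the notifier bookkeeping if it never became active, and frees the shadow arrays, the ODP data and the umem itself. A sketch assembled from the matched lines, with the umem_rwsem locking around the tree removal omitted:

	/* Sketch of ib_umem_odp_release(); the locking around the
	 * interval-tree removal is omitted. */
	void ib_umem_odp_release(struct ib_umem *umem)
	{
		struct ib_ucontext *context = umem->context;

		/* Drop any pages still mapped anywhere in the range. */
		ib_umem_odp_unmap_dma_pages(umem, ib_umem_start(umem),
					    ib_umem_end(umem));

		/* Only non-empty ranges were inserted, so only they are
		 * removed. */
		if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
			rbt_ib_umem_remove(&umem->odp_data->interval_tree,
					   &context->umem_tree);

		/* If the notifier counters never became active, take this
		 * umem off the pending list and wake any waiter. */
		if (!umem->odp_data->mn_counters_active) {
			list_del(&umem->odp_data->no_private_counters);
			complete_all(&umem->odp_data->notifier_completion);
		}

		vfree(umem->odp_data->dma_list);
		vfree(umem->odp_data->page_list);
		kfree(umem->odp_data);
		kfree(umem);
	}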

436  struct ib_umem *umem,        (parameter of ib_umem_odp_map_dma_single_page())
443  struct ib_device *dev = umem->context->device;
454  if (ib_umem_mmu_notifier_retry(umem, current_seq)) {
458  if (!(umem->odp_data->dma_list[page_index])) {
467  umem->odp_data->dma_list[page_index] = dma_addr | access_mask;
468  umem->odp_data->page_list[page_index] = page;
470  } else if (umem->odp_data->page_list[page_index] == page) {
471  umem->odp_data->dma_list[page_index] |= access_mask;
474  umem->odp_data->page_list[page_index], page);
482  if (umem->context->invalidate_range || !stored_page)
485  if (remove_existing_mapping && umem->context->invalidate_range) {
487  umem,
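
ib_umem_odp_map_dma_single_page() fills one slot of those parallel arrays. Reading the matches in order: if the MMU-notifier sequence count changed since the page was faulted the attempt is abandoned, an empty slot gets the page DMA-mapped and recorded, a repeat of the same page only widens the access bits, and a different page in the slot forces the stale mapping to be torn down. A hedged sketch of that decision; the mapping call, the error handling and the teardown of a stale mapping are paraphrased from the matched lines rather than copied:

	/*
	 * Sketch of the per-slot logic in ib_umem_odp_map_dma_single_page(),
	 * reconstructed from the matched lines.  Locals such as
	 * stored_page and remove_existing_mapping come from the
	 * surrounding function, which is not reproduced here.
	 */
	struct ib_device *dev = umem->context->device;
	dma_addr_t dma_addr;

	/* Bail out and let the caller retry if an MMU-notifier
	 * invalidation raced with this page fault. */
	if (ib_umem_mmu_notifier_retry(umem, current_seq))
		return -EAGAIN;

	if (!(umem->odp_data->dma_list[page_index])) {
		/* Empty slot: map the page and record the DMA address
		 * (access bits ORed in) and the struct page side by side. */
		dma_addr = ib_dma_map_page(dev, page, 0, PAGE_SIZE,
					   DMA_BIDIRECTIONAL);
		if (ib_dma_mapping_error(dev, dma_addr))
			return -EFAULT;
		umem->odp_data->dma_list[page_index] = dma_addr | access_mask;
		umem->odp_data->page_list[page_index] = page;
		stored_page = 1;
	} else if (umem->odp_data->page_list[page_index] == page) {
		/* Same page already present: only widen the access rights. */
		umem->odp_data->dma_list[page_index] |= access_mask;
	} else {
		/* A different page occupies the slot (line 474 is the tail
		 * of the error print showing both pages): the stale mapping
		 * is removed via ib_umem_odp_unmap_dma_pages() when the
		 * context supports invalidate_range (lines 485-487). */
		remove_existing_mapping = 1;
	}

	/* The extra reference taken by get_user_pages is dropped here
	 * unless the page was stored and the context has no
	 * invalidate_range callback to release it later (line 482). */
	if (umem->context->invalidate_range || !stored_page)
		put_page(page);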

521  int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
534  if (user_virt < ib_umem_start(umem) ||
535  user_virt + bcnt > ib_umem_end(umem))
547  owning_process = get_pid_task(umem->context->tgid, PIDTYPE_PID);
559  start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
586  mutex_lock(&umem->odp_data->umem_mutex);
589  umem, k, base_virt_addr, local_page_list[j],
595  mutex_unlock(&umem->odp_data->umem_mutex);
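
The range variant, ib_umem_odp_map_dma_pages(), first checks that the requested window lies inside the umem, looks up the owning task from context->tgid, converts the start address into an index into the shadow arrays, and then, under umem_mutex, feeds each page obtained from get_user_pages into the single-page helper. A simplified sketch; the get_user_pages step that fills local_page_list, the chunking over bcnt and the cleanup paths are condensed, and npages stands in for the number of pages returned for the current chunk:

	/*
	 * Simplified sketch of ib_umem_odp_map_dma_pages(), based on the
	 * matched lines.  local_page_list is the temporary array filled
	 * by the (omitted) get_user_pages step.
	 */
	if (user_virt < ib_umem_start(umem) ||
	    user_virt + bcnt > ib_umem_end(umem))
		return -EFAULT;

	/* Pages are faulted in on behalf of the process that registered
	 * the MR, not necessarily current. */
	owning_process = get_pid_task(umem->context->tgid, PIDTYPE_PID);

	/* Index of the first requested page inside the shadow arrays. */
	start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;

	/* ... get_user_pages fills local_page_list for this chunk ... */

	mutex_lock(&umem->odp_data->umem_mutex);
	for (j = 0, k = start_idx; j < npages; j++, k++) {
		ret = ib_umem_odp_map_dma_single_page(
				umem, k, base_virt_addr, local_page_list[j],
				access_mask, current_seq);
		if (ret < 0)
			break;
	}
	mutex_unlock(&umem->odp_data->umem_mutex);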

621  void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
626  struct ib_device *dev = umem->context->device;
628  virt = max_t(u64, virt, ib_umem_start(umem));
629  bound = min_t(u64, bound, ib_umem_end(umem));
635  mutex_lock(&umem->odp_data->umem_mutex);
636  for (addr = virt; addr < bound; addr += (u64)umem->page_size) {
637  idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
638  if (umem->odp_data->page_list[idx]) {
639  struct page *page = umem->odp_data->page_list[idx];
640  dma_addr_t dma = umem->odp_data->dma_list[idx];
661  if (!umem->context->invalidate_range)
663  umem->odp_data->page_list[idx] = NULL;
664  umem->odp_data->dma_list[idx] = 0;
667  mutex_unlock(&umem->odp_data->umem_mutex);
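
Finally, ib_umem_odp_unmap_dma_pages() clamps the requested range to the umem and walks it page by page under umem_mutex, unmapping and clearing every populated slot. A sketch reconstructed from the matched lines; the dirty-page handling between lines 640 and 661 is omitted and the exact unmap call is paraphrased:

	/* Sketch of the teardown loop in ib_umem_odp_unmap_dma_pages(). */
	struct ib_device *dev = umem->context->device;

	virt  = max_t(u64, virt, ib_umem_start(umem));
	bound = min_t(u64, bound, ib_umem_end(umem));

	mutex_lock(&umem->odp_data->umem_mutex);
	for (addr = virt; addr < bound; addr += (u64)umem->page_size) {
		idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
		if (umem->odp_data->page_list[idx]) {
			struct page *page = umem->odp_data->page_list[idx];
			dma_addr_t dma = umem->odp_data->dma_list[idx];

			/* dma_list stores the address with access bits ORed
			 * in, so they are masked off before unmapping. */
			ib_dma_unmap_page(dev, dma & ODP_DMA_ADDR_MASK,
					  PAGE_SIZE, DMA_BIDIRECTIONAL);
			if (!umem->context->invalidate_range)
				put_page(page);
			umem->odp_data->page_list[idx] = NULL;
			umem->odp_data->dma_list[idx] = 0;
		}
	}
	mutex_unlock(&umem->odp_data->umem_mutex);

The invalidate_range test on line 661 is the counterpart of the one in the single-page mapper: when the context has an invalidate_range callback the mapping path already dropped the extra page reference, so only contexts without it release the reference at unmap time.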