umem_odp 51 drivers/infiniband/core/umem_odp.c static void ib_umem_notifier_start_account(struct ib_umem_odp *umem_odp)
umem_odp 53 drivers/infiniband/core/umem_odp.c mutex_lock(&umem_odp->umem_mutex);
umem_odp 54 drivers/infiniband/core/umem_odp.c if (umem_odp->notifiers_count++ == 0)
umem_odp 60 drivers/infiniband/core/umem_odp.c reinit_completion(&umem_odp->notifier_completion);
umem_odp 61 drivers/infiniband/core/umem_odp.c mutex_unlock(&umem_odp->umem_mutex);
umem_odp 64 drivers/infiniband/core/umem_odp.c static void ib_umem_notifier_end_account(struct ib_umem_odp *umem_odp)
umem_odp 66 drivers/infiniband/core/umem_odp.c mutex_lock(&umem_odp->umem_mutex);
umem_odp 71 drivers/infiniband/core/umem_odp.c ++umem_odp->notifiers_seq;
umem_odp 72 drivers/infiniband/core/umem_odp.c if (--umem_odp->notifiers_count == 0)
umem_odp 73 drivers/infiniband/core/umem_odp.c complete_all(&umem_odp->notifier_completion);
umem_odp 74 drivers/infiniband/core/umem_odp.c mutex_unlock(&umem_odp->umem_mutex);
umem_odp 90 drivers/infiniband/core/umem_odp.c struct ib_umem_odp *umem_odp =
umem_odp 97 drivers/infiniband/core/umem_odp.c ib_umem_notifier_start_account(umem_odp);
umem_odp 98 drivers/infiniband/core/umem_odp.c complete_all(&umem_odp->notifier_completion);
umem_odp 99 drivers/infiniband/core/umem_odp.c umem_odp->umem.ibdev->ops.invalidate_range(
umem_odp 100 drivers/infiniband/core/umem_odp.c umem_odp, ib_umem_start(umem_odp),
umem_odp 101 drivers/infiniband/core/umem_odp.c ib_umem_end(umem_odp));
umem_odp 207 drivers/infiniband/core/umem_odp.c static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp)
umem_odp 213 drivers/infiniband/core/umem_odp.c umem_odp->umem.is_odp = 1;
umem_odp 214 drivers/infiniband/core/umem_odp.c if (!umem_odp->is_implicit_odp) {
umem_odp 215 drivers/infiniband/core/umem_odp.c size_t page_size = 1UL << umem_odp->page_shift;
umem_odp 218 drivers/infiniband/core/umem_odp.c umem_odp->interval_tree.start =
umem_odp 219 drivers/infiniband/core/umem_odp.c ALIGN_DOWN(umem_odp->umem.address, page_size);
umem_odp 220 drivers/infiniband/core/umem_odp.c if (check_add_overflow(umem_odp->umem.address,
umem_odp 221 drivers/infiniband/core/umem_odp.c (unsigned long)umem_odp->umem.length,
umem_odp 222 drivers/infiniband/core/umem_odp.c &umem_odp->interval_tree.last))
umem_odp 224 drivers/infiniband/core/umem_odp.c umem_odp->interval_tree.last =
umem_odp 225 drivers/infiniband/core/umem_odp.c ALIGN(umem_odp->interval_tree.last, page_size);
umem_odp 226 drivers/infiniband/core/umem_odp.c if (unlikely(umem_odp->interval_tree.last < page_size))
umem_odp 229 drivers/infiniband/core/umem_odp.c pages = (umem_odp->interval_tree.last -
umem_odp 230 drivers/infiniband/core/umem_odp.c umem_odp->interval_tree.start) >>
umem_odp 231 drivers/infiniband/core/umem_odp.c umem_odp->page_shift;
umem_odp 240 drivers/infiniband/core/umem_odp.c umem_odp->interval_tree.last--;
umem_odp 242 drivers/infiniband/core/umem_odp.c umem_odp->page_list = kvcalloc(
umem_odp 243 drivers/infiniband/core/umem_odp.c pages, sizeof(*umem_odp->page_list), GFP_KERNEL);
umem_odp 244 drivers/infiniband/core/umem_odp.c if (!umem_odp->page_list)
umem_odp 247 drivers/infiniband/core/umem_odp.c umem_odp->dma_list = kvcalloc(
umem_odp 248 drivers/infiniband/core/umem_odp.c pages, sizeof(*umem_odp->dma_list), GFP_KERNEL);
umem_odp 249 drivers/infiniband/core/umem_odp.c if (!umem_odp->dma_list) {
umem_odp 255 drivers/infiniband/core/umem_odp.c mn = mmu_notifier_get(&ib_umem_notifiers, umem_odp->umem.owning_mm);
umem_odp 260 drivers/infiniband/core/umem_odp.c umem_odp->per_mm = per_mm =
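Note on the bounds math in ib_init_umem_odp (umem_odp.c lines 213-240 above): the interval tree start is the user address aligned down to a page boundary, the end is address + length aligned up (with the addition checked via check_add_overflow()), the page count is taken while last is still the exclusive end, and only then is last decremented so the tree stores an inclusive bound. A minimal userspace sketch of the same arithmetic, assuming a 4 KiB page size; ALIGN_UP/ALIGN_DOWN are local stand-ins for the kernel macros and the overflow check is omitted:

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-ins for the kernel's ALIGN()/ALIGN_DOWN() macros. */
#define ALIGN_UP(x, a)   (((x) + ((a) - 1)) & ~((a) - 1))
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

int main(void)
{
	unsigned int page_shift = 12;            /* PAGE_SHIFT, typically */
	uint64_t page_size = 1ULL << page_shift;
	uint64_t address = 0x7f0000001234ULL;    /* arbitrary user VA */
	uint64_t length = 0x5000;                /* unaligned span */

	uint64_t start = ALIGN_DOWN(address, page_size);
	uint64_t last = ALIGN_UP(address + length, page_size); /* exclusive */
	uint64_t pages = (last - start) >> page_shift;

	last--; /* the interval tree stores an inclusive last */

	printf("start=%#llx last=%#llx pages=%llu\n",
	       (unsigned long long)start, (unsigned long long)last,
	       (unsigned long long)pages);
	return 0;
}

With the values above this prints start=0x7f0000001000 last=0x7f0000006fff pages=6, which is the element count the kvcalloc() calls at lines 242-248 size page_list and dma_list for.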
umem_odp 263 drivers/infiniband/core/umem_odp.c mutex_init(&umem_odp->umem_mutex);
umem_odp 264 drivers/infiniband/core/umem_odp.c init_completion(&umem_odp->notifier_completion);
umem_odp 266 drivers/infiniband/core/umem_odp.c if (!umem_odp->is_implicit_odp) {
umem_odp 268 drivers/infiniband/core/umem_odp.c interval_tree_insert(&umem_odp->interval_tree,
umem_odp 272 drivers/infiniband/core/umem_odp.c mmgrab(umem_odp->umem.owning_mm);
umem_odp 277 drivers/infiniband/core/umem_odp.c kvfree(umem_odp->dma_list);
umem_odp 279 drivers/infiniband/core/umem_odp.c kvfree(umem_odp->page_list);
umem_odp 300 drivers/infiniband/core/umem_odp.c struct ib_umem_odp *umem_odp;
umem_odp 311 drivers/infiniband/core/umem_odp.c umem_odp = kzalloc(sizeof(*umem_odp), GFP_KERNEL);
umem_odp 312 drivers/infiniband/core/umem_odp.c if (!umem_odp)
umem_odp 314 drivers/infiniband/core/umem_odp.c umem = &umem_odp->umem;
umem_odp 318 drivers/infiniband/core/umem_odp.c umem_odp->is_implicit_odp = 1;
umem_odp 319 drivers/infiniband/core/umem_odp.c umem_odp->page_shift = PAGE_SHIFT;
umem_odp 321 drivers/infiniband/core/umem_odp.c ret = ib_init_umem_odp(umem_odp);
umem_odp 323 drivers/infiniband/core/umem_odp.c kfree(umem_odp);
umem_odp 326 drivers/infiniband/core/umem_odp.c return umem_odp;
umem_odp 388 drivers/infiniband/core/umem_odp.c struct ib_umem_odp *umem_odp;
umem_odp 405 drivers/infiniband/core/umem_odp.c umem_odp = kzalloc(sizeof(struct ib_umem_odp), GFP_KERNEL);
umem_odp 406 drivers/infiniband/core/umem_odp.c if (!umem_odp)
umem_odp 409 drivers/infiniband/core/umem_odp.c umem_odp->umem.ibdev = context->device;
umem_odp 410 drivers/infiniband/core/umem_odp.c umem_odp->umem.length = size;
umem_odp 411 drivers/infiniband/core/umem_odp.c umem_odp->umem.address = addr;
umem_odp 412 drivers/infiniband/core/umem_odp.c umem_odp->umem.writable = ib_access_writable(access);
umem_odp 413 drivers/infiniband/core/umem_odp.c umem_odp->umem.owning_mm = mm = current->mm;
umem_odp 415 drivers/infiniband/core/umem_odp.c umem_odp->page_shift = PAGE_SHIFT;
umem_odp 421 drivers/infiniband/core/umem_odp.c vma = find_vma(mm, ib_umem_start(umem_odp));
umem_odp 428 drivers/infiniband/core/umem_odp.c umem_odp->page_shift = huge_page_shift(h);
umem_odp 432 drivers/infiniband/core/umem_odp.c ret = ib_init_umem_odp(umem_odp);
umem_odp 435 drivers/infiniband/core/umem_odp.c return umem_odp;
umem_odp 438 drivers/infiniband/core/umem_odp.c kfree(umem_odp);
umem_odp 443 drivers/infiniband/core/umem_odp.c void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
umem_odp 445 drivers/infiniband/core/umem_odp.c struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
umem_odp 453 drivers/infiniband/core/umem_odp.c if (!umem_odp->is_implicit_odp) {
umem_odp 454 drivers/infiniband/core/umem_odp.c mutex_lock(&umem_odp->umem_mutex);
umem_odp 455 drivers/infiniband/core/umem_odp.c ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
umem_odp 456 drivers/infiniband/core/umem_odp.c ib_umem_end(umem_odp));
umem_odp 457 drivers/infiniband/core/umem_odp.c mutex_unlock(&umem_odp->umem_mutex);
umem_odp 458 drivers/infiniband/core/umem_odp.c kvfree(umem_odp->dma_list);
umem_odp 459 drivers/infiniband/core/umem_odp.c kvfree(umem_odp->page_list);
umem_odp 463 drivers/infiniband/core/umem_odp.c if (!umem_odp->is_implicit_odp) {
umem_odp 464 drivers/infiniband/core/umem_odp.c interval_tree_remove(&umem_odp->interval_tree,
umem_odp 466 drivers/infiniband/core/umem_odp.c complete_all(&umem_odp->notifier_completion);
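The two allocation paths above (umem_odp.c lines 300-326 for an implicit ODP umem with no fixed range, lines 388-438 for an explicit range, where a hugetlb-backed VMA raises page_shift via huge_page_shift()) pair with ib_umem_odp_release() at line 443. A hedged kernel-side sketch of the caller pattern, modeled on the mlx5 code further down this listing (odp.c lines 554-566); example_get_implicit() and example_driver_setup() are illustrative names, not kernel APIs:

/* Sketch only: mirrors the mlx5 implicit-MR setup shown below in this
 * listing. Only ib_umem_odp_alloc_implicit() and ib_umem_odp_release()
 * are real APIs from include/rdma/ib_umem_odp.h.
 */
static struct ib_umem_odp *example_get_implicit(struct ib_udata *udata,
						int access_flags)
{
	struct ib_umem_odp *umem_odp;
	int err;

	umem_odp = ib_umem_odp_alloc_implicit(udata, access_flags);
	if (IS_ERR(umem_odp))
		return umem_odp;

	err = example_driver_setup(umem_odp); /* hypothetical driver hook */
	if (err) {
		ib_umem_odp_release(umem_odp); /* undoes mmgrab + notifier */
		return ERR_PTR(err);
	}
	return umem_odp;
}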
umem_odp 479 drivers/infiniband/core/umem_odp.c mmdrop(umem_odp->umem.owning_mm);
umem_odp 480 drivers/infiniband/core/umem_odp.c kfree(umem_odp);
umem_odp 503 drivers/infiniband/core/umem_odp.c struct ib_umem_odp *umem_odp,
umem_odp 509 drivers/infiniband/core/umem_odp.c struct ib_device *dev = umem_odp->umem.ibdev;
umem_odp 519 drivers/infiniband/core/umem_odp.c if (ib_umem_mmu_notifier_retry(umem_odp, current_seq)) {
umem_odp 523 drivers/infiniband/core/umem_odp.c if (!(umem_odp->dma_list[page_index])) {
umem_odp 525 drivers/infiniband/core/umem_odp.c ib_dma_map_page(dev, page, 0, BIT(umem_odp->page_shift),
umem_odp 531 drivers/infiniband/core/umem_odp.c umem_odp->dma_list[page_index] = dma_addr | access_mask;
umem_odp 532 drivers/infiniband/core/umem_odp.c umem_odp->page_list[page_index] = page;
umem_odp 533 drivers/infiniband/core/umem_odp.c umem_odp->npages++;
umem_odp 534 drivers/infiniband/core/umem_odp.c } else if (umem_odp->page_list[page_index] == page) {
umem_odp 535 drivers/infiniband/core/umem_odp.c umem_odp->dma_list[page_index] |= access_mask;
umem_odp 538 drivers/infiniband/core/umem_odp.c umem_odp->page_list[page_index], page);
umem_odp 548 drivers/infiniband/core/umem_odp.c ib_umem_notifier_start_account(umem_odp);
umem_odp 550 drivers/infiniband/core/umem_odp.c umem_odp,
umem_odp 551 drivers/infiniband/core/umem_odp.c ib_umem_start(umem_odp) +
umem_odp 552 drivers/infiniband/core/umem_odp.c (page_index << umem_odp->page_shift),
umem_odp 553 drivers/infiniband/core/umem_odp.c ib_umem_start(umem_odp) +
umem_odp 554 drivers/infiniband/core/umem_odp.c ((page_index + 1) << umem_odp->page_shift));
umem_odp 555 drivers/infiniband/core/umem_odp.c ib_umem_notifier_end_account(umem_odp);
umem_odp 587 drivers/infiniband/core/umem_odp.c int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
umem_odp 592 drivers/infiniband/core/umem_odp.c struct mm_struct *owning_mm = umem_odp->umem.owning_mm;
umem_odp 602 drivers/infiniband/core/umem_odp.c if (user_virt < ib_umem_start(umem_odp) ||
umem_odp 603 drivers/infiniband/core/umem_odp.c user_virt + bcnt > ib_umem_end(umem_odp))
umem_odp 610 drivers/infiniband/core/umem_odp.c page_shift = umem_odp->page_shift;
umem_odp 621 drivers/infiniband/core/umem_odp.c owning_process = get_pid_task(umem_odp->per_mm->tgid, PIDTYPE_PID);
umem_odp 630 drivers/infiniband/core/umem_odp.c start_idx = (user_virt - ib_umem_start(umem_odp)) >> page_shift;
umem_odp 660 drivers/infiniband/core/umem_odp.c mutex_lock(&umem_odp->umem_mutex);
umem_odp 673 drivers/infiniband/core/umem_odp.c umem_odp, k, local_page_list[j],
umem_odp 686 drivers/infiniband/core/umem_odp.c mutex_unlock(&umem_odp->umem_mutex);
umem_odp 717 drivers/infiniband/core/umem_odp.c void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
umem_odp 722 drivers/infiniband/core/umem_odp.c struct ib_device *dev = umem_odp->umem.ibdev;
umem_odp 724 drivers/infiniband/core/umem_odp.c lockdep_assert_held(&umem_odp->umem_mutex);
umem_odp 726 drivers/infiniband/core/umem_odp.c virt = max_t(u64, virt, ib_umem_start(umem_odp));
umem_odp 727 drivers/infiniband/core/umem_odp.c bound = min_t(u64, bound, ib_umem_end(umem_odp));
umem_odp 733 drivers/infiniband/core/umem_odp.c for (addr = virt; addr < bound; addr += BIT(umem_odp->page_shift)) {
umem_odp 734 drivers/infiniband/core/umem_odp.c idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
umem_odp 735 drivers/infiniband/core/umem_odp.c if (umem_odp->page_list[idx]) {
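Both the fault path (start_idx at umem_odp.c line 630) and the teardown path (idx at line 734) locate a slot in page_list/dma_list the same way: byte offset from ib_umem_start(), shifted down by page_shift. A standalone sketch of that indexing, assuming a 4 KiB page shift and arbitrary example addresses:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int page_shift = 12;
	uint64_t umem_start = 0x7f0000001000ULL; /* ib_umem_start() value */
	uint64_t user_virt = 0x7f0000004abcULL;  /* faulting address */

	/* Same computation as start_idx (line 630) and idx (line 734). */
	uint64_t idx = (user_virt - umem_start) >> page_shift;

	printf("page index = %llu\n", (unsigned long long)idx); /* prints 3 */
	return 0;
}

Any address inside the fourth page of the range lands on index 3, which is how the map and unmap paths agree on which page_list/dma_list entry they touch.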
umem_odp 736 drivers/infiniband/core/umem_odp.c struct page *page = umem_odp->page_list[idx];
umem_odp 737 drivers/infiniband/core/umem_odp.c dma_addr_t dma = umem_odp->dma_list[idx];
umem_odp 743 drivers/infiniband/core/umem_odp.c BIT(umem_odp->page_shift),
umem_odp 758 drivers/infiniband/core/umem_odp.c umem_odp->page_list[idx] = NULL;
umem_odp 759 drivers/infiniband/core/umem_odp.c umem_odp->dma_list[idx] = 0;
umem_odp 760 drivers/infiniband/core/umem_odp.c umem_odp->npages--;
umem_odp 1255 drivers/infiniband/hw/mlx5/mlx5_ib.h void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
umem_odp 1286 drivers/infiniband/hw/mlx5/mlx5_ib.h static inline void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp,
umem_odp 1579 drivers/infiniband/hw/mlx5/mr.c struct ib_umem_odp *umem_odp = to_ib_umem_odp(umem);
umem_odp 1595 drivers/infiniband/hw/mlx5/mr.c if (!umem_odp->is_implicit_odp)
umem_odp 1596 drivers/infiniband/hw/mlx5/mr.c mlx5_ib_invalidate_range(umem_odp,
umem_odp 1597 drivers/infiniband/hw/mlx5/mr.c ib_umem_start(umem_odp),
umem_odp 1598 drivers/infiniband/hw/mlx5/mr.c ib_umem_end(umem_odp));
umem_odp 1606 drivers/infiniband/hw/mlx5/mr.c ib_umem_odp_release(umem_odp);
umem_odp 250 drivers/infiniband/hw/mlx5/odp.c void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
umem_odp 260 drivers/infiniband/hw/mlx5/odp.c if (!umem_odp) {
umem_odp 265 drivers/infiniband/hw/mlx5/odp.c mr = umem_odp->private;
umem_odp 270 drivers/infiniband/hw/mlx5/odp.c start = max_t(u64, ib_umem_start(umem_odp), start);
umem_odp 271 drivers/infiniband/hw/mlx5/odp.c end = min_t(u64, ib_umem_end(umem_odp), end);
umem_odp 279 drivers/infiniband/hw/mlx5/odp.c mutex_lock(&umem_odp->umem_mutex);
umem_odp 280 drivers/infiniband/hw/mlx5/odp.c for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
umem_odp 281 drivers/infiniband/hw/mlx5/odp.c idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
umem_odp 288 drivers/infiniband/hw/mlx5/odp.c if (umem_odp->dma_list[idx] &
umem_odp 317 drivers/infiniband/hw/mlx5/odp.c ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
umem_odp 319 drivers/infiniband/hw/mlx5/odp.c if (unlikely(!umem_odp->npages && mr->parent &&
umem_odp 320 drivers/infiniband/hw/mlx5/odp.c !umem_odp->dying)) {
umem_odp 322 drivers/infiniband/hw/mlx5/odp.c umem_odp->dying = 1;
umem_odp 324 drivers/infiniband/hw/mlx5/odp.c schedule_work(&umem_odp->work);
umem_odp 326 drivers/infiniband/hw/mlx5/odp.c mutex_unlock(&umem_odp->umem_mutex);
umem_odp 420 drivers/infiniband/hw/mlx5/odp.c struct ib_umem_odp *umem_odp,
umem_odp 438 drivers/infiniband/hw/mlx5/odp.c mr->umem = &umem_odp->umem;
umem_odp 554 drivers/infiniband/hw/mlx5/odp.c struct ib_umem_odp *umem_odp;
umem_odp 556 drivers/infiniband/hw/mlx5/odp.c umem_odp = ib_umem_odp_alloc_implicit(udata, access_flags);
umem_odp 557 drivers/infiniband/hw/mlx5/odp.c if (IS_ERR(umem_odp))
umem_odp 558 drivers/infiniband/hw/mlx5/odp.c return ERR_CAST(umem_odp);
umem_odp 560 drivers/infiniband/hw/mlx5/odp.c imr = implicit_mr_alloc(&pd->ibpd, umem_odp, 1, access_flags);
umem_odp 562 drivers/infiniband/hw/mlx5/odp.c ib_umem_odp_release(umem_odp);
umem_odp 566 drivers/infiniband/hw/mlx5/odp.c imr->umem = &umem_odp->umem;
umem_odp 583 drivers/infiniband/hw/mlx5/odp.c struct ib_umem_odp *umem_odp =
umem_odp 585 drivers/infiniband/hw/mlx5/odp.c struct mlx5_ib_mr *mr = umem_odp->private;
umem_odp 590 drivers/infiniband/hw/mlx5/odp.c mutex_lock(&umem_odp->umem_mutex);
umem_odp 591 drivers/infiniband/hw/mlx5/odp.c ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
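mlx5_ib_invalidate_range (odp.c lines 250-326 above) first clamps the notifier's [start, end) to the umem's own bounds with max_t/min_t, then walks the range in BIT(page_shift) strides under umem_mutex before handing the whole span to ib_umem_odp_unmap_dma_pages(); the lockdep_assert_held() at umem_odp.c line 724 enforces that the mutex stays held across the unmap. A hedged sketch of that clamp-and-walk skeleton; the real function additionally zaps MTTs in batches and arms the implicit-MR dying/work teardown shown at lines 319-324:

/* Skeleton of the clamp-and-walk pattern; per-page HW invalidation is
 * elided. All calls used here appear in the listing above.
 */
static void example_invalidate(struct ib_umem_odp *umem_odp,
			       unsigned long start, unsigned long end)
{
	size_t mapped = 0;
	u64 addr, idx;

	start = max_t(u64, ib_umem_start(umem_odp), start);
	end = min_t(u64, ib_umem_end(umem_odp), end);

	mutex_lock(&umem_odp->umem_mutex);
	for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
		idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
		if (umem_odp->page_list[idx])
			mapped++; /* slot currently holds a pinned page */
	}
	/* umem_mutex must still be held here (see the lockdep assert). */
	ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
	mutex_unlock(&umem_odp->umem_mutex);
}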
umem_odp 592 drivers/infiniband/hw/mlx5/odp.c ib_umem_end(umem_odp));
umem_odp 594 drivers/infiniband/hw/mlx5/odp.c if (umem_odp->dying) {
umem_odp 595 drivers/infiniband/hw/mlx5/odp.c mutex_unlock(&umem_odp->umem_mutex);
umem_odp 599 drivers/infiniband/hw/mlx5/odp.c umem_odp->dying = 1;
umem_odp 601 drivers/infiniband/hw/mlx5/odp.c schedule_work(&umem_odp->work);
umem_odp 602 drivers/infiniband/hw/mlx5/odp.c mutex_unlock(&umem_odp->umem_mutex);
umem_odp 92 include/rdma/ib_umem_odp.h static inline unsigned long ib_umem_start(struct ib_umem_odp *umem_odp)
umem_odp 94 include/rdma/ib_umem_odp.h return umem_odp->interval_tree.start;
umem_odp 98 include/rdma/ib_umem_odp.h static inline unsigned long ib_umem_end(struct ib_umem_odp *umem_odp)
umem_odp 100 include/rdma/ib_umem_odp.h return umem_odp->interval_tree.last + 1;
umem_odp 103 include/rdma/ib_umem_odp.h static inline size_t ib_umem_odp_num_pages(struct ib_umem_odp *umem_odp)
umem_odp 105 include/rdma/ib_umem_odp.h return (ib_umem_end(umem_odp) - ib_umem_start(umem_odp)) >>
umem_odp 106 include/rdma/ib_umem_odp.h umem_odp->page_shift;
umem_odp 139 include/rdma/ib_umem_odp.h void ib_umem_odp_release(struct ib_umem_odp *umem_odp);
umem_odp 141 include/rdma/ib_umem_odp.h int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
umem_odp 145 include/rdma/ib_umem_odp.h void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
umem_odp 175 include/rdma/ib_umem_odp.h static inline int ib_umem_mmu_notifier_retry(struct ib_umem_odp *umem_odp,
umem_odp 185 include/rdma/ib_umem_odp.h if (unlikely(umem_odp->notifiers_count))
umem_odp 187 include/rdma/ib_umem_odp.h if (umem_odp->notifiers_seq != mmu_seq)
umem_odp 201 include/rdma/ib_umem_odp.h static inline void ib_umem_odp_release(struct ib_umem_odp *umem_odp) {}
umem_odp 2424 include/rdma/ib_verbs.h void (*invalidate_range)(struct ib_umem_odp *umem_odp,
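ib_umem_mmu_notifier_retry (ib_umem_odp.h lines 175-187 above) is the consumer half of the sequence scheme maintained by the accounting functions at the top of this listing: notifiers_count is nonzero while an invalidation is in flight, and notifiers_seq is bumped as each one ends, so a fault that raced with an invalidation must throw its work away and retry. A hedged sketch of that begin/check/retry loop, modeled on the check at umem_odp.c line 519; example_fault_one_page() is an illustrative stand-in for the get_user_pages + ib_dma_map_page work:

/* Collision-retry protocol around page faulting. Only the sequence
 * sampling and ib_umem_mmu_notifier_retry() are the real mechanism.
 */
static int example_map_with_retry(struct ib_umem_odp *umem_odp,
				  unsigned long page_index)
{
	unsigned long mmu_seq;
	int ret;

retry:
	mmu_seq = READ_ONCE(umem_odp->notifiers_seq);
	smp_rmb(); /* sample the seq before touching page tables */

	ret = example_fault_one_page(umem_odp, page_index); /* hypothetical */
	if (ret)
		return ret;

	mutex_lock(&umem_odp->umem_mutex);
	if (ib_umem_mmu_notifier_retry(umem_odp, mmu_seq)) {
		/* An invalidation ran since we sampled mmu_seq: refault. */
		mutex_unlock(&umem_odp->umem_mutex);
		goto retry;
	}
	/* ... safe to commit page_list[]/dma_list[] entries here ... */
	mutex_unlock(&umem_odp->umem_mutex);
	return 0;
}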