umem               20 drivers/gpu/drm/nouveau/include/nvkm/core/client.h 	struct list_head umem;
umem              304 drivers/gpu/drm/nouveau/nvkm/core/client.c 	INIT_LIST_HEAD(&client->umem);
umem               40 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	struct nvkm_umem *umem;
umem               46 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 			list_for_each_entry(umem, &master->umem, head) {
umem               47 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 				if (umem->object.object == handle) {
umem               48 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 					memory = nvkm_memory_ref(umem->memory);
umem               55 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 		umem = nvkm_umem(object);
umem               56 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 		if (!umem->priv || client->super)
umem               57 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 			memory = nvkm_memory_ref(umem->memory);
umem               66 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	struct nvkm_umem *umem = nvkm_umem(object);
umem               68 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	if (!umem->map)
umem               71 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	if (umem->io) {
umem               72 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 		if (!IS_ERR(umem->bar)) {
umem               73 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 			struct nvkm_device *device = umem->mmu->subdev.device;
umem               74 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 			nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &umem->bar);
umem               76 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 			umem->bar = NULL;
umem               79 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 		vunmap(umem->map);
umem               80 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 		umem->map = NULL;
umem               90 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	struct nvkm_umem *umem = nvkm_umem(object);
umem               91 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	struct nvkm_mmu *mmu = umem->mmu;
umem               93 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	if (!umem->mappable)
umem               95 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	if (umem->map)
umem               98 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	if ((umem->type & NVKM_MEM_HOST) && !argc) {
umem               99 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 		int ret = nvkm_mem_map_host(umem->memory, &umem->map);
umem              103 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 		*handle = (unsigned long)(void *)umem->map;
umem              104 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 		*length = nvkm_memory_size(umem->memory);
umem              108 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	if ((umem->type & NVKM_MEM_VRAM) ||
umem              109 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	    (umem->type & NVKM_MEM_KIND)) {
umem              110 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 		int ret = mmu->func->mem.umap(mmu, umem->memory, argv, argc,
umem              111 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 					      handle, length, &umem->bar);
umem              120 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	umem->io = (*type == NVKM_OBJECT_MAP_IO);
umem              127 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	struct nvkm_umem *umem = nvkm_umem(object);
umem              128 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	spin_lock(&umem->object.client->lock);
umem              129 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	list_del_init(&umem->head);
umem              130 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	spin_unlock(&umem->object.client->lock);
umem              131 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	nvkm_memory_unref(&umem->memory);
umem              132 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	return umem;
umem              150 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	struct nvkm_umem *umem;
umem              165 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	if (!(umem = kzalloc(sizeof(*umem), GFP_KERNEL)))
umem              167 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	nvkm_object_ctor(&nvkm_umem, oclass, &umem->object);
umem              168 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	umem->mmu = mmu;
umem              169 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	umem->type = mmu->type[type].type;
umem              170 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	umem->priv = oclass->client->super;
umem              171 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	INIT_LIST_HEAD(&umem->head);
umem              172 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	*pobject = &umem->object;
umem              176 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 		umem->mappable = true;
umem              180 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 				&umem->memory);
umem              184 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	spin_lock(&umem->object.client->lock);
umem              185 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	list_add(&umem->head, &umem->object.client->umem);
umem              186 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	spin_unlock(&umem->object.client->lock);
umem              188 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	args->v0.page = nvkm_memory_page(umem->memory);
umem              189 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	args->v0.addr = nvkm_memory_addr(umem->memory);
umem              190 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	args->v0.size = nvkm_memory_size(umem->memory);
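
The nouveau hits above are the DRM/nvkm user-memory object, unrelated to the InfiniBand ib_umem entries that follow. A minimal sketch of the handle-lookup pattern in nvkm/subdev/mmu/umem.c, reassembled from the fragments above; locking and the direct-object path are omitted, and the surrounding variables are illustrative:

	struct nvkm_umem *umem;
	struct nvkm_memory *memory = NULL;

	/* Walk the master client's umem list for a matching handle and take a
	 * reference on its backing memory (cf. the umem.c:40-48 hits above).
	 */
	list_for_each_entry(umem, &master->umem, head) {
		if (umem->object.object == handle) {
			memory = nvkm_memory_ref(umem->memory);
			break;
		}
	}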
umem               46 drivers/infiniband/core/umem.c static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
umem               51 drivers/infiniband/core/umem.c 	if (umem->nmap > 0)
umem               52 drivers/infiniband/core/umem.c 		ib_dma_unmap_sg(dev, umem->sg_head.sgl, umem->sg_nents,
umem               55 drivers/infiniband/core/umem.c 	for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->sg_nents, 0) {
umem               57 drivers/infiniband/core/umem.c 		put_user_pages_dirty_lock(&page, 1, umem->writable && dirty);
umem               60 drivers/infiniband/core/umem.c 	sg_free_table(&umem->sg_head);
umem              144 drivers/infiniband/core/umem.c unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
umem              160 drivers/infiniband/core/umem.c 	mask = roundup_pow_of_two(umem->length);
umem              162 drivers/infiniband/core/umem.c 	pgoff = umem->address & ~PAGE_MASK;
umem              164 drivers/infiniband/core/umem.c 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
umem              174 drivers/infiniband/core/umem.c 		if (i != (umem->nmap - 1))
umem              197 drivers/infiniband/core/umem.c 	struct ib_umem *umem;
umem              234 drivers/infiniband/core/umem.c 	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
umem              235 drivers/infiniband/core/umem.c 	if (!umem)
umem              237 drivers/infiniband/core/umem.c 	umem->ibdev = context->device;
umem              238 drivers/infiniband/core/umem.c 	umem->length     = size;
umem              239 drivers/infiniband/core/umem.c 	umem->address    = addr;
umem              240 drivers/infiniband/core/umem.c 	umem->writable   = ib_access_writable(access);
umem              241 drivers/infiniband/core/umem.c 	umem->owning_mm = mm = current->mm;
umem              250 drivers/infiniband/core/umem.c 	npages = ib_umem_num_pages(umem);
umem              267 drivers/infiniband/core/umem.c 	ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
umem              271 drivers/infiniband/core/umem.c 	if (!umem->writable)
umem              274 drivers/infiniband/core/umem.c 	sg = umem->sg_head.sgl;
umem              293 drivers/infiniband/core/umem.c 			&umem->sg_nents);
umem              300 drivers/infiniband/core/umem.c 	umem->nmap = ib_dma_map_sg_attrs(context->device,
umem              301 drivers/infiniband/core/umem.c 				  umem->sg_head.sgl,
umem              302 drivers/infiniband/core/umem.c 				  umem->sg_nents,
umem              306 drivers/infiniband/core/umem.c 	if (!umem->nmap) {
umem              315 drivers/infiniband/core/umem.c 	__ib_umem_release(context->device, umem, 0);
umem              317 drivers/infiniband/core/umem.c 	atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
umem              322 drivers/infiniband/core/umem.c 		mmdrop(umem->owning_mm);
umem              323 drivers/infiniband/core/umem.c 		kfree(umem);
umem              325 drivers/infiniband/core/umem.c 	return ret ? ERR_PTR(ret) : umem;
umem              333 drivers/infiniband/core/umem.c void ib_umem_release(struct ib_umem *umem)
umem              335 drivers/infiniband/core/umem.c 	if (!umem)
umem              337 drivers/infiniband/core/umem.c 	if (umem->is_odp)
umem              338 drivers/infiniband/core/umem.c 		return ib_umem_odp_release(to_ib_umem_odp(umem));
umem              340 drivers/infiniband/core/umem.c 	__ib_umem_release(umem->ibdev, umem, 1);
umem              342 drivers/infiniband/core/umem.c 	atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm);
umem              343 drivers/infiniband/core/umem.c 	mmdrop(umem->owning_mm);
umem              344 drivers/infiniband/core/umem.c 	kfree(umem);
umem              348 drivers/infiniband/core/umem.c int ib_umem_page_count(struct ib_umem *umem)
umem              353 drivers/infiniband/core/umem.c 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
umem              370 drivers/infiniband/core/umem.c int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
umem              376 drivers/infiniband/core/umem.c 	if (offset > umem->length || length > umem->length - offset) {
umem              378 drivers/infiniband/core/umem.c 		       offset, umem->length, end);
umem              382 drivers/infiniband/core/umem.c 	ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->sg_nents, dst, length,
umem              383 drivers/infiniband/core/umem.c 				 offset + ib_umem_offset(umem));
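
Taken together, the core helpers above form the registration pattern that most hardware drivers below repeat (cxgb, hns, mthca, ocrdma, qedr, vmw_pvrdma, ...): pin the user range with ib_umem_get(), size it with ib_umem_num_pages(), walk the DMA-mapped scatterlist, and release it with ib_umem_release(). A minimal, hypothetical sketch assuming the five-argument ib_umem_get() used throughout this tree; the helper name is illustrative:

	#include <rdma/ib_umem.h>

	static int demo_collect_dma_pages(struct ib_udata *udata, u64 start,
					  u64 length, int access,
					  u64 *dma_pages, int max_pages)
	{
		struct sg_dma_page_iter sg_iter;
		struct ib_umem *umem;
		int n = 0;

		umem = ib_umem_get(udata, start, length, access, 0);
		if (IS_ERR(umem))
			return PTR_ERR(umem);

		if (ib_umem_num_pages(umem) > max_pages) {
			ib_umem_release(umem);
			return -EINVAL;
		}

		/* One entry per PAGE_SIZE chunk of the DMA-mapped SG list. */
		for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0)
			dma_pages[n++] = sg_page_iter_dma_address(&sg_iter);

		ib_umem_release(umem);	/* real drivers keep the umem with the MR */
		return n;
	}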
umem               99 drivers/infiniband/core/umem_odp.c 		umem_odp->umem.ibdev->ops.invalidate_range(
umem              112 drivers/infiniband/core/umem_odp.c 	item->umem.ibdev->ops.invalidate_range(item, start, end);
umem              213 drivers/infiniband/core/umem_odp.c 	umem_odp->umem.is_odp = 1;
umem              219 drivers/infiniband/core/umem_odp.c 			ALIGN_DOWN(umem_odp->umem.address, page_size);
umem              220 drivers/infiniband/core/umem_odp.c 		if (check_add_overflow(umem_odp->umem.address,
umem              221 drivers/infiniband/core/umem_odp.c 				       (unsigned long)umem_odp->umem.length,
umem              255 drivers/infiniband/core/umem_odp.c 	mn = mmu_notifier_get(&ib_umem_notifiers, umem_odp->umem.owning_mm);
umem              272 drivers/infiniband/core/umem_odp.c 	mmgrab(umem_odp->umem.owning_mm);
umem              299 drivers/infiniband/core/umem_odp.c 	struct ib_umem *umem;
umem              314 drivers/infiniband/core/umem_odp.c 	umem = &umem_odp->umem;
umem              315 drivers/infiniband/core/umem_odp.c 	umem->ibdev = context->device;
umem              316 drivers/infiniband/core/umem_odp.c 	umem->writable = ib_access_writable(access);
umem              317 drivers/infiniband/core/umem_odp.c 	umem->owning_mm = current->mm;
umem              347 drivers/infiniband/core/umem_odp.c 	struct ib_umem *umem;
umem              356 drivers/infiniband/core/umem_odp.c 	umem = &odp_data->umem;
umem              357 drivers/infiniband/core/umem_odp.c 	umem->ibdev = root->umem.ibdev;
umem              358 drivers/infiniband/core/umem_odp.c 	umem->length     = size;
umem              359 drivers/infiniband/core/umem_odp.c 	umem->address    = addr;
umem              360 drivers/infiniband/core/umem_odp.c 	umem->writable   = root->umem.writable;
umem              361 drivers/infiniband/core/umem_odp.c 	umem->owning_mm  = root->umem.owning_mm;
umem              409 drivers/infiniband/core/umem_odp.c 	umem_odp->umem.ibdev = context->device;
umem              410 drivers/infiniband/core/umem_odp.c 	umem_odp->umem.length = size;
umem              411 drivers/infiniband/core/umem_odp.c 	umem_odp->umem.address = addr;
umem              412 drivers/infiniband/core/umem_odp.c 	umem_odp->umem.writable = ib_access_writable(access);
umem              413 drivers/infiniband/core/umem_odp.c 	umem_odp->umem.owning_mm = mm = current->mm;
umem              479 drivers/infiniband/core/umem_odp.c 	mmdrop(umem_odp->umem.owning_mm);
umem              509 drivers/infiniband/core/umem_odp.c 	struct ib_device *dev = umem_odp->umem.ibdev;
umem              592 drivers/infiniband/core/umem_odp.c 	struct mm_struct *owning_mm = umem_odp->umem.owning_mm;
umem              722 drivers/infiniband/core/umem_odp.c 	struct ib_device *dev = umem_odp->umem.ibdev;
umem              777 drivers/infiniband/core/umem_odp.c 	struct ib_umem_odp *umem;
umem              788 drivers/infiniband/core/umem_odp.c 		umem = container_of(node, struct ib_umem_odp, interval_tree);
umem              789 drivers/infiniband/core/umem_odp.c 		ret_val = cb(umem, start, last, cookie) || ret_val;
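
The ODP code above works because struct ib_umem_odp embeds a plain struct ib_umem as its umem member; to_ib_umem_odp() in rdma/ib_umem_odp.h converts back from the embedded umem. A small sketch of that relationship, with a hypothetical helper name:

	#include <rdma/ib_umem_odp.h>

	static bool demo_odp_is_writable(struct ib_umem *umem)
	{
		/* Valid only when this umem really is the member embedded in
		 * an ib_umem_odp, which is what umem->is_odp indicates.
		 */
		return umem->is_odp && to_ib_umem_odp(umem)->umem.writable;
	}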
umem              841 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	struct ib_umem *umem;
umem              858 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	umem = ib_umem_get(udata, ureq.qpsva, bytes, IB_ACCESS_LOCAL_WRITE, 1);
umem              859 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	if (IS_ERR(umem))
umem              860 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		return PTR_ERR(umem);
umem              862 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	qp->sumem = umem;
umem              863 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	qplib_qp->sq.sg_info.sglist = umem->sg_head.sgl;
umem              864 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	qplib_qp->sq.sg_info.npages = ib_umem_num_pages(umem);
umem              865 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	qplib_qp->sq.sg_info.nmap = umem->nmap;
umem              871 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		umem = ib_umem_get(udata, ureq.qprva, bytes,
umem              873 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		if (IS_ERR(umem))
umem              875 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		qp->rumem = umem;
umem              876 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		qplib_qp->rq.sg_info.sglist = umem->sg_head.sgl;
umem              877 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		qplib_qp->rq.sg_info.npages = ib_umem_num_pages(umem);
umem              878 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		qplib_qp->rq.sg_info.nmap = umem->nmap;
umem              888 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	return PTR_ERR(umem);
umem             1302 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	ib_umem_release(srq->umem);
umem             1315 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	struct ib_umem *umem;
umem             1325 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	umem = ib_umem_get(udata, ureq.srqva, bytes, IB_ACCESS_LOCAL_WRITE, 1);
umem             1326 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	if (IS_ERR(umem))
umem             1327 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		return PTR_ERR(umem);
umem             1329 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	srq->umem = umem;
umem             1330 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	qplib_srq->sg_info.sglist = umem->sg_head.sgl;
umem             1331 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	qplib_srq->sg_info.npages = ib_umem_num_pages(umem);
umem             1332 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	qplib_srq->sg_info.nmap = umem->nmap;
umem             1411 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	ib_umem_release(srq->umem);
umem             2526 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	ib_umem_release(cq->umem);
umem             2566 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		cq->umem = ib_umem_get(udata, req.cq_va,
umem             2569 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		if (IS_ERR(cq->umem)) {
umem             2570 drivers/infiniband/hw/bnxt_re/ib_verbs.c 			rc = PTR_ERR(cq->umem);
umem             2573 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		cq->qplib_cq.sg_info.sglist = cq->umem->sg_head.sgl;
umem             2574 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		cq->qplib_cq.sg_info.npages = ib_umem_num_pages(cq->umem);
umem             2575 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		cq->qplib_cq.sg_info.nmap = cq->umem->nmap;
umem             2629 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	ib_umem_release(cq->umem);
umem             3487 drivers/infiniband/hw/bnxt_re/ib_verbs.c static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
umem             3494 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, page_size)
umem             3508 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	struct ib_umem *umem;
umem             3535 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	umem = ib_umem_get(udata, start, length, mr_access_flags, 0);
umem             3536 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	if (IS_ERR(umem)) {
umem             3541 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	mr->ib_umem = umem;
umem             3544 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	umem_pgs = ib_umem_page_count(umem);
umem             3558 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	page_shift = __ffs(ib_umem_find_best_pgsz(umem,
umem             3577 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift);
umem             3595 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	ib_umem_release(umem);
umem               76 drivers/infiniband/hw/bnxt_re/ib_verbs.h 	struct ib_umem		*umem;
umem              106 drivers/infiniband/hw/bnxt_re/ib_verbs.h 	struct ib_umem		*umem;
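
bnxt_re repeats the same three assignments for its SQ, RQ, SRQ and CQ queues above. A hypothetical helper makes the pattern explicit; the bnxt_qplib_sg_info type name is assumed from the driver, while the field names come directly from the lines above:

	static void demo_fill_sg_info(struct bnxt_qplib_sg_info *sg_info,
				      struct ib_umem *umem)
	{
		sg_info->sglist = umem->sg_head.sgl;
		sg_info->npages = ib_umem_num_pages(umem);
		sg_info->nmap   = umem->nmap;
	}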
umem              349 drivers/infiniband/hw/cxgb3/iwch_provider.c 	ib_umem_release(mhp->umem);
umem              454 drivers/infiniband/hw/cxgb3/iwch_provider.c 	mhp->umem = ib_umem_get(udata, start, length, acc, 0);
umem              455 drivers/infiniband/hw/cxgb3/iwch_provider.c 	if (IS_ERR(mhp->umem)) {
umem              456 drivers/infiniband/hw/cxgb3/iwch_provider.c 		err = PTR_ERR(mhp->umem);
umem              463 drivers/infiniband/hw/cxgb3/iwch_provider.c 	n = ib_umem_num_pages(mhp->umem);
umem              477 drivers/infiniband/hw/cxgb3/iwch_provider.c 	for_each_sg_dma_page(mhp->umem->sg_head.sgl, &sg_iter, mhp->umem->nmap, 0) {
umem              526 drivers/infiniband/hw/cxgb3/iwch_provider.c 	ib_umem_release(mhp->umem);
umem               76 drivers/infiniband/hw/cxgb3/iwch_provider.h 	struct ib_umem *umem;
umem              397 drivers/infiniband/hw/cxgb4/iw_cxgb4.h 	struct ib_umem *umem;
umem              546 drivers/infiniband/hw/cxgb4/mem.c 	mhp->umem = ib_umem_get(udata, start, length, acc, 0);
umem              547 drivers/infiniband/hw/cxgb4/mem.c 	if (IS_ERR(mhp->umem))
umem              552 drivers/infiniband/hw/cxgb4/mem.c 	n = ib_umem_num_pages(mhp->umem);
umem              565 drivers/infiniband/hw/cxgb4/mem.c 	for_each_sg_dma_page(mhp->umem->sg_head.sgl, &sg_iter, mhp->umem->nmap, 0) {
umem              605 drivers/infiniband/hw/cxgb4/mem.c 	ib_umem_release(mhp->umem);
umem              817 drivers/infiniband/hw/cxgb4/mem.c 	ib_umem_release(mhp->umem);
umem               86 drivers/infiniband/hw/efa/efa.h 	struct ib_umem *umem;
umem             1046 drivers/infiniband/hw/efa/efa_verbs.c 			     struct ib_umem *umem,
umem             1058 drivers/infiniband/hw/efa/efa_verbs.c 	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap,
umem             1285 drivers/infiniband/hw/efa/efa_verbs.c 		      struct ib_umem *umem,
umem             1298 drivers/infiniband/hw/efa/efa_verbs.c 		err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
umem             1308 drivers/infiniband/hw/efa/efa_verbs.c 		err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
umem             1346 drivers/infiniband/hw/efa/efa_verbs.c 	err = umem_to_page_list(dev, mr->umem, params->pbl.inline_pbl_array,
umem             1364 drivers/infiniband/hw/efa/efa_verbs.c 	err = pbl_create(dev, pbl, mr->umem, params->page_num,
umem             1426 drivers/infiniband/hw/efa/efa_verbs.c 	mr->umem = ib_umem_get(udata, start, length, access_flags, 0);
umem             1427 drivers/infiniband/hw/efa/efa_verbs.c 	if (IS_ERR(mr->umem)) {
umem             1428 drivers/infiniband/hw/efa/efa_verbs.c 		err = PTR_ERR(mr->umem);
umem             1439 drivers/infiniband/hw/efa/efa_verbs.c 	pg_sz = ib_umem_find_best_pgsz(mr->umem,
umem             1486 drivers/infiniband/hw/efa/efa_verbs.c 	ib_umem_release(mr->umem);
umem             1508 drivers/infiniband/hw/efa/efa_verbs.c 	ib_umem_release(mr->umem);
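
efa (like bnxt_re's fill_umem_pbl_tbl above) builds its device page lists with the block iterator rather than a per-PAGE_SIZE walk: the umem is traversed in pg_sz-sized blocks, where pg_sz would typically come from ib_umem_find_best_pgsz(). A minimal sketch with illustrative names:

	static void demo_umem_to_page_list(struct ib_umem *umem,
					   u64 *page_list, u64 pg_sz)
	{
		struct ib_block_iter biter;
		unsigned int i = 0;

		/* One DMA address per pg_sz-aligned block of the umem. */
		rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, pg_sz)
			page_list[i++] = rdma_block_iter_dma_address(&biter);
	}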
umem              266 drivers/infiniband/hw/hns/hns_roce_alloc.c 			   int buf_cnt, int start, struct ib_umem *umem,
umem              280 drivers/infiniband/hw/hns/hns_roce_alloc.c 	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap,
umem              210 drivers/infiniband/hw/hns/hns_roce_cq.c 				   struct ib_umem **umem, u64 buf_addr, int cqe)
umem              216 drivers/infiniband/hw/hns/hns_roce_cq.c 	*umem = ib_umem_get(udata, buf_addr, cqe * hr_dev->caps.cq_entry_sz,
umem              218 drivers/infiniband/hw/hns/hns_roce_cq.c 	if (IS_ERR(*umem))
umem              219 drivers/infiniband/hw/hns/hns_roce_cq.c 		return PTR_ERR(*umem);
umem              227 drivers/infiniband/hw/hns/hns_roce_cq.c 		npages = (ib_umem_page_count(*umem) +
umem              234 drivers/infiniband/hw/hns/hns_roce_cq.c 		ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
umem              240 drivers/infiniband/hw/hns/hns_roce_cq.c 	ret = hns_roce_ib_umem_write_mtt(hr_dev, &buf->hr_mtt, *umem);
umem              250 drivers/infiniband/hw/hns/hns_roce_cq.c 	ib_umem_release(*umem);
umem              318 drivers/infiniband/hw/hns/hns_roce_cq.c 				      &hr_cq->umem, ucmd.buf_addr,
umem              341 drivers/infiniband/hw/hns/hns_roce_cq.c 	ib_umem_release(hr_cq->umem);
umem              396 drivers/infiniband/hw/hns/hns_roce_cq.c 	ib_umem_release(hr_cq->umem);
umem              505 drivers/infiniband/hw/hns/hns_roce_cq.c 	ib_umem_release(hr_cq->umem);
umem               34 drivers/infiniband/hw/hns/hns_roce_db.c 	page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0, 0);
umem               35 drivers/infiniband/hw/hns/hns_roce_db.c 	if (IS_ERR(page->umem)) {
umem               36 drivers/infiniband/hw/hns/hns_roce_db.c 		ret = PTR_ERR(page->umem);
umem               45 drivers/infiniband/hw/hns/hns_roce_db.c 	db->dma = sg_dma_address(page->umem->sg_head.sgl) + offset;
umem               46 drivers/infiniband/hw/hns/hns_roce_db.c 	db->virt_addr = sg_virt(page->umem->sg_head.sgl) + offset;
umem               64 drivers/infiniband/hw/hns/hns_roce_db.c 		ib_umem_release(db->u.user_page->umem);
umem              386 drivers/infiniband/hw/hns/hns_roce_device.h 	struct ib_umem		*umem;
umem              468 drivers/infiniband/hw/hns/hns_roce_device.h 	struct ib_umem		*umem;
umem              496 drivers/infiniband/hw/hns/hns_roce_device.h 	struct ib_umem			*umem;
umem              517 drivers/infiniband/hw/hns/hns_roce_device.h 	struct ib_umem			*umem;
umem              536 drivers/infiniband/hw/hns/hns_roce_device.h 	struct ib_umem	       *umem;
umem              663 drivers/infiniband/hw/hns/hns_roce_device.h 	struct ib_umem		*umem;
umem             1215 drivers/infiniband/hw/hns/hns_roce_device.h 			       struct hns_roce_mtt *mtt, struct ib_umem *umem);
umem             1226 drivers/infiniband/hw/hns/hns_roce_device.h 			   int buf_cnt, int start, struct ib_umem *umem,
umem             1154 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 		npages = ib_umem_page_count(mr->umem);
umem             1162 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	ib_umem_release(mr->umem);
umem             1890 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
umem             3634 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	ib_umem_release(hr_qp->umem);
umem             3686 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	ib_umem_release(hr_cq->umem);
umem             2241 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
umem             4710 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	ib_umem_release(hr_qp->umem);
umem              718 drivers/infiniband/hw/hns/hns_roce_mr.c 			npages = ib_umem_page_count(mr->umem);
umem             1007 drivers/infiniband/hw/hns/hns_roce_mr.c 	mr->umem = NULL;
umem             1020 drivers/infiniband/hw/hns/hns_roce_mr.c 			       struct hns_roce_mtt *mtt, struct ib_umem *umem)
umem             1060 drivers/infiniband/hw/hns/hns_roce_mr.c 	for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
umem             1092 drivers/infiniband/hw/hns/hns_roce_mr.c 				     struct ib_umem *umem)
umem             1103 drivers/infiniband/hw/hns/hns_roce_mr.c 	for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
umem             1146 drivers/infiniband/hw/hns/hns_roce_mr.c 	mr->umem = ib_umem_get(udata, start, length, access_flags, 0);
umem             1147 drivers/infiniband/hw/hns/hns_roce_mr.c 	if (IS_ERR(mr->umem)) {
umem             1148 drivers/infiniband/hw/hns/hns_roce_mr.c 		ret = PTR_ERR(mr->umem);
umem             1152 drivers/infiniband/hw/hns/hns_roce_mr.c 	n = ib_umem_page_count(mr->umem);
umem             1185 drivers/infiniband/hw/hns/hns_roce_mr.c 	ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
umem             1201 drivers/infiniband/hw/hns/hns_roce_mr.c 	ib_umem_release(mr->umem);
umem             1221 drivers/infiniband/hw/hns/hns_roce_mr.c 		npages = ib_umem_page_count(mr->umem);
umem             1229 drivers/infiniband/hw/hns/hns_roce_mr.c 	ib_umem_release(mr->umem);
umem             1231 drivers/infiniband/hw/hns/hns_roce_mr.c 	mr->umem = ib_umem_get(udata, start, length, mr_access_flags, 0);
umem             1232 drivers/infiniband/hw/hns/hns_roce_mr.c 	if (IS_ERR(mr->umem)) {
umem             1233 drivers/infiniband/hw/hns/hns_roce_mr.c 		ret = PTR_ERR(mr->umem);
umem             1234 drivers/infiniband/hw/hns/hns_roce_mr.c 		mr->umem = NULL;
umem             1237 drivers/infiniband/hw/hns/hns_roce_mr.c 	npages = ib_umem_page_count(mr->umem);
umem             1260 drivers/infiniband/hw/hns/hns_roce_mr.c 	ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
umem             1263 drivers/infiniband/hw/hns/hns_roce_mr.c 			npages = ib_umem_page_count(mr->umem);
umem             1279 drivers/infiniband/hw/hns/hns_roce_mr.c 	ib_umem_release(mr->umem);
umem             1338 drivers/infiniband/hw/hns/hns_roce_mr.c 		ib_umem_release(mr->umem);
umem             1367 drivers/infiniband/hw/hns/hns_roce_mr.c 		ib_umem_release(mr->umem);
umem             1413 drivers/infiniband/hw/hns/hns_roce_mr.c 	mr->umem = NULL;
umem              747 drivers/infiniband/hw/hns/hns_roce_qp.c 		hr_qp->umem = ib_umem_get(udata, ucmd.buf_addr,
umem              749 drivers/infiniband/hw/hns/hns_roce_qp.c 		if (IS_ERR(hr_qp->umem)) {
umem              751 drivers/infiniband/hw/hns/hns_roce_qp.c 			ret = PTR_ERR(hr_qp->umem);
umem              768 drivers/infiniband/hw/hns/hns_roce_qp.c 					hr_qp->umem, page_shift);
umem              998 drivers/infiniband/hw/hns/hns_roce_qp.c 	if (!hr_qp->umem)
umem             1000 drivers/infiniband/hw/hns/hns_roce_qp.c 	ib_umem_release(hr_qp->umem);
umem              189 drivers/infiniband/hw/hns/hns_roce_srq.c 	srq->umem = ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0, 0);
umem              190 drivers/infiniband/hw/hns/hns_roce_srq.c 	if (IS_ERR(srq->umem))
umem              191 drivers/infiniband/hw/hns/hns_roce_srq.c 		return PTR_ERR(srq->umem);
umem              194 drivers/infiniband/hw/hns/hns_roce_srq.c 	buf->npages = (ib_umem_page_count(srq->umem) +
umem              203 drivers/infiniband/hw/hns/hns_roce_srq.c 	ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->mtt, srq->umem);
umem              208 drivers/infiniband/hw/hns/hns_roce_srq.c 	srq->idx_que.umem = ib_umem_get(udata, ucmd.que_addr,
umem              210 drivers/infiniband/hw/hns/hns_roce_srq.c 	if (IS_ERR(srq->idx_que.umem)) {
umem              212 drivers/infiniband/hw/hns/hns_roce_srq.c 		ret = PTR_ERR(srq->idx_que.umem);
umem              217 drivers/infiniband/hw/hns/hns_roce_srq.c 	buf->npages = DIV_ROUND_UP(ib_umem_page_count(srq->idx_que.umem),
umem              228 drivers/infiniband/hw/hns/hns_roce_srq.c 					 srq->idx_que.umem);
umem              241 drivers/infiniband/hw/hns/hns_roce_srq.c 	ib_umem_release(srq->idx_que.umem);
umem              247 drivers/infiniband/hw/hns/hns_roce_srq.c 	ib_umem_release(srq->umem);
umem              344 drivers/infiniband/hw/hns/hns_roce_srq.c 	ib_umem_release(srq->idx_que.umem);
umem              346 drivers/infiniband/hw/hns/hns_roce_srq.c 	ib_umem_release(srq->umem);
umem              459 drivers/infiniband/hw/hns/hns_roce_srq.c 	ib_umem_release(srq->idx_que.umem);
umem              460 drivers/infiniband/hw/hns/hns_roce_srq.c 	ib_umem_release(srq->umem);
umem              140 drivers/infiniband/hw/mlx4/cq.c 			       struct ib_umem **umem, u64 buf_addr, int cqe)
umem              147 drivers/infiniband/hw/mlx4/cq.c 	*umem = ib_umem_get(udata, buf_addr, cqe * cqe_size,
umem              149 drivers/infiniband/hw/mlx4/cq.c 	if (IS_ERR(*umem))
umem              150 drivers/infiniband/hw/mlx4/cq.c 		return PTR_ERR(*umem);
umem              152 drivers/infiniband/hw/mlx4/cq.c 	n = ib_umem_page_count(*umem);
umem              153 drivers/infiniband/hw/mlx4/cq.c 	shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
umem              159 drivers/infiniband/hw/mlx4/cq.c 	err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
umem              169 drivers/infiniband/hw/mlx4/cq.c 	ib_umem_release(*umem);
umem              214 drivers/infiniband/hw/mlx4/cq.c 		err = mlx4_ib_get_cq_umem(dev, udata, &cq->buf, &cq->umem,
umem              280 drivers/infiniband/hw/mlx4/cq.c 	ib_umem_release(cq->umem);
umem              432 drivers/infiniband/hw/mlx4/cq.c 		ib_umem_release(cq->umem);
umem              433 drivers/infiniband/hw/mlx4/cq.c 		cq->umem     = cq->resize_umem;
umem              497 drivers/infiniband/hw/mlx4/cq.c 	ib_umem_release(mcq->umem);
umem               40 drivers/infiniband/hw/mlx4/doorbell.c 	struct ib_umem	       *umem;
umem               67 drivers/infiniband/hw/mlx4/doorbell.c 	page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
umem               68 drivers/infiniband/hw/mlx4/doorbell.c 	if (IS_ERR(page->umem)) {
umem               69 drivers/infiniband/hw/mlx4/doorbell.c 		err = PTR_ERR(page->umem);
umem               77 drivers/infiniband/hw/mlx4/doorbell.c 	db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);
umem               93 drivers/infiniband/hw/mlx4/doorbell.c 		ib_umem_release(db->u.user_page->umem);
umem              123 drivers/infiniband/hw/mlx4/mlx4_ib.h 	struct ib_umem	       *umem;
umem              140 drivers/infiniband/hw/mlx4/mlx4_ib.h 	struct ib_umem	       *umem;
umem              324 drivers/infiniband/hw/mlx4/mlx4_ib.h 	struct ib_umem	       *umem;
umem              364 drivers/infiniband/hw/mlx4/mlx4_ib.h 	struct ib_umem	       *umem;
umem              732 drivers/infiniband/hw/mlx4/mlx4_ib.h 			   struct ib_umem *umem);
umem              918 drivers/infiniband/hw/mlx4/mlx4_ib.h int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
umem               77 drivers/infiniband/hw/mlx4/mr.c 	mr->umem = NULL;
umem              183 drivers/infiniband/hw/mlx4/mr.c 			   struct ib_umem *umem)
umem              203 drivers/infiniband/hw/mlx4/mr.c 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
umem              257 drivers/infiniband/hw/mlx4/mr.c int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
umem              274 drivers/infiniband/hw/mlx4/mr.c 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
umem              418 drivers/infiniband/hw/mlx4/mr.c 	mr->umem = mlx4_get_umem_mr(udata, start, length, access_flags);
umem              419 drivers/infiniband/hw/mlx4/mr.c 	if (IS_ERR(mr->umem)) {
umem              420 drivers/infiniband/hw/mlx4/mr.c 		err = PTR_ERR(mr->umem);
umem              424 drivers/infiniband/hw/mlx4/mr.c 	n = ib_umem_page_count(mr->umem);
umem              425 drivers/infiniband/hw/mlx4/mr.c 	shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n);
umem              432 drivers/infiniband/hw/mlx4/mr.c 	err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);
umem              451 drivers/infiniband/hw/mlx4/mr.c 	ib_umem_release(mr->umem);
umem              489 drivers/infiniband/hw/mlx4/mr.c 		    !mmr->umem->writable) {
umem              506 drivers/infiniband/hw/mlx4/mr.c 		ib_umem_release(mmr->umem);
umem              507 drivers/infiniband/hw/mlx4/mr.c 		mmr->umem = mlx4_get_umem_mr(udata, start, length,
umem              509 drivers/infiniband/hw/mlx4/mr.c 		if (IS_ERR(mmr->umem)) {
umem              510 drivers/infiniband/hw/mlx4/mr.c 			err = PTR_ERR(mmr->umem);
umem              512 drivers/infiniband/hw/mlx4/mr.c 			mmr->umem = NULL;
umem              515 drivers/infiniband/hw/mlx4/mr.c 		n = ib_umem_page_count(mmr->umem);
umem              522 drivers/infiniband/hw/mlx4/mr.c 			ib_umem_release(mmr->umem);
umem              528 drivers/infiniband/hw/mlx4/mr.c 		err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem);
umem              531 drivers/infiniband/hw/mlx4/mr.c 			ib_umem_release(mmr->umem);
umem              607 drivers/infiniband/hw/mlx4/mr.c 	if (mr->umem)
umem              608 drivers/infiniband/hw/mlx4/mr.c 		ib_umem_release(mr->umem);
umem              687 drivers/infiniband/hw/mlx4/mr.c 	mr->umem = NULL;
umem              919 drivers/infiniband/hw/mlx4/qp.c 	qp->umem = ib_umem_get(udata, wq.buf_addr, qp->buf_size, 0, 0);
umem              920 drivers/infiniband/hw/mlx4/qp.c 	if (IS_ERR(qp->umem)) {
umem              921 drivers/infiniband/hw/mlx4/qp.c 		err = PTR_ERR(qp->umem);
umem              925 drivers/infiniband/hw/mlx4/qp.c 	n = ib_umem_page_count(qp->umem);
umem              926 drivers/infiniband/hw/mlx4/qp.c 	shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
umem              932 drivers/infiniband/hw/mlx4/qp.c 	err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
umem              985 drivers/infiniband/hw/mlx4/qp.c 	ib_umem_release(qp->umem);
umem             1113 drivers/infiniband/hw/mlx4/qp.c 		qp->umem =
umem             1115 drivers/infiniband/hw/mlx4/qp.c 		if (IS_ERR(qp->umem)) {
umem             1116 drivers/infiniband/hw/mlx4/qp.c 			err = PTR_ERR(qp->umem);
umem             1120 drivers/infiniband/hw/mlx4/qp.c 		n = ib_umem_page_count(qp->umem);
umem             1121 drivers/infiniband/hw/mlx4/qp.c 		shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
umem             1127 drivers/infiniband/hw/mlx4/qp.c 		err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
umem             1287 drivers/infiniband/hw/mlx4/qp.c 	if (!qp->umem)
umem             1289 drivers/infiniband/hw/mlx4/qp.c 	ib_umem_release(qp->umem);
umem             1510 drivers/infiniband/hw/mlx4/qp.c 	ib_umem_release(qp->umem);
umem              113 drivers/infiniband/hw/mlx4/srq.c 		srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0, 0);
umem              114 drivers/infiniband/hw/mlx4/srq.c 		if (IS_ERR(srq->umem))
umem              115 drivers/infiniband/hw/mlx4/srq.c 			return PTR_ERR(srq->umem);
umem              117 drivers/infiniband/hw/mlx4/srq.c 		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),
umem              122 drivers/infiniband/hw/mlx4/srq.c 		err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem);
umem              207 drivers/infiniband/hw/mlx4/srq.c 	if (!srq->umem)
umem              209 drivers/infiniband/hw/mlx4/srq.c 	ib_umem_release(srq->umem);
umem              283 drivers/infiniband/hw/mlx4/srq.c 	ib_umem_release(msrq->umem);
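
The mlx4 doorbell code above (and the near-identical hns_roce_db.c and mlx5/doorbell.c hits elsewhere in this listing) pins only the single page containing a user doorbell and then offsets into its DMA address. Reassembled as a fragment from the lines above, with local names made explicit:

	struct ib_umem *db_umem;
	dma_addr_t db_dma;

	/* Pin the page holding the doorbell, then add the in-page offset. */
	db_umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
	if (IS_ERR(db_umem))
		return PTR_ERR(db_umem);

	db_dma = sg_dma_address(db_umem->sg_head.sgl) + (virt & ~PAGE_MASK);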
umem              734 drivers/infiniband/hw/mlx5/cq.c 	cq->buf.umem =
umem              737 drivers/infiniband/hw/mlx5/cq.c 	if (IS_ERR(cq->buf.umem)) {
umem              738 drivers/infiniband/hw/mlx5/cq.c 		err = PTR_ERR(cq->buf.umem);
umem              746 drivers/infiniband/hw/mlx5/cq.c 	mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, 0, &npages, &page_shift,
umem              760 drivers/infiniband/hw/mlx5/cq.c 	mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, pas, 0);
umem              818 drivers/infiniband/hw/mlx5/cq.c 	ib_umem_release(cq->buf.umem);
umem              828 drivers/infiniband/hw/mlx5/cq.c 	ib_umem_release(cq->buf.umem);
umem             1120 drivers/infiniband/hw/mlx5/cq.c 	struct ib_umem *umem;
umem             1135 drivers/infiniband/hw/mlx5/cq.c 	umem = ib_umem_get(udata, ucmd.buf_addr,
umem             1138 drivers/infiniband/hw/mlx5/cq.c 	if (IS_ERR(umem)) {
umem             1139 drivers/infiniband/hw/mlx5/cq.c 		err = PTR_ERR(umem);
umem             1143 drivers/infiniband/hw/mlx5/cq.c 	mlx5_ib_cont_pages(umem, ucmd.buf_addr, 0, &npages, page_shift,
umem             1146 drivers/infiniband/hw/mlx5/cq.c 	cq->resize_umem = umem;
umem             1322 drivers/infiniband/hw/mlx5/cq.c 		ib_umem_release(cq->buf.umem);
umem             1323 drivers/infiniband/hw/mlx5/cq.c 		cq->buf.umem = cq->resize_umem;
umem              109 drivers/infiniband/hw/mlx5/devx.c 	struct ib_umem			*umem;
umem             2124 drivers/infiniband/hw/mlx5/devx.c 	obj->umem = ib_umem_get(&attrs->driver_udata, addr, size, access, 0);
umem             2125 drivers/infiniband/hw/mlx5/devx.c 	if (IS_ERR(obj->umem))
umem             2126 drivers/infiniband/hw/mlx5/devx.c 		return PTR_ERR(obj->umem);
umem             2128 drivers/infiniband/hw/mlx5/devx.c 	mlx5_ib_cont_pages(obj->umem, obj->umem->address,
umem             2133 drivers/infiniband/hw/mlx5/devx.c 		ib_umem_release(obj->umem);
umem             2138 drivers/infiniband/hw/mlx5/devx.c 	obj->page_offset = obj->umem->address & page_mask;
umem             2157 drivers/infiniband/hw/mlx5/devx.c 	void *umem;
umem             2160 drivers/infiniband/hw/mlx5/devx.c 	umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
umem             2161 drivers/infiniband/hw/mlx5/devx.c 	mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);
umem             2164 drivers/infiniband/hw/mlx5/devx.c 	MLX5_SET64(umem, umem, num_of_mtt, obj->ncont);
umem             2165 drivers/infiniband/hw/mlx5/devx.c 	MLX5_SET(umem, umem, log_page_size, obj->page_shift -
umem             2167 drivers/infiniband/hw/mlx5/devx.c 	MLX5_SET(umem, umem, page_offset, obj->page_offset);
umem             2168 drivers/infiniband/hw/mlx5/devx.c 	mlx5_ib_populate_pas(dev, obj->umem, obj->page_shift, mtt,
umem             2169 drivers/infiniband/hw/mlx5/devx.c 			     (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
umem             2221 drivers/infiniband/hw/mlx5/devx.c 	ib_umem_release(obj->umem);
umem             2239 drivers/infiniband/hw/mlx5/devx.c 	ib_umem_release(obj->umem);
umem               41 drivers/infiniband/hw/mlx5/doorbell.c 	struct ib_umem	       *umem;
umem               67 drivers/infiniband/hw/mlx5/doorbell.c 	page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
umem               68 drivers/infiniband/hw/mlx5/doorbell.c 	if (IS_ERR(page->umem)) {
umem               69 drivers/infiniband/hw/mlx5/doorbell.c 		err = PTR_ERR(page->umem);
umem               77 drivers/infiniband/hw/mlx5/doorbell.c 	db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);
umem               93 drivers/infiniband/hw/mlx5/doorbell.c 		ib_umem_release(db->u.user_page->umem);
umem               46 drivers/infiniband/hw/mlx5/mem.c void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
umem               65 drivers/infiniband/hw/mlx5/mem.c 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
umem              128 drivers/infiniband/hw/mlx5/mem.c void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
umem              141 drivers/infiniband/hw/mlx5/mem.c 	if (umem->is_odp) {
umem              147 drivers/infiniband/hw/mlx5/mem.c 				to_ib_umem_odp(umem)->dma_list[offset + i];
umem              155 drivers/infiniband/hw/mlx5/mem.c 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
umem              192 drivers/infiniband/hw/mlx5/mem.c void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
umem              195 drivers/infiniband/hw/mlx5/mem.c 	return __mlx5_ib_populate_pas(dev, umem, page_shift, 0,
umem              196 drivers/infiniband/hw/mlx5/mem.c 				      ib_umem_num_pages(umem), pas,
umem              311 drivers/infiniband/hw/mlx5/mlx5_ib.h 	struct ib_umem		*umem;
umem              341 drivers/infiniband/hw/mlx5/mlx5_ib.h 	struct ib_umem	       *umem;
umem              453 drivers/infiniband/hw/mlx5/mlx5_ib.h 	struct ib_umem		*umem;
umem              496 drivers/infiniband/hw/mlx5/mlx5_ib.h 	struct ib_umem		*umem;
umem              546 drivers/infiniband/hw/mlx5/mlx5_ib.h 	struct ib_umem	       *umem;
umem              601 drivers/infiniband/hw/mlx5/mlx5_ib.h 	struct ib_umem	       *umem;
umem              630 drivers/infiniband/hw/mlx5/mlx5_ib.h 	return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
umem              631 drivers/infiniband/hw/mlx5/mlx5_ib.h 	       mr->umem->is_odp;
umem             1211 drivers/infiniband/hw/mlx5/mlx5_ib.h void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
umem             1215 drivers/infiniband/hw/mlx5/mlx5_ib.h void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
umem             1218 drivers/infiniband/hw/mlx5/mlx5_ib.h void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
umem              724 drivers/infiniband/hw/mlx5/mr.c 	mr->umem = NULL;
umem              757 drivers/infiniband/hw/mlx5/mr.c 		       struct ib_umem **umem, int *npages, int *page_shift,
umem              762 drivers/infiniband/hw/mlx5/mr.c 	*umem = NULL;
umem              774 drivers/infiniband/hw/mlx5/mr.c 		u = &odp->umem;
umem              798 drivers/infiniband/hw/mlx5/mr.c 	*umem = u;
umem              850 drivers/infiniband/hw/mlx5/mr.c 				  struct ib_pd *pd, struct ib_umem *umem,
umem              875 drivers/infiniband/hw/mlx5/mr.c 	mr->umem = umem;
umem              890 drivers/infiniband/hw/mlx5/mr.c 	struct ib_umem *umem = mr->umem;
umem              899 drivers/infiniband/hw/mlx5/mr.c 	npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx);
umem              902 drivers/infiniband/hw/mlx5/mr.c 		__mlx5_ib_populate_pas(dev, umem, page_shift,
umem             1052 drivers/infiniband/hw/mlx5/mr.c 				     struct ib_umem *umem, int npages,
umem             1082 drivers/infiniband/hw/mlx5/mr.c 		mlx5_ib_populate_pas(dev, umem, page_shift, pas,
umem             1189 drivers/infiniband/hw/mlx5/mr.c 	mr->umem = NULL;
umem             1257 drivers/infiniband/hw/mlx5/mr.c 	struct ib_umem *umem;
umem             1282 drivers/infiniband/hw/mlx5/mr.c 	err = mr_umem_get(dev, udata, start, length, access_flags, &umem,
umem             1291 drivers/infiniband/hw/mlx5/mr.c 		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
umem             1308 drivers/infiniband/hw/mlx5/mr.c 		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
umem             1320 drivers/infiniband/hw/mlx5/mr.c 	mr->umem = umem;
umem             1339 drivers/infiniband/hw/mlx5/mr.c 		to_ib_umem_odp(mr->umem)->private = mr;
umem             1347 drivers/infiniband/hw/mlx5/mr.c 	ib_umem_release(umem);
umem             1415 drivers/infiniband/hw/mlx5/mr.c 	if (!mr->umem)
umem             1425 drivers/infiniband/hw/mlx5/mr.c 		addr = mr->umem->address;
umem             1426 drivers/infiniband/hw/mlx5/mr.c 		len = mr->umem->length;
umem             1435 drivers/infiniband/hw/mlx5/mr.c 		ib_umem_release(mr->umem);
umem             1436 drivers/infiniband/hw/mlx5/mr.c 		mr->umem = NULL;
umem             1438 drivers/infiniband/hw/mlx5/mr.c 				  &mr->umem, &npages, &page_shift, &ncont,
umem             1456 drivers/infiniband/hw/mlx5/mr.c 		mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
umem             1497 drivers/infiniband/hw/mlx5/mr.c 	ib_umem_release(mr->umem);
umem             1498 drivers/infiniband/hw/mlx5/mr.c 	mr->umem = NULL;
umem             1576 drivers/infiniband/hw/mlx5/mr.c 	struct ib_umem *umem = mr->umem;
umem             1579 drivers/infiniband/hw/mlx5/mr.c 		struct ib_umem_odp *umem_odp = to_ib_umem_odp(umem);
umem             1610 drivers/infiniband/hw/mlx5/mr.c 		umem = NULL;
umem             1620 drivers/infiniband/hw/mlx5/mr.c 	ib_umem_release(umem);
umem             1621 drivers/infiniband/hw/mlx5/mr.c 	if (umem)
umem             1723 drivers/infiniband/hw/mlx5/mr.c 	mr->umem = NULL;
umem             1843 drivers/infiniband/hw/mlx5/mr.c 	mr->umem = NULL;
umem              109 drivers/infiniband/hw/mlx5/odp.c 	return to_ib_umem_odp(mr->umem)->per_mm;
umem              201 drivers/infiniband/hw/mlx5/odp.c 	lockdep_assert_held(&to_ib_umem_odp(mr->umem)->umem_mutex);
umem              228 drivers/infiniband/hw/mlx5/odp.c 	struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
umem              438 drivers/infiniband/hw/mlx5/odp.c 	mr->umem = &umem_odp->umem;
umem              480 drivers/infiniband/hw/mlx5/odp.c 	struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
umem              511 drivers/infiniband/hw/mlx5/odp.c 		mtt->umem = &odp->umem;
umem              566 drivers/infiniband/hw/mlx5/odp.c 	imr->umem = &umem_odp->umem;
umem              616 drivers/infiniband/hw/mlx5/odp.c 	struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
umem              642 drivers/infiniband/hw/mlx5/odp.c 	if (prefetch && !downgrade && !odp->umem.writable) {
umem              650 drivers/infiniband/hw/mlx5/odp.c 	if (odp->umem.writable && !downgrade)
umem              132 drivers/infiniband/hw/mlx5/qp.c static int mlx5_ib_read_user_wqe_common(struct ib_umem *umem,
umem              153 drivers/infiniband/hw/mlx5/qp.c 	ret = ib_umem_copy_from(buffer, umem, offset, copy_length);
umem              170 drivers/infiniband/hw/mlx5/qp.c 	struct ib_umem *umem = base->ubuffer.umem;
umem              183 drivers/infiniband/hw/mlx5/qp.c 	ret = mlx5_ib_read_user_wqe_common(umem,
umem              213 drivers/infiniband/hw/mlx5/qp.c 	ret = mlx5_ib_read_user_wqe_common(umem,
umem              236 drivers/infiniband/hw/mlx5/qp.c 	struct ib_umem *umem = base->ubuffer.umem;
umem              241 drivers/infiniband/hw/mlx5/qp.c 	ret = mlx5_ib_read_user_wqe_common(umem,
umem              263 drivers/infiniband/hw/mlx5/qp.c 	struct ib_umem *umem = srq->umem;
umem              267 drivers/infiniband/hw/mlx5/qp.c 	ret = mlx5_ib_read_user_wqe_common(umem,
umem              747 drivers/infiniband/hw/mlx5/qp.c 			    struct ib_umem **umem, int *npages, int *page_shift,
umem              752 drivers/infiniband/hw/mlx5/qp.c 	*umem = ib_umem_get(udata, addr, size, 0, 0);
umem              753 drivers/infiniband/hw/mlx5/qp.c 	if (IS_ERR(*umem)) {
umem              755 drivers/infiniband/hw/mlx5/qp.c 		return PTR_ERR(*umem);
umem              758 drivers/infiniband/hw/mlx5/qp.c 	mlx5_ib_cont_pages(*umem, addr, 0, npages, page_shift, ncont, NULL);
umem              772 drivers/infiniband/hw/mlx5/qp.c 	ib_umem_release(*umem);
umem              773 drivers/infiniband/hw/mlx5/qp.c 	*umem = NULL;
umem              791 drivers/infiniband/hw/mlx5/qp.c 	ib_umem_release(rwq->umem);
umem              809 drivers/infiniband/hw/mlx5/qp.c 	rwq->umem = ib_umem_get(udata, ucmd->buf_addr, rwq->buf_size, 0, 0);
umem              810 drivers/infiniband/hw/mlx5/qp.c 	if (IS_ERR(rwq->umem)) {
umem              812 drivers/infiniband/hw/mlx5/qp.c 		err = PTR_ERR(rwq->umem);
umem              816 drivers/infiniband/hw/mlx5/qp.c 	mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, 0, &npages, &page_shift,
umem              844 drivers/infiniband/hw/mlx5/qp.c 	ib_umem_release(rwq->umem);
umem              920 drivers/infiniband/hw/mlx5/qp.c 				       ubuffer->buf_size, &ubuffer->umem,
umem              925 drivers/infiniband/hw/mlx5/qp.c 		ubuffer->umem = NULL;
umem              940 drivers/infiniband/hw/mlx5/qp.c 	if (ubuffer->umem)
umem              941 drivers/infiniband/hw/mlx5/qp.c 		mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift, pas, 0);
umem              977 drivers/infiniband/hw/mlx5/qp.c 	ib_umem_release(ubuffer->umem);
umem              996 drivers/infiniband/hw/mlx5/qp.c 	ib_umem_release(base->ubuffer.umem);
umem             1238 drivers/infiniband/hw/mlx5/qp.c 			       &sq->ubuffer.umem, &npages, &page_shift, &ncont,
umem             1275 drivers/infiniband/hw/mlx5/qp.c 	mlx5_ib_populate_pas(dev, sq->ubuffer.umem, page_shift, pas, 0);
umem             1287 drivers/infiniband/hw/mlx5/qp.c 	ib_umem_release(sq->ubuffer.umem);
umem             1288 drivers/infiniband/hw/mlx5/qp.c 	sq->ubuffer.umem = NULL;
umem             1298 drivers/infiniband/hw/mlx5/qp.c 	ib_umem_release(sq->ubuffer.umem);
umem             6001 drivers/infiniband/hw/mlx5/qp.c 	mlx5_ib_populate_pas(dev, rwq->umem, rwq->page_shift, rq_pas0, 0);
umem               83 drivers/infiniband/hw/mlx5/srq.c 	srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0, 0);
umem               84 drivers/infiniband/hw/mlx5/srq.c 	if (IS_ERR(srq->umem)) {
umem               86 drivers/infiniband/hw/mlx5/srq.c 		err = PTR_ERR(srq->umem);
umem               90 drivers/infiniband/hw/mlx5/srq.c 	mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, 0, &npages,
umem              105 drivers/infiniband/hw/mlx5/srq.c 	mlx5_ib_populate_pas(dev, srq->umem, page_shift, in->pas, 0);
umem              126 drivers/infiniband/hw/mlx5/srq.c 	ib_umem_release(srq->umem);
umem              206 drivers/infiniband/hw/mlx5/srq.c 	ib_umem_release(srq->umem);
umem              400 drivers/infiniband/hw/mlx5/srq.c 		ib_umem_release(msrq->umem);
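
The mlx5 CQ/QP/SRQ paths above all follow the same sequence: ib_umem_get(), then mlx5_ib_cont_pages() to derive npages/page_shift/ncont, then mlx5_ib_populate_pas() to write the PAS array of the firmware command. A sketch reassembled from the srq.c fragments above; the trailing mlx5_ib_cont_pages() arguments are reproduced from those calls and may differ in other kernel versions:

	int npages, page_shift, ncont;

	srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0, 0);
	if (IS_ERR(srq->umem))
		return PTR_ERR(srq->umem);

	mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, 0, &npages, &page_shift,
			   &ncont, NULL);
	mlx5_ib_populate_pas(dev, srq->umem, page_shift, in->pas, 0);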
umem              849 drivers/infiniband/hw/mthca/mthca_provider.c 	mr->umem = NULL;
umem              883 drivers/infiniband/hw/mthca/mthca_provider.c 	mr->umem = ib_umem_get(udata, start, length, acc,
umem              886 drivers/infiniband/hw/mthca/mthca_provider.c 	if (IS_ERR(mr->umem)) {
umem              887 drivers/infiniband/hw/mthca/mthca_provider.c 		err = PTR_ERR(mr->umem);
umem              891 drivers/infiniband/hw/mthca/mthca_provider.c 	n = ib_umem_num_pages(mr->umem);
umem              909 drivers/infiniband/hw/mthca/mthca_provider.c 	for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
umem              944 drivers/infiniband/hw/mthca/mthca_provider.c 	ib_umem_release(mr->umem);
umem              956 drivers/infiniband/hw/mthca/mthca_provider.c 	ib_umem_release(mmr->umem);
umem               75 drivers/infiniband/hw/mthca/mthca_provider.h 	struct ib_umem   *umem;
umem              195 drivers/infiniband/hw/ocrdma/ocrdma.h 	struct ib_umem *umem;
umem              827 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	struct ib_umem *umem = mr->umem;
umem              837 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	for_each_sg_dma_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
umem              878 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	mr->umem = ib_umem_get(udata, start, len, acc, 0);
umem              879 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	if (IS_ERR(mr->umem)) {
umem              883 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	num_pbes = ib_umem_page_count(mr->umem);
umem              889 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	mr->hwmr.fbo = ib_umem_offset(mr->umem);
umem              928 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	ib_umem_release(mr->umem);
umem              264 drivers/infiniband/hw/qedr/qedr.h 	struct ib_umem *umem;
umem              482 drivers/infiniband/hw/qedr/qedr.h 	struct ib_umem *umem;
umem              601 drivers/infiniband/hw/qedr/verbs.c static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
umem              633 drivers/infiniband/hw/qedr/verbs.c 	for_each_sg_dma_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
umem              709 drivers/infiniband/hw/qedr/verbs.c 	q->umem = ib_umem_get(udata, q->buf_addr, q->buf_len, access, dmasync);
umem              710 drivers/infiniband/hw/qedr/verbs.c 	if (IS_ERR(q->umem)) {
umem              712 drivers/infiniband/hw/qedr/verbs.c 		       PTR_ERR(q->umem));
umem              713 drivers/infiniband/hw/qedr/verbs.c 		return PTR_ERR(q->umem);
umem              716 drivers/infiniband/hw/qedr/verbs.c 	fw_pages = ib_umem_page_count(q->umem) <<
umem              729 drivers/infiniband/hw/qedr/verbs.c 		qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
umem              742 drivers/infiniband/hw/qedr/verbs.c 	ib_umem_release(q->umem);
umem              743 drivers/infiniband/hw/qedr/verbs.c 	q->umem = NULL;
umem              935 drivers/infiniband/hw/qedr/verbs.c 		ib_umem_release(cq->q.umem);
umem              975 drivers/infiniband/hw/qedr/verbs.c 		ib_umem_release(cq->q.umem);
umem             1267 drivers/infiniband/hw/qedr/verbs.c 	ib_umem_release(srq->usrq.umem);
umem             1301 drivers/infiniband/hw/qedr/verbs.c 		ib_umem_release(srq->usrq.umem);
umem             1566 drivers/infiniband/hw/qedr/verbs.c 	qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
umem             1573 drivers/infiniband/hw/qedr/verbs.c 	qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
umem             1579 drivers/infiniband/hw/qedr/verbs.c 	ib_umem_release(qp->usq.umem);
umem             1580 drivers/infiniband/hw/qedr/verbs.c 	qp->usq.umem = NULL;
umem             1582 drivers/infiniband/hw/qedr/verbs.c 	ib_umem_release(qp->urq.umem);
umem             1583 drivers/infiniband/hw/qedr/verbs.c 	qp->urq.umem = NULL;
umem             2624 drivers/infiniband/hw/qedr/verbs.c 	mr->umem = ib_umem_get(udata, start, len, acc, 0);
umem             2625 drivers/infiniband/hw/qedr/verbs.c 	if (IS_ERR(mr->umem)) {
umem             2630 drivers/infiniband/hw/qedr/verbs.c 	rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
umem             2634 drivers/infiniband/hw/qedr/verbs.c 	qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
umem             2657 drivers/infiniband/hw/qedr/verbs.c 	mr->hw_mr.fbo = ib_umem_offset(mr->umem);
umem             2704 drivers/infiniband/hw/qedr/verbs.c 	ib_umem_release(mr->umem);
umem               70 drivers/infiniband/hw/usnic/usnic_ib.h 	struct usnic_uiom_reg		*umem;
umem              619 drivers/infiniband/hw/usnic/usnic_ib_verbs.c 	mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length,
umem              621 drivers/infiniband/hw/usnic/usnic_ib_verbs.c 	if (IS_ERR_OR_NULL(mr->umem)) {
umem              622 drivers/infiniband/hw/usnic/usnic_ib_verbs.c 		err = mr->umem ? PTR_ERR(mr->umem) : -EFAULT;
umem              638 drivers/infiniband/hw/usnic/usnic_ib_verbs.c 	usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length);
umem              640 drivers/infiniband/hw/usnic/usnic_ib_verbs.c 	usnic_uiom_reg_release(mr->umem);
umem               91 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h 	struct ib_umem *umem;
umem              142 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h 	struct ib_umem *umem;
umem              172 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h 	struct ib_umem *umem;
umem              543 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h 				struct ib_umem *umem, u64 offset);
umem              138 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 		cq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size,
umem              140 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 		if (IS_ERR(cq->umem)) {
umem              141 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 			ret = PTR_ERR(cq->umem);
umem              145 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 		npages = ib_umem_page_count(cq->umem);
umem              173 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 		pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0);
umem              216 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 	ib_umem_release(cq->umem);
umem              228 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 	ib_umem_release(cq->umem);
umem              183 drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c 				struct ib_umem *umem, u64 offset)
umem              192 drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c 	for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
umem              117 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c 	struct ib_umem *umem;
umem              129 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c 	umem = ib_umem_get(udata, start, length, access_flags, 0);
umem              130 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c 	if (IS_ERR(umem)) {
umem              133 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c 		return ERR_CAST(umem);
umem              136 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c 	npages = ib_umem_num_pages(umem);
umem              152 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c 	mr->umem = umem;
umem              161 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c 	ret = pvrdma_page_dir_insert_umem(&mr->pdir, mr->umem, 0);
umem              190 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c 	ib_umem_release(umem);
umem              257 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c 	mr->umem = NULL;
umem              293 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c 	ib_umem_release(mr->umem);
umem              149 drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c 	srq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size, 0, 0);
umem              150 drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c 	if (IS_ERR(srq->umem)) {
umem              151 drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c 		ret = PTR_ERR(srq->umem);
umem              155 drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c 	srq->npages = ib_umem_page_count(srq->umem);
umem              171 drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c 	pvrdma_page_dir_insert_umem(&srq->pdir, srq->umem, 0);
umem              209 drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c 	ib_umem_release(srq->umem);
umem              229 drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c 	ib_umem_release(srq->umem);
umem              385 drivers/infiniband/sw/rdmavt/mr.c 	struct ib_umem *umem;
umem              393 drivers/infiniband/sw/rdmavt/mr.c 	umem = ib_umem_get(udata, start, length, mr_access_flags, 0);
umem              394 drivers/infiniband/sw/rdmavt/mr.c 	if (IS_ERR(umem))
umem              395 drivers/infiniband/sw/rdmavt/mr.c 		return (void *)umem;
umem              397 drivers/infiniband/sw/rdmavt/mr.c 	n = ib_umem_num_pages(umem);
umem              408 drivers/infiniband/sw/rdmavt/mr.c 	mr->mr.offset = ib_umem_offset(umem);
umem              410 drivers/infiniband/sw/rdmavt/mr.c 	mr->umem = umem;
umem              415 drivers/infiniband/sw/rdmavt/mr.c 	for_each_sg_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
umem              437 drivers/infiniband/sw/rdmavt/mr.c 	ib_umem_release(umem);
umem              565 drivers/infiniband/sw/rdmavt/mr.c 	ib_umem_release(mr->umem);
umem              667 drivers/infiniband/sw/rdmavt/mr.c 	if (!mr->mr.lkey || mr->umem)
umem               59 drivers/infiniband/sw/rdmavt/mr.h 	struct ib_umem *umem;
umem               99 drivers/infiniband/sw/rxe/rxe_mr.c 	ib_umem_release(mem->umem);
umem              166 drivers/infiniband/sw/rxe/rxe_mr.c 	struct ib_umem		*umem;
umem              172 drivers/infiniband/sw/rxe/rxe_mr.c 	umem = ib_umem_get(udata, start, length, access, 0);
umem              173 drivers/infiniband/sw/rxe/rxe_mr.c 	if (IS_ERR(umem)) {
umem              175 drivers/infiniband/sw/rxe/rxe_mr.c 			(int)PTR_ERR(umem));
umem              180 drivers/infiniband/sw/rxe/rxe_mr.c 	mem->umem = umem;
umem              181 drivers/infiniband/sw/rxe/rxe_mr.c 	num_buf = ib_umem_num_pages(umem);
umem              188 drivers/infiniband/sw/rxe/rxe_mr.c 		ib_umem_release(umem);
umem              200 drivers/infiniband/sw/rxe/rxe_mr.c 		for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
umem              223 drivers/infiniband/sw/rxe/rxe_mr.c 	mem->umem		= umem;
umem              228 drivers/infiniband/sw/rxe/rxe_mr.c 	mem->offset		= ib_umem_offset(umem);
umem              326 drivers/infiniband/sw/rxe/rxe_verbs.h 	struct ib_umem		*umem;
umem              169 drivers/infiniband/sw/siw/siw.h 		struct siw_umem *umem;
umem               69 drivers/infiniband/sw/siw/siw_mem.c void siw_umem_release(struct siw_umem *umem, bool dirty)
umem               71 drivers/infiniband/sw/siw/siw_mem.c 	struct mm_struct *mm_s = umem->owning_mm;
umem               72 drivers/infiniband/sw/siw/siw_mem.c 	int i, num_pages = umem->num_pages;
umem               77 drivers/infiniband/sw/siw/siw_mem.c 		siw_free_plist(&umem->page_chunk[i], to_free,
umem               78 drivers/infiniband/sw/siw/siw_mem.c 			       umem->writable && dirty);
umem               79 drivers/infiniband/sw/siw/siw_mem.c 		kfree(umem->page_chunk[i].plist);
umem               82 drivers/infiniband/sw/siw/siw_mem.c 	atomic64_sub(umem->num_pages, &mm_s->pinned_vm);
umem               85 drivers/infiniband/sw/siw/siw_mem.c 	kfree(umem->page_chunk);
umem               86 drivers/infiniband/sw/siw/siw_mem.c 	kfree(umem);
umem              148 drivers/infiniband/sw/siw/siw_mem.c 			siw_umem_release(mem->umem, true);
umem              370 drivers/infiniband/sw/siw/siw_mem.c 	struct siw_umem *umem;
umem              387 drivers/infiniband/sw/siw/siw_mem.c 	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
umem              388 drivers/infiniband/sw/siw/siw_mem.c 	if (!umem)
umem              392 drivers/infiniband/sw/siw/siw_mem.c 	umem->owning_mm = mm_s;
umem              393 drivers/infiniband/sw/siw/siw_mem.c 	umem->writable = writable;
umem              408 drivers/infiniband/sw/siw/siw_mem.c 	umem->fp_addr = first_page_va;
umem              410 drivers/infiniband/sw/siw/siw_mem.c 	umem->page_chunk =
umem              412 drivers/infiniband/sw/siw/siw_mem.c 	if (!umem->page_chunk) {
umem              419 drivers/infiniband/sw/siw/siw_mem.c 		umem->page_chunk[i].plist =
umem              421 drivers/infiniband/sw/siw/siw_mem.c 		if (!umem->page_chunk[i].plist) {
umem              427 drivers/infiniband/sw/siw/siw_mem.c 			struct page **plist = &umem->page_chunk[i].plist[got];
umem              435 drivers/infiniband/sw/siw/siw_mem.c 			umem->num_pages += rv;
umem              447 drivers/infiniband/sw/siw/siw_mem.c 		return umem;
umem              449 drivers/infiniband/sw/siw/siw_mem.c 	siw_umem_release(umem, false);
umem               10 drivers/infiniband/sw/siw/siw_mem.h void siw_umem_release(struct siw_umem *umem, bool dirty);
umem               63 drivers/infiniband/sw/siw/siw_mem.h static inline struct page *siw_get_upage(struct siw_umem *umem, u64 addr)
umem               65 drivers/infiniband/sw/siw/siw_mem.h 	unsigned int page_idx = (addr - umem->fp_addr) >> PAGE_SHIFT,
umem               69 drivers/infiniband/sw/siw/siw_mem.h 	if (likely(page_idx < umem->num_pages))
umem               70 drivers/infiniband/sw/siw/siw_mem.h 		return umem->page_chunk[chunk_idx].plist[page_in_chunk];
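siw_get_upage() translates a virtual address inside the registered range into a pinned page by indexing fixed-size chunks of the page list. A small sketch of the same index arithmetic, assuming the siw_umem layout visible above; SKETCH_PAGES_PER_CHUNK is a hypothetical name and 256 only an assumed illustration value, not siw's actual constant:

    /* Illustration of the chunked page-list lookup used by siw_get_upage(). */
    #define SKETCH_PAGES_PER_CHUNK 256   /* assumption for illustration only */

    static inline struct page *sketch_get_upage(struct siw_umem *umem, u64 addr)
    {
            unsigned int page_idx = (addr - umem->fp_addr) >> PAGE_SHIFT;
            unsigned int chunk_idx = page_idx / SKETCH_PAGES_PER_CHUNK;
            unsigned int page_in_chunk = page_idx % SKETCH_PAGES_PER_CHUNK;

            if (likely(page_idx < umem->num_pages))
                    return umem->page_chunk[chunk_idx].plist[page_in_chunk];
            return NULL;
    }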
umem               29 drivers/infiniband/sw/siw/siw_qp_rx.c static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
umem               39 drivers/infiniband/sw/siw/siw_qp_rx.c 		p = siw_get_upage(umem, dest_addr);
umem               44 drivers/infiniband/sw/siw/siw_qp_rx.c 				(void *)(uintptr_t)umem->fp_addr);
umem              492 drivers/infiniband/sw/siw/siw_qp_rx.c 			rv = siw_rx_umem(srx, mem_p->umem,
umem              605 drivers/infiniband/sw/siw/siw_qp_rx.c 		rv = siw_rx_umem(srx, mem->umem,
umem              849 drivers/infiniband/sw/siw/siw_qp_rx.c 		rv = siw_rx_umem(srx, mem_p->umem, sge->laddr + wqe->processed,
umem               72 drivers/infiniband/sw/siw/siw_qp_tx.c 				p = siw_get_upage(mem->umem, sge->laddr);
umem               90 drivers/infiniband/sw/siw/siw_qp_tx.c 					p = siw_get_upage(mem->umem,
umem              497 drivers/infiniband/sw/siw/siw_qp_tx.c 					p = siw_get_upage(mem->umem,
umem             1299 drivers/infiniband/sw/siw/siw_verbs.c 	struct siw_umem *umem = NULL;
umem             1332 drivers/infiniband/sw/siw/siw_verbs.c 	umem = siw_umem_get(start, len, ib_access_writable(rights));
umem             1333 drivers/infiniband/sw/siw/siw_verbs.c 	if (IS_ERR(umem)) {
umem             1334 drivers/infiniband/sw/siw/siw_verbs.c 		rv = PTR_ERR(umem);
umem             1336 drivers/infiniband/sw/siw/siw_verbs.c 		umem = NULL;
umem             1344 drivers/infiniband/sw/siw/siw_verbs.c 	rv = siw_mr_add_mem(mr, pd, umem, start, len, rights);
umem             1384 drivers/infiniband/sw/siw/siw_verbs.c 		if (umem)
umem             1385 drivers/infiniband/sw/siw/siw_verbs.c 			siw_umem_release(umem, false);
umem             12831 drivers/net/ethernet/intel/i40e/i40e_main.c 		return i40e_xsk_umem_setup(vsi, xdp->xsk.umem,
umem               19 drivers/net/ethernet/intel/i40e/i40e_xsk.c static int i40e_xsk_umem_dma_map(struct i40e_vsi *vsi, struct xdp_umem *umem)
umem               27 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	for (i = 0; i < umem->npgs; i++) {
umem               28 drivers/net/ethernet/intel/i40e/i40e_xsk.c 		dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,
umem               33 drivers/net/ethernet/intel/i40e/i40e_xsk.c 		umem->pages[i].dma = dma;
umem               40 drivers/net/ethernet/intel/i40e/i40e_xsk.c 		dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
umem               42 drivers/net/ethernet/intel/i40e/i40e_xsk.c 		umem->pages[i].dma = 0;
umem               53 drivers/net/ethernet/intel/i40e/i40e_xsk.c static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem)
umem               61 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	for (i = 0; i < umem->npgs; i++) {
umem               62 drivers/net/ethernet/intel/i40e/i40e_xsk.c 		dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
umem               65 drivers/net/ethernet/intel/i40e/i40e_xsk.c 		umem->pages[i].dma = 0;
umem               77 drivers/net/ethernet/intel/i40e/i40e_xsk.c static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
umem               99 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
umem              101 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	err = i40e_xsk_umem_dma_map(vsi, umem);
umem              137 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	struct xdp_umem *umem;
umem              141 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	umem = xdp_get_umem_from_qid(netdev, qid);
umem              142 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	if (!umem)
umem              154 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	i40e_xsk_umem_dma_unmap(vsi, umem);
umem              175 drivers/net/ethernet/intel/i40e/i40e_xsk.c int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
umem              178 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	return umem ? i40e_xsk_umem_enable(vsi, umem, qid) :
umem              193 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	struct xdp_umem *umem = rx_ring->xsk_umem;
umem              208 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset);
umem              248 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	struct xdp_umem *umem = rx_ring->xsk_umem;
umem              257 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	if (!xsk_umem_peek_addr(umem, &handle)) {
umem              262 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	hr = umem->headroom + XDP_PACKET_HEADROOM;
umem              264 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	bi->dma = xdp_umem_get_dma(umem, handle);
umem              267 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	bi->addr = xdp_umem_get_data(umem, handle);
umem              270 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
umem              272 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	xsk_umem_discard_addr(umem);
umem              289 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	struct xdp_umem *umem = rx_ring->xsk_umem;
umem              292 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	if (!xsk_umem_peek_addr_rq(umem, &handle)) {
umem              299 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	hr = umem->headroom + XDP_PACKET_HEADROOM;
umem              301 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	bi->dma = xdp_umem_get_dma(umem, handle);
umem              304 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	bi->addr = xdp_umem_get_data(umem, handle);
umem              307 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
umem              309 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	xsk_umem_discard_addr_rq(umem);
umem              724 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	struct xdp_umem *umem = tx_ring->xsk_umem;
umem              764 drivers/net/ethernet/intel/i40e/i40e_xsk.c 		xsk_umem_complete_tx(umem, xsk_frames);
umem              844 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	struct xdp_umem *umem = tx_ring->xsk_umem;
umem              864 drivers/net/ethernet/intel/i40e/i40e_xsk.c 		xsk_umem_complete_tx(umem, xsk_frames);
umem               13 drivers/net/ethernet/intel/i40e/i40e_xsk.h int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
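i40e_xsk_umem_dma_map() maps every pinned UMEM page for device DMA and records the address in umem->pages[i].dma; the unmap path reverses it, and ixgbe below does the same. A hedged, driver-agnostic sketch of that loop; the attrs argument is left at 0 here, whereas the Intel drivers pass driver-specific mapping attributes:

    /* Sketch of per-page UMEM DMA mapping as used on the XSK enable path. */
    #include <linux/dma-mapping.h>
    #include <linux/mm.h>
    #include <net/xdp_sock.h>

    static int sketch_umem_dma_map(struct device *dev, struct xdp_umem *umem)
    {
            unsigned int i, j;

            for (i = 0; i < umem->npgs; i++) {
                    dma_addr_t dma = dma_map_page_attrs(dev, umem->pgs[i], 0,
                                                        PAGE_SIZE,
                                                        DMA_BIDIRECTIONAL, 0);
                    if (dma_mapping_error(dev, dma))
                            goto out_unmap;
                    umem->pages[i].dma = dma;
            }
            return 0;

    out_unmap:
            for (j = 0; j < i; j++) {
                    dma_unmap_page_attrs(dev, umem->pages[j].dma, PAGE_SIZE,
                                         DMA_BIDIRECTIONAL, 0);
                    umem->pages[j].dma = 0;
            }
            return -ENOMEM;
    }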
umem             10312 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		return ixgbe_xsk_umem_setup(adapter, xdp->xsk.umem,
umem               33 drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
umem               24 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 				  struct xdp_umem *umem)
umem               30 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	for (i = 0; i < umem->npgs; i++) {
umem               31 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 		dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,
umem               36 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 		umem->pages[i].dma = dma;
umem               43 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 		dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
umem               45 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 		umem->pages[i].dma = 0;
umem               52 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 				     struct xdp_umem *umem)
umem               57 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	for (i = 0; i < umem->npgs; i++) {
umem               58 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 		dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
umem               61 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 		umem->pages[i].dma = 0;
umem               66 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 				 struct xdp_umem *umem,
umem               85 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
umem               87 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	err = ixgbe_xsk_umem_dma_map(adapter, umem);
umem              113 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	struct xdp_umem *umem;
umem              116 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	umem = xdp_get_umem_from_qid(adapter->netdev, qid);
umem              117 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	if (!umem)
umem              127 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	ixgbe_xsk_umem_dma_unmap(adapter, umem);
umem              135 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
umem              138 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	return umem ? ixgbe_xsk_umem_enable(adapter, umem, qid) :
umem              146 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	struct xdp_umem *umem = rx_ring->xsk_umem;
umem              158 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset);
umem              258 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	struct xdp_umem *umem = rx_ring->xsk_umem;
umem              265 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	if (!xsk_umem_peek_addr(umem, &handle)) {
umem              270 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	hr = umem->headroom + XDP_PACKET_HEADROOM;
umem              272 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	bi->dma = xdp_umem_get_dma(umem, handle);
umem              275 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	bi->addr = xdp_umem_get_data(umem, handle);
umem              278 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
umem              280 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	xsk_umem_discard_addr(umem);
umem              287 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	struct xdp_umem *umem = rx_ring->xsk_umem;
umem              290 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	if (!xsk_umem_peek_addr_rq(umem, &handle)) {
umem              297 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	hr = umem->headroom + XDP_PACKET_HEADROOM;
umem              299 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	bi->dma = xdp_umem_get_dma(umem, handle);
umem              302 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	bi->addr = xdp_umem_get_data(umem, handle);
umem              305 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
umem              307 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	xsk_umem_discard_addr_rq(umem);
umem              645 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	struct xdp_umem *umem = tx_ring->xsk_umem;
umem              690 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 		xsk_umem_complete_tx(umem, xsk_frames);
umem              732 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	struct xdp_umem *umem = tx_ring->xsk_umem;
umem              752 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 		xsk_umem_complete_tx(umem, xsk_frames);
umem              523 drivers/net/ethernet/mellanox/mlx5/core/en.h 	struct xdp_umem           *umem;
umem              686 drivers/net/ethernet/mellanox/mlx5/core/en.h 	struct xdp_umem       *umem;
umem             1013 drivers/net/ethernet/mellanox/mlx5/core/en.h 		  struct xdp_umem *umem, struct mlx5e_rq *rq);
umem             1023 drivers/net/ethernet/mellanox/mlx5/core/en.h 		     struct mlx5e_sq_param *param, struct xdp_umem *umem,
umem              125 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 	struct xdp_umem *umem = rq->umem;
umem              145 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 		xdp.handle = xsk_umem_adjust_offset(umem, xdp.handle, off);
umem              438 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 		xsk_umem_complete_tx(sq->umem, xsk_frames);
umem              468 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 		xsk_umem_complete_tx(sq->umem, xsk_frames);
umem               15 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	return xsk_umem_has_addrs_rq(rq->umem, count);
umem               21 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	struct xdp_umem *umem = rq->umem;
umem               24 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	if (!xsk_umem_peek_addr_rq(umem, &handle))
umem               27 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	dma_info->xsk.handle = xsk_umem_adjust_offset(umem, handle,
umem               29 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	dma_info->xsk.data = xdp_umem_get_data(umem, dma_info->xsk.handle);
umem               36 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	dma_info->addr = xdp_umem_get_dma(umem, handle);
umem               38 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	xsk_umem_discard_addr_rq(umem);
umem               48 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	xsk_umem_fq_reuse(rq->umem, handle & rq->umem->chunk_mask);
umem               30 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h 	if (!xsk_umem_uses_need_wakeup(rq->umem))
umem               34 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h 		xsk_set_rx_need_wakeup(rq->umem);
umem               36 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h 		xsk_clear_rx_need_wakeup(rq->umem);
umem               65 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c 		   struct mlx5e_xsk_param *xsk, struct xdp_umem *umem,
umem               85 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c 	err = mlx5e_open_rq(c, params, &cparam->rq, xsk, umem, &c->xskrq);
umem               99 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c 	err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, umem, &c->xsksq, true);
umem               15 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h 		   struct mlx5e_xsk_param *xsk, struct xdp_umem *umem,
umem               69 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c 	struct xdp_umem *umem = sq->umem;
umem               86 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c 		if (!xsk_umem_consume_tx(umem, &desc)) {
umem               95 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c 		xdptxd.dma_addr = xdp_umem_get_dma(umem, desc.addr);
umem               96 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c 		xdptxd.data = xdp_umem_get_data(umem, desc.addr);
umem              117 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c 		xsk_umem_consume_tx_done(umem);
umem               18 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h 	if (!xsk_umem_uses_need_wakeup(sq->umem))
umem               22 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h 		xsk_clear_tx_need_wakeup(sq->umem);
umem               24 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h 		xsk_set_tx_need_wakeup(sq->umem);
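The mlx5 XSK TX path above peeks descriptors from the socket TX ring with xsk_umem_consume_tx(), converts desc.addr into DMA and virtual addresses via xdp_umem_get_dma()/xdp_umem_get_data(), and finishes the batch with xsk_umem_consume_tx_done(); completions are later returned with xsk_umem_complete_tx(). A hedged, driver-agnostic sketch of that loop; my_hw_tx() is a hypothetical transmit hook, not an mlx5 function:

    /* Sketch of a zero-copy AF_XDP TX poll loop built on the helpers listed above. */
    #include <net/xdp_sock.h>

    static unsigned int sketch_xsk_tx(struct xdp_umem *umem, unsigned int budget,
                                      int (*my_hw_tx)(dma_addr_t dma, void *data, u32 len))
    {
            struct xdp_desc desc;
            unsigned int sent = 0;

            while (sent < budget && xsk_umem_consume_tx(umem, &desc)) {
                    dma_addr_t dma = xdp_umem_get_dma(umem, desc.addr);
                    void *data = xdp_umem_get_data(umem, desc.addr);

                    if (my_hw_tx(dma, data, desc.len))
                            break;
                    sent++;
            }
            if (sent)
                    xsk_umem_consume_tx_done(umem);   /* tell the socket the batch is done */
            return sent;
    }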
umem               10 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c 			      struct xdp_umem *umem)
umem               15 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c 	for (i = 0; i < umem->npgs; i++) {
umem               16 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c 		dma_addr_t dma = dma_map_page(dev, umem->pgs[i], 0, PAGE_SIZE,
umem               21 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c 		umem->pages[i].dma = dma;
umem               28 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c 		dma_unmap_page(dev, umem->pages[i].dma, PAGE_SIZE,
umem               30 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c 		umem->pages[i].dma = 0;
umem               37 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c 				 struct xdp_umem *umem)
umem               42 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c 	for (i = 0; i < umem->npgs; i++) {
umem               43 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c 		dma_unmap_page(dev, umem->pages[i].dma, PAGE_SIZE,
umem               45 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c 		umem->pages[i].dma = 0;
umem               72 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c static int mlx5e_xsk_add_umem(struct mlx5e_xsk *xsk, struct xdp_umem *umem, u16 ix)
umem               80 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c 	xsk->umems[ix] = umem;
umem               91 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c static bool mlx5e_xsk_is_umem_sane(struct xdp_umem *umem)
umem               93 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c 	return umem->headroom <= 0xffff && umem->chunk_size_nohr <= 0xffff;
umem               96 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c void mlx5e_build_xsk_param(struct xdp_umem *umem, struct mlx5e_xsk_param *xsk)
umem               98 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c 	xsk->headroom = umem->headroom;
umem               99 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c 	xsk->chunk_size = umem->chunk_size_nohr + umem->headroom;
umem              103 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c 				   struct xdp_umem *umem, u16 ix)
umem              113 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c 	if (unlikely(!mlx5e_xsk_is_umem_sane(umem)))
umem              116 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c 	err = mlx5e_xsk_map_umem(priv, umem);
umem              120 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c 	err = mlx5e_xsk_add_umem(&priv->xsk, umem, ix);
umem              124 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c 	mlx5e_build_xsk_param(umem, &xsk);
umem              140 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c 	err = mlx5e_open_xsk(priv, params, &xsk, umem, c);
umem              164 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c 	mlx5e_xsk_unmap_umem(priv, umem);
umem              182 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c 	struct xdp_umem *umem = mlx5e_xsk_get_umem(&priv->channels.params,
umem              186 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c 	if (unlikely(!umem))
umem              203 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c 	mlx5e_xsk_unmap_umem(priv, umem);
umem              208 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c static int mlx5e_xsk_enable_umem(struct mlx5e_priv *priv, struct xdp_umem *umem,
umem              214 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c 	err = mlx5e_xsk_enable_locked(priv, umem, ix);
umem              231 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c int mlx5e_xsk_setup_umem(struct net_device *dev, struct xdp_umem *umem, u16 qid)
umem              240 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c 	return umem ? mlx5e_xsk_enable_umem(priv, umem, ix) :
umem              244 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c int mlx5e_xsk_resize_reuseq(struct xdp_umem *umem, u32 nentries)
umem              251 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c 	xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
umem               22 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.h void mlx5e_build_xsk_param(struct xdp_umem *umem, struct mlx5e_xsk_param *xsk);
umem               25 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.h int mlx5e_xsk_setup_umem(struct net_device *dev, struct xdp_umem *umem, u16 qid);
umem               27 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.h int mlx5e_xsk_resize_reuseq(struct xdp_umem *umem, u32 nentries);
umem              376 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			  struct xdp_umem *umem,
umem              403 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	rq->umem    = umem;
umem              405 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (rq->umem)
umem              535 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		err = mlx5e_xsk_resize_reuseq(umem, num_xsk_frames);
umem              884 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		  struct xdp_umem *umem, struct mlx5e_rq *rq)
umem              888 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5e_alloc_rq(c, params, xsk, umem, param, rq);
umem              990 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			     struct xdp_umem *umem,
umem             1006 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	sq->umem      = umem;
umem             1008 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	sq->stats = sq->umem ?
umem             1461 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		     struct mlx5e_sq_param *param, struct xdp_umem *umem,
umem             1467 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5e_alloc_xdpsq(c, params, umem, param, sq, is_redirect);
umem             1965 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			      struct xdp_umem *umem,
umem             2008 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (umem) {
umem             2009 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5e_build_xsk_param(umem, &xsk);
umem             2010 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		err = mlx5e_open_xsk(priv, params, &xsk, umem, c);
umem             2362 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		struct xdp_umem *umem = NULL;
umem             2365 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			umem = mlx5e_xsk_get_umem(&chs->params, chs->params.xsk, i);
umem             2367 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		err = mlx5e_open_channel(priv, i, &chs->params, cparam, umem, &chs->c[i]);
umem             3889 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		struct xdp_umem *umem = mlx5e_xsk_get_umem(&chs->params, chs->params.xsk, ix);
umem             3892 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		if (!umem)
umem             3895 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5e_build_xsk_param(umem, &xsk);
umem             4531 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		return mlx5e_xsk_setup_umem(dev, xdp->xsk.umem,
umem              264 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (rq->umem)
umem              296 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (rq->umem)
umem              383 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (rq->umem) {
umem              497 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (rq->umem &&
umem              705 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (unlikely(alloc_err == -ENOMEM && rq->umem))
umem             1589 drivers/vhost/net.c 	struct vhost_umem *umem;
umem             1595 drivers/vhost/net.c 	umem = vhost_dev_reset_owner_prepare();
umem             1596 drivers/vhost/net.c 	if (!umem) {
umem             1603 drivers/vhost/net.c 	vhost_dev_reset_owner(&n->dev, umem);
umem              228 drivers/vhost/test.c 	struct vhost_umem *umem;
umem              234 drivers/vhost/test.c 	umem = vhost_dev_reset_owner_prepare();
umem              235 drivers/vhost/test.c 	if (!umem) {
umem              242 drivers/vhost/test.c 	vhost_dev_reset_owner(&n->dev, umem);
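Both vhost_net and vhost_test reset ownership the same way: allocate a fresh umem with vhost_dev_reset_owner_prepare(), fail with -ENOMEM if that returns NULL, then hand it to vhost_dev_reset_owner(), which installs the empty umem on the device and each vq. A hedged sketch of that call sequence; my_stop() is a placeholder for the backend-specific quiesce step:

    /* Sketch of the vhost reset-owner sequence shown in net.c and test.c above. */
    static long sketch_reset_owner(struct vhost_dev *dev, void (*my_stop)(void))
    {
            struct vhost_umem *umem;
            long err;

            err = vhost_dev_check_owner(dev);
            if (err)
                    return err;

            umem = vhost_dev_reset_owner_prepare();
            if (!umem)
                    return -ENOMEM;

            my_stop();                        /* backend-specific quiesce */
            vhost_dev_reset_owner(dev, umem); /* installs umem on every vq */
            return 0;
    }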
umem              326 drivers/vhost/vhost.c 	vq->umem = NULL;
umem              466 drivers/vhost/vhost.c 	dev->umem = NULL;
umem              586 drivers/vhost/vhost.c void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_umem *umem)
umem              593 drivers/vhost/vhost.c 	INIT_LIST_HEAD(&umem->umem_list);
umem              594 drivers/vhost/vhost.c 	dev->umem = umem;
umem              599 drivers/vhost/vhost.c 		dev->vqs[i]->umem = umem;
umem              616 drivers/vhost/vhost.c static void vhost_umem_free(struct vhost_umem *umem,
umem              619 drivers/vhost/vhost.c 	vhost_umem_interval_tree_remove(node, &umem->umem_tree);
umem              622 drivers/vhost/vhost.c 	umem->numem--;
umem              625 drivers/vhost/vhost.c static void vhost_umem_clean(struct vhost_umem *umem)
umem              629 drivers/vhost/vhost.c 	if (!umem)
umem              632 drivers/vhost/vhost.c 	list_for_each_entry_safe(node, tmp, &umem->umem_list, link)
umem              633 drivers/vhost/vhost.c 		vhost_umem_free(umem, node);
umem              635 drivers/vhost/vhost.c 	kvfree(umem);
umem              675 drivers/vhost/vhost.c 	vhost_umem_clean(dev->umem);
umem              676 drivers/vhost/vhost.c 	dev->umem = NULL;
umem              712 drivers/vhost/vhost.c static bool vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem,
umem              717 drivers/vhost/vhost.c 	if (!umem)
umem              720 drivers/vhost/vhost.c 	list_for_each_entry(node, &umem->umem_list, link) {
umem              752 drivers/vhost/vhost.c static bool memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
umem              766 drivers/vhost/vhost.c 						 umem, log);
umem             1017 drivers/vhost/vhost.c static int vhost_new_umem_range(struct vhost_umem *umem,
umem             1030 drivers/vhost/vhost.c 	if (umem->numem == max_iotlb_entries) {
umem             1031 drivers/vhost/vhost.c 		tmp = list_first_entry(&umem->umem_list, typeof(*tmp), link);
umem             1032 drivers/vhost/vhost.c 		vhost_umem_free(umem, tmp);
umem             1041 drivers/vhost/vhost.c 	list_add_tail(&node->link, &umem->umem_list);
umem             1042 drivers/vhost/vhost.c 	vhost_umem_interval_tree_insert(node, &umem->umem_tree);
umem             1043 drivers/vhost/vhost.c 	umem->numem++;
umem             1048 drivers/vhost/vhost.c static void vhost_del_umem_range(struct vhost_umem *umem,
umem             1053 drivers/vhost/vhost.c 	while ((node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
umem             1055 drivers/vhost/vhost.c 		vhost_umem_free(umem, node);
umem             1322 drivers/vhost/vhost.c 	struct vhost_umem *umem = vq->iotlb;
umem             1329 drivers/vhost/vhost.c 		node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
umem             1375 drivers/vhost/vhost.c 	return memory_access_ok(dev, dev->umem, 1);
umem             1384 drivers/vhost/vhost.c 	return vq_memory_access_ok(log_base, vq->umem,
umem             1407 drivers/vhost/vhost.c 	struct vhost_umem *umem = kvzalloc(sizeof(*umem), GFP_KERNEL);
umem             1409 drivers/vhost/vhost.c 	if (!umem)
umem             1412 drivers/vhost/vhost.c 	umem->umem_tree = RB_ROOT_CACHED;
umem             1413 drivers/vhost/vhost.c 	umem->numem = 0;
umem             1414 drivers/vhost/vhost.c 	INIT_LIST_HEAD(&umem->umem_list);
umem             1416 drivers/vhost/vhost.c 	return umem;
umem             1467 drivers/vhost/vhost.c 	oldumem = d->umem;
umem             1468 drivers/vhost/vhost.c 	d->umem = newumem;
umem             1473 drivers/vhost/vhost.c 		d->vqs[i]->umem = newumem;
umem             1872 drivers/vhost/vhost.c 	struct vhost_umem *umem = vq->umem;
umem             1883 drivers/vhost/vhost.c 		list_for_each_entry(u, &umem->umem_list, link) {
umem             2045 drivers/vhost/vhost.c 	struct vhost_umem *umem = dev->iotlb ? dev->iotlb : dev->umem;
umem             2057 drivers/vhost/vhost.c 		node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
umem             2060 drivers/vhost/vhost.c 			if (umem != dev->iotlb) {
umem              131 drivers/vhost/vhost.h 	struct vhost_umem *umem;
umem              167 drivers/vhost/vhost.h 	struct vhost_umem *umem;
umem             9996 include/linux/mlx5/mlx5_ifc.h 	struct mlx5_ifc_umem_bits  umem;
umem              614 include/linux/netdevice.h 	struct xdp_umem         *umem;
umem              747 include/linux/netdevice.h 	struct xdp_umem                 *umem;
umem              901 include/linux/netdevice.h 			struct xdp_umem *umem;
umem               84 include/net/xdp_sock.h 	struct xdp_umem *umem;
umem              116 include/net/xdp_sock.h bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
umem              117 include/net/xdp_sock.h u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
umem              118 include/net/xdp_sock.h void xsk_umem_discard_addr(struct xdp_umem *umem);
umem              119 include/net/xdp_sock.h void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
umem              120 include/net/xdp_sock.h bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
umem              121 include/net/xdp_sock.h void xsk_umem_consume_tx_done(struct xdp_umem *umem);
umem              123 include/net/xdp_sock.h struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
umem              127 include/net/xdp_sock.h void xsk_set_rx_need_wakeup(struct xdp_umem *umem);
umem              128 include/net/xdp_sock.h void xsk_set_tx_need_wakeup(struct xdp_umem *umem);
umem              129 include/net/xdp_sock.h void xsk_clear_rx_need_wakeup(struct xdp_umem *umem);
umem              130 include/net/xdp_sock.h void xsk_clear_tx_need_wakeup(struct xdp_umem *umem);
umem              131 include/net/xdp_sock.h bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem);
umem              153 include/net/xdp_sock.h static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
umem              158 include/net/xdp_sock.h 	page_addr = (unsigned long)umem->pages[addr >> PAGE_SHIFT].addr;
umem              163 include/net/xdp_sock.h static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
umem              167 include/net/xdp_sock.h 	return umem->pages[addr >> PAGE_SHIFT].dma + (addr & ~PAGE_MASK);
umem              171 include/net/xdp_sock.h static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
umem              173 include/net/xdp_sock.h 	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
umem              178 include/net/xdp_sock.h 	return xsk_umem_has_addrs(umem, cnt - rq->length);
umem              181 include/net/xdp_sock.h static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
umem              183 include/net/xdp_sock.h 	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
umem              186 include/net/xdp_sock.h 		return xsk_umem_peek_addr(umem, addr);
umem              192 include/net/xdp_sock.h static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
umem              194 include/net/xdp_sock.h 	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
umem              197 include/net/xdp_sock.h 		xsk_umem_discard_addr(umem);
umem              202 include/net/xdp_sock.h static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
umem              204 include/net/xdp_sock.h 	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
umem              213 include/net/xdp_sock.h static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 address,
umem              216 include/net/xdp_sock.h 	if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG)
umem              241 include/net/xdp_sock.h static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
umem              246 include/net/xdp_sock.h static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
umem              251 include/net/xdp_sock.h static inline void xsk_umem_discard_addr(struct xdp_umem *umem)
umem              255 include/net/xdp_sock.h static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
umem              259 include/net/xdp_sock.h static inline bool xsk_umem_consume_tx(struct xdp_umem *umem,
umem              265 include/net/xdp_sock.h static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
umem              275 include/net/xdp_sock.h 	struct xdp_umem *umem,
umem              305 include/net/xdp_sock.h static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
umem              310 include/net/xdp_sock.h static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
umem              315 include/net/xdp_sock.h static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
umem              320 include/net/xdp_sock.h static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
umem              325 include/net/xdp_sock.h static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
umem              329 include/net/xdp_sock.h static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
umem              333 include/net/xdp_sock.h static inline void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
umem              337 include/net/xdp_sock.h static inline void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
umem              341 include/net/xdp_sock.h static inline void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
umem              345 include/net/xdp_sock.h static inline void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
umem              349 include/net/xdp_sock.h static inline bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
umem              354 include/net/xdp_sock.h static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle,
umem               58 include/rdma/ib_umem.h static inline int ib_umem_offset(struct ib_umem *umem)
umem               60 include/rdma/ib_umem.h 	return umem->address & ~PAGE_MASK;
umem               63 include/rdma/ib_umem.h static inline size_t ib_umem_num_pages(struct ib_umem *umem)
umem               65 include/rdma/ib_umem.h 	return (ALIGN(umem->address + umem->length, PAGE_SIZE) -
umem               66 include/rdma/ib_umem.h 		ALIGN_DOWN(umem->address, PAGE_SIZE)) >>
umem               74 include/rdma/ib_umem.h void ib_umem_release(struct ib_umem *umem);
umem               75 include/rdma/ib_umem.h int ib_umem_page_count(struct ib_umem *umem);
umem               76 include/rdma/ib_umem.h int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
umem               78 include/rdma/ib_umem.h unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
umem               92 include/rdma/ib_umem.h static inline void ib_umem_release(struct ib_umem *umem) { }
umem               93 include/rdma/ib_umem.h static inline int ib_umem_page_count(struct ib_umem *umem) { return 0; }
umem               94 include/rdma/ib_umem.h static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
umem               98 include/rdma/ib_umem.h static inline int ib_umem_find_best_pgsz(struct ib_umem *umem,
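ib_umem_offset() and ib_umem_num_pages() above are pure page arithmetic on the registered address and length. A short worked illustration, assuming 4 KiB pages and arbitrary example values:

    address = 0x10001234, length = 0x3000
    ib_umem_offset()    = 0x10001234 & ~PAGE_MASK = 0x234
    ib_umem_num_pages() = (ALIGN(0x10001234 + 0x3000, 4K) - ALIGN_DOWN(0x10001234, 4K)) >> PAGE_SHIFT
                        = (0x10005000 - 0x10001000) >> 12 = 4 pages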
umem               41 include/rdma/ib_umem_odp.h 	struct ib_umem umem;
umem               86 include/rdma/ib_umem_odp.h static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
umem               88 include/rdma/ib_umem_odp.h 	return container_of(umem, struct ib_umem_odp, umem);
umem               47 lib/test_user_copy.c static int test_check_nonzero_user(char *kmem, char __user *umem, size_t size)
umem               65 lib/test_user_copy.c 	umem += start;
umem               87 lib/test_user_copy.c 	ret |= test(copy_to_user(umem, kmem, size),
umem               93 lib/test_user_copy.c 			int retval = check_zeroed_user(umem + start, len);
umem              105 lib/test_user_copy.c static int test_copy_struct_from_user(char *kmem, char __user *umem,
umem              124 lib/test_user_copy.c 	ret |= test(copy_to_user(umem, umem_src, size),
umem              134 lib/test_user_copy.c 	ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
umem              147 lib/test_user_copy.c 	ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
umem              157 lib/test_user_copy.c 	ret |= test(copy_struct_from_user(kmem, ksize, umem, usize) != -E2BIG,
umem              165 lib/test_user_copy.c 	ret |= test(clear_user(umem + ksize, usize - ksize),
umem              169 lib/test_user_copy.c 	ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
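The test lines above exercise copy_struct_from_user(), which copies a userspace struct of size usize into a kernel struct of size ksize, zero-filling the kernel tail when the user struct is shorter and rejecting non-zero trailing user bytes with -E2BIG when it is longer. A hedged sketch of the typical extensible-argument usage; my_args and sketch_copy_args are hypothetical:

    /* Sketch: versioned-struct copy-in for an extensible ioctl/syscall argument. */
    #include <linux/types.h>
    #include <linux/uaccess.h>

    struct my_args {            /* hypothetical, newest kernel-side layout */
            __u64 flags;
            __u64 addr;
            __u64 len;          /* field added in a later ABI revision */
    };

    static int sketch_copy_args(struct my_args *args,
                                const void __user *uptr, size_t usize)
    {
            /* Older userspace may pass a smaller struct; newer userspace may pass
             * a larger one whose unknown tail must be all zeroes. Both cases are
             * handled by copy_struct_from_user().
             */
            return copy_struct_from_user(args, sizeof(*args), uptr, usize);
    }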
umem               26 net/xdp/xdp_umem.c void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
umem               33 net/xdp/xdp_umem.c 	spin_lock_irqsave(&umem->xsk_list_lock, flags);
umem               34 net/xdp/xdp_umem.c 	list_add_rcu(&xs->list, &umem->xsk_list);
umem               35 net/xdp/xdp_umem.c 	spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
umem               38 net/xdp/xdp_umem.c void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
umem               45 net/xdp/xdp_umem.c 	spin_lock_irqsave(&umem->xsk_list_lock, flags);
umem               47 net/xdp/xdp_umem.c 	spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
umem               54 net/xdp/xdp_umem.c static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
umem               63 net/xdp/xdp_umem.c 		dev->_rx[queue_id].umem = umem;
umem               65 net/xdp/xdp_umem.c 		dev->_tx[queue_id].umem = umem;
umem               74 net/xdp/xdp_umem.c 		return dev->_rx[queue_id].umem;
umem               76 net/xdp/xdp_umem.c 		return dev->_tx[queue_id].umem;
umem               85 net/xdp/xdp_umem.c 		dev->_rx[queue_id].umem = NULL;
umem               87 net/xdp/xdp_umem.c 		dev->_tx[queue_id].umem = NULL;
umem               90 net/xdp/xdp_umem.c int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
umem              108 net/xdp/xdp_umem.c 	err = xdp_reg_umem_at_qid(dev, umem, queue_id);
umem              112 net/xdp/xdp_umem.c 	umem->dev = dev;
umem              113 net/xdp/xdp_umem.c 	umem->queue_id = queue_id;
umem              116 net/xdp/xdp_umem.c 		umem->flags |= XDP_UMEM_USES_NEED_WAKEUP;
umem              121 net/xdp/xdp_umem.c 		xsk_set_tx_need_wakeup(umem);
umem              136 net/xdp/xdp_umem.c 	bpf.xsk.umem = umem;
umem              143 net/xdp/xdp_umem.c 	umem->zc = true;
umem              154 net/xdp/xdp_umem.c void xdp_umem_clear_dev(struct xdp_umem *umem)
umem              161 net/xdp/xdp_umem.c 	if (!umem->dev)
umem              164 net/xdp/xdp_umem.c 	if (umem->zc) {
umem              166 net/xdp/xdp_umem.c 		bpf.xsk.umem = NULL;
umem              167 net/xdp/xdp_umem.c 		bpf.xsk.queue_id = umem->queue_id;
umem              169 net/xdp/xdp_umem.c 		err = umem->dev->netdev_ops->ndo_bpf(umem->dev, &bpf);
umem              175 net/xdp/xdp_umem.c 	xdp_clear_umem_at_qid(umem->dev, umem->queue_id);
umem              177 net/xdp/xdp_umem.c 	dev_put(umem->dev);
umem              178 net/xdp/xdp_umem.c 	umem->dev = NULL;
umem              179 net/xdp/xdp_umem.c 	umem->zc = false;
umem              182 net/xdp/xdp_umem.c static void xdp_umem_unmap_pages(struct xdp_umem *umem)
umem              186 net/xdp/xdp_umem.c 	for (i = 0; i < umem->npgs; i++)
umem              187 net/xdp/xdp_umem.c 		if (PageHighMem(umem->pgs[i]))
umem              188 net/xdp/xdp_umem.c 			vunmap(umem->pages[i].addr);
umem              191 net/xdp/xdp_umem.c static int xdp_umem_map_pages(struct xdp_umem *umem)
umem              196 net/xdp/xdp_umem.c 	for (i = 0; i < umem->npgs; i++) {
umem              197 net/xdp/xdp_umem.c 		if (PageHighMem(umem->pgs[i]))
umem              198 net/xdp/xdp_umem.c 			addr = vmap(&umem->pgs[i], 1, VM_MAP, PAGE_KERNEL);
umem              200 net/xdp/xdp_umem.c 			addr = page_address(umem->pgs[i]);
umem              203 net/xdp/xdp_umem.c 			xdp_umem_unmap_pages(umem);
umem              207 net/xdp/xdp_umem.c 		umem->pages[i].addr = addr;
umem              213 net/xdp/xdp_umem.c static void xdp_umem_unpin_pages(struct xdp_umem *umem)
umem              215 net/xdp/xdp_umem.c 	put_user_pages_dirty_lock(umem->pgs, umem->npgs, true);
umem              217 net/xdp/xdp_umem.c 	kfree(umem->pgs);
umem              218 net/xdp/xdp_umem.c 	umem->pgs = NULL;
umem              221 net/xdp/xdp_umem.c static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
umem              223 net/xdp/xdp_umem.c 	if (umem->user) {
umem              224 net/xdp/xdp_umem.c 		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
umem              225 net/xdp/xdp_umem.c 		free_uid(umem->user);
umem              229 net/xdp/xdp_umem.c static void xdp_umem_release(struct xdp_umem *umem)
umem              232 net/xdp/xdp_umem.c 	xdp_umem_clear_dev(umem);
umem              235 net/xdp/xdp_umem.c 	ida_simple_remove(&umem_ida, umem->id);
umem              237 net/xdp/xdp_umem.c 	if (umem->fq) {
umem              238 net/xdp/xdp_umem.c 		xskq_destroy(umem->fq);
umem              239 net/xdp/xdp_umem.c 		umem->fq = NULL;
umem              242 net/xdp/xdp_umem.c 	if (umem->cq) {
umem              243 net/xdp/xdp_umem.c 		xskq_destroy(umem->cq);
umem              244 net/xdp/xdp_umem.c 		umem->cq = NULL;
umem              247 net/xdp/xdp_umem.c 	xsk_reuseq_destroy(umem);
umem              249 net/xdp/xdp_umem.c 	xdp_umem_unmap_pages(umem);
umem              250 net/xdp/xdp_umem.c 	xdp_umem_unpin_pages(umem);
umem              252 net/xdp/xdp_umem.c 	kfree(umem->pages);
umem              253 net/xdp/xdp_umem.c 	umem->pages = NULL;
umem              255 net/xdp/xdp_umem.c 	xdp_umem_unaccount_pages(umem);
umem              256 net/xdp/xdp_umem.c 	kfree(umem);
umem              261 net/xdp/xdp_umem.c 	struct xdp_umem *umem = container_of(work, struct xdp_umem, work);
umem              263 net/xdp/xdp_umem.c 	xdp_umem_release(umem);
umem              266 net/xdp/xdp_umem.c void xdp_get_umem(struct xdp_umem *umem)
umem              268 net/xdp/xdp_umem.c 	refcount_inc(&umem->users);
umem              271 net/xdp/xdp_umem.c void xdp_put_umem(struct xdp_umem *umem)
umem              273 net/xdp/xdp_umem.c 	if (!umem)
umem              276 net/xdp/xdp_umem.c 	if (refcount_dec_and_test(&umem->users)) {
umem              277 net/xdp/xdp_umem.c 		INIT_WORK(&umem->work, xdp_umem_release_deferred);
umem              278 net/xdp/xdp_umem.c 		schedule_work(&umem->work);
umem              282 net/xdp/xdp_umem.c static int xdp_umem_pin_pages(struct xdp_umem *umem)
umem              288 net/xdp/xdp_umem.c 	umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs),
umem              290 net/xdp/xdp_umem.c 	if (!umem->pgs)
umem              294 net/xdp/xdp_umem.c 	npgs = get_user_pages(umem->address, umem->npgs,
umem              295 net/xdp/xdp_umem.c 			      gup_flags | FOLL_LONGTERM, &umem->pgs[0], NULL);
umem              298 net/xdp/xdp_umem.c 	if (npgs != umem->npgs) {
umem              300 net/xdp/xdp_umem.c 			umem->npgs = npgs;
umem              310 net/xdp/xdp_umem.c 	xdp_umem_unpin_pages(umem);
umem              312 net/xdp/xdp_umem.c 	kfree(umem->pgs);
umem              313 net/xdp/xdp_umem.c 	umem->pgs = NULL;
umem              317 net/xdp/xdp_umem.c static int xdp_umem_account_pages(struct xdp_umem *umem)
umem              325 net/xdp/xdp_umem.c 	umem->user = get_uid(current_user());
umem              328 net/xdp/xdp_umem.c 		old_npgs = atomic_long_read(&umem->user->locked_vm);
umem              329 net/xdp/xdp_umem.c 		new_npgs = old_npgs + umem->npgs;
umem              331 net/xdp/xdp_umem.c 			free_uid(umem->user);
umem              332 net/xdp/xdp_umem.c 			umem->user = NULL;
umem              335 net/xdp/xdp_umem.c 	} while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
umem              340 net/xdp/xdp_umem.c static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
umem              392 net/xdp/xdp_umem.c 	umem->address = (unsigned long)addr;
umem              393 net/xdp/xdp_umem.c 	umem->chunk_mask = unaligned_chunks ? XSK_UNALIGNED_BUF_ADDR_MASK
umem              395 net/xdp/xdp_umem.c 	umem->size = size;
umem              396 net/xdp/xdp_umem.c 	umem->headroom = headroom;
umem              397 net/xdp/xdp_umem.c 	umem->chunk_size_nohr = chunk_size - headroom;
umem              398 net/xdp/xdp_umem.c 	umem->npgs = (u32)npgs;
umem              399 net/xdp/xdp_umem.c 	umem->pgs = NULL;
umem              400 net/xdp/xdp_umem.c 	umem->user = NULL;
umem              401 net/xdp/xdp_umem.c 	umem->flags = mr->flags;
umem              402 net/xdp/xdp_umem.c 	INIT_LIST_HEAD(&umem->xsk_list);
umem              403 net/xdp/xdp_umem.c 	spin_lock_init(&umem->xsk_list_lock);
umem              405 net/xdp/xdp_umem.c 	refcount_set(&umem->users, 1);
umem              407 net/xdp/xdp_umem.c 	err = xdp_umem_account_pages(umem);
umem              411 net/xdp/xdp_umem.c 	err = xdp_umem_pin_pages(umem);
umem              415 net/xdp/xdp_umem.c 	umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL);
umem              416 net/xdp/xdp_umem.c 	if (!umem->pages) {
umem              421 net/xdp/xdp_umem.c 	err = xdp_umem_map_pages(umem);
umem              425 net/xdp/xdp_umem.c 	kfree(umem->pages);
umem              428 net/xdp/xdp_umem.c 	xdp_umem_unpin_pages(umem);
umem              430 net/xdp/xdp_umem.c 	xdp_umem_unaccount_pages(umem);
umem              436 net/xdp/xdp_umem.c 	struct xdp_umem *umem;
umem              439 net/xdp/xdp_umem.c 	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
umem              440 net/xdp/xdp_umem.c 	if (!umem)
umem              445 net/xdp/xdp_umem.c 		kfree(umem);
umem              448 net/xdp/xdp_umem.c 	umem->id = err;
umem              450 net/xdp/xdp_umem.c 	err = xdp_umem_reg(umem, mr);
umem              452 net/xdp/xdp_umem.c 		ida_simple_remove(&umem_ida, umem->id);
umem              453 net/xdp/xdp_umem.c 		kfree(umem);
umem              457 net/xdp/xdp_umem.c 	return umem;
umem              460 net/xdp/xdp_umem.c bool xdp_umem_validate_queues(struct xdp_umem *umem)
umem              462 net/xdp/xdp_umem.c 	return umem->fq && umem->cq;
umem               11 net/xdp/xdp_umem.h int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
umem               13 net/xdp/xdp_umem.h void xdp_umem_clear_dev(struct xdp_umem *umem);
umem               14 net/xdp/xdp_umem.h bool xdp_umem_validate_queues(struct xdp_umem *umem);
umem               15 net/xdp/xdp_umem.h void xdp_get_umem(struct xdp_umem *umem);
umem               16 net/xdp/xdp_umem.h void xdp_put_umem(struct xdp_umem *umem);
umem               17 net/xdp/xdp_umem.h void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs);
umem               18 net/xdp/xdp_umem.h void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs);
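xdp_umem_reg()/xdp_umem_create() above implement the kernel side of the XDP_UMEM_REG setsockopt: validate the region, account and pin its pages, then map them. A hedged userspace counterpart showing what such a registration looks like from an AF_XDP socket; frame count and size are arbitrary example values, and the SOL_XDP fallback define mirrors what AF_XDP userspace commonly does when libc headers lack it:

    /* Userspace sketch: register a UMEM with an already-created AF_XDP socket. */
    #include <linux/if_xdp.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/socket.h>
    #include <unistd.h>

    #ifndef SOL_XDP
    #define SOL_XDP 283
    #endif

    #define NUM_FRAMES 4096
    #define FRAME_SIZE 2048          /* example chunk size */

    static int register_umem(int xsk_fd, void **out_buf)
    {
            struct xdp_umem_reg mr = {0};
            void *buf;

            if (posix_memalign(&buf, getpagesize(), (size_t)NUM_FRAMES * FRAME_SIZE))
                    return -1;

            mr.addr = (uintptr_t)buf;
            mr.len = (__u64)NUM_FRAMES * FRAME_SIZE;
            mr.chunk_size = FRAME_SIZE;
            mr.headroom = 0;

            if (setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr)))
                    return -1;

            *out_buf = buf;
            return 0;
    }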
umem               36 net/xdp/xsk.c  	return READ_ONCE(xs->rx) &&  READ_ONCE(xs->umem) &&
umem               37 net/xdp/xsk.c  		READ_ONCE(xs->umem->fq);
umem               40 net/xdp/xsk.c  bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
umem               42 net/xdp/xsk.c  	return xskq_has_addrs(umem->fq, cnt);
umem               46 net/xdp/xsk.c  u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
umem               48 net/xdp/xsk.c  	return xskq_peek_addr(umem->fq, addr, umem);
umem               52 net/xdp/xsk.c  void xsk_umem_discard_addr(struct xdp_umem *umem)
umem               54 net/xdp/xsk.c  	xskq_discard_addr(umem->fq);
umem               58 net/xdp/xsk.c  void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
umem               60 net/xdp/xsk.c  	if (umem->need_wakeup & XDP_WAKEUP_RX)
umem               63 net/xdp/xsk.c  	umem->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
umem               64 net/xdp/xsk.c  	umem->need_wakeup |= XDP_WAKEUP_RX;
umem               68 net/xdp/xsk.c  void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
umem               72 net/xdp/xsk.c  	if (umem->need_wakeup & XDP_WAKEUP_TX)
umem               76 net/xdp/xsk.c  	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
umem               81 net/xdp/xsk.c  	umem->need_wakeup |= XDP_WAKEUP_TX;
umem               85 net/xdp/xsk.c  void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
umem               87 net/xdp/xsk.c  	if (!(umem->need_wakeup & XDP_WAKEUP_RX))
umem               90 net/xdp/xsk.c  	umem->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
umem               91 net/xdp/xsk.c  	umem->need_wakeup &= ~XDP_WAKEUP_RX;
umem               95 net/xdp/xsk.c  void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
umem               99 net/xdp/xsk.c  	if (!(umem->need_wakeup & XDP_WAKEUP_TX))
umem              103 net/xdp/xsk.c  	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
umem              108 net/xdp/xsk.c  	umem->need_wakeup &= ~XDP_WAKEUP_TX;
umem              112 net/xdp/xsk.c  bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
umem              114 net/xdp/xsk.c  	return umem->flags & XDP_UMEM_USES_NEED_WAKEUP;
umem              121 net/xdp/xsk.c  static void __xsk_rcv_memcpy(struct xdp_umem *umem, u64 addr, void *from_buf,
umem              124 net/xdp/xsk.c  	void *to_buf = xdp_umem_get_data(umem, addr);
umem              127 net/xdp/xsk.c  	if (xskq_crosses_non_contig_pg(umem, addr, len + metalen)) {
umem              128 net/xdp/xsk.c  		void *next_pg_addr = umem->pages[(addr >> PAGE_SHIFT) + 1].addr;
umem              144 net/xdp/xsk.c  	u64 offset = xs->umem->headroom;
umem              150 net/xdp/xsk.c  	if (!xskq_peek_addr(xs->umem->fq, &addr, xs->umem) ||
umem              151 net/xdp/xsk.c  	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
umem              164 net/xdp/xsk.c  	memcpy_addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
umem              165 net/xdp/xsk.c  	__xsk_rcv_memcpy(xs->umem, memcpy_addr, from_buf, len, metalen);
umem              168 net/xdp/xsk.c  	addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
umem              171 net/xdp/xsk.c  		xskq_discard_addr(xs->umem->fq);
umem              226 net/xdp/xsk.c  	u64 offset = xs->umem->headroom;
umem              238 net/xdp/xsk.c  	if (!xskq_peek_addr(xs->umem->fq, &addr, xs->umem) ||
umem              239 net/xdp/xsk.c  	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
umem              244 net/xdp/xsk.c  	addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
umem              245 net/xdp/xsk.c  	buffer = xdp_umem_get_data(xs->umem, addr);
umem              248 net/xdp/xsk.c  	addr = xsk_umem_adjust_offset(xs->umem, addr, metalen);
umem              253 net/xdp/xsk.c  	xskq_discard_addr(xs->umem->fq);
umem              268 net/xdp/xsk.c  void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
umem              270 net/xdp/xsk.c  	xskq_produce_flush_addr_n(umem->cq, nb_entries);
umem              274 net/xdp/xsk.c  void xsk_umem_consume_tx_done(struct xdp_umem *umem)
umem              279 net/xdp/xsk.c  	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
umem              286 net/xdp/xsk.c  bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc)
umem              291 net/xdp/xsk.c  	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
umem              292 net/xdp/xsk.c  		if (!xskq_peek_desc(xs->tx, desc, umem))
umem              295 net/xdp/xsk.c  		if (xskq_produce_addr_lazy(umem->cq, desc->addr))
umem              333 net/xdp/xsk.c  	WARN_ON_ONCE(xskq_produce_addr(xs->umem->cq, addr));
umem              353 net/xdp/xsk.c  	while (xskq_peek_desc(xs->tx, &desc, xs->umem)) {
umem              372 net/xdp/xsk.c  		buffer = xdp_umem_get_data(xs->umem, addr);
umem              374 net/xdp/xsk.c  		if (unlikely(err) || xskq_reserve_addr(xs->umem->cq)) {
umem              437 net/xdp/xsk.c  	struct xdp_umem *umem;
umem              442 net/xdp/xsk.c  	umem = xs->umem;
umem              444 net/xdp/xsk.c  	if (umem->need_wakeup) {
umem              446 net/xdp/xsk.c  			xsk_wakeup(xs, umem->need_wakeup);
umem              487 net/xdp/xsk.c  	xdp_del_sk_umem(xs->umem, xs);
umem              597 net/xdp/xsk.c  static void xsk_check_page_contiguity(struct xdp_umem *umem, u32 flags)
umem              599 net/xdp/xsk.c  	struct xdp_umem_page *pgs = umem->pages;
umem              602 net/xdp/xsk.c  	for (i = 0; i < umem->npgs - 1; i++) {
umem              660 net/xdp/xsk.c  		if (xs->umem) {
umem              684 net/xdp/xsk.c  		xdp_get_umem(umem_xs->umem);
umem              685 net/xdp/xsk.c  		WRITE_ONCE(xs->umem, umem_xs->umem);
umem              687 net/xdp/xsk.c  	} else if (!xs->umem || !xdp_umem_validate_queues(xs->umem)) {
umem              692 net/xdp/xsk.c  		xskq_set_umem(xs->umem->fq, xs->umem->size,
umem              693 net/xdp/xsk.c  			      xs->umem->chunk_mask);
umem              694 net/xdp/xsk.c  		xskq_set_umem(xs->umem->cq, xs->umem->size,
umem              695 net/xdp/xsk.c  			      xs->umem->chunk_mask);
umem              697 net/xdp/xsk.c  		err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
umem              701 net/xdp/xsk.c  		xsk_check_page_contiguity(xs->umem, flags);
umem              705 net/xdp/xsk.c  	xs->zc = xs->umem->zc;
umem              707 net/xdp/xsk.c  	xskq_set_umem(xs->rx, xs->umem->size, xs->umem->chunk_mask);
umem              708 net/xdp/xsk.c  	xskq_set_umem(xs->tx, xs->umem->size, xs->umem->chunk_mask);
umem              709 net/xdp/xsk.c  	xdp_add_sk_umem(xs->umem, xs);
umem              773 net/xdp/xsk.c  		struct xdp_umem *umem;
umem              784 net/xdp/xsk.c  		if (xs->state != XSK_READY || xs->umem) {
umem              789 net/xdp/xsk.c  		umem = xdp_umem_create(&mr);
umem              790 net/xdp/xsk.c  		if (IS_ERR(umem)) {
umem              792 net/xdp/xsk.c  			return PTR_ERR(umem);
umem              797 net/xdp/xsk.c  		WRITE_ONCE(xs->umem, umem);
umem              815 net/xdp/xsk.c  		if (!xs->umem) {
umem              820 net/xdp/xsk.c  		q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq :
umem              821 net/xdp/xsk.c  			&xs->umem->cq;
umem              969 net/xdp/xsk.c  	struct xdp_umem *umem;
umem              981 net/xdp/xsk.c  		umem = READ_ONCE(xs->umem);
umem              982 net/xdp/xsk.c  		if (!umem)
umem              988 net/xdp/xsk.c  			q = READ_ONCE(umem->fq);
umem              990 net/xdp/xsk.c  			q = READ_ONCE(umem->cq);
umem             1029 net/xdp/xsk.c  				xdp_umem_clear_dev(xs->umem);
umem             1073 net/xdp/xsk.c  	xdp_put_umem(xs->umem);
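The net/xdp/xsk.c entries above are the kernel side of AF_XDP UMEM handling: copying received frames into UMEM chunks, draining the TX ring, driving need_wakeup, and servicing the XDP_UMEM_REG and fill/completion-ring setsockopts. As a rough illustration of the userspace half of that setsockopt path (the libbpf wrappers indexed further below do the same thing), here is a minimal sketch against the raw uapi; the frame counts, helper name, and pared-down error handling are assumptions, not code from this tree.

	/* Sketch: register a UMEM against an AF_XDP socket with the raw
	 * setsockopt interface that net/xdp/xsk.c implements.  Sizes are
	 * illustrative; error handling is reduced to early returns. */
	#include <linux/if_xdp.h>
	#include <sys/socket.h>
	#include <sys/mman.h>
	#include <stdlib.h>
	#include <unistd.h>

	#define NUM_FRAMES	4096
	#define FRAME_SIZE	2048
	#define FILL_RING_SIZE	2048

	static int register_umem_sketch(void)
	{
		struct xdp_umem_reg mr = {};
		struct xdp_mmap_offsets off;
		socklen_t optlen = sizeof(off);
		int ring_size = FILL_RING_SIZE;
		void *area, *fill_map;
		int fd;

		fd = socket(AF_XDP, SOCK_RAW, 0);
		if (fd < 0)
			return -1;

		/* The UMEM area must be page aligned; the kernel pins these pages. */
		if (posix_memalign(&area, getpagesize(), NUM_FRAMES * FRAME_SIZE))
			return -1;

		mr.addr = (unsigned long)area;
		mr.len = NUM_FRAMES * FRAME_SIZE;
		mr.chunk_size = FRAME_SIZE;
		mr.headroom = 0;

		if (setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr)))
			return -1;
		if (setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING,
			       &ring_size, sizeof(ring_size)))
			return -1;

		/* The fill ring lives in kernel memory and is mmap()ed at a fixed
		 * page offset; descriptors start at off.fr.desc. */
		if (getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen))
			return -1;
		fill_map = mmap(NULL, off.fr.desc + FILL_RING_SIZE * sizeof(__u64),
				PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
				fd, XDP_UMEM_PGOFF_FILL_RING);
		return fill_map == MAP_FAILED ? -1 : fd;
	}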
umem               49 net/xdp/xsk_diag.c 	struct xdp_umem *umem = xs->umem;
umem               53 net/xdp/xsk_diag.c 	if (!umem)
umem               56 net/xdp/xsk_diag.c 	du.id = umem->id;
umem               57 net/xdp/xsk_diag.c 	du.size = umem->size;
umem               58 net/xdp/xsk_diag.c 	du.num_pages = umem->npgs;
umem               59 net/xdp/xsk_diag.c 	du.chunk_size = umem->chunk_size_nohr + umem->headroom;
umem               60 net/xdp/xsk_diag.c 	du.headroom = umem->headroom;
umem               61 net/xdp/xsk_diag.c 	du.ifindex = umem->dev ? umem->dev->ifindex : 0;
umem               62 net/xdp/xsk_diag.c 	du.queue_id = umem->queue_id;
umem               64 net/xdp/xsk_diag.c 	if (umem->zc)
umem               66 net/xdp/xsk_diag.c 	du.refs = refcount_read(&umem->users);
umem               70 net/xdp/xsk_diag.c 	if (!err && umem->fq)
umem               71 net/xdp/xsk_diag.c 		err = xsk_diag_put_ring(umem->fq, XDP_DIAG_UMEM_FILL_RING, nlskb);
umem               72 net/xdp/xsk_diag.c 	if (!err && umem->cq) {
umem               73 net/xdp/xsk_diag.c 		err = xsk_diag_put_ring(umem->cq, XDP_DIAG_UMEM_COMPLETION_RING,
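The xsk_diag.c entries fill the XDP_DIAG_UMEM attribute for the sock_diag netlink interface. Below is a hedged sketch of the request side only, assuming the struct xdp_diag_req layout and XDP_SHOW_* flags from include/uapi/linux/xdp_diag.h; reply parsing is omitted.

	/* Sketch: ask the kernel to dump AF_XDP sockets (including the UMEM
	 * attribute that xsk_diag_put_umem() above fills in) over sock_diag. */
	#include <linux/netlink.h>
	#include <linux/sock_diag.h>
	#include <linux/xdp_diag.h>
	#include <sys/socket.h>
	#include <unistd.h>

	static int dump_xdp_sockets(void)
	{
		struct sockaddr_nl nladdr = { .nl_family = AF_NETLINK };
		struct {
			struct nlmsghdr nlh;
			struct xdp_diag_req req;
		} msg = {
			.nlh = {
				.nlmsg_len = sizeof(msg),
				.nlmsg_type = SOCK_DIAG_BY_FAMILY,
				.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
			},
			.req = {
				.sdiag_family = AF_XDP,
				.xdiag_show = XDP_SHOW_INFO | XDP_SHOW_UMEM,
			},
		};
		int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);

		if (fd < 0)
			return -1;
		if (sendto(fd, &msg, sizeof(msg), 0,
			   (struct sockaddr *)&nladdr, sizeof(nladdr)) < 0) {
			close(fd);
			return -1;
		}
		/* A recv() loop walking the xdp_diag_msg replies would follow. */
		return fd;
	}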
umem               87 net/xdp/xsk_queue.c struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
umem               90 net/xdp/xsk_queue.c 	struct xdp_umem_fq_reuse *oldq = umem->fq_reuse;
umem               93 net/xdp/xsk_queue.c 		umem->fq_reuse = newq;
umem              104 net/xdp/xsk_queue.c 	umem->fq_reuse = newq;
umem              115 net/xdp/xsk_queue.c void xsk_reuseq_destroy(struct xdp_umem *umem)
umem              117 net/xdp/xsk_queue.c 	xsk_reuseq_free(umem->fq_reuse);
umem              118 net/xdp/xsk_queue.c 	umem->fq_reuse = NULL;
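xsk_reuseq_swap() and xsk_reuseq_destroy() above manage the fill-queue reuse buffer that zero-copy drivers attach to a UMEM. A hedged sketch of the driver-side pattern, assuming the companion xsk_reuseq_prepare()/xsk_reuseq_free() helpers exported alongside them in this era:

	/* Sketch (kernel driver context): install a fill-queue reuse buffer
	 * sized to the RX ring before enabling zero-copy on a queue. */
	#include <net/xdp_sock.h>
	#include <linux/errno.h>

	static int setup_umem_reuseq(struct xdp_umem *umem, u32 rx_ring_size)
	{
		struct xdp_umem_fq_reuse *reuseq;

		reuseq = xsk_reuseq_prepare(rx_ring_size);
		if (!reuseq)
			return -ENOMEM;

		/* Swap in the new queue; the previous one (possibly NULL) is freed. */
		xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
		return 0;
	}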
umem              137 net/xdp/xsk_queue.h static inline bool xskq_crosses_non_contig_pg(struct xdp_umem *umem, u64 addr,
umem              142 net/xdp/xsk_queue.h 		(unsigned long)umem->pages[(addr >> PAGE_SHIFT)].addr &
umem              160 net/xdp/xsk_queue.h 						struct xdp_umem *umem)
umem              166 net/xdp/xsk_queue.h 	    xskq_crosses_non_contig_pg(umem, addr, length)) {
umem              175 net/xdp/xsk_queue.h 				      struct xdp_umem *umem)
umem              183 net/xdp/xsk_queue.h 		if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG) {
umem              185 net/xdp/xsk_queue.h 							 umem->chunk_size_nohr,
umem              186 net/xdp/xsk_queue.h 							 umem))
umem              202 net/xdp/xsk_queue.h 				  struct xdp_umem *umem)
umem              213 net/xdp/xsk_queue.h 	return xskq_validate_addr(q, addr, umem);
umem              273 net/xdp/xsk_queue.h 				      struct xdp_umem *umem)
umem              275 net/xdp/xsk_queue.h 	if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG) {
umem              276 net/xdp/xsk_queue.h 		if (!xskq_is_valid_addr_unaligned(q, d->addr, d->len, umem))
umem              279 net/xdp/xsk_queue.h 		if (d->len > umem->chunk_size_nohr || d->options) {
umem              301 net/xdp/xsk_queue.h 						  struct xdp_umem *umem)
umem              308 net/xdp/xsk_queue.h 		if (xskq_is_valid_desc(q, desc, umem))
umem              319 net/xdp/xsk_queue.h 					      struct xdp_umem *umem)
umem              330 net/xdp/xsk_queue.h 	return xskq_validate_desc(q, desc, umem);
umem              379 net/xdp/xsk_queue.h void xsk_reuseq_destroy(struct xdp_umem *umem);
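The xsk_queue.h validators above treat addresses differently when the UMEM was registered with XDP_UMEM_UNALIGNED_CHUNK_FLAG: the per-packet offset travels in the upper bits of the 64-bit address. A small sketch of that encoding, assuming the XSK_UNALIGNED_BUF_* constants from include/uapi/linux/if_xdp.h match this kernel:

	/* Sketch: how an address plus headroom offset is encoded for a UMEM in
	 * unaligned-chunk mode, mirroring xsk_umem_adjust_offset(). */
	#include <linux/if_xdp.h>	/* XSK_UNALIGNED_BUF_OFFSET_SHIFT, ..._ADDR_MASK */

	static __u64 adjust_offset(__u64 addr, __u64 offset, int unaligned)
	{
		if (unaligned)
			return addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
		return addr + offset;
	}

	static __u64 real_address(__u64 addr)
	{
		/* Base chunk address plus the offset carried in the upper bits. */
		return (addr & XSK_UNALIGNED_BUF_ADDR_MASK) +
		       (addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT);
	}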
umem               83 samples/bpf/xdpsock_user.c 	struct xsk_umem *umem;
umem               90 samples/bpf/xdpsock_user.c 	struct xsk_umem_info *umem;
umem              198 samples/bpf/xdpsock_user.c 	struct xsk_umem *umem = xsks[0]->umem->umem;
umem              204 samples/bpf/xdpsock_user.c 	(void)xsk_umem__delete(umem);
umem              276 samples/bpf/xdpsock_user.c static size_t gen_eth_frame(struct xsk_umem_info *umem, u64 addr)
umem              278 samples/bpf/xdpsock_user.c 	memcpy(xsk_umem__get_data(umem->buffer, addr), pkt_data,
umem              285 samples/bpf/xdpsock_user.c 	struct xsk_umem_info *umem;
umem              296 samples/bpf/xdpsock_user.c 	umem = calloc(1, sizeof(*umem));
umem              297 samples/bpf/xdpsock_user.c 	if (!umem)
umem              300 samples/bpf/xdpsock_user.c 	ret = xsk_umem__create(&umem->umem, buffer, size, &umem->fq, &umem->cq,
umem              306 samples/bpf/xdpsock_user.c 	umem->buffer = buffer;
umem              307 samples/bpf/xdpsock_user.c 	return umem;
umem              310 samples/bpf/xdpsock_user.c static struct xsk_socket_info *xsk_configure_socket(struct xsk_umem_info *umem)
umem              322 samples/bpf/xdpsock_user.c 	xsk->umem = umem;
umem              328 samples/bpf/xdpsock_user.c 	ret = xsk_socket__create(&xsk->xsk, opt_if, opt_queue, umem->umem,
umem              337 samples/bpf/xdpsock_user.c 	ret = xsk_ring_prod__reserve(&xsk->umem->fq,
umem              343 samples/bpf/xdpsock_user.c 		*xsk_ring_prod__fill_addr(&xsk->umem->fq, idx++) =
umem              345 samples/bpf/xdpsock_user.c 	xsk_ring_prod__submit(&xsk->umem->fq,
umem              489 samples/bpf/xdpsock_user.c 	struct xsk_umem_info *umem = xsk->umem;
umem              504 samples/bpf/xdpsock_user.c 	rcvd = xsk_ring_cons__peek(&umem->cq, ndescs, &idx_cq);
umem              509 samples/bpf/xdpsock_user.c 		ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
umem              513 samples/bpf/xdpsock_user.c 			if (xsk_ring_prod__needs_wakeup(&umem->fq))
umem              515 samples/bpf/xdpsock_user.c 			ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
umem              519 samples/bpf/xdpsock_user.c 			*xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) =
umem              520 samples/bpf/xdpsock_user.c 				*xsk_ring_cons__comp_addr(&umem->cq, idx_cq++);
umem              522 samples/bpf/xdpsock_user.c 		xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
umem              523 samples/bpf/xdpsock_user.c 		xsk_ring_cons__release(&xsk->umem->cq, rcvd);
umem              540 samples/bpf/xdpsock_user.c 	rcvd = xsk_ring_cons__peek(&xsk->umem->cq, BATCH_SIZE, &idx);
umem              542 samples/bpf/xdpsock_user.c 		xsk_ring_cons__release(&xsk->umem->cq, rcvd);
umem              556 samples/bpf/xdpsock_user.c 		if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq))
umem              561 samples/bpf/xdpsock_user.c 	ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
umem              565 samples/bpf/xdpsock_user.c 		if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq))
umem              567 samples/bpf/xdpsock_user.c 		ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
umem              576 samples/bpf/xdpsock_user.c 		char *pkt = xsk_umem__get_data(xsk->umem->buffer, addr);
umem              579 samples/bpf/xdpsock_user.c 		*xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) = orig;
umem              582 samples/bpf/xdpsock_user.c 	xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
umem              671 samples/bpf/xdpsock_user.c 		if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq))
umem              691 samples/bpf/xdpsock_user.c 		char *pkt = xsk_umem__get_data(xsk->umem->buffer, addr);
umem              734 samples/bpf/xdpsock_user.c 	struct xsk_umem_info *umem;
umem              756 samples/bpf/xdpsock_user.c 	umem = xsk_configure_umem(bufs, NUM_FRAMES * opt_xsk_frame_size);
umem              757 samples/bpf/xdpsock_user.c 	xsks[num_socks++] = xsk_configure_socket(umem);
umem              763 samples/bpf/xdpsock_user.c 			(void)gen_eth_frame(umem, i * opt_xsk_frame_size);
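The xdpsock sample above allocates one UMEM, wraps it in an xsk_socket, and primes the fill ring before any traffic flows. A condensed, hedged version of that setup using the libbpf xsk API it is built on; the installed <bpf/xsk.h> header path, the helper name, and the trimmed error handling are assumptions.

	#include <bpf/xsk.h>
	#include <stdlib.h>
	#include <unistd.h>

	#define NUM_FRAMES 4096

	/* Rings must stay alive as long as the socket; keep them at file scope
	 * here instead of the sample's xsk_umem_info/xsk_socket_info structs. */
	static struct xsk_ring_prod fq, tx;
	static struct xsk_ring_cons cq, rx;

	static struct xsk_socket *setup_xsk_sketch(const char *ifname, __u32 queue)
	{
		struct xsk_umem *umem;
		struct xsk_socket *xsk;
		__u32 idx, i;
		void *bufs;

		if (posix_memalign(&bufs, getpagesize(),
				   NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE))
			return NULL;

		/* NULL config = library defaults for frame size and ring sizes. */
		if (xsk_umem__create(&umem, bufs,
				     NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE,
				     &fq, &cq, NULL))
			return NULL;

		if (xsk_socket__create(&xsk, ifname, queue, umem, &rx, &tx, NULL))
			return NULL;

		/* Prime the fill ring so the kernel has chunks to receive into. */
		if (xsk_ring_prod__reserve(&fq, XSK_RING_PROD__DEFAULT_NUM_DESCS, &idx)
		    != XSK_RING_PROD__DEFAULT_NUM_DESCS)
			return NULL;
		for (i = 0; i < XSK_RING_PROD__DEFAULT_NUM_DESCS; i++)
			*xsk_ring_prod__fill_addr(&fq, idx++) =
				i * XSK_UMEM__DEFAULT_FRAME_SIZE;
		xsk_ring_prod__submit(&fq, XSK_RING_PROD__DEFAULT_NUM_DESCS);

		return xsk;
	}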
umem               60 tools/lib/bpf/xsk.c 	struct xsk_umem *umem;
umem               91 tools/lib/bpf/xsk.c int xsk_umem__fd(const struct xsk_umem *umem)
umem               93 tools/lib/bpf/xsk.c 	return umem ? umem->fd : -EINVAL;
umem              210 tools/lib/bpf/xsk.c 	struct xsk_umem *umem;
umem              219 tools/lib/bpf/xsk.c 	umem = calloc(1, sizeof(*umem));
umem              220 tools/lib/bpf/xsk.c 	if (!umem)
umem              223 tools/lib/bpf/xsk.c 	umem->fd = socket(AF_XDP, SOCK_RAW, 0);
umem              224 tools/lib/bpf/xsk.c 	if (umem->fd < 0) {
umem              229 tools/lib/bpf/xsk.c 	umem->umem_area = umem_area;
umem              230 tools/lib/bpf/xsk.c 	xsk_set_umem_config(&umem->config, usr_config);
umem              235 tools/lib/bpf/xsk.c 	mr.chunk_size = umem->config.frame_size;
umem              236 tools/lib/bpf/xsk.c 	mr.headroom = umem->config.frame_headroom;
umem              237 tools/lib/bpf/xsk.c 	mr.flags = umem->config.flags;
umem              239 tools/lib/bpf/xsk.c 	err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
umem              244 tools/lib/bpf/xsk.c 	err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_FILL_RING,
umem              245 tools/lib/bpf/xsk.c 			 &umem->config.fill_size,
umem              246 tools/lib/bpf/xsk.c 			 sizeof(umem->config.fill_size));
umem              251 tools/lib/bpf/xsk.c 	err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_COMPLETION_RING,
umem              252 tools/lib/bpf/xsk.c 			 &umem->config.comp_size,
umem              253 tools/lib/bpf/xsk.c 			 sizeof(umem->config.comp_size));
umem              259 tools/lib/bpf/xsk.c 	err = xsk_get_mmap_offsets(umem->fd, &off);
umem              265 tools/lib/bpf/xsk.c 	map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64),
umem              266 tools/lib/bpf/xsk.c 		   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, umem->fd,
umem              273 tools/lib/bpf/xsk.c 	umem->fill = fill;
umem              274 tools/lib/bpf/xsk.c 	fill->mask = umem->config.fill_size - 1;
umem              275 tools/lib/bpf/xsk.c 	fill->size = umem->config.fill_size;
umem              280 tools/lib/bpf/xsk.c 	fill->cached_cons = umem->config.fill_size;
umem              282 tools/lib/bpf/xsk.c 	map = mmap(NULL, off.cr.desc + umem->config.comp_size * sizeof(__u64),
umem              283 tools/lib/bpf/xsk.c 		   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, umem->fd,
umem              290 tools/lib/bpf/xsk.c 	umem->comp = comp;
umem              291 tools/lib/bpf/xsk.c 	comp->mask = umem->config.comp_size - 1;
umem              292 tools/lib/bpf/xsk.c 	comp->size = umem->config.comp_size;
umem              298 tools/lib/bpf/xsk.c 	*umem_ptr = umem;
umem              302 tools/lib/bpf/xsk.c 	munmap(map, off.fr.desc + umem->config.fill_size * sizeof(__u64));
umem              304 tools/lib/bpf/xsk.c 	close(umem->fd);
umem              306 tools/lib/bpf/xsk.c 	free(umem);
umem              559 tools/lib/bpf/xsk.c 		       __u32 queue_id, struct xsk_umem *umem,
umem              569 tools/lib/bpf/xsk.c 	if (!umem || !xsk_ptr || !rx || !tx)
umem              572 tools/lib/bpf/xsk.c 	if (umem->refcount) {
umem              581 tools/lib/bpf/xsk.c 	if (umem->refcount++ > 0) {
umem              588 tools/lib/bpf/xsk.c 		xsk->fd = umem->fd;
umem              593 tools/lib/bpf/xsk.c 	xsk->umem = umem;
umem              701 tools/lib/bpf/xsk.c 	if (--umem->refcount)
umem              708 tools/lib/bpf/xsk.c int xsk_umem__delete(struct xsk_umem *umem)
umem              713 tools/lib/bpf/xsk.c 	if (!umem)
umem              716 tools/lib/bpf/xsk.c 	if (umem->refcount)
umem              719 tools/lib/bpf/xsk.c 	err = xsk_get_mmap_offsets(umem->fd, &off);
umem              721 tools/lib/bpf/xsk.c 		munmap(umem->fill->ring - off.fr.desc,
umem              722 tools/lib/bpf/xsk.c 		       off.fr.desc + umem->config.fill_size * sizeof(__u64));
umem              723 tools/lib/bpf/xsk.c 		munmap(umem->comp->ring - off.cr.desc,
umem              724 tools/lib/bpf/xsk.c 		       off.cr.desc + umem->config.comp_size * sizeof(__u64));
umem              727 tools/lib/bpf/xsk.c 	close(umem->fd);
umem              728 tools/lib/bpf/xsk.c 	free(umem);
umem              760 tools/lib/bpf/xsk.c 	xsk->umem->refcount--;
umem              764 tools/lib/bpf/xsk.c 	if (xsk->fd != xsk->umem->fd)
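In the tools/lib/bpf/xsk.c entries above, xsk_socket__create() takes a reference on the UMEM (umem->refcount) and xsk_umem__delete() refuses to tear it down while that count is non-zero, so teardown has to release the sockets first, as the xdpsock sample does. A short hedged sketch of that ordering; the function name and the caller-owned buffer are assumptions.

	/* Sketch: release order matters - the socket drops its UMEM reference in
	 * xsk_socket__delete(), and only then will xsk_umem__delete() succeed. */
	#include <bpf/xsk.h>
	#include <stdlib.h>

	static void teardown_xsk(struct xsk_socket *xsk, struct xsk_umem *umem,
				 void *umem_area)
	{
		xsk_socket__delete(xsk);	/* unmaps rx/tx rings, drops the refcount */
		xsk_umem__delete(umem);		/* unmaps fill/comp rings, closes the fd */
		free(umem_area);		/* the buffer itself belongs to the caller */
	}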
umem              186 tools/lib/bpf/xsk.h LIBBPF_API int xsk_umem__fd(const struct xsk_umem *umem);
umem              216 tools/lib/bpf/xsk.h LIBBPF_API int xsk_umem__create(struct xsk_umem **umem,
umem              221 tools/lib/bpf/xsk.h LIBBPF_API int xsk_umem__create_v0_0_2(struct xsk_umem **umem,
umem              226 tools/lib/bpf/xsk.h LIBBPF_API int xsk_umem__create_v0_0_4(struct xsk_umem **umem,
umem              233 tools/lib/bpf/xsk.h 				  struct xsk_umem *umem,
umem              239 tools/lib/bpf/xsk.h LIBBPF_API int xsk_umem__delete(struct xsk_umem *umem);
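xsk.h exports both xsk_umem__create_v0_0_2() and xsk_umem__create_v0_0_4(); the newer variant takes a config with a flags field, which is how unaligned chunks are requested. A hedged sketch of passing an explicit config, assuming the struct xsk_umem_config layout and default constants declared in this header:

	#include <bpf/xsk.h>
	#include <linux/if_xdp.h>

	/* Sketch: an explicit UMEM config; the .flags member only exists in the
	 * v0.0.4 layout, which a plain xsk_umem__create() call resolves to. */
	static const struct xsk_umem_config cfg = {
		.fill_size	= XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.comp_size	= XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.frame_size	= XSK_UMEM__DEFAULT_FRAME_SIZE,
		.frame_headroom	= XSK_UMEM__DEFAULT_FRAME_HEADROOM,
		.flags		= XDP_UMEM_UNALIGNED_CHUNK_FLAG,
	};

	/* Used as: xsk_umem__create(&umem, buffer, size, &fill, &comp, &cfg); */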