Lines Matching refs:ibmr

87 static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
265 struct rds_ib_mr *ibmr = NULL; in rds_ib_reuse_fmr() local
274 ibmr = llist_entry(ret, struct rds_ib_mr, llnode); in rds_ib_reuse_fmr()
278 return ibmr; in rds_ib_reuse_fmr()
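
The three matches above are the core of rds_ib_reuse_fmr(): pop one previously unmapped MR off the pool's lock-free clean list, if there is one. A minimal sketch of that flow, assuming the pool keeps clean MRs on a struct llist_head named clean_list; any synchronization around the pop is not visible in the matches and is omitted:

static struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	struct llist_node *ret;

	/* Pop one node off the pool's lock-free list of clean,
	 * already-unmapped MRs (list name assumed: clean_list). */
	ret = llist_del_first(&pool->clean_list);
	if (ret)
		ibmr = llist_entry(ret, struct rds_ib_mr, llnode);

	return ibmr;
}
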
296 struct rds_ib_mr *ibmr = NULL; in rds_ib_alloc_fmr() local
303 ibmr = rds_ib_reuse_fmr(pool); in rds_ib_alloc_fmr()
304 if (ibmr) in rds_ib_alloc_fmr()
305 return ibmr; in rds_ib_alloc_fmr()
328 rds_ib_flush_mr_pool(pool, 0, &ibmr); in rds_ib_alloc_fmr()
329 if (ibmr) in rds_ib_alloc_fmr()
330 return ibmr; in rds_ib_alloc_fmr()
333 ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL, rdsibdev_to_node(rds_ibdev)); in rds_ib_alloc_fmr()
334 if (!ibmr) { in rds_ib_alloc_fmr()
339 memset(ibmr, 0, sizeof(*ibmr)); in rds_ib_alloc_fmr()
341 ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd, in rds_ib_alloc_fmr()
347 if (IS_ERR(ibmr->fmr)) { in rds_ib_alloc_fmr()
348 err = PTR_ERR(ibmr->fmr); in rds_ib_alloc_fmr()
349 ibmr->fmr = NULL; in rds_ib_alloc_fmr()
355 return ibmr; in rds_ib_alloc_fmr()
358 if (ibmr) { in rds_ib_alloc_fmr()
359 if (ibmr->fmr) in rds_ib_alloc_fmr()
360 ib_dealloc_fmr(ibmr->fmr); in rds_ib_alloc_fmr()
361 kfree(ibmr); in rds_ib_alloc_fmr()
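
Taken together, the rds_ib_alloc_fmr() matches show the allocation order: reuse a clean MR if possible, otherwise flush the pool and let the flush hand one back (line 328), and only then allocate a fresh rds_ib_mr and back it with ib_alloc_fmr(). A condensed sketch follows; the pool field name, error label, and access flags are assumptions (they are not in the matches), and the memset at line 339 is redundant after kzalloc_node() and is left out:

static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;	/* field name assumed */
	struct rds_ib_mr *ibmr;
	int err = 0;

	/* Fast path: grab an already-allocated, unmapped MR. */
	ibmr = rds_ib_reuse_fmr(pool);
	if (ibmr)
		return ibmr;

	/* Pool exhausted: flush dirty MRs and take one from the flusher. */
	rds_ib_flush_mr_pool(pool, 0, &ibmr);
	if (ibmr)
		return ibmr;

	/* Slow path: allocate a new tracking structure on the device's NUMA
	 * node and back it with a hardware FMR. */
	ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL, rdsibdev_to_node(rds_ibdev));
	if (!ibmr) {
		err = -ENOMEM;
		goto out_fail;			/* label name assumed */
	}

	ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
				 IB_ACCESS_LOCAL_WRITE |	/* flags assumed */
				 IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE,
				 &pool->fmr_attr);
	if (IS_ERR(ibmr->fmr)) {
		err = PTR_ERR(ibmr->fmr);
		ibmr->fmr = NULL;
		goto out_fail;
	}

	return ibmr;

out_fail:
	if (ibmr) {
		if (ibmr->fmr)
			ib_dealloc_fmr(ibmr->fmr);
		kfree(ibmr);
	}
	return ERR_PTR(err);
}
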
367 static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr, in rds_ib_map_fmr() argument
428 ret = ib_map_phys_fmr(ibmr->fmr, in rds_ib_map_fmr()
435 rds_ib_teardown_mr(ibmr); in rds_ib_map_fmr()
437 ibmr->sg = scat; in rds_ib_map_fmr()
438 ibmr->sg_len = nents; in rds_ib_map_fmr()
439 ibmr->sg_dma_len = sg_dma_len; in rds_ib_map_fmr()
440 ibmr->remap_count++; in rds_ib_map_fmr()
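
The rds_ib_map_fmr() matches only cover the tail of the function: the ib_map_phys_fmr() call, the teardown of the previous mapping, and the bookkeeping that records the new one. The sketch below fills in the DMA mapping and page-list construction under the simplifying assumption that every mapped segment is whole, page-aligned pages; the real function also validates segment alignment and enforces a message-size limit:

static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
			  struct scatterlist *scat, unsigned int nents)
{
	struct ib_device *dev = rds_ibdev->dev;
	u64 io_addr = 0;
	u64 *dma_pages;
	int sg_dma_len, page_cnt = 0;
	int i, j, ret;

	/* DMA-map the caller's scatterlist. */
	sg_dma_len = ib_dma_map_sg(dev, scat, nents, DMA_BIDIRECTIONAL);
	if (!sg_dma_len)
		return -EBUSY;

	/* Count pages, then build the physical page list the FMR will cover
	 * (segments assumed page-aligned; alignment checks omitted). */
	for (i = 0; i < sg_dma_len; ++i)
		page_cnt += ib_sg_dma_len(dev, &scat[i]) >> PAGE_SHIFT;

	dma_pages = kmalloc_array(page_cnt, sizeof(u64), GFP_ATOMIC);
	if (!dma_pages)
		return -ENOMEM;

	page_cnt = 0;
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		for (j = 0; j < dma_len; j += PAGE_SIZE)
			dma_pages[page_cnt++] = (dma_addr & PAGE_MASK) + j;
	}

	/* Program the FMR with the page list. */
	ret = ib_map_phys_fmr(ibmr->fmr, dma_pages, page_cnt, io_addr);
	if (ret)
		goto out;

	/* The remap succeeded, so the old mapping can be torn down safely. */
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = scat;
	ibmr->sg_len = nents;
	ibmr->sg_dma_len = sg_dma_len;
	ibmr->remap_count++;
out:
	kfree(dma_pages);
	return ret;
}
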
453 struct rds_ib_mr *ibmr = trans_private; in rds_ib_sync_mr() local
454 struct rds_ib_device *rds_ibdev = ibmr->device; in rds_ib_sync_mr()
458 ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg, in rds_ib_sync_mr()
459 ibmr->sg_dma_len, DMA_BIDIRECTIONAL); in rds_ib_sync_mr()
462 ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg, in rds_ib_sync_mr()
463 ibmr->sg_dma_len, DMA_BIDIRECTIONAL); in rds_ib_sync_mr()
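
rds_ib_sync_mr() just forwards the mapped scatterlist to the DMA sync helpers; the direction argument, which is not captured in the matches, selects which one. A sketch, assuming it switches on the standard DMA direction constants:

void rds_ib_sync_mr(void *trans_private, int direction)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	switch (direction) {
	case DMA_FROM_DEVICE:
		/* Make device writes visible to the CPU. */
		ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
				       ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		/* Flush CPU writes so the device sees them. */
		ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
					  ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}
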
468 static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr) in __rds_ib_teardown_mr() argument
470 struct rds_ib_device *rds_ibdev = ibmr->device; in __rds_ib_teardown_mr()
472 if (ibmr->sg_dma_len) { in __rds_ib_teardown_mr()
474 ibmr->sg, ibmr->sg_len, in __rds_ib_teardown_mr()
476 ibmr->sg_dma_len = 0; in __rds_ib_teardown_mr()
480 if (ibmr->sg_len) { in __rds_ib_teardown_mr()
483 for (i = 0; i < ibmr->sg_len; ++i) { in __rds_ib_teardown_mr()
484 struct page *page = sg_page(&ibmr->sg[i]); in __rds_ib_teardown_mr()
492 kfree(ibmr->sg); in __rds_ib_teardown_mr()
494 ibmr->sg = NULL; in __rds_ib_teardown_mr()
495 ibmr->sg_len = 0; in __rds_ib_teardown_mr()
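
__rds_ib_teardown_mr() undoes a mapping in two steps: DMA-unmap the scatterlist, then release the pinned user pages before freeing the scatterlist itself. A sketch reconstructed from the matches; the per-page body of the loop is an assumption (dirty the page, then drop the pin):

static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_device *rds_ibdev = ibmr->device;

	/* Undo the DMA mapping, if one is still live. */
	if (ibmr->sg_dma_len) {
		ib_dma_unmap_sg(rds_ibdev->dev,
				ibmr->sg, ibmr->sg_len,
				DMA_BIDIRECTIONAL);
		ibmr->sg_dma_len = 0;
	}

	/* Release the scatterlist and the pages pinned behind it. */
	if (ibmr->sg_len) {
		unsigned int i;

		for (i = 0; i < ibmr->sg_len; ++i) {
			struct page *page = sg_page(&ibmr->sg[i]);

			/* Assumed: the remote side may have written the page,
			 * so mark it dirty before unpinning it. */
			set_page_dirty(page);
			put_page(page);
		}

		kfree(ibmr->sg);
		ibmr->sg = NULL;
		ibmr->sg_len = 0;
	}
}
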
499 static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr) in rds_ib_teardown_mr() argument
501 unsigned int pinned = ibmr->sg_len; in rds_ib_teardown_mr()
503 __rds_ib_teardown_mr(ibmr); in rds_ib_teardown_mr()
505 struct rds_ib_device *rds_ibdev = ibmr->device; in rds_ib_teardown_mr()
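
The rds_ib_teardown_mr() wrapper records how many pages were pinned before calling the helper, then charges that count back against the pool's accounting on the owning device. A short sketch; the pool and counter field names are assumed from the rds_ib_free_mr() matches further down:

static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	unsigned int pinned = ibmr->sg_len;

	__rds_ib_teardown_mr(ibmr);
	if (pinned) {
		struct rds_ib_device *rds_ibdev = ibmr->device;
		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

		/* The pages were just unpinned; undo the pool's
		 * pinned-page accounting (field names assumed). */
		atomic_sub(pinned, &pool->free_pinned);
	}
}
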
528 struct rds_ib_mr *ibmr; in llist_append_to_list() local
535 ibmr = llist_entry(node, struct rds_ib_mr, llnode); in llist_append_to_list()
536 list_add_tail(&ibmr->unmap_list, list); in llist_append_to_list()
551 struct rds_ib_mr *ibmr; in list_to_llist_nodes() local
555 list_for_each_entry(ibmr, list, unmap_list) { in list_to_llist_nodes()
556 cur = &ibmr->llnode; in list_to_llist_nodes()
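
llist_append_to_list() and list_to_llist_nodes() are the two conversion helpers the flush path relies on: the first drains a lock-free llist of MRs onto an ordinary list_head for in-order processing, the second re-chains processed MRs through their llnode members so the whole batch can be pushed back onto an llist in one operation. A sketch of both, with the signatures assumed from the call sites:

/* Drain an llist of MRs onto @list; returns how many were moved. */
static unsigned int llist_append_to_list(struct llist_head *llist,
					 struct list_head *list)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *node, *next;
	unsigned int count = 0;

	node = llist_del_all(llist);
	while (node) {
		next = node->next;
		ibmr = llist_entry(node, struct rds_ib_mr, llnode);
		list_add_tail(&ibmr->unmap_list, list);
		node = next;
		count++;
	}
	return count;
}

/* Chain the llnode members of the MRs on @list back together,
 * returning the head and tail of the resulting llist segment. */
static void list_to_llist_nodes(struct list_head *list,
				struct llist_node **nodes_head,
				struct llist_node **nodes_tail)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *cur = NULL;
	struct llist_node **next = nodes_head;

	list_for_each_entry(ibmr, list, unmap_list) {
		cur = &ibmr->llnode;
		*next = cur;
		next = &cur->next;
	}
	*next = NULL;
	*nodes_tail = cur;
}

The llist side lets rds_ib_free_mr() park MRs from any context without taking a lock; only the flusher ever needs the ordered list_head view.
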
573 struct rds_ib_mr *ibmr, *next; in rds_ib_flush_mr_pool() local
587 ibmr = rds_ib_reuse_fmr(pool); in rds_ib_flush_mr_pool()
588 if (ibmr) { in rds_ib_flush_mr_pool()
589 *ibmr_ret = ibmr; in rds_ib_flush_mr_pool()
599 ibmr = rds_ib_reuse_fmr(pool); in rds_ib_flush_mr_pool()
600 if (ibmr) { in rds_ib_flush_mr_pool()
601 *ibmr_ret = ibmr; in rds_ib_flush_mr_pool()
611 ibmr = rds_ib_reuse_fmr(pool); in rds_ib_flush_mr_pool()
612 if (ibmr) { in rds_ib_flush_mr_pool()
613 *ibmr_ret = ibmr; in rds_ib_flush_mr_pool()
632 list_for_each_entry(ibmr, &unmap_list, unmap_list) in rds_ib_flush_mr_pool()
633 list_add(&ibmr->fmr->list, &fmr_list); in rds_ib_flush_mr_pool()
640 list_for_each_entry_safe(ibmr, next, &unmap_list, unmap_list) { in rds_ib_flush_mr_pool()
641 unpinned += ibmr->sg_len; in rds_ib_flush_mr_pool()
642 __rds_ib_teardown_mr(ibmr); in rds_ib_flush_mr_pool()
643 if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) { in rds_ib_flush_mr_pool()
645 list_del(&ibmr->unmap_list); in rds_ib_flush_mr_pool()
646 ib_dealloc_fmr(ibmr->fmr); in rds_ib_flush_mr_pool()
647 kfree(ibmr); in rds_ib_flush_mr_pool()
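
rds_ib_flush_mr_pool() is the heart of the pool. The repeated rds_ib_reuse_fmr() attempts at lines 587/599/611 let a caller that only needs one MR steal a clean one instead of performing, or waiting behind, a full flush. The rest drains the drop and free lists, batch-unmaps every FMR with a single ib_unmap_fmr() call, tears down the per-MR state, and destroys MRs that are over their remap limit or still needed to reach the free goal. Below is a compressed sketch of that unmap-and-free phase only, wrapped in a hypothetical helper name; the flush serialization, waiter hand-off, and clean-list re-insertion are omitted:

/* rds_ib_flush_sketch() is a hypothetical condensation of the flush body,
 * not the kernel function itself. */
static void rds_ib_flush_sketch(struct rds_ib_mr_pool *pool,
				unsigned int free_goal)
{
	struct rds_ib_mr *ibmr, *next;
	LIST_HEAD(unmap_list);
	LIST_HEAD(fmr_list);
	unsigned long unpinned = 0;
	unsigned int nfreed = 0;

	/* Drain the lock-free drop/free lists onto one ordinary list. */
	llist_append_to_list(&pool->drop_list, &unmap_list);
	llist_append_to_list(&pool->free_list, &unmap_list);
	if (list_empty(&unmap_list))
		return;

	/* Batch-unmap every FMR with a single verbs call. */
	list_for_each_entry(ibmr, &unmap_list, unmap_list)
		list_add(&ibmr->fmr->list, &fmr_list);
	ib_unmap_fmr(&fmr_list);

	/* Release DMA state and pinned pages; destroy MRs that hit their
	 * remap limit or that must go to reach the free goal. */
	list_for_each_entry_safe(ibmr, next, &unmap_list, unmap_list) {
		unpinned += ibmr->sg_len;
		__rds_ib_teardown_mr(ibmr);
		if (nfreed < free_goal ||
		    ibmr->remap_count >= pool->fmr_attr.max_maps) {
			list_del(&ibmr->unmap_list);
			ib_dealloc_fmr(ibmr->fmr);
			kfree(ibmr);
			nfreed++;
		}
	}

	/* Survivors are clean again and, in the real code, go back onto
	 * pool->clean_list; the unpinned pages come off the accounting. */
	atomic_sub(unpinned, &pool->free_pinned);
}
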
696 struct rds_ib_mr *ibmr = trans_private; in rds_ib_free_mr() local
697 struct rds_ib_device *rds_ibdev = ibmr->device; in rds_ib_free_mr()
700 rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len); in rds_ib_free_mr()
703 if (ibmr->remap_count >= pool->fmr_attr.max_maps) in rds_ib_free_mr()
704 llist_add(&ibmr->llnode, &pool->drop_list); in rds_ib_free_mr()
706 llist_add(&ibmr->llnode, &pool->free_list); in rds_ib_free_mr()
708 atomic_add(ibmr->sg_len, &pool->free_pinned); in rds_ib_free_mr()
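
rds_ib_free_mr() never unmaps on the fast path: it parks the MR on the pool's drop list (if the FMR has hit its remap limit and must be destroyed) or free list (if it can be remapped), bumps the pinned-page counter, and leaves the real work to a later flush. A sketch of that decision; the pool field name is assumed, and the flush trigger and @invalidate handling that follow are not shown in the matches:

void rds_ib_free_mr(void *trans_private, int invalidate)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;	/* field name assumed */

	rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

	/* An FMR that has hit its remap limit must be destroyed by the next
	 * flush; anything else can simply be unmapped and reused. */
	if (ibmr->remap_count >= pool->fmr_attr.max_maps)
		llist_add(&ibmr->llnode, &pool->drop_list);
	else
		llist_add(&ibmr->llnode, &pool->free_list);

	/* The pages stay pinned until a flush actually runs; account for
	 * them so a flush can be kicked off once too much memory is held
	 * (that trigger is omitted here). */
	atomic_add(ibmr->sg_len, &pool->free_pinned);
}
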
747 struct rds_ib_mr *ibmr = NULL; in rds_ib_get_mr() local
761 ibmr = rds_ib_alloc_fmr(rds_ibdev); in rds_ib_get_mr()
762 if (IS_ERR(ibmr)) { in rds_ib_get_mr()
764 return ibmr; in rds_ib_get_mr()
767 ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents); in rds_ib_get_mr()
769 *key_ret = ibmr->fmr->rkey; in rds_ib_get_mr()
773 ibmr->device = rds_ibdev; in rds_ib_get_mr()
778 if (ibmr) in rds_ib_get_mr()
779 rds_ib_free_mr(ibmr, 0); in rds_ib_get_mr()
780 ibmr = ERR_PTR(ret); in rds_ib_get_mr()
784 return ibmr; in rds_ib_get_mr()
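
Finally, rds_ib_get_mr() is the entry point the RDS core uses for a registration: allocate (or reuse) an FMR, map the caller's scatterlist, hand the resulting rkey back through key_ret, and on a mapping error push the MR straight back into the pool. A sketch of the visible flow; the device lookup helper, socket field, and signature details are assumptions, and statistics and device refcounting are omitted:

void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_mr *ibmr = NULL;
	int ret;

	/* Resolve the device from the socket's bound address
	 * (helper and field names assumed; not in the matches). */
	rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
	if (!rds_ibdev) {
		ret = -ENODEV;
		goto out;
	}

	ibmr = rds_ib_alloc_fmr(rds_ibdev);
	if (IS_ERR(ibmr))
		return ibmr;

	ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
	if (ret == 0)
		*key_ret = ibmr->fmr->rkey;	/* rkey handed to the caller */

	ibmr->device = rds_ibdev;

out:
	if (ret) {
		/* Mapping failed: return the MR to the pool, report error. */
		if (ibmr)
			rds_ib_free_mr(ibmr, 0);
		ibmr = ERR_PTR(ret);
	}
	return ibmr;
}
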