Lines matching refs: ibmr

107 static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
292 struct rds_ib_mr *ibmr = NULL; in rds_ib_reuse_fmr() local
301 ibmr = llist_entry(ret, struct rds_ib_mr, llnode); in rds_ib_reuse_fmr()
305 return ibmr; in rds_ib_reuse_fmr()
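
The rds_ib_reuse_fmr() matches above show a previously registered MR being recovered from one of the pool's lock-free lists and converted back with llist_entry() on ibmr->llnode. A minimal sketch of that reuse path, assuming the clean entries live on a pool->clean_list guarded by a pool->clean_lock spinlock (neither field name appears in the listing) and the RDS-internal struct definitions from net/rds/ib.h:

    /* Sketch: pop one already-registered MR off the pool's clean list.
     * clean_list/clean_lock are assumed field names; only the llist_entry()
     * conversion is taken from the listing above. */
    static struct rds_ib_mr *reuse_mr_sketch(struct rds_ib_mr_pool *pool)
    {
            struct llist_node *ret;
            struct rds_ib_mr *ibmr = NULL;
            unsigned long flags;

            /* llist_del_first() requires its callers to be serialized. */
            spin_lock_irqsave(&pool->clean_lock, flags);
            ret = llist_del_first(&pool->clean_list);
            spin_unlock_irqrestore(&pool->clean_lock, flags);

            if (ret)
                    ibmr = llist_entry(ret, struct rds_ib_mr, llnode);
            return ibmr;
    }
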
324 struct rds_ib_mr *ibmr = NULL; in rds_ib_alloc_fmr() local
344 ibmr = rds_ib_reuse_fmr(pool); in rds_ib_alloc_fmr()
345 if (ibmr) in rds_ib_alloc_fmr()
346 return ibmr; in rds_ib_alloc_fmr()
375 rds_ib_flush_mr_pool(pool, 0, &ibmr); in rds_ib_alloc_fmr()
376 if (ibmr) in rds_ib_alloc_fmr()
377 return ibmr; in rds_ib_alloc_fmr()
380 ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL, rdsibdev_to_node(rds_ibdev)); in rds_ib_alloc_fmr()
381 if (!ibmr) { in rds_ib_alloc_fmr()
386 ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd, in rds_ib_alloc_fmr()
392 if (IS_ERR(ibmr->fmr)) { in rds_ib_alloc_fmr()
393 err = PTR_ERR(ibmr->fmr); in rds_ib_alloc_fmr()
394 ibmr->fmr = NULL; in rds_ib_alloc_fmr()
399 ibmr->pool = pool; in rds_ib_alloc_fmr()
405 return ibmr; in rds_ib_alloc_fmr()
408 if (ibmr) { in rds_ib_alloc_fmr()
409 if (ibmr->fmr) in rds_ib_alloc_fmr()
410 ib_dealloc_fmr(ibmr->fmr); in rds_ib_alloc_fmr()
411 kfree(ibmr); in rds_ib_alloc_fmr()
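
Read top to bottom, the rds_ib_alloc_fmr() lines trace a three-step allocation: try to reuse a clean MR, fall back to flushing the pool (which may hand one back through its third argument), and only then kzalloc a new rds_ib_mr and register a fresh FMR with ib_alloc_fmr(). A condensed sketch of that order with the error path folded in; the access flags passed to ib_alloc_fmr() are an assumption, and the pool's allocation accounting is omitted:

    /* Sketch of the allocation order implied by the rds_ib_alloc_fmr()
     * matches: reuse -> flush-and-reuse -> fresh ib_alloc_fmr(). */
    static struct rds_ib_mr *alloc_fmr_sketch(struct rds_ib_device *rds_ibdev,
                                              struct rds_ib_mr_pool *pool)
    {
            struct rds_ib_mr *ibmr;
            int err;

            ibmr = rds_ib_reuse_fmr(pool);          /* 1. cheap path: recycle */
            if (ibmr)
                    return ibmr;

            rds_ib_flush_mr_pool(pool, 0, &ibmr);   /* 2. reclaim via a flush */
            if (ibmr)
                    return ibmr;

            /* 3. slow path: new MR, allocated near the device's NUMA node. */
            ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL,
                                rdsibdev_to_node(rds_ibdev));
            if (!ibmr)
                    return ERR_PTR(-ENOMEM);

            ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
                                     IB_ACCESS_LOCAL_WRITE |
                                     IB_ACCESS_REMOTE_READ |
                                     IB_ACCESS_REMOTE_WRITE,   /* assumed flags */
                                     &pool->fmr_attr);
            if (IS_ERR(ibmr->fmr)) {
                    err = PTR_ERR(ibmr->fmr);
                    kfree(ibmr);
                    return ERR_PTR(err);
            }

            ibmr->pool = pool;
            return ibmr;
    }
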
417 static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr, in rds_ib_map_fmr() argument
460 if (page_cnt > ibmr->pool->fmr_attr.max_pages) in rds_ib_map_fmr()
478 ret = ib_map_phys_fmr(ibmr->fmr, in rds_ib_map_fmr()
485 rds_ib_teardown_mr(ibmr); in rds_ib_map_fmr()
487 ibmr->sg = scat; in rds_ib_map_fmr()
488 ibmr->sg_len = nents; in rds_ib_map_fmr()
489 ibmr->sg_dma_len = sg_dma_len; in rds_ib_map_fmr()
490 ibmr->remap_count++; in rds_ib_map_fmr()
492 if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL) in rds_ib_map_fmr()
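
rds_ib_map_fmr() DMA-maps the caller's scatterlist, flattens it into the u64 page array the legacy FMR API wants, registers it with ib_map_phys_fmr(), and on success records the mapping in the MR (sg, sg_len, sg_dma_len) and bumps remap_count; the pool_type test at the end only selects which statistics counter to increment. A sketch of the mapping step, assuming fully page-aligned segments and skipping the alignment checks and the fmr_attr.max_pages limit the real function enforces:

    /* Sketch: turn a DMA-mapped scatterlist into the page array that
     * ib_map_phys_fmr() expects.  Alignment and max_pages checks elided. */
    static int map_fmr_sketch(struct ib_device *dev, struct rds_ib_mr *ibmr,
                              struct scatterlist *scat, unsigned int nents)
    {
            struct scatterlist *sg;
            u64 *dma_pages;
            unsigned int i, j, page_cnt = 0;
            int dma_nents, ret;

            /* 1. DMA-map the caller's scatterlist for the HCA. */
            dma_nents = ib_dma_map_sg(dev, scat, nents, DMA_BIDIRECTIONAL);
            if (unlikely(!dma_nents))
                    return -EBUSY;

            /* 2. Flatten every mapped segment into PAGE_SIZE-sized chunks. */
            for_each_sg(scat, sg, dma_nents, i)
                    page_cnt += sg_dma_len(sg) >> PAGE_SHIFT;

            dma_pages = kmalloc_array(page_cnt, sizeof(u64), GFP_ATOMIC);
            if (!dma_pages)
                    return -ENOMEM;

            page_cnt = 0;
            for_each_sg(scat, sg, dma_nents, i)
                    for (j = 0; j < sg_dma_len(sg); j += PAGE_SIZE)
                            dma_pages[page_cnt++] = sg_dma_address(sg) + j;

            /* 3. Register the page list with the already-allocated FMR. */
            ret = ib_map_phys_fmr(ibmr->fmr, dma_pages, page_cnt, dma_pages[0]);
            kfree(dma_pages);
            if (ret)
                    return ret;

            ibmr->sg = scat;
            ibmr->sg_len = nents;
            ibmr->sg_dma_len = dma_nents;
            ibmr->remap_count++;
            return 0;
    }
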
506 struct rds_ib_mr *ibmr = trans_private; in rds_ib_sync_mr() local
507 struct rds_ib_device *rds_ibdev = ibmr->device; in rds_ib_sync_mr()
511 ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg, in rds_ib_sync_mr()
512 ibmr->sg_dma_len, DMA_BIDIRECTIONAL); in rds_ib_sync_mr()
515 ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg, in rds_ib_sync_mr()
516 ibmr->sg_dma_len, DMA_BIDIRECTIONAL); in rds_ib_sync_mr()
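
rds_ib_sync_mr() is the cache-coherency hook: one direction syncs the MR's scatterlist for the CPU, the other for the device, both over the full sg_dma_len. A sketch; which direction value selects which branch is an assumption here:

    /* Sketch of the sync hook implied by the two ib_dma_sync_sg_* calls above. */
    static void sync_mr_sketch(void *trans_private, int direction)
    {
            struct rds_ib_mr *ibmr = trans_private;
            struct rds_ib_device *rds_ibdev = ibmr->device;

            switch (direction) {
            case DMA_FROM_DEVICE:
                    /* Make device writes visible before the CPU reads them. */
                    ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
                                           ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
                    break;
            case DMA_TO_DEVICE:
                    /* Flush CPU writes so the device sees current data. */
                    ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
                                              ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
                    break;
            }
    }
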
521 static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr) in __rds_ib_teardown_mr() argument
523 struct rds_ib_device *rds_ibdev = ibmr->device; in __rds_ib_teardown_mr()
525 if (ibmr->sg_dma_len) { in __rds_ib_teardown_mr()
527 ibmr->sg, ibmr->sg_len, in __rds_ib_teardown_mr()
529 ibmr->sg_dma_len = 0; in __rds_ib_teardown_mr()
533 if (ibmr->sg_len) { in __rds_ib_teardown_mr()
536 for (i = 0; i < ibmr->sg_len; ++i) { in __rds_ib_teardown_mr()
537 struct page *page = sg_page(&ibmr->sg[i]); in __rds_ib_teardown_mr()
545 kfree(ibmr->sg); in __rds_ib_teardown_mr()
547 ibmr->sg = NULL; in __rds_ib_teardown_mr()
548 ibmr->sg_len = 0; in __rds_ib_teardown_mr()
552 static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr) in rds_ib_teardown_mr() argument
554 unsigned int pinned = ibmr->sg_len; in rds_ib_teardown_mr()
556 __rds_ib_teardown_mr(ibmr); in rds_ib_teardown_mr()
558 struct rds_ib_mr_pool *pool = ibmr->pool; in rds_ib_teardown_mr()
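
__rds_ib_teardown_mr() undoes rds_ib_map_fmr(): it DMA-unmaps the scatterlist, dirties and unpins every page, then frees the sg array; rds_ib_teardown_mr() wraps it, remembering sg_len first so the unpinned pages can be charged back to the pool. A sketch of the inner teardown (the wrapper's pool accounting is left out):

    /* Sketch: release everything the map step pinned and DMA-mapped. */
    static void teardown_mr_sketch(struct rds_ib_mr *ibmr)
    {
            struct rds_ib_device *rds_ibdev = ibmr->device;
            unsigned int i;

            if (ibmr->sg_dma_len) {
                    /* Undo the ib_dma_map_sg() done at map time. */
                    ib_dma_unmap_sg(rds_ibdev->dev, ibmr->sg, ibmr->sg_len,
                                    DMA_BIDIRECTIONAL);
                    ibmr->sg_dma_len = 0;
            }

            if (ibmr->sg_len) {
                    /* The peer may have RDMA-written into these pages, so
                     * mark them dirty before dropping the pin. */
                    for (i = 0; i < ibmr->sg_len; ++i) {
                            struct page *page = sg_page(&ibmr->sg[i]);

                            set_page_dirty(page);
                            put_page(page);
                    }
                    kfree(ibmr->sg);
                    ibmr->sg = NULL;
                    ibmr->sg_len = 0;
            }
    }
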
581 struct rds_ib_mr *ibmr; in llist_append_to_list() local
589 ibmr = llist_entry(node, struct rds_ib_mr, llnode); in llist_append_to_list()
590 list_add_tail(&ibmr->unmap_list, list); in llist_append_to_list()
607 struct rds_ib_mr *ibmr; in list_to_llist_nodes() local
611 list_for_each_entry(ibmr, list, unmap_list) { in list_to_llist_nodes()
612 cur = &ibmr->llnode; in list_to_llist_nodes()
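
llist_append_to_list() and list_to_llist_nodes() are the glue between the pool's lock-free llists and the ordinary list_head the flush path walks: the first drains an llist onto a regular list via each MR's unmap_list member, the second chains the survivors' llnode fields back together so they can be returned to an llist in one operation. A sketch of the first conversion, assuming the llist is drained with llist_del_all():

    /* Sketch: drain a lock-free llist of MRs onto a normal list for flushing. */
    static unsigned int append_llist_sketch(struct llist_head *llist,
                                            struct list_head *list)
    {
            struct llist_node *node;
            struct rds_ib_mr *ibmr;
            unsigned int count = 0;

            node = llist_del_all(llist);    /* atomically take the whole llist */
            while (node) {
                    ibmr = llist_entry(node, struct rds_ib_mr, llnode);
                    list_add_tail(&ibmr->unmap_list, list);
                    node = node->next;
                    count++;
            }
            return count;
    }
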
629 struct rds_ib_mr *ibmr, *next; in rds_ib_flush_mr_pool() local
646 ibmr = rds_ib_reuse_fmr(pool); in rds_ib_flush_mr_pool()
647 if (ibmr) { in rds_ib_flush_mr_pool()
648 *ibmr_ret = ibmr; in rds_ib_flush_mr_pool()
658 ibmr = rds_ib_reuse_fmr(pool); in rds_ib_flush_mr_pool()
659 if (ibmr) { in rds_ib_flush_mr_pool()
660 *ibmr_ret = ibmr; in rds_ib_flush_mr_pool()
670 ibmr = rds_ib_reuse_fmr(pool); in rds_ib_flush_mr_pool()
671 if (ibmr) { in rds_ib_flush_mr_pool()
672 *ibmr_ret = ibmr; in rds_ib_flush_mr_pool()
691 list_for_each_entry(ibmr, &unmap_list, unmap_list) in rds_ib_flush_mr_pool()
692 list_add(&ibmr->fmr->list, &fmr_list); in rds_ib_flush_mr_pool()
699 list_for_each_entry_safe(ibmr, next, &unmap_list, unmap_list) { in rds_ib_flush_mr_pool()
700 unpinned += ibmr->sg_len; in rds_ib_flush_mr_pool()
701 __rds_ib_teardown_mr(ibmr); in rds_ib_flush_mr_pool()
703 ibmr->remap_count >= pool->fmr_attr.max_maps) { in rds_ib_flush_mr_pool()
704 if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL) in rds_ib_flush_mr_pool()
708 list_del(&ibmr->unmap_list); in rds_ib_flush_mr_pool()
709 ib_dealloc_fmr(ibmr->fmr); in rds_ib_flush_mr_pool()
710 kfree(ibmr); in rds_ib_flush_mr_pool()
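
rds_ib_flush_mr_pool() is the heart of the listing: for callers that only need one MR it keeps retrying rds_ib_reuse_fmr(), and otherwise it gathers the drop/free lists, batch-invalidates the FMRs with a single ib_unmap_fmr() call, tears each MR down, and destroys those that have reached fmr_attr.max_maps remaps. A condensed sketch of the batch-unmap half only; the locking, the per-caller retry loop, and the statistics are omitted, and the helper split is an assumption:

    /* Sketch: batch-invalidate a list of MRs gathered from the pool, then
     * free the ones that have been remapped too many times to reuse. */
    static void flush_unmap_sketch(struct rds_ib_mr_pool *pool,
                                   struct list_head *unmap_list, int free_all)
    {
            struct rds_ib_mr *ibmr, *next;
            LIST_HEAD(fmr_list);
            unsigned long unpinned = 0;

            /* One ib_unmap_fmr() call invalidates every FMR on the list. */
            list_for_each_entry(ibmr, unmap_list, unmap_list)
                    list_add(&ibmr->fmr->list, &fmr_list);
            if (ib_unmap_fmr(&fmr_list))
                    pr_warn("RDS/IB: ib_unmap_fmr failed\n");

            list_for_each_entry_safe(ibmr, next, unmap_list, unmap_list) {
                    unpinned += ibmr->sg_len;
                    __rds_ib_teardown_mr(ibmr);

                    /* MRs past their remap budget (or a full-pool flush) are
                     * destroyed instead of being returned to the pool. */
                    if (free_all ||
                        ibmr->remap_count >= pool->fmr_attr.max_maps) {
                            list_del(&ibmr->unmap_list);
                            ib_dealloc_fmr(ibmr->fmr);
                            kfree(ibmr);
                    }
            }

            atomic_sub(unpinned, &pool->free_pinned);
    }
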
758 struct rds_ib_mr *ibmr = trans_private; in rds_ib_free_mr() local
759 struct rds_ib_mr_pool *pool = ibmr->pool; in rds_ib_free_mr()
760 struct rds_ib_device *rds_ibdev = ibmr->device; in rds_ib_free_mr()
762 rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len); in rds_ib_free_mr()
765 if (ibmr->remap_count >= pool->fmr_attr.max_maps) in rds_ib_free_mr()
766 llist_add(&ibmr->llnode, &pool->drop_list); in rds_ib_free_mr()
768 llist_add(&ibmr->llnode, &pool->free_list); in rds_ib_free_mr()
770 atomic_add(ibmr->sg_len, &pool->free_pinned); in rds_ib_free_mr()
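
rds_ib_free_mr() never tears an MR down synchronously: MRs that have used up their remap budget go onto the pool's drop_list, the rest onto free_list, and the pinned-page count moves to free_pinned so a later rds_ib_flush_mr_pool() can reclaim it all at once. A sketch of that deferral; the threshold tests that also trigger (or queue) a flush in the real code are reduced to the explicit invalidate case:

    /* Sketch: freeing an MR just queues it for a later pool flush. */
    static void free_mr_sketch(void *trans_private, int invalidate)
    {
            struct rds_ib_mr *ibmr = trans_private;
            struct rds_ib_mr_pool *pool = ibmr->pool;

            rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

            /* Worn-out MRs go on drop_list (destroy on flush),
             * the rest on free_list (invalidate and reuse on flush). */
            if (ibmr->remap_count >= pool->fmr_attr.max_maps)
                    llist_add(&ibmr->llnode, &pool->drop_list);
            else
                    llist_add(&ibmr->llnode, &pool->free_list);

            atomic_add(ibmr->sg_len, &pool->free_pinned);

            /* The real code also flushes when pinned-page/dirty-MR thresholds
             * are exceeded; only the explicit request is shown here. */
            if (invalidate)
                    rds_ib_flush_mr_pool(pool, 0, NULL);
    }
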
812 struct rds_ib_mr *ibmr = NULL; in rds_ib_get_mr() local
826 ibmr = rds_ib_alloc_fmr(rds_ibdev, nents); in rds_ib_get_mr()
827 if (IS_ERR(ibmr)) { in rds_ib_get_mr()
829 return ibmr; in rds_ib_get_mr()
832 ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents); in rds_ib_get_mr()
834 *key_ret = ibmr->fmr->rkey; in rds_ib_get_mr()
838 ibmr->device = rds_ibdev; in rds_ib_get_mr()
843 if (ibmr) in rds_ib_get_mr()
844 rds_ib_free_mr(ibmr, 0); in rds_ib_get_mr()
845 ibmr = ERR_PTR(ret); in rds_ib_get_mr()
849 return ibmr; in rds_ib_get_mr()
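
rds_ib_get_mr() sits at the top of the stack: allocate or reuse an FMR, map the caller's scatterlist through it, store the resulting rkey in *key_ret for the peer's RDMA requests, and remember the device for the later sync and teardown calls. A sketch of that flow; the argument list is an assumption reconstructed from the calls in the listing, and the device lookup and connection checks are omitted:

    /* Sketch of the top-level get_mr path: alloc/reuse, map, hand back rkey. */
    static void *get_mr_sketch(struct rds_ib_device *rds_ibdev,
                               struct scatterlist *sg, unsigned long nents,
                               u32 *key_ret)
    {
            struct rds_ib_mr *ibmr;
            int ret;

            ibmr = rds_ib_alloc_fmr(rds_ibdev, nents);
            if (IS_ERR(ibmr))
                    return ibmr;

            ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
            if (ret) {
                    rds_ib_free_mr(ibmr, 0);    /* give the MR back to the pool */
                    return ERR_PTR(ret);
            }

            /* The FMR's rkey is what the peer uses in its RDMA requests. */
            *key_ret = ibmr->fmr->rkey;
            ibmr->device = rds_ibdev;
            return ibmr;
    }
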