Lines matching refs:rdma in net/9p/trans_rdma.c, the 9P client's RDMA transport. Each entry shows the file line number, the matched source line, and the enclosing function.
240 struct p9_trans_rdma *rdma = c->trans; in p9_cm_event_handler() local
243 BUG_ON(rdma->state != P9_RDMA_INIT); in p9_cm_event_handler()
244 rdma->state = P9_RDMA_ADDR_RESOLVED; in p9_cm_event_handler()
248 BUG_ON(rdma->state != P9_RDMA_ADDR_RESOLVED); in p9_cm_event_handler()
249 rdma->state = P9_RDMA_ROUTE_RESOLVED; in p9_cm_event_handler()
253 BUG_ON(rdma->state != P9_RDMA_ROUTE_RESOLVED); in p9_cm_event_handler()
254 rdma->state = P9_RDMA_CONNECTED; in p9_cm_event_handler()
258 if (rdma) in p9_cm_event_handler()
259 rdma->state = P9_RDMA_CLOSED; in p9_cm_event_handler()
279 rdma_disconnect(rdma->cm_id); in p9_cm_event_handler()
284 complete(&rdma->cm_done); in p9_cm_event_handler()
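
The matches above trace the connection state machine driven by rdma_cm events. Below is a minimal sketch of the dispatch; the case labels and the grouping of error events under default are reconstructed from the standard rdma_cm event names and are not part of the match output:

    static int p9_cm_event_handler(struct rdma_cm_id *id,
                                   struct rdma_cm_event *event)
    {
        struct p9_client *c = id->context;
        struct p9_trans_rdma *rdma = c->trans;

        switch (event->event) {
        case RDMA_CM_EVENT_ADDR_RESOLVED:
            BUG_ON(rdma->state != P9_RDMA_INIT);
            rdma->state = P9_RDMA_ADDR_RESOLVED;
            break;
        case RDMA_CM_EVENT_ROUTE_RESOLVED:
            BUG_ON(rdma->state != P9_RDMA_ADDR_RESOLVED);
            rdma->state = P9_RDMA_ROUTE_RESOLVED;
            break;
        case RDMA_CM_EVENT_ESTABLISHED:
            BUG_ON(rdma->state != P9_RDMA_ROUTE_RESOLVED);
            rdma->state = P9_RDMA_CONNECTED;
            break;
        case RDMA_CM_EVENT_DISCONNECTED:
            if (rdma)
                rdma->state = P9_RDMA_CLOSED;
            break;
        default:
            /* Assumed grouping: address/route/connect errors and
             * device removal tear the connection down. */
            rdma_disconnect(rdma->cm_id);
            break;
        }
        /* Wake whoever is waiting on cm_done in rdma_create_trans(). */
        complete(&rdma->cm_done);
        return 0;
    }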
289 handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma, in handle_recv() argument
297 ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize, in handle_recv()
325 rdma->state = P9_RDMA_FLUSHING; in handle_recv()
330 handle_send(struct p9_client *client, struct p9_trans_rdma *rdma, in handle_send() argument
333 ib_dma_unmap_single(rdma->cm_id->device, in handle_send()
347 struct p9_trans_rdma *rdma = client->trans; in cq_comp_handler() local
351 ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP); in cq_comp_handler()
357 handle_recv(client, rdma, c, wc.status, wc.byte_len); in cq_comp_handler()
358 up(&rdma->rq_sem); in cq_comp_handler()
362 handle_send(client, rdma, c, wc.status, wc.byte_len); in cq_comp_handler()
363 up(&rdma->sq_sem); in cq_comp_handler()
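
handle_recv() and handle_send() unmap the DMA buffer of a completed work request; cq_comp_handler() drains the shared completion queue and returns one queue credit per completion. A sketch of the poll loop follows; the per-WR context struct and its wc_op tag (set when the WR is posted) are recalled from the kernel source, not shown in the matches:

    static void cq_comp_handler(struct ib_cq *cq, void *cq_context)
    {
        struct p9_client *client = cq_context;
        struct p9_trans_rdma *rdma = client->trans;
        struct ib_wc wc;

        /* Re-arm before draining so a completion arriving after the
         * last poll still triggers a fresh callback. */
        ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);
        while (ib_poll_cq(cq, 1, &wc) > 0) {
            struct p9_rdma_context *c =
                (void *)(unsigned long)wc.wr_id;

            switch (c->wc_op) {
            case IB_WC_RECV:
                handle_recv(client, rdma, c, wc.status, wc.byte_len);
                up(&rdma->rq_sem);      /* return a recv credit */
                break;
            case IB_WC_SEND:
                handle_send(client, rdma, c, wc.status, wc.byte_len);
                up(&rdma->sq_sem);      /* return a send credit */
                break;
            default:
                break;
            }
            kfree(c);
        }
    }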
380 static void rdma_destroy_trans(struct p9_trans_rdma *rdma) in rdma_destroy_trans() argument
382 if (!rdma) in rdma_destroy_trans()
385 if (rdma->dma_mr && !IS_ERR(rdma->dma_mr)) in rdma_destroy_trans()
386 ib_dereg_mr(rdma->dma_mr); in rdma_destroy_trans()
388 if (rdma->qp && !IS_ERR(rdma->qp)) in rdma_destroy_trans()
389 ib_destroy_qp(rdma->qp); in rdma_destroy_trans()
391 if (rdma->pd && !IS_ERR(rdma->pd)) in rdma_destroy_trans()
392 ib_dealloc_pd(rdma->pd); in rdma_destroy_trans()
394 if (rdma->cq && !IS_ERR(rdma->cq)) in rdma_destroy_trans()
395 ib_destroy_cq(rdma->cq); in rdma_destroy_trans()
397 if (rdma->cm_id && !IS_ERR(rdma->cm_id)) in rdma_destroy_trans()
398 rdma_destroy_id(rdma->cm_id); in rdma_destroy_trans()
400 kfree(rdma); in rdma_destroy_trans()
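
Note the teardown pattern in rdma_destroy_trans(): every verbs object is released only if it is both non-NULL and not an ERR_PTR() (the x && !IS_ERR(x) checks above), and the objects go down in reverse order of creation (MR, QP, PD, CQ, CM ID). That lets rdma_create_trans() below bail out to a single error path no matter how far setup progressed.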
406 struct p9_trans_rdma *rdma = client->trans; in post_recv() local
410 c->busa = ib_dma_map_single(rdma->cm_id->device, in post_recv()
413 if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) in post_recv()
418 sge.lkey = rdma->lkey; in post_recv()
425 return ib_post_recv(rdma->qp, &wr, &bad_wr); in post_recv()
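
post_recv() maps one client-sized buffer and hands it to the receive queue. A sketch of the whole function around the matched lines; the receive-buffer field (c->rc->sdata) and the wc_op tag are recalled from the kernel source rather than shown in the matches:

    static int post_recv(struct p9_client *client, struct p9_rdma_context *c)
    {
        struct p9_trans_rdma *rdma = client->trans;
        struct ib_recv_wr wr, *bad_wr;
        struct ib_sge sge;

        /* Map the reply buffer for device access. */
        c->busa = ib_dma_map_single(rdma->cm_id->device,
                                    c->rc->sdata, client->msize,
                                    DMA_FROM_DEVICE);
        if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
            return -EIO;

        sge.addr = c->busa;
        sge.length = client->msize;
        sge.lkey = rdma->lkey;

        wr.next = NULL;
        c->wc_op = IB_WC_RECV;          /* tag consumed by cq_comp_handler() */
        wr.wr_id = (unsigned long) c;
        wr.sg_list = &sge;
        wr.num_sge = 1;
        return ib_post_recv(rdma->qp, &wr, &bad_wr);
    }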
434 struct p9_trans_rdma *rdma = client->trans; in rdma_request() local
450 if (unlikely(atomic_read(&rdma->excess_rc) > 0)) { in rdma_request()
451 if ((atomic_sub_return(1, &rdma->excess_rc) >= 0)) { in rdma_request()
458 atomic_inc(&rdma->excess_rc); in rdma_request()
477 if (down_interruptible(&rdma->rq_sem)) { in rdma_request()
499 c->busa = ib_dma_map_single(rdma->cm_id->device, in rdma_request()
502 if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) { in rdma_request()
509 sge.lkey = rdma->lkey; in rdma_request()
519 if (down_interruptible(&rdma->sq_sem)) { in rdma_request()
529 err = ib_post_send(rdma->qp, &wr, &bad_wr); in rdma_request()
545 atomic_inc(&rdma->excess_rc); in rdma_request()
551 spin_lock_irqsave(&rdma->req_lock, flags); in rdma_request()
552 if (rdma->state < P9_RDMA_CLOSING) { in rdma_request()
553 rdma->state = P9_RDMA_CLOSING; in rdma_request()
554 spin_unlock_irqrestore(&rdma->req_lock, flags); in rdma_request()
555 rdma_disconnect(rdma->cm_id); in rdma_request()
557 spin_unlock_irqrestore(&rdma->req_lock, flags); in rdma_request()
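
rdma_request() enforces flow control with the two counting semaphores: it takes an rq_sem credit and posts a fresh receive buffer before sending (or consumes an excess_rc credit left behind by a flushed request), then takes an sq_sem credit and posts the send. The sketch below reshapes the send half as post_send(), a hypothetical helper that does not exist in the source; the request-buffer fields (c->req->tc) are assumptions from context:

    /* Hypothetical helper: the send path of rdma_request(). */
    static int post_send(struct p9_client *client, struct p9_trans_rdma *rdma,
                         struct p9_rdma_context *c)
    {
        struct ib_send_wr wr, *bad_wr;
        struct ib_sge sge;

        c->busa = ib_dma_map_single(rdma->cm_id->device,
                                    c->req->tc->sdata, c->req->tc->size,
                                    DMA_TO_DEVICE);
        if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
            return -EIO;

        sge.addr = c->busa;
        sge.length = c->req->tc->size;
        sge.lkey = rdma->lkey;

        wr.next = NULL;
        c->wc_op = IB_WC_SEND;
        wr.wr_id = (unsigned long) c;
        wr.opcode = IB_WR_SEND;
        wr.send_flags = IB_SEND_SIGNALED;
        wr.sg_list = &sge;
        wr.num_sge = 1;

        /* Block until a send-queue slot is free; the credit is
         * returned by cq_comp_handler() on completion. */
        if (down_interruptible(&rdma->sq_sem))
            return -EINTR;
        return ib_post_send(rdma->qp, &wr, &bad_wr);
    }

On an unrecoverable error the matched lines show rdma_request() moving the transport to P9_RDMA_CLOSING under req_lock, so rdma_disconnect() is issued exactly once no matter how many requests fail concurrently.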
563 struct p9_trans_rdma *rdma; in rdma_close() local
568 rdma = client->trans; in rdma_close()
569 if (!rdma) in rdma_close()
573 rdma_disconnect(rdma->cm_id); in rdma_close()
574 rdma_destroy_trans(rdma); in rdma_close()
583 struct p9_trans_rdma *rdma; in alloc_rdma() local
585 rdma = kzalloc(sizeof(struct p9_trans_rdma), GFP_KERNEL); in alloc_rdma()
586 if (!rdma) in alloc_rdma()
589 rdma->sq_depth = opts->sq_depth; in alloc_rdma()
590 rdma->rq_depth = opts->rq_depth; in alloc_rdma()
591 rdma->timeout = opts->timeout; in alloc_rdma()
592 spin_lock_init(&rdma->req_lock); in alloc_rdma()
593 init_completion(&rdma->cm_done); in alloc_rdma()
594 sema_init(&rdma->sq_sem, rdma->sq_depth); in alloc_rdma()
595 sema_init(&rdma->rq_sem, rdma->rq_depth); in alloc_rdma()
596 atomic_set(&rdma->excess_rc, 0); in alloc_rdma()
598 return rdma; in alloc_rdma()
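
alloc_rdma() sizes the flow-control state to the negotiated queue depths: sq_sem starts at sq_depth and rq_sem at rq_depth, so the down_interruptible() calls in rdma_request() block only when the corresponding queue is full, and every completion in cq_comp_handler() returns one credit with up().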
614 struct p9_trans_rdma *rdma = client->trans; in rdma_cancelled() local
615 atomic_inc(&rdma->excess_rc); in rdma_cancelled()
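
rdma_cancelled() is the flush hook: the reply to a cancelled request may still arrive, so its already-posted receive buffer stays on the queue and excess_rc records one spare receive credit, which a later rdma_request() consumes instead of posting a new buffer.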
619 static int p9_rdma_bind_privport(struct p9_trans_rdma *rdma) in p9_rdma_bind_privport() argument
629 err = rdma_bind_addr(rdma->cm_id, (struct sockaddr *)&cl); in p9_rdma_bind_privport()
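
p9_rdma_bind_privport() binds the CM ID to a privileged source port before address resolution, for servers that require reserved ports. A sketch of the loop; the reserved-port bounds P9_DEF_MAX_RESVPORT and P9_DEF_MIN_RESVPORT are assumed from the 9p sources:

    static int p9_rdma_bind_privport(struct p9_trans_rdma *rdma)
    {
        struct sockaddr_in cl = {
            .sin_family      = AF_INET,
            .sin_addr.s_addr = htonl(INADDR_ANY),
        };
        int port, err = -EINVAL;

        /* Walk down from the top reserved port until one binds
         * (bounds assumed from the 9p sources). */
        for (port = P9_DEF_MAX_RESVPORT; port >= P9_DEF_MIN_RESVPORT; port--) {
            cl.sin_port = htons((ushort)port);
            err = rdma_bind_addr(rdma->cm_id, (struct sockaddr *)&cl);
            if (err != -EADDRINUSE)
                break;
        }
        return err;
    }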
647 struct p9_trans_rdma *rdma; in rdma_create_trans() local
658 rdma = alloc_rdma(&opts); in rdma_create_trans()
659 if (!rdma) in rdma_create_trans()
663 rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP, in rdma_create_trans()
665 if (IS_ERR(rdma->cm_id)) in rdma_create_trans()
669 client->trans = rdma; in rdma_create_trans()
673 err = p9_rdma_bind_privport(rdma); in rdma_create_trans()
682 rdma->addr.sin_family = AF_INET; in rdma_create_trans()
683 rdma->addr.sin_addr.s_addr = in_aton(addr); in rdma_create_trans()
684 rdma->addr.sin_port = htons(opts.port); in rdma_create_trans()
685 err = rdma_resolve_addr(rdma->cm_id, NULL, in rdma_create_trans()
686 (struct sockaddr *)&rdma->addr, in rdma_create_trans()
687 rdma->timeout); in rdma_create_trans()
690 err = wait_for_completion_interruptible(&rdma->cm_done); in rdma_create_trans()
691 if (err || (rdma->state != P9_RDMA_ADDR_RESOLVED)) in rdma_create_trans()
695 err = rdma_resolve_route(rdma->cm_id, rdma->timeout); in rdma_create_trans()
698 err = wait_for_completion_interruptible(&rdma->cm_done); in rdma_create_trans()
699 if (err || (rdma->state != P9_RDMA_ROUTE_RESOLVED)) in rdma_create_trans()
703 err = ib_query_device(rdma->cm_id->device, &devattr); in rdma_create_trans()
708 rdma->cq = ib_create_cq(rdma->cm_id->device, cq_comp_handler, in rdma_create_trans()
711 if (IS_ERR(rdma->cq)) in rdma_create_trans()
713 ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP); in rdma_create_trans()
716 rdma->pd = ib_alloc_pd(rdma->cm_id->device); in rdma_create_trans()
717 if (IS_ERR(rdma->pd)) in rdma_create_trans()
721 rdma->dma_mr = NULL; in rdma_create_trans()
723 rdma->lkey = rdma->cm_id->device->local_dma_lkey; in rdma_create_trans()
725 rdma->dma_mr = ib_get_dma_mr(rdma->pd, IB_ACCESS_LOCAL_WRITE); in rdma_create_trans()
726 if (IS_ERR(rdma->dma_mr)) in rdma_create_trans()
728 rdma->lkey = rdma->dma_mr->lkey; in rdma_create_trans()
741 qp_attr.send_cq = rdma->cq; in rdma_create_trans()
742 qp_attr.recv_cq = rdma->cq; in rdma_create_trans()
743 err = rdma_create_qp(rdma->cm_id, rdma->pd, &qp_attr); in rdma_create_trans()
746 rdma->qp = rdma->cm_id->qp; in rdma_create_trans()
754 err = rdma_connect(rdma->cm_id, &conn_param); in rdma_create_trans()
757 err = wait_for_completion_interruptible(&rdma->cm_done); in rdma_create_trans()
758 if (err || (rdma->state != P9_RDMA_CONNECTED)) in rdma_create_trans()
766 rdma_destroy_trans(rdma); in rdma_create_trans()
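
rdma_create_trans() strings the whole sequence together: allocate the transport, create the CM ID, optionally bind a privileged port, resolve the address and then the route (each step waiting on cm_done and checking the state set by p9_cm_event_handler()), query the device, create the CQ, allocate the PD, pick an lkey (the device's local_dma_lkey when available, otherwise a DMA MR), create the QP, and finally connect; any failure falls through to rdma_destroy_trans(). The ib_query_device() call suggests a pre-4.5 kernel, where device attributes were not yet cached in ib_device->attrs.

From user space this transport is selected at mount time, roughly: mount -t 9p -o trans=rdma,port=5640 <server> /mnt, with sq=, rq= and timeout= mapping to the opts fields consumed by alloc_rdma(); the exact option names come from the 9p mount parser and are worth verifying against your kernel version.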