Lines matching refs: xprt

Each entry below gives the source line number, the matching line, and the enclosing function; "local" marks a local variable declaration, "argument" a function parameter.

64 static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
67 static void svc_rdma_detach(struct svc_xprt *xprt);
68 static void svc_rdma_free(struct svc_xprt *xprt);
69 static int svc_rdma_has_wspace(struct svc_xprt *xprt);
71 static void rq_cq_reap(struct svcxprt_rdma *xprt);
72 static void sq_cq_reap(struct svcxprt_rdma *xprt);
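
The declarations at lines 64-72 are the transport's callback vector plus its two CQ reapers. A minimal sketch of how such declarations are typically wired into a svc_xprt_ops table; the recvfrom/sendto/create entries do not appear in this listing and are assumptions:

    static struct svc_xprt_ops svc_rdma_ops = {
        .xpo_create     = svc_rdma_create,      /* assumed entry */
        .xpo_recvfrom   = svc_rdma_recvfrom,    /* assumed entry */
        .xpo_sendto     = svc_rdma_sendto,      /* assumed entry */
        .xpo_accept     = svc_rdma_accept,      /* line 64 */
        .xpo_detach     = svc_rdma_detach,      /* line 67 */
        .xpo_free       = svc_rdma_free,        /* line 68 */
        .xpo_has_wspace = svc_rdma_has_wspace,  /* line 69 */
    };
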
126 struct svc_xprt *xprt; in svc_rdma_bc_create() local
131 xprt = &cma_xprt->sc_xprt; in svc_rdma_bc_create()
133 svc_xprt_init(net, &svc_rdma_bc_class, xprt, serv); in svc_rdma_bc_create()
134 serv->sv_bc_xprt = xprt; in svc_rdma_bc_create()
136 dprintk("svcrdma: %s(%p)\n", __func__, xprt); in svc_rdma_bc_create()
137 return xprt; in svc_rdma_bc_create()
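
Reassembled from the fragments at lines 126-137, the backchannel constructor allocates the containing svcxprt_rdma, initializes the embedded svc_xprt against the backchannel class, and publishes it on the server. A sketch; the rdma_create_xprt helper and the error path are assumptions:

    static struct svc_xprt *svc_rdma_bc_create(struct svc_serv *serv,
                                               struct net *net,
                                               struct sockaddr *sa, int salen,
                                               int flags)
    {
        struct svcxprt_rdma *cma_xprt;
        struct svc_xprt *xprt;

        cma_xprt = rdma_create_xprt(serv, 0);   /* assumed helper */
        if (!cma_xprt)
            return ERR_PTR(-ENOMEM);
        xprt = &cma_xprt->sc_xprt;

        svc_xprt_init(net, &svc_rdma_bc_class, xprt, serv);
        serv->sv_bc_xprt = xprt;

        dprintk("svcrdma: %s(%p)\n", __func__, xprt);
        return xprt;
    }
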
140 static void svc_rdma_bc_detach(struct svc_xprt *xprt) in svc_rdma_bc_detach() argument
142 dprintk("svcrdma: %s(%p)\n", __func__, xprt); in svc_rdma_bc_detach()
145 static void svc_rdma_bc_free(struct svc_xprt *xprt) in svc_rdma_bc_free() argument
148 container_of(xprt, struct svcxprt_rdma, sc_xprt); in svc_rdma_bc_free()
150 dprintk("svcrdma: %s(%p)\n", __func__, xprt); in svc_rdma_bc_free()
151 if (xprt) in svc_rdma_bc_free()
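
The backchannel detach callback only logs; the free callback recovers the containing svcxprt_rdma via container_of and releases it. A sketch; only the container_of and the NULL check appear in the listing, the kfree is an assumption:

    static void svc_rdma_bc_free(struct svc_xprt *xprt)
    {
        struct svcxprt_rdma *rdma =
            container_of(xprt, struct svcxprt_rdma, sc_xprt);

        dprintk("svcrdma: %s(%p)\n", __func__, xprt);
        if (xprt)
            kfree(rdma);
    }
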
156 struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt) in svc_rdma_get_context() argument
162 ctxt->xprt = xprt; in svc_rdma_get_context()
166 atomic_inc(&xprt->sc_ctxt_used); in svc_rdma_get_context()
172 struct svcxprt_rdma *xprt = ctxt->xprt; in svc_rdma_unmap_dma() local
181 if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) { in svc_rdma_unmap_dma()
182 atomic_dec(&xprt->sc_dma_used); in svc_rdma_unmap_dma()
183 ib_dma_unmap_page(xprt->sc_cm_id->device, in svc_rdma_unmap_dma()
193 struct svcxprt_rdma *xprt; in svc_rdma_put_context() local
196 xprt = ctxt->xprt; in svc_rdma_put_context()
202 atomic_dec(&xprt->sc_ctxt_used); in svc_rdma_put_context()
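
Lines 156-202 show the op-context lifecycle: svc_rdma_get_context pins the context to its transport and bumps sc_ctxt_used; svc_rdma_unmap_dma tears down only the SGEs that were mapped with the transport's local DMA lkey (FRMR-registered SGEs are handled elsewhere); svc_rdma_put_context drops the accounting. A reconstruction of the unmap loop, with the loop bounds assumed:

    static void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
    {
        struct svcxprt_rdma *xprt = ctxt->xprt;
        int i;

        for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
            /* Unmap only SGEs mapped with the local DMA lkey */
            if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) {
                atomic_dec(&xprt->sc_dma_used);
                ib_dma_unmap_page(xprt->sc_cm_id->device,
                                  ctxt->sge[i].addr,
                                  ctxt->sge[i].length,
                                  ctxt->direction);
            }
        }
    }
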
227 struct svc_xprt *xprt = context; in cq_event_handler() local
230 set_bit(XPT_CLOSE, &xprt->xpt_flags); in cq_event_handler()
236 struct svc_xprt *xprt = context; in qp_event_handler() local
259 set_bit(XPT_CLOSE, &xprt->xpt_flags); in qp_event_handler()
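
Both asynchronous event handlers (cq_event_handler at line 227 and qp_event_handler at line 236) reduce to the same action: any event they cannot recover from marks the transport for close. A sketch of the QP-side handler; the set of events treated as benign is an assumption:

    static void qp_event_handler(struct ib_event *event, void *context)
    {
        struct svc_xprt *xprt = context;

        switch (event->event) {
        /* Events considered benign are only logged (assumed set) */
        case IB_EVENT_PATH_MIG:
        case IB_EVENT_COMM_EST:
        case IB_EVENT_SQ_DRAINED:
            break;
        /* Anything else closes the transport */
        default:
            set_bit(XPT_CLOSE, &xprt->xpt_flags);
            break;
        }
    }
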
275 struct svcxprt_rdma *xprt; in dto_tasklet_func() local
280 xprt = list_entry(dto_xprt_q.next, in dto_tasklet_func()
282 list_del_init(&xprt->sc_dto_q); in dto_tasklet_func()
285 rq_cq_reap(xprt); in dto_tasklet_func()
286 sq_cq_reap(xprt); in dto_tasklet_func()
288 svc_xprt_put(&xprt->sc_xprt); in dto_tasklet_func()
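
The tasklet drains the global dto_xprt_q, reaping both CQs for each queued transport and then dropping the reference the completion handler took when it queued the transport. A sketch, assuming a global dto_lock spinlock protects the queue:

    static void dto_tasklet_func(unsigned long data)
    {
        struct svcxprt_rdma *xprt;
        unsigned long flags;

        spin_lock_irqsave(&dto_lock, flags);
        while (!list_empty(&dto_xprt_q)) {
            xprt = list_entry(dto_xprt_q.next,
                              struct svcxprt_rdma, sc_dto_q);
            list_del_init(&xprt->sc_dto_q);
            spin_unlock_irqrestore(&dto_lock, flags);

            rq_cq_reap(xprt);
            sq_cq_reap(xprt);

            /* Drop the reference taken in the comp handler */
            svc_xprt_put(&xprt->sc_xprt);
            spin_lock_irqsave(&dto_lock, flags);
        }
        spin_unlock_irqrestore(&dto_lock, flags);
    }
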
302 struct svcxprt_rdma *xprt = cq_context; in rq_comp_handler() local
306 if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0) in rq_comp_handler()
314 set_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags); in rq_comp_handler()
321 if (list_empty(&xprt->sc_dto_q)) { in rq_comp_handler()
322 svc_xprt_get(&xprt->sc_xprt); in rq_comp_handler()
323 list_add_tail(&xprt->sc_dto_q, &dto_xprt_q); in rq_comp_handler()
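
The receive completion handler does no CQ polling in interrupt context: it marks the transport RQ-pending, queues it (taking a reference) on dto_xprt_q if it is not already there, and schedules the tasklet. The send-side handler at lines 484-505 mirrors this exactly with RDMAXPRT_SQ_PENDING. A sketch:

    static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
    {
        struct svcxprt_rdma *xprt = cq_context;
        unsigned long flags;

        /* Guard against a flush call against a destroyed QP */
        if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
            return;

        /* Set the bit regardless: the transport may already be
         * queued due to an SQ completion. */
        set_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags);

        spin_lock_irqsave(&dto_lock, flags);
        if (list_empty(&xprt->sc_dto_q)) {
            svc_xprt_get(&xprt->sc_xprt);
            list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
        }
        spin_unlock_irqrestore(&dto_lock, flags);

        /* The tasklet does the real work outside irq context */
        tasklet_schedule(&dto_tasklet);
    }
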
339 static void rq_cq_reap(struct svcxprt_rdma *xprt) in rq_cq_reap() argument
345 if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags)) in rq_cq_reap()
348 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP); in rq_cq_reap()
351 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) { in rq_cq_reap()
359 set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); in rq_cq_reap()
361 svc_xprt_put(&xprt->sc_xprt); in rq_cq_reap()
364 spin_lock_bh(&xprt->sc_rq_dto_lock); in rq_cq_reap()
365 list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q); in rq_cq_reap()
366 spin_unlock_bh(&xprt->sc_rq_dto_lock); in rq_cq_reap()
367 svc_xprt_put(&xprt->sc_xprt); in rq_cq_reap()
373 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags); in rq_cq_reap()
379 if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags)) in rq_cq_reap()
380 svc_xprt_enqueue(&xprt->sc_xprt); in rq_cq_reap()
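
Reassembled, the receive reaper re-arms the CQ before polling (closing the notify/poll race), moves each successfully completed context onto sc_rq_dto_q, and drops the per-WR transport reference taken at post time; a failed completion closes the transport instead. A sketch; the wr_id recovery and the byte_len field are assumed:

    static void rq_cq_reap(struct svcxprt_rdma *xprt)
    {
        struct svc_rdma_op_ctxt *ctxt = NULL;
        struct ib_wc wc;
        int ret;

        if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
            return;

        /* Re-arm before polling so no completion is missed */
        ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);

        while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
            ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
            ctxt->byte_len = wc.byte_len;   /* assumed field */
            svc_rdma_unmap_dma(ctxt);
            if (wc.status != IB_WC_SUCCESS) {
                /* Close the transport */
                set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
                svc_rdma_put_context(ctxt, 1);
                svc_xprt_put(&xprt->sc_xprt);
                continue;
            }
            spin_lock_bh(&xprt->sc_rq_dto_lock);
            list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q);
            spin_unlock_bh(&xprt->sc_rq_dto_lock);
            svc_xprt_put(&xprt->sc_xprt);
        }

        if (ctxt)
            set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);

        /* Don't enqueue while a connection is still being accepted */
        if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
            svc_xprt_enqueue(&xprt->sc_xprt);
    }
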
386 static void process_context(struct svcxprt_rdma *xprt, in process_context() argument
406 svc_rdma_put_frmr(xprt, ctxt->frmr); in process_context()
410 spin_lock_bh(&xprt->sc_rq_dto_lock); in process_context()
411 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags); in process_context()
413 &xprt->sc_read_complete_q); in process_context()
414 spin_unlock_bh(&xprt->sc_rq_dto_lock); in process_context()
418 svc_xprt_enqueue(&xprt->sc_xprt); in process_context()
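
process_context dispatches on the completed work-request type; the fragments at lines 386-418 show the RDMA_READ branch, where the final read of a chunk list returns the FRMR, queues the saved header context on sc_read_complete_q under sc_rq_dto_lock, and wakes the transport. A condensed sketch; wr_op, read_hdr, and RDMACTXT_F_LAST_CTXT are assumed names:

    static void process_context(struct svcxprt_rdma *xprt,
                                struct svc_rdma_op_ctxt *ctxt)
    {
        svc_rdma_unmap_dma(ctxt);

        switch (ctxt->wr_op) {
        case IB_WR_SEND:
            svc_rdma_put_context(ctxt, 1);  /* pages go back with ctxt */
            break;
        case IB_WR_RDMA_WRITE:
            svc_rdma_put_context(ctxt, 0);  /* pages still owned by reply */
            break;
        case IB_WR_RDMA_READ:
            svc_rdma_put_frmr(xprt, ctxt->frmr);
            if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
                struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;

                spin_lock_bh(&xprt->sc_rq_dto_lock);
                set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
                list_add_tail(&read_hdr->dto_q,
                              &xprt->sc_read_complete_q);
                spin_unlock_bh(&xprt->sc_rq_dto_lock);
                svc_xprt_enqueue(&xprt->sc_xprt);
            }
            svc_rdma_put_context(ctxt, 0);
            break;
        default:
            svc_rdma_put_context(ctxt, 0);
            break;
        }
    }
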
436 static void sq_cq_reap(struct svcxprt_rdma *xprt) in sq_cq_reap() argument
441 struct ib_cq *cq = xprt->sc_sq_cq; in sq_cq_reap()
446 if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags)) in sq_cq_reap()
449 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP); in sq_cq_reap()
462 set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); in sq_cq_reap()
466 atomic_dec(&xprt->sc_sq_count); in sq_cq_reap()
467 wake_up(&xprt->sc_send_wait); in sq_cq_reap()
472 process_context(xprt, ctxt); in sq_cq_reap()
474 svc_xprt_put(&xprt->sc_xprt); in sq_cq_reap()
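
The send reaper follows the same pending-bit/re-arm/poll pattern as the receive side; each polled completion decrements sc_sq_count, wakes any sender blocked on the flow-control wait queue, and hands the context to process_context. A sketch:

    static void sq_cq_reap(struct svcxprt_rdma *xprt)
    {
        struct svc_rdma_op_ctxt *ctxt;
        struct ib_cq *cq = xprt->sc_sq_cq;
        struct ib_wc wc;
        int ret;

        if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
            return;

        ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
        while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
            if (wc.status != IB_WC_SUCCESS)
                /* Close the transport */
                set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);

            /* Release the SQ slot and wake blocked senders */
            atomic_dec(&xprt->sc_sq_count);
            wake_up(&xprt->sc_send_wait);

            ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
            if (ctxt)
                process_context(xprt, ctxt);

            svc_xprt_put(&xprt->sc_xprt);
        }
    }
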
484 struct svcxprt_rdma *xprt = cq_context; in sq_comp_handler() local
488 if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0) in sq_comp_handler()
496 set_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags); in sq_comp_handler()
503 if (list_empty(&xprt->sc_dto_q)) { in sq_comp_handler()
504 svc_xprt_get(&xprt->sc_xprt); in sq_comp_handler()
505 list_add_tail(&xprt->sc_dto_q, &dto_xprt_q); in sq_comp_handler()
546 int svc_rdma_post_recv(struct svcxprt_rdma *xprt) in svc_rdma_post_recv() argument
556 ctxt = svc_rdma_get_context(xprt); in svc_rdma_post_recv()
559 for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) { in svc_rdma_post_recv()
560 if (sge_no >= xprt->sc_max_sge) { in svc_rdma_post_recv()
566 pa = ib_dma_map_page(xprt->sc_cm_id->device, in svc_rdma_post_recv()
569 if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa)) in svc_rdma_post_recv()
571 atomic_inc(&xprt->sc_dma_used); in svc_rdma_post_recv()
574 ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey; in svc_rdma_post_recv()
583 svc_xprt_get(&xprt->sc_xprt); in svc_rdma_post_recv()
584 ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr); in svc_rdma_post_recv()
588 svc_xprt_put(&xprt->sc_xprt); in svc_rdma_post_recv()
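
Posting a receive builds an SGE list page by page until sc_max_req_size is covered (bounded by sc_max_sge), DMA-maps each page with the local lkey, takes a transport reference to cover the in-flight WR, and posts; the reference is dropped again if the post fails. A sketch; the page source and the WR setup are assumptions:

    int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
    {
        struct ib_recv_wr recv_wr, *bad_recv_wr;
        struct svc_rdma_op_ctxt *ctxt;
        struct page *page;
        dma_addr_t pa;
        int sge_no;
        int buflen = 0;
        int ret;

        ctxt = svc_rdma_get_context(xprt);
        ctxt->direction = DMA_FROM_DEVICE;
        for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
            if (sge_no >= xprt->sc_max_sge)
                goto err_put_ctxt;
            page = alloc_page(GFP_KERNEL);  /* assumed page source */
            if (!page)
                goto err_put_ctxt;
            ctxt->pages[sge_no] = page;
            pa = ib_dma_map_page(xprt->sc_cm_id->device,
                                 page, 0, PAGE_SIZE,
                                 DMA_FROM_DEVICE);
            if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
                goto err_put_ctxt;
            atomic_inc(&xprt->sc_dma_used);
            ctxt->sge[sge_no].addr = pa;
            ctxt->sge[sge_no].length = PAGE_SIZE;
            ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey;
            buflen += PAGE_SIZE;
        }
        ctxt->count = sge_no;
        recv_wr.next = NULL;
        recv_wr.sg_list = &ctxt->sge[0];
        recv_wr.num_sge = ctxt->count;
        recv_wr.wr_id = (u64)(unsigned long)ctxt;

        /* Reference covers the WR until its completion is reaped */
        svc_xprt_get(&xprt->sc_xprt);
        ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
        if (ret) {
            svc_rdma_unmap_dma(ctxt);
            svc_rdma_put_context(ctxt, 1);
            svc_xprt_put(&xprt->sc_xprt);
        }
        return ret;

    err_put_ctxt:
        svc_rdma_unmap_dma(ctxt);
        svc_rdma_put_context(ctxt, 1);
        return -ENOMEM;
    }
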
654 struct svcxprt_rdma *xprt = cma_id->context; in rdma_listen_handler() local
669 "cm_id=%p\n", xprt, cma_id); in rdma_listen_handler()
674 xprt, cma_id); in rdma_listen_handler()
675 if (xprt) in rdma_listen_handler()
676 set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); in rdma_listen_handler()
692 struct svc_xprt *xprt = cma_id->context; in rdma_cma_handler() local
694 container_of(xprt, struct svcxprt_rdma, sc_xprt); in rdma_cma_handler()
698 svc_xprt_get(xprt); in rdma_cma_handler()
700 "cm_id=%p\n", xprt, cma_id); in rdma_cma_handler()
702 svc_xprt_enqueue(xprt); in rdma_cma_handler()
706 xprt, cma_id); in rdma_cma_handler()
707 if (xprt) { in rdma_cma_handler()
708 set_bit(XPT_CLOSE, &xprt->xpt_flags); in rdma_cma_handler()
709 svc_xprt_enqueue(xprt); in rdma_cma_handler()
710 svc_xprt_put(xprt); in rdma_cma_handler()
715 "event = %s (%d)\n", cma_id, xprt, in rdma_cma_handler()
717 if (xprt) { in rdma_cma_handler()
718 set_bit(XPT_CLOSE, &xprt->xpt_flags); in rdma_cma_handler()
719 svc_xprt_enqueue(xprt); in rdma_cma_handler()
720 svc_xprt_put(xprt); in rdma_cma_handler()
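
Both CMA handlers map connection-manager events onto xprt state. The listen-side handler (lines 654-676) closes the listener on device removal; the connected-side handler takes a reference and enqueues the transport when the connection is ESTABLISHED, and on DISCONNECTED or DEVICE_REMOVAL marks it closed, enqueues it so a server thread notices, and drops the reference. A condensed sketch of the connected-side handler:

    static int rdma_cma_handler(struct rdma_cm_id *cma_id,
                                struct rdma_cm_event *event)
    {
        struct svc_xprt *xprt = cma_id->context;

        switch (event->event) {
        case RDMA_CM_EVENT_ESTABLISHED:
            /* Accept complete */
            svc_xprt_get(xprt);
            svc_xprt_enqueue(xprt);
            break;
        case RDMA_CM_EVENT_DISCONNECTED:
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
            if (xprt) {
                set_bit(XPT_CLOSE, &xprt->xpt_flags);
                svc_xprt_enqueue(xprt);
                svc_xprt_put(xprt);
            }
            break;
        default:
            break;
        }
        return 0;
    }
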
790 static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt) in rdma_alloc_frmr() argument
801 num_sg = min_t(u32, RPCSVC_MAXPAGES, xprt->sc_frmr_pg_list_len); in rdma_alloc_frmr()
802 mr = ib_alloc_mr(xprt->sc_pd, IB_MR_TYPE_MEM_REG, num_sg); in rdma_alloc_frmr()
825 static void rdma_dealloc_frmr_q(struct svcxprt_rdma *xprt) in rdma_dealloc_frmr_q() argument
829 while (!list_empty(&xprt->sc_frmr_q)) { in rdma_dealloc_frmr_q()
830 frmr = list_entry(xprt->sc_frmr_q.next, in rdma_dealloc_frmr_q()
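
FRMR bookkeeping pairs the allocator at lines 790-802, which caps the page list at RPCSVC_MAXPAGES or the device's sc_frmr_pg_list_len before calling ib_alloc_mr, with a teardown loop that walks sc_frmr_q, deregisters each MR, and frees the wrapper. A sketch of the teardown; frmr_list and the mr field are assumed names:

    static void rdma_dealloc_frmr_q(struct svcxprt_rdma *xprt)
    {
        struct svc_rdma_fastreg_mr *frmr;

        while (!list_empty(&xprt->sc_frmr_q)) {
            frmr = list_entry(xprt->sc_frmr_q.next,
                              struct svc_rdma_fastreg_mr, frmr_list);
            list_del_init(&frmr->frmr_list);
            ib_dereg_mr(frmr->mr);
            kfree(frmr);
        }
    }
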
882 static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) in svc_rdma_accept() argument
895 listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt); in svc_rdma_accept()
896 clear_bit(XPT_CONN, &xprt->xpt_flags); in svc_rdma_accept()
1149 static void svc_rdma_detach(struct svc_xprt *xprt) in svc_rdma_detach() argument
1152 container_of(xprt, struct svcxprt_rdma, sc_xprt); in svc_rdma_detach()
1153 dprintk("svc: svc_rdma_detach(%p)\n", xprt); in svc_rdma_detach()
1228 static void svc_rdma_free(struct svc_xprt *xprt) in svc_rdma_free() argument
1231 container_of(xprt, struct svcxprt_rdma, sc_xprt); in svc_rdma_free()
1236 static int svc_rdma_has_wspace(struct svc_xprt *xprt) in svc_rdma_has_wspace() argument
1239 container_of(xprt, struct svcxprt_rdma, sc_xprt); in svc_rdma_has_wspace()
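
Like detach and free above it, svc_rdma_has_wspace first recovers the containing svcxprt_rdma; it then reports write space by checking whether senders are already blocked on the SQ wait queue. A plausible body, reconstructed:

    static int svc_rdma_has_wspace(struct svc_xprt *xprt)
    {
        struct svcxprt_rdma *rdma =
            container_of(xprt, struct svcxprt_rdma, sc_xprt);

        /* If senders are already waiting for SQ space,
         * report no write space. */
        if (waitqueue_active(&rdma->sc_send_wait))
            return 0;
        return 1;
    }
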
1257 int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr) in svc_rdma_send() argument
1264 if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags)) in svc_rdma_send()
1273 spin_lock_bh(&xprt->sc_lock); in svc_rdma_send()
1274 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) { in svc_rdma_send()
1275 spin_unlock_bh(&xprt->sc_lock); in svc_rdma_send()
1279 sq_cq_reap(xprt); in svc_rdma_send()
1282 wait_event(xprt->sc_send_wait, in svc_rdma_send()
1283 atomic_read(&xprt->sc_sq_count) < in svc_rdma_send()
1284 xprt->sc_sq_depth); in svc_rdma_send()
1285 if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags)) in svc_rdma_send()
1291 svc_xprt_get(&xprt->sc_xprt); in svc_rdma_send()
1294 atomic_add(wr_count, &xprt->sc_sq_count); in svc_rdma_send()
1295 ret = ib_post_send(xprt->sc_qp, wr, &bad_wr); in svc_rdma_send()
1297 set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); in svc_rdma_send()
1298 atomic_sub(wr_count, &xprt->sc_sq_count); in svc_rdma_send()
1300 svc_xprt_put(&xprt->sc_xprt); in svc_rdma_send()
1303 ret, atomic_read(&xprt->sc_sq_count), in svc_rdma_send()
1304 xprt->sc_sq_depth); in svc_rdma_send()
1306 spin_unlock_bh(&xprt->sc_lock); in svc_rdma_send()
1308 wake_up(&xprt->sc_send_wait); in svc_rdma_send()
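
Lines 1257-1308 show svc_rdma_send implementing SQ flow control in software: under sc_lock it checks that the chained WRs fit within sc_sq_depth; if not, it opportunistically reaps the SQ CQ and sleeps on sc_send_wait until completions free slots. Once space exists it accounts the WRs, takes a transport reference, and posts; a failed post unwinds the accounting, drops the reference, and closes the transport. A condensed sketch; the wr_count derivation is assumed, and the original may take one reference per WR rather than one in total:

    int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
    {
        struct ib_send_wr *bad_wr, *n_wr;
        int wr_count = 0;
        int ret;

        if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
            return -ENOTCONN;

        /* Count the chained WRs (assumed) */
        for (n_wr = wr; n_wr; n_wr = n_wr->next)
            wr_count++;

        while (1) {
            spin_lock_bh(&xprt->sc_lock);
            if (xprt->sc_sq_depth <
                atomic_read(&xprt->sc_sq_count) + wr_count) {
                spin_unlock_bh(&xprt->sc_lock);
                /* SQ full: reap completions, then wait for room */
                sq_cq_reap(xprt);
                wait_event(xprt->sc_send_wait,
                           atomic_read(&xprt->sc_sq_count) <
                           xprt->sc_sq_depth);
                if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
                    return -ENOTCONN;
                continue;
            }
            /* Reference covers the WRs until they complete */
            svc_xprt_get(&xprt->sc_xprt);
            atomic_add(wr_count, &xprt->sc_sq_count);
            ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
            if (ret) {
                set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
                atomic_sub(wr_count, &xprt->sc_sq_count);
                svc_xprt_put(&xprt->sc_xprt);
                wake_up(&xprt->sc_send_wait);
            }
            spin_unlock_bh(&xprt->sc_lock);
            break;
        }
        return ret;
    }
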
1314 void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp, in svc_rdma_send_error() argument
1328 length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va); in svc_rdma_send_error()
1330 ctxt = svc_rdma_get_context(xprt); in svc_rdma_send_error()
1336 ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device, in svc_rdma_send_error()
1338 if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) { in svc_rdma_send_error()
1343 atomic_inc(&xprt->sc_dma_used); in svc_rdma_send_error()
1344 ctxt->sge[0].lkey = xprt->sc_dma_lkey; in svc_rdma_send_error()
1357 ret = svc_rdma_send(xprt, &err_wr); in svc_rdma_send_error()
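
Error replies reuse the same machinery: XDR-encode the error into a page, take an op context, DMA-map the page as sge[0] with the local lkey, and push a single SEND through svc_rdma_send; a mapping or send failure releases the context. A sketch; the page source and the WR setup are assumptions:

    void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
                             enum rpcrdma_errcode err)
    {
        struct ib_send_wr err_wr;
        struct svc_rdma_op_ctxt *ctxt;
        struct page *p;
        __be32 *va;
        int length;
        int ret;

        p = alloc_page(GFP_KERNEL);     /* assumed page source */
        if (!p)
            return;
        va = page_address(p);

        /* XDR-encode the error into the page */
        length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);

        ctxt = svc_rdma_get_context(xprt);
        ctxt->direction = DMA_TO_DEVICE;
        ctxt->count = 1;
        ctxt->pages[0] = p;

        /* Map the page and build the single-SGE SEND */
        ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device,
                                            p, 0, length, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
            put_page(p);
            svc_rdma_put_context(ctxt, 1);
            return;
        }
        atomic_inc(&xprt->sc_dma_used);
        ctxt->sge[0].lkey = xprt->sc_dma_lkey;
        ctxt->sge[0].length = length;

        memset(&err_wr, 0, sizeof(err_wr));
        err_wr.wr_id = (unsigned long)ctxt;
        err_wr.sg_list = ctxt->sge;
        err_wr.num_sge = 1;
        err_wr.opcode = IB_WR_SEND;
        err_wr.send_flags = IB_SEND_SIGNALED;

        ret = svc_rdma_send(xprt, &err_wr);
        if (ret) {
            svc_rdma_unmap_dma(ctxt);
            svc_rdma_put_context(ctxt, 1);
        }
    }
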