Lines Matching refs:xprt

63 static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
66 static void svc_rdma_detach(struct svc_xprt *xprt);
67 static void svc_rdma_free(struct svc_xprt *xprt);
68 static int svc_rdma_has_wspace(struct svc_xprt *xprt);
70 static void rq_cq_reap(struct svcxprt_rdma *xprt);
71 static void sq_cq_reap(struct svcxprt_rdma *xprt);
98 struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt) in svc_rdma_get_context() argument
108 ctxt->xprt = xprt; in svc_rdma_get_context()
112 atomic_inc(&xprt->sc_ctxt_used); in svc_rdma_get_context()
118 struct svcxprt_rdma *xprt = ctxt->xprt; in svc_rdma_unmap_dma() local
127 if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) { in svc_rdma_unmap_dma()
128 atomic_dec(&xprt->sc_dma_used); in svc_rdma_unmap_dma()
129 ib_dma_unmap_page(xprt->sc_cm_id->device, in svc_rdma_unmap_dma()
139 struct svcxprt_rdma *xprt; in svc_rdma_put_context() local
142 xprt = ctxt->xprt; in svc_rdma_put_context()
148 atomic_dec(&xprt->sc_ctxt_used); in svc_rdma_put_context()
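
The fragments from svc_rdma_get_context(), svc_rdma_unmap_dma() and svc_rdma_put_context() show the context lifecycle: every context taken from the transport bumps sc_ctxt_used, DMA-mapped sges are unmapped and sc_dma_used dropped, and the put releases the usage count again. A minimal user-space sketch of that paired get/put accounting, with made-up names (demo_xprt, demo_ctxt) and C11 atomics standing in for the kernel's atomic_t:

/* Simplified analogue of svc_rdma_get_context()/svc_rdma_put_context():
 * each successful "get" charges a usage counter that the matching "put"
 * releases, so a leak shows up as a non-zero count at teardown.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_xprt {
        atomic_int ctxt_used;           /* plays the role of sc_ctxt_used */
};

struct demo_ctxt {
        struct demo_xprt *xprt;
};

static struct demo_ctxt *demo_get_context(struct demo_xprt *xprt)
{
        struct demo_ctxt *ctxt = malloc(sizeof(*ctxt));

        if (!ctxt)
                return NULL;
        ctxt->xprt = xprt;
        atomic_fetch_add(&xprt->ctxt_used, 1);
        return ctxt;
}

static void demo_put_context(struct demo_ctxt *ctxt)
{
        atomic_fetch_sub(&ctxt->xprt->ctxt_used, 1);
        free(ctxt);
}

int main(void)
{
        struct demo_xprt xprt = { .ctxt_used = 0 };
        struct demo_ctxt *ctxt = demo_get_context(&xprt);

        if (ctxt)
                demo_put_context(ctxt);
        printf("contexts still in use: %d\n", atomic_load(&xprt.ctxt_used));
        return 0;
}

The DMA accounting in svc_rdma_unmap_dma() follows the same discipline: sc_dma_used goes up when a page is mapped and down when it is unmapped.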
177 struct svc_xprt *xprt = context; in cq_event_handler() local
180 set_bit(XPT_CLOSE, &xprt->xpt_flags); in cq_event_handler()
186 struct svc_xprt *xprt = context; in qp_event_handler() local
207 set_bit(XPT_CLOSE, &xprt->xpt_flags); in qp_event_handler()
223 struct svcxprt_rdma *xprt; in dto_tasklet_func() local
228 xprt = list_entry(dto_xprt_q.next, in dto_tasklet_func()
230 list_del_init(&xprt->sc_dto_q); in dto_tasklet_func()
233 rq_cq_reap(xprt); in dto_tasklet_func()
234 sq_cq_reap(xprt); in dto_tasklet_func()
236 svc_xprt_put(&xprt->sc_xprt); in dto_tasklet_func()
250 struct svcxprt_rdma *xprt = cq_context; in rq_comp_handler() local
254 if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount)==0) in rq_comp_handler()
262 set_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags); in rq_comp_handler()
269 if (list_empty(&xprt->sc_dto_q)) { in rq_comp_handler()
270 svc_xprt_get(&xprt->sc_xprt); in rq_comp_handler()
271 list_add_tail(&xprt->sc_dto_q, &dto_xprt_q); in rq_comp_handler()
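
dto_tasklet_func() and rq_comp_handler() are the two halves of a deferred-work hand-off: the completion handler marks the transport pending, takes a reference, and adds it to the global dto_xprt_q only if it is not already queued; the tasklet later pops each transport, reaps both completion queues, and drops that reference. A simplified, single-threaded analogue of the queue-at-most-once-with-a-reference pattern (all names below are hypothetical, and the kernel's locking is omitted):

/* Toy version of the dto_xprt_q hand-off: queue a transport at most
 * once, hold a reference while it is queued, drop it when drained.
 */
#include <stdio.h>

struct demo_xprt {
        int refcount;
        int queued;                     /* stands in for list_empty(&sc_dto_q) */
        struct demo_xprt *next;
};

static struct demo_xprt *dto_q;         /* stands in for dto_xprt_q */

static void demo_comp_handler(struct demo_xprt *xprt)
{
        if (!xprt->queued) {
                xprt->refcount++;       /* pin the transport while queued */
                xprt->queued = 1;
                xprt->next = dto_q;
                dto_q = xprt;
        }
}

static void demo_dto_tasklet(void)
{
        while (dto_q) {
                struct demo_xprt *xprt = dto_q;

                dto_q = xprt->next;
                xprt->queued = 0;
                printf("reaping CQs for %p\n", (void *)xprt);
                xprt->refcount--;       /* drop the queuing reference */
        }
}

int main(void)
{
        struct demo_xprt xprt = { .refcount = 1 };

        demo_comp_handler(&xprt);       /* first completion queues it */
        demo_comp_handler(&xprt);       /* second one is a no-op */
        demo_dto_tasklet();
        printf("refcount back to %d\n", xprt.refcount);
        return 0;
}

In the real code the list is manipulated under a spinlock and the drain runs from a tasklet; the sketch only illustrates the reference pairing visible in the fragments above.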
287 static void rq_cq_reap(struct svcxprt_rdma *xprt) in rq_cq_reap() argument
293 if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags)) in rq_cq_reap()
296 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP); in rq_cq_reap()
299 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) { in rq_cq_reap()
307 set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); in rq_cq_reap()
309 svc_xprt_put(&xprt->sc_xprt); in rq_cq_reap()
312 spin_lock_bh(&xprt->sc_rq_dto_lock); in rq_cq_reap()
313 list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q); in rq_cq_reap()
314 spin_unlock_bh(&xprt->sc_rq_dto_lock); in rq_cq_reap()
315 svc_xprt_put(&xprt->sc_xprt); in rq_cq_reap()
321 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags); in rq_cq_reap()
327 if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags)) in rq_cq_reap()
328 svc_xprt_enqueue(&xprt->sc_xprt); in rq_cq_reap()
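
rq_cq_reap() follows the usual verbs drain discipline visible in the fragments: clear the pending flag, re-arm the CQ notification with ib_req_notify_cq() before polling, then ib_poll_cq() one entry at a time until the queue is empty, parking each completed receive context on sc_rq_dto_q and finally flagging XPT_DATA so the transport gets enqueued. Re-arming before the drain means a completion that lands mid-drain still raises a fresh notification instead of being lost. A toy illustration of that re-arm-then-drain order (the mock CQ is just an array; nothing here is the verbs API):

/* Sketch of the ordering in rq_cq_reap(): clear pending, re-arm the
 * notification, then poll until empty.
 */
#include <stdbool.h>
#include <stdio.h>

static int mock_cq[] = { 11, 22, 33 };  /* pretend completions */
static int mock_head;
static bool pending = true;             /* RDMAXPRT_RQ_PENDING analogue */

static void rearm_notification(void)
{
        printf("CQ notification re-armed\n");
}

static bool poll_one(int *wc)
{
        if (mock_head >= (int)(sizeof(mock_cq) / sizeof(mock_cq[0])))
                return false;
        *wc = mock_cq[mock_head++];
        return true;
}

int main(void)
{
        int wc;

        if (!pending)                   /* nothing signalled: nothing to do */
                return 0;
        pending = false;
        rearm_notification();           /* re-arm before draining */
        while (poll_one(&wc))           /* then poll until the CQ is empty */
                printf("completion %d moved to sc_rq_dto_q\n", wc);
        return 0;
}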
334 static void process_context(struct svcxprt_rdma *xprt, in process_context() argument
354 svc_rdma_put_frmr(xprt, ctxt->frmr); in process_context()
358 spin_lock_bh(&xprt->sc_rq_dto_lock); in process_context()
359 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags); in process_context()
361 &xprt->sc_read_complete_q); in process_context()
362 spin_unlock_bh(&xprt->sc_rq_dto_lock); in process_context()
366 svc_xprt_enqueue(&xprt->sc_xprt); in process_context()
384 static void sq_cq_reap(struct svcxprt_rdma *xprt) in sq_cq_reap() argument
389 struct ib_cq *cq = xprt->sc_sq_cq; in sq_cq_reap()
394 if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags)) in sq_cq_reap()
397 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP); in sq_cq_reap()
409 set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); in sq_cq_reap()
413 atomic_dec(&xprt->sc_sq_count); in sq_cq_reap()
414 wake_up(&xprt->sc_send_wait); in sq_cq_reap()
419 process_context(xprt, ctxt); in sq_cq_reap()
421 svc_xprt_put(&xprt->sc_xprt); in sq_cq_reap()
431 struct svcxprt_rdma *xprt = cq_context; in sq_comp_handler() local
435 if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount)==0) in sq_comp_handler()
443 set_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags); in sq_comp_handler()
450 if (list_empty(&xprt->sc_dto_q)) { in sq_comp_handler()
451 svc_xprt_get(&xprt->sc_xprt); in sq_comp_handler()
452 list_add_tail(&xprt->sc_dto_q, &dto_xprt_q); in sq_comp_handler()
505 int svc_rdma_post_recv(struct svcxprt_rdma *xprt) in svc_rdma_post_recv() argument
515 ctxt = svc_rdma_get_context(xprt); in svc_rdma_post_recv()
518 for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) { in svc_rdma_post_recv()
519 if (sge_no >= xprt->sc_max_sge) { in svc_rdma_post_recv()
525 pa = ib_dma_map_page(xprt->sc_cm_id->device, in svc_rdma_post_recv()
528 if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa)) in svc_rdma_post_recv()
530 atomic_inc(&xprt->sc_dma_used); in svc_rdma_post_recv()
533 ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey; in svc_rdma_post_recv()
542 svc_xprt_get(&xprt->sc_xprt); in svc_rdma_post_recv()
543 ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr); in svc_rdma_post_recv()
547 svc_xprt_put(&xprt->sc_xprt); in svc_rdma_post_recv()
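
svc_rdma_post_recv() builds the receive sge list one page at a time until sc_max_req_size is covered, bailing out if that would exceed sc_max_sge, DMA-maps each page with the transport's lkey, takes a transport reference and posts the work request, dropping the reference again if ib_post_recv() fails. The sizing loop, reduced to a stand-alone sketch with invented limits (PAGE_SIZE, MAX_REQ_SIZE and MAX_SGE are example values, not the transport's):

/* Sge-list sizing in the spirit of svc_rdma_post_recv(): add one
 * page-sized sge at a time until the maximum request size is covered,
 * and fail if that would exceed the device's sge limit.
 */
#include <stdio.h>

#define PAGE_SIZE     4096
#define MAX_REQ_SIZE  (32 * 1024)       /* stands in for sc_max_req_size */
#define MAX_SGE       8                 /* stands in for sc_max_sge */

int main(void)
{
        int sge_no, buflen = 0;

        for (sge_no = 0; buflen < MAX_REQ_SIZE; sge_no++) {
                if (sge_no >= MAX_SGE) {
                        fprintf(stderr, "request too large for sge list\n");
                        return 1;
                }
                /* the real code DMA-maps one page and fills sge[sge_no] here */
                buflen += PAGE_SIZE;
        }
        printf("posted receive with %d sges covering %d bytes\n",
               sge_no, buflen);
        return 0;
}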
613 struct svcxprt_rdma *xprt = cma_id->context; in rdma_listen_handler() local
627 "cm_id=%p\n", xprt, cma_id); in rdma_listen_handler()
632 xprt, cma_id); in rdma_listen_handler()
633 if (xprt) in rdma_listen_handler()
634 set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); in rdma_listen_handler()
649 struct svc_xprt *xprt = cma_id->context; in rdma_cma_handler() local
651 container_of(xprt, struct svcxprt_rdma, sc_xprt); in rdma_cma_handler()
655 svc_xprt_get(xprt); in rdma_cma_handler()
657 "cm_id=%p\n", xprt, cma_id); in rdma_cma_handler()
659 svc_xprt_enqueue(xprt); in rdma_cma_handler()
663 xprt, cma_id); in rdma_cma_handler()
664 if (xprt) { in rdma_cma_handler()
665 set_bit(XPT_CLOSE, &xprt->xpt_flags); in rdma_cma_handler()
666 svc_xprt_enqueue(xprt); in rdma_cma_handler()
667 svc_xprt_put(xprt); in rdma_cma_handler()
672 "event=%d\n", cma_id, xprt, event->event); in rdma_cma_handler()
673 if (xprt) { in rdma_cma_handler()
674 set_bit(XPT_CLOSE, &xprt->xpt_flags); in rdma_cma_handler()
675 svc_xprt_enqueue(xprt); in rdma_cma_handler()
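
rdma_listen_handler() and rdma_cma_handler() react to connection-manager events: an established connection pins the transport and enqueues it, while disconnects, device removal and unexpected events set XPT_CLOSE, enqueue the transport so a server thread notices, and drop the connection's reference. A hedged sketch of that shape (the enum and helpers below are stand-ins, not the rdma_cm API):

/* Shape of the CM event handling seen above: ESTABLISHED pins and
 * wakes, DISCONNECTED/DEVICE_REMOVAL mark closed, wake, and unpin.
 */
#include <stdio.h>

enum demo_event { DEMO_ESTABLISHED, DEMO_DISCONNECTED, DEMO_DEVICE_REMOVAL };

struct demo_xprt { int refs; int closed; };

static void demo_enqueue(struct demo_xprt *x)
{
        printf("enqueued %p\n", (void *)x);
}

static void demo_cma_handler(struct demo_xprt *xprt, enum demo_event ev)
{
        switch (ev) {
        case DEMO_ESTABLISHED:
                xprt->refs++;           /* connection now holds a reference */
                demo_enqueue(xprt);
                break;
        case DEMO_DISCONNECTED:
        case DEMO_DEVICE_REMOVAL:
                xprt->closed = 1;       /* XPT_CLOSE analogue */
                demo_enqueue(xprt);     /* let a server thread see the close */
                xprt->refs--;           /* drop the connection's reference */
                break;
        }
}

int main(void)
{
        struct demo_xprt xprt = { .refs = 1 };

        demo_cma_handler(&xprt, DEMO_ESTABLISHED);
        demo_cma_handler(&xprt, DEMO_DISCONNECTED);
        printf("closed=%d refs=%d\n", xprt.closed, xprt.refs);
        return 0;
}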
744 static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt) in rdma_alloc_frmr() argument
754 mr = ib_alloc_fast_reg_mr(xprt->sc_pd, RPCSVC_MAXPAGES); in rdma_alloc_frmr()
758 pl = ib_alloc_fast_reg_page_list(xprt->sc_cm_id->device, in rdma_alloc_frmr()
776 static void rdma_dealloc_frmr_q(struct svcxprt_rdma *xprt) in rdma_dealloc_frmr_q() argument
780 while (!list_empty(&xprt->sc_frmr_q)) { in rdma_dealloc_frmr_q()
781 frmr = list_entry(xprt->sc_frmr_q.next, in rdma_dealloc_frmr_q()
809 static void frmr_unmap_dma(struct svcxprt_rdma *xprt, in frmr_unmap_dma() argument
817 atomic_dec(&xprt->sc_dma_used); in frmr_unmap_dma()
846 static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) in svc_rdma_accept() argument
858 listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt); in svc_rdma_accept()
859 clear_bit(XPT_CONN, &xprt->xpt_flags); in svc_rdma_accept()
1117 static void svc_rdma_detach(struct svc_xprt *xprt) in svc_rdma_detach() argument
1120 container_of(xprt, struct svcxprt_rdma, sc_xprt); in svc_rdma_detach()
1121 dprintk("svc: svc_rdma_detach(%p)\n", xprt); in svc_rdma_detach()
1196 static void svc_rdma_free(struct svc_xprt *xprt) in svc_rdma_free() argument
1199 container_of(xprt, struct svcxprt_rdma, sc_xprt); in svc_rdma_free()
1204 static int svc_rdma_has_wspace(struct svc_xprt *xprt) in svc_rdma_has_wspace() argument
1207 container_of(xprt, struct svcxprt_rdma, sc_xprt); in svc_rdma_has_wspace()
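
svc_rdma_accept(), svc_rdma_detach(), svc_rdma_free() and svc_rdma_has_wspace() all receive the generic struct svc_xprt and recover the RDMA-private struct svcxprt_rdma with container_of(), which works because sc_xprt is embedded inside the larger structure. The same pattern with toy structures (a user-space container_of is defined locally; demo_has_wspace is a placeholder, not the transport's actual space check):

/* container_of: recover the enclosing structure from a pointer to an
 * embedded member, as the svc_xprt ops above do with sc_xprt.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct generic_xprt { int flags; };

struct rdma_xprt {
        int sq_depth;
        struct generic_xprt xprt;       /* embedded generic part */
};

static int demo_has_wspace(struct generic_xprt *xprt)
{
        struct rdma_xprt *rdma = container_of(xprt, struct rdma_xprt, xprt);

        return rdma->sq_depth > 0;      /* placeholder space check */
}

int main(void)
{
        struct rdma_xprt rdma = { .sq_depth = 16 };

        printf("has wspace: %d\n", demo_has_wspace(&rdma.xprt));
        return 0;
}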
1235 int svc_rdma_fastreg(struct svcxprt_rdma *xprt, in svc_rdma_fastreg() argument
1256 return svc_rdma_send(xprt, &fastreg_wr); in svc_rdma_fastreg()
1259 int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr) in svc_rdma_send() argument
1266 if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags)) in svc_rdma_send()
1275 spin_lock_bh(&xprt->sc_lock); in svc_rdma_send()
1276 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) { in svc_rdma_send()
1277 spin_unlock_bh(&xprt->sc_lock); in svc_rdma_send()
1281 sq_cq_reap(xprt); in svc_rdma_send()
1284 wait_event(xprt->sc_send_wait, in svc_rdma_send()
1285 atomic_read(&xprt->sc_sq_count) < in svc_rdma_send()
1286 xprt->sc_sq_depth); in svc_rdma_send()
1287 if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags)) in svc_rdma_send()
1293 svc_xprt_get(&xprt->sc_xprt); in svc_rdma_send()
1296 atomic_add(wr_count, &xprt->sc_sq_count); in svc_rdma_send()
1297 ret = ib_post_send(xprt->sc_qp, wr, &bad_wr); in svc_rdma_send()
1299 set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); in svc_rdma_send()
1300 atomic_sub(wr_count, &xprt->sc_sq_count); in svc_rdma_send()
1302 svc_xprt_put(&xprt->sc_xprt); in svc_rdma_send()
1305 ret, atomic_read(&xprt->sc_sq_count), in svc_rdma_send()
1306 xprt->sc_sq_depth); in svc_rdma_send()
1308 spin_unlock_bh(&xprt->sc_lock); in svc_rdma_send()
1310 wake_up(&xprt->sc_send_wait); in svc_rdma_send()
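
svc_rdma_fastreg() and svc_rdma_send() show the send-queue flow control: under sc_lock, if the new work requests would push sc_sq_count past sc_sq_depth, the caller reaps the SQ CQ and sleeps on sc_send_wait until sq_cq_reap() retires completions and calls wake_up(); otherwise it charges sc_sq_count, takes a transport reference and posts, rolling both back if ib_post_send() fails. A simplified user-space analogue of that credit scheme, with pthread primitives standing in for wait_event()/wake_up() and made-up constants:

/* Credit-based flow control in the spirit of svc_rdma_send() and
 * sq_cq_reap(): the sender blocks when the send queue is full and the
 * completion reaper returns credits and wakes it.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define SQ_DEPTH 4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t send_wait = PTHREAD_COND_INITIALIZER;
static int sq_count;                    /* work requests currently posted */

static void demo_send(int wr_count)
{
        pthread_mutex_lock(&lock);
        while (sq_count + wr_count > SQ_DEPTH)
                pthread_cond_wait(&send_wait, &lock);   /* SQ full: wait */
        sq_count += wr_count;
        pthread_mutex_unlock(&lock);
        printf("posted %d wr(s), sq_count=%d\n", wr_count, sq_count);
}

static void *demo_reaper(void *arg)
{
        (void)arg;
        for (int i = 0; i < 8; i++) {
                usleep(1000);
                pthread_mutex_lock(&lock);
                if (sq_count > 0)
                        sq_count--;                     /* completion reaped */
                pthread_cond_broadcast(&send_wait);     /* wake blocked sender */
                pthread_mutex_unlock(&lock);
        }
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, demo_reaper, NULL);
        for (int i = 0; i < 6; i++)
                demo_send(1);                           /* 6 sends, depth 4 */
        pthread_join(t, NULL);
        return 0;
}

The roll-back on post failure in the fragments (atomic_sub of wr_count, svc_xprt_put, XPT_CLOSE) keeps the credit count honest when the hardware rejects the work request.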
1316 void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp, in svc_rdma_send_error() argument
1330 length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va); in svc_rdma_send_error()
1332 ctxt = svc_rdma_get_context(xprt); in svc_rdma_send_error()
1338 ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device, in svc_rdma_send_error()
1340 if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) { in svc_rdma_send_error()
1345 atomic_inc(&xprt->sc_dma_used); in svc_rdma_send_error()
1346 ctxt->sge[0].lkey = xprt->sc_dma_lkey; in svc_rdma_send_error()
1359 ret = svc_rdma_send(xprt, &err_wr); in svc_rdma_send_error()