Lines matching refs:newxprt
571 struct svcxprt_rdma *newxprt; in handle_connect_req() local
575 newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0); in handle_connect_req()
576 if (!newxprt) { in handle_connect_req()
580 newxprt->sc_cm_id = new_cma_id; in handle_connect_req()
581 new_cma_id->context = newxprt; in handle_connect_req()
583 newxprt, newxprt->sc_cm_id, listen_xprt); in handle_connect_req()
586 newxprt->sc_ord = client_ird; in handle_connect_req()
589 sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr; in handle_connect_req()
590 svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa)); in handle_connect_req()
591 sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr; in handle_connect_req()
592 svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa)); in handle_connect_req()
599 list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q); in handle_connect_req()
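The handle_connect_req() hits above (and the svc_rdma_accept() hits below) appear to come from the server-side RPC/RDMA transport, net/sunrpc/xprtrdma/svc_rdma_transport.c, in a kernel of roughly the 4.0 era (ib_query_device() and ib_get_dma_mr() still exist there). A sketch of how the matched handle_connect_req() lines fit together follows; it is reconstructed from the fragments above, so the listen_xprt derivation, the sc_lock locking, and the trailing listener wake-up are assumptions about the surrounding code, not part of the listing.

    static void handle_connect_req(struct rdma_cm_id *new_cma_id,
                                   size_t client_ird)
    {
            /* Assumption: the child cm_id inherits the listener's context. */
            struct svcxprt_rdma *listen_xprt = new_cma_id->context;
            struct svcxprt_rdma *newxprt;
            struct sockaddr *sa;

            /* Allocate a transport parented to the listener's svc_serv. */
            newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0);
            if (!newxprt)
                    return;

            /* Cross-link the new cm_id and the new transport. */
            newxprt->sc_cm_id = new_cma_id;
            new_cma_id->context = newxprt;

            /* Remember the client's requested inbound RDMA read depth. */
            newxprt->sc_ord = client_ird;

            /* Record peer and local addresses on the generic svc_xprt. */
            sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
            svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
            sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
            svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

            /* Queue for svc_rdma_accept() and wake the listener. */
            spin_lock_bh(&listen_xprt->sc_lock);
            list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
            spin_unlock_bh(&listen_xprt->sc_lock);
            set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
            svc_xprt_enqueue(&listen_xprt->sc_xprt);
    }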
849 struct svcxprt_rdma *newxprt = NULL; in svc_rdma_accept() local
863 newxprt = list_entry(listen_rdma->sc_accept_q.next, in svc_rdma_accept()
865 list_del_init(&newxprt->sc_accept_q); in svc_rdma_accept()
870 if (!newxprt) in svc_rdma_accept()
874 newxprt, newxprt->sc_cm_id); in svc_rdma_accept()
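The first svc_rdma_accept() hits (source lines 849-874) are the hand-off from the listener: the next pending child transport is popped off the accept queue that handle_connect_req() filled. A sketch, where xprt is the listening svc_xprt passed to svc_rdma_accept(), and the sc_lock locking and XPT_CONN re-arming are assumptions:

    struct svcxprt_rdma *listen_rdma;
    struct svcxprt_rdma *newxprt = NULL;

    listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
    clear_bit(XPT_CONN, &xprt->xpt_flags);

    /* Pop the next pending transport off the listener's accept queue. */
    spin_lock_bh(&listen_rdma->sc_lock);
    if (!list_empty(&listen_rdma->sc_accept_q)) {
            newxprt = list_entry(listen_rdma->sc_accept_q.next,
                                 struct svcxprt_rdma, sc_accept_q);
            list_del_init(&newxprt->sc_accept_q);
    }
    /* Re-arm XPT_CONN if more connect requests are still queued. */
    if (!list_empty(&listen_rdma->sc_accept_q))
            set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
    spin_unlock_bh(&listen_rdma->sc_lock);

    if (!newxprt)
            return NULL;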
876 ret = ib_query_device(newxprt->sc_cm_id->device, &devattr); in svc_rdma_accept()
879 "device %p, rc=%d\n", newxprt->sc_cm_id->device, ret); in svc_rdma_accept()
885 newxprt->sc_max_sge = min((size_t)devattr.max_sge, in svc_rdma_accept()
887 newxprt->sc_max_requests = min((size_t)devattr.max_qp_wr, in svc_rdma_accept()
889 newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests; in svc_rdma_accept()
895 newxprt->sc_ord = min_t(size_t, devattr.max_qp_rd_atom, newxprt->sc_ord); in svc_rdma_accept()
896 newxprt->sc_ord = min_t(size_t, svcrdma_ord, newxprt->sc_ord); in svc_rdma_accept()
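Next the device's limits are queried and used to clamp the transport's defaults. The fragments show only the first argument of each min(); the second arguments below (RPCSVC_MAXPAGES and svcrdma_max_requests) are what this file used in that era and should be treated as assumptions, as should the devattr local (a struct ib_device_attr):

    struct ib_device_attr devattr;
    int ret;

    ret = ib_query_device(newxprt->sc_cm_id->device, &devattr);
    if (ret)
            goto errout;

    /* Clamp transport defaults to what this device can actually do. */
    newxprt->sc_max_sge = min((size_t)devattr.max_sge,
                              (size_t)RPCSVC_MAXPAGES);
    newxprt->sc_max_requests = min((size_t)devattr.max_qp_wr,
                                   (size_t)svcrdma_max_requests);
    newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests;

    /* ORD: minimum of the client's request (saved in handle_connect_req),
     * the device limit, and the svcrdma_ord module parameter. */
    newxprt->sc_ord = min_t(size_t, devattr.max_qp_rd_atom, newxprt->sc_ord);
    newxprt->sc_ord = min_t(size_t, svcrdma_ord, newxprt->sc_ord);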
898 newxprt->sc_pd = ib_alloc_pd(newxprt->sc_cm_id->device); in svc_rdma_accept()
899 if (IS_ERR(newxprt->sc_pd)) { in svc_rdma_accept()
903 newxprt->sc_sq_cq = ib_create_cq(newxprt->sc_cm_id->device, in svc_rdma_accept()
906 newxprt, in svc_rdma_accept()
907 newxprt->sc_sq_depth, in svc_rdma_accept()
909 if (IS_ERR(newxprt->sc_sq_cq)) { in svc_rdma_accept()
913 newxprt->sc_rq_cq = ib_create_cq(newxprt->sc_cm_id->device, in svc_rdma_accept()
916 newxprt, in svc_rdma_accept()
917 newxprt->sc_max_requests, in svc_rdma_accept()
919 if (IS_ERR(newxprt->sc_rq_cq)) { in svc_rdma_accept()
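The PD and the two completion queues come next. The six-argument ib_create_cq() matches the pre-4.2 verbs API implied by the fragments; the sq_comp_handler, rq_comp_handler, and cq_event_handler names are the handlers defined elsewhere in this file and are assumptions here, not part of the matched lines:

    newxprt->sc_pd = ib_alloc_pd(newxprt->sc_cm_id->device);
    if (IS_ERR(newxprt->sc_pd)) {
            dprintk("svcrdma: error creating PD for connect request\n");
            goto errout;
    }

    /* One CQ for send-side completions, sized to the SQ depth... */
    newxprt->sc_sq_cq = ib_create_cq(newxprt->sc_cm_id->device,
                                     sq_comp_handler,
                                     cq_event_handler,
                                     newxprt,
                                     newxprt->sc_sq_depth,
                                     0);
    if (IS_ERR(newxprt->sc_sq_cq))
            goto errout;

    /* ...and one for receive completions, sized to the request limit. */
    newxprt->sc_rq_cq = ib_create_cq(newxprt->sc_cm_id->device,
                                     rq_comp_handler,
                                     cq_event_handler,
                                     newxprt,
                                     newxprt->sc_max_requests,
                                     0);
    if (IS_ERR(newxprt->sc_rq_cq))
            goto errout;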
926 qp_attr.qp_context = &newxprt->sc_xprt; in svc_rdma_accept()
927 qp_attr.cap.max_send_wr = newxprt->sc_sq_depth; in svc_rdma_accept()
928 qp_attr.cap.max_recv_wr = newxprt->sc_max_requests; in svc_rdma_accept()
929 qp_attr.cap.max_send_sge = newxprt->sc_max_sge; in svc_rdma_accept()
930 qp_attr.cap.max_recv_sge = newxprt->sc_max_sge; in svc_rdma_accept()
933 qp_attr.send_cq = newxprt->sc_sq_cq; in svc_rdma_accept()
934 qp_attr.recv_cq = newxprt->sc_rq_cq; in svc_rdma_accept()
941 newxprt->sc_cm_id, newxprt->sc_pd, in svc_rdma_accept()
942 newxprt->sc_cm_id->device, newxprt->sc_pd->device, in svc_rdma_accept()
948 ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr); in svc_rdma_accept()
953 newxprt->sc_qp = newxprt->sc_cm_id->qp; in svc_rdma_accept()
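The QP is then created on the cm_id with capabilities derived from the values computed above. qp_attr here is a struct ib_qp_init_attr; the qp_event_handler, sq_sig_type, and qp_type assignments are not in the matched lines and are reconstructed from the usual shape of this function:

    struct ib_qp_init_attr qp_attr;

    memset(&qp_attr, 0, sizeof(qp_attr));
    qp_attr.event_handler = qp_event_handler;
    qp_attr.qp_context = &newxprt->sc_xprt;
    qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
    qp_attr.cap.max_recv_wr = newxprt->sc_max_requests;
    qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
    qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
    qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
    qp_attr.qp_type = IB_QPT_RC;
    qp_attr.send_cq = newxprt->sc_sq_cq;
    qp_attr.recv_cq = newxprt->sc_rq_cq;

    ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
    if (ret) {
            dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
            goto errout;
    }
    /* rdma_create_qp() stores the new QP in the cm_id. */
    newxprt->sc_qp = newxprt->sc_cm_id->qp;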
977 newxprt->sc_reader = rdma_read_chunk_lcl; in svc_rdma_accept()
979 newxprt->sc_frmr_pg_list_len = in svc_rdma_accept()
981 newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG; in svc_rdma_accept()
982 newxprt->sc_reader = rdma_read_chunk_frmr; in svc_rdma_accept()
988 switch (rdma_node_get_transport(newxprt->sc_cm_id->device->node_type)) { in svc_rdma_accept()
990 newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV; in svc_rdma_accept()
991 if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)) { in svc_rdma_accept()
1003 if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)) { in svc_rdma_accept()
1020 newxprt->sc_phys_mr = in svc_rdma_accept()
1021 ib_get_dma_mr(newxprt->sc_pd, dma_mr_acc); in svc_rdma_accept()
1022 if (IS_ERR(newxprt->sc_phys_mr)) { in svc_rdma_accept()
1027 newxprt->sc_dma_lkey = newxprt->sc_phys_mr->lkey; in svc_rdma_accept()
1029 newxprt->sc_dma_lkey = in svc_rdma_accept()
1030 newxprt->sc_cm_id->device->local_dma_lkey; in svc_rdma_accept()
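Source lines 977-1030 choose a memory-registration strategy: FRMR-based RDMA reads when the device advertises IB_DEVICE_MEM_MGT_EXTENSIONS, a global DMA MR when one is still required, and the device's local_dma_lkey otherwise. The sketch below is heavily condensed (the real code is a transport-type switch with separate iWARP and IB branches); need_dma_mr, dma_mr_acc, and the capability tests are assumptions drawn from the surrounding code of that era:

    int need_dma_mr = 0;
    int dma_mr_acc = 0;

    /* Default: RDMA reads use the local-lkey ("lcl") read path. */
    newxprt->sc_reader = rdma_read_chunk_lcl;
    if (devattr.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
            /* Device supports fast registration; prefer FRMR reads. */
            newxprt->sc_frmr_pg_list_len =
                    devattr.max_fast_reg_page_list_len;
            newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG;
            newxprt->sc_reader = rdma_read_chunk_frmr;
    }

    /* iWARP devices additionally get read-with-invalidate; without fast
     * registration a writable DMA MR is required.  (The IB branch is
     * similar but needs only local write access.) */
    switch (rdma_node_get_transport(newxprt->sc_cm_id->device->node_type)) {
    case RDMA_TRANSPORT_IWARP:
            newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;
            if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)) {
                    need_dma_mr = 1;
                    dma_mr_acc = IB_ACCESS_LOCAL_WRITE |
                                 IB_ACCESS_REMOTE_WRITE;
            }
            break;
    default:
            break;
    }

    if (need_dma_mr) {
            /* Register all of physical memory with the chosen access. */
            newxprt->sc_phys_mr =
                    ib_get_dma_mr(newxprt->sc_pd, dma_mr_acc);
            if (IS_ERR(newxprt->sc_phys_mr))
                    goto errout;
            newxprt->sc_dma_lkey = newxprt->sc_phys_mr->lkey;
    } else {
            newxprt->sc_dma_lkey =
                    newxprt->sc_cm_id->device->local_dma_lkey;
    }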
1033 for (i = 0; i < newxprt->sc_max_requests; i++) { in svc_rdma_accept()
1034 ret = svc_rdma_post_recv(newxprt); in svc_rdma_accept()
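Before the connection is accepted, the receive queue is filled so the client's first RPCs have somewhere to land. A minimal sketch of that loop, directly following the matched lines:

    for (i = 0; i < newxprt->sc_max_requests; i++) {
            ret = svc_rdma_post_recv(newxprt);
            if (ret) {
                    dprintk("svcrdma: failure posting receive buffers\n");
                    goto errout;
            }
    }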
1042 newxprt->sc_cm_id->event_handler = rdma_cma_handler; in svc_rdma_accept()
1048 ib_req_notify_cq(newxprt->sc_sq_cq, IB_CQ_NEXT_COMP); in svc_rdma_accept()
1049 ib_req_notify_cq(newxprt->sc_rq_cq, IB_CQ_NEXT_COMP); in svc_rdma_accept()
1052 set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags); in svc_rdma_accept()
1055 conn_param.initiator_depth = newxprt->sc_ord; in svc_rdma_accept()
1056 ret = rdma_accept(newxprt->sc_cm_id, &conn_param); in svc_rdma_accept()
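The final setup steps: the cm_id's event handler is switched from the listener's handler to the connected-side rdma_cma_handler, both CQs are armed so the first completion after accept cannot be missed, and rdma_accept() answers the connect request with the negotiated initiator depth. conn_param is a struct rdma_conn_param; zeroing it and the responder_resources value are assumptions here:

    struct rdma_conn_param conn_param;

    /* From here on, CM events for this id go to the per-connection handler. */
    newxprt->sc_cm_id->event_handler = rdma_cma_handler;

    /* Arm both CQs before accepting so the first message is not missed. */
    ib_req_notify_cq(newxprt->sc_sq_cq, IB_CQ_NEXT_COMP);
    ib_req_notify_cq(newxprt->sc_rq_cq, IB_CQ_NEXT_COMP);

    /* Accept the connection; establishment is reported asynchronously
     * via RDMA_CM_EVENT_ESTABLISHED, hence the pending flag. */
    set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
    memset(&conn_param, 0, sizeof(conn_param));
    conn_param.responder_resources = 0;
    conn_param.initiator_depth = newxprt->sc_ord;
    ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
    if (ret) {
            dprintk("svcrdma: failed to accept new connection, ret=%d\n", ret);
            goto errout;
    }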
1073 newxprt, in svc_rdma_accept()
1074 &((struct sockaddr_in *)&newxprt->sc_cm_id-> in svc_rdma_accept()
1076 ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id-> in svc_rdma_accept()
1078 &((struct sockaddr_in *)&newxprt->sc_cm_id-> in svc_rdma_accept()
1080 ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id-> in svc_rdma_accept()
1082 newxprt->sc_max_sge, in svc_rdma_accept()
1083 newxprt->sc_sq_depth, in svc_rdma_accept()
1084 newxprt->sc_max_requests, in svc_rdma_accept()
1085 newxprt->sc_ord); in svc_rdma_accept()
1087 return &newxprt->sc_xprt; in svc_rdma_accept()
1092 svc_xprt_get(&newxprt->sc_xprt); in svc_rdma_accept()
1093 if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp)) in svc_rdma_accept()
1094 ib_destroy_qp(newxprt->sc_qp); in svc_rdma_accept()
1095 rdma_destroy_id(newxprt->sc_cm_id); in svc_rdma_accept()
1097 svc_xprt_put(&newxprt->sc_xprt); in svc_rdma_accept()
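The last four hits are the error path. The extra svc_xprt_get() before tearing down the QP and cm_id keeps the transport pinned in case a DTO handler still runs against it; the final svc_xprt_put() then drops the last reference and frees the transport. Roughly:

    errout:
            dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
            /* Take a reference in case the DTO handler runs. */
            svc_xprt_get(&newxprt->sc_xprt);
            if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
                    ib_destroy_qp(newxprt->sc_qp);
            rdma_destroy_id(newxprt->sc_cm_id);
            /* This put destroys the transport. */
            svc_xprt_put(&newxprt->sc_xprt);
            return NULL;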