Lines matching references to "ch" (struct srp_rdma_ch) in the Linux SRP initiator, drivers/infiniband/ulp/srp/ib_srp.c. Each entry gives the source line number, the matching line, and the enclosing function; "argument" and "local" note whether "ch" is a parameter or a local variable at that point.

292 static int srp_new_cm_id(struct srp_rdma_ch *ch)  in srp_new_cm_id()  argument
294 struct srp_target_port *target = ch->target; in srp_new_cm_id()
298 srp_cm_handler, ch); in srp_new_cm_id()
302 if (ch->cm_id) in srp_new_cm_id()
303 ib_destroy_cm_id(ch->cm_id); in srp_new_cm_id()
304 ch->cm_id = new_cm_id; in srp_new_cm_id()
305 ch->path.sgid = target->sgid; in srp_new_cm_id()
306 ch->path.dgid = target->orig_dgid; in srp_new_cm_id()
307 ch->path.pkey = target->pkey; in srp_new_cm_id()
308 ch->path.service_id = target->service_id; in srp_new_cm_id()
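
Taken together, the srp_new_cm_id() lines show a replace-then-swap pattern: the new IB CM ID is allocated before the old one is destroyed, so a failed allocation leaves the channel's existing CM ID intact. A minimal sketch of the surrounding function, reconstructed from these fragments (error reporting abbreviated):

    static int srp_new_cm_id(struct srp_rdma_ch *ch)
    {
        struct srp_target_port *target = ch->target;
        struct ib_cm_id *new_cm_id;

        /* Allocate the replacement first; on failure the old ID survives. */
        new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
                                    srp_cm_handler, ch);
        if (IS_ERR(new_cm_id))
            return PTR_ERR(new_cm_id);

        if (ch->cm_id)
            ib_destroy_cm_id(ch->cm_id);
        ch->cm_id = new_cm_id;

        /* Re-seed the path record with the target's original addressing. */
        ch->path.sgid       = target->sgid;
        ch->path.dgid       = target->orig_dgid;
        ch->path.pkey       = target->pkey;
        ch->path.service_id = target->service_id;

        return 0;
    }
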
466 static void srp_destroy_qp(struct srp_rdma_ch *ch) in srp_destroy_qp() argument
474 WARN_ON_ONCE(ch->connected); in srp_destroy_qp()
476 ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE); in srp_destroy_qp()
481 init_completion(&ch->done); in srp_destroy_qp()
482 ret = ib_post_recv(ch->qp, &wr, &bad_wr); in srp_destroy_qp()
485 wait_for_completion(&ch->done); in srp_destroy_qp()
488 ib_destroy_qp(ch->qp); in srp_destroy_qp()
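
srp_destroy_qp() drains the QP before destroying it: the QP is forced into the error state, one last receive with a sentinel work-request ID is posted, and the function sleeps until that request completes in error, at which point every earlier work request is known to have flushed. A sketch of that drain, assuming a sentinel constant (called SRP_LAST_WR_ID in contemporary versions of this driver):

    static void srp_destroy_qp(struct srp_rdma_ch *ch)
    {
        static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
        static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
        struct ib_recv_wr *bad_wr;
        int ret;

        /* ch->done may only be reused once no connection can complete it. */
        WARN_ON_ONCE(ch->connected);

        /* Move the QP to the error state so posted WRs complete in error. */
        ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
        if (ret)
            goto out;

        /* Post a sentinel receive; once it flushes, the QP is drained. */
        init_completion(&ch->done);
        ret = ib_post_recv(ch->qp, &wr, &bad_wr);
        if (ret == 0)
            wait_for_completion(&ch->done);

    out:
        ib_destroy_qp(ch->qp);
    }
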
491 static int srp_create_ch_ib(struct srp_rdma_ch *ch) in srp_create_ch_ib() argument
493 struct srp_target_port *target = ch->target; in srp_create_ch_ib()
508 recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch, in srp_create_ch_ib()
509 target->queue_size + 1, ch->comp_vector); in srp_create_ch_ib()
515 send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch, in srp_create_ch_ib()
516 m * target->queue_size, ch->comp_vector); in srp_create_ch_ib()
552 if (ch->fr_pool) in srp_create_ch_ib()
553 srp_destroy_fr_pool(ch->fr_pool); in srp_create_ch_ib()
554 ch->fr_pool = fr_pool; in srp_create_ch_ib()
563 if (ch->fmr_pool) in srp_create_ch_ib()
564 ib_destroy_fmr_pool(ch->fmr_pool); in srp_create_ch_ib()
565 ch->fmr_pool = fmr_pool; in srp_create_ch_ib()
568 if (ch->qp) in srp_create_ch_ib()
569 srp_destroy_qp(ch); in srp_create_ch_ib()
570 if (ch->recv_cq) in srp_create_ch_ib()
571 ib_destroy_cq(ch->recv_cq); in srp_create_ch_ib()
572 if (ch->send_cq) in srp_create_ch_ib()
573 ib_destroy_cq(ch->send_cq); in srp_create_ch_ib()
575 ch->qp = qp; in srp_create_ch_ib()
576 ch->recv_cq = recv_cq; in srp_create_ch_ib()
577 ch->send_cq = send_cq; in srp_create_ch_ib()
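
srp_create_ch_ib() builds replacement resources (receive CQ, send CQ, QP, and an FR or FMR pool) into locals and commits them to the channel only after everything has been created, retiring the old objects at that point. A condensed sketch of that commit discipline; init_attr setup, error unwinding, and the MR-pool creation are elided, and the value of m (an extra send WR per command when fast registration is used) is an assumption from context:

    static int srp_create_ch_ib(struct srp_rdma_ch *ch)
    {
        struct srp_target_port *target = ch->target;
        struct srp_device *dev = target->srp_host->srp_dev;
        const int m = 1 + dev->use_fast_reg;  /* send WRs per command */
        struct ib_qp_init_attr *init_attr;    /* allocated and filled; elided */
        struct ib_cq *recv_cq, *send_cq;
        struct ib_qp *qp;

        recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
                               target->queue_size + 1, ch->comp_vector);
        send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
                               m * target->queue_size, ch->comp_vector);
        qp = ib_create_qp(dev->pd, init_attr);
        /* ... error checks and FR/FMR pool creation ... */

        /* Everything new exists: retire the old objects, then swap. */
        if (ch->qp)
            srp_destroy_qp(ch);
        if (ch->recv_cq)
            ib_destroy_cq(ch->recv_cq);
        if (ch->send_cq)
            ib_destroy_cq(ch->send_cq);

        ch->qp      = qp;
        ch->recv_cq = recv_cq;
        ch->send_cq = send_cq;
        return 0;
    }
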
601 struct srp_rdma_ch *ch) in srp_free_ch_ib() argument
606 if (!ch->target) in srp_free_ch_ib()
609 if (ch->cm_id) { in srp_free_ch_ib()
610 ib_destroy_cm_id(ch->cm_id); in srp_free_ch_ib()
611 ch->cm_id = NULL; in srp_free_ch_ib()
615 if (!ch->qp) in srp_free_ch_ib()
619 if (ch->fr_pool) in srp_free_ch_ib()
620 srp_destroy_fr_pool(ch->fr_pool); in srp_free_ch_ib()
622 if (ch->fmr_pool) in srp_free_ch_ib()
623 ib_destroy_fmr_pool(ch->fmr_pool); in srp_free_ch_ib()
625 srp_destroy_qp(ch); in srp_free_ch_ib()
626 ib_destroy_cq(ch->send_cq); in srp_free_ch_ib()
627 ib_destroy_cq(ch->recv_cq); in srp_free_ch_ib()
635 ch->target = NULL; in srp_free_ch_ib()
637 ch->qp = NULL; in srp_free_ch_ib()
638 ch->send_cq = ch->recv_cq = NULL; in srp_free_ch_ib()
640 if (ch->rx_ring) { in srp_free_ch_ib()
642 srp_free_iu(target->srp_host, ch->rx_ring[i]); in srp_free_ch_ib()
643 kfree(ch->rx_ring); in srp_free_ch_ib()
644 ch->rx_ring = NULL; in srp_free_ch_ib()
646 if (ch->tx_ring) { in srp_free_ch_ib()
648 srp_free_iu(target->srp_host, ch->tx_ring[i]); in srp_free_ch_ib()
649 kfree(ch->tx_ring); in srp_free_ch_ib()
650 ch->tx_ring = NULL; in srp_free_ch_ib()
658 struct srp_rdma_ch *ch = ch_ptr; in srp_path_rec_completion() local
659 struct srp_target_port *target = ch->target; in srp_path_rec_completion()
661 ch->status = status; in srp_path_rec_completion()
666 ch->path = *pathrec; in srp_path_rec_completion()
667 complete(&ch->done); in srp_path_rec_completion()
670 static int srp_lookup_path(struct srp_rdma_ch *ch) in srp_lookup_path() argument
672 struct srp_target_port *target = ch->target; in srp_lookup_path()
675 ch->path.numb_path = 1; in srp_lookup_path()
677 init_completion(&ch->done); in srp_lookup_path()
679 ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client, in srp_lookup_path()
682 &ch->path, in srp_lookup_path()
691 ch, &ch->path_query); in srp_lookup_path()
692 if (ch->path_query_id < 0) in srp_lookup_path()
693 return ch->path_query_id; in srp_lookup_path()
695 ret = wait_for_completion_interruptible(&ch->done); in srp_lookup_path()
699 if (ch->status < 0) in srp_lookup_path()
703 return ch->status; in srp_lookup_path()
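
srp_lookup_path() is the usual synchronous wrapper around the asynchronous SA path-record query: issue ib_sa_path_rec_get() with srp_path_rec_completion() as the callback, then block on ch->done; the callback stores the status and the resolved path on the channel before completing. A sketch, with the component mask reconstructed from the fields the fragments show being seeded (service ID, DGID, SGID, numb_path, P_Key):

    static int srp_lookup_path(struct srp_rdma_ch *ch)
    {
        struct srp_target_port *target = ch->target;
        int ret;

        ch->path.numb_path = 1;

        init_completion(&ch->done);

        ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
                        target->srp_host->srp_dev->dev,
                        target->srp_host->port,
                        &ch->path,
                        IB_SA_PATH_REC_SERVICE_ID | IB_SA_PATH_REC_DGID |
                        IB_SA_PATH_REC_SGID | IB_SA_PATH_REC_NUMB_PATH |
                        IB_SA_PATH_REC_PKEY,
                        SRP_PATH_REC_TIMEOUT_MS, GFP_KERNEL,
                        srp_path_rec_completion, ch, &ch->path_query);
        if (ch->path_query_id < 0)
            return ch->path_query_id;

        /* The completion callback fills in ch->path and ch->status. */
        ret = wait_for_completion_interruptible(&ch->done);
        if (ret < 0)
            return ret;

        return ch->status;
    }
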
706 static int srp_send_req(struct srp_rdma_ch *ch, bool multich) in srp_send_req() argument
708 struct srp_target_port *target = ch->target; in srp_send_req()
719 req->param.primary_path = &ch->path; in srp_send_req()
722 req->param.qp_num = ch->qp->qp_num; in srp_send_req()
723 req->param.qp_type = ch->qp->qp_type; in srp_send_req()
789 status = ib_send_cm_req(ch->cm_id, &req->param); in srp_send_req()
815 struct srp_rdma_ch *ch; in srp_disconnect_target() local
821 ch = &target->ch[i]; in srp_disconnect_target()
822 ch->connected = false; in srp_disconnect_target()
823 if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) { in srp_disconnect_target()
831 struct srp_rdma_ch *ch) in srp_free_req_data() argument
838 if (!ch->target || !ch->req_ring) in srp_free_req_data()
842 req = &ch->req_ring[i]; in srp_free_req_data()
856 kfree(ch->req_ring); in srp_free_req_data()
857 ch->req_ring = NULL; in srp_free_req_data()
860 static int srp_alloc_req_data(struct srp_rdma_ch *ch) in srp_alloc_req_data() argument
862 struct srp_target_port *target = ch->target; in srp_alloc_req_data()
870 ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring), in srp_alloc_req_data()
872 if (!ch->req_ring) in srp_alloc_req_data()
876 req = &ch->req_ring[i]; in srp_alloc_req_data()
924 struct srp_rdma_ch *ch; in srp_remove_target() local
936 ch = &target->ch[i]; in srp_remove_target()
937 srp_free_ch_ib(target, ch); in srp_remove_target()
942 ch = &target->ch[i]; in srp_remove_target()
943 srp_free_req_data(target, ch); in srp_remove_target()
945 kfree(target->ch); in srp_remove_target()
946 target->ch = NULL; in srp_remove_target()
981 c += target->ch[i].connected; in srp_connected_ch()
986 static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich) in srp_connect_ch() argument
988 struct srp_target_port *target = ch->target; in srp_connect_ch()
993 ret = srp_lookup_path(ch); in srp_connect_ch()
998 init_completion(&ch->done); in srp_connect_ch()
999 ret = srp_send_req(ch, multich); in srp_connect_ch()
1002 ret = wait_for_completion_interruptible(&ch->done); in srp_connect_ch()
1012 switch (ch->status) { in srp_connect_ch()
1014 ch->connected = true; in srp_connect_ch()
1018 ret = srp_lookup_path(ch); in srp_connect_ch()
1029 ch->status = -ECONNRESET; in srp_connect_ch()
1030 return ch->status; in srp_connect_ch()
1033 return ch->status; in srp_connect_ch()
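
srp_connect_ch() loops on the login: each pass sends an SRP_LOGIN_REQ through the CM and waits for the CM event handler to complete ch->done, then dispatches on ch->status. Success marks the channel connected; a port redirect triggers a fresh path lookup and a retry; a stale connection or any unknown status aborts with -ECONNRESET. A condensed sketch of the loop (log messages elided):

    ret = srp_lookup_path(ch);
    if (ret)
        return ret;

    while (1) {
        init_completion(&ch->done);
        ret = srp_send_req(ch, multich);
        if (ret)
            return ret;
        ret = wait_for_completion_interruptible(&ch->done);
        if (ret < 0)
            return ret;

        /* ch->status was set by the CM event handler. */
        switch (ch->status) {
        case 0:
            ch->connected = true;
            return 0;

        case SRP_PORT_REDIRECT:
            /* Target moved: redo the path lookup, then retry the login. */
            ret = srp_lookup_path(ch);
            if (ret)
                return ret;
            break;

        case SRP_DLID_REDIRECT:
            /* The new DLID was copied into ch->path; just retry. */
            break;

        case SRP_STALE_CONN:
            ch->status = -ECONNRESET;
            return ch->status;

        default:
            return ch->status;
        }
    }
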
1038 static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey) in srp_inv_rkey() argument
1050 return ib_post_send(ch->qp, &wr, &bad_wr); in srp_inv_rkey()
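
srp_inv_rkey() is a one-work-request helper: it posts an IB_WR_LOCAL_INV send that invalidates the rkey of a fast-registration memory region once the I/O using it has completed. Reconstructed around the ib_post_send() call shown; the wr_id tag is an assumption based on the driver's convention of marking invalidate WRs:

    static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
    {
        struct ib_send_wr *bad_wr;
        struct ib_send_wr wr = {
            .opcode             = IB_WR_LOCAL_INV,
            .wr_id              = LOCAL_INV_WR_ID_MASK,  /* assumed tag */
            .next               = NULL,
            .num_sge            = 0,
            .send_flags         = 0,
            .ex.invalidate_rkey = rkey,
        };

        return ib_post_send(ch->qp, &wr, &bad_wr);
    }
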
1054 struct srp_rdma_ch *ch, in srp_unmap_data() argument
1057 struct srp_target_port *target = ch->target; in srp_unmap_data()
1071 res = srp_inv_rkey(ch, (*pfr)->mr->rkey); in srp_unmap_data()
1081 srp_fr_pool_put(ch->fr_pool, req->fr_list, in srp_unmap_data()
1105 static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch, in srp_claim_req() argument
1112 spin_lock_irqsave(&ch->lock, flags); in srp_claim_req()
1121 spin_unlock_irqrestore(&ch->lock, flags); in srp_claim_req()
1133 static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req, in srp_free_req() argument
1138 srp_unmap_data(scmnd, ch, req); in srp_free_req()
1140 spin_lock_irqsave(&ch->lock, flags); in srp_free_req()
1141 ch->req_lim += req_lim_delta; in srp_free_req()
1142 spin_unlock_irqrestore(&ch->lock, flags); in srp_free_req()
1145 static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req, in srp_finish_req() argument
1148 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL); in srp_finish_req()
1151 srp_free_req(ch, req, scmnd, 0); in srp_finish_req()
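
srp_claim_req() atomically (under ch->lock) takes ownership of the scmnd attached to a request, so exactly one of the normal completion path and the error/abort path finishes it; srp_free_req() then unmaps the data and returns the request-limit delta. srp_finish_req() combines the two to fail a command, roughly:

    static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
                               struct scsi_device *sdev, int result)
    {
        /* NULL: claim whatever command is attached to req, if any. */
        struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

        if (scmnd) {
            srp_free_req(ch, req, scmnd, 0);
            scmnd->result = result;
            scmnd->scsi_done(scmnd);
        }
    }
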
1160 struct srp_rdma_ch *ch; in srp_terminate_io() local
1173 ch = &target->ch[i]; in srp_terminate_io()
1176 struct srp_request *req = &ch->req_ring[j]; in srp_terminate_io()
1178 srp_finish_req(ch, req, NULL, in srp_terminate_io()
1196 struct srp_rdma_ch *ch; in srp_rport_reconnect() local
1211 ch = &target->ch[i]; in srp_rport_reconnect()
1212 if (!ch->target) in srp_rport_reconnect()
1214 ret += srp_new_cm_id(ch); in srp_rport_reconnect()
1217 ch = &target->ch[i]; in srp_rport_reconnect()
1218 if (!ch->target) in srp_rport_reconnect()
1221 struct srp_request *req = &ch->req_ring[j]; in srp_rport_reconnect()
1223 srp_finish_req(ch, req, NULL, DID_RESET << 16); in srp_rport_reconnect()
1227 ch = &target->ch[i]; in srp_rport_reconnect()
1228 if (!ch->target) in srp_rport_reconnect()
1235 ret += srp_create_ch_ib(ch); in srp_rport_reconnect()
1237 INIT_LIST_HEAD(&ch->free_tx); in srp_rport_reconnect()
1239 list_add(&ch->tx_ring[j]->list, &ch->free_tx); in srp_rport_reconnect()
1245 ch = &target->ch[i]; in srp_rport_reconnect()
1246 if (ret || !ch->target) in srp_rport_reconnect()
1248 ret = srp_connect_ch(ch, multich); in srp_rport_reconnect()
1274 struct srp_rdma_ch *ch) in srp_map_finish_fmr() argument
1279 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages, in srp_map_finish_fmr()
1293 struct srp_rdma_ch *ch) in srp_map_finish_fr() argument
1295 struct srp_target_port *target = ch->target; in srp_map_finish_fr()
1302 desc = srp_fr_pool_get(ch->fr_pool); in srp_map_finish_fr()
1331 return ib_post_send(ch->qp, &wr, &bad_wr); in srp_map_finish_fr()
1335 struct srp_rdma_ch *ch) in srp_finish_mapping() argument
1337 struct srp_target_port *target = ch->target; in srp_finish_mapping()
1348 srp_map_finish_fr(state, ch) : in srp_finish_mapping()
1349 srp_map_finish_fmr(state, ch); in srp_finish_mapping()
1369 struct srp_rdma_ch *ch, in srp_map_sg_entry() argument
1373 struct srp_target_port *target = ch->target; in srp_map_sg_entry()
1402 ret = srp_finish_mapping(state, ch); in srp_map_sg_entry()
1423 ret = srp_finish_mapping(state, ch); in srp_map_sg_entry()
1447 ret = srp_finish_mapping(state, ch); in srp_map_sg_entry()
1454 static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch, in srp_map_sg() argument
1458 struct srp_target_port *target = ch->target; in srp_map_sg()
1469 use_mr = !!ch->fr_pool; in srp_map_sg()
1472 use_mr = !!ch->fmr_pool; in srp_map_sg()
1476 if (srp_map_sg_entry(state, ch, sg, i, use_mr)) { in srp_map_sg()
1498 if (use_mr && srp_finish_mapping(state, ch)) in srp_map_sg()
1506 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch, in srp_map_data() argument
1509 struct srp_target_port *target = ch->target; in srp_map_data()
1571 srp_map_sg(&state, ch, req, scat, count); in srp_map_data()
1632 static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu, in srp_put_tx_iu() argument
1637 spin_lock_irqsave(&ch->lock, flags); in srp_put_tx_iu()
1638 list_add(&iu->list, &ch->free_tx); in srp_put_tx_iu()
1640 ++ch->req_lim; in srp_put_tx_iu()
1641 spin_unlock_irqrestore(&ch->lock, flags); in srp_put_tx_iu()
1657 static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch, in __srp_get_tx_iu() argument
1660 struct srp_target_port *target = ch->target; in __srp_get_tx_iu()
1664 srp_send_completion(ch->send_cq, ch); in __srp_get_tx_iu()
1666 if (list_empty(&ch->free_tx)) in __srp_get_tx_iu()
1671 if (ch->req_lim <= rsv) { in __srp_get_tx_iu()
1676 --ch->req_lim; in __srp_get_tx_iu()
1679 iu = list_first_entry(&ch->free_tx, struct srp_iu, list); in __srp_get_tx_iu()
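
__srp_get_tx_iu(), called with ch->lock held, implements SRP's credit-based flow control: ch->req_lim counts request credits granted by the target, SRP_TSK_MGMT_SQ_SIZE credits stay reserved for task management, and SRP responses bypass the accounting entirely because initiator responses do not consume a target credit. A sketch:

    static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
                                          enum srp_iu_type iu_type)
    {
        struct srp_target_port *target = ch->target;
        s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
        struct srp_iu *iu;

        /* Reap send completions so free_tx is as full as possible. */
        srp_send_completion(ch->send_cq, ch);

        if (list_empty(&ch->free_tx))
            return NULL;

        /* Initiator responses to target requests do not consume credits. */
        if (iu_type != SRP_IU_RSP) {
            if (ch->req_lim <= rsv) {
                ++target->zero_req_lim;
                return NULL;
            }
            --ch->req_lim;
        }

        iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
        list_del(&iu->list);
        return iu;
    }
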
1684 static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len) in srp_post_send() argument
1686 struct srp_target_port *target = ch->target; in srp_post_send()
1701 return ib_post_send(ch->qp, &wr, &bad_wr); in srp_post_send()
1704 static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu) in srp_post_recv() argument
1706 struct srp_target_port *target = ch->target; in srp_post_recv()
1719 return ib_post_recv(ch->qp, &wr, &bad_wr); in srp_post_recv()
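
srp_post_send() and srp_post_recv() are thin single-SGE wrappers: each builds one ib_sge over the IU's DMA buffer and one work request tagged with the IU pointer as wr_id, then posts it on the channel's QP. The send side, as a representative sketch (the receive side is symmetric with ib_post_recv()):

    static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
    {
        struct srp_target_port *target = ch->target;
        struct ib_sge list;
        struct ib_send_wr wr, *bad_wr;

        list.addr   = iu->dma;
        list.length = len;
        list.lkey   = target->lkey;

        wr.next       = NULL;
        wr.wr_id      = (uintptr_t) iu;  /* recovered in the send CQ handler */
        wr.sg_list    = &list;
        wr.num_sge    = 1;
        wr.opcode     = IB_WR_SEND;
        wr.send_flags = IB_SEND_SIGNALED;

        return ib_post_send(ch->qp, &wr, &bad_wr);
    }
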
1722 static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp) in srp_process_rsp() argument
1724 struct srp_target_port *target = ch->target; in srp_process_rsp()
1730 spin_lock_irqsave(&ch->lock, flags); in srp_process_rsp()
1731 ch->req_lim += be32_to_cpu(rsp->req_lim_delta); in srp_process_rsp()
1732 spin_unlock_irqrestore(&ch->lock, flags); in srp_process_rsp()
1734 ch->tsk_mgmt_status = -1; in srp_process_rsp()
1736 ch->tsk_mgmt_status = rsp->data[3]; in srp_process_rsp()
1737 complete(&ch->tsk_mgmt_done); in srp_process_rsp()
1742 scmnd = srp_claim_req(ch, req, NULL, scmnd); in srp_process_rsp()
1747 rsp->tag, ch - target->ch, ch->qp->qp_num); in srp_process_rsp()
1749 spin_lock_irqsave(&ch->lock, flags); in srp_process_rsp()
1750 ch->req_lim += be32_to_cpu(rsp->req_lim_delta); in srp_process_rsp()
1751 spin_unlock_irqrestore(&ch->lock, flags); in srp_process_rsp()
1773 srp_free_req(ch, req, scmnd, in srp_process_rsp()
1781 static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta, in srp_response_common() argument
1784 struct srp_target_port *target = ch->target; in srp_response_common()
1790 spin_lock_irqsave(&ch->lock, flags); in srp_response_common()
1791 ch->req_lim += req_delta; in srp_response_common()
1792 iu = __srp_get_tx_iu(ch, SRP_IU_RSP); in srp_response_common()
1793 spin_unlock_irqrestore(&ch->lock, flags); in srp_response_common()
1805 err = srp_post_send(ch, iu, len); in srp_response_common()
1809 srp_put_tx_iu(ch, iu, SRP_IU_RSP); in srp_response_common()
1815 static void srp_process_cred_req(struct srp_rdma_ch *ch, in srp_process_cred_req() argument
1824 if (srp_response_common(ch, delta, &rsp, sizeof(rsp))) in srp_process_cred_req()
1825 shost_printk(KERN_ERR, ch->target->scsi_host, PFX in srp_process_cred_req()
1829 static void srp_process_aer_req(struct srp_rdma_ch *ch, in srp_process_aer_req() argument
1832 struct srp_target_port *target = ch->target; in srp_process_aer_req()
1842 if (srp_response_common(ch, delta, &rsp, sizeof(rsp))) in srp_process_aer_req()
1847 static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc) in srp_handle_recv() argument
1849 struct srp_target_port *target = ch->target; in srp_handle_recv()
1855 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len, in srp_handle_recv()
1869 srp_process_rsp(ch, iu->buf); in srp_handle_recv()
1873 srp_process_cred_req(ch, iu->buf); in srp_handle_recv()
1877 srp_process_aer_req(ch, iu->buf); in srp_handle_recv()
1892 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len, in srp_handle_recv()
1895 res = srp_post_recv(ch, iu); in srp_handle_recv()
1918 bool send_err, struct srp_rdma_ch *ch) in srp_handle_qp_err() argument
1920 struct srp_target_port *target = ch->target; in srp_handle_qp_err()
1923 complete(&ch->done); in srp_handle_qp_err()
1927 if (ch->connected && !target->qp_in_error) { in srp_handle_qp_err()
1949 struct srp_rdma_ch *ch = ch_ptr; in srp_recv_completion() local
1955 srp_handle_recv(ch, &wc); in srp_recv_completion()
1957 srp_handle_qp_err(wc.wr_id, wc.status, false, ch); in srp_recv_completion()
1964 struct srp_rdma_ch *ch = ch_ptr; in srp_send_completion() local
1971 list_add(&iu->list, &ch->free_tx); in srp_send_completion()
1973 srp_handle_qp_err(wc.wr_id, wc.status, true, ch); in srp_send_completion()
1982 struct srp_rdma_ch *ch; in srp_queuecommand() local
2008 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)]; in srp_queuecommand()
2014 spin_lock_irqsave(&ch->lock, flags); in srp_queuecommand()
2015 iu = __srp_get_tx_iu(ch, SRP_IU_CMD); in srp_queuecommand()
2016 spin_unlock_irqrestore(&ch->lock, flags); in srp_queuecommand()
2021 req = &ch->req_ring[idx]; in srp_queuecommand()
2039 len = srp_map_data(scmnd, ch, req); in srp_queuecommand()
2057 if (srp_post_send(ch, iu, len)) { in srp_queuecommand()
2071 srp_unmap_data(scmnd, ch, req); in srp_queuecommand()
2074 srp_put_tx_iu(ch, iu, SRP_IU_CMD); in srp_queuecommand()
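
srp_queuecommand() as a whole: the blk-mq tag selects the hardware queue and thus the RDMA channel, an IU is taken under ch->lock, srp_map_data() maps the scatterlist and builds the SRP_CMD, and the IU is posted; on failure the mapping and the IU are rolled back in reverse order. Condensed and restructured without the driver's goto labels, with DMA syncs and error reporting elided:

    /* Pick the channel from the hardware-queue index encoded in the tag. */
    ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];

    spin_lock_irqsave(&ch->lock, flags);
    iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
    spin_unlock_irqrestore(&ch->lock, flags);

    if (iu) {
        req = &ch->req_ring[idx];   /* idx = blk_mq_unique_tag_to_tag(tag) */
        len = srp_map_data(scmnd, ch, req); /* map SG list, build SRP_CMD */
        if (len >= 0 && srp_post_send(ch, iu, len) == 0)
            return 0;               /* command is in flight */

        /* Roll back in reverse order on failure. */
        if (len >= 0)
            srp_unmap_data(scmnd, ch, req);
        srp_put_tx_iu(ch, iu, SRP_IU_CMD);
    }
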
2097 static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch) in srp_alloc_iu_bufs() argument
2099 struct srp_target_port *target = ch->target; in srp_alloc_iu_bufs()
2102 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring), in srp_alloc_iu_bufs()
2104 if (!ch->rx_ring) in srp_alloc_iu_bufs()
2106 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring), in srp_alloc_iu_bufs()
2108 if (!ch->tx_ring) in srp_alloc_iu_bufs()
2112 ch->rx_ring[i] = srp_alloc_iu(target->srp_host, in srp_alloc_iu_bufs()
2113 ch->max_ti_iu_len, in srp_alloc_iu_bufs()
2115 if (!ch->rx_ring[i]) in srp_alloc_iu_bufs()
2120 ch->tx_ring[i] = srp_alloc_iu(target->srp_host, in srp_alloc_iu_bufs()
2123 if (!ch->tx_ring[i]) in srp_alloc_iu_bufs()
2126 list_add(&ch->tx_ring[i]->list, &ch->free_tx); in srp_alloc_iu_bufs()
2133 srp_free_iu(target->srp_host, ch->rx_ring[i]); in srp_alloc_iu_bufs()
2134 srp_free_iu(target->srp_host, ch->tx_ring[i]); in srp_alloc_iu_bufs()
2139 kfree(ch->tx_ring); in srp_alloc_iu_bufs()
2140 ch->tx_ring = NULL; in srp_alloc_iu_bufs()
2141 kfree(ch->rx_ring); in srp_alloc_iu_bufs()
2142 ch->rx_ring = NULL; in srp_alloc_iu_bufs()
2176 struct srp_rdma_ch *ch) in srp_cm_rep_handler() argument
2178 struct srp_target_port *target = ch->target; in srp_cm_rep_handler()
2185 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len); in srp_cm_rep_handler()
2186 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta); in srp_cm_rep_handler()
2193 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE, in srp_cm_rep_handler()
2205 if (!ch->rx_ring) { in srp_cm_rep_handler()
2206 ret = srp_alloc_iu_bufs(ch); in srp_cm_rep_handler()
2221 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); in srp_cm_rep_handler()
2226 struct srp_iu *iu = ch->rx_ring[i]; in srp_cm_rep_handler()
2228 ret = srp_post_recv(ch, iu); in srp_cm_rep_handler()
2240 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); in srp_cm_rep_handler()
2250 ch->status = ret; in srp_cm_rep_handler()
2255 struct srp_rdma_ch *ch) in srp_cm_rej_handler() argument
2257 struct srp_target_port *target = ch->target; in srp_cm_rej_handler()
2265 ch->path.dlid = cpi->redirect_lid; in srp_cm_rej_handler()
2266 ch->path.pkey = cpi->redirect_pkey; in srp_cm_rej_handler()
2268 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16); in srp_cm_rej_handler()
2270 ch->status = ch->path.dlid ? in srp_cm_rej_handler()
2281 memcpy(ch->path.dgid.raw, in srp_cm_rej_handler()
2286 be64_to_cpu(ch->path.dgid.global.subnet_prefix), in srp_cm_rej_handler()
2287 be64_to_cpu(ch->path.dgid.global.interface_id)); in srp_cm_rej_handler()
2289 ch->status = SRP_PORT_REDIRECT; in srp_cm_rej_handler()
2293 ch->status = -ECONNRESET; in srp_cm_rej_handler()
2300 ch->status = -ECONNRESET; in srp_cm_rej_handler()
2321 ch->status = -ECONNRESET; in srp_cm_rej_handler()
2326 ch->status = SRP_STALE_CONN; in srp_cm_rej_handler()
2332 ch->status = -ECONNRESET; in srp_cm_rej_handler()
2338 struct srp_rdma_ch *ch = cm_id->context; in srp_cm_handler() local
2339 struct srp_target_port *target = ch->target; in srp_cm_handler()
2347 ch->status = -ECONNRESET; in srp_cm_handler()
2352 srp_cm_rep_handler(cm_id, event->private_data, ch); in srp_cm_handler()
2359 srp_cm_rej_handler(cm_id, event, ch); in srp_cm_handler()
2365 ch->connected = false; in srp_cm_handler()
2377 ch->status = 0; in srp_cm_handler()
2392 complete(&ch->done); in srp_cm_handler()
2412 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, in srp_send_tsk_mgmt() argument
2415 struct srp_target_port *target = ch->target; in srp_send_tsk_mgmt()
2421 if (!ch->connected || target->qp_in_error) in srp_send_tsk_mgmt()
2424 init_completion(&ch->tsk_mgmt_done); in srp_send_tsk_mgmt()
2431 spin_lock_irq(&ch->lock); in srp_send_tsk_mgmt()
2432 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT); in srp_send_tsk_mgmt()
2433 spin_unlock_irq(&ch->lock); in srp_send_tsk_mgmt()
2454 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) { in srp_send_tsk_mgmt()
2455 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT); in srp_send_tsk_mgmt()
2462 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done, in srp_send_tsk_mgmt()
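
srp_send_tsk_mgmt() follows the same IU discipline as the fast path but draws on the reserved task-management credits and uses its own completion: build the SRP_TSK_MGMT IU, post it, and wait with a timeout for srp_process_rsp() to complete ch->tsk_mgmt_done. A condensed sketch (the timeout constant name is assumed from contemporary versions of the driver):

    if (!ch->connected || target->qp_in_error)
        return -1;

    init_completion(&ch->tsk_mgmt_done);

    spin_lock_irq(&ch->lock);
    iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);  /* uses reserved credits */
    spin_unlock_irq(&ch->lock);
    if (!iu)
        return -1;

    /* ... fill in struct srp_tsk_mgmt: req_tag, lun, tsk_mgmt_func ... */

    if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
        srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
        return -1;
    }

    /* srp_process_rsp() completes this when the TM response arrives. */
    if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
                    msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
        return -1;

    return 0;
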
2475 struct srp_rdma_ch *ch; in srp_abort() local
2486 ch = &target->ch[ch_idx]; in srp_abort()
2487 if (!srp_claim_req(ch, req, NULL, scmnd)) in srp_abort()
2491 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun, in srp_abort()
2498 srp_free_req(ch, req, scmnd, 0); in srp_abort()
2508 struct srp_rdma_ch *ch; in srp_reset_device() local
2513 ch = &target->ch[0]; in srp_reset_device()
2514 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun, in srp_reset_device()
2517 if (ch->tsk_mgmt_status) in srp_reset_device()
2521 ch = &target->ch[i]; in srp_reset_device()
2523 struct srp_request *req = &ch->req_ring[i]; in srp_reset_device()
2525 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16); in srp_reset_device()
2603 struct srp_rdma_ch *ch = &target->ch[0]; in show_dgid() local
2605 return sprintf(buf, "%pI6\n", ch->path.dgid.raw); in show_dgid()
2620 struct srp_rdma_ch *ch; in show_req_lim() local
2624 ch = &target->ch[i]; in show_req_lim()
2625 req_lim = min(req_lim, ch->req_lim); in show_req_lim()
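
The req_lim sysfs attribute reports the most pessimistic credit count across channels, i.e. the minimum of ch->req_lim over all target->ch_count channels. A sketch of the attribute body, assuming the driver's usual host_to_target() accessor:

    static ssize_t show_req_lim(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
        struct srp_target_port *target = host_to_target(class_to_shost(dev));
        struct srp_rdma_ch *ch;
        int i, req_lim = INT_MAX;

        for (i = 0; i < target->ch_count; i++) {
            ch = &target->ch[i];
            req_lim = min(req_lim, ch->req_lim);
        }
        return sprintf(buf, "%d\n", req_lim);
    }
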
3138 struct srp_rdma_ch *ch; in srp_create_target() local
3222 target->ch = kcalloc(target->ch_count, sizeof(*target->ch), in srp_create_target()
3224 if (!target->ch) in srp_create_target()
3246 ch = &target->ch[ch_start + cpu_idx]; in srp_create_target()
3247 ch->target = target; in srp_create_target()
3248 ch->comp_vector = cv_start == cv_end ? cv_start : in srp_create_target()
3250 spin_lock_init(&ch->lock); in srp_create_target()
3251 INIT_LIST_HEAD(&ch->free_tx); in srp_create_target()
3252 ret = srp_new_cm_id(ch); in srp_create_target()
3256 ret = srp_create_ch_ib(ch); in srp_create_target()
3260 ret = srp_alloc_req_data(ch); in srp_create_target()
3264 ret = srp_connect_ch(ch, multich); in srp_create_target()
3273 srp_free_ch_ib(target, ch); in srp_create_target()
3274 srp_free_req_data(target, ch); in srp_create_target()
3275 target->ch_count = ch - target->ch; in srp_create_target()
3318 ch = &target->ch[i]; in srp_create_target()
3319 srp_free_ch_ib(target, ch); in srp_create_target()
3320 srp_free_req_data(target, ch); in srp_create_target()
3323 kfree(target->ch); in srp_create_target()
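
Channel bring-up in srp_create_target() fixes the dependency order visible above: initialize the lock and free-TX list, then the CM ID, then the IB resources (CQs/QP/MR pool), then the request ring, and connect last; teardown after a partial failure runs the same steps in reverse (srp_free_ch_ib(), srp_free_req_data(), finally kfree(target->ch)). Per channel, roughly:

    ch = &target->ch[ch_start + cpu_idx];
    ch->target = target;
    /* comp_vector selection (spreading channels over vectors) elided */
    spin_lock_init(&ch->lock);
    INIT_LIST_HEAD(&ch->free_tx);

    ret = srp_new_cm_id(ch);                    /* 1. IB CM ID         */
    if (ret == 0)
        ret = srp_create_ch_ib(ch);             /* 2. CQs, QP, MR pool */
    if (ret == 0)
        ret = srp_alloc_req_data(ch);           /* 3. request ring     */
    if (ret == 0)
        ret = srp_connect_ch(ch, multich);      /* 4. SRP login        */
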