Home
last modified time | relevance | path

Searched refs: send_cq (Results 1 – 49 of 49) sorted by relevance

/linux-4.4.14/drivers/staging/rdma/amso1100/
Dc2_qp.c422 struct c2_cq *send_cq = to_c2cq(qp_attrs->send_cq); in c2_alloc_qp() local
462 wr.sq_cq_handle = send_cq->adapter_handle; in c2_alloc_qp()
574 static inline void c2_lock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq) in c2_lock_cqs() argument
576 if (send_cq == recv_cq) in c2_lock_cqs()
577 spin_lock_irq(&send_cq->lock); in c2_lock_cqs()
578 else if (send_cq > recv_cq) { in c2_lock_cqs()
579 spin_lock_irq(&send_cq->lock); in c2_lock_cqs()
583 spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING); in c2_lock_cqs()
587 static inline void c2_unlock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq) in c2_unlock_cqs() argument
589 if (send_cq == recv_cq) in c2_unlock_cqs()
[all …]
/linux-4.4.14/drivers/infiniband/ulp/ipoib/
Dipoib_verbs.c186 priv->send_cq = ib_create_cq(priv->ca, ipoib_send_comp_handler, NULL, in ipoib_transport_dev_init()
188 if (IS_ERR(priv->send_cq)) { in ipoib_transport_dev_init()
196 init_attr.send_cq = priv->send_cq; in ipoib_transport_dev_init()
239 ib_destroy_cq(priv->send_cq); in ipoib_transport_dev_init()
269 if (ib_destroy_cq(priv->send_cq)) in ipoib_transport_dev_cleanup()
Dipoib_cm.c254 .send_cq = priv->recv_cq, /* For drain WR */ in ipoib_cm_create_rx_qp()
759 rc = ib_req_notify_cq(priv->send_cq, in ipoib_cm_send()
764 ipoib_send_comp_handler(priv->send_cq, dev); in ipoib_cm_send()
1020 .send_cq = priv->recv_cq, in ipoib_cm_create_tx_qp()
Dipoib_ib.c428 n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc); in poll_tx()
592 if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP)) in ipoib_send()
Dipoib.h346 struct ib_cq *send_cq; member
/linux-4.4.14/drivers/infiniband/hw/mlx5/
Dqp.c962 scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq); in create_qp_common()
1013 if (init_attr->send_cq) in create_qp_common()
1014 in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn); in create_qp_common()
1048 static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq) in mlx5_ib_lock_cqs() argument
1049 __acquires(&send_cq->lock) __acquires(&recv_cq->lock) in mlx5_ib_lock_cqs()
1051 if (send_cq) { in mlx5_ib_lock_cqs()
1053 if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { in mlx5_ib_lock_cqs()
1054 spin_lock_irq(&send_cq->lock); in mlx5_ib_lock_cqs()
1057 } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) { in mlx5_ib_lock_cqs()
1058 spin_lock_irq(&send_cq->lock); in mlx5_ib_lock_cqs()
[all …]
Dmain.c1109 init_attr->send_cq = cq; in create_umr_res()
/linux-4.4.14/drivers/infiniband/hw/mthca/
Dmthca_qp.c719 qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn); in __mthca_modify_qp()
821 if (qp->ibqp.send_cq != qp->ibqp.recv_cq) in __mthca_modify_qp()
822 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL); in __mthca_modify_qp()
1143 struct mthca_cq *send_cq, in mthca_alloc_qp_common() argument
1271 struct mthca_cq *send_cq, in mthca_alloc_qp() argument
1298 err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq, in mthca_alloc_qp()
1313 static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) in mthca_lock_cqs() argument
1314 __acquires(&send_cq->lock) __acquires(&recv_cq->lock) in mthca_lock_cqs()
1316 if (send_cq == recv_cq) { in mthca_lock_cqs()
1317 spin_lock_irq(&send_cq->lock); in mthca_lock_cqs()
[all …]
Dmthca_dev.h545 struct mthca_cq *send_cq,
553 struct mthca_cq *send_cq,
Dmthca_provider.c570 to_mcq(init_attr->send_cq), in mthca_create_qp()
605 to_mcq(init_attr->send_cq), in mthca_create_qp()
/linux-4.4.14/drivers/infiniband/hw/mlx4/
Dqp.c50 static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq,
52 static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq,
863 mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq), in create_qp_common()
872 mcq = to_mcq(init_attr->send_cq); in create_qp_common()
876 mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq), in create_qp_common()
933 static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq) in mlx4_ib_lock_cqs() argument
934 __acquires(&send_cq->lock) __acquires(&recv_cq->lock) in mlx4_ib_lock_cqs()
936 if (send_cq == recv_cq) { in mlx4_ib_lock_cqs()
937 spin_lock(&send_cq->lock); in mlx4_ib_lock_cqs()
939 } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { in mlx4_ib_lock_cqs()
[all …]
Dmain.c2673 send_mcq = to_mcq(mqp->ibqp.send_cq); in mlx4_ib_handle_catas_error()
2676 mqp->ibqp.send_cq->comp_handler) { in mlx4_ib_handle_catas_error()
Dmad.c1627 qp_init_attr.init_attr.send_cq = ctx->cq; in create_pv_sqp()
/linux-4.4.14/net/rds/
Diw_cm.c203 attr->send_cq = ib_create_cq(dev, send_cq_handler, in rds_iw_init_qp_attrs()
206 if (IS_ERR(attr->send_cq)) { in rds_iw_init_qp_attrs()
207 ret = PTR_ERR(attr->send_cq); in rds_iw_init_qp_attrs()
208 attr->send_cq = NULL; in rds_iw_init_qp_attrs()
224 ret = ib_req_notify_cq(attr->send_cq, IB_CQ_NEXT_COMP); in rds_iw_init_qp_attrs()
238 if (attr->send_cq) in rds_iw_init_qp_attrs()
239 ib_destroy_cq(attr->send_cq); in rds_iw_init_qp_attrs()
281 ic->i_send_cq = attr.send_cq; in rds_iw_setup_qp()
Dib_cm.c421 attr.send_cq = ic->i_send_cq; in rds_ib_setup_qp()
/linux-4.4.14/drivers/staging/rdma/ehca/
Dehca_qp.c409 list = &qp->send_cq->sqp_err_list; in ehca_add_to_err_list()
635 if (init_attr->send_cq) in internal_create_qp()
636 my_qp->send_cq = in internal_create_qp()
637 container_of(init_attr->send_cq, struct ehca_cq, ib_cq); in internal_create_qp()
683 if (my_qp->send_cq) in internal_create_qp()
684 parms.send_cq_handle = my_qp->send_cq->ipz_cq_handle; in internal_create_qp()
829 my_qp->ib_qp.send_cq = init_attr->send_cq; in internal_create_qp()
878 if (my_qp->send_cq) { in internal_create_qp()
879 ret = ehca_cq_assign_qp(my_qp->send_cq, my_qp); in internal_create_qp()
916 ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num); in internal_create_qp()
[all …]
Dehca_classes.h207 struct ehca_cq *send_cq; member
Dehca_main.c581 qp_init_attr.send_cq = ibcq; in ehca_create_aqp1()
/linux-4.4.14/drivers/net/ethernet/ibm/ehea/
Dehea_main.c209 arr[i++].fwh = pr->send_cq->fw_handle; in ehea_update_firmware_handles()
816 struct ehea_cq *send_cq = pr->send_cq; in ehea_proc_cqes() local
825 cqe = ehea_poll_cq(send_cq); in ehea_proc_cqes()
827 ehea_inc_cq(send_cq); in ehea_proc_cqes()
867 cqe = ehea_poll_cq(send_cq); in ehea_proc_cqes()
870 ehea_update_feca(send_cq, cqe_counter); in ehea_proc_cqes()
905 ehea_reset_cq_ep(pr->send_cq); in ehea_poll()
907 ehea_reset_cq_n1(pr->send_cq); in ehea_poll()
910 cqe_skb = ehea_poll_cq(pr->send_cq); in ehea_poll()
1501 pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq, in ehea_init_port_res()
[all …]
Dehea.h364 struct ehea_cq *send_cq; member
/linux-4.4.14/drivers/infiniband/ulp/srp/
Dib_srp.h145 struct ib_cq *send_cq; member
Dib_srp.c487 struct ib_cq *recv_cq, *send_cq; in srp_create_ch_ib() local
511 send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch, in srp_create_ch_ib()
513 if (IS_ERR(send_cq)) { in srp_create_ch_ib()
514 ret = PTR_ERR(send_cq); in srp_create_ch_ib()
527 init_attr->send_cq = send_cq; in srp_create_ch_ib()
562 if (ch->send_cq) in srp_create_ch_ib()
563 ib_destroy_cq(ch->send_cq); in srp_create_ch_ib()
567 ch->send_cq = send_cq; in srp_create_ch_ib()
586 ib_destroy_cq(send_cq); in srp_create_ch_ib()
626 ib_destroy_cq(ch->send_cq); in srp_free_ch_ib()
[all …]
/linux-4.4.14/drivers/infiniband/core/
Dverbs.c621 qp->send_cq = qp->recv_cq = NULL; in ib_create_qp()
649 qp->send_cq = qp_init_attr->send_cq; in ib_create_qp()
653 atomic_inc(&qp_init_attr->send_cq->usecnt); in ib_create_qp()
1132 scq = qp->send_cq; in ib_destroy_qp()
Duverbs_cmd.c1825 attr.send_cq = scq; in create_qp()
1875 qp->send_cq = attr.send_cq; in create_qp()
1883 atomic_inc(&attr.send_cq->usecnt); in create_qp()
Dmad.c3105 qp_init_attr.send_cq = qp_info->port_priv->cq; in create_mad_qp()
/linux-4.4.14/net/sunrpc/xprtrdma/
Dverbs.c271 while (ib_poll_cq(ep->rep_attr.send_cq, 1, &wc) > 0) in rpcrdma_flush_cqs()
662 ep->rep_attr.send_cq = sendcq; in rpcrdma_ep_create()
717 rpcrdma_clean_cq(ep->rep_attr.send_cq); in rpcrdma_ep_destroy()
729 rc = ib_destroy_cq(ep->rep_attr.send_cq); in rpcrdma_ep_destroy()
Dsvc_rdma_transport.c972 qp_attr.send_cq = newxprt->sc_sq_cq; in svc_rdma_accept()
/linux-4.4.14/drivers/staging/rdma/ipath/
Dipath_qp.c657 init_attr->send_cq = qp->ibqp.send_cq; in ipath_query_qp()
Dipath_ruc.c719 ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, in ipath_send_complete()
Dipath_rc.c960 ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0); in do_rc_ack()
/linux-4.4.14/net/9p/
Dtrans_rdma.c721 qp_attr.send_cq = rdma->cq; in rdma_create_trans()
/linux-4.4.14/drivers/infiniband/hw/qib/
Dqib_qp.c892 init_attr->send_cq = qp->ibqp.send_cq; in qib_query_qp()
Dqib_ruc.c803 qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, in qib_send_complete()
Dqib_rc.c1028 qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0); in qib_rc_send_complete()
1084 qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0); in do_rc_completion()
/linux-4.4.14/drivers/infiniband/hw/cxgb4/
Dqp.c1182 schp = to_c4iw_cq(qhp->ibqp.send_cq); in flush_qp()
1642 schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid); in c4iw_create_qp()
1693 qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid; in c4iw_create_qp()
Dcq.c240 struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq); in c4iw_flush_sq()
/linux-4.4.14/drivers/infiniband/hw/usnic/
Dusnic_ib_verbs.c513 cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2; in usnic_ib_create_qp()
/linux-4.4.14/drivers/staging/rdma/hfi1/
Dqp.c928 init_attr->send_cq = qp->ibqp.send_cq; in hfi1_query_qp()
Druc.c930 hfi1_cq_enter(to_icq(qp->ibqp.send_cq), &wc, in hfi1_send_complete()
Drc.c1042 hfi1_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0); in hfi1_rc_send_complete()
1099 hfi1_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0); in do_rc_completion()
/linux-4.4.14/include/rdma/
Dib_verbs.h885 struct ib_cq *send_cq; member
1341 struct ib_cq *send_cq; member
/linux-4.4.14/drivers/infiniband/hw/cxgb3/
Diwch_provider.c935 schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid); in iwch_create_qp()
991 qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid; in iwch_create_qp()
/linux-4.4.14/drivers/infiniband/ulp/iser/
Diser_verbs.c493 init_attr.send_cq = ib_conn->comp->cq; in iser_create_ib_conn_res()
/linux-4.4.14/drivers/infiniband/hw/nes/
Dnes_verbs.c1248 nescq = to_nescq(init_attr->send_cq); in nes_create_qp()
2829 init_attr->send_cq = nesqp->ibqp.send_cq; in nes_query_qp()
/linux-4.4.14/drivers/infiniband/hw/ocrdma/
Docrdma_verbs.c1275 if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) || in ocrdma_check_qp_params()
1402 dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq); in ocrdma_store_gsi_qp_cq()
Docrdma_hw.c2438 cq = get_ocrdma_cq(attrs->send_cq); in ocrdma_mbx_create_qp()
/linux-4.4.14/drivers/staging/lustre/lnet/klnds/o2iblnd/
Do2iblnd.c774 init_qp_attr->send_cq = cq; in kiblnd_create_conn()
/linux-4.4.14/drivers/infiniband/ulp/isert/
Dib_isert.c157 attr.send_cq = comp->cq; in isert_create_qp()
/linux-4.4.14/drivers/infiniband/ulp/srpt/
Dib_srpt.c2045 qp_init->send_cq = ch->cq; in srpt_create_ch_ib()