Lines Matching refs:qhp — cross-reference hits grouped by enclosing function; the leading number is the line in the source file, and the trailing "argument"/"local" tags mark the lines where qhp enters each function's scope.

86 static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)  in set_state()  argument
89 spin_lock_irqsave(&qhp->lock, flag); in set_state()
90 qhp->attr.state = state; in set_state()
91 spin_unlock_irqrestore(&qhp->lock, flag); in set_state()
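The set_state() hits above cover essentially the whole helper; a minimal reconstruction, with only the local flag declaration and braces filled in, looks like:

static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
{
	unsigned long flag;

	/* attr.state is always updated under the QP spinlock. */
	spin_lock_irqsave(&qhp->lock, flag);
	qhp->attr.state = state;
	spin_unlock_irqrestore(&qhp->lock, flag);
}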
573 static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe, in build_rdma_recv() argument
578 ret = build_isgl((__be64 *)qhp->wq.rq.queue, in build_rdma_recv()
579 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size], in build_rdma_recv()
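The build_rdma_recv() hits show the receive SGL being validated against the start and end of the RQ queue memory. A hedged reconstruction of the full function, with the unmatched tail (the remaining build_isgl() arguments and the len16 computation) filled in as assumptions:

static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
			   struct ib_recv_wr *wr, u8 *len16)
{
	int ret;

	/* Bounds-check and copy the SGL into the WQE. */
	ret = build_isgl((__be64 *)qhp->wq.rq.queue,
			 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
			 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
	if (ret)
		return ret;
	/* WR length in 16-byte units (assumed layout). */
	*len16 = DIV_ROUND_UP(sizeof(wqe->recv) +
			      wr->num_sge * sizeof(struct fw_ri_sge), 16);
	return 0;
}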
688 static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc) in ring_kernel_sq_db() argument
692 spin_lock_irqsave(&qhp->rhp->lock, flags); in ring_kernel_sq_db()
693 spin_lock(&qhp->lock); in ring_kernel_sq_db()
694 if (qhp->rhp->db_state == NORMAL) in ring_kernel_sq_db()
695 t4_ring_sq_db(&qhp->wq, inc, in ring_kernel_sq_db()
696 is_t5(qhp->rhp->rdev.lldi.adapter_type), NULL); in ring_kernel_sq_db()
698 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry); in ring_kernel_sq_db()
699 qhp->wq.sq.wq_pidx_inc += inc; in ring_kernel_sq_db()
701 spin_unlock(&qhp->lock); in ring_kernel_sq_db()
702 spin_unlock_irqrestore(&qhp->rhp->lock, flags); in ring_kernel_sq_db()
706 static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc) in ring_kernel_rq_db() argument
710 spin_lock_irqsave(&qhp->rhp->lock, flags); in ring_kernel_rq_db()
711 spin_lock(&qhp->lock); in ring_kernel_rq_db()
712 if (qhp->rhp->db_state == NORMAL) in ring_kernel_rq_db()
713 t4_ring_rq_db(&qhp->wq, inc, in ring_kernel_rq_db()
714 is_t5(qhp->rhp->rdev.lldi.adapter_type), NULL); in ring_kernel_rq_db()
716 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry); in ring_kernel_rq_db()
717 qhp->wq.rq.wq_pidx_inc += inc; in ring_kernel_rq_db()
719 spin_unlock(&qhp->lock); in ring_kernel_rq_db()
720 spin_unlock_irqrestore(&qhp->rhp->lock, flags); in ring_kernel_rq_db()
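Both doorbell helpers share one pattern: take the device lock, then the QP lock; if doorbells are in the NORMAL state, ring the hardware doorbell directly, otherwise park the QP on the device's flow-control list and bank the producer-index increment for the recovery path. A sketch of the SQ variant (the else-branch structure and the return value are inferred from the matched lines, not matched themselves):

static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
{
	unsigned long flags;

	spin_lock_irqsave(&qhp->rhp->lock, flags);
	spin_lock(&qhp->lock);
	if (qhp->rhp->db_state == NORMAL)
		t4_ring_sq_db(&qhp->wq, inc,
			      is_t5(qhp->rhp->rdev.lldi.adapter_type), NULL);
	else {
		/* Doorbells stalled: defer the ring via the flow-control list. */
		add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
		qhp->wq.sq.wq_pidx_inc += inc;
	}
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&qhp->rhp->lock, flags);
	return 0;
}

ring_kernel_rq_db() is the same function with rq substituted for sq.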
731 struct c4iw_qp *qhp; in c4iw_post_send() local
738 qhp = to_c4iw_qp(ibqp); in c4iw_post_send()
739 spin_lock_irqsave(&qhp->lock, flag); in c4iw_post_send()
740 if (t4_wq_in_error(&qhp->wq)) { in c4iw_post_send()
741 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_send()
744 num_wrs = t4_sq_avail(&qhp->wq); in c4iw_post_send()
746 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_send()
755 wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue + in c4iw_post_send()
756 qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE); in c4iw_post_send()
761 if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all) in c4iw_post_send()
763 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx]; in c4iw_post_send()
774 err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16); in c4iw_post_send()
779 err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16); in c4iw_post_send()
793 if (!qhp->wq.sq.oldest_read) in c4iw_post_send()
794 qhp->wq.sq.oldest_read = swsqe; in c4iw_post_send()
799 err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16, in c4iw_post_send()
801 qhp->rhp->rdev.lldi.adapter_type) ? in c4iw_post_send()
820 swsqe->idx = qhp->wq.sq.pidx; in c4iw_post_send()
823 qhp->sq_sig_all; in c4iw_post_send()
828 qhp->rhp->rdev.lldi.ports[0]); in c4iw_post_send()
832 init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16); in c4iw_post_send()
835 __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx, in c4iw_post_send()
839 t4_sq_produce(&qhp->wq, len16); in c4iw_post_send()
842 if (!qhp->rhp->rdev.status_page->db_off) { in c4iw_post_send()
843 t4_ring_sq_db(&qhp->wq, idx, in c4iw_post_send()
844 is_t5(qhp->rhp->rdev.lldi.adapter_type), wqe); in c4iw_post_send()
845 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_send()
847 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_send()
848 ring_kernel_sq_db(qhp, idx); in c4iw_post_send()
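c4iw_post_send() is the busiest qhp user. A condensed skeleton of the control flow the hits trace, with error paths and the per-opcode switch elided (not compilable as shown):

	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {		/* QP already in error */
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = t4_sq_avail(&qhp->wq);	/* free SQ slots */
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		/* pidx-addressed slot in the SQ ring */
		wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
		      qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
		swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
		/* build_rdma_send()/build_rdma_write()/build_fastreg()/...
		 * fill the WQE and set len16 for this opcode */
		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
		t4_sq_produce(&qhp->wq, len16);
		wr = wr->next;
	}
	if (!qhp->rhp->rdev.status_page->db_off) {
		/* Doorbells live: ring while still holding qhp->lock. */
		t4_ring_sq_db(&qhp->wq, idx,
			      is_t5(qhp->rhp->rdev.lldi.adapter_type), wqe);
		spin_unlock_irqrestore(&qhp->lock, flag);
	} else {
		/* Doorbells off: drop the lock, take the flow-controlled path. */
		spin_unlock_irqrestore(&qhp->lock, flag);
		ring_kernel_sq_db(qhp, idx);
	}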
857 struct c4iw_qp *qhp; in c4iw_post_receive() local
864 qhp = to_c4iw_qp(ibqp); in c4iw_post_receive()
865 spin_lock_irqsave(&qhp->lock, flag); in c4iw_post_receive()
866 if (t4_wq_in_error(&qhp->wq)) { in c4iw_post_receive()
867 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_receive()
870 num_wrs = t4_rq_avail(&qhp->wq); in c4iw_post_receive()
872 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_receive()
881 wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue + in c4iw_post_receive()
882 qhp->wq.rq.wq_pidx * in c4iw_post_receive()
885 err = build_rdma_recv(qhp, wqe, wr, &len16); in c4iw_post_receive()
893 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id; in c4iw_post_receive()
895 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts = in c4iw_post_receive()
897 qhp->rhp->rdev.lldi.ports[0]); in c4iw_post_receive()
899 &qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_ts); in c4iw_post_receive()
904 wqe->recv.wrid = qhp->wq.rq.pidx; in c4iw_post_receive()
910 (unsigned long long) wr->wr_id, qhp->wq.rq.pidx); in c4iw_post_receive()
911 t4_rq_produce(&qhp->wq, len16); in c4iw_post_receive()
916 if (!qhp->rhp->rdev.status_page->db_off) { in c4iw_post_receive()
917 t4_ring_rq_db(&qhp->wq, idx, in c4iw_post_receive()
918 is_t5(qhp->rhp->rdev.lldi.adapter_type), wqe); in c4iw_post_receive()
919 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_receive()
921 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_receive()
922 ring_kernel_rq_db(qhp, idx); in c4iw_post_receive()
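c4iw_post_receive() mirrors that shape on the RQ side: the same in-error and availability checks under qhp->lock (t4_rq_avail() instead of t4_sq_avail()), build_rdma_recv() per WR, the wr_id and, where the adapter supplies them, ingress timestamps recorded in the software RQ at rq.pidx, then the same db_off fork between ringing t4_ring_rq_db() under the lock and falling back to ring_kernel_rq_db().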
1068 static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe, in post_terminate() argument
1075 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid, in post_terminate()
1076 qhp->ep->hwtid); in post_terminate()
1081 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); in post_terminate()
1087 FW_WR_FLOWID_V(qhp->ep->hwtid) | in post_terminate()
1093 if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) { in post_terminate()
1094 term->layer_etype = qhp->attr.layer_etype; in post_terminate()
1095 term->ecode = qhp->attr.ecode; in post_terminate()
1098 c4iw_ofld_send(&qhp->rhp->rdev, skb); in post_terminate()
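post_terminate() sends the peer a TERMINATE message by wrapping an FW_RI_INIT_WR in an skb and pushing it down the offload queue for the connection's tid. A hedged reconstruction; the skb handling, WR header fields, and the build_term_codes() fallback are assumptions around the matched lines:

static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
			   gfp_t gfp)
{
	struct fw_ri_wr *wqe;
	struct sk_buff *skb;
	struct terminate_message *term;

	skb = alloc_skb(sizeof(*wqe), gfp);
	if (!skb)
		return;
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof(*wqe));
	wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(qhp->ep->hwtid) |
		FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));

	wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
	wqe->u.terminate.immdlen = cpu_to_be32(sizeof(*term));
	term = (struct terminate_message *)wqe->u.terminate.termmsg;
	if (qhp->attr.layer_etype == (LAYER_MPA | DDP_LLP)) {
		/* MPA-layer error: codes were stashed on the QP attrs. */
		term->layer_etype = qhp->attr.layer_etype;
		term->ecode = qhp->attr.ecode;
	} else
		build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
	c4iw_ofld_send(&qhp->rhp->rdev, skb);
}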
1104 static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp, in __flush_qp() argument
1111 PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp); in __flush_qp()
1115 spin_lock(&qhp->lock); in __flush_qp()
1117 if (qhp->wq.flushed) { in __flush_qp()
1118 spin_unlock(&qhp->lock); in __flush_qp()
1122 qhp->wq.flushed = 1; in __flush_qp()
1125 c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count); in __flush_qp()
1126 rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count); in __flush_qp()
1127 spin_unlock(&qhp->lock); in __flush_qp()
1132 spin_lock(&qhp->lock); in __flush_qp()
1135 sq_flushed = c4iw_flush_sq(qhp); in __flush_qp()
1136 spin_unlock(&qhp->lock); in __flush_qp()
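__flush_qp() enforces the CQ-then-QP lock order, and the wq.flushed flag makes it idempotent. A condensed sketch; the CQ lock calls and c4iw_flush_hw_cq() are inferred from the surrounding driver, not matched:

	spin_lock_irqsave(&rchp->lock, flag);	/* CQ lock first, then QP lock */
	spin_lock(&qhp->lock);
	if (qhp->wq.flushed) {			/* someone already flushed us */
		spin_unlock(&qhp->lock);
		spin_unlock_irqrestore(&rchp->lock, flag);
		return;
	}
	qhp->wq.flushed = 1;

	c4iw_flush_hw_cq(rchp);
	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
	rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&rchp->lock, flag);

	spin_lock_irqsave(&schp->lock, flag);
	spin_lock(&qhp->lock);
	if (schp != rchp)
		c4iw_flush_hw_cq(schp);
	sq_flushed = c4iw_flush_sq(qhp);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&schp->lock, flag);

rq_flushed and sq_flushed are then used to invoke each CQ's completion handler only if that CQ actually gained entries.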
1163 static void flush_qp(struct c4iw_qp *qhp) in flush_qp() argument
1168 rchp = to_c4iw_cq(qhp->ibqp.recv_cq); in flush_qp()
1169 schp = to_c4iw_cq(qhp->ibqp.send_cq); in flush_qp()
1171 t4_set_wq_in_error(&qhp->wq); in flush_qp()
1172 if (qhp->ibqp.uobject) { in flush_qp()
1186 __flush_qp(qhp, rchp, schp); in flush_qp()
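flush_qp() splits on ownership: for user QPs (qhp->ibqp.uobject set) it only marks the WQ and CQs in error and kicks the CQ completion handlers, since user space flushes its own queues; kernel QPs fall through to __flush_qp(). The body of the uobject branch is not among the hits and is inferred from the driver's usual pattern.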
1189 static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, in rdma_fini() argument
1196 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid, in rdma_fini()
1219 ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid, in rdma_fini()
1220 qhp->wq.sq.qid, __func__); in rdma_fini()
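rdma_fini() follows the driver's synchronous firmware-command pattern: build an FW_RI_INIT_WR with the FINI type, point its cookie at the endpoint's wr_wait, send it via the offload queue, then block in c4iw_wait_for_reply() (matched above) until the firmware completion wakes the waiter or the device is declared dead.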
1251 static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp) in rdma_init() argument
1257 PDBG("%s qhp %p qid 0x%x tid %u ird %u ord %u\n", __func__, qhp, in rdma_init()
1258 qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord); in rdma_init()
1265 ret = alloc_ird(rhp, qhp->attr.max_ird); in rdma_init()
1267 qhp->attr.max_ird = 0; in rdma_init()
1271 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); in rdma_init()
1279 FW_WR_FLOWID_V(qhp->ep->hwtid) | in rdma_init()
1282 wqe->cookie = (uintptr_t)&qhp->ep->com.wr_wait; in rdma_init()
1286 FW_RI_WR_MPAREQBIT_V(qhp->attr.mpa_attr.initiator) | in rdma_init()
1287 FW_RI_WR_P2PTYPE_V(qhp->attr.mpa_attr.p2p_type); in rdma_init()
1289 if (qhp->attr.mpa_attr.recv_marker_enabled) in rdma_init()
1291 if (qhp->attr.mpa_attr.xmit_marker_enabled) in rdma_init()
1293 if (qhp->attr.mpa_attr.crc_enabled) in rdma_init()
1299 if (!qhp->ibqp.uobject) in rdma_init()
1302 wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq)); in rdma_init()
1303 wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd); in rdma_init()
1304 wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid); in rdma_init()
1305 wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid); in rdma_init()
1306 wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid); in rdma_init()
1307 wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq); in rdma_init()
1308 wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq); in rdma_init()
1309 wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord); in rdma_init()
1310 wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird); in rdma_init()
1311 wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq); in rdma_init()
1312 wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq); in rdma_init()
1313 wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size); in rdma_init()
1314 wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr - in rdma_init()
1316 if (qhp->attr.mpa_attr.initiator) in rdma_init()
1317 build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init); in rdma_init()
1323 ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait, in rdma_init()
1324 qhp->ep->hwtid, qhp->wq.sq.qid, __func__); in rdma_init()
1328 free_ird(rhp, qhp->attr.max_ird); in rdma_init()
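rdma_init() uses the same pattern in the other direction: the cookie at line 1282 points at qhp->ep->com.wr_wait so the firmware reply can wake this thread, and the u.init block snapshots everything the firmware needs to move the QP to RTS: qids, CQ ids, negotiated ORD/IRD, initial send/receive sequence numbers, and the hardware RQT size and offset. The uobject test guards kernel-only capabilities; in the mainline driver it adds fast-register and STAG0 enable flags for kernel QPs, though that detail is an inference here, not a matched line. On failure, the IRD resources reserved by alloc_ird() are returned via free_ird().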
1334 int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, in c4iw_modify_qp() argument
1340 struct c4iw_qp_attributes newattr = qhp->attr; in c4iw_modify_qp()
1348 qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state, in c4iw_modify_qp()
1351 mutex_lock(&qhp->mutex); in c4iw_modify_qp()
1355 if (qhp->attr.state != C4IW_QP_STATE_IDLE) { in c4iw_modify_qp()
1379 qhp->attr = newattr; in c4iw_modify_qp()
1383 ret = ring_kernel_sq_db(qhp, attrs->sq_db_inc); in c4iw_modify_qp()
1387 ret = ring_kernel_rq_db(qhp, attrs->rq_db_inc); in c4iw_modify_qp()
1393 if (qhp->attr.state == attrs->next_state) in c4iw_modify_qp()
1396 switch (qhp->attr.state) { in c4iw_modify_qp()
1408 qhp->attr.mpa_attr = attrs->mpa_attr; in c4iw_modify_qp()
1409 qhp->attr.llp_stream_handle = attrs->llp_stream_handle; in c4iw_modify_qp()
1410 qhp->ep = qhp->attr.llp_stream_handle; in c4iw_modify_qp()
1411 set_state(qhp, C4IW_QP_STATE_RTS); in c4iw_modify_qp()
1419 c4iw_get_ep(&qhp->ep->com); in c4iw_modify_qp()
1420 ret = rdma_init(rhp, qhp); in c4iw_modify_qp()
1425 set_state(qhp, C4IW_QP_STATE_ERROR); in c4iw_modify_qp()
1426 flush_qp(qhp); in c4iw_modify_qp()
1436 BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2); in c4iw_modify_qp()
1437 t4_set_wq_in_error(&qhp->wq); in c4iw_modify_qp()
1438 set_state(qhp, C4IW_QP_STATE_CLOSING); in c4iw_modify_qp()
1439 ep = qhp->ep; in c4iw_modify_qp()
1443 c4iw_get_ep(&qhp->ep->com); in c4iw_modify_qp()
1445 ret = rdma_fini(rhp, qhp, ep); in c4iw_modify_qp()
1450 t4_set_wq_in_error(&qhp->wq); in c4iw_modify_qp()
1451 set_state(qhp, C4IW_QP_STATE_TERMINATE); in c4iw_modify_qp()
1452 qhp->attr.layer_etype = attrs->layer_etype; in c4iw_modify_qp()
1453 qhp->attr.ecode = attrs->ecode; in c4iw_modify_qp()
1454 ep = qhp->ep; in c4iw_modify_qp()
1456 c4iw_get_ep(&qhp->ep->com); in c4iw_modify_qp()
1460 terminate = qhp->attr.send_term; in c4iw_modify_qp()
1461 ret = rdma_fini(rhp, qhp, ep); in c4iw_modify_qp()
1467 t4_set_wq_in_error(&qhp->wq); in c4iw_modify_qp()
1468 set_state(qhp, C4IW_QP_STATE_ERROR); in c4iw_modify_qp()
1472 ep = qhp->ep; in c4iw_modify_qp()
1473 c4iw_get_ep(&qhp->ep->com); in c4iw_modify_qp()
1489 flush_qp(qhp); in c4iw_modify_qp()
1490 set_state(qhp, C4IW_QP_STATE_IDLE); in c4iw_modify_qp()
1491 qhp->attr.llp_stream_handle = NULL; in c4iw_modify_qp()
1492 c4iw_put_ep(&qhp->ep->com); in c4iw_modify_qp()
1493 qhp->ep = NULL; in c4iw_modify_qp()
1494 wake_up(&qhp->wait); in c4iw_modify_qp()
1508 if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) { in c4iw_modify_qp()
1512 set_state(qhp, C4IW_QP_STATE_IDLE); in c4iw_modify_qp()
1523 __func__, qhp->attr.state); in c4iw_modify_qp()
1530 PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep, in c4iw_modify_qp()
1531 qhp->wq.sq.qid); in c4iw_modify_qp()
1534 qhp->attr.llp_stream_handle = NULL; in c4iw_modify_qp()
1536 ep = qhp->ep; in c4iw_modify_qp()
1537 qhp->ep = NULL; in c4iw_modify_qp()
1538 set_state(qhp, C4IW_QP_STATE_ERROR); in c4iw_modify_qp()
1542 flush_qp(qhp); in c4iw_modify_qp()
1543 wake_up(&qhp->wait); in c4iw_modify_qp()
1545 mutex_unlock(&qhp->mutex); in c4iw_modify_qp()
1548 post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL); in c4iw_modify_qp()
1567 PDBG("%s exit state %d\n", __func__, qhp->attr.state); in c4iw_modify_qp()
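c4iw_modify_qp() is the QP state machine; all transitions run under qhp->mutex, with set_state() taking the spinlock for the actual attr.state store. A summary of the transitions the hits trace (compiled from the matched lines, not the full switch):

	/*
	 * IDLE    -> RTS       : stash mpa_attr/llp_stream_handle, ep = handle,
	 *                        c4iw_get_ep(), rdma_init()
	 * RTS     -> CLOSING   : t4_set_wq_in_error(), c4iw_get_ep(), rdma_fini()
	 * RTS     -> TERMINATE : stash layer_etype/ecode; post_terminate() after
	 *                        the mutex is dropped, if attr.send_term is set
	 * RTS     -> ERROR     : t4_set_wq_in_error(), then flush
	 * CLOSING -> IDLE      : flush_qp(), drop llp_stream_handle and ep,
	 *                        c4iw_put_ep(), wake_up(&qhp->wait)
	 * ERROR   -> IDLE      : allowed only if both SQ and RQ are empty
	 */

Error paths disassociate the ep, move the QP to ERROR, flush, and wake anyone blocked in c4iw_destroy_qp().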
1574 struct c4iw_qp *qhp; in c4iw_destroy_qp() local
1578 qhp = to_c4iw_qp(ib_qp); in c4iw_destroy_qp()
1579 rhp = qhp->rhp; in c4iw_destroy_qp()
1582 if (qhp->attr.state == C4IW_QP_STATE_TERMINATE) in c4iw_destroy_qp()
1583 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); in c4iw_destroy_qp()
1585 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); in c4iw_destroy_qp()
1586 wait_event(qhp->wait, !qhp->ep); in c4iw_destroy_qp()
1588 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid); in c4iw_destroy_qp()
1589 atomic_dec(&qhp->refcnt); in c4iw_destroy_qp()
1590 wait_event(qhp->wait, !atomic_read(&qhp->refcnt)); in c4iw_destroy_qp()
1593 if (!list_empty(&qhp->db_fc_entry)) in c4iw_destroy_qp()
1594 list_del_init(&qhp->db_fc_entry); in c4iw_destroy_qp()
1596 free_ird(rhp, qhp->attr.max_ird); in c4iw_destroy_qp()
1600 destroy_qp(&rhp->rdev, &qhp->wq, in c4iw_destroy_qp()
1603 PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid); in c4iw_destroy_qp()
1604 kfree(qhp); in c4iw_destroy_qp()
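The teardown ordering in c4iw_destroy_qp() matters: first drive the QP to ERROR through c4iw_modify_qp() (the TERMINATE case passes the internal flag so no FINI is attempted), wait for the ep pointer to drop, remove the qid from the idr, then release the creation reference and wait for refcnt to hit zero so no event or poll path still holds the QP. Only then is it safe to unlink the db flow-control entry, return the IRD reservation, destroy the hardware queues, and kfree the structure.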
1612 struct c4iw_qp *qhp; in c4iw_create_qp() local
1651 qhp = kzalloc(sizeof(*qhp), GFP_KERNEL); in c4iw_create_qp()
1652 if (!qhp) in c4iw_create_qp()
1654 qhp->wq.sq.size = sqsize; in c4iw_create_qp()
1655 qhp->wq.sq.memsize = in c4iw_create_qp()
1657 sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64); in c4iw_create_qp()
1658 qhp->wq.sq.flush_cidx = -1; in c4iw_create_qp()
1659 qhp->wq.rq.size = rqsize; in c4iw_create_qp()
1660 qhp->wq.rq.memsize = in c4iw_create_qp()
1662 sizeof(*qhp->wq.rq.queue); in c4iw_create_qp()
1665 qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE); in c4iw_create_qp()
1666 qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE); in c4iw_create_qp()
1669 ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq, in c4iw_create_qp()
1678 qhp->rhp = rhp; in c4iw_create_qp()
1679 qhp->attr.pd = php->pdid; in c4iw_create_qp()
1680 qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid; in c4iw_create_qp()
1681 qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid; in c4iw_create_qp()
1682 qhp->attr.sq_num_entries = attrs->cap.max_send_wr; in c4iw_create_qp()
1683 qhp->attr.rq_num_entries = attrs->cap.max_recv_wr; in c4iw_create_qp()
1684 qhp->attr.sq_max_sges = attrs->cap.max_send_sge; in c4iw_create_qp()
1685 qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge; in c4iw_create_qp()
1686 qhp->attr.rq_max_sges = attrs->cap.max_recv_sge; in c4iw_create_qp()
1687 qhp->attr.state = C4IW_QP_STATE_IDLE; in c4iw_create_qp()
1688 qhp->attr.next_state = C4IW_QP_STATE_IDLE; in c4iw_create_qp()
1689 qhp->attr.enable_rdma_read = 1; in c4iw_create_qp()
1690 qhp->attr.enable_rdma_write = 1; in c4iw_create_qp()
1691 qhp->attr.enable_bind = 1; in c4iw_create_qp()
1692 qhp->attr.max_ord = 0; in c4iw_create_qp()
1693 qhp->attr.max_ird = 0; in c4iw_create_qp()
1694 qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR; in c4iw_create_qp()
1695 spin_lock_init(&qhp->lock); in c4iw_create_qp()
1696 mutex_init(&qhp->mutex); in c4iw_create_qp()
1697 init_waitqueue_head(&qhp->wait); in c4iw_create_qp()
1698 atomic_set(&qhp->refcnt, 1); in c4iw_create_qp()
1700 ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid); in c4iw_create_qp()
1725 if (t4_sq_onchip(&qhp->wq.sq)) { in c4iw_create_qp()
1735 uresp.sqid = qhp->wq.sq.qid; in c4iw_create_qp()
1736 uresp.sq_size = qhp->wq.sq.size; in c4iw_create_qp()
1737 uresp.sq_memsize = qhp->wq.sq.memsize; in c4iw_create_qp()
1738 uresp.rqid = qhp->wq.rq.qid; in c4iw_create_qp()
1739 uresp.rq_size = qhp->wq.rq.size; in c4iw_create_qp()
1740 uresp.rq_memsize = qhp->wq.rq.memsize; in c4iw_create_qp()
1761 mm1->addr = qhp->wq.sq.phys_addr; in c4iw_create_qp()
1762 mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize); in c4iw_create_qp()
1765 mm2->addr = virt_to_phys(qhp->wq.rq.queue); in c4iw_create_qp()
1766 mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize); in c4iw_create_qp()
1769 mm3->addr = (__force unsigned long)qhp->wq.sq.udb; in c4iw_create_qp()
1773 mm4->addr = (__force unsigned long)qhp->wq.rq.udb; in c4iw_create_qp()
1784 qhp->ibqp.qp_num = qhp->wq.sq.qid; in c4iw_create_qp()
1785 init_timer(&(qhp->timer)); in c4iw_create_qp()
1786 INIT_LIST_HEAD(&qhp->db_fc_entry); in c4iw_create_qp()
1789 qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize, in c4iw_create_qp()
1790 attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size, in c4iw_create_qp()
1791 qhp->wq.rq.memsize, attrs->cap.max_recv_wr); in c4iw_create_qp()
1792 return &qhp->ibqp; in c4iw_create_qp()
1804 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid); in c4iw_create_qp()
1806 destroy_qp(&rhp->rdev, &qhp->wq, in c4iw_create_qp()
1809 kfree(qhp); in c4iw_create_qp()
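c4iw_create_qp() sizes the SQ and RQ in bytes (memsize), rounds both up to a page for user QPs, and allocates the hardware queues via create_qp(). The attribute block is then seeded (IDLE state, RDMA read/write/bind enabled, ORD/IRD zero until negotiation) and the qid is inserted into the device idr. For user QPs, the response carries qids and sizes back to the library, and four mmap cookies are handed out: SQ memory (mm1), RQ memory (mm2), and the SQ and RQ user doorbell pages (mm3/mm4). The QP's ibqp.qp_num is simply the SQ qid, which is why the error path removes that qid from the idr before destroying the queues.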
1817 struct c4iw_qp *qhp; in c4iw_ib_modify_qp() local
1832 qhp = to_c4iw_qp(ibqp); in c4iw_ib_modify_qp()
1833 rhp = qhp->rhp; in c4iw_ib_modify_qp()
1862 return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0); in c4iw_ib_modify_qp()
1874 struct c4iw_qp *qhp = to_c4iw_qp(ibqp); in c4iw_ib_query_qp() local
1878 attr->qp_state = to_ib_qp_state(qhp->attr.state); in c4iw_ib_query_qp()
1879 init_attr->cap.max_send_wr = qhp->attr.sq_num_entries; in c4iw_ib_query_qp()
1880 init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries; in c4iw_ib_query_qp()
1881 init_attr->cap.max_send_sge = qhp->attr.sq_max_sges; in c4iw_ib_query_qp()
1882 init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges; in c4iw_ib_query_qp()
1884 init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0; in c4iw_ib_query_qp()
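c4iw_ib_query_qp() just reflects the cached attributes back; the function is small enough to reconstruct around the hits (the memsets and the max_inline_data line are assumptions). Note that max_recv_sge is populated from sq_max_sges: this matches the source but reads like a copy-paste of the line above, since rq_max_sges exists and is what a reader would expect here:

int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);

	memset(attr, 0, sizeof(*attr));
	memset(init_attr, 0, sizeof(*init_attr));
	attr->qp_state = to_ib_qp_state(qhp->attr.state);
	init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
	init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
	init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
	init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges;	/* sic: sq, not rq */
	init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;	/* assumed */
	init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
	return 0;
}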