This source file includes the following definitions:
- __iwch_poll_cq_one
- iwch_poll_cq_one
- iwch_poll_cq
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32 #include "iwch_provider.h"
33 #include "iwch.h"
34
35 static int __iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
36 struct iwch_qp *qhp, struct ib_wc *wc)
37 {
38 struct t3_wq *wq = qhp ? &qhp->wq : NULL;
39 struct t3_cqe cqe;
40 u32 credit = 0;
41 u8 cqe_flushed;
42 u64 cookie;
43 int ret = 1;
44
45 ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
46 &credit);
47 if (t3a_device(chp->rhp) && credit) {
48 pr_debug("%s updating %d cq credits on id %d\n", __func__,
49 credit, chp->cq.cqid);
50 cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit);
51 }
52
53 if (ret) {
54 ret = -EAGAIN;
55 goto out;
56 }
57 ret = 1;
58
59 wc->wr_id = cookie;
60 wc->qp = qhp ? &qhp->ibqp : NULL;
61 wc->vendor_err = CQE_STATUS(cqe);
62 wc->wc_flags = 0;
63
64 pr_debug("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
65 __func__,
66 CQE_QPID(cqe), CQE_TYPE(cqe),
67 CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),
68 CQE_WRID_LOW(cqe), (unsigned long long)cookie);
69
70 if (CQE_TYPE(cqe) == 0) {
71 if (!CQE_STATUS(cqe))
72 wc->byte_len = CQE_LEN(cqe);
73 else
74 wc->byte_len = 0;
75 wc->opcode = IB_WC_RECV;
76 if (CQE_OPCODE(cqe) == T3_SEND_WITH_INV ||
77 CQE_OPCODE(cqe) == T3_SEND_WITH_SE_INV) {
78 wc->ex.invalidate_rkey = CQE_WRID_STAG(cqe);
79 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
80 }
81 } else {
82 switch (CQE_OPCODE(cqe)) {
83 case T3_RDMA_WRITE:
84 wc->opcode = IB_WC_RDMA_WRITE;
85 break;
86 case T3_READ_REQ:
87 wc->opcode = IB_WC_RDMA_READ;
88 wc->byte_len = CQE_LEN(cqe);
89 break;
90 case T3_SEND:
91 case T3_SEND_WITH_SE:
92 case T3_SEND_WITH_INV:
93 case T3_SEND_WITH_SE_INV:
94 wc->opcode = IB_WC_SEND;
95 break;
96 case T3_LOCAL_INV:
97 wc->opcode = IB_WC_LOCAL_INV;
98 break;
99 case T3_FAST_REGISTER:
100 wc->opcode = IB_WC_REG_MR;
101 break;
102 default:
103 pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
104 CQE_OPCODE(cqe), CQE_QPID(cqe));
105 ret = -EINVAL;
106 goto out;
107 }
108 }
109
110 if (cqe_flushed)
111 wc->status = IB_WC_WR_FLUSH_ERR;
112 else {
113
114 switch (CQE_STATUS(cqe)) {
115 case TPT_ERR_SUCCESS:
116 wc->status = IB_WC_SUCCESS;
117 break;
118 case TPT_ERR_STAG:
119 wc->status = IB_WC_LOC_ACCESS_ERR;
120 break;
121 case TPT_ERR_PDID:
122 wc->status = IB_WC_LOC_PROT_ERR;
123 break;
124 case TPT_ERR_QPID:
125 case TPT_ERR_ACCESS:
126 wc->status = IB_WC_LOC_ACCESS_ERR;
127 break;
128 case TPT_ERR_WRAP:
129 wc->status = IB_WC_GENERAL_ERR;
130 break;
131 case TPT_ERR_BOUND:
132 wc->status = IB_WC_LOC_LEN_ERR;
133 break;
134 case TPT_ERR_INVALIDATE_SHARED_MR:
135 case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
136 wc->status = IB_WC_MW_BIND_ERR;
137 break;
138 case TPT_ERR_CRC:
139 case TPT_ERR_MARKER:
140 case TPT_ERR_PDU_LEN_ERR:
141 case TPT_ERR_OUT_OF_RQE:
142 case TPT_ERR_DDP_VERSION:
143 case TPT_ERR_RDMA_VERSION:
144 case TPT_ERR_DDP_QUEUE_NUM:
145 case TPT_ERR_MSN:
146 case TPT_ERR_TBIT:
147 case TPT_ERR_MO:
148 case TPT_ERR_MSN_RANGE:
149 case TPT_ERR_IRD_OVERFLOW:
150 case TPT_ERR_OPCODE:
151 wc->status = IB_WC_FATAL_ERR;
152 break;
153 case TPT_ERR_SWFLUSH:
154 wc->status = IB_WC_WR_FLUSH_ERR;
155 break;
156 default:
157 pr_err("Unexpected cqe_status 0x%x for QPID=0x%0x\n",
158 CQE_STATUS(cqe), CQE_QPID(cqe));
159 ret = -EINVAL;
160 }
161 }
162 out:
163 return ret;
164 }
165
166
167
168
169
170
171
172
173
174
175 static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
176 struct ib_wc *wc)
177 {
178 struct iwch_qp *qhp;
179 struct t3_cqe *rd_cqe;
180 int ret;
181
182 rd_cqe = cxio_next_cqe(&chp->cq);
183
184 if (!rd_cqe)
185 return 0;
186
187 qhp = get_qhp(rhp, CQE_QPID(*rd_cqe));
188 if (qhp) {
189 spin_lock(&qhp->lock);
190 ret = __iwch_poll_cq_one(rhp, chp, qhp, wc);
191 spin_unlock(&qhp->lock);
192 } else {
193 ret = __iwch_poll_cq_one(rhp, chp, NULL, wc);
194 }
195 return ret;
196 }
197
198 int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
199 {
200 struct iwch_dev *rhp;
201 struct iwch_cq *chp;
202 unsigned long flags;
203 int npolled;
204 int err = 0;
205
206 chp = to_iwch_cq(ibcq);
207 rhp = chp->rhp;
208
209 spin_lock_irqsave(&chp->lock, flags);
210 for (npolled = 0; npolled < num_entries; ++npolled) {
211
212
213
214
215
216
217 do {
218 err = iwch_poll_cq_one(rhp, chp, wc + npolled);
219 } while (err == -EAGAIN);
220 if (err <= 0)
221 break;
222 }
223 spin_unlock_irqrestore(&chp->lock, flags);
224
225 if (err < 0)
226 return err;
227 else {
228 return npolled;
229 }
230 }