This source file includes the following definitions:
- set_data_seg
- set_raddr_seg
- hns_roce_v1_post_send
- hns_roce_v1_post_recv
- hns_roce_set_db_event_mode
- hns_roce_set_db_ext_mode
- hns_roce_set_sdb
- hns_roce_set_odb
- hns_roce_set_sdb_ext
- hns_roce_set_odb_ext
- hns_roce_db_ext_init
- hns_roce_v1_create_lp_qp
- hns_roce_v1_rsv_lp_qp
- hns_roce_v1_release_lp_qp
- hns_roce_db_init
- hns_roce_v1_recreate_lp_qp_work_fn
- hns_roce_v1_recreate_lp_qp
- hns_roce_v1_send_lp_wqe
- hns_roce_v1_mr_free_work_fn
- hns_roce_v1_dereg_mr
- hns_roce_db_free
- hns_roce_raq_init
- hns_roce_raq_free
- hns_roce_port_enable
- hns_roce_bt_init
- hns_roce_bt_free
- hns_roce_tptr_init
- hns_roce_tptr_free
- hns_roce_free_mr_init
- hns_roce_free_mr_free
- hns_roce_v1_reset
- hns_roce_v1_profile
- hns_roce_v1_init
- hns_roce_v1_exit
- hns_roce_v1_cmd_pending
- hns_roce_v1_post_mbox
- hns_roce_v1_chk_mbox
- hns_roce_v1_set_gid
- hns_roce_v1_set_mac
- hns_roce_v1_set_mtu
- hns_roce_v1_write_mtpt
- get_cqe
- get_sw_cqe
- next_cqe_sw
- hns_roce_v1_cq_set_ci
- __hns_roce_v1_cq_clean
- hns_roce_v1_cq_clean
- hns_roce_v1_write_cqc
- hns_roce_v1_modify_cq
- hns_roce_v1_req_notify_cq
- hns_roce_v1_poll_one
- hns_roce_v1_poll_cq
- hns_roce_v1_clear_hem
- hns_roce_v1_qp_modify
- hns_roce_v1_m_sqp
- hns_roce_v1_m_qp
- hns_roce_v1_modify_qp
- to_ib_qp_state
- hns_roce_v1_query_qpc
- hns_roce_v1_q_sqp
- hns_roce_v1_q_qp
- hns_roce_v1_query_qp
- hns_roce_v1_destroy_qp
- hns_roce_v1_destroy_cq
- set_eq_cons_index_v1
- hns_roce_v1_wq_catas_err_handle
- hns_roce_v1_local_wq_access_err_handle
- hns_roce_v1_qp_err_handle
- hns_roce_v1_cq_err_handle
- hns_roce_v1_db_overflow_handle
- get_aeqe_v1
- next_aeqe_sw_v1
- hns_roce_v1_aeq_int
- get_ceqe_v1
- next_ceqe_sw_v1
- hns_roce_v1_ceq_int
- hns_roce_v1_msix_interrupt_eq
- hns_roce_v1_msix_interrupt_abn
- hns_roce_v1_int_mask_enable
- hns_roce_v1_free_eq
- hns_roce_v1_enable_eq
- hns_roce_v1_create_eq
- hns_roce_v1_init_eq_table
- hns_roce_v1_cleanup_eq_table
- hns_roce_find_pdev
- hns_roce_get_cfg
- hns_roce_probe
- hns_roce_remove
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v1.h"

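/* Fill one hardware scatter/gather segment from an ib_sge. */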
static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->lkey = cpu_to_le32(sg->lkey);
	dseg->addr = cpu_to_le64(sg->addr);
	dseg->len = cpu_to_le32(sg->length);
}

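/* Fill the remote-address segment used by RDMA read/write WQEs. */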
static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr,
			  u32 rkey)
{
	rseg->raddr = cpu_to_le64(remote_addr);
	rseg->rkey = cpu_to_le32(rkey);
	rseg->len = 0;
}

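/*
 * Post send work requests to the SQ and ring the SQ doorbell. Only GSI (UD)
 * and RC QPs are supported by this hardware revision.
 */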
static int hns_roce_v1_post_send(struct ib_qp *ibqp,
				 const struct ib_send_wr *wr,
				 const struct ib_send_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
	struct hns_roce_ud_send_wqe *ud_sq_wqe = NULL;
	struct hns_roce_wqe_ctrl_seg *ctrl = NULL;
	struct hns_roce_wqe_data_seg *dseg = NULL;
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_sq_db sq_db;
	int ps_opcode = 0, i = 0;
	unsigned long flags = 0;
	void *wqe = NULL;
	__le32 doorbell[2];
	u32 wqe_idx = 0;
	int nreq = 0;
	int ret = 0;
	u8 *smac;
	int loopback;

	if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
		     ibqp->qp_type != IB_QPT_RC)) {
		dev_err(dev, "unsupported QP type\n");
		*bad_wr = NULL;
		return -EOPNOTSUPP;
	}

	spin_lock_irqsave(&qp->sq.lock, flags);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
				wr->num_sge, qp->sq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, wqe_idx);
		qp->sq.wrid[wqe_idx] = wr->wr_id;

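		/* Build the WQE: UD format for GSI QP1, standard format for RC. */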
		if (ibqp->qp_type == IB_QPT_GSI) {
			ud_sq_wqe = wqe;
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_0_M,
				       UD_SEND_WQE_U32_4_DMAC_0_S,
				       ah->av.mac[0]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_1_M,
				       UD_SEND_WQE_U32_4_DMAC_1_S,
				       ah->av.mac[1]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_2_M,
				       UD_SEND_WQE_U32_4_DMAC_2_S,
				       ah->av.mac[2]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_3_M,
				       UD_SEND_WQE_U32_4_DMAC_3_S,
				       ah->av.mac[3]);

			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_DMAC_4_M,
				       UD_SEND_WQE_U32_8_DMAC_4_S,
				       ah->av.mac[4]);
			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_DMAC_5_M,
				       UD_SEND_WQE_U32_8_DMAC_5_S,
				       ah->av.mac[5]);

			smac = (u8 *)hr_dev->dev_addr[qp->port];
			loopback = ether_addr_equal_unaligned(ah->av.mac,
							      smac) ? 1 : 0;
			roce_set_bit(ud_sq_wqe->u32_8,
				     UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S,
				     loopback);

			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_OPERATION_TYPE_M,
				       UD_SEND_WQE_U32_8_OPERATION_TYPE_S,
				       HNS_ROCE_WQE_OPCODE_SEND);
			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M,
				       UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S,
				       2);
			roce_set_bit(ud_sq_wqe->u32_8,
				     UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S,
				     1);

			ud_sq_wqe->u32_8 |= (wr->send_flags & IB_SEND_SIGNALED ?
				cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
				(wr->send_flags & IB_SEND_SOLICITED ?
				cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
				((wr->opcode == IB_WR_SEND_WITH_IMM) ?
				cpu_to_le32(HNS_ROCE_WQE_IMM) : 0);

			roce_set_field(ud_sq_wqe->u32_16,
				       UD_SEND_WQE_U32_16_DEST_QP_M,
				       UD_SEND_WQE_U32_16_DEST_QP_S,
				       ud_wr(wr)->remote_qpn);
			roce_set_field(ud_sq_wqe->u32_16,
				       UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M,
				       UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S,
				       ah->av.stat_rate);

			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_FLOW_LABEL_M,
				       UD_SEND_WQE_U32_36_FLOW_LABEL_S,
				       ah->av.flowlabel);
			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_PRIORITY_M,
				       UD_SEND_WQE_U32_36_PRIORITY_S,
				       ah->av.sl);
			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_SGID_INDEX_M,
				       UD_SEND_WQE_U32_36_SGID_INDEX_S,
				       hns_get_gid_index(hr_dev, qp->phy_port,
							 ah->av.gid_index));

			roce_set_field(ud_sq_wqe->u32_40,
				       UD_SEND_WQE_U32_40_HOP_LIMIT_M,
				       UD_SEND_WQE_U32_40_HOP_LIMIT_S,
				       ah->av.hop_limit);
			roce_set_field(ud_sq_wqe->u32_40,
				       UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M,
				       UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S,
				       ah->av.tclass);

			memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN);

			ud_sq_wqe->va0_l =
				       cpu_to_le32((u32)wr->sg_list[0].addr);
			ud_sq_wqe->va0_h =
				       cpu_to_le32((wr->sg_list[0].addr) >> 32);
			ud_sq_wqe->l_key0 =
				       cpu_to_le32(wr->sg_list[0].lkey);

			ud_sq_wqe->va1_l =
				       cpu_to_le32((u32)wr->sg_list[1].addr);
			ud_sq_wqe->va1_h =
				       cpu_to_le32((wr->sg_list[1].addr) >> 32);
			ud_sq_wqe->l_key1 =
				       cpu_to_le32(wr->sg_list[1].lkey);
		} else if (ibqp->qp_type == IB_QPT_RC) {
			u32 tmp_len = 0;

			ctrl = wqe;
			memset(ctrl, 0, sizeof(struct hns_roce_wqe_ctrl_seg));
			for (i = 0; i < wr->num_sge; i++)
				tmp_len += wr->sg_list[i].length;

			ctrl->msg_length =
			  cpu_to_le32(le32_to_cpu(ctrl->msg_length) + tmp_len);

			ctrl->sgl_pa_h = 0;
			ctrl->flag = 0;

			switch (wr->opcode) {
			case IB_WR_SEND_WITH_IMM:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				ctrl->imm_data = wr->ex.imm_data;
				break;
			case IB_WR_SEND_WITH_INV:
				ctrl->inv_key =
					cpu_to_le32(wr->ex.invalidate_rkey);
				break;
			default:
				ctrl->imm_data = 0;
				break;
			}

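			/* Ctrl field: signaled / solicited / immediate / fence flags. */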
			ctrl->flag |= (wr->send_flags & IB_SEND_SIGNALED ?
				      cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
				      (wr->send_flags & IB_SEND_SOLICITED ?
				      cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
				      ((wr->opcode == IB_WR_SEND_WITH_IMM ||
				      wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) ?
				      cpu_to_le32(HNS_ROCE_WQE_IMM) : 0) |
				      (wr->send_flags & IB_SEND_FENCE ?
				      (cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0);

			wqe += sizeof(struct hns_roce_wqe_ctrl_seg);

			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
				ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ;
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				break;
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE;
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				break;
			case IB_WR_SEND:
			case IB_WR_SEND_WITH_INV:
			case IB_WR_SEND_WITH_IMM:
				ps_opcode = HNS_ROCE_WQE_OPCODE_SEND;
				break;
			case IB_WR_LOCAL_INV:
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_LSO:
			default:
				ps_opcode = HNS_ROCE_WQE_OPCODE_MASK;
				break;
			}
			ctrl->flag |= cpu_to_le32(ps_opcode);
			wqe += sizeof(struct hns_roce_wqe_raddr_seg);

			dseg = wqe;
			if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
				if (le32_to_cpu(ctrl->msg_length) >
				    hr_dev->caps.max_sq_inline) {
					ret = -EINVAL;
					*bad_wr = wr;
					dev_err(dev, "inline len(1-%d)=%d, illegal\n",
						hr_dev->caps.max_sq_inline,
						le32_to_cpu(ctrl->msg_length));
					goto out;
				}
				for (i = 0; i < wr->num_sge; i++) {
					memcpy(wqe, ((void *) (uintptr_t)
					       wr->sg_list[i].addr),
					       wr->sg_list[i].length);
					wqe += wr->sg_list[i].length;
				}
				ctrl->flag |= cpu_to_le32(HNS_ROCE_WQE_INLINE);
			} else {
				for (i = 0; i < wr->num_sge; i++)
					set_data_seg(dseg + i, wr->sg_list + i);

				ctrl->flag |= cpu_to_le32(wr->num_sge <<
					      HNS_ROCE_WQE_SGE_NUM_BIT);
			}
		}
	}

out:
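	/* Advance the SQ head and ring the doorbell once for all posted WQEs. */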
	if (likely(nreq)) {
		qp->sq.head += nreq;

		wmb();

		sq_db.u32_4 = 0;
		sq_db.u32_8 = 0;
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M,
			       SQ_DOORBELL_U32_4_SQ_HEAD_S,
			       (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SL_M,
			       SQ_DOORBELL_U32_4_SL_S, qp->sl);
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M,
			       SQ_DOORBELL_U32_4_PORT_S, qp->phy_port);
		roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
			       SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn);
		roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);

		doorbell[0] = sq_db.u32_4;
		doorbell[1] = sq_db.u32_8;

		hns_roce_write64_k(doorbell, qp->sq.db_reg_l);
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return ret;
}

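/*
 * Post receive work requests; for GSI QP1 the RQ head is pushed through a
 * config register instead of a doorbell.
 */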
static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
				 const struct ib_recv_wr *wr,
				 const struct ib_recv_wr **bad_wr)
{
	struct hns_roce_rq_wqe_ctrl *ctrl = NULL;
	struct hns_roce_wqe_data_seg *scat = NULL;
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_rq_db rq_db;
	__le32 doorbell[2] = {0};
	unsigned long flags = 0;
	unsigned int wqe_idx;
	int ret = 0;
	int nreq = 0;
	int i = 0;
	u32 reg_val;

	spin_lock_irqsave(&hr_qp->rq.lock, flags);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
					 hr_qp->ibqp.recv_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);

		if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
			dev_err(dev, "rq:num_sge=%d > hr_qp->rq.max_gs=%d\n",
				wr->num_sge, hr_qp->rq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		ctrl = get_recv_wqe(hr_qp, wqe_idx);

		roce_set_field(ctrl->rwqe_byte_12,
			       RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M,
			       RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S,
			       wr->num_sge);

		scat = (struct hns_roce_wqe_data_seg *)(ctrl + 1);

		for (i = 0; i < wr->num_sge; i++)
			set_data_seg(scat + i, wr->sg_list + i);

		hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
	}

out:
	if (likely(nreq)) {
		hr_qp->rq.head += nreq;

		wmb();

		if (ibqp->qp_type == IB_QPT_GSI) {
			__le32 tmp;

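			/* QP1 has no RQ doorbell; write the new RQ head into the QP1C config register. */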
			reg_val = roce_read(to_hr_dev(ibqp->device),
					    ROCEE_QP1C_CFG3_0_REG +
					    QP1C_CFGN_OFFSET * hr_qp->phy_port);
			tmp = cpu_to_le32(reg_val);
			roce_set_field(tmp,
				       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
				       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
				       hr_qp->rq.head);
			reg_val = le32_to_cpu(tmp);
			roce_write(to_hr_dev(ibqp->device),
				   ROCEE_QP1C_CFG3_0_REG +
				   QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val);
		} else {
			rq_db.u32_4 = 0;
			rq_db.u32_8 = 0;

			roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M,
				       RQ_DOORBELL_U32_4_RQ_HEAD_S,
				       hr_qp->rq.head);
			roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M,
				       RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
			roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M,
				       RQ_DOORBELL_U32_8_CMD_S, 1);
			roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S,
				     1);

			doorbell[0] = rq_db.u32_4;
			doorbell[1] = rq_db.u32_8;

			hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
		}
	}
	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

	return ret;
}

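/* Select event or poll mode for the SQ and other doorbells in the global config. */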
static void hns_roce_set_db_event_mode(struct hns_roce_dev *hr_dev,
				       int sdb_mode, int odb_mode)
{
	__le32 tmp;
	u32 val;

	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	tmp = cpu_to_le32(val);
	roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S, sdb_mode);
	roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S, odb_mode);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
}

static void hns_roce_set_db_ext_mode(struct hns_roce_dev *hr_dev, u32 sdb_mode,
				     u32 odb_mode)
{
	__le32 tmp;
	u32 val;

	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	tmp = cpu_to_le32(val);
	roce_set_bit(tmp, ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S, sdb_mode);
	roce_set_bit(tmp, ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S, odb_mode);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
}

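/* Program almost-empty/almost-full watermarks for the normal SQ doorbell. */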
static void hns_roce_set_sdb(struct hns_roce_dev *hr_dev, u32 sdb_alept,
			     u32 sdb_alful)
{
	__le32 tmp;
	u32 val;

	val = roce_read(hr_dev, ROCEE_DB_SQ_WL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M,
		       ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S, sdb_alful);
	roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M,
		       ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S, sdb_alept);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_DB_SQ_WL_REG, val);
}

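/* Program almost-empty/almost-full watermarks for the other-queue doorbell. */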
static void hns_roce_set_odb(struct hns_roce_dev *hr_dev, u32 odb_alept,
			     u32 odb_alful)
{
	__le32 tmp;
	u32 val;

	val = roce_read(hr_dev, ROCEE_DB_OTHERS_WL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M,
		       ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S, odb_alful);
	roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M,
		       ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S, odb_alept);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_DB_OTHERS_WL_REG, val);
}

static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
				 u32 ext_sdb_alful)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	dma_addr_t sdb_dma_addr;
	__le32 tmp;
	u32 val;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_EMPTY_REG, ext_sdb_alept);
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_REG, ext_sdb_alful);

	sdb_dma_addr = db->ext_db->sdb_buf_list->map;
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_REG, (u32)(sdb_dma_addr >> 12));

	val = roce_read(hr_dev, ROCEE_EXT_DB_SQ_H_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S,
		       db->ext_db->esdb_dep);

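	/*
	 * The extended SDB base is 4K-aligned: bits [43:12] were written to
	 * the low register above (>> 12); the remaining bits go here (>> 44).
	 */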
	roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S, sdb_dma_addr >> 44);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val);

	dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep);
	dev_dbg(dev, "ext SDB threshold: empty: 0x%x, ful: 0x%x\n",
		ext_sdb_alept, ext_sdb_alful);
}

static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
				 u32 ext_odb_alful)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	dma_addr_t odb_dma_addr;
	__le32 tmp;
	u32 val;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG, ext_odb_alept);
	roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_REG, ext_odb_alful);

	odb_dma_addr = db->ext_db->odb_buf_list->map;
	roce_write(hr_dev, ROCEE_EXT_DB_OTH_REG, (u32)(odb_dma_addr >> 12));

	val = roce_read(hr_dev, ROCEE_EXT_DB_OTH_H_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M,
		       ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S,
		       db->ext_db->eodb_dep);
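	/*
	 * BA_H holds the high bits of the 4K-aligned ODB base address,
	 * mirroring hns_roce_set_sdb_ext() above (assumed fix: the original
	 * wrote eodb_dep into this field).
	 */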
	roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S,
		       odb_dma_addr >> 44);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_EXT_DB_OTH_H_REG, val);

	dev_dbg(dev, "ext ODB depth: 0x%x\n", db->ext_db->eodb_dep);
	dev_dbg(dev, "ext ODB threshold: empty: 0x%x, ful: 0x%x\n",
		ext_odb_alept, ext_odb_alful);
}

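/* Allocate extended doorbell buffers (when enabled) and program their registers. */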
static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod,
				u32 odb_ext_mod)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	dma_addr_t sdb_dma_addr;
	dma_addr_t odb_dma_addr;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	db->ext_db = kmalloc(sizeof(*db->ext_db), GFP_KERNEL);
	if (!db->ext_db)
		return -ENOMEM;

	if (sdb_ext_mod) {
		db->ext_db->sdb_buf_list = kmalloc(
				sizeof(*db->ext_db->sdb_buf_list), GFP_KERNEL);
		if (!db->ext_db->sdb_buf_list) {
			ret = -ENOMEM;
			goto ext_sdb_buf_fail_out;
		}

		db->ext_db->sdb_buf_list->buf = dma_alloc_coherent(dev,
						HNS_ROCE_V1_EXT_SDB_SIZE,
						&sdb_dma_addr, GFP_KERNEL);
		if (!db->ext_db->sdb_buf_list->buf) {
			ret = -ENOMEM;
			goto alloc_sq_db_buf_fail;
		}
		db->ext_db->sdb_buf_list->map = sdb_dma_addr;

		db->ext_db->esdb_dep = ilog2(HNS_ROCE_V1_EXT_SDB_DEPTH);
		hns_roce_set_sdb_ext(hr_dev, HNS_ROCE_V1_EXT_SDB_ALEPT,
				     HNS_ROCE_V1_EXT_SDB_ALFUL);
	} else
		hns_roce_set_sdb(hr_dev, HNS_ROCE_V1_SDB_ALEPT,
				 HNS_ROCE_V1_SDB_ALFUL);

	if (odb_ext_mod) {
		db->ext_db->odb_buf_list = kmalloc(
				sizeof(*db->ext_db->odb_buf_list), GFP_KERNEL);
		if (!db->ext_db->odb_buf_list) {
			ret = -ENOMEM;
			goto ext_odb_buf_fail_out;
		}

		db->ext_db->odb_buf_list->buf = dma_alloc_coherent(dev,
						HNS_ROCE_V1_EXT_ODB_SIZE,
						&odb_dma_addr, GFP_KERNEL);
		if (!db->ext_db->odb_buf_list->buf) {
			ret = -ENOMEM;
			goto alloc_otr_db_buf_fail;
		}
		db->ext_db->odb_buf_list->map = odb_dma_addr;

		db->ext_db->eodb_dep = ilog2(HNS_ROCE_V1_EXT_ODB_DEPTH);
		hns_roce_set_odb_ext(hr_dev, HNS_ROCE_V1_EXT_ODB_ALEPT,
				     HNS_ROCE_V1_EXT_ODB_ALFUL);
	} else
		hns_roce_set_odb(hr_dev, HNS_ROCE_V1_ODB_ALEPT,
				 HNS_ROCE_V1_ODB_ALFUL);

	hns_roce_set_db_ext_mode(hr_dev, sdb_ext_mod, odb_ext_mod);

	return 0;

alloc_otr_db_buf_fail:
	kfree(db->ext_db->odb_buf_list);

ext_odb_buf_fail_out:
	if (sdb_ext_mod) {
		dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
				  db->ext_db->sdb_buf_list->buf,
				  db->ext_db->sdb_buf_list->map);
	}

alloc_sq_db_buf_fail:
	if (sdb_ext_mod)
		kfree(db->ext_db->sdb_buf_list);

ext_sdb_buf_fail_out:
	kfree(db->ext_db);
	return ret;
}

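/* Create one loopback RC QP; these QPs are used to flush WQEs when freeing MRs. */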
static struct hns_roce_qp *hns_roce_v1_create_lp_qp(struct hns_roce_dev *hr_dev,
						    struct ib_pd *pd)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_qp_init_attr init_attr;
	struct ib_qp *qp;

	memset(&init_attr, 0, sizeof(struct ib_qp_init_attr));
	init_attr.qp_type = IB_QPT_RC;
	init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr.cap.max_recv_wr = HNS_ROCE_MIN_WQE_NUM;
	init_attr.cap.max_send_wr = HNS_ROCE_MIN_WQE_NUM;

	qp = hns_roce_create_qp(pd, &init_attr, NULL);
	if (IS_ERR(qp)) {
		dev_err(dev, "Create loop qp for mr free failed!");
		return NULL;
	}

	return to_hr_qp(qp);
}

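/*
 * Reserve the loopback QPs, one per phy_port/SL pairing, together with the
 * CQ and PD they share, and move each QP to RTS by hand.
 */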
static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_caps *caps = &hr_dev->caps;
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_cq_init_attr cq_init_attr;
	struct hns_roce_free_mr *free_mr;
	struct ib_qp_attr attr = { 0 };
	struct hns_roce_v1_priv *priv;
	struct hns_roce_qp *hr_qp;
	struct ib_device *ibdev;
	struct ib_cq *cq;
	struct ib_pd *pd;
	union ib_gid dgid;
	__be64 subnet_prefix;
	int attr_mask = 0;
	int ret;
	int i, j;
	u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
	u8 phy_port;
	u8 port = 0;
	u8 sl;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	cq_init_attr.cqe = HNS_ROCE_MIN_WQE_NUM * 2;
	cq_init_attr.comp_vector = 0;

	ibdev = &hr_dev->ib_dev;
	cq = rdma_zalloc_drv_obj(ibdev, ib_cq);
	if (!cq)
		return -ENOMEM;

	ret = hns_roce_ib_create_cq(cq, &cq_init_attr, NULL);
	if (ret) {
		dev_err(dev, "Create cq for reserved loop qp failed!");
		goto alloc_cq_failed;
	}
	free_mr->mr_free_cq = to_hr_cq(cq);
	free_mr->mr_free_cq->ib_cq.device = &hr_dev->ib_dev;
	free_mr->mr_free_cq->ib_cq.uobject = NULL;
	free_mr->mr_free_cq->ib_cq.comp_handler = NULL;
	free_mr->mr_free_cq->ib_cq.event_handler = NULL;
	free_mr->mr_free_cq->ib_cq.cq_context = NULL;
	atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0);

	pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
	if (!pd) {
		ret = -ENOMEM;
		goto alloc_mem_failed;
	}

	pd->device = ibdev;
	ret = hns_roce_alloc_pd(pd, NULL);
	if (ret)
		goto alloc_pd_failed;

	free_mr->mr_free_pd = to_hr_pd(pd);
	free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev;
	free_mr->mr_free_pd->ibpd.uobject = NULL;
	free_mr->mr_free_pd->ibpd.__internal_mr = NULL;
	atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0);

	attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;
	attr.pkey_index = 0;
	attr.min_rnr_timer = 0;

	attr.max_dest_rd_atomic = 0;
	attr.max_rd_atomic = 0;

	attr.rq_psn = 0x0808;
	attr.sq_psn = 0x0808;
	attr.retry_cnt = 7;
	attr.rnr_retry = 7;
	attr.timeout = 0x12;
	attr.path_mtu = IB_MTU_256;
	attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
	rdma_ah_set_static_rate(&attr.ah_attr, 3);

	subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
				(i % HNS_ROCE_MAX_PORTS);
		sl = i / HNS_ROCE_MAX_PORTS;

		for (j = 0; j < caps->num_ports; j++) {
			if (hr_dev->iboe.phy_port[j] == phy_port) {
				queue_en[i] = 1;
				port = j;
				break;
			}
		}

		if (!queue_en[i])
			continue;

		free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
		if (!free_mr->mr_free_qp[i]) {
			dev_err(dev, "Create loop qp failed!\n");
			ret = -ENOMEM;
			goto create_lp_qp_failed;
		}
		hr_qp = free_mr->mr_free_qp[i];

		hr_qp->port = port;
		hr_qp->phy_port = phy_port;
		hr_qp->ibqp.qp_type = IB_QPT_RC;
		hr_qp->ibqp.device = &hr_dev->ib_dev;
		hr_qp->ibqp.uobject = NULL;
		atomic_set(&hr_qp->ibqp.usecnt, 0);
		hr_qp->ibqp.pd = pd;
		hr_qp->ibqp.recv_cq = cq;
		hr_qp->ibqp.send_cq = cq;

		rdma_ah_set_port_num(&attr.ah_attr, port + 1);
		rdma_ah_set_sl(&attr.ah_attr, sl);
		attr.port_num = port + 1;

		attr.dest_qp_num = hr_qp->qpn;
		memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr),
		       hr_dev->dev_addr[port],
		       ETH_ALEN);

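		/* Derive a link-local dgid from the port MAC (EUI-64, U/L bit flipped). */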
		memcpy(&dgid.raw, &subnet_prefix, sizeof(u64));
		memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3);
		memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3);
		dgid.raw[11] = 0xff;
		dgid.raw[12] = 0xfe;
		dgid.raw[8] ^= 2;
		rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw);

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
					    IB_QPS_RESET, IB_QPS_INIT);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, IB_QP_DEST_QPN,
					    IB_QPS_INIT, IB_QPS_RTR);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
					    IB_QPS_RTR, IB_QPS_RTS);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}
	}

	return 0;

create_lp_qp_failed:
	for (i -= 1; i >= 0; i--) {
		hr_qp = free_mr->mr_free_qp[i];
		if (hns_roce_v1_destroy_qp(&hr_qp->ibqp, NULL))
			dev_err(dev, "Destroy qp %d for mr free failed!\n", i);
	}

	hns_roce_dealloc_pd(pd, NULL);

alloc_pd_failed:
	kfree(pd);

alloc_mem_failed:
	hns_roce_ib_destroy_cq(cq, NULL);
alloc_cq_failed:
	kfree(cq);
	return ret;
}

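/* Destroy the reserved loopback QPs and release their shared CQ and PD. */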
static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_qp *hr_qp;
	int ret;
	int i;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		hr_qp = free_mr->mr_free_qp[i];
		if (!hr_qp)
			continue;

		ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp, NULL);
		if (ret)
			dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
				i, ret);
	}

	hns_roce_ib_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL);
	kfree(&free_mr->mr_free_cq->ib_cq);
	hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL);
	kfree(&free_mr->mr_free_pd->ibpd);
}

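/* Doorbell init: pick the default extend/event modes and apply them. */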
static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	u32 sdb_ext_mod;
	u32 odb_ext_mod;
	u32 sdb_evt_mod;
	u32 odb_evt_mod;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	memset(db, 0, sizeof(*db));

	sdb_ext_mod = HNS_ROCE_SDB_EXTEND_MODE;
	odb_ext_mod = HNS_ROCE_ODB_EXTEND_MODE;
	sdb_evt_mod = HNS_ROCE_SDB_NORMAL_MODE;
	odb_evt_mod = HNS_ROCE_ODB_POLL_MODE;

	db->sdb_ext_mod = sdb_ext_mod;
	db->odb_ext_mod = odb_ext_mod;

	ret = hns_roce_db_ext_init(hr_dev, sdb_ext_mod, odb_ext_mod);
	if (ret) {
		dev_err(dev, "Failed in extend DB configuration.\n");
		return ret;
	}

	hns_roce_set_db_event_mode(hr_dev, sdb_evt_mod, odb_evt_mod);

	return 0;
}

static void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work)
{
	struct hns_roce_recreate_lp_qp_work *lp_qp_work;
	struct hns_roce_dev *hr_dev;

	lp_qp_work = container_of(work, struct hns_roce_recreate_lp_qp_work,
				  work);
	hr_dev = to_hr_dev(lp_qp_work->ib_dev);

	hns_roce_v1_release_lp_qp(hr_dev);

	if (hns_roce_v1_rsv_lp_qp(hr_dev))
		dev_err(&hr_dev->pdev->dev, "create reserved qp failed\n");

	if (lp_qp_work->comp_flag)
		complete(lp_qp_work->comp);

	kfree(lp_qp_work);
}

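/*
 * Recreate the loopback QPs on the free-MR workqueue and poll a completion
 * until the work finishes or the timeout expires.
 */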
static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_recreate_lp_qp_work *lp_qp_work;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	struct completion comp;
	long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work),
			     GFP_KERNEL);
	if (!lp_qp_work)
		return -ENOMEM;

	INIT_WORK(&(lp_qp_work->work), hns_roce_v1_recreate_lp_qp_work_fn);

	lp_qp_work->ib_dev = &(hr_dev->ib_dev);
	lp_qp_work->comp = &comp;
	lp_qp_work->comp_flag = 1;

	init_completion(lp_qp_work->comp);

	queue_work(free_mr->free_mr_wq, &(lp_qp_work->work));

	while (end > 0) {
		if (try_wait_for_completion(&comp))
			return 0;
		msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE);
		end -= HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE;
	}

	lp_qp_work->comp_flag = 0;
	if (try_wait_for_completion(&comp))
		return 0;

	dev_warn(dev, "recreate lp qp failed 20s timeout and return failed!\n");
	return -ETIMEDOUT;
}

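/* Post a zero-length RDMA write on a loopback QP to flush outstanding work. */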
static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_send_wr send_wr;
	const struct ib_send_wr *bad_wr;
	int ret;

	memset(&send_wr, 0, sizeof(send_wr));
	send_wr.next = NULL;
	send_wr.num_sge = 0;
	send_wr.send_flags = 0;
	send_wr.sg_list = NULL;
	send_wr.wr_id = (unsigned long long)&send_wr;
	send_wr.opcode = IB_WR_RDMA_WRITE;

	ret = hns_roce_v1_post_send(&hr_qp->ibqp, &send_wr, &bad_wr);
	if (ret) {
		dev_err(dev, "Post write wqe for mr free failed(%d)!", ret);
		return ret;
	}

	return 0;
}

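/*
 * MR-free work: post one flush WQE per reserved loopback QP, then poll the
 * shared CQ until all completions arrive or the timeout expires.
 */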
static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
{
	struct hns_roce_mr_free_work *mr_work;
	struct ib_wc wc[HNS_ROCE_V1_RESV_QP];
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_cq *mr_free_cq;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_dev *hr_dev;
	struct hns_roce_mr *hr_mr;
	struct hns_roce_qp *hr_qp;
	struct device *dev;
	unsigned long end =
		msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
	int i;
	int ret;
	int ne = 0;

	mr_work = container_of(work, struct hns_roce_mr_free_work, work);
	hr_mr = (struct hns_roce_mr *)mr_work->mr;
	hr_dev = to_hr_dev(mr_work->ib_dev);
	dev = &hr_dev->pdev->dev;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;
	mr_free_cq = free_mr->mr_free_cq;

	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		hr_qp = free_mr->mr_free_qp[i];
		if (!hr_qp)
			continue;
		ne++;

		ret = hns_roce_v1_send_lp_wqe(hr_qp);
		if (ret) {
			dev_err(dev,
				"Send wqe (qp:0x%lx) for mr free failed(%d)!\n",
				hr_qp->qpn, ret);
			goto free_work;
		}
	}

	if (!ne) {
		dev_err(dev, "Reserved loop qp is absent!\n");
		goto free_work;
	}

	do {
		ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
		if (ret < 0 && hr_qp) {
			dev_err(dev,
				"(qp:0x%lx) starts, Poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n",
				hr_qp->qpn, ret, hr_mr->key, ne);
			goto free_work;
		}
		ne -= ret;
		usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000,
			     (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000);
	} while (ne && time_before_eq(jiffies, end));

	if (ne != 0)
		dev_err(dev,
			"Poll cqe for mr 0x%x free timeout! Remain %d cqe\n",
			hr_mr->key, ne);

free_work:
	if (mr_work->comp_flag)
		complete(mr_work->comp);
	kfree(mr_work);
}

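/*
 * Destroy the MPT entry, wait for the loopback flush to complete, then
 * release the PBL, the MTPT index and the umem backing the MR.
 */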
static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
				struct hns_roce_mr *mr, struct ib_udata *udata)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_mr_free_work *mr_work;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	struct completion comp;
	long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS;
	unsigned long start = jiffies;
	int npages;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	if (mr->enabled) {
		if (hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
				       & (hr_dev->caps.num_mtpts - 1)))
			dev_warn(dev, "HW2SW_MPT failed!\n");
	}

	mr_work = kzalloc(sizeof(*mr_work), GFP_KERNEL);
	if (!mr_work) {
		ret = -ENOMEM;
		goto free_mr;
	}

	INIT_WORK(&(mr_work->work), hns_roce_v1_mr_free_work_fn);

	mr_work->ib_dev = &(hr_dev->ib_dev);
	mr_work->comp = &comp;
	mr_work->comp_flag = 1;
	mr_work->mr = (void *)mr;
	init_completion(mr_work->comp);

	queue_work(free_mr->free_mr_wq, &(mr_work->work));

	while (end > 0) {
		if (try_wait_for_completion(&comp))
			goto free_mr;
		msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
		end -= HNS_ROCE_V1_FREE_MR_WAIT_VALUE;
	}

	mr_work->comp_flag = 0;
	if (try_wait_for_completion(&comp))
		goto free_mr;

	dev_warn(dev, "Free mr work 0x%x over 50s and failed!\n", mr->key);
	ret = -ETIMEDOUT;

free_mr:
	dev_dbg(dev, "Free mr 0x%x use 0x%x us.\n",
		mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start));

	if (mr->size != ~0ULL) {
		npages = ib_umem_page_count(mr->umem);
		dma_free_coherent(dev, npages * 8, mr->pbl_buf,
				  mr->pbl_dma_addr);
	}

	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
			     key_to_hw_index(mr->key), 0);

	ib_umem_release(mr->umem);

	kfree(mr);

	return ret;
}

static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	if (db->sdb_ext_mod) {
		dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
				  db->ext_db->sdb_buf_list->buf,
				  db->ext_db->sdb_buf_list->map);
		kfree(db->ext_db->sdb_buf_list);
	}

	if (db->odb_ext_mod) {
		dma_free_coherent(dev, HNS_ROCE_V1_EXT_ODB_SIZE,
				  db->ext_db->odb_buf_list->buf,
				  db->ext_db->odb_buf_list->map);
		kfree(db->ext_db->odb_buf_list);
	}

	kfree(db->ext_db);
}

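/*
 * RAQ init: allocate the extended RAQ buffer and program its base address,
 * depth, watermark and timeout-check registers.
 */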
static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
{
	int ret;
	u32 val;
	__le32 tmp;
	int raq_shift = 0;
	dma_addr_t addr;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_raq_table *raq;
	struct device *dev = &hr_dev->pdev->dev;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	raq = &priv->raq_table;

	raq->e_raq_buf = kzalloc(sizeof(*(raq->e_raq_buf)), GFP_KERNEL);
	if (!raq->e_raq_buf)
		return -ENOMEM;

	raq->e_raq_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_RAQ_SIZE,
						 &addr, GFP_KERNEL);
	if (!raq->e_raq_buf->buf) {
		ret = -ENOMEM;
		goto err_dma_alloc_raq;
	}
	raq->e_raq_buf->map = addr;

	roce_write(hr_dev, ROCEE_EXT_RAQ_REG, raq->e_raq_buf->map >> 12);

	raq_shift = ilog2(HNS_ROCE_V1_RAQ_SIZE / HNS_ROCE_V1_RAQ_ENTRY);
	val = roce_read(hr_dev, ROCEE_EXT_RAQ_H_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M,
		       ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S, raq_shift);

	roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M,
		       ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S,
		       raq->e_raq_buf->map >> 44);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_EXT_RAQ_H_REG, val);
	dev_dbg(dev, "Configure raq_shift 0x%x.\n", val);

	val = roce_read(hr_dev, ROCEE_RAQ_WL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_RAQ_WL_ROCEE_RAQ_WL_M,
		       ROCEE_RAQ_WL_ROCEE_RAQ_WL_S,
		       HNS_ROCE_V1_EXT_RAQ_WF);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_RAQ_WL_REG, val);
	dev_dbg(dev, "Configure raq_wl 0x%x.\n", val);

	val = roce_read(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S,
		       POL_TIME_INTERVAL_VAL);
	roce_set_bit(tmp, ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE, 1);
	roce_set_field(tmp,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S,
		       2);
	roce_set_bit(tmp,
		     ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S, 1);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG, val);
	dev_dbg(dev, "Configure WrmsPolTimeInterval 0x%x.\n", val);

	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	tmp = cpu_to_le32(val);
	roce_set_bit(tmp, ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S, 1);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
	dev_dbg(dev, "Configure GlbCfg = 0x%x.\n", val);

	return 0;

err_dma_alloc_raq:
	kfree(raq->e_raq_buf);
	return ret;
}

static void hns_roce_raq_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_raq_table *raq;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	raq = &priv->raq_table;

	dma_free_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, raq->e_raq_buf->buf,
			  raq->e_raq_buf->map);
	kfree(raq->e_raq_buf);
}

static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag)
{
	__le32 tmp;
	u32 val;

	if (enable_flag) {
		val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);

		tmp = cpu_to_le32(val);
		roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
			       ROCEE_GLB_CFG_ROCEE_PORT_ST_S,
			       ALL_PORT_VAL_OPEN);
		val = le32_to_cpu(tmp);
		roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
	} else {
		val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);

		tmp = cpu_to_le32(val);
		roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
			       ROCEE_GLB_CFG_ROCEE_PORT_ST_S, 0x0);
		val = le32_to_cpu(tmp);
		roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
	}
}

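/* Allocate the reserved base-address-table (BT) buffers for QPC, MTPT and CQC. */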
static int hns_roce_bt_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	int ret;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;

	priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev,
		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map,
		GFP_KERNEL);
	if (!priv->bt_table.qpc_buf.buf)
		return -ENOMEM;

	priv->bt_table.mtpt_buf.buf = dma_alloc_coherent(dev,
		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.mtpt_buf.map,
		GFP_KERNEL);
	if (!priv->bt_table.mtpt_buf.buf) {
		ret = -ENOMEM;
		goto err_failed_alloc_mtpt_buf;
	}

	priv->bt_table.cqc_buf.buf = dma_alloc_coherent(dev,
		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map,
		GFP_KERNEL);
	if (!priv->bt_table.cqc_buf.buf) {
		ret = -ENOMEM;
		goto err_failed_alloc_cqc_buf;
	}

	return 0;

err_failed_alloc_cqc_buf:
	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
		priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);

err_failed_alloc_mtpt_buf:
	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
		priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);

	return ret;
}

static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;

	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
		priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map);

	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
		priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);

	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
		priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
}

static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_buf_list *tptr_buf;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	tptr_buf = &priv->tptr_table.tptr_buf;

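	/*
	 * This buffer stores the CQ tail pointers (consumer indexes); the
	 * hardware reads it back when a CQ is nearly full.
	 */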
	tptr_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
					   &tptr_buf->map, GFP_KERNEL);
	if (!tptr_buf->buf)
		return -ENOMEM;

	hr_dev->tptr_dma_addr = tptr_buf->map;
	hr_dev->tptr_size = HNS_ROCE_V1_TPTR_BUF_SIZE;

	return 0;
}

static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_buf_list *tptr_buf;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	tptr_buf = &priv->tptr_table.tptr_buf;

	dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
			  tptr_buf->buf, tptr_buf->map);
}

static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr");
	if (!free_mr->free_mr_wq) {
		dev_err(dev, "Create free mr workqueue failed!\n");
		return -ENOMEM;
	}

	ret = hns_roce_v1_rsv_lp_qp(hr_dev);
	if (ret) {
		dev_err(dev, "Reserved loop qp failed(%d)!\n", ret);
		flush_workqueue(free_mr->free_mr_wq);
		destroy_workqueue(free_mr->free_mr_wq);
	}

	return ret;
}

static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	flush_workqueue(free_mr->free_mr_wq);
	destroy_workqueue(free_mr->free_mr_wq);

	hns_roce_v1_release_lp_qp(hr_dev);
}

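/**
 * hns_roce_v1_reset - reset RoCE
 * @hr_dev: RoCE device struct pointer
 * @dereset: true -- drop reset, false -- reset
 * Return: 0 on success, negative errno on failure.
 */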
static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
{
	struct device_node *dsaf_node;
	struct device *dev = &hr_dev->pdev->dev;
	struct device_node *np = dev->of_node;
	struct fwnode_handle *fwnode;
	int ret;

	if (dev_of_node(dev)) {
		dsaf_node = of_parse_phandle(np, "dsaf-handle", 0);
		if (!dsaf_node) {
			dev_err(dev, "could not find dsaf-handle\n");
			return -EINVAL;
		}
		fwnode = &dsaf_node->fwnode;
	} else if (is_acpi_device_node(dev->fwnode)) {
		struct fwnode_reference_args args;

		ret = acpi_node_get_property_reference(dev->fwnode,
						       "dsaf-handle", 0, &args);
		if (ret) {
			dev_err(dev, "could not find dsaf-handle\n");
			return ret;
		}
		fwnode = args.fwnode;
	} else {
		dev_err(dev, "cannot read data from DT or ACPI\n");
		return -ENXIO;
	}

	ret = hns_dsaf_roce_reset(fwnode, false);
	if (ret)
		return ret;

	if (dereset) {
		msleep(SLEEP_TIME_INTERVAL);
		ret = hns_dsaf_roce_reset(fwnode, true);
	}

	return ret;
}

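/* Fill in the static capability profile of the hip06 (HW v1) engine. */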
static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
{
	int i = 0;
	struct hns_roce_caps *caps = &hr_dev->caps;

	hr_dev->vendor_id = roce_read(hr_dev, ROCEE_VENDOR_ID_REG);
	hr_dev->vendor_part_id = roce_read(hr_dev, ROCEE_VENDOR_PART_ID_REG);
	hr_dev->sys_image_guid = roce_read(hr_dev, ROCEE_SYS_IMAGE_GUID_L_REG) |
				 ((u64)roce_read(hr_dev,
					ROCEE_SYS_IMAGE_GUID_H_REG) << 32);
	hr_dev->hw_rev = HNS_ROCE_HW_VER1;

	caps->num_qps = HNS_ROCE_V1_MAX_QP_NUM;
	caps->max_wqes = HNS_ROCE_V1_MAX_WQE_NUM;
	caps->min_wqes = HNS_ROCE_MIN_WQE_NUM;
	caps->num_cqs = HNS_ROCE_V1_MAX_CQ_NUM;
	caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
	caps->max_cqes = HNS_ROCE_V1_MAX_CQE_NUM;
	caps->max_sq_sg = HNS_ROCE_V1_SG_NUM;
	caps->max_rq_sg = HNS_ROCE_V1_SG_NUM;
	caps->max_sq_inline = HNS_ROCE_V1_INLINE_SIZE;
	caps->num_uars = HNS_ROCE_V1_UAR_NUM;
	caps->phy_num_uars = HNS_ROCE_V1_PHY_UAR_NUM;
	caps->num_aeq_vectors = HNS_ROCE_V1_AEQE_VEC_NUM;
	caps->num_comp_vectors = HNS_ROCE_V1_COMP_VEC_NUM;
	caps->num_other_vectors = HNS_ROCE_V1_ABNORMAL_VEC_NUM;
	caps->num_mtpts = HNS_ROCE_V1_MAX_MTPT_NUM;
	caps->num_mtt_segs = HNS_ROCE_V1_MAX_MTT_SEGS;
	caps->num_pds = HNS_ROCE_V1_MAX_PD_NUM;
	caps->max_qp_init_rdma = HNS_ROCE_V1_MAX_QP_INIT_RDMA;
	caps->max_qp_dest_rdma = HNS_ROCE_V1_MAX_QP_DEST_RDMA;
	caps->max_sq_desc_sz = HNS_ROCE_V1_MAX_SQ_DESC_SZ;
	caps->max_rq_desc_sz = HNS_ROCE_V1_MAX_RQ_DESC_SZ;
	caps->qpc_entry_sz = HNS_ROCE_V1_QPC_ENTRY_SIZE;
	caps->irrl_entry_sz = HNS_ROCE_V1_IRRL_ENTRY_SIZE;
	caps->cqc_entry_sz = HNS_ROCE_V1_CQC_ENTRY_SIZE;
	caps->mtpt_entry_sz = HNS_ROCE_V1_MTPT_ENTRY_SIZE;
	caps->mtt_entry_sz = HNS_ROCE_V1_MTT_ENTRY_SIZE;
	caps->cq_entry_sz = HNS_ROCE_V1_CQE_ENTRY_SIZE;
	caps->page_size_cap = HNS_ROCE_V1_PAGE_SIZE_SUPPORT;
	caps->reserved_lkey = 0;
	caps->reserved_pds = 0;
	caps->reserved_mrws = 1;
	caps->reserved_uars = 0;
	caps->reserved_cqs = 0;
	caps->reserved_qps = 12;
	caps->chunk_sz = HNS_ROCE_V1_TABLE_CHUNK_SIZE;

	for (i = 0; i < caps->num_ports; i++)
		caps->pkey_table_len[i] = 1;

	for (i = 0; i < caps->num_ports; i++) {
		if (i >= (HNS_ROCE_V1_GID_NUM % caps->num_ports))
			caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
						 caps->num_ports;
		else
			caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
						 caps->num_ports + 1;
	}

	caps->ceqe_depth = HNS_ROCE_V1_COMP_EQE_NUM;
	caps->aeqe_depth = HNS_ROCE_V1_ASYNC_EQE_NUM;
	caps->local_ca_ack_delay = roce_read(hr_dev, ROCEE_ACK_DELAY_REG);
	caps->max_mtu = IB_MTU_2048;

	return 0;
}

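/*
 * Engine init: set up the DMAE user config, then the doorbells, RAQ, BT,
 * tptr buffer and free-MR resources, and finally enable the ports.
 */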
static int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
{
	int ret;
	u32 val;
	__le32 tmp;
	struct device *dev = &hr_dev->pdev->dev;

	val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG1_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M,
		       ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S, 0xf);
	roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M,
		       ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S,
		       1 << PAGES_SHIFT_16);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_DMAE_USER_CFG1_REG, val);

	val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG2_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M,
		       ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S, 0xf);
	roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M,
		       ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S,
		       1 << PAGES_SHIFT_16);
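	/*
	 * Write CFG2 back, mirroring the CFG1 write above. Assumption: the
	 * original listing computed tmp here but never wrote it out.
	 */
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_DMAE_USER_CFG2_REG, val);
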
	ret = hns_roce_db_init(hr_dev);
	if (ret) {
		dev_err(dev, "doorbell init failed!\n");
		return ret;
	}

	ret = hns_roce_raq_init(hr_dev);
	if (ret) {
		dev_err(dev, "raq init failed!\n");
		goto error_failed_raq_init;
	}

	ret = hns_roce_bt_init(hr_dev);
	if (ret) {
		dev_err(dev, "bt init failed!\n");
		goto error_failed_bt_init;
	}

	ret = hns_roce_tptr_init(hr_dev);
	if (ret) {
		dev_err(dev, "tptr init failed!\n");
		goto error_failed_tptr_init;
	}

	ret = hns_roce_free_mr_init(hr_dev);
	if (ret) {
		dev_err(dev, "free mr init failed!\n");
		goto error_failed_free_mr_init;
	}

	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);

	return 0;

error_failed_free_mr_init:
	hns_roce_tptr_free(hr_dev);

error_failed_tptr_init:
	hns_roce_bt_free(hr_dev);

error_failed_bt_init:
	hns_roce_raq_free(hr_dev);

error_failed_raq_init:
	hns_roce_db_free(hr_dev);
	return ret;
}

static void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
{
	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
	hns_roce_free_mr_free(hr_dev);
	hns_roce_tptr_free(hr_dev);
	hns_roce_bt_free(hr_dev);
	hns_roce_raq_free(hr_dev);
	hns_roce_db_free(hr_dev);
}

static int hns_roce_v1_cmd_pending(struct hns_roce_dev *hr_dev)
{
	u32 status = readl(hr_dev->reg_base + ROCEE_MB6_REG);

	return (!!(status & (1 << HCR_GO_BIT)));
}

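/*
 * Post a mailbox command: wait for the GO bit to clear, write the
 * parameters, then write the command word to start execution.
 */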
static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
				 u64 out_param, u32 in_modifier, u8 op_modifier,
				 u16 op, u16 token, int event)
{
	u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base + ROCEE_MB1_REG);
	unsigned long end;
	u32 val = 0;
	__le32 tmp;

	end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
	while (hns_roce_v1_cmd_pending(hr_dev)) {
		if (time_after(jiffies, end)) {
			dev_err(hr_dev->dev, "jiffies=%d end=%d\n",
				(int)jiffies, (int)end);
			return -EAGAIN;
		}
		cond_resched();
	}

	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
		       op);
	roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
		       ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
	roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
	roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
	roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_TOKEN_M,
		       ROCEE_MB6_ROCEE_MB_TOKEN_S, token);

	val = le32_to_cpu(tmp);
	writeq(in_param, hcr + 0);
	writeq(out_param, hcr + 2);
	writel(in_modifier, hcr + 4);

	wmb();

	writel(val, hcr + 5);

	return 0;
}

static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev,
				unsigned long timeout)
{
	u8 __iomem *hcr = hr_dev->reg_base + ROCEE_MB1_REG;
	unsigned long end = 0;
	u32 status = 0;

	end = msecs_to_jiffies(timeout) + jiffies;
	while (hns_roce_v1_cmd_pending(hr_dev) && time_before(jiffies, end))
		cond_resched();

	if (hns_roce_v1_cmd_pending(hr_dev)) {
		dev_err(hr_dev->dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
		return -ETIMEDOUT;
	}

	status = le32_to_cpu((__force __le32)
			      __raw_readl(hcr + HCR_STATUS_OFFSET));
	if ((status & STATUS_MASK) != 0x1) {
		dev_err(hr_dev->dev, "mailbox status 0x%x!\n", status);
		return -EBUSY;
	}

	return 0;
}

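/* Program one 128-bit GID into the per-port GID registers, 32 bits at a time. */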
static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port,
			       int gid_index, const union ib_gid *gid,
			       const struct ib_gid_attr *attr)
{
	unsigned long flags;
	u32 *p = NULL;
	u8 gid_idx = 0;

	gid_idx = hns_get_gid_index(hr_dev, port, gid_index);

	spin_lock_irqsave(&hr_dev->iboe.lock, flags);

	p = (u32 *)&gid->raw[0];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_L_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	p = (u32 *)&gid->raw[4];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_ML_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	p = (u32 *)&gid->raw[8];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_MH_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	p = (u32 *)&gid->raw[0xc];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);

	return 0;
}

static int hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
			       u8 *addr)
{
	u32 reg_smac_l;
	u16 reg_smac_h;
	__le32 tmp;
	u16 *p_h;
	u32 *p;
	u32 val;

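	/*
	 * When the MAC changes, loopback may fail because smac no longer
	 * equals dmac, so the reserved loopback QPs must be recreated.
	 */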
	if (hr_dev->hw->dereg_mr) {
		int ret;

		ret = hns_roce_v1_recreate_lp_qp(hr_dev);
		if (ret && ret != -ETIMEDOUT)
			return ret;
	}

	p = (u32 *)(&addr[0]);
	reg_smac_l = *p;
	roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_SMAC_L_0_REG +
		       PHY_PORT_OFFSET * phy_port);

	val = roce_read(hr_dev,
			ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
	tmp = cpu_to_le32(val);
	p_h = (u16 *)(&addr[4]);
	reg_smac_h = *p_h;
	roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_SMAC_H_M,
		       ROCEE_SMAC_H_ROCEE_SMAC_H_S, reg_smac_h);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
		   val);

	return 0;
}

static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
				enum ib_mtu mtu)
{
	__le32 tmp;
	u32 val;

	val = roce_read(hr_dev,
			ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_PORT_MTU_M,
		       ROCEE_SMAC_H_ROCEE_PORT_MTU_S, mtu);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
		   val);
}

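/*
 * Write an MPT entry for an MR. Up to HNS_ROCE_MAX_INNER_MTPT_NUM page
 * addresses are cached inline in the entry; the PBL address covers the rest.
 */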
static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
				  unsigned long mtpt_idx)
{
	struct hns_roce_v1_mpt_entry *mpt_entry;
	struct sg_dma_page_iter sg_iter;
	u64 *pages;
	int i;

	mpt_entry = (struct hns_roce_v1_mpt_entry *)mb_buf;
	memset(mpt_entry, 0, sizeof(*mpt_entry));

	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_STATE_M,
		       MPT_BYTE_4_KEY_STATE_S, KEY_VALID);
	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_M,
		       MPT_BYTE_4_KEY_S, mr->key);
	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_PAGE_SIZE_M,
		       MPT_BYTE_4_PAGE_SIZE_S, MR_SIZE_4K);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_TYPE_S, 0);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_BIND_ENABLE_S,
		     (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_OWN_S, 0);
	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_MEMORY_LOCATION_TYPE_M,
		       MPT_BYTE_4_MEMORY_LOCATION_TYPE_S, mr->type);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_ATOMIC_S, 0);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_LOCAL_WRITE_S,
		     (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_WRITE_S,
		     (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_READ_S,
		     (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_INVAL_ENABLE_S,
		     0);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_ADDRESS_TYPE_S, 0);

	roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
		       MPT_BYTE_12_PBL_ADDR_H_S, 0);
	roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_MW_BIND_COUNTER_M,
		       MPT_BYTE_12_MW_BIND_COUNTER_S, 0);

	mpt_entry->virt_addr_l = cpu_to_le32((u32)mr->iova);
	mpt_entry->virt_addr_h = cpu_to_le32((u32)(mr->iova >> 32));
	mpt_entry->length = cpu_to_le32((u32)mr->size);

	roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_PD_M,
		       MPT_BYTE_28_PD_S, mr->pd);
	roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_L_KEY_IDX_L_M,
		       MPT_BYTE_28_L_KEY_IDX_L_S, mtpt_idx);
	roce_set_field(mpt_entry->mpt_byte_64, MPT_BYTE_64_L_KEY_IDX_H_M,
		       MPT_BYTE_64_L_KEY_IDX_H_S, mtpt_idx >> MTPT_IDX_SHIFT);

	if (mr->type == MR_TYPE_DMA)
		return 0;

	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	i = 0;
	for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
		pages[i] = ((u64)sg_page_iter_dma_address(&sg_iter)) >> 12;

		if (i >= HNS_ROCE_MAX_INNER_MTPT_NUM)
			break;
		i++;
	}

	for (i = 0; i < HNS_ROCE_MAX_INNER_MTPT_NUM; i++) {
		switch (i) {
		case 0:
			mpt_entry->pa0_l = cpu_to_le32((u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_36,
				       MPT_BYTE_36_PA0_H_M,
				       MPT_BYTE_36_PA0_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_32));
			break;
		case 1:
			roce_set_field(mpt_entry->mpt_byte_36,
				       MPT_BYTE_36_PA1_L_M,
				       MPT_BYTE_36_PA1_L_S, (u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_40,
				       MPT_BYTE_40_PA1_H_M,
				       MPT_BYTE_40_PA1_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_24));
			break;
		case 2:
			roce_set_field(mpt_entry->mpt_byte_40,
				       MPT_BYTE_40_PA2_L_M,
				       MPT_BYTE_40_PA2_L_S, (u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_44,
				       MPT_BYTE_44_PA2_H_M,
				       MPT_BYTE_44_PA2_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_16));
			break;
		case 3:
			roce_set_field(mpt_entry->mpt_byte_44,
				       MPT_BYTE_44_PA3_L_M,
				       MPT_BYTE_44_PA3_L_S, (u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_48,
				       MPT_BYTE_48_PA3_H_M,
				       MPT_BYTE_48_PA3_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_8));
			break;
		case 4:
			mpt_entry->pa4_l = cpu_to_le32((u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_56,
				       MPT_BYTE_56_PA4_H_M,
				       MPT_BYTE_56_PA4_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_32));
			break;
		case 5:
			roce_set_field(mpt_entry->mpt_byte_56,
				       MPT_BYTE_56_PA5_L_M,
				       MPT_BYTE_56_PA5_L_S, (u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_60,
				       MPT_BYTE_60_PA5_H_M,
				       MPT_BYTE_60_PA5_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_24));
			break;
		case 6:
			roce_set_field(mpt_entry->mpt_byte_60,
				       MPT_BYTE_60_PA6_L_M,
				       MPT_BYTE_60_PA6_L_S, (u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_64,
				       MPT_BYTE_64_PA6_H_M,
				       MPT_BYTE_64_PA6_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_16));
			break;
		default:
			break;
		}
	}

	free_page((unsigned long) pages);

	mpt_entry->pbl_addr_l = cpu_to_le32((u32)(mr->pbl_dma_addr));

	roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
		       MPT_BYTE_12_PBL_ADDR_H_S,
		       ((u32)(mr->pbl_dma_addr >> 32)));

	return 0;
}

static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
{
	return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
				   n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
}

static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
{
	struct hns_roce_cqe *hr_cqe = get_cqe(hr_cq, n & hr_cq->ib_cq.cqe);

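	/* The CQE is valid only when its owner bit matches the current
	 * pass of the consumer over the CQ ring
	 */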
	return (roce_get_bit(hr_cqe->cqe_byte_4, CQE_BYTE_4_OWNER_S) ^
		!!(n & (hr_cq->ib_cq.cqe + 1))) ? hr_cqe : NULL;
}

static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
{
	return get_sw_cqe(hr_cq, hr_cq->cons_index);
}

static void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
{
	__le32 doorbell[2];

	doorbell[0] = cpu_to_le32(cons_index & ((hr_cq->cq_depth << 1) - 1));
	doorbell[1] = 0;
	roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
	roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
		       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
	roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
		       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 0);
	roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
		       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, hr_cq->cqn);

	hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
}

static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				   struct hns_roce_srq *srq)
{
	struct hns_roce_cqe *cqe, *dest;
	u32 prod_index;
	int nfreed = 0;
	u8 owner_bit;

	for (prod_index = hr_cq->cons_index; get_sw_cqe(hr_cq, prod_index);
	     ++prod_index) {
		if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
			break;
	}

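	/*
	 * Now walk backwards through the CQ, removing CQEs that belong to
	 * this QP by overwriting them with the entries that follow.
	 */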
	while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
		cqe = get_cqe(hr_cq, prod_index & hr_cq->ib_cq.cqe);
		if ((roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
				    CQE_BYTE_16_LOCAL_QPN_S) &
		     HNS_ROCE_CQE_QPN_MASK) == qpn) {
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(hr_cq, (prod_index + nfreed) &
				       hr_cq->ib_cq.cqe);
			owner_bit = roce_get_bit(dest->cqe_byte_4,
						 CQE_BYTE_4_OWNER_S);
			memcpy(dest, cqe, sizeof(*cqe));
			roce_set_bit(dest->cqe_byte_4, CQE_BYTE_4_OWNER_S,
				     owner_bit);
		}
	}

	if (nfreed) {
		hr_cq->cons_index += nfreed;

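		/*
		 * Make sure the updated buffer contents are written out
		 * before the consumer index is updated.
		 */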
		wmb();

		hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
	}
}

static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				 struct hns_roce_srq *srq)
{
	spin_lock_irq(&hr_cq->lock);
	__hns_roce_v1_cq_clean(hr_cq, qpn, srq);
	spin_unlock_irq(&hr_cq->lock);
}

static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
				  struct hns_roce_cq *hr_cq, void *mb_buf,
				  u64 *mtts, dma_addr_t dma_handle, int nent,
				  u32 vector)
{
	struct hns_roce_cq_context *cq_context = NULL;
	struct hns_roce_buf_list *tptr_buf;
	struct hns_roce_v1_priv *priv;
	dma_addr_t tptr_dma_addr;
	int offset;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	tptr_buf = &priv->tptr_table.tptr_buf;

	cq_context = mb_buf;
	memset(cq_context, 0, sizeof(*cq_context));

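	/* Locate the tptr entry for this CQ */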
	offset = hr_cq->cqn * HNS_ROCE_V1_TPTR_ENTRY_SIZE;
	tptr_dma_addr = tptr_buf->map + offset;
	hr_cq->tptr_addr = (u16 *)(tptr_buf->buf + offset);

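	/* Register cq_context members */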
	roce_set_field(cq_context->cqc_byte_4,
		       CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M,
		       CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S, CQ_STATE_VALID);
	roce_set_field(cq_context->cqc_byte_4, CQ_CONTEXT_CQC_BYTE_4_CQN_M,
		       CQ_CONTEXT_CQC_BYTE_4_CQN_S, hr_cq->cqn);

	cq_context->cq_bt_l = cpu_to_le32((u32)dma_handle);

	roce_set_field(cq_context->cqc_byte_12,
		       CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M,
		       CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S,
		       ((u64)dma_handle >> 32));
	roce_set_field(cq_context->cqc_byte_12,
		       CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M,
		       CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S,
		       ilog2((unsigned int)nent));
	roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M,
		       CQ_CONTEXT_CQC_BYTE_12_CEQN_S, vector);

	cq_context->cur_cqe_ba0_l = cpu_to_le32((u32)(mtts[0]));

	roce_set_field(cq_context->cqc_byte_20,
		       CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M,
		       CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S, (mtts[0]) >> 32);

	roce_set_field(cq_context->cqc_byte_20,
		       CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M,
		       CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S, 0);

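	/*
	 * The tptr DMA address is split at bit 12 (the buffer is 4K
	 * aligned): bits 12..43 go to the low word below, and the
	 * remaining high bits (>> 44) go to this field.
	 */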
	roce_set_field(cq_context->cqc_byte_20,
		       CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M,
		       CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S,
		       tptr_dma_addr >> 44);

	cq_context->cqe_tptr_addr_l = cpu_to_le32((u32)(tptr_dma_addr >> 12));

	roce_set_field(cq_context->cqc_byte_32,
		       CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M,
		       CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S, 0);
	roce_set_bit(cq_context->cqc_byte_32,
		     CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S, 0);
	roce_set_bit(cq_context->cqc_byte_32,
		     CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S, 0);
	roce_set_bit(cq_context->cqc_byte_32,
		     CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S, 0);
	roce_set_bit(cq_context->cqc_byte_32,
		     CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S,
		     0);

	roce_set_field(cq_context->cqc_byte_32,
		       CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M,
		       CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0);
}

static int hns_roce_v1_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return -EOPNOTSUPP;
}

static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq,
				     enum ib_cq_notify_flags flags)
{
	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
	u32 notification_flag;
	__le32 doorbell[2] = {};

	notification_flag = (flags & IB_CQ_SOLICITED_MASK) ==
			    IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL;

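	/*
	 * Arm the CQ: notify on the next solicited completion when
	 * IB_CQ_SOLICITED was requested, otherwise on any next completion.
	 */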
	doorbell[0] =
		cpu_to_le32(hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
	roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
	roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
		       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
	roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
		       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 1);
	roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
		       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S,
		       hr_cq->cqn | notification_flag);

	hns_roce_write64_k(doorbell, hr_cq->cq_db_l);

	return 0;
}

static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
				struct hns_roce_qp **cur_qp, struct ib_wc *wc)
{
	int qpn;
	int is_send;
	u16 wqe_ctr;
	u32 status;
	u32 opcode;
	struct hns_roce_cqe *cqe;
	struct hns_roce_qp *hr_qp;
	struct hns_roce_wq *wq;
	struct hns_roce_wqe_ctrl_seg *sq_wqe;
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
	struct device *dev = &hr_dev->pdev->dev;

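	/* Find the CQE at the current consumer index */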
	cqe = next_cqe_sw(hr_cq);
	if (!cqe)
		return -EAGAIN;

	++hr_cq->cons_index;

	rmb();

	is_send = !(roce_get_bit(cqe->cqe_byte_4, CQE_BYTE_4_SQ_RQ_FLAG_S));

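	/*
	 * CQEs for QP0/QP1 carry a local qpn of 0 or 1; rebuild the real
	 * qpn by folding in the port number.
	 */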
	if (roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
			   CQE_BYTE_16_LOCAL_QPN_S) <= 1) {
		qpn = roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_PORT_NUM_M,
				     CQE_BYTE_20_PORT_NUM_S) +
		      roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
				     CQE_BYTE_16_LOCAL_QPN_S) *
		      HNS_ROCE_MAX_PORTS;
	} else {
		qpn = roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
				     CQE_BYTE_16_LOCAL_QPN_S);
	}

	if (!*cur_qp || (qpn & HNS_ROCE_CQE_QPN_MASK) != (*cur_qp)->qpn) {
		hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
		if (unlikely(!hr_qp)) {
			dev_err(dev, "CQ %06lx with entry for unknown QPN %06x\n",
				hr_cq->cqn, (qpn & HNS_ROCE_CQE_QPN_MASK));
			return -EINVAL;
		}

		*cur_qp = hr_qp;
	}

	wc->qp = &(*cur_qp)->ibqp;
	wc->vendor_err = 0;

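	/* Map the CQE status onto an ib_wc status */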
	status = roce_get_field(cqe->cqe_byte_4,
				CQE_BYTE_4_STATUS_OF_THE_OPERATION_M,
				CQE_BYTE_4_STATUS_OF_THE_OPERATION_S) &
		 HNS_ROCE_CQE_STATUS_MASK;
	switch (status) {
	case HNS_ROCE_CQE_SUCCESS:
		wc->status = IB_WC_SUCCESS;
		break;
	case HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

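	/* A completion in error needs no further CQE parsing */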
	if (wc->status != IB_WC_SUCCESS)
		return 0;

	if (is_send) {
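		/* Fetch the send WQE that this CQE reports on */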
		sq_wqe = get_send_wqe(*cur_qp, roce_get_field(cqe->cqe_byte_4,
						CQE_BYTE_4_WQE_INDEX_M,
						CQE_BYTE_4_WQE_INDEX_S) &
					       ((*cur_qp)->sq.wqe_cnt - 1));
		switch (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_OPCODE_MASK) {
		case HNS_ROCE_WQE_OPCODE_SEND:
			wc->opcode = IB_WC_SEND;
			break;
		case HNS_ROCE_WQE_OPCODE_RDMA_READ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = le32_to_cpu(cqe->byte_cnt);
			break;
		case HNS_ROCE_WQE_OPCODE_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case HNS_ROCE_WQE_OPCODE_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		case HNS_ROCE_WQE_OPCODE_UD_SEND:
			wc->opcode = IB_WC_SEND;
			break;
		default:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		}
		wc->wc_flags = (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_IMM ?
				IB_WC_WITH_IMM : 0);

		wq = &(*cur_qp)->sq;
		if ((*cur_qp)->sq_signal_bits) {
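			/*
			 * Advance the tail to the WQE this CQE refers to
			 * first, since intermediate WQEs may not have
			 * generated completions of their own.
			 */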
			wqe_ctr = (u16)roce_get_field(cqe->cqe_byte_4,
						      CQE_BYTE_4_WQE_INDEX_M,
						      CQE_BYTE_4_WQE_INDEX_S);
			wq->tail += (wqe_ctr - (u16)wq->tail) &
				    (wq->wqe_cnt - 1);
		}
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else {
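		/* A receive completion */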
		wc->byte_len = le32_to_cpu(cqe->byte_cnt);
		opcode = roce_get_field(cqe->cqe_byte_4,
					CQE_BYTE_4_OPERATION_TYPE_M,
					CQE_BYTE_4_OPERATION_TYPE_S) &
			 HNS_ROCE_CQE_OPCODE_MASK;
		switch (opcode) {
		case HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE:
			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data =
				cpu_to_be32(le32_to_cpu(cqe->immediate_data));
			break;
		case HNS_ROCE_OPCODE_SEND_DATA_RECEIVE:
			if (roce_get_bit(cqe->cqe_byte_4,
					 CQE_BYTE_4_IMM_INDICATOR_S)) {
				wc->opcode = IB_WC_RECV;
				wc->wc_flags = IB_WC_WITH_IMM;
				wc->ex.imm_data = cpu_to_be32(
					le32_to_cpu(cqe->immediate_data));
			} else {
				wc->opcode = IB_WC_RECV;
				wc->wc_flags = 0;
			}
			break;
		default:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		}

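		/* Update tail pointer and record wr_id */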
		wq = &(*cur_qp)->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
		wc->sl = (u8)roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_SL_M,
					    CQE_BYTE_20_SL_S);
		wc->src_qp = (u8)roce_get_field(cqe->cqe_byte_20,
						CQE_BYTE_20_REMOTE_QPN_M,
						CQE_BYTE_20_REMOTE_QPN_S);
		wc->wc_flags |= (roce_get_bit(cqe->cqe_byte_20,
					      CQE_BYTE_20_GRH_PRESENT_S) ?
				 IB_WC_GRH : 0);
		wc->pkey_index = (u16)roce_get_field(cqe->cqe_byte_28,
						     CQE_BYTE_28_P_KEY_IDX_M,
						     CQE_BYTE_28_P_KEY_IDX_S);
	}

	return 0;
}

int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
	struct hns_roce_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;
	int ret = 0;

	spin_lock_irqsave(&hr_cq->lock, flags);

	for (npolled = 0; npolled < num_entries; ++npolled) {
		ret = hns_roce_v1_poll_one(hr_cq, &cur_qp, wc + npolled);
		if (ret)
			break;
	}

	if (npolled) {
		*hr_cq->tptr_addr = hr_cq->cons_index &
				    ((hr_cq->cq_depth << 1) - 1);

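		/* Make the tptr update visible before ringing the doorbell */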
		wmb();
		hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
	}

	spin_unlock_irqrestore(&hr_cq->lock, flags);

	if (ret == 0 || ret == -EAGAIN)
		return npolled;
	else
		return ret;
}

static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
				 struct hns_roce_hem_table *table, int obj,
				 int step_idx)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	unsigned long flags = 0;
	long end = HW_SYNC_TIMEOUT_MSECS;
	__le32 bt_cmd_val[2] = {0};
	void __iomem *bt_cmd;
	u64 bt_ba = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;

	switch (table->type) {
	case HEM_TYPE_QPC:
		bt_ba = priv->bt_table.qpc_buf.map >> 12;
		break;
	case HEM_TYPE_MTPT:
		bt_ba = priv->bt_table.mtpt_buf.map >> 12;
		break;
	case HEM_TYPE_CQC:
		bt_ba = priv->bt_table.cqc_buf.map >> 12;
		break;
	case HEM_TYPE_SRQC:
		dev_dbg(dev, "HEM_TYPE_SRQC not supported.\n");
		return -EINVAL;
	default:
		return 0;
	}
	roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
		       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type);
	roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
		       ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
	roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
	roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);

	spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);

	bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;

	while (1) {
		if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
			if (!end) {
				dev_err(dev, "Write bt_cmd err, hw_sync is not zero.\n");
				spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
						       flags);
				return -EBUSY;
			}
		} else {
			break;
		}
		mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
		end -= HW_SYNC_SLEEP_TIME_INTERVAL;
	}

	bt_cmd_val[0] = cpu_to_le32(bt_ba);
	roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
		       ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32);
	hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);

	spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);

	return 0;
}

static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
				 struct hns_roce_mtt *mtt,
				 enum hns_roce_qp_state cur_state,
				 enum hns_roce_qp_state new_state,
				 struct hns_roce_qp_context *context,
				 struct hns_roce_qp *hr_qp)
{
	static const u16
	op[HNS_ROCE_QP_NUM_STATE][HNS_ROCE_QP_NUM_STATE] = {
		[HNS_ROCE_QP_STATE_RST] = {
		[HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
		[HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
		[HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
		},
		[HNS_ROCE_QP_STATE_INIT] = {
		[HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
		[HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
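		/* Note: the INIT to INIT transition reuses the RST2INIT
		 * command rather than a dedicated INIT2INIT one.
		 */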
		[HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
		[HNS_ROCE_QP_STATE_RTR] = HNS_ROCE_CMD_INIT2RTR_QP,
		},
		[HNS_ROCE_QP_STATE_RTR] = {
		[HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
		[HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
		[HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTR2RTS_QP,
		},
		[HNS_ROCE_QP_STATE_RTS] = {
		[HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
		[HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
		[HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTS2RTS_QP,
		[HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_RTS2SQD_QP,
		},
		[HNS_ROCE_QP_STATE_SQD] = {
		[HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
		[HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
		[HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_SQD2RTS_QP,
		[HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_SQD2SQD_QP,
		},
		[HNS_ROCE_QP_STATE_ERR] = {
		[HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
		[HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
		}
	};

	struct hns_roce_cmd_mailbox *mailbox;
	struct device *dev = &hr_dev->pdev->dev;
	int ret = 0;

	if (cur_state >= HNS_ROCE_QP_NUM_STATE ||
	    new_state >= HNS_ROCE_QP_NUM_STATE ||
	    !op[cur_state][new_state]) {
		dev_err(dev, "[modify_qp] unsupported transition from state %d to %d\n",
			cur_state, new_state);
		return -EINVAL;
	}

	if (op[cur_state][new_state] == HNS_ROCE_CMD_2RST_QP)
		return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
					 HNS_ROCE_CMD_2RST_QP,
					 HNS_ROCE_CMD_TIMEOUT_MSECS);

	if (op[cur_state][new_state] == HNS_ROCE_CMD_2ERR_QP)
		return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
					 HNS_ROCE_CMD_2ERR_QP,
					 HNS_ROCE_CMD_TIMEOUT_MSECS);

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, context, sizeof(*context));

	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
				op[cur_state][new_state],
				HNS_ROCE_CMD_TIMEOUT_MSECS);

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	return ret;
}

static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
			     int attr_mask, enum ib_qp_state cur_state,
			     enum ib_qp_state new_state)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_sqp_context *context;
	struct device *dev = &hr_dev->pdev->dev;
	dma_addr_t dma_handle = 0;
	u32 __iomem *addr;
	int rq_pa_start;
	__le32 tmp;
	u32 reg_val;
	u64 *mtts;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

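	/* Search QP buf's MTTs */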
	mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
				   hr_qp->mtt.first_seg, &dma_handle);
	if (!mtts) {
		dev_err(dev, "qp buf pa find failed\n");
		goto out;
	}

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		roce_set_field(context->qp1c_bytes_4,
			       QP1C_BYTES_4_SQ_WQE_SHIFT_M,
			       QP1C_BYTES_4_SQ_WQE_SHIFT_S,
			       ilog2((unsigned int)hr_qp->sq.wqe_cnt));
		roce_set_field(context->qp1c_bytes_4,
			       QP1C_BYTES_4_RQ_WQE_SHIFT_M,
			       QP1C_BYTES_4_RQ_WQE_SHIFT_S,
			       ilog2((unsigned int)hr_qp->rq.wqe_cnt));
		roce_set_field(context->qp1c_bytes_4, QP1C_BYTES_4_PD_M,
			       QP1C_BYTES_4_PD_S, to_hr_pd(ibqp->pd)->pdn);

		context->sq_rq_bt_l = cpu_to_le32((u32)(dma_handle));
		roce_set_field(context->qp1c_bytes_12,
			       QP1C_BYTES_12_SQ_RQ_BT_H_M,
			       QP1C_BYTES_12_SQ_RQ_BT_H_S,
			       ((u32)(dma_handle >> 32)));

		roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M,
			       QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head);
		roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M,
			       QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port);
		roce_set_bit(context->qp1c_bytes_16,
			     QP1C_BYTES_16_SIGNALING_TYPE_S,
			     hr_qp->sq_signal_bits);
		roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S,
			     1);
		roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S,
			     1);
		roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_QP1_ERR_S,
			     0);

		roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_SQ_HEAD_M,
			       QP1C_BYTES_20_SQ_HEAD_S, hr_qp->sq.head);
		roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_PKEY_IDX_M,
			       QP1C_BYTES_20_PKEY_IDX_S, attr->pkey_index);

		rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
		context->cur_rq_wqe_ba_l =
			cpu_to_le32((u32)(mtts[rq_pa_start]));

		roce_set_field(context->qp1c_bytes_28,
			       QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M,
			       QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S,
			       (mtts[rq_pa_start]) >> 32);
		roce_set_field(context->qp1c_bytes_28,
			       QP1C_BYTES_28_RQ_CUR_IDX_M,
			       QP1C_BYTES_28_RQ_CUR_IDX_S, 0);

		roce_set_field(context->qp1c_bytes_32,
			       QP1C_BYTES_32_RX_CQ_NUM_M,
			       QP1C_BYTES_32_RX_CQ_NUM_S,
			       to_hr_cq(ibqp->recv_cq)->cqn);
		roce_set_field(context->qp1c_bytes_32,
			       QP1C_BYTES_32_TX_CQ_NUM_M,
			       QP1C_BYTES_32_TX_CQ_NUM_S,
			       to_hr_cq(ibqp->send_cq)->cqn);

		context->cur_sq_wqe_ba_l = cpu_to_le32((u32)mtts[0]);

		roce_set_field(context->qp1c_bytes_40,
			       QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M,
			       QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S,
			       (mtts[0]) >> 32);
		roce_set_field(context->qp1c_bytes_40,
			       QP1C_BYTES_40_SQ_CUR_IDX_M,
			       QP1C_BYTES_40_SQ_CUR_IDX_S, 0);

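		/* Copy the context to the QP1C registers */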
		addr = (u32 __iomem *)(hr_dev->reg_base +
				       ROCEE_QP1C_CFG0_0_REG +
				       hr_qp->phy_port * sizeof(*context));

		writel(le32_to_cpu(context->qp1c_bytes_4), addr);
		writel(le32_to_cpu(context->sq_rq_bt_l), addr + 1);
		writel(le32_to_cpu(context->qp1c_bytes_12), addr + 2);
		writel(le32_to_cpu(context->qp1c_bytes_16), addr + 3);
		writel(le32_to_cpu(context->qp1c_bytes_20), addr + 4);
		writel(le32_to_cpu(context->cur_rq_wqe_ba_l), addr + 5);
		writel(le32_to_cpu(context->qp1c_bytes_28), addr + 6);
		writel(le32_to_cpu(context->qp1c_bytes_32), addr + 7);
		writel(le32_to_cpu(context->cur_sq_wqe_ba_l), addr + 8);
		writel(le32_to_cpu(context->qp1c_bytes_40), addr + 9);
	}

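	/* Modify the QP1C state field */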
	reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG +
			    hr_qp->phy_port * sizeof(*context));
	tmp = cpu_to_le32(reg_val);
	roce_set_field(tmp, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M,
		       ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state);
	reg_val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG +
		   hr_qp->phy_port * sizeof(*context), reg_val);

	hr_qp->state = new_state;
	if (new_state == IB_QPS_RESET) {
		hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
				     ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
		if (ibqp->send_cq != ibqp->recv_cq)
			hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
					     hr_qp->qpn, NULL);

		hr_qp->rq.head = 0;
		hr_qp->rq.tail = 0;
		hr_qp->sq.head = 0;
		hr_qp->sq.tail = 0;
	}

	kfree(context);
	return 0;

out:
	kfree(context);
	return -EINVAL;
}

static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
			    int attr_mask, enum ib_qp_state cur_state,
			    enum ib_qp_state new_state)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_qp_context *context;
	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
	dma_addr_t dma_handle_2 = 0;
	dma_addr_t dma_handle = 0;
	__le32 doorbell[2] = {0};
	int rq_pa_start = 0;
	u64 *mtts_2 = NULL;
	int ret = -EINVAL;
	u64 *mtts = NULL;
	int port;
	u8 port_num;
	u8 *dmac;
	u8 *smac;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

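	/* Search qp buf's mtts */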
	mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
				   hr_qp->mtt.first_seg, &dma_handle);
	if (mtts == NULL) {
		dev_err(dev, "qp buf pa find failed\n");
		goto out;
	}

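	/* Search IRRL's mtts */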
	mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
				     hr_qp->qpn, &dma_handle_2);
	if (mtts_2 == NULL) {
		dev_err(dev, "qp irrl_table find failed\n");
		goto out;
	}

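	/*
	 * Reset to init: the full QP context is built from the attrs here;
	 * later transitions only update the fields their attr_mask allows.
	 */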
	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		roce_set_field(context->qpc_bytes_4,
			       QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
			       QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
			       to_hr_qp_type(hr_qp->ibqp.qp_type));

		roce_set_bit(context->qpc_bytes_4,
			     QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
		roce_set_bit(context->qpc_bytes_4,
			     QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
			     !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
		roce_set_bit(context->qpc_bytes_4,
			     QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
			     !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE));
		roce_set_bit(context->qpc_bytes_4,
			     QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S,
			     !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC));
		roce_set_bit(context->qpc_bytes_4,
			     QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
		roce_set_field(context->qpc_bytes_4,
			       QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
			       QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
			       ilog2((unsigned int)hr_qp->sq.wqe_cnt));
		roce_set_field(context->qpc_bytes_4,
			       QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
			       QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
			       ilog2((unsigned int)hr_qp->rq.wqe_cnt));
		roce_set_field(context->qpc_bytes_4,
			       QP_CONTEXT_QPC_BYTES_4_PD_M,
			       QP_CONTEXT_QPC_BYTES_4_PD_S,
			       to_hr_pd(ibqp->pd)->pdn);
		hr_qp->access_flags = attr->qp_access_flags;
		roce_set_field(context->qpc_bytes_8,
			       QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
			       QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
			       to_hr_cq(ibqp->send_cq)->cqn);
		roce_set_field(context->qpc_bytes_8,
			       QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
			       QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
			       to_hr_cq(ibqp->recv_cq)->cqn);

		if (ibqp->srq)
			roce_set_field(context->qpc_bytes_12,
				       QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
				       QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
				       to_hr_srq(ibqp->srq)->srqn);

		roce_set_field(context->qpc_bytes_12,
			       QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
			       QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
			       attr->pkey_index);
		hr_qp->pkey_index = attr->pkey_index;
		roce_set_field(context->qpc_bytes_16,
			       QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
			       QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);

	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
		roce_set_field(context->qpc_bytes_4,
			       QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
			       QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
			       to_hr_qp_type(hr_qp->ibqp.qp_type));
		roce_set_bit(context->qpc_bytes_4,
			     QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
		if (attr_mask & IB_QP_ACCESS_FLAGS) {
			roce_set_bit(context->qpc_bytes_4,
				     QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
				     !!(attr->qp_access_flags &
					IB_ACCESS_REMOTE_READ));
			roce_set_bit(context->qpc_bytes_4,
				     QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
				     !!(attr->qp_access_flags &
					IB_ACCESS_REMOTE_WRITE));
		} else {
			roce_set_bit(context->qpc_bytes_4,
				     QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
				     !!(hr_qp->access_flags &
					IB_ACCESS_REMOTE_READ));
			roce_set_bit(context->qpc_bytes_4,
				     QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
				     !!(hr_qp->access_flags &
					IB_ACCESS_REMOTE_WRITE));
		}

		roce_set_bit(context->qpc_bytes_4,
			     QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
		roce_set_field(context->qpc_bytes_4,
			       QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
			       QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
			       ilog2((unsigned int)hr_qp->sq.wqe_cnt));
		roce_set_field(context->qpc_bytes_4,
			       QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
			       QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
			       ilog2((unsigned int)hr_qp->rq.wqe_cnt));
		roce_set_field(context->qpc_bytes_4,
			       QP_CONTEXT_QPC_BYTES_4_PD_M,
			       QP_CONTEXT_QPC_BYTES_4_PD_S,
			       to_hr_pd(ibqp->pd)->pdn);

		roce_set_field(context->qpc_bytes_8,
			       QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
			       QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
			       to_hr_cq(ibqp->send_cq)->cqn);
		roce_set_field(context->qpc_bytes_8,
			       QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
			       QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
			       to_hr_cq(ibqp->recv_cq)->cqn);

		if (ibqp->srq)
			roce_set_field(context->qpc_bytes_12,
				       QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
				       QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
				       to_hr_srq(ibqp->srq)->srqn);
		if (attr_mask & IB_QP_PKEY_INDEX)
			roce_set_field(context->qpc_bytes_12,
				       QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
				       QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
				       attr->pkey_index);
		else
			roce_set_field(context->qpc_bytes_12,
				       QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
				       QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
				       hr_qp->pkey_index);

		roce_set_field(context->qpc_bytes_16,
			       QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
			       QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		if ((attr_mask & IB_QP_ALT_PATH) ||
		    (attr_mask & IB_QP_ACCESS_FLAGS) ||
		    (attr_mask & IB_QP_PKEY_INDEX) ||
		    (attr_mask & IB_QP_QKEY)) {
			dev_err(dev, "INIT2RTR attr_mask error\n");
			goto out;
		}

		dmac = (u8 *)attr->ah_attr.roce.dmac;

		context->sq_rq_bt_l = cpu_to_le32((u32)(dma_handle));
		roce_set_field(context->qpc_bytes_24,
			       QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M,
			       QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S,
			       ((u32)(dma_handle >> 32)));
		roce_set_bit(context->qpc_bytes_24,
			     QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S,
			     1);
		roce_set_field(context->qpc_bytes_24,
			       QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
			       QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S,
			       attr->min_rnr_timer);
		context->irrl_ba_l = cpu_to_le32((u32)(dma_handle_2));
		roce_set_field(context->qpc_bytes_32,
			       QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M,
			       QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S,
			       ((u32)(dma_handle_2 >> 32)) &
			       QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M);
		roce_set_field(context->qpc_bytes_32,
			       QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M,
			       QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S, 0);
		roce_set_bit(context->qpc_bytes_32,
			     QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S,
			     1);
		roce_set_bit(context->qpc_bytes_32,
			     QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S,
			     hr_qp->sq_signal_bits);

		port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) :
			hr_qp->port;
		smac = (u8 *)hr_dev->dev_addr[port];

		if (ether_addr_equal_unaligned(dmac, smac) ||
		    hr_dev->loop_idc == 0x1)
			roce_set_bit(context->qpc_bytes_32,
				     QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1);

		roce_set_bit(context->qpc_bytes_32,
			     QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S,
			     rdma_ah_get_ah_flags(&attr->ah_attr));
		roce_set_field(context->qpc_bytes_32,
			       QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
			       QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S,
			       ilog2((unsigned int)attr->max_dest_rd_atomic));

		if (attr_mask & IB_QP_DEST_QPN)
			roce_set_field(context->qpc_bytes_36,
				       QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
				       QP_CONTEXT_QPC_BYTES_36_DEST_QP_S,
				       attr->dest_qp_num);

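		/* Configure GID index */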
		port_num = rdma_ah_get_port_num(&attr->ah_attr);
		roce_set_field(context->qpc_bytes_36,
			       QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
			       QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S,
			       hns_get_gid_index(hr_dev,
						 port_num - 1,
						 grh->sgid_index));

		memcpy(&(context->dmac_l), dmac, 4);

		roce_set_field(context->qpc_bytes_44,
			       QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
			       QP_CONTEXT_QPC_BYTES_44_DMAC_H_S,
			       *((u16 *)(&dmac[4])));
		roce_set_field(context->qpc_bytes_44,
			       QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M,
			       QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S,
			       rdma_ah_get_static_rate(&attr->ah_attr));
		roce_set_field(context->qpc_bytes_44,
			       QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
			       QP_CONTEXT_QPC_BYTES_44_HOPLMT_S,
			       grh->hop_limit);

		roce_set_field(context->qpc_bytes_48,
			       QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
			       QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S,
			       grh->flow_label);
		roce_set_field(context->qpc_bytes_48,
			       QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
			       QP_CONTEXT_QPC_BYTES_48_TCLASS_S,
			       grh->traffic_class);
		roce_set_field(context->qpc_bytes_48,
			       QP_CONTEXT_QPC_BYTES_48_MTU_M,
			       QP_CONTEXT_QPC_BYTES_48_MTU_S, attr->path_mtu);

		memcpy(context->dgid, grh->dgid.raw,
		       sizeof(grh->dgid.raw));

		dev_dbg(dev, "dmac:%x :%lx\n", context->dmac_l,
			roce_get_field(context->qpc_bytes_44,
				       QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
				       QP_CONTEXT_QPC_BYTES_44_DMAC_H_S));

		roce_set_field(context->qpc_bytes_68,
			       QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M,
			       QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S,
			       hr_qp->rq.head);
		roce_set_field(context->qpc_bytes_68,
			       QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M,
			       QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0);

		rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
		context->cur_rq_wqe_ba_l =
			cpu_to_le32((u32)(mtts[rq_pa_start]));

		roce_set_field(context->qpc_bytes_76,
			       QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M,
			       QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S,
			       mtts[rq_pa_start] >> 32);
		roce_set_field(context->qpc_bytes_76,
			       QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M,
			       QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S, 0);

		context->rx_rnr_time = 0;

		roce_set_field(context->qpc_bytes_84,
			       QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M,
			       QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S,
			       attr->rq_psn - 1);
		roce_set_field(context->qpc_bytes_84,
			       QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M,
			       QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S, 0);

		roce_set_field(context->qpc_bytes_88,
			       QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
			       QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S,
			       attr->rq_psn);
		roce_set_bit(context->qpc_bytes_88,
			     QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S, 0);
		roce_set_bit(context->qpc_bytes_88,
			     QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S, 0);
		roce_set_field(context->qpc_bytes_88,
			       QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M,
			       QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S,
			       0);
		roce_set_field(context->qpc_bytes_88,
			       QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M,
			       QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S,
			       0);

		context->dma_length = 0;
		context->r_key = 0;
		context->va_l = 0;
		context->va_h = 0;

		roce_set_field(context->qpc_bytes_108,
			       QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M,
			       QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S, 0);
		roce_set_bit(context->qpc_bytes_108,
			     QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S, 0);
		roce_set_bit(context->qpc_bytes_108,
			     QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S, 0);

		roce_set_field(context->qpc_bytes_112,
			       QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M,
			       QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S, 0);
		roce_set_field(context->qpc_bytes_112,
			       QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M,
			       QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S, 0);

		roce_set_field(context->qpc_bytes_156,
			       QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
			       QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
			       hr_qp->phy_port);
		roce_set_field(context->qpc_bytes_156,
			       QP_CONTEXT_QPC_BYTES_156_SL_M,
			       QP_CONTEXT_QPC_BYTES_156_SL_S,
			       rdma_ah_get_sl(&attr->ah_attr));
		hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
	} else if (cur_state == IB_QPS_RTR &&
		   new_state == IB_QPS_RTS) {

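		/* No optional attribute is allowed for this transition */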
		if ((attr_mask & IB_QP_ALT_PATH) ||
		    (attr_mask & IB_QP_ACCESS_FLAGS) ||
		    (attr_mask & IB_QP_QKEY) ||
		    (attr_mask & IB_QP_PATH_MIG_STATE) ||
		    (attr_mask & IB_QP_CUR_STATE) ||
		    (attr_mask & IB_QP_MIN_RNR_TIMER)) {
			dev_err(dev, "RTR2RTS attr_mask error\n");
			goto out;
		}

		context->rx_cur_sq_wqe_ba_l = cpu_to_le32((u32)(mtts[0]));

		roce_set_field(context->qpc_bytes_120,
			       QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M,
			       QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S,
			       (mtts[0]) >> 32);

		roce_set_field(context->qpc_bytes_124,
			       QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M,
			       QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S, 0);
		roce_set_field(context->qpc_bytes_124,
			       QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M,
			       QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S, 0);

		roce_set_field(context->qpc_bytes_128,
			       QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M,
			       QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S,
			       attr->sq_psn);
		roce_set_bit(context->qpc_bytes_128,
			     QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S, 0);
		roce_set_field(context->qpc_bytes_128,
			       QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M,
			       QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S,
			       0);
		roce_set_bit(context->qpc_bytes_128,
			     QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S, 0);

		roce_set_field(context->qpc_bytes_132,
			       QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M,
			       QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S, 0);
		roce_set_field(context->qpc_bytes_132,
			       QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M,
			       QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S, 0);

		roce_set_field(context->qpc_bytes_136,
			       QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M,
			       QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S,
			       attr->sq_psn);
		roce_set_field(context->qpc_bytes_136,
			       QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M,
			       QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S,
			       attr->sq_psn);

		roce_set_field(context->qpc_bytes_140,
			       QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M,
			       QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S,
			       (attr->sq_psn >> SQ_PSN_SHIFT));
		roce_set_field(context->qpc_bytes_140,
			       QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M,
			       QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S, 0);
		roce_set_bit(context->qpc_bytes_140,
			     QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0);

		roce_set_field(context->qpc_bytes_148,
			       QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M,
			       QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0);
		roce_set_field(context->qpc_bytes_148,
			       QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
			       QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S,
			       attr->retry_cnt);
		roce_set_field(context->qpc_bytes_148,
			       QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M,
			       QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S,
			       attr->rnr_retry);
		roce_set_field(context->qpc_bytes_148,
			       QP_CONTEXT_QPC_BYTES_148_LSN_M,
			       QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100);

		context->rnr_retry = 0;

		roce_set_field(context->qpc_bytes_156,
			       QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M,
			       QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S,
			       attr->retry_cnt);
		if (attr->timeout < 0x12) {
			dev_info(dev, "ack timeout value (0x%x) must be at least 0x12; clamping\n",
				 attr->timeout);
			roce_set_field(context->qpc_bytes_156,
				       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
				       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
				       0x12);
		} else {
			roce_set_field(context->qpc_bytes_156,
				       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
				       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
				       attr->timeout);
		}
		roce_set_field(context->qpc_bytes_156,
			       QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M,
			       QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S,
			       attr->rnr_retry);
		roce_set_field(context->qpc_bytes_156,
			       QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
			       QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
			       hr_qp->phy_port);
		roce_set_field(context->qpc_bytes_156,
			       QP_CONTEXT_QPC_BYTES_156_SL_M,
			       QP_CONTEXT_QPC_BYTES_156_SL_S,
			       rdma_ah_get_sl(&attr->ah_attr));
		hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
		roce_set_field(context->qpc_bytes_156,
			       QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
			       QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S,
			       ilog2((unsigned int)attr->max_rd_atomic));
		roce_set_field(context->qpc_bytes_156,
			       QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M,
			       QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S, 0);
		context->pkt_use_len = 0;

		roce_set_field(context->qpc_bytes_164,
			       QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
			       QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S, attr->sq_psn);
		roce_set_field(context->qpc_bytes_164,
			       QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M,
			       QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S, 0);

		roce_set_field(context->qpc_bytes_168,
			       QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M,
			       QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S,
			       attr->sq_psn);
		roce_set_field(context->qpc_bytes_168,
			       QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M,
			       QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S, 0);
		roce_set_field(context->qpc_bytes_168,
			       QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M,
			       QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S, 0);
		roce_set_bit(context->qpc_bytes_168,
			     QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S, 0);
		roce_set_bit(context->qpc_bytes_168,
			     QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S, 0);
		roce_set_bit(context->qpc_bytes_168,
			     QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S, 0);
		context->sge_use_len = 0;

		roce_set_field(context->qpc_bytes_176,
			       QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M,
			       QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S, 0);
		roce_set_field(context->qpc_bytes_176,
			       QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M,
			       QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S,
			       0);
		roce_set_field(context->qpc_bytes_180,
			       QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M,
			       QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S, 0);
		roce_set_field(context->qpc_bytes_180,
			       QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M,
			       QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S, 0);

		context->tx_cur_sq_wqe_ba_l = cpu_to_le32((u32)(mtts[0]));

		roce_set_field(context->qpc_bytes_188,
			       QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M,
			       QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S,
			       (mtts[0]) >> 32);
		roce_set_bit(context->qpc_bytes_188,
			     QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S, 0);
		roce_set_field(context->qpc_bytes_188,
			       QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M,
			       QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S,
			       0);
	} else if (!((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
		     (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
		     (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
		     (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
		     (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
		     (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
		     (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
		     (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR))) {
		dev_err(dev, "unsupported QP state migration\n");
		goto out;
	}

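	/* Every state migration must also write the new QP state */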
	roce_set_field(context->qpc_bytes_144,
		       QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
		       QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, new_state);

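	/* Software hands the context to hardware via a mailbox command */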
	ret = hns_roce_v1_qp_modify(hr_dev, &hr_qp->mtt,
				    to_hns_roce_state(cur_state),
				    to_hns_roce_state(new_state), context,
				    hr_qp);
	if (ret) {
		dev_err(dev, "hns_roce_qp_modify failed\n");
		goto out;
	}

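	/*
	 * RST2INIT is used in place of INIT2INIT (see the op table above),
	 * so hardware must refresh the RQ head by a doorbell afterwards.
	 */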
	if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
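		/* Memory barrier */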
		wmb();

		roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M,
			       RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head);
		roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M,
			       RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
		roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_CMD_M,
			       RQ_DOORBELL_U32_8_CMD_S, 1);
		roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1);

		if (ibqp->uobject) {
			hr_qp->rq.db_reg_l = hr_dev->reg_base +
					     hr_dev->odb_offset +
					     DB_REG_OFFSET * hr_dev->priv_uar.index;
		}

		hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
	}

	hr_qp->state = new_state;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		hr_qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT) {
		hr_qp->port = attr->port_num - 1;
		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
	}

	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
		hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
				     ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
		if (ibqp->send_cq != ibqp->recv_cq)
			hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
					     hr_qp->qpn, NULL);

		hr_qp->rq.head = 0;
		hr_qp->rq.tail = 0;
		hr_qp->sq.head = 0;
		hr_qp->sq.tail = 0;
	}
out:
	kfree(context);
	return ret;
}

static int hns_roce_v1_modify_qp(struct ib_qp *ibqp,
				 const struct ib_qp_attr *attr, int attr_mask,
				 enum ib_qp_state cur_state,
				 enum ib_qp_state new_state)
{
	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
		return hns_roce_v1_m_sqp(ibqp, attr, attr_mask, cur_state,
					 new_state);
	else
		return hns_roce_v1_m_qp(ibqp, attr, attr_mask, cur_state,
					new_state);
}

static enum ib_qp_state to_ib_qp_state(enum hns_roce_qp_state state)
{
	switch (state) {
	case HNS_ROCE_QP_STATE_RST:
		return IB_QPS_RESET;
	case HNS_ROCE_QP_STATE_INIT:
		return IB_QPS_INIT;
	case HNS_ROCE_QP_STATE_RTR:
		return IB_QPS_RTR;
	case HNS_ROCE_QP_STATE_RTS:
		return IB_QPS_RTS;
	case HNS_ROCE_QP_STATE_SQD:
		return IB_QPS_SQD;
	case HNS_ROCE_QP_STATE_ERR:
		return IB_QPS_ERR;
	default:
		return IB_QPS_ERR;
	}
}

static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev,
				 struct hns_roce_qp *hr_qp,
				 struct hns_roce_qp_context *hr_context)
{
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
				HNS_ROCE_CMD_QUERY_QP,
				HNS_ROCE_CMD_TIMEOUT_MSECS);
	if (!ret)
		memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
	else
		dev_err(&hr_dev->pdev->dev, "QUERY QP cmd process error\n");

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
			     int qp_attr_mask,
			     struct ib_qp_init_attr *qp_init_attr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_sqp_context context;
	u32 addr;

	mutex_lock(&hr_qp->mutex);

	if (hr_qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		goto done;
	}

	addr = ROCEE_QP1C_CFG0_0_REG +
	       hr_qp->port * sizeof(struct hns_roce_sqp_context);
	context.qp1c_bytes_4 = cpu_to_le32(roce_read(hr_dev, addr));
	context.sq_rq_bt_l = cpu_to_le32(roce_read(hr_dev, addr + 1));
	context.qp1c_bytes_12 = cpu_to_le32(roce_read(hr_dev, addr + 2));
	context.qp1c_bytes_16 = cpu_to_le32(roce_read(hr_dev, addr + 3));
	context.qp1c_bytes_20 = cpu_to_le32(roce_read(hr_dev, addr + 4));
	context.cur_rq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 5));
	context.qp1c_bytes_28 = cpu_to_le32(roce_read(hr_dev, addr + 6));
	context.qp1c_bytes_32 = cpu_to_le32(roce_read(hr_dev, addr + 7));
	context.cur_sq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 8));
	context.qp1c_bytes_40 = cpu_to_le32(roce_read(hr_dev, addr + 9));

	hr_qp->state = roce_get_field(context.qp1c_bytes_4,
				      QP1C_BYTES_4_QP_STATE_M,
				      QP1C_BYTES_4_QP_STATE_S);
	qp_attr->qp_state = hr_qp->state;
	qp_attr->path_mtu = IB_MTU_256;
	qp_attr->path_mig_state = IB_MIG_ARMED;
	qp_attr->qkey = QKEY_VAL;
	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	qp_attr->rq_psn = 0;
	qp_attr->sq_psn = 0;
	qp_attr->dest_qp_num = 1;
	qp_attr->qp_access_flags = 6;

	qp_attr->pkey_index = roce_get_field(context.qp1c_bytes_20,
					     QP1C_BYTES_20_PKEY_IDX_M,
					     QP1C_BYTES_20_PKEY_IDX_S);
	qp_attr->port_num = hr_qp->port + 1;
	qp_attr->sq_draining = 0;
	qp_attr->max_rd_atomic = 0;
	qp_attr->max_dest_rd_atomic = 0;
	qp_attr->min_rnr_timer = 0;
	qp_attr->timeout = 0;
	qp_attr->retry_cnt = 0;
	qp_attr->rnr_retry = 0;
	qp_attr->alt_timeout = 0;

done:
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
	qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
	qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
	qp_attr->cap.max_inline_data = 0;
	qp_init_attr->cap = qp_attr->cap;
	qp_init_attr->create_flags = 0;

	mutex_unlock(&hr_qp->mutex);

	return 0;
}
3448
3449 static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3450 int qp_attr_mask,
3451 struct ib_qp_init_attr *qp_init_attr)
3452 {
3453 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3454 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3455 struct device *dev = &hr_dev->pdev->dev;
3456 struct hns_roce_qp_context *context;
3457 int tmp_qp_state = 0;
3458 int ret = 0;
3459 int state;
3460
3461 context = kzalloc(sizeof(*context), GFP_KERNEL);
3462 if (!context)
3463 return -ENOMEM;
3464
3465 memset(qp_attr, 0, sizeof(*qp_attr));
3466 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
3467
3468 mutex_lock(&hr_qp->mutex);
3469
3470 if (hr_qp->state == IB_QPS_RESET) {
3471 qp_attr->qp_state = IB_QPS_RESET;
3472 goto done;
3473 }
3474
3475 ret = hns_roce_v1_query_qpc(hr_dev, hr_qp, context);
3476 if (ret) {
3477 dev_err(dev, "query qpc error\n");
3478 ret = -EINVAL;
3479 goto out;
3480 }
3481
3482 state = roce_get_field(context->qpc_bytes_144,
3483 QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
3484 QP_CONTEXT_QPC_BYTES_144_QP_STATE_S);
3485 tmp_qp_state = (int)to_ib_qp_state((enum hns_roce_qp_state)state);
3486 if (tmp_qp_state == -1) {
3487 dev_err(dev, "to_ib_qp_state error\n");
3488 ret = -EINVAL;
3489 goto out;
3490 }
3491 hr_qp->state = (u8)tmp_qp_state;
3492 qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
3493 qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->qpc_bytes_48,
3494 QP_CONTEXT_QPC_BYTES_48_MTU_M,
3495 QP_CONTEXT_QPC_BYTES_48_MTU_S);
3496 qp_attr->path_mig_state = IB_MIG_ARMED;
3497 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
3498 if (hr_qp->ibqp.qp_type == IB_QPT_UD)
3499 qp_attr->qkey = QKEY_VAL;
3500
3501 qp_attr->rq_psn = roce_get_field(context->qpc_bytes_88,
3502 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
3503 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S);
3504 qp_attr->sq_psn = (u32)roce_get_field(context->qpc_bytes_164,
3505 QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
3506 QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S);
3507 qp_attr->dest_qp_num = (u8)roce_get_field(context->qpc_bytes_36,
3508 QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
3509 QP_CONTEXT_QPC_BYTES_36_DEST_QP_S);
3510 qp_attr->qp_access_flags = ((roce_get_bit(context->qpc_bytes_4,
3511 QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S)) << 2) |
3512 ((roce_get_bit(context->qpc_bytes_4,
3513 QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S)) << 1) |
3514 ((roce_get_bit(context->qpc_bytes_4,
3515 QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S)) << 3);
3516
3517 if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
3518 hr_qp->ibqp.qp_type == IB_QPT_UC) {
3519 struct ib_global_route *grh =
3520 rdma_ah_retrieve_grh(&qp_attr->ah_attr);
3521
3522 rdma_ah_set_sl(&qp_attr->ah_attr,
3523 roce_get_field(context->qpc_bytes_156,
3524 QP_CONTEXT_QPC_BYTES_156_SL_M,
3525 QP_CONTEXT_QPC_BYTES_156_SL_S));
3526 rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH);
3527 grh->flow_label =
3528 roce_get_field(context->qpc_bytes_48,
3529 QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
3530 QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S);
3531 grh->sgid_index =
3532 roce_get_field(context->qpc_bytes_36,
3533 QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
3534 QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S);
3535 grh->hop_limit =
3536 roce_get_field(context->qpc_bytes_44,
3537 QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
3538 QP_CONTEXT_QPC_BYTES_44_HOPLMT_S);
3539 grh->traffic_class =
3540 roce_get_field(context->qpc_bytes_48,
3541 QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
3542 QP_CONTEXT_QPC_BYTES_48_TCLASS_S);
3543
3544 memcpy(grh->dgid.raw, context->dgid,
3545 sizeof(grh->dgid.raw));
3546 }
3547
3548 qp_attr->pkey_index = roce_get_field(context->qpc_bytes_12,
3549 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
3550 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S);
3551 qp_attr->port_num = hr_qp->port + 1;
3552 qp_attr->sq_draining = 0;
3553 qp_attr->max_rd_atomic = 1 << roce_get_field(context->qpc_bytes_156,
3554 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
3555 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S);
3556 qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->qpc_bytes_32,
3557 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
3558 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S);
3559 qp_attr->min_rnr_timer = (u8)(roce_get_field(context->qpc_bytes_24,
3560 QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
3561 QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S));
3562 qp_attr->timeout = (u8)(roce_get_field(context->qpc_bytes_156,
3563 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
3564 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S));
3565 qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148,
3566 QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
3567 QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S);
3568 qp_attr->rnr_retry = (u8)le32_to_cpu(context->rnr_retry);
3569
3570 done:
3571 qp_attr->cur_qp_state = qp_attr->qp_state;
3572 qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
3573 qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
3574
3575 if (!ibqp->uobject) {
3576 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
3577 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
3578 } else {
3579 qp_attr->cap.max_send_wr = 0;
3580 qp_attr->cap.max_send_sge = 0;
3581 }
3582
3583 qp_init_attr->cap = qp_attr->cap;
3584
3585 out:
3586 mutex_unlock(&hr_qp->mutex);
3587 kfree(context);
3588 return ret;
3589 }
3590
3591 static int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3592 int qp_attr_mask,
3593 struct ib_qp_init_attr *qp_init_attr)
3594 {
3595 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3596
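/*
 * QPN 0 and 1 are reserved for the special QPs, whose context is
 * kept in a separate structure, so they take the SQP query path.
 */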
3597 return hr_qp->doorbell_qpn <= 1 ?
3598 hns_roce_v1_q_sqp(ibqp, qp_attr, qp_attr_mask, qp_init_attr) :
3599 hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr);
3600 }
3601
3602 int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
3603 {
3604 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3605 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3606 struct hns_roce_cq *send_cq, *recv_cq;
3607 int ret;
3608
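/*
 * Move the QP to RESET so the hardware is done with it before its
 * buffers and resources are freed below.
 */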
3609 ret = hns_roce_v1_modify_qp(ibqp, NULL, 0, hr_qp->state, IB_QPS_RESET);
3610 if (ret)
3611 return ret;
3612
3613 send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
3614 recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
3615
3616 hns_roce_lock_cqs(send_cq, recv_cq);
3617 if (!udata) {
3618 __hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
3619 to_hr_srq(hr_qp->ibqp.srq) : NULL);
3620 if (send_cq != recv_cq)
3621 __hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
3622 }
3623 hns_roce_unlock_cqs(send_cq, recv_cq);
3624
3625 hns_roce_qp_remove(hr_dev, hr_qp);
3626 hns_roce_qp_free(hr_dev, hr_qp);
3627
3628 /* RC QP, release QPN */
3629 if (hr_qp->ibqp.qp_type == IB_QPT_RC)
3630 hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
3631
3632 hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
3633
3634 ib_umem_release(hr_qp->umem);
3635 if (!udata) {
3636 kfree(hr_qp->sq.wrid);
3637 kfree(hr_qp->rq.wrid);
3638
3639 hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
3640 }
3641
3642 if (hr_qp->ibqp.qp_type == IB_QPT_RC)
3643 kfree(hr_qp);
3644 else
3645 kfree(hr_to_hr_sqp(hr_qp));
3646 return 0;
3647 }
3648
3649 static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
3650 {
3651 struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
3652 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
3653 struct device *dev = &hr_dev->pdev->dev;
3654 u32 cqe_cnt_ori;
3655 u32 cqe_cnt_cur;
3656 u32 cq_buf_size;
3657 int wait_time = 0;
3658
3659 hns_roce_free_cq(hr_dev, hr_cq);
3660
3661 /*
3662  * Before freeing the CQ buffer, wait until the hardware has drained
3663  * its outstanding CQE writes for this CQ (or a timeout is reached).
3664  */
3665 cqe_cnt_ori = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
3666 while (1) {
3667 if (roce_read(hr_dev, ROCEE_CAEP_CQE_WCMD_EMPTY) &
3668 HNS_ROCE_CQE_WCMD_EMPTY_BIT)
3669 break;
3670
3671 cqe_cnt_cur = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
3672 if ((cqe_cnt_cur - cqe_cnt_ori) >= HNS_ROCE_MIN_CQE_CNT)
3673 break;
3674
3675 msleep(HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS);
3676 if (wait_time > HNS_ROCE_MAX_FREE_CQ_WAIT_CNT) {
3677 dev_warn(dev, "Destroy cq 0x%lx timeout!\n",
3678 hr_cq->cqn);
3679 break;
3680 }
3681 wait_time++;
3682 }
3683
3684 hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
3685
3686 ib_umem_release(hr_cq->umem);
3687 if (!udata) {
3688 /* Free the kernel-space CQ buffer */
3689 cq_buf_size = (ibcq->cqe + 1) * hr_dev->caps.cq_entry_sz;
3690 hns_roce_buf_free(hr_dev, cq_buf_size, &hr_cq->hr_buf.hr_buf);
3691 }
3692 }
3693
3694 static void set_eq_cons_index_v1(struct hns_roce_eq *eq, int req_not)
3695 {
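/*
 * Doorbell layout: the low bits hold the consumer index, and the bit
 * just above the index field (at position log_entries) requests the
 * next interrupt.
 */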
3696 roce_raw_write((eq->cons_index & HNS_ROCE_V1_CONS_IDX_M) |
3697 (req_not << eq->log_entries), eq->doorbell);
3698 }
3699
3700 static void hns_roce_v1_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
3701 struct hns_roce_aeqe *aeqe, int qpn)
3702 {
3703 struct device *dev = &hr_dev->pdev->dev;
3704
3705 dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
3706 switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
3707 HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
3708 case HNS_ROCE_LWQCE_QPC_ERROR:
3709 dev_warn(dev, "QP %d, QPC error.\n", qpn);
3710 break;
3711 case HNS_ROCE_LWQCE_MTU_ERROR:
3712 dev_warn(dev, "QP %d, MTU error.\n", qpn);
3713 break;
3714 case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
3715 dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
3716 break;
3717 case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
3718 dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
3719 break;
3720 case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
3721 dev_warn(dev, "QP %d, WQE shift error.\n", qpn);
3722 break;
3723 case HNS_ROCE_LWQCE_SL_ERROR:
3724 dev_warn(dev, "QP %d, SL error.\n", qpn);
3725 break;
3726 case HNS_ROCE_LWQCE_PORT_ERROR:
3727 dev_warn(dev, "QP %d, port error.\n", qpn);
3728 break;
3729 default:
3730 break;
3731 }
3732 }
3733
3734 static void hns_roce_v1_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
3735 struct hns_roce_aeqe *aeqe,
3736 int qpn)
3737 {
3738 struct device *dev = &hr_dev->pdev->dev;
3739
3740 dev_warn(dev, "Local Access Violation Work Queue Error.\n");
3741 switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
3742 HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
3743 case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
3744 dev_warn(dev, "QP %d, R_key violation.\n", qpn);
3745 break;
3746 case HNS_ROCE_LAVWQE_LENGTH_ERROR:
3747 dev_warn(dev, "QP %d, length error.\n", qpn);
3748 break;
3749 case HNS_ROCE_LAVWQE_VA_ERROR:
3750 dev_warn(dev, "QP %d, VA error.\n", qpn);
3751 break;
3752 case HNS_ROCE_LAVWQE_PD_ERROR:
3753 dev_err(dev, "QP %d, PD error.\n", qpn);
3754 break;
3755 case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
3756 dev_warn(dev, "QP %d, rw acc error.\n", qpn);
3757 break;
3758 case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
3759 dev_warn(dev, "QP %d, key state error.\n", qpn);
3760 break;
3761 case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
3762 dev_warn(dev, "QP %d, MR operation error.\n", qpn);
3763 break;
3764 default:
3765 break;
3766 }
3767 }
3768
3769 static void hns_roce_v1_qp_err_handle(struct hns_roce_dev *hr_dev,
3770 struct hns_roce_aeqe *aeqe,
3771 int event_type)
3772 {
3773 struct device *dev = &hr_dev->pdev->dev;
3774 int phy_port;
3775 int qpn;
3776
3777 qpn = roce_get_field(aeqe->event.qp_event.qp,
3778 HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
3779 HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
3780 phy_port = roce_get_field(aeqe->event.qp_event.qp,
3781 HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
3782 HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
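/*
 * The special QPs (QPN 0/1) exist per physical port; recover the
 * actual QPN from the reported physical port.
 */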
3783 if (qpn <= 1)
3784 qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;
3785
3786 switch (event_type) {
3787 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
3788 dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
3789 "QP %d, phy_port %d.\n", qpn, phy_port);
3790 break;
3791 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
3792 hns_roce_v1_wq_catas_err_handle(hr_dev, aeqe, qpn);
3793 break;
3794 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
3795 hns_roce_v1_local_wq_access_err_handle(hr_dev, aeqe, qpn);
3796 break;
3797 default:
3798 break;
3799 }
3800
3801 hns_roce_qp_event(hr_dev, qpn, event_type);
3802 }
3803
3804 static void hns_roce_v1_cq_err_handle(struct hns_roce_dev *hr_dev,
3805 struct hns_roce_aeqe *aeqe,
3806 int event_type)
3807 {
3808 struct device *dev = &hr_dev->pdev->dev;
3809 u32 cqn;
3810
3811 cqn = roce_get_field(aeqe->event.cq_event.cq,
3812 HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
3813 HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S);
3814
3815 switch (event_type) {
3816 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
3817 dev_warn(dev, "CQ 0x%x access err.\n", cqn);
3818 break;
3819 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
3820 dev_warn(dev, "CQ 0x%x overflow.\n", cqn);
3821 break;
3822 case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
3823 dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
3824 break;
3825 default:
3826 break;
3827 }
3828
3829 hns_roce_cq_event(hr_dev, cqn, event_type);
3830 }
3831
3832 static void hns_roce_v1_db_overflow_handle(struct hns_roce_dev *hr_dev,
3833 struct hns_roce_aeqe *aeqe)
3834 {
3835 struct device *dev = &hr_dev->pdev->dev;
3836
3837 switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
3838 HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
3839 case HNS_ROCE_DB_SUBTYPE_SDB_OVF:
3840 dev_warn(dev, "SDB overflow.\n");
3841 break;
3842 case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF:
3843 dev_warn(dev, "SDB almost overflow.\n");
3844 break;
3845 case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP:
3846 dev_warn(dev, "SDB almost empty.\n");
3847 break;
3848 case HNS_ROCE_DB_SUBTYPE_ODB_OVF:
3849 dev_warn(dev, "ODB overflow.\n");
3850 break;
3851 case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF:
3852 dev_warn(dev, "ODB almost overflow.\n");
3853 break;
3854 case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP:
3855 dev_warn(dev, "ODB almost empty.\n");
3856 break;
3857 default:
3858 break;
3859 }
3860 }
3861
3862 static struct hns_roce_aeqe *get_aeqe_v1(struct hns_roce_eq *eq, u32 entry)
3863 {
3864 unsigned long off = (entry & (eq->entries - 1)) *
3865 HNS_ROCE_AEQ_ENTRY_SIZE;
3866
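/*
 * The EQ buffer is a list of HNS_ROCE_BA_SIZE chunks; locate the
 * chunk holding this entry and the offset within it.
 */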
3867 return (struct hns_roce_aeqe *)((u8 *)
3868 (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
3869 off % HNS_ROCE_BA_SIZE);
3870 }
3871
3872 static struct hns_roce_aeqe *next_aeqe_sw_v1(struct hns_roce_eq *eq)
3873 {
3874 struct hns_roce_aeqe *aeqe = get_aeqe_v1(eq, eq->cons_index);
3875
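/*
 * An entry is owned by software when its owner bit differs from the
 * parity of cons_index, which flips on every pass through the ring.
 */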
3876 return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^
3877 !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
3878 }
3879
3880 static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev,
3881 struct hns_roce_eq *eq)
3882 {
3883 struct device *dev = &hr_dev->pdev->dev;
3884 struct hns_roce_aeqe *aeqe;
3885 int aeqes_found = 0;
3886 int event_type;
3887
3888 while ((aeqe = next_aeqe_sw_v1(eq))) {
3889 /*
3890  * Make sure we read the rest of the AEQ entry only after we have
3891  * checked the ownership bit.
3892  */
3893 dma_rmb();
3894
3895 dev_dbg(dev, "aeqe = %pK, aeqe->asyn.event_type = 0x%lx\n",
3896 aeqe,
3897 roce_get_field(aeqe->asyn,
3898 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
3899 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
3900 event_type = roce_get_field(aeqe->asyn,
3901 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
3902 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
3903 switch (event_type) {
3904 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
3905 dev_warn(dev, "PATH MIG not supported\n");
3906 break;
3907 case HNS_ROCE_EVENT_TYPE_COMM_EST:
3908 dev_warn(dev, "COMMUNICATION established\n");
3909 break;
3910 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
3911 dev_warn(dev, "SQ DRAINED not supported\n");
3912 break;
3913 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
3914 dev_warn(dev, "PATH MIG failed\n");
3915 break;
3916 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
3917 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
3918 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
3919 hns_roce_v1_qp_err_handle(hr_dev, aeqe, event_type);
3920 break;
3921 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
3922 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
3923 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
3924 dev_warn(dev, "SRQ is not supported!\n");
3925 break;
3926 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
3927 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
3928 case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
3929 hns_roce_v1_cq_err_handle(hr_dev, aeqe, event_type);
3930 break;
3931 case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
3932 dev_warn(dev, "port change.\n");
3933 break;
3934 case HNS_ROCE_EVENT_TYPE_MB:
3935 hns_roce_cmd_event(hr_dev,
3936 le16_to_cpu(aeqe->event.cmd.token),
3937 aeqe->event.cmd.status,
3938 le64_to_cpu(aeqe->event.cmd.out_param
3939 ));
3940 break;
3941 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
3942 hns_roce_v1_db_overflow_handle(hr_dev, aeqe);
3943 break;
3944 case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
3945 dev_warn(dev, "CEQ 0x%lx overflow.\n",
3946 roce_get_field(aeqe->event.ce_event.ceqe,
3947 HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M,
3948 HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
3949 break;
3950 default:
3951 dev_warn(dev, "Unhandled event %d on EQ %d at idx %u.\n",
3952 event_type, eq->eqn, eq->cons_index);
3953 break;
3954 }
3955
3956 eq->cons_index++;
3957 aeqes_found = 1;
3958
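/*
 * cons_index counts two passes of the ring so the owner-bit parity
 * stays meaningful; wrap it at twice the AEQ depth.
 */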
3959 if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) {
3960 dev_warn(dev, "cons_index overflow, set back to 0.\n");
3961 eq->cons_index = 0;
3962 }
3963 }
3964
3965 set_eq_cons_index_v1(eq, 0);
3966
3967 return aeqes_found;
3968 }
3969
3970 static struct hns_roce_ceqe *get_ceqe_v1(struct hns_roce_eq *eq, u32 entry)
3971 {
3972 unsigned long off = (entry & (eq->entries - 1)) *
3973 HNS_ROCE_CEQ_ENTRY_SIZE;
3974
3975 return (struct hns_roce_ceqe *)((u8 *)
3976 (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
3977 off % HNS_ROCE_BA_SIZE);
3978 }
3979
3980 static struct hns_roce_ceqe *next_ceqe_sw_v1(struct hns_roce_eq *eq)
3981 {
3982 struct hns_roce_ceqe *ceqe = get_ceqe_v1(eq, eq->cons_index);
3983
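/*
 * Same owner-bit scheme as the AEQ: the entry is valid when its
 * owner bit differs from the cons_index parity.
 */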
3984 return (!!(roce_get_bit(ceqe->comp,
3985 HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^
3986 (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
3987 }
3988
3989 static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev,
3990 struct hns_roce_eq *eq)
3991 {
3992 struct hns_roce_ceqe *ceqe;
3993 int ceqes_found = 0;
3994 u32 cqn;
3995
3996 while ((ceqe = next_ceqe_sw_v1(eq))) {
3997 /*
3998  * Make sure we read the rest of the CEQ entry only after we have
3999  * checked the ownership bit.
4000  */
4001 dma_rmb();
4002
4003 cqn = roce_get_field(ceqe->comp,
4004 HNS_ROCE_CEQE_CEQE_COMP_CQN_M,
4005 HNS_ROCE_CEQE_CEQE_COMP_CQN_S);
4006 hns_roce_cq_completion(hr_dev, cqn);
4007
4008 ++eq->cons_index;
4009 ceqes_found = 1;
4010
4011 if (eq->cons_index >
4012 EQ_DEPTH_COEFF * hr_dev->caps.ceqe_depth - 1) {
4013 dev_warn(&eq->hr_dev->pdev->dev,
4014 "cons_index overflow, set back to 0.\n");
4015 eq->cons_index = 0;
4016 }
4017 }
4018
4019 set_eq_cons_index_v1(eq, 0);
4020
4021 return ceqes_found;
4022 }
4023
4024 static irqreturn_t hns_roce_v1_msix_interrupt_eq(int irq, void *eq_ptr)
4025 {
4026 struct hns_roce_eq *eq = eq_ptr;
4027 struct hns_roce_dev *hr_dev = eq->hr_dev;
4028 int int_work = 0;
4029
4030 if (eq->type_flag == HNS_ROCE_CEQ)
4031 /* CEQ irq routine */
4032 int_work = hns_roce_v1_ceq_int(hr_dev, eq);
4033 else
4034 /* AEQ irq routine */
4035 int_work = hns_roce_v1_aeq_int(hr_dev, eq);
4036
4037 return IRQ_RETVAL(int_work);
4038 }
4039
4040 static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id)
4041 {
4042 struct hns_roce_dev *hr_dev = dev_id;
4043 struct device *dev = &hr_dev->pdev->dev;
4044 int int_work = 0;
4045 u32 caepaemask_val;
4046 u32 cealmovf_val;
4047 u32 caepaest_val;
4048 u32 aeshift_val;
4049 u32 ceshift_val;
4050 u32 cemask_val;
4051 __le32 tmp;
4052 int i;
4053
4054 /*
4055  * Abnormal interrupt: AEQ overflow, CEQ almost-overflow and ECC
4056  * error alarms. Each condition is handled by masking the interrupt,
4057  * clearing its status (write 1 to clear), then unmasking it again.
4058  */
4059 aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);
4060 tmp = cpu_to_le32(aeshift_val);
4061
4062 /* AEQE overflow */
4063 if (roce_get_bit(tmp,
4064 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
4065 dev_warn(dev, "AEQ overflow!\n");
4066
4067 /* Mask the AEQ overflow interrupt */
4068 caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
4069 tmp = cpu_to_le32(caepaemask_val);
4070 roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
4071 HNS_ROCE_INT_MASK_ENABLE);
4072 caepaemask_val = le32_to_cpu(tmp);
4073 roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
4074
4075 /* Clear the AEQ overflow status (write 1 to clear) */
4076 caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
4077 tmp = cpu_to_le32(caepaest_val);
4078 roce_set_bit(tmp, ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
4079 caepaest_val = le32_to_cpu(tmp);
4080 roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);
4081
4082 /* Unmask the AEQ overflow interrupt again */
4083 caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
4084 tmp = cpu_to_le32(caepaemask_val);
4085 roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
4086 HNS_ROCE_INT_MASK_DISABLE);
4087 caepaemask_val = le32_to_cpu(tmp);
4088 roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
4089 }
4090
4091 /* CEQ almost overflow */
4092 for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
4093 ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
4094 i * CEQ_REG_OFFSET);
4095 tmp = cpu_to_le32(ceshift_val);
4096
4097 if (roce_get_bit(tmp,
4098 ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
4099 dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
4100 int_work++;
4101
4102 /* Mask the CEQ almost-overflow interrupt */
4103 cemask_val = roce_read(hr_dev,
4104 ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4105 i * CEQ_REG_OFFSET);
4106 tmp = cpu_to_le32(cemask_val);
4107 roce_set_bit(tmp,
4108 ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
4109 HNS_ROCE_INT_MASK_ENABLE);
4110 cemask_val = le32_to_cpu(tmp);
4111 roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4112 i * CEQ_REG_OFFSET, cemask_val);
4113
4114 /* Clear the CEQ almost-overflow status (write 1 to clear) */
4115 cealmovf_val = roce_read(hr_dev,
4116 ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
4117 i * CEQ_REG_OFFSET);
4118 tmp = cpu_to_le32(cealmovf_val);
4119 roce_set_bit(tmp,
4120 ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
4121 1);
4122 cealmovf_val = le32_to_cpu(tmp);
4123 roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
4124 i * CEQ_REG_OFFSET, cealmovf_val);
4125
4126 /* Unmask the CEQ almost-overflow interrupt again */
4127 cemask_val = roce_read(hr_dev,
4128 ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4129 i * CEQ_REG_OFFSET);
4130 tmp = cpu_to_le32(cemask_val);
4131 roce_set_bit(tmp,
4132 ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
4133 HNS_ROCE_INT_MASK_DISABLE);
4134 cemask_val = le32_to_cpu(tmp);
4135 roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4136 i * CEQ_REG_OFFSET, cemask_val);
4137 }
4138 }
4139
4140 /* ECC multi-bit error alarms */
4141 dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n",
4142 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG),
4143 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG),
4144 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG));
4145
4146 dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n",
4147 roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG),
4148 roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG),
4149 roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG));
4150
4151 return IRQ_RETVAL(int_work);
4152 }
4153
4154 static void hns_roce_v1_int_mask_enable(struct hns_roce_dev *hr_dev)
4155 {
4156 u32 aemask_val;
4157 int masken = 0;
4158 __le32 tmp;
4159 int i;
4160
4161 /* AEQ INT: writing 0 clears the mask and enables the interrupt */
4162 aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
4163 tmp = cpu_to_le32(aemask_val);
4164 roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
4165 masken);
4166 roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
4167 aemask_val = le32_to_cpu(tmp);
4168 roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);
4169
4170 /* CEQ INT */
4171 for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
4172 /* Writing 0 to the per-CEQ IRQ mask enables the interrupt */
4173 roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4174 i * CEQ_REG_OFFSET, masken);
4175 }
4176 }
4177
4178 static void hns_roce_v1_free_eq(struct hns_roce_dev *hr_dev,
4179 struct hns_roce_eq *eq)
4180 {
4181 int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) +
4182 HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
4183 int i;
4184
4185 if (!eq->buf_list)
4186 return;
4187
4188 for (i = 0; i < npages; ++i)
4189 dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE,
4190 eq->buf_list[i].buf, eq->buf_list[i].map);
4191
4192 kfree(eq->buf_list);
4193 }
4194
4195 static void hns_roce_v1_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
4196 int enable_flag)
4197 {
4198 void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
4199 __le32 tmp;
4200 u32 val;
4201
4202 val = readl(eqc);
4203 tmp = cpu_to_le32(val);
4204
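/* Set the EQ state field in the EQ context to VALID or INVALID */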
4205 if (enable_flag)
4206 roce_set_field(tmp,
4207 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
4208 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
4209 HNS_ROCE_EQ_STAT_VALID);
4210 else
4211 roce_set_field(tmp,
4212 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
4213 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
4214 HNS_ROCE_EQ_STAT_INVALID);
4215
4216 val = le32_to_cpu(tmp);
4217 writel(val, eqc);
4218 }
4219
4220 static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev,
4221 struct hns_roce_eq *eq)
4222 {
4223 void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
4224 struct device *dev = &hr_dev->pdev->dev;
4225 dma_addr_t tmp_dma_addr;
4226 u32 eqconsindx_val = 0;
4227 u32 eqcuridx_val = 0;
4228 u32 eqshift_val = 0;
4229 __le32 tmp2 = 0;
4230 __le32 tmp1 = 0;
4231 __le32 tmp = 0;
4232 int num_bas;
4233 int ret;
4234 int i;
4235
4236 num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
4237 HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
4238
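/*
 * Only the first BA is programmed into the EQ context below, so the
 * whole EQ buffer must fit within a single BA-sized chunk.
 */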
4239 if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
4240 dev_err(dev, "EQ buffer size %d exceeds BA size %d, would need %d BAs\n",
4241 (eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
4242 num_bas);
4243 return -EINVAL;
4244 }
4245
4246 eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL);
4247 if (!eq->buf_list)
4248 return -ENOMEM;
4249
4250 for (i = 0; i < num_bas; ++i) {
4251 eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE,
4252 &tmp_dma_addr,
4253 GFP_KERNEL);
4254 if (!eq->buf_list[i].buf) {
4255 ret = -ENOMEM;
4256 goto err_out_free_pages;
4257 }
4258
4259 eq->buf_list[i].map = tmp_dma_addr;
4260 }
4261 eq->cons_index = 0;
4262 roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
4263 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
4264 HNS_ROCE_EQ_STAT_INVALID);
4265 roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
4266 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
4267 eq->log_entries);
4268 eqshift_val = le32_to_cpu(tmp);
4269 writel(eqshift_val, eqc);
4270
4271 /* Configure EQ extended address bits [44:12] */
4272 writel((u32)(eq->buf_list[0].map >> 12), eqc + 4);
4273
4274 /*
4275  * Configure EQ extended address bits [49:45]. The buffer address is
4276  * shifted right by 12 (4K pages) and the high 32-bit word is
4277  * written separately, so bit 44 (= 32 + 12) and above of the DMA
4278  * address land in this field.
4279  */
4280 roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
4281 ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
4282 eq->buf_list[0].map >> 44);
4283 roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
4284 ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
4285 eqcuridx_val = le32_to_cpu(tmp1);
4286 writel(eqcuridx_val, eqc + 8);
4287
4288 /* Configure EQ consumer index */
4289 roce_set_field(tmp2, ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
4290 ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
4291 eqconsindx_val = le32_to_cpu(tmp2);
4292 writel(eqconsindx_val, eqc + 0xc);
4293
4294 return 0;
4295
4296 err_out_free_pages:
4297 for (i -= 1; i >= 0; i--)
4298 dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf,
4299 eq->buf_list[i].map);
4300
4301 kfree(eq->buf_list);
4302 return ret;
4303 }
4304
4305 static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev)
4306 {
4307 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
4308 struct device *dev = &hr_dev->pdev->dev;
4309 struct hns_roce_eq *eq;
4310 int irq_num;
4311 int eq_num;
4312 int ret;
4313 int i, j;
4314
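/*
 * One EQ per completion vector plus the asynchronous EQ(s); any
 * remaining vectors are used for the abnormal interrupt.
 */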
4315 eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
4316 irq_num = eq_num + hr_dev->caps.num_other_vectors;
4317
4318 eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
4319 if (!eq_table->eq)
4320 return -ENOMEM;
4321
4322 eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),
4323 GFP_KERNEL);
4324 if (!eq_table->eqc_base) {
4325 ret = -ENOMEM;
4326 goto err_eqc_base_alloc_fail;
4327 }
4328
4329 for (i = 0; i < eq_num; i++) {
4330 eq = &eq_table->eq[i];
4331 eq->hr_dev = hr_dev;
4332 eq->eqn = i;
4333 eq->irq = hr_dev->irq[i];
4334 eq->log_page_size = PAGE_SHIFT;
4335
4336 if (i < hr_dev->caps.num_comp_vectors) {
4337 /* CEQ */
4338 eq_table->eqc_base[i] = hr_dev->reg_base +
4339 ROCEE_CAEP_CEQC_SHIFT_0_REG +
4340 CEQ_REG_OFFSET * i;
4341 eq->type_flag = HNS_ROCE_CEQ;
4342 eq->doorbell = hr_dev->reg_base +
4343 ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
4344 CEQ_REG_OFFSET * i;
4345 eq->entries = hr_dev->caps.ceqe_depth;
4346 eq->log_entries = ilog2(eq->entries);
4347 eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
4348 } else {
4349 /* AEQ */
4350 eq_table->eqc_base[i] = hr_dev->reg_base +
4351 ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
4352 eq->type_flag = HNS_ROCE_AEQ;
4353 eq->doorbell = hr_dev->reg_base +
4354 ROCEE_CAEP_AEQE_CONS_IDX_REG;
4355 eq->entries = hr_dev->caps.aeqe_depth;
4356 eq->log_entries = ilog2(eq->entries);
4357 eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
4358 }
4359 }
4360
4361 /* Clear the EQ interrupt masks (enable EQ interrupts) */
4362 hns_roce_v1_int_mask_enable(hr_dev);
4363
4364 /* Configure CE interrupt interval */
4365 roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG,
4366 HNS_ROCE_CEQ_DEFAULT_INTERVAL);
4367
4368 /* Configure CE interrupt burst number */
4369 roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG,
4370 HNS_ROCE_CEQ_DEFAULT_BURST_NUM);
4371
4372 for (i = 0; i < eq_num; i++) {
4373 ret = hns_roce_v1_create_eq(hr_dev, &eq_table->eq[i]);
4374 if (ret) {
4375 dev_err(dev, "eq create failed\n");
4376 goto err_create_eq_fail;
4377 }
4378 }
4379
4380 for (j = 0; j < irq_num; j++) {
4381 if (j < eq_num)
4382 ret = request_irq(hr_dev->irq[j],
4383 hns_roce_v1_msix_interrupt_eq, 0,
4384 hr_dev->irq_names[j],
4385 &eq_table->eq[j]);
4386 else
4387 ret = request_irq(hr_dev->irq[j],
4388 hns_roce_v1_msix_interrupt_abn, 0,
4389 hr_dev->irq_names[j], hr_dev);
4390
4391 if (ret) {
4392 dev_err(dev, "request irq error!\n");
4393 goto err_request_irq_fail;
4394 }
4395 }
4396
4397 for (i = 0; i < eq_num; i++)
4398 hns_roce_v1_enable_eq(hr_dev, i, EQ_ENABLE);
4399
4400 return 0;
4401
4402 err_request_irq_fail:
4403 for (j -= 1; j >= 0; j--)
4404 free_irq(hr_dev->irq[j], &eq_table->eq[j]);
4405
4406 err_create_eq_fail:
4407 for (i -= 1; i >= 0; i--)
4408 hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
4409
4410 kfree(eq_table->eqc_base);
4411
4412 err_eqc_base_alloc_fail:
4413 kfree(eq_table->eq);
4414
4415 return ret;
4416 }
4417
4418 static void hns_roce_v1_cleanup_eq_table(struct hns_roce_dev *hr_dev)
4419 {
4420 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
4421 int irq_num;
4422 int eq_num;
4423 int i;
4424
4425 eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
4426 irq_num = eq_num + hr_dev->caps.num_other_vectors;
4427 for (i = 0; i < eq_num; i++) {
4428 /* Disable EQ */
4429 hns_roce_v1_enable_eq(hr_dev, i, EQ_DISABLE);
4430
4431 free_irq(hr_dev->irq[i], &eq_table->eq[i]);
4432
4433 hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
4434 }
4435 for (i = eq_num; i < irq_num; i++)
4436 free_irq(hr_dev->irq[i], hr_dev);
4437
4438 kfree(eq_table->eqc_base);
4439 kfree(eq_table->eq);
4440 }
4441
4442 static const struct ib_device_ops hns_roce_v1_dev_ops = {
4443 .destroy_qp = hns_roce_v1_destroy_qp,
4444 .modify_cq = hns_roce_v1_modify_cq,
4445 .poll_cq = hns_roce_v1_poll_cq,
4446 .post_recv = hns_roce_v1_post_recv,
4447 .post_send = hns_roce_v1_post_send,
4448 .query_qp = hns_roce_v1_query_qp,
4449 .req_notify_cq = hns_roce_v1_req_notify_cq,
4450 };
4451
4452 static const struct hns_roce_hw hns_roce_hw_v1 = {
4453 .reset = hns_roce_v1_reset,
4454 .hw_profile = hns_roce_v1_profile,
4455 .hw_init = hns_roce_v1_init,
4456 .hw_exit = hns_roce_v1_exit,
4457 .post_mbox = hns_roce_v1_post_mbox,
4458 .chk_mbox = hns_roce_v1_chk_mbox,
4459 .set_gid = hns_roce_v1_set_gid,
4460 .set_mac = hns_roce_v1_set_mac,
4461 .set_mtu = hns_roce_v1_set_mtu,
4462 .write_mtpt = hns_roce_v1_write_mtpt,
4463 .write_cqc = hns_roce_v1_write_cqc,
4464 .modify_cq = hns_roce_v1_modify_cq,
4465 .clear_hem = hns_roce_v1_clear_hem,
4466 .modify_qp = hns_roce_v1_modify_qp,
4467 .query_qp = hns_roce_v1_query_qp,
4468 .destroy_qp = hns_roce_v1_destroy_qp,
4469 .post_send = hns_roce_v1_post_send,
4470 .post_recv = hns_roce_v1_post_recv,
4471 .req_notify_cq = hns_roce_v1_req_notify_cq,
4472 .poll_cq = hns_roce_v1_poll_cq,
4473 .dereg_mr = hns_roce_v1_dereg_mr,
4474 .destroy_cq = hns_roce_v1_destroy_cq,
4475 .init_eq = hns_roce_v1_init_eq_table,
4476 .cleanup_eq = hns_roce_v1_cleanup_eq_table,
4477 .hns_roce_dev_ops = &hns_roce_v1_dev_ops,
4478 };
4479
4480 static const struct of_device_id hns_roce_of_match[] = {
4481 { .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, },
4482 {},
4483 };
4484 MODULE_DEVICE_TABLE(of, hns_roce_of_match);
4485
4486 static const struct acpi_device_id hns_roce_acpi_match[] = {
4487 { "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 },
4488 {},
4489 };
4490 MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match);
4491
4492 static struct platform_device *
4493 hns_roce_find_pdev(struct fwnode_handle *fwnode)
4494 {
4495 struct device *dev;
4496
4497 /* Get the device that matches this fwnode */
4498 dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode);
4499
4500 return dev ? to_platform_device(dev) : NULL;
4501 }
4502
4503 static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
4504 {
4505 struct device *dev = &hr_dev->pdev->dev;
4506 struct platform_device *pdev = NULL;
4507 struct net_device *netdev = NULL;
4508 struct device_node *net_node;
4509 int port_cnt = 0;
4510 u8 phy_port;
4511 int ret;
4512 int i;
4513
4514 /* Check if we are compatible with the underlying SoC */
4515 if (dev_of_node(dev)) {
4516 const struct of_device_id *of_id;
4517
4518 of_id = of_match_node(hns_roce_of_match, dev->of_node);
4519 if (!of_id) {
4520 dev_err(dev, "device is not compatible!\n");
4521 return -ENXIO;
4522 }
4523 hr_dev->hw = (const struct hns_roce_hw *)of_id->data;
4524 if (!hr_dev->hw) {
4525 dev_err(dev, "couldn't get H/W specific DT data!\n");
4526 return -ENXIO;
4527 }
4528 } else if (is_acpi_device_node(dev->fwnode)) {
4529 const struct acpi_device_id *acpi_id;
4530
4531 acpi_id = acpi_match_device(hns_roce_acpi_match, dev);
4532 if (!acpi_id) {
4533 dev_err(dev, "device is not compatible!\n");
4534 return -ENXIO;
4535 }
4536 hr_dev->hw = (const struct hns_roce_hw *) acpi_id->driver_data;
4537 if (!hr_dev->hw) {
4538 dev_err(dev, "couldn't get H/W specific ACPI data!\n");
4539 return -ENXIO;
4540 }
4541 } else {
4542 dev_err(dev, "can't read compatibility data from DT or ACPI\n");
4543 return -ENXIO;
4544 }
4545
4546 /* Get the mapped register base address */
4547 hr_dev->reg_base = devm_platform_ioremap_resource(hr_dev->pdev, 0);
4548 if (IS_ERR(hr_dev->reg_base))
4549 return PTR_ERR(hr_dev->reg_base);
4550
4551 /* Read the node_guid of the IB device from DT or ACPI */
4552 ret = device_property_read_u8_array(dev, "node-guid",
4553 (u8 *)&hr_dev->ib_dev.node_guid,
4554 GUID_LEN);
4555 if (ret) {
4556 dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
4557 return ret;
4558 }
4559
4560 /* Get the RoCE associated ethernet ports / netdevices */
4561 for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
4562 if (dev_of_node(dev)) {
4563 net_node = of_parse_phandle(dev->of_node, "eth-handle",
4564 i);
4565 if (!net_node)
4566 continue;
4567 pdev = of_find_device_by_node(net_node);
4568 } else if (is_acpi_device_node(dev->fwnode)) {
4569 struct fwnode_reference_args args;
4570
4571 ret = acpi_node_get_property_reference(dev->fwnode,
4572 "eth-handle",
4573 i, &args);
4574 if (ret)
4575 continue;
4576 pdev = hns_roce_find_pdev(args.fwnode);
4577 } else {
4578 dev_err(dev, "cannot read data from DT or ACPI\n");
4579 return -ENXIO;
4580 }
4581
4582 if (pdev) {
4583 netdev = platform_get_drvdata(pdev);
4584 phy_port = (u8)i;
4585 if (netdev) {
4586 hr_dev->iboe.netdevs[port_cnt] = netdev;
4587 hr_dev->iboe.phy_port[port_cnt] = phy_port;
4588 } else {
4589 dev_err(dev, "no netdev found with pdev %s\n",
4590 pdev->name);
4591 return -ENODEV;
4592 }
4593 port_cnt++;
4594 }
4595 }
4596
4597 if (port_cnt == 0) {
4598 dev_err(dev, "unable to get eth-handle for available ports!\n");
4599 return -EINVAL;
4600 }
4601
4602 hr_dev->caps.num_ports = port_cnt;
4603
4604 /* cmd issue mode: 0 is poll, 1 is event */
4605 hr_dev->cmd_mod = 1;
4606 hr_dev->loop_idc = 0;
4607 hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
4608 hr_dev->odb_offset = ROCEE_DB_OTHERS_L_0_REG;
4609
4610 /* Read the interrupt names from DT or ACPI */
4611 ret = device_property_read_string_array(dev, "interrupt-names",
4612 hr_dev->irq_names,
4613 HNS_ROCE_V1_MAX_IRQ_NUM);
4614 if (ret < 0) {
4615 dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
4616 return ret;
4617 }
4618
4619 /* Fetch the interrupt numbers */
4620 for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) {
4621 hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
4622 if (hr_dev->irq[i] <= 0)
4623 return -EINVAL;
4624 }
4625
4626 return 0;
4627 }
4628
4629
4630 /**
4631  * hns_roce_probe - RoCE driver entrance
4632  * @pdev: pointer to platform device
4633  * Return: 0 on success, a negative errno otherwise
4634  */
4635 static int hns_roce_probe(struct platform_device *pdev)
4636 {
4637 int ret;
4638 struct hns_roce_dev *hr_dev;
4639 struct device *dev = &pdev->dev;
4640
4641 hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
4642 if (!hr_dev)
4643 return -ENOMEM;
4644
4645 hr_dev->priv = kzalloc(sizeof(struct hns_roce_v1_priv), GFP_KERNEL);
4646 if (!hr_dev->priv) {
4647 ret = -ENOMEM;
4648 goto error_failed_kzalloc;
4649 }
4650
4651 hr_dev->pdev = pdev;
4652 hr_dev->dev = dev;
4653 platform_set_drvdata(pdev, hr_dev);
4654
4655 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
4656 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
4657 dev_err(dev, "No usable DMA addressing mode\n");
4658 ret = -EIO;
4659 goto error_failed_get_cfg;
4660 }
4661
4662 ret = hns_roce_get_cfg(hr_dev);
4663 if (ret) {
4664 dev_err(dev, "Get Configuration failed!\n");
4665 goto error_failed_get_cfg;
4666 }
4667
4668 ret = hns_roce_init(hr_dev);
4669 if (ret) {
4670 dev_err(dev, "RoCE engine init failed!\n");
4671 goto error_failed_get_cfg;
4672 }
4673
4674 return 0;
4675
4676 error_failed_get_cfg:
4677 kfree(hr_dev->priv);
4678
4679 error_failed_kzalloc:
4680 ib_dealloc_device(&hr_dev->ib_dev);
4681
4682 return ret;
4683 }
4684
4685 /**
4686  * hns_roce_remove - remove the RoCE device
4687  * @pdev: pointer to platform device
4688  */
4689 static int hns_roce_remove(struct platform_device *pdev)
4690 {
4691 struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);
4692
4693 hns_roce_exit(hr_dev);
4694 kfree(hr_dev->priv);
4695 ib_dealloc_device(&hr_dev->ib_dev);
4696
4697 return 0;
4698 }
4699
4700 static struct platform_driver hns_roce_driver = {
4701 .probe = hns_roce_probe,
4702 .remove = hns_roce_remove,
4703 .driver = {
4704 .name = DRV_NAME,
4705 .of_match_table = hns_roce_of_match,
4706 .acpi_match_table = ACPI_PTR(hns_roce_acpi_match),
4707 },
4708 };
4709
4710 module_platform_driver(hns_roce_driver);
4711
4712 MODULE_LICENSE("Dual BSD/GPL");
4713 MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
4714 MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
4715 MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
4716 MODULE_DESCRIPTION("Hisilicon Hip06 Family RoCE Driver");