This source file includes the following definitions:
- set_data_seg_v2
- set_frmr_seg
- set_atomic_seg
- set_extend_sge
- set_rwqe_data_seg
- hns_roce_v2_post_send
- hns_roce_v2_post_recv
- hns_roce_v2_cmd_hw_reseted
- hns_roce_v2_cmd_hw_resetting
- hns_roce_v2_cmd_sw_resetting
- hns_roce_v2_rst_process_cmd
- hns_roce_cmq_space
- hns_roce_alloc_cmq_desc
- hns_roce_free_cmq_desc
- hns_roce_init_cmq_ring
- hns_roce_cmq_init_regs
- hns_roce_v2_cmq_init
- hns_roce_v2_cmq_exit
- hns_roce_cmq_setup_basic_desc
- hns_roce_cmq_csq_done
- hns_roce_cmq_csq_clean
- __hns_roce_cmq_send
- hns_roce_cmq_send
- hns_roce_cmq_query_hw_info
- hns_roce_func_clr_chk_rst
- hns_roce_func_clr_rst_prc
- hns_roce_function_clear
- hns_roce_query_fw_ver
- hns_roce_config_global_param
- hns_roce_query_pf_resource
- hns_roce_query_pf_timer_resource
- hns_roce_set_vf_switch_param
- hns_roce_alloc_vf_resource
- hns_roce_v2_set_bt
- hns_roce_v2_profile
- hns_roce_config_link_table
- hns_roce_init_link_table
- hns_roce_free_link_table
- hns_roce_v2_init
- hns_roce_v2_exit
- hns_roce_query_mbox_status
- hns_roce_v2_cmd_pending
- hns_roce_v2_cmd_complete
- hns_roce_mbox_post
- hns_roce_v2_post_mbox
- hns_roce_v2_chk_mbox
- hns_roce_config_sgid_table
- hns_roce_v2_set_gid
- hns_roce_v2_set_mac
- set_mtpt_pbl
- hns_roce_v2_write_mtpt
- hns_roce_v2_rereg_write_mtpt
- hns_roce_v2_frmr_write_mtpt
- hns_roce_v2_mw_write_mtpt
- get_cqe_v2
- get_sw_cqe_v2
- next_cqe_sw_v2
- get_srq_wqe
- hns_roce_free_srq_wqe
- hns_roce_v2_cq_set_ci
- __hns_roce_v2_cq_clean
- hns_roce_v2_cq_clean
- hns_roce_v2_write_cqc
- hns_roce_v2_req_notify_cq
- hns_roce_handle_recv_inl_wqe
- hns_roce_v2_poll_one
- hns_roce_v2_poll_cq
- get_op_for_set_hem
- hns_roce_v2_set_hem
- hns_roce_v2_clear_hem
- hns_roce_v2_qp_modify
- set_access_flags
- set_qpc_wqe_cnt
- modify_qp_reset_to_init
- modify_qp_init_to_init
- check_wqe_rq_mtt_count
- modify_qp_init_to_rtr
- modify_qp_rtr_to_rts
- hns_roce_v2_check_qp_stat
- hns_roce_v2_set_path
- hns_roce_v2_set_abs_fields
- hns_roce_v2_set_opt_fields
- hns_roce_v2_record_opt_fields
- hns_roce_v2_modify_qp
- to_ib_qp_st
- hns_roce_v2_query_qpc
- hns_roce_v2_query_qp
- hns_roce_v2_destroy_qp_common
- hns_roce_v2_destroy_qp
- hns_roce_v2_qp_flow_control_init
- hns_roce_v2_modify_cq
- hns_roce_set_qps_to_err
- hns_roce_irq_work_handle
- hns_roce_v2_init_irq_work
- set_eq_cons_index_v2
- get_aeqe_v2
- mhop_get_aeqe
- next_aeqe_sw_v2
- hns_roce_v2_aeq_int
- get_ceqe_v2
- mhop_get_ceqe
- next_ceqe_sw_v2
- hns_roce_v2_ceq_int
- hns_roce_v2_msix_interrupt_eq
- hns_roce_v2_msix_interrupt_abn
- hns_roce_v2_int_mask_enable
- hns_roce_v2_destroy_eqc
- hns_roce_mhop_free_eq
- hns_roce_v2_free_eq
- hns_roce_config_eqc
- hns_roce_mhop_alloc_eq
- hns_roce_v2_create_eq
- __hns_roce_request_irq
- __hns_roce_free_irq
- hns_roce_v2_init_eq_table
- hns_roce_v2_cleanup_eq_table
- hns_roce_v2_write_srqc
- hns_roce_v2_modify_srq
- hns_roce_v2_query_srq
- find_empty_entry
- fill_idx_queue
- hns_roce_v2_post_srq_recv
- hns_roce_hw_v2_get_cfg
- __hns_roce_hw_v2_init_instance
- __hns_roce_hw_v2_uninit_instance
- hns_roce_hw_v2_init_instance
- hns_roce_hw_v2_uninit_instance
- hns_roce_hw_v2_reset_notify_down
- hns_roce_hw_v2_reset_notify_init
- hns_roce_hw_v2_reset_notify_uninit
- hns_roce_hw_v2_reset_notify
- hns_roce_hw_v2_init
- hns_roce_hw_v2_exit
33 #include <linux/acpi.h>
34 #include <linux/etherdevice.h>
35 #include <linux/interrupt.h>
36 #include <linux/kernel.h>
37 #include <linux/types.h>
38 #include <net/addrconf.h>
39 #include <rdma/ib_addr.h>
40 #include <rdma/ib_cache.h>
41 #include <rdma/ib_umem.h>
42 #include <rdma/uverbs_ioctl.h>
43
44 #include "hnae3.h"
45 #include "hns_roce_common.h"
46 #include "hns_roce_device.h"
47 #include "hns_roce_cmd.h"
48 #include "hns_roce_hem.h"
49 #include "hns_roce_hw_v2.h"
50
51 static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
52 struct ib_sge *sg)
53 {
54 dseg->lkey = cpu_to_le32(sg->lkey);
55 dseg->addr = cpu_to_le64(sg->addr);
56 dseg->len = cpu_to_le32(sg->length);
57 }
58
59 static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
60 struct hns_roce_wqe_frmr_seg *fseg,
61 const struct ib_reg_wr *wr)
62 {
63 struct hns_roce_mr *mr = to_hr_mr(wr->mr);
64
65 /* use ib_access_flags to fill in the WQE access bits */
66 roce_set_bit(rc_sq_wqe->byte_4,
67 V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S,
68 wr->access & IB_ACCESS_MW_BIND ? 1 : 0);
69 roce_set_bit(rc_sq_wqe->byte_4,
70 V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S,
71 wr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
72 roce_set_bit(rc_sq_wqe->byte_4,
73 V2_RC_FRMR_WQE_BYTE_4_RR_S,
74 wr->access & IB_ACCESS_REMOTE_READ ? 1 : 0);
75 roce_set_bit(rc_sq_wqe->byte_4,
76 V2_RC_FRMR_WQE_BYTE_4_RW_S,
77 wr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
78 roce_set_bit(rc_sq_wqe->byte_4,
79 V2_RC_FRMR_WQE_BYTE_4_LW_S,
80 wr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
81
82 /* msg_len and inv_key are reused here to carry the PBL base address */
83 rc_sq_wqe->msg_len = cpu_to_le32(mr->pbl_ba & 0xffffffff);
84 rc_sq_wqe->inv_key = cpu_to_le32(mr->pbl_ba >> 32);
85
86 rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
87 rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
88 rc_sq_wqe->rkey = cpu_to_le32(wr->key);
89 rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);
90
91 fseg->pbl_size = cpu_to_le32(mr->pbl_size);
92 roce_set_field(fseg->mode_buf_pg_sz,
93 V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M,
94 V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S,
95 mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
96 roce_set_bit(fseg->mode_buf_pg_sz,
97 V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0);
98 }
99
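/* set_atomic_seg - fill the atomic segment of an RC WQE. For
 * compare-and-swap both operands are used (the swap value and the
 * compare value); for fetch-and-add only the add operand matters, so
 * the compare field is cleared.
 */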
100 static void set_atomic_seg(struct hns_roce_wqe_atomic_seg *aseg,
101 const struct ib_atomic_wr *wr)
102 {
103 if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
104 aseg->fetchadd_swap_data = cpu_to_le64(wr->swap);
105 aseg->cmp_data = cpu_to_le64(wr->compare_add);
106 } else {
107 aseg->fetchadd_swap_data = cpu_to_le64(wr->compare_add);
108 aseg->cmp_data = 0;
109 }
110 }
111
112 static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
113 unsigned int *sge_ind, int valid_num_sge)
114 {
115 struct hns_roce_v2_wqe_data_seg *dseg;
116 struct ib_sge *sg;
117 int num_in_wqe = 0;
118 int extend_sge_num;
119 int fi_sge_num;
120 int se_sge_num;
121 int shift;
122 int i;
123
124 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
125 num_in_wqe = HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE;
126 extend_sge_num = valid_num_sge - num_in_wqe;
127 sg = wr->sg_list + num_in_wqe;
128 shift = qp->hr_buf.page_shift;
129
130 /*
131 * Check whether the extended sges all fit before the next page
132 * boundary. If not, write fi_sge_num entries up to the boundary and
133 * the remaining se_sge_num entries from the start of the next page.
134 */
135 dseg = get_send_extend_sge(qp, (*sge_ind) & (qp->sge.sge_cnt - 1));
136 fi_sge_num = (round_up((uintptr_t)dseg, 1 << shift) -
137 (uintptr_t)dseg) /
138 sizeof(struct hns_roce_v2_wqe_data_seg);
139 if (extend_sge_num > fi_sge_num) {
140 se_sge_num = extend_sge_num - fi_sge_num;
141 for (i = 0; i < fi_sge_num; i++) {
142 set_data_seg_v2(dseg++, sg + i);
143 (*sge_ind)++;
144 }
145 dseg = get_send_extend_sge(qp,
146 (*sge_ind) & (qp->sge.sge_cnt - 1));
147 for (i = 0; i < se_sge_num; i++) {
148 set_data_seg_v2(dseg++, sg + fi_sge_num + i);
149 (*sge_ind)++;
150 }
151 } else {
152 for (i = 0; i < extend_sge_num; i++) {
153 set_data_seg_v2(dseg++, sg + i);
154 (*sge_ind)++;
155 }
156 }
157 }
158
159 static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
160 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
161 void *wqe, unsigned int *sge_ind,
162 int valid_num_sge,
163 const struct ib_send_wr **bad_wr)
164 {
165 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
166 struct hns_roce_v2_wqe_data_seg *dseg = wqe;
167 struct hns_roce_qp *qp = to_hr_qp(ibqp);
168 int j = 0;
169 int i;
170
171 if (wr->send_flags & IB_SEND_INLINE && valid_num_sge) {
172 if (le32_to_cpu(rc_sq_wqe->msg_len) >
173 hr_dev->caps.max_sq_inline) {
174 *bad_wr = wr;
175 dev_err(hr_dev->dev, "inline len(1-%d)=%d, illegal",
176 hr_dev->caps.max_sq_inline, le32_to_cpu(rc_sq_wqe->msg_len));
177 return -EINVAL;
178 }
179
180 if (wr->opcode == IB_WR_RDMA_READ) {
181 *bad_wr = wr;
182 dev_err(hr_dev->dev, "RDMA READ does not support inline data!\n");
183 return -EINVAL;
184 }
185
186 for (i = 0; i < wr->num_sge; i++) {
187 memcpy(wqe, ((void *)wr->sg_list[i].addr),
188 wr->sg_list[i].length);
189 wqe += wr->sg_list[i].length;
190 }
191
192 roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
193 1);
194 } else {
195 if (valid_num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) {
196 for (i = 0; i < wr->num_sge; i++) {
197 if (likely(wr->sg_list[i].length)) {
198 set_data_seg_v2(dseg, wr->sg_list + i);
199 dseg++;
200 }
201 }
202 } else {
203 roce_set_field(rc_sq_wqe->byte_20,
204 V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
205 V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
206 (*sge_ind) & (qp->sge.sge_cnt - 1));
207
208 for (i = 0; i < wr->num_sge &&
209 j < HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) {
210 if (likely(wr->sg_list[i].length)) {
211 set_data_seg_v2(dseg, wr->sg_list + i);
212 dseg++;
213 j++;
214 }
215 }
216
217 set_extend_sge(qp, wr, sge_ind, valid_num_sge);
218 }
219
220 roce_set_field(rc_sq_wqe->byte_16,
221 V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
222 V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);
223 }
224
225 return 0;
226 }
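/* To recap set_rwqe_data_seg: inline sends memcpy the payload straight
 * into the WQE (capped at caps.max_sq_inline and rejected for RDMA
 * READ); up to HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE scatter entries live
 * in the WQE itself; anything beyond that spills into the extended
 * SGE area via set_extend_sge().
 */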
227
228 static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
229 const struct ib_qp_attr *attr,
230 int attr_mask, enum ib_qp_state cur_state,
231 enum ib_qp_state new_state);
232
233 static int hns_roce_v2_post_send(struct ib_qp *ibqp,
234 const struct ib_send_wr *wr,
235 const struct ib_send_wr **bad_wr)
236 {
237 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
238 struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
239 struct hns_roce_v2_ud_send_wqe *ud_sq_wqe;
240 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
241 struct hns_roce_qp *qp = to_hr_qp(ibqp);
242 struct hns_roce_wqe_frmr_seg *fseg;
243 struct device *dev = hr_dev->dev;
244 struct hns_roce_v2_db sq_db;
245 struct ib_qp_attr attr;
246 unsigned int owner_bit;
247 unsigned int sge_idx;
248 unsigned int wqe_idx;
249 unsigned long flags;
250 int valid_num_sge;
251 void *wqe = NULL;
252 bool loopback;
253 int attr_mask;
254 u32 tmp_len;
255 int ret = 0;
256 u32 hr_op;
257 u8 *smac;
258 int nreq;
259 int i;
260
261 if (unlikely(ibqp->qp_type != IB_QPT_RC &&
262 ibqp->qp_type != IB_QPT_GSI &&
263 ibqp->qp_type != IB_QPT_UD)) {
264 dev_err(dev, "QP type (0x%x) is not supported!\n", ibqp->qp_type);
265 *bad_wr = wr;
266 return -EOPNOTSUPP;
267 }
268
269 if (unlikely(qp->state == IB_QPS_RESET || qp->state == IB_QPS_INIT ||
270 qp->state == IB_QPS_RTR)) {
271 dev_err(dev, "failed to post WQE, QP state %d is not ready!\n", qp->state);
272 *bad_wr = wr;
273 return -EINVAL;
274 }
275
276 spin_lock_irqsave(&qp->sq.lock, flags);
277 sge_idx = qp->next_sge;
278
279 for (nreq = 0; wr; ++nreq, wr = wr->next) {
280 if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
281 ret = -ENOMEM;
282 *bad_wr = wr;
283 goto out;
284 }
285
286 wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
287
288 if (unlikely(wr->num_sge > qp->sq.max_gs)) {
289 dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
290 wr->num_sge, qp->sq.max_gs);
291 ret = -EINVAL;
292 *bad_wr = wr;
293 goto out;
294 }
295
296 wqe = get_send_wqe(qp, wqe_idx);
297 qp->sq.wrid[wqe_idx] = wr->wr_id;
298 owner_bit =
299 ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
300 valid_num_sge = 0;
301 tmp_len = 0;
302
303 for (i = 0; i < wr->num_sge; i++) {
304 if (likely(wr->sg_list[i].length)) {
305 tmp_len += wr->sg_list[i].length;
306 valid_num_sge++;
307 }
308 }
309
310 /* Corresponding to the QP type, process the WQE separately */
311 if (ibqp->qp_type == IB_QPT_GSI) {
312 ud_sq_wqe = wqe;
313 memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));
314
315 roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M,
316 V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
317 roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
318 V2_UD_SEND_WQE_DMAC_1_S, ah->av.mac[1]);
319 roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_2_M,
320 V2_UD_SEND_WQE_DMAC_2_S, ah->av.mac[2]);
321 roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_3_M,
322 V2_UD_SEND_WQE_DMAC_3_S, ah->av.mac[3]);
323 roce_set_field(ud_sq_wqe->byte_48,
324 V2_UD_SEND_WQE_BYTE_48_DMAC_4_M,
325 V2_UD_SEND_WQE_BYTE_48_DMAC_4_S,
326 ah->av.mac[4]);
327 roce_set_field(ud_sq_wqe->byte_48,
328 V2_UD_SEND_WQE_BYTE_48_DMAC_5_M,
329 V2_UD_SEND_WQE_BYTE_48_DMAC_5_S,
330 ah->av.mac[5]);
331
332 /* MAC loopback */
333 smac = (u8 *)hr_dev->dev_addr[qp->port];
334 loopback = ether_addr_equal_unaligned(ah->av.mac,
335 smac) ? 1 : 0;
336
337 roce_set_bit(ud_sq_wqe->byte_40,
338 V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback);
339
340 roce_set_field(ud_sq_wqe->byte_4,
341 V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
342 V2_UD_SEND_WQE_BYTE_4_OPCODE_S,
343 HNS_ROCE_V2_WQE_OP_SEND);
344
345 ud_sq_wqe->msg_len =
346 cpu_to_le32(le32_to_cpu(ud_sq_wqe->msg_len) + tmp_len);
347
348 switch (wr->opcode) {
349 case IB_WR_SEND_WITH_IMM:
350 case IB_WR_RDMA_WRITE_WITH_IMM:
351 ud_sq_wqe->immtdata =
352 cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
353 break;
354 default:
355 ud_sq_wqe->immtdata = 0;
356 break;
357 }
358
359 /* Set sig attr */
360 roce_set_bit(ud_sq_wqe->byte_4,
361 V2_UD_SEND_WQE_BYTE_4_CQE_S,
362 (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
363
364 /* Set se attr */
365 roce_set_bit(ud_sq_wqe->byte_4,
366 V2_UD_SEND_WQE_BYTE_4_SE_S,
367 (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
368
369 roce_set_bit(ud_sq_wqe->byte_4,
370 V2_UD_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
371
372 roce_set_field(ud_sq_wqe->byte_16,
373 V2_UD_SEND_WQE_BYTE_16_PD_M,
374 V2_UD_SEND_WQE_BYTE_16_PD_S,
375 to_hr_pd(ibqp->pd)->pdn);
376
377 roce_set_field(ud_sq_wqe->byte_16,
378 V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
379 V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S,
380 valid_num_sge);
381
382 roce_set_field(ud_sq_wqe->byte_20,
383 V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
384 V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
385 sge_idx & (qp->sge.sge_cnt - 1));
386
387 roce_set_field(ud_sq_wqe->byte_24,
388 V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
389 V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, 0);
390 ud_sq_wqe->qkey =
391 cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
392 qp->qkey : ud_wr(wr)->remote_qkey);
393 roce_set_field(ud_sq_wqe->byte_32,
394 V2_UD_SEND_WQE_BYTE_32_DQPN_M,
395 V2_UD_SEND_WQE_BYTE_32_DQPN_S,
396 ud_wr(wr)->remote_qpn);
397
398 roce_set_field(ud_sq_wqe->byte_36,
399 V2_UD_SEND_WQE_BYTE_36_VLAN_M,
400 V2_UD_SEND_WQE_BYTE_36_VLAN_S,
401 ah->av.vlan);
402 roce_set_field(ud_sq_wqe->byte_36,
403 V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
404 V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S,
405 ah->av.hop_limit);
406 roce_set_field(ud_sq_wqe->byte_36,
407 V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
408 V2_UD_SEND_WQE_BYTE_36_TCLASS_S,
409 ah->av.tclass);
410 roce_set_field(ud_sq_wqe->byte_40,
411 V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
412 V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S,
413 ah->av.flowlabel);
414 roce_set_field(ud_sq_wqe->byte_40,
415 V2_UD_SEND_WQE_BYTE_40_SL_M,
416 V2_UD_SEND_WQE_BYTE_40_SL_S,
417 ah->av.sl);
418 roce_set_field(ud_sq_wqe->byte_40,
419 V2_UD_SEND_WQE_BYTE_40_PORTN_M,
420 V2_UD_SEND_WQE_BYTE_40_PORTN_S,
421 qp->port);
422
423 roce_set_bit(ud_sq_wqe->byte_40,
424 V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
425 ah->av.vlan_en ? 1 : 0);
426 roce_set_field(ud_sq_wqe->byte_48,
427 V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
428 V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S,
429 hns_get_gid_index(hr_dev, qp->phy_port,
430 ah->av.gid_index));
431
432 memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0],
433 GID_LEN_V2);
434
435 set_extend_sge(qp, wr, &sge_idx, valid_num_sge);
436 } else if (ibqp->qp_type == IB_QPT_RC) {
437 rc_sq_wqe = wqe;
438 memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
439
440 rc_sq_wqe->msg_len =
441 cpu_to_le32(le32_to_cpu(rc_sq_wqe->msg_len) + tmp_len);
442
443 switch (wr->opcode) {
444 case IB_WR_SEND_WITH_IMM:
445 case IB_WR_RDMA_WRITE_WITH_IMM:
446 rc_sq_wqe->immtdata =
447 cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
448 break;
449 case IB_WR_SEND_WITH_INV:
450 rc_sq_wqe->inv_key =
451 cpu_to_le32(wr->ex.invalidate_rkey);
452 break;
453 default:
454 rc_sq_wqe->immtdata = 0;
455 break;
456 }
457
458 roce_set_bit(rc_sq_wqe->byte_4,
459 V2_RC_SEND_WQE_BYTE_4_FENCE_S,
460 (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
461
462 roce_set_bit(rc_sq_wqe->byte_4,
463 V2_RC_SEND_WQE_BYTE_4_SE_S,
464 (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
465
466 roce_set_bit(rc_sq_wqe->byte_4,
467 V2_RC_SEND_WQE_BYTE_4_CQE_S,
468 (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
469
470 roce_set_bit(rc_sq_wqe->byte_4,
471 V2_RC_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
472
473 wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
474 switch (wr->opcode) {
475 case IB_WR_RDMA_READ:
476 hr_op = HNS_ROCE_V2_WQE_OP_RDMA_READ;
477 rc_sq_wqe->rkey =
478 cpu_to_le32(rdma_wr(wr)->rkey);
479 rc_sq_wqe->va =
480 cpu_to_le64(rdma_wr(wr)->remote_addr);
481 break;
482 case IB_WR_RDMA_WRITE:
483 hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE;
484 rc_sq_wqe->rkey =
485 cpu_to_le32(rdma_wr(wr)->rkey);
486 rc_sq_wqe->va =
487 cpu_to_le64(rdma_wr(wr)->remote_addr);
488 break;
489 case IB_WR_RDMA_WRITE_WITH_IMM:
490 hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM;
491 rc_sq_wqe->rkey =
492 cpu_to_le32(rdma_wr(wr)->rkey);
493 rc_sq_wqe->va =
494 cpu_to_le64(rdma_wr(wr)->remote_addr);
495 break;
496 case IB_WR_SEND:
497 hr_op = HNS_ROCE_V2_WQE_OP_SEND;
498 break;
499 case IB_WR_SEND_WITH_INV:
500 hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_INV;
501 break;
502 case IB_WR_SEND_WITH_IMM:
503 hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM;
504 break;
505 case IB_WR_LOCAL_INV:
506 hr_op = HNS_ROCE_V2_WQE_OP_LOCAL_INV;
507 roce_set_bit(rc_sq_wqe->byte_4,
508 V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
509 rc_sq_wqe->inv_key =
510 cpu_to_le32(wr->ex.invalidate_rkey);
511 break;
512 case IB_WR_REG_MR:
513 hr_op = HNS_ROCE_V2_WQE_OP_FAST_REG_PMR;
514 fseg = wqe;
515 set_frmr_seg(rc_sq_wqe, fseg, reg_wr(wr));
516 break;
517 case IB_WR_ATOMIC_CMP_AND_SWP:
518 hr_op = HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP;
519 rc_sq_wqe->rkey =
520 cpu_to_le32(atomic_wr(wr)->rkey);
521 rc_sq_wqe->va =
522 cpu_to_le64(atomic_wr(wr)->remote_addr);
523 break;
524 case IB_WR_ATOMIC_FETCH_AND_ADD:
525 hr_op = HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD;
526 rc_sq_wqe->rkey =
527 cpu_to_le32(atomic_wr(wr)->rkey);
528 rc_sq_wqe->va =
529 cpu_to_le64(atomic_wr(wr)->remote_addr);
530 break;
531 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
532 hr_op =
533 HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP;
534 break;
535 case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
536 hr_op =
537 HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD;
538 break;
539 default:
540 hr_op = HNS_ROCE_V2_WQE_OP_MASK;
541 break;
542 }
543
544 roce_set_field(rc_sq_wqe->byte_4,
545 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
546 V2_RC_SEND_WQE_BYTE_4_OPCODE_S, hr_op);
547
548 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
549 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
550 struct hns_roce_v2_wqe_data_seg *dseg;
551
552 dseg = wqe;
553 set_data_seg_v2(dseg, wr->sg_list);
554 wqe += sizeof(struct hns_roce_v2_wqe_data_seg);
555 set_atomic_seg(wqe, atomic_wr(wr));
556 roce_set_field(rc_sq_wqe->byte_16,
557 V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
558 V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
559 valid_num_sge);
560 } else if (wr->opcode != IB_WR_REG_MR) {
561 ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe,
562 wqe, &sge_idx,
563 valid_num_sge, bad_wr);
564 if (ret)
565 goto out;
566 }
567 } else {
568 dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
569 spin_unlock_irqrestore(&qp->sq.lock, flags);
570 *bad_wr = wr;
571 return -EOPNOTSUPP;
572 }
573 }
574
575 out:
576 if (likely(nreq)) {
577 qp->sq.head += nreq;
578 /* Memory barrier */
579 wmb();
580
581 sq_db.byte_4 = 0;
582 sq_db.parameter = 0;
583
584 roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M,
585 V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
586 roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
587 V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
588 roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
589 V2_DB_PARAMETER_IDX_S,
590 qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
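/* The index above keeps one wrap bit beyond the ring size (the mask
 * is (wqe_cnt << 1) - 1) so hardware can tell a full ring from an
 * empty one; this pairs with the owner bit written into each WQE.
 */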
591 roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
592 V2_DB_PARAMETER_SL_S, qp->sl);
593
594 hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l);
595
596 qp->next_sge = sge_idx;
597
598 if (qp->state == IB_QPS_ERR) {
599 attr_mask = IB_QP_STATE;
600 attr.qp_state = IB_QPS_ERR;
601
602 ret = hns_roce_v2_modify_qp(&qp->ibqp, &attr, attr_mask,
603 qp->state, IB_QPS_ERR);
604 if (ret) {
605 spin_unlock_irqrestore(&qp->sq.lock, flags);
606 *bad_wr = wr;
607 return ret;
608 }
609 }
610 }
611
612 spin_unlock_irqrestore(&qp->sq.lock, flags);
613
614 return ret;
615 }
616
617 static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
618 const struct ib_recv_wr *wr,
619 const struct ib_recv_wr **bad_wr)
620 {
621 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
622 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
623 struct hns_roce_v2_wqe_data_seg *dseg;
624 struct hns_roce_rinl_sge *sge_list;
625 struct device *dev = hr_dev->dev;
626 struct ib_qp_attr attr;
627 unsigned long flags;
628 void *wqe = NULL;
629 int attr_mask;
630 u32 wqe_idx;
631 int ret = 0;
632 int nreq;
633 int i;
634
635 spin_lock_irqsave(&hr_qp->rq.lock, flags);
636
637 if (hr_qp->state == IB_QPS_RESET) {
638 spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
639 *bad_wr = wr;
640 return -EINVAL;
641 }
642
643 for (nreq = 0; wr; ++nreq, wr = wr->next) {
644 if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
645 hr_qp->ibqp.recv_cq)) {
646 ret = -ENOMEM;
647 *bad_wr = wr;
648 goto out;
649 }
650
651 wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
652
653 if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
654 dev_err(dev, "rq: num_sge=%d > qp->rq.max_gs=%d\n",
655 wr->num_sge, hr_qp->rq.max_gs);
656 ret = -EINVAL;
657 *bad_wr = wr;
658 goto out;
659 }
660
661 wqe = get_recv_wqe(hr_qp, wqe_idx);
662 dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
663 for (i = 0; i < wr->num_sge; i++) {
664 if (!wr->sg_list[i].length)
665 continue;
666 set_data_seg_v2(dseg, wr->sg_list + i);
667 dseg++;
668 }
669
670 if (i < hr_qp->rq.max_gs) {
671 dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
672 dseg->addr = 0;
673 }
674
675 /* rq support inline data */
676 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
677 sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;
678 hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt =
679 (u32)wr->num_sge;
680 for (i = 0; i < wr->num_sge; i++) {
681 sge_list[i].addr =
682 (void *)(u64)wr->sg_list[i].addr;
683 sge_list[i].len = wr->sg_list[i].length;
684 }
685 }
686
687 hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
688 }
689
690 out:
691 if (likely(nreq)) {
692 hr_qp->rq.head += nreq;
693 /* Memory barrier */
694 wmb();
695
696 *hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;
697
698 if (hr_qp->state == IB_QPS_ERR) {
699 attr_mask = IB_QP_STATE;
700 attr.qp_state = IB_QPS_ERR;
701
702 ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr,
703 attr_mask, hr_qp->state,
704 IB_QPS_ERR);
705 if (ret) {
706 spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
707 *bad_wr = wr;
708 return ret;
709 }
710 }
711 }
712 spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
713
714 return ret;
715 }
716
717 static int hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
718 unsigned long instance_stage,
719 unsigned long reset_stage)
720 {
721 /* When a hardware reset is detected, the driver must stop touching
722 * the hardware: mark the device as reset and disable the doorbell
723 * immediately. If the reset finished while the instance or reset
724 * handling was still in its INIT stage, ask the caller to retry
725 * (CMD_RST_PRC_EBUSY); otherwise treat the command as successfully
726 * processed (CMD_RST_PRC_SUCCESS), since the reset has already wiped
727 * the state the command would have touched.
728 */
729
730 hr_dev->is_reset = true;
731 hr_dev->dis_db = true;
732
733 if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
734 instance_stage == HNS_ROCE_STATE_INIT)
735 return CMD_RST_PRC_EBUSY;
736
737 return CMD_RST_PRC_SUCCESS;
738 }
739
740 static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
741 unsigned long instance_stage,
742 unsigned long reset_stage)
743 {
744 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
745 struct hnae3_handle *handle = priv->handle;
746 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
747
748 /* A hardware reset is in progress, so the doorbell must be disabled
749 * at once. If the reset status register already reads back as "not
750 * resetting", the reset has just completed and the device can be
751 * marked as reset. Commands issued before the reset completes, or
752 * while the instance or reset handling is still in its INIT stage,
753 * must be retried (CMD_RST_PRC_EBUSY); otherwise the command is
754 * treated as processed.
755 */
756
757 hr_dev->dis_db = true;
758 if (!ops->get_hw_reset_stat(handle))
759 hr_dev->is_reset = true;
760
761 if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
762 instance_stage == HNS_ROCE_STATE_INIT)
763 return CMD_RST_PRC_EBUSY;
764
765 return CMD_RST_PRC_SUCCESS;
766 }
767
768 static int hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
769 {
770 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
771 struct hnae3_handle *handle = priv->handle;
772 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
773
774 /* A software reset is in progress: disable the doorbell, and if the
775 * reset counter has already advanced, the reset has completed and the
776 * device can be marked as reset. The command itself is always
777 * busy-rejected while the software reset is pending. */
778 hr_dev->dis_db = true;
779 if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
780 hr_dev->is_reset = true;
781
782 return CMD_RST_PRC_EBUSY;
783 }
784
785 static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
786 {
787 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
788 struct hnae3_handle *handle = priv->handle;
789 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
790 unsigned long instance_stage;
791 unsigned long reset_stage;
792 unsigned long reset_cnt;
793 bool sw_resetting;
794 bool hw_resetting;
795
796 if (hr_dev->is_reset)
797 return CMD_RST_PRC_SUCCESS;
798
799 /* Snapshot the instance/reset stages and the three reset indicators
800 * (reset counter, hardware reset status and software reset status),
801 * then dispatch to the matching handler. The order matters: a
802 * completed reset (counter changed) takes precedence over an
803 * in-flight hardware reset, which in turn takes precedence over a
804 * software reset caught during init.
805 */
806 instance_stage = handle->rinfo.instance_state;
807 reset_stage = handle->rinfo.reset_state;
808 reset_cnt = ops->ae_dev_reset_cnt(handle);
809 hw_resetting = ops->get_hw_reset_stat(handle);
810 sw_resetting = ops->ae_dev_resetting(handle);
811
812 if (reset_cnt != hr_dev->reset_cnt)
813 return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
814 reset_stage);
815 else if (hw_resetting)
816 return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
817 reset_stage);
818 else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
819 return hns_roce_v2_cmd_sw_resetting(hr_dev);
820
821 return 0;
822 }
823
824 static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
825 {
826 int ntu = ring->next_to_use;
827 int ntc = ring->next_to_clean;
828 int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
829
830 return ring->desc_num - used - 1;
831 }
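
/* Worked example of the space calculation (values hypothetical): with
 * desc_num = 1024, next_to_use = 10 and next_to_clean = 5, used =
 * (10 - 5 + 1024) % 1024 = 5 and the free space is 1024 - 5 - 1 =
 * 1018. One slot is always kept unused so that a full ring can be
 * distinguished from an empty one.
 */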
832
833 static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
834 struct hns_roce_v2_cmq_ring *ring)
835 {
836 int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);
837
838 ring->desc = kzalloc(size, GFP_KERNEL);
839 if (!ring->desc)
840 return -ENOMEM;
841
842 ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
843 DMA_BIDIRECTIONAL);
844 if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
845 ring->desc_dma_addr = 0;
846 kfree(ring->desc);
847 ring->desc = NULL;
848 return -ENOMEM;
849 }
850
851 return 0;
852 }
853
854 static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
855 struct hns_roce_v2_cmq_ring *ring)
856 {
857 dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
858 ring->desc_num * sizeof(struct hns_roce_cmq_desc),
859 DMA_BIDIRECTIONAL);
860
861 ring->desc_dma_addr = 0;
862 kfree(ring->desc);
863 }
864
865 static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
866 {
867 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
868 struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
869 &priv->cmq.csq : &priv->cmq.crq;
870
871 ring->flag = ring_type;
872 ring->next_to_clean = 0;
873 ring->next_to_use = 0;
874
875 return hns_roce_alloc_cmq_desc(hr_dev, ring);
876 }
877
878 static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
879 {
880 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
881 struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
882 &priv->cmq.csq : &priv->cmq.crq;
883 dma_addr_t dma = ring->desc_dma_addr;
884
885 if (ring_type == TYPE_CSQ) {
886 roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, (u32)dma);
887 roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
888 upper_32_bits(dma));
889 roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
890 ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
891 roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
892 roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
893 } else {
894 roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
895 roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
896 upper_32_bits(dma));
897 roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
898 ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
899 roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
900 roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
901 }
902 }
903
904 static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
905 {
906 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
907 int ret;
908
909 /* Setup the queue entries for command queue */
910 priv->cmq.csq.desc_num = CMD_CSQ_DESC_NUM;
911 priv->cmq.crq.desc_num = CMD_CRQ_DESC_NUM;
912
913 /* Setup the lock for command queue */
914 spin_lock_init(&priv->cmq.csq.lock);
915 spin_lock_init(&priv->cmq.crq.lock);
916
917 /* Setup Tx write back timeout */
918 priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;
919
920 /* Init CSQ */
921 ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
922 if (ret) {
923 dev_err(hr_dev->dev, "Init CSQ error, ret = %d.\n", ret);
924 return ret;
925 }
926
927 /* Init CRQ */
928 ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
929 if (ret) {
930 dev_err(hr_dev->dev, "Init CRQ error, ret = %d.\n", ret);
931 goto err_crq;
932 }
933
934 /* Init CSQ REG */
935 hns_roce_cmq_init_regs(hr_dev, TYPE_CSQ);
936
937 /* Init CRQ REG */
938 hns_roce_cmq_init_regs(hr_dev, TYPE_CRQ);
939
940 return 0;
941
942 err_crq:
943 hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
944
945 return ret;
946 }
947
948 static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
949 {
950 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
951
952 hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
953 hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
954 }
955
956 static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
957 enum hns_roce_opcode_type opcode,
958 bool is_read)
959 {
960 memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
961 desc->opcode = cpu_to_le16(opcode);
962 desc->flag =
963 cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
964 if (is_read)
965 desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
966 else
967 desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
968 }
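
/* Flag semantics assumed above: IN marks a driver-to-firmware
 * descriptor, NO_INTR selects polled (interrupt-free) completion, and
 * WR distinguishes a read request (firmware fills in desc->data) from
 * a write (the driver supplies the data).
 */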
969
970 static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
971 {
972 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
973 u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
974
975 return head == priv->cmq.csq.next_to_use;
976 }
977
978 static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
979 {
980 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
981 struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
982 struct hns_roce_cmq_desc *desc;
983 u16 ntc = csq->next_to_clean;
984 u32 head;
985 int clean = 0;
986
987 desc = &csq->desc[ntc];
988 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
989 while (head != ntc) {
990 memset(desc, 0, sizeof(*desc));
991 ntc++;
992 if (ntc == csq->desc_num)
993 ntc = 0;
994 desc = &csq->desc[ntc];
995 clean++;
996 }
997 csq->next_to_clean = ntc;
998
999 return clean;
1000 }
1001
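/* __hns_roce_cmq_send - copy @num descriptors into the CSQ ring, bump
 * the TAIL register, and (for NO_INTR commands) poll the head pointer
 * until the firmware has consumed everything or tx_timeout expires.
 * On completion the written-back descriptors are copied out again so
 * the caller can inspect retval and any returned data.
 */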
1002 static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
1003 struct hns_roce_cmq_desc *desc, int num)
1004 {
1005 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
1006 struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
1007 struct hns_roce_cmq_desc *desc_to_use;
1008 bool complete = false;
1009 u32 timeout = 0;
1010 int handle = 0;
1011 u16 desc_ret;
1012 int ret = 0;
1013 int ntc;
1014
1015 spin_lock_bh(&csq->lock);
1016
1017 if (num > hns_roce_cmq_space(csq)) {
1018 spin_unlock_bh(&csq->lock);
1019 return -EBUSY;
1020 }
1021
1022 /*
1023 * Record where these descriptors start in the ring; this is where
1024 * the hardware write-back will be read from below.
1025 */
1026 ntc = csq->next_to_use;
1027
1028 while (handle < num) {
1029 desc_to_use = &csq->desc[csq->next_to_use];
1030 *desc_to_use = desc[handle];
1031 dev_dbg(hr_dev->dev, "set cmq desc:\n");
1032 csq->next_to_use++;
1033 if (csq->next_to_use == csq->desc_num)
1034 csq->next_to_use = 0;
1035 handle++;
1036 }
1037
1038 /* Write to hardware */
1039 roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, csq->next_to_use);
1040
1041 /*
1042 * If the command is sync (NO_INTR), poll until the firmware has
1043 * consumed every posted descriptor or the timeout expires.
1044 */
1045 if (le16_to_cpu(desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) {
1046 do {
1047 if (hns_roce_cmq_csq_done(hr_dev))
1048 break;
1049 udelay(1);
1050 timeout++;
1051 } while (timeout < priv->cmq.tx_timeout);
1052 }
1053
1054 if (hns_roce_cmq_csq_done(hr_dev)) {
1055 complete = true;
1056 handle = 0;
1057 while (handle < num) {
1058 /* get the result written back by hardware */
1059 desc_to_use = &csq->desc[ntc];
1060 desc[handle] = *desc_to_use;
1061 dev_dbg(hr_dev->dev, "Get cmq desc:\n");
1062 desc_ret = le16_to_cpu(desc[handle].retval);
1063 if (desc_ret == CMD_EXEC_SUCCESS)
1064 ret = 0;
1065 else
1066 ret = -EIO;
1067 priv->cmq.last_status = desc_ret;
1068 ntc++;
1069 handle++;
1070 if (ntc == csq->desc_num)
1071 ntc = 0;
1072 }
1073 }
1074
1075 if (!complete)
1076 ret = -EAGAIN;
1077
1078 /* clean the command send queue */
1079 handle = hns_roce_cmq_csq_clean(hr_dev);
1080 if (handle != num)
1081 dev_warn(hr_dev->dev, "Cleaned %d, need to clean %d\n",
1082 handle, num);
1083
1084 spin_unlock_bh(&csq->lock);
1085
1086 return ret;
1087 }
1088
1089 static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
1090 struct hns_roce_cmq_desc *desc, int num)
1091 {
1092 int retval;
1093 int ret;
1094
1095 ret = hns_roce_v2_rst_process_cmd(hr_dev);
1096 if (ret == CMD_RST_PRC_SUCCESS)
1097 return 0;
1098 if (ret == CMD_RST_PRC_EBUSY)
1099 return -EBUSY;
1100
1101 ret = __hns_roce_cmq_send(hr_dev, desc, num);
1102 if (ret) {
1103 retval = hns_roce_v2_rst_process_cmd(hr_dev);
1104 if (retval == CMD_RST_PRC_SUCCESS)
1105 return 0;
1106 else if (retval == CMD_RST_PRC_EBUSY)
1107 return -EBUSY;
1108 }
1109
1110 return ret;
1111 }
1112
1113 static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
1114 {
1115 struct hns_roce_query_version *resp;
1116 struct hns_roce_cmq_desc desc;
1117 int ret;
1118
1119 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
1120 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1121 if (ret)
1122 return ret;
1123
1124 resp = (struct hns_roce_query_version *)desc.data;
1125 hr_dev->hw_rev = le16_to_cpu(resp->rocee_hw_version);
1126 hr_dev->vendor_id = hr_dev->pci_dev->vendor;
1127
1128 return 0;
1129 }
1130
1131 static bool hns_roce_func_clr_chk_rst(struct hns_roce_dev *hr_dev)
1132 {
1133 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
1134 struct hnae3_handle *handle = priv->handle;
1135 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1136 unsigned long reset_cnt;
1137 bool sw_resetting;
1138 bool hw_resetting;
1139
1140 reset_cnt = ops->ae_dev_reset_cnt(handle);
1141 hw_resetting = ops->get_hw_reset_stat(handle);
1142 sw_resetting = ops->ae_dev_resetting(handle);
1143
1144 if (reset_cnt != hr_dev->reset_cnt || hw_resetting || sw_resetting)
1145 return true;
1146
1147 return false;
1148 }
1149
1150 static void hns_roce_func_clr_rst_prc(struct hns_roce_dev *hr_dev, int retval,
1151 int flag)
1152 {
1153 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
1154 struct hnae3_handle *handle = priv->handle;
1155 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1156 unsigned long instance_stage;
1157 unsigned long reset_cnt;
1158 unsigned long end;
1159 bool sw_resetting;
1160 bool hw_resetting;
1161
1162 instance_stage = handle->rinfo.instance_state;
1163 reset_cnt = ops->ae_dev_reset_cnt(handle);
1164 hw_resetting = ops->get_hw_reset_stat(handle);
1165 sw_resetting = ops->ae_dev_resetting(handle);
1166
1167 if (reset_cnt != hr_dev->reset_cnt) {
1168 hr_dev->dis_db = true;
1169 hr_dev->is_reset = true;
1170 dev_info(hr_dev->dev, "Func clear success after reset.\n");
1171 } else if (hw_resetting) {
1172 hr_dev->dis_db = true;
1173
1174 dev_warn(hr_dev->dev,
1175 "Func clear is pending, device in resetting state.\n");
1176 end = HNS_ROCE_V2_HW_RST_TIMEOUT;
1177 while (end) {
1178 if (!ops->get_hw_reset_stat(handle)) {
1179 hr_dev->is_reset = true;
1180 dev_info(hr_dev->dev,
1181 "Func clear success after reset.\n");
1182 return;
1183 }
1184 msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
1185 end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
1186 }
1187
1188 dev_warn(hr_dev->dev, "Func clear failed.\n");
1189 } else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT) {
1190 hr_dev->dis_db = true;
1191
1192 dev_warn(hr_dev->dev,
1193 "Func clear is pending, device in resetting state.\n");
1194 end = HNS_ROCE_V2_HW_RST_TIMEOUT;
1195 while (end) {
1196 if (ops->ae_dev_reset_cnt(handle) !=
1197 hr_dev->reset_cnt) {
1198 hr_dev->is_reset = true;
1199 dev_info(hr_dev->dev,
1200 "Func clear success after sw reset\n");
1201 return;
1202 }
1203 msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
1204 end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
1205 }
1206
1207 dev_warn(hr_dev->dev, "Func clear failed because of unfinished sw reset\n");
1208 } else {
1209 if (retval && !flag)
1210 dev_warn(hr_dev->dev,
1211 "Func clear read failed, ret = %d.\n", retval);
1212
1213 dev_warn(hr_dev->dev, "Func clear failed.\n");
1214 }
1215 }
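
/* hns_roce_function_clear - ask the firmware to clear all state owned
 * by this function. The driver writes FUNC_CLEAR, then polls the same
 * opcode as a read until the firmware reports RST_FUN_DONE, bailing
 * out early whenever a concurrent reset is detected; failures fall
 * through to the reset-aware handling in hns_roce_func_clr_rst_prc().
 */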
1216 static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
1217 {
1218 bool fclr_write_fail_flag = false;
1219 struct hns_roce_func_clear *resp;
1220 struct hns_roce_cmq_desc desc;
1221 unsigned long end;
1222 int ret = 0;
1223
1224 if (hns_roce_func_clr_chk_rst(hr_dev))
1225 goto out;
1226
1227 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false);
1228 resp = (struct hns_roce_func_clear *)desc.data;
1229
1230 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1231 if (ret) {
1232 fclr_write_fail_flag = true;
1233 dev_err(hr_dev->dev, "Func clear write failed, ret = %d.\n",
1234 ret);
1235 goto out;
1236 }
1237
1238 msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL);
1239 end = HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS;
1240 while (end) {
1241 if (hns_roce_func_clr_chk_rst(hr_dev))
1242 goto out;
1243 msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT);
1244 end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT;
1245
1246 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR,
1247 true);
1248
1249 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1250 if (ret)
1251 continue;
1252
1253 if (roce_get_bit(resp->func_done, FUNC_CLEAR_RST_FUN_DONE_S)) {
1254 hr_dev->is_reset = true;
1255 return;
1256 }
1257 }
1258
1259 out:
1260 dev_err(hr_dev->dev, "Func clear failed.\n");
1261 hns_roce_func_clr_rst_prc(hr_dev, ret, fclr_write_fail_flag);
1262 }
1263
1264 static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
1265 {
1266 struct hns_roce_query_fw_info *resp;
1267 struct hns_roce_cmq_desc desc;
1268 int ret;
1269
1270 hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
1271 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1272 if (ret)
1273 return ret;
1274
1275 resp = (struct hns_roce_query_fw_info *)desc.data;
1276 hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));
1277
1278 return 0;
1279 }
1280
1281 static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
1282 {
1283 struct hns_roce_cfg_global_param *req;
1284 struct hns_roce_cmq_desc desc;
1285
1286 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
1287 false);
1288
1289 req = (struct hns_roce_cfg_global_param *)desc.data;
1290 memset(req, 0, sizeof(*req));
1291 roce_set_field(req->time_cfg_udp_port,
1292 CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M,
1293 CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, 0x3e8);
1294 roce_set_field(req->time_cfg_udp_port,
1295 CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M,
1296 CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S, 0x12b7);
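/* 0x3e8 is 1000 timer ticks per microsecond (presumably a 1 GHz
 * reference clock) and 0x12b7 is 4791, the IANA-assigned RoCEv2 UDP
 * destination port.
 */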
1297
1298 return hns_roce_cmq_send(hr_dev, &desc, 1);
1299 }
1300
1301 static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
1302 {
1303 struct hns_roce_cmq_desc desc[2];
1304 struct hns_roce_pf_res_a *req_a;
1305 struct hns_roce_pf_res_b *req_b;
1306 int ret;
1307 int i;
1308
1309 for (i = 0; i < 2; i++) {
1310 hns_roce_cmq_setup_basic_desc(&desc[i],
1311 HNS_ROCE_OPC_QUERY_PF_RES, true);
1312
1313 if (i == 0)
1314 desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1315 else
1316 desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1317 }
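
/* Two chained descriptors form a single command here: the NEXT flag on
 * the first descriptor tells the firmware that the following
 * descriptor belongs to the same request.
 */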
1318
1319 ret = hns_roce_cmq_send(hr_dev, desc, 2);
1320 if (ret)
1321 return ret;
1322
1323 req_a = (struct hns_roce_pf_res_a *)desc[0].data;
1324 req_b = (struct hns_roce_pf_res_b *)desc[1].data;
1325
1326 hr_dev->caps.qpc_bt_num = roce_get_field(req_a->qpc_bt_idx_num,
1327 PF_RES_DATA_1_PF_QPC_BT_NUM_M,
1328 PF_RES_DATA_1_PF_QPC_BT_NUM_S);
1329 hr_dev->caps.srqc_bt_num = roce_get_field(req_a->srqc_bt_idx_num,
1330 PF_RES_DATA_2_PF_SRQC_BT_NUM_M,
1331 PF_RES_DATA_2_PF_SRQC_BT_NUM_S);
1332 hr_dev->caps.cqc_bt_num = roce_get_field(req_a->cqc_bt_idx_num,
1333 PF_RES_DATA_3_PF_CQC_BT_NUM_M,
1334 PF_RES_DATA_3_PF_CQC_BT_NUM_S);
1335 hr_dev->caps.mpt_bt_num = roce_get_field(req_a->mpt_bt_idx_num,
1336 PF_RES_DATA_4_PF_MPT_BT_NUM_M,
1337 PF_RES_DATA_4_PF_MPT_BT_NUM_S);
1338
1339 hr_dev->caps.sl_num = roce_get_field(req_b->qid_idx_sl_num,
1340 PF_RES_DATA_3_PF_SL_NUM_M,
1341 PF_RES_DATA_3_PF_SL_NUM_S);
1342 hr_dev->caps.sccc_bt_num = roce_get_field(req_b->sccc_bt_idx_num,
1343 PF_RES_DATA_4_PF_SCCC_BT_NUM_M,
1344 PF_RES_DATA_4_PF_SCCC_BT_NUM_S);
1345
1346 return 0;
1347 }
1348
1349 static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
1350 {
1351 struct hns_roce_pf_timer_res_a *req_a;
1352 struct hns_roce_cmq_desc desc[2];
1353 int ret, i;
1354
1355 for (i = 0; i < 2; i++) {
1356 hns_roce_cmq_setup_basic_desc(&desc[i],
1357 HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
1358 true);
1359
1360 if (i == 0)
1361 desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1362 else
1363 desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1364 }
1365
1366 ret = hns_roce_cmq_send(hr_dev, desc, 2);
1367 if (ret)
1368 return ret;
1369
1370 req_a = (struct hns_roce_pf_timer_res_a *)desc[0].data;
1371
1372 hr_dev->caps.qpc_timer_bt_num =
1373 roce_get_field(req_a->qpc_timer_bt_idx_num,
1374 PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M,
1375 PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S);
1376 hr_dev->caps.cqc_timer_bt_num =
1377 roce_get_field(req_a->cqc_timer_bt_idx_num,
1378 PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M,
1379 PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S);
1380
1381 return 0;
1382 }
1383
1384 static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
1385 int vf_id)
1386 {
1387 struct hns_roce_cmq_desc desc;
1388 struct hns_roce_vf_switch *swt;
1389 int ret;
1390
1391 swt = (struct hns_roce_vf_switch *)desc.data;
1392 hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true);
1393 swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL);
1394 roce_set_field(swt->fun_id,
1395 VF_SWITCH_DATA_FUN_ID_VF_ID_M,
1396 VF_SWITCH_DATA_FUN_ID_VF_ID_S,
1397 vf_id);
1398 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1399 if (ret)
1400 return ret;
1401 desc.flag =
1402 cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
1403 desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
1404 roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LPBK_S, 1);
1405 roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 0);
1406 roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S, 1);
1407
1408 return hns_roce_cmq_send(hr_dev, &desc, 1);
1409 }
1410
1411 static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
1412 {
1413 struct hns_roce_cmq_desc desc[2];
1414 struct hns_roce_vf_res_a *req_a;
1415 struct hns_roce_vf_res_b *req_b;
1416 int i;
1417
1418 req_a = (struct hns_roce_vf_res_a *)desc[0].data;
1419 req_b = (struct hns_roce_vf_res_b *)desc[1].data;
1420 memset(req_a, 0, sizeof(*req_a));
1421 memset(req_b, 0, sizeof(*req_b));
1422 for (i = 0; i < 2; i++) {
1423 hns_roce_cmq_setup_basic_desc(&desc[i],
1424 HNS_ROCE_OPC_ALLOC_VF_RES, false);
1425
1426 if (i == 0)
1427 desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1428 else
1429 desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1430
1431 if (i == 0) {
1432 roce_set_field(req_a->vf_qpc_bt_idx_num,
1433 VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
1434 VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
1435 roce_set_field(req_a->vf_qpc_bt_idx_num,
1436 VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
1437 VF_RES_A_DATA_1_VF_QPC_BT_NUM_S,
1438 HNS_ROCE_VF_QPC_BT_NUM);
1439
1440 roce_set_field(req_a->vf_srqc_bt_idx_num,
1441 VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
1442 VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
1443 roce_set_field(req_a->vf_srqc_bt_idx_num,
1444 VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
1445 VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
1446 HNS_ROCE_VF_SRQC_BT_NUM);
1447
1448 roce_set_field(req_a->vf_cqc_bt_idx_num,
1449 VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
1450 VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
1451 roce_set_field(req_a->vf_cqc_bt_idx_num,
1452 VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
1453 VF_RES_A_DATA_3_VF_CQC_BT_NUM_S,
1454 HNS_ROCE_VF_CQC_BT_NUM);
1455
1456 roce_set_field(req_a->vf_mpt_bt_idx_num,
1457 VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
1458 VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
1459 roce_set_field(req_a->vf_mpt_bt_idx_num,
1460 VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
1461 VF_RES_A_DATA_4_VF_MPT_BT_NUM_S,
1462 HNS_ROCE_VF_MPT_BT_NUM);
1463
1464 roce_set_field(req_a->vf_eqc_bt_idx_num,
1465 VF_RES_A_DATA_5_VF_EQC_IDX_M,
1466 VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
1467 roce_set_field(req_a->vf_eqc_bt_idx_num,
1468 VF_RES_A_DATA_5_VF_EQC_NUM_M,
1469 VF_RES_A_DATA_5_VF_EQC_NUM_S,
1470 HNS_ROCE_VF_EQC_NUM);
1471 } else {
1472 roce_set_field(req_b->vf_smac_idx_num,
1473 VF_RES_B_DATA_1_VF_SMAC_IDX_M,
1474 VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
1475 roce_set_field(req_b->vf_smac_idx_num,
1476 VF_RES_B_DATA_1_VF_SMAC_NUM_M,
1477 VF_RES_B_DATA_1_VF_SMAC_NUM_S,
1478 HNS_ROCE_VF_SMAC_NUM);
1479
1480 roce_set_field(req_b->vf_sgid_idx_num,
1481 VF_RES_B_DATA_2_VF_SGID_IDX_M,
1482 VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
1483 roce_set_field(req_b->vf_sgid_idx_num,
1484 VF_RES_B_DATA_2_VF_SGID_NUM_M,
1485 VF_RES_B_DATA_2_VF_SGID_NUM_S,
1486 HNS_ROCE_VF_SGID_NUM);
1487
1488 roce_set_field(req_b->vf_qid_idx_sl_num,
1489 VF_RES_B_DATA_3_VF_QID_IDX_M,
1490 VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
1491 roce_set_field(req_b->vf_qid_idx_sl_num,
1492 VF_RES_B_DATA_3_VF_SL_NUM_M,
1493 VF_RES_B_DATA_3_VF_SL_NUM_S,
1494 HNS_ROCE_VF_SL_NUM);
1495
1496 roce_set_field(req_b->vf_sccc_idx_num,
1497 VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M,
1498 VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S, 0);
1499 roce_set_field(req_b->vf_sccc_idx_num,
1500 VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M,
1501 VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S,
1502 HNS_ROCE_VF_SCCC_BT_NUM);
1503 }
1504 }
1505
1506 return hns_roce_cmq_send(hr_dev, desc, 2);
1507 }
1508
1509 static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
1510 {
1511 u8 srqc_hop_num = hr_dev->caps.srqc_hop_num;
1512 u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
1513 u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
1514 u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
1515 u8 sccc_hop_num = hr_dev->caps.sccc_hop_num;
1516 struct hns_roce_cfg_bt_attr *req;
1517 struct hns_roce_cmq_desc desc;
1518
1519 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
1520 req = (struct hns_roce_cfg_bt_attr *)desc.data;
1521 memset(req, 0, sizeof(*req));
1522
1523 roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M,
1524 CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S,
1525 hr_dev->caps.qpc_ba_pg_sz + PG_SHIFT_OFFSET);
1526 roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M,
1527 CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S,
1528 hr_dev->caps.qpc_buf_pg_sz + PG_SHIFT_OFFSET);
1529 roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M,
1530 CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S,
1531 qpc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : qpc_hop_num);
1532
1533 roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M,
1534 CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S,
1535 hr_dev->caps.srqc_ba_pg_sz + PG_SHIFT_OFFSET);
1536 roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M,
1537 CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S,
1538 hr_dev->caps.srqc_buf_pg_sz + PG_SHIFT_OFFSET);
1539 roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M,
1540 CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S,
1541 srqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : srqc_hop_num);
1542
1543 roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M,
1544 CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S,
1545 hr_dev->caps.cqc_ba_pg_sz + PG_SHIFT_OFFSET);
1546 roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M,
1547 CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S,
1548 hr_dev->caps.cqc_buf_pg_sz + PG_SHIFT_OFFSET);
1549 roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M,
1550 CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S,
1551 cqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : cqc_hop_num);
1552
1553 roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M,
1554 CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S,
1555 hr_dev->caps.mpt_ba_pg_sz + PG_SHIFT_OFFSET);
1556 roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M,
1557 CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S,
1558 hr_dev->caps.mpt_buf_pg_sz + PG_SHIFT_OFFSET);
1559 roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M,
1560 CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
1561 mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);
1562
1563 roce_set_field(req->vf_sccc_cfg,
1564 CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_M,
1565 CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_S,
1566 hr_dev->caps.sccc_ba_pg_sz + PG_SHIFT_OFFSET);
1567 roce_set_field(req->vf_sccc_cfg,
1568 CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_M,
1569 CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_S,
1570 hr_dev->caps.sccc_buf_pg_sz + PG_SHIFT_OFFSET);
1571 roce_set_field(req->vf_sccc_cfg,
1572 CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_M,
1573 CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_S,
1574 sccc_hop_num ==
1575 HNS_ROCE_HOP_NUM_0 ? 0 : sccc_hop_num);
1576
1577 return hns_roce_cmq_send(hr_dev, &desc, 1);
1578 }
1579
1580 static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
1581 {
1582 struct hns_roce_caps *caps = &hr_dev->caps;
1583 int ret;
1584
1585 ret = hns_roce_cmq_query_hw_info(hr_dev);
1586 if (ret) {
1587 dev_err(hr_dev->dev, "Query hardware version fail, ret = %d.\n",
1588 ret);
1589 return ret;
1590 }
1591
1592 ret = hns_roce_query_fw_ver(hr_dev);
1593 if (ret) {
1594 dev_err(hr_dev->dev, "Query firmware version fail, ret = %d.\n",
1595 ret);
1596 return ret;
1597 }
1598
1599 ret = hns_roce_config_global_param(hr_dev);
1600 if (ret) {
1601 dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
1602 ret);
1603 return ret;
1604 }
1605
1606
1607 ret = hns_roce_query_pf_resource(hr_dev);
1608 if (ret) {
1609 dev_err(hr_dev->dev, "Query pf resource fail, ret = %d.\n",
1610 ret);
1611 return ret;
1612 }
1613
1614 if (hr_dev->pci_dev->revision == 0x21) {
1615 ret = hns_roce_query_pf_timer_resource(hr_dev);
1616 if (ret) {
1617 dev_err(hr_dev->dev,
1618 "Query pf timer resource fail, ret = %d.\n",
1619 ret);
1620 return ret;
1621 }
1622 }
1623
1624 ret = hns_roce_alloc_vf_resource(hr_dev);
1625 if (ret) {
1626 dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
1627 ret);
1628 return ret;
1629 }
1630
1631 if (hr_dev->pci_dev->revision == 0x21) {
1632 ret = hns_roce_set_vf_switch_param(hr_dev, 0);
1633 if (ret) {
1634 dev_err(hr_dev->dev,
1635 "Set function switch param fail, ret = %d.\n",
1636 ret);
1637 return ret;
1638 }
1639 }
1640
1641 hr_dev->vendor_part_id = hr_dev->pci_dev->device;
1642 hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
1643
1644 caps->num_qps = HNS_ROCE_V2_MAX_QP_NUM;
1645 caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM;
1646 caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM;
1647 caps->num_srqs = HNS_ROCE_V2_MAX_SRQ_NUM;
1648 caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
1649 caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
1650 caps->max_srqwqes = HNS_ROCE_V2_MAX_SRQWQE_NUM;
1651 caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
1652 caps->max_extend_sg = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
1653 caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
1654 caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
1655 caps->max_srq_sg = HNS_ROCE_V2_MAX_SRQ_SGE_NUM;
1656 caps->num_uars = HNS_ROCE_V2_UAR_NUM;
1657 caps->phy_num_uars = HNS_ROCE_V2_PHY_UAR_NUM;
1658 caps->num_aeq_vectors = HNS_ROCE_V2_AEQE_VEC_NUM;
1659 caps->num_comp_vectors = HNS_ROCE_V2_COMP_VEC_NUM;
1660 caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
1661 caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
1662 caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
1663 caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
1664 caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
1665 caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
1666 caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM;
1667 caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
1668 caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
1669 caps->max_sq_desc_sz = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
1670 caps->max_rq_desc_sz = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
1671 caps->max_srq_desc_sz = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
1672 caps->qpc_entry_sz = HNS_ROCE_V2_QPC_ENTRY_SZ;
1673 caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ;
1674 caps->trrl_entry_sz = HNS_ROCE_V2_TRRL_ENTRY_SZ;
1675 caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ;
1676 caps->srqc_entry_sz = HNS_ROCE_V2_SRQC_ENTRY_SZ;
1677 caps->mtpt_entry_sz = HNS_ROCE_V2_MTPT_ENTRY_SZ;
1678 caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
1679 caps->idx_entry_sz = 4;
1680 caps->cq_entry_sz = HNS_ROCE_V2_CQE_ENTRY_SIZE;
1681 caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
1682 caps->reserved_lkey = 0;
1683 caps->reserved_pds = 0;
1684 caps->reserved_mrws = 1;
1685 caps->reserved_uars = 0;
1686 caps->reserved_cqs = 0;
1687 caps->reserved_srqs = 0;
1688 caps->reserved_qps = HNS_ROCE_V2_RSV_QPS;
1689
1690 caps->qpc_ba_pg_sz = 0;
1691 caps->qpc_buf_pg_sz = 0;
1692 caps->qpc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
1693 caps->srqc_ba_pg_sz = 0;
1694 caps->srqc_buf_pg_sz = 0;
1695 caps->srqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
1696 caps->cqc_ba_pg_sz = 0;
1697 caps->cqc_buf_pg_sz = 0;
1698 caps->cqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
1699 caps->mpt_ba_pg_sz = 0;
1700 caps->mpt_buf_pg_sz = 0;
1701 caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
1702 caps->pbl_ba_pg_sz = 2;
1703 caps->pbl_buf_pg_sz = 0;
1704 caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
1705 caps->mtt_ba_pg_sz = 0;
1706 caps->mtt_buf_pg_sz = 0;
1707 caps->mtt_hop_num = HNS_ROCE_MTT_HOP_NUM;
1708 caps->wqe_sq_hop_num = 2;
1709 caps->wqe_sge_hop_num = 1;
1710 caps->wqe_rq_hop_num = 2;
1711 caps->cqe_ba_pg_sz = 6;
1712 caps->cqe_buf_pg_sz = 0;
1713 caps->cqe_hop_num = HNS_ROCE_CQE_HOP_NUM;
1714 caps->srqwqe_ba_pg_sz = 0;
1715 caps->srqwqe_buf_pg_sz = 0;
1716 caps->srqwqe_hop_num = HNS_ROCE_SRQWQE_HOP_NUM;
1717 caps->idx_ba_pg_sz = 0;
1718 caps->idx_buf_pg_sz = 0;
1719 caps->idx_hop_num = HNS_ROCE_IDX_HOP_NUM;
1720 caps->eqe_ba_pg_sz = 0;
1721 caps->eqe_buf_pg_sz = 0;
1722 caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
1723 caps->tsq_buf_pg_sz = 0;
1724 caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
1725
1726 caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
1727 HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
1728 HNS_ROCE_CAP_FLAG_RQ_INLINE |
1729 HNS_ROCE_CAP_FLAG_RECORD_DB |
1730 HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;
1731
1732 if (hr_dev->pci_dev->revision == 0x21)
1733 caps->flags |= HNS_ROCE_CAP_FLAG_MW |
1734 HNS_ROCE_CAP_FLAG_FRMR;
1735
1736 caps->pkey_table_len[0] = 1;
1737 caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
1738 caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
1739 caps->aeqe_depth = HNS_ROCE_V2_ASYNC_EQE_NUM;
1740 caps->local_ca_ack_delay = 0;
1741 caps->max_mtu = IB_MTU_4096;
1742
1743 caps->max_srqs = HNS_ROCE_V2_MAX_SRQ;
1744 caps->max_srq_wrs = HNS_ROCE_V2_MAX_SRQ_WR;
1745 caps->max_srq_sges = HNS_ROCE_V2_MAX_SRQ_SGE;
1746
1747 if (hr_dev->pci_dev->revision == 0x21) {
1748 caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC |
1749 HNS_ROCE_CAP_FLAG_SRQ |
1750 HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;
1751
1752 caps->num_qpc_timer = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
1753 caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
1754 caps->qpc_timer_ba_pg_sz = 0;
1755 caps->qpc_timer_buf_pg_sz = 0;
1756 caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
1757 caps->num_cqc_timer = HNS_ROCE_V2_MAX_CQC_TIMER_NUM;
1758 caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
1759 caps->cqc_timer_ba_pg_sz = 0;
1760 caps->cqc_timer_buf_pg_sz = 0;
1761 caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
1762
1763 caps->sccc_entry_sz = HNS_ROCE_V2_SCCC_ENTRY_SZ;
1764 caps->sccc_ba_pg_sz = 0;
1765 caps->sccc_buf_pg_sz = 0;
1766 caps->sccc_hop_num = HNS_ROCE_SCCC_HOP_NUM;
1767 }
1768
1769 ret = hns_roce_v2_set_bt(hr_dev);
1770 if (ret)
1771 dev_err(hr_dev->dev, "Configure bt attribute fail, ret = %d.\n",
1772 ret);
1773
1774 return ret;
1775 }
1776
1777 static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
1778 enum hns_roce_link_table_type type)
1779 {
1780 struct hns_roce_cmq_desc desc[2];
1781 struct hns_roce_cfg_llm_a *req_a =
1782 (struct hns_roce_cfg_llm_a *)desc[0].data;
1783 struct hns_roce_cfg_llm_b *req_b =
1784 (struct hns_roce_cfg_llm_b *)desc[1].data;
1785 struct hns_roce_v2_priv *priv = hr_dev->priv;
1786 struct hns_roce_link_table *link_tbl;
1787 struct hns_roce_link_table_entry *entry;
1788 enum hns_roce_opcode_type opcode;
1789 u32 page_num;
1790 int i;
1791
1792 switch (type) {
1793 case TSQ_LINK_TABLE:
1794 link_tbl = &priv->tsq;
1795 opcode = HNS_ROCE_OPC_CFG_EXT_LLM;
1796 break;
1797 case TPQ_LINK_TABLE:
1798 link_tbl = &priv->tpq;
1799 opcode = HNS_ROCE_OPC_CFG_TMOUT_LLM;
1800 break;
1801 default:
1802 return -EINVAL;
1803 }
1804
1805 page_num = link_tbl->npages;
1806 entry = link_tbl->table.buf;
1807 memset(req_a, 0, sizeof(*req_a));
1808 memset(req_b, 0, sizeof(*req_b));
1809
1810 for (i = 0; i < 2; i++) {
1811 hns_roce_cmq_setup_basic_desc(&desc[i], opcode, false);
1812
1813 if (i == 0)
1814 desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1815 else
1816 desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1817
1818 if (i == 0) {
1819 req_a->base_addr_l =
1820 cpu_to_le32(link_tbl->table.map & 0xffffffff);
1821 req_a->base_addr_h =
1822 cpu_to_le32(link_tbl->table.map >> 32);
1823 roce_set_field(req_a->depth_pgsz_init_en,
1824 CFG_LLM_QUE_DEPTH_M,
1825 CFG_LLM_QUE_DEPTH_S,
1826 link_tbl->npages);
1827 roce_set_field(req_a->depth_pgsz_init_en,
1828 CFG_LLM_QUE_PGSZ_M,
1829 CFG_LLM_QUE_PGSZ_S,
1830 link_tbl->pg_sz);
1831 req_a->head_ba_l = cpu_to_le32(entry[0].blk_ba0);
1832 req_a->head_ba_h_nxtptr =
1833 cpu_to_le32(entry[0].blk_ba1_nxt_ptr);
1834 roce_set_field(req_a->head_ptr,
1835 CFG_LLM_HEAD_PTR_M,
1836 CFG_LLM_HEAD_PTR_S, 0);
1837 } else {
1838 req_b->tail_ba_l =
1839 cpu_to_le32(entry[page_num - 1].blk_ba0);
1840 roce_set_field(req_b->tail_ba_h,
1841 CFG_LLM_TAIL_BA_H_M,
1842 CFG_LLM_TAIL_BA_H_S,
1843 entry[page_num - 1].blk_ba1_nxt_ptr &
1844 HNS_ROCE_LINK_TABLE_BA1_M);
1845 roce_set_field(req_b->tail_ptr,
1846 CFG_LLM_TAIL_PTR_M,
1847 CFG_LLM_TAIL_PTR_S,
1848 (entry[page_num - 2].blk_ba1_nxt_ptr &
1849 HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
1850 HNS_ROCE_LINK_TABLE_NXT_PTR_S);
1851 }
1852 }
1853 roce_set_field(req_a->depth_pgsz_init_en,
1854 CFG_LLM_INIT_EN_M, CFG_LLM_INIT_EN_S, 1);
1855
1856 return hns_roce_cmq_send(hr_dev, desc, 2);
1857 }
1858
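/*
 * A sketch of the link table entry layout assumed by the code below: each
 * entry describes one DMA page and points at the next one. For a page
 * mapped at dma_addr_t t:
 *
 *	entry[i].blk_ba0          = (u32)(t >> 12);	// BA bits [43:12]
 *	entry[i].blk_ba1_nxt_ptr  = (u32)(t >> 44);	// BA bits [63:44]
 *	entry[i].blk_ba1_nxt_ptr |= (i + 1) << HNS_ROCE_LINK_TABLE_NXT_PTR_S;
 *
 * i.e. the high address bits and the index of the next page share one
 * word, and the last entry leaves the next pointer cleared.
 */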
1859 static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev,
1860 enum hns_roce_link_table_type type)
1861 {
1862 struct hns_roce_v2_priv *priv = hr_dev->priv;
1863 struct hns_roce_link_table *link_tbl;
1864 struct hns_roce_link_table_entry *entry;
1865 struct device *dev = hr_dev->dev;
1866 u32 buf_chk_sz;
1867 dma_addr_t t;
1868 int func_num = 1;
1869 int pg_num_a;
1870 int pg_num_b;
1871 int pg_num;
1872 int size;
1873 int i;
1874
1875 switch (type) {
1876 case TSQ_LINK_TABLE:
1877 link_tbl = &priv->tsq;
1878 buf_chk_sz = 1 << (hr_dev->caps.tsq_buf_pg_sz + PAGE_SHIFT);
1879 pg_num_a = hr_dev->caps.num_qps * 8 / buf_chk_sz;
1880 pg_num_b = hr_dev->caps.sl_num * 4 + 2;
1881 break;
1882 case TPQ_LINK_TABLE:
1883 link_tbl = &priv->tpq;
1884 buf_chk_sz = 1 << (hr_dev->caps.tpq_buf_pg_sz + PAGE_SHIFT);
1885 pg_num_a = hr_dev->caps.num_cqs * 4 / buf_chk_sz;
1886 pg_num_b = 2 * 4 * func_num + 2;
1887 break;
1888 default:
1889 return -EINVAL;
1890 }
1891
1892 pg_num = max(pg_num_a, pg_num_b);
1893 size = pg_num * sizeof(struct hns_roce_link_table_entry);
1894
1895 link_tbl->table.buf = dma_alloc_coherent(dev, size,
1896 &link_tbl->table.map,
1897 GFP_KERNEL);
1898 if (!link_tbl->table.buf)
1899 goto out;
1900
1901 link_tbl->pg_list = kcalloc(pg_num, sizeof(*link_tbl->pg_list),
1902 GFP_KERNEL);
1903 if (!link_tbl->pg_list)
1904 goto err_kcalloc_failed;
1905
1906 entry = link_tbl->table.buf;
1907 for (i = 0; i < pg_num; ++i) {
1908 link_tbl->pg_list[i].buf = dma_alloc_coherent(dev, buf_chk_sz,
1909 &t, GFP_KERNEL);
1910 if (!link_tbl->pg_list[i].buf)
1911 goto err_alloc_buf_failed;
1912
1913 link_tbl->pg_list[i].map = t;
1914
1915 entry[i].blk_ba0 = (u32)(t >> 12);
1916 entry[i].blk_ba1_nxt_ptr = (u32)(t >> 44);
1917
1918 if (i < (pg_num - 1))
1919 entry[i].blk_ba1_nxt_ptr |=
1920 (i + 1) << HNS_ROCE_LINK_TABLE_NXT_PTR_S;
1921
1922 }
1923 link_tbl->npages = pg_num;
1924 link_tbl->pg_sz = buf_chk_sz;
1925
1926 return hns_roce_config_link_table(hr_dev, type);
1927
1928 err_alloc_buf_failed:
1929 for (i -= 1; i >= 0; i--)
1930 dma_free_coherent(dev, buf_chk_sz,
1931 link_tbl->pg_list[i].buf,
1932 link_tbl->pg_list[i].map);
1933 kfree(link_tbl->pg_list);
1934
1935 err_kcalloc_failed:
1936 dma_free_coherent(dev, size, link_tbl->table.buf,
1937 link_tbl->table.map);
1938
1939 out:
1940 return -ENOMEM;
1941 }
1942
1943 static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev,
1944 struct hns_roce_link_table *link_tbl)
1945 {
1946 struct device *dev = hr_dev->dev;
1947 int size;
1948 int i;
1949
1950 size = link_tbl->npages * sizeof(struct hns_roce_link_table_entry);
1951
1952 for (i = 0; i < link_tbl->npages; ++i)
1953 if (link_tbl->pg_list[i].buf)
1954 dma_free_coherent(dev, link_tbl->pg_sz,
1955 link_tbl->pg_list[i].buf,
1956 link_tbl->pg_list[i].map);
1957 kfree(link_tbl->pg_list);
1958
1959 dma_free_coherent(dev, size, link_tbl->table.buf,
1960 link_tbl->table.map);
1961 }
1962
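/*
 * hns_roce_v2_init() brings up the hw-v2 specific resources in order: the
 * TSQ and TPQ link tables first, then one HEM chunk per QPC/CQC timer BT
 * entry. The error path below unwinds in the reverse order.
 */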
1963 static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
1964 {
1965 struct hns_roce_v2_priv *priv = hr_dev->priv;
1966 int qpc_count, cqc_count;
1967 int ret, i;
1968
1969 /* TSQ includes SQ doorbell and ack doorbell */
1970 ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE);
1971 if (ret) {
1972 dev_err(hr_dev->dev, "TSQ init failed, ret = %d.\n", ret);
1973 return ret;
1974 }
1975
1976 ret = hns_roce_init_link_table(hr_dev, TPQ_LINK_TABLE);
1977 if (ret) {
1978 dev_err(hr_dev->dev, "TPQ init failed, ret = %d.\n", ret);
1979 goto err_tpq_init_failed;
1980 }
1981
1982 /* Alloc memory for QPC Timer buffer space chunk */
1983 for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
1984 qpc_count++) {
1985 ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table,
1986 qpc_count);
1987 if (ret) {
1988 dev_err(hr_dev->dev, "QPC Timer get failed\n");
1989 goto err_qpc_timer_failed;
1990 }
1991 }
1992
1993 /* Alloc memory for CQC Timer buffer space chunk */
1994 for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num;
1995 cqc_count++) {
1996 ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table,
1997 cqc_count);
1998 if (ret) {
1999 dev_err(hr_dev->dev, "CQC Timer get failed\n");
2000 goto err_cqc_timer_failed;
2001 }
2002 }
2003
2004 return 0;
2005
2006 err_cqc_timer_failed:
2007 for (i = 0; i < cqc_count; i++)
2008 hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
2009
2010 err_qpc_timer_failed:
2011 for (i = 0; i < qpc_count; i++)
2012 hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
2013
2014 hns_roce_free_link_table(hr_dev, &priv->tpq);
2015
2016 err_tpq_init_failed:
2017 hns_roce_free_link_table(hr_dev, &priv->tsq);
2018
2019 return ret;
2020 }
2021
2022 static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
2023 {
2024 struct hns_roce_v2_priv *priv = hr_dev->priv;
2025
2026 if (hr_dev->pci_dev->revision == 0x21)
2027 hns_roce_function_clear(hr_dev);
2028
2029 hns_roce_free_link_table(hr_dev, &priv->tpq);
2030 hns_roce_free_link_table(hr_dev, &priv->tsq);
2031 }
2032
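/*
 * Mailbox status handling, as read from the helpers below: the
 * QUERY_MB_ST command returns one status word in which the bit at
 * HNS_ROCE_HW_RUN_BIT_SHIFT means "hardware is still executing a mailbox
 * command", and the bits under HNS_ROCE_HW_MB_STATUS_MASK hold the
 * completion status of the last command (0x1 on success).
 */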
2033 static int hns_roce_query_mbox_status(struct hns_roce_dev *hr_dev)
2034 {
2035 struct hns_roce_cmq_desc desc;
2036 struct hns_roce_mbox_status *mb_st =
2037 (struct hns_roce_mbox_status *)desc.data;
2038 enum hns_roce_cmd_return_status status;
2039
2040 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST, true);
2041
2042 status = hns_roce_cmq_send(hr_dev, &desc, 1);
2043 if (status)
2044 return status;
2045
2046 return le32_to_cpu(mb_st->mb_status_hw_run);
2047 }
2048
2049 static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
2050 {
2051 u32 status = hns_roce_query_mbox_status(hr_dev);
2052
2053 return status >> HNS_ROCE_HW_RUN_BIT_SHIFT;
2054 }
2055
2056 static int hns_roce_v2_cmd_complete(struct hns_roce_dev *hr_dev)
2057 {
2058 u32 status = hns_roce_query_mbox_status(hr_dev);
2059
2060 return status & HNS_ROCE_HW_MB_STATUS_MASK;
2061 }
2062
2063 static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param,
2064 u64 out_param, u32 in_modifier, u8 op_modifier,
2065 u16 op, u16 token, int event)
2066 {
2067 struct hns_roce_cmq_desc desc;
2068 struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data;
2069
2070 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false);
2071
2072 mb->in_param_l = cpu_to_le32(in_param);
2073 mb->in_param_h = cpu_to_le32(in_param >> 32);
2074 mb->out_param_l = cpu_to_le32(out_param);
2075 mb->out_param_h = cpu_to_le32(out_param >> 32);
2076 mb->cmd_tag = cpu_to_le32(in_modifier << 8 | op);
2077 mb->token_event_en = cpu_to_le32(event << 16 | token);
2078
2079 return hns_roce_cmq_send(hr_dev, &desc, 1);
2080 }
2081
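/*
 * Posting a mailbox command is a two-step process: busy-wait (with
 * cond_resched()) until the hardware run bit clears or
 * HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS elapses, then issue the POST_MB
 * command through the command queue.
 */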
2082 static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
2083 u64 out_param, u32 in_modifier, u8 op_modifier,
2084 u16 op, u16 token, int event)
2085 {
2086 struct device *dev = hr_dev->dev;
2087 unsigned long end;
2088 int ret;
2089
2090 end = msecs_to_jiffies(HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS) + jiffies;
2091 while (hns_roce_v2_cmd_pending(hr_dev)) {
2092 if (time_after(jiffies, end)) {
2093 dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
2094 (int)end);
2095 return -EAGAIN;
2096 }
2097 cond_resched();
2098 }
2099
2100 ret = hns_roce_mbox_post(hr_dev, in_param, out_param, in_modifier,
2101 op_modifier, op, token, event);
2102 if (ret)
2103 dev_err(dev, "Post mailbox failed(%d)\n", ret);
2104
2105 return ret;
2106 }
2107
2108 static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
2109 unsigned long timeout)
2110 {
2111 struct device *dev = hr_dev->dev;
2112 unsigned long end;
2113 u32 status;
2114
2115 end = msecs_to_jiffies(timeout) + jiffies;
2116 while (hns_roce_v2_cmd_pending(hr_dev) && time_before(jiffies, end))
2117 cond_resched();
2118
2119 if (hns_roce_v2_cmd_pending(hr_dev)) {
2120 dev_err(dev, "[cmd_poll] mailbox command timed out!\n");
2121 return -ETIMEDOUT;
2122 }
2123
2124 status = hns_roce_v2_cmd_complete(hr_dev);
2125 if (status != 0x1) {
2126 if (status == CMD_RST_PRC_EBUSY)
2127 return status;
2128
2129 dev_err(dev, "mailbox status 0x%x!\n", status);
2130 return -EBUSY;
2131 }
2132
2133 return 0;
2134 }
2135
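/*
 * The 128-bit GID is written to the SGID table as four little-endian
 * 32-bit words (vf_sgid_l/ml/mh/h below), together with the table index
 * and the SGID type (RoCEv1, RoCEv2/IPv4 or RoCEv2/IPv6).
 */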
2136 static int hns_roce_config_sgid_table(struct hns_roce_dev *hr_dev,
2137 int gid_index, const union ib_gid *gid,
2138 enum hns_roce_sgid_type sgid_type)
2139 {
2140 struct hns_roce_cmq_desc desc;
2141 struct hns_roce_cfg_sgid_tb *sgid_tb =
2142 (struct hns_roce_cfg_sgid_tb *)desc.data;
2143 u32 *p;
2144
2145 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false);
2146
2147 roce_set_field(sgid_tb->table_idx_rsv,
2148 CFG_SGID_TB_TABLE_IDX_M,
2149 CFG_SGID_TB_TABLE_IDX_S, gid_index);
2150 roce_set_field(sgid_tb->vf_sgid_type_rsv,
2151 CFG_SGID_TB_VF_SGID_TYPE_M,
2152 CFG_SGID_TB_VF_SGID_TYPE_S, sgid_type);
2153
2154 p = (u32 *)&gid->raw[0];
2155 sgid_tb->vf_sgid_l = cpu_to_le32(*p);
2156
2157 p = (u32 *)&gid->raw[4];
2158 sgid_tb->vf_sgid_ml = cpu_to_le32(*p);
2159
2160 p = (u32 *)&gid->raw[8];
2161 sgid_tb->vf_sgid_mh = cpu_to_le32(*p);
2162
2163 p = (u32 *)&gid->raw[0xc];
2164 sgid_tb->vf_sgid_h = cpu_to_le32(*p);
2165
2166 return hns_roce_cmq_send(hr_dev, &desc, 1);
2167 }
2168
2169 static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
2170 int gid_index, const union ib_gid *gid,
2171 const struct ib_gid_attr *attr)
2172 {
2173 enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
2174 int ret;
2175
2176 if (!gid || !attr)
2177 return -EINVAL;
2178
2179 if (attr->gid_type == IB_GID_TYPE_ROCE)
2180 sgid_type = GID_TYPE_FLAG_ROCE_V1;
2181
2182 if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
2183 if (ipv6_addr_v4mapped((void *)gid))
2184 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
2185 else
2186 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
2187 }
2188
2189 ret = hns_roce_config_sgid_table(hr_dev, gid_index, gid, sgid_type);
2190 if (ret)
2191 dev_err(hr_dev->dev, "Configure sgid table failed(%d)!\n", ret);
2192
2193 return ret;
2194 }
2195
2196 static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
2197 u8 *addr)
2198 {
2199 struct hns_roce_cmq_desc desc;
2200 struct hns_roce_cfg_smac_tb *smac_tb =
2201 (struct hns_roce_cfg_smac_tb *)desc.data;
2202 u16 reg_smac_h;
2203 u32 reg_smac_l;
2204
2205 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SMAC_TB, false);
2206
2207 reg_smac_l = *(u32 *)(&addr[0]);
2208 reg_smac_h = *(u16 *)(&addr[4]);
2209
2210 memset(smac_tb, 0, sizeof(*smac_tb));
2211 roce_set_field(smac_tb->tb_idx_rsv,
2212 CFG_SMAC_TB_IDX_M,
2213 CFG_SMAC_TB_IDX_S, phy_port);
2214 roce_set_field(smac_tb->vf_smac_h_rsv,
2215 CFG_SMAC_TB_VF_SMAC_H_M,
2216 CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h);
2217 smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l);
2218
2219 return hns_roce_cmq_send(hr_dev, &desc, 1);
2220 }
2221
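/*
 * set_mtpt_pbl() fills the address part of an MPT entry: the PBL base
 * address is stored shifted right by 3 (i.e. presumably in 8-byte units),
 * and the first two page addresses of the MR are additionally recorded
 * inline in the entry (pa0/pa1), each shifted right by 6 before being
 * split into low/high words.
 */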
2222 static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
2223 struct hns_roce_mr *mr)
2224 {
2225 struct sg_dma_page_iter sg_iter;
2226 u64 page_addr;
2227 u64 *pages;
2228 int i;
2229
2230 mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
2231 mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
2232 roce_set_field(mpt_entry->byte_48_mode_ba,
2233 V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
2234 upper_32_bits(mr->pbl_ba >> 3));
2235
2236 pages = (u64 *)__get_free_page(GFP_KERNEL);
2237 if (!pages)
2238 return -ENOMEM;
2239
2240 i = 0;
2241 for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
2242 page_addr = sg_page_iter_dma_address(&sg_iter);
2243 pages[i] = page_addr >> 6;
2244
2245 /* Record the first 2 entries directly in the MTPT table */
2246 if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
2247 goto found;
2248 i++;
2249 }
2250 found:
2251 mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
2252 roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
2253 V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));
2254
2255 mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
2256 roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
2257 V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
2258 roce_set_field(mpt_entry->byte_64_buf_pa1,
2259 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
2260 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
2261 mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
2262
2263 free_page((unsigned long)pages);
2264
2265 return 0;
2266 }
2267
2268 static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
2269 unsigned long mtpt_idx)
2270 {
2271 struct hns_roce_v2_mpt_entry *mpt_entry;
2272 int ret;
2273
2274 mpt_entry = mb_buf;
2275 memset(mpt_entry, 0, sizeof(*mpt_entry));
2276
2277 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2278 V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
2279 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
2280 V2_MPT_BYTE_4_PBL_HOP_NUM_S, mr->pbl_hop_num ==
2281 HNS_ROCE_HOP_NUM_0 ? 0 : mr->pbl_hop_num);
2282 roce_set_field(mpt_entry->byte_4_pd_hop_st,
2283 V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
2284 V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
2285 mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
2286 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2287 V2_MPT_BYTE_4_PD_S, mr->pd);
2288
2289 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
2290 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 0);
2291 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
2292 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
2293 (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
2294 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S,
2295 mr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
2296 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
2297 (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
2298 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
2299 (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
2300 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
2301 (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
2302
2303 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
2304 mr->type == MR_TYPE_MR ? 0 : 1);
2305 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
2306 1);
2307
2308 mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
2309 mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
2310 mpt_entry->lkey = cpu_to_le32(mr->key);
2311 mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
2312 mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
2313
2314 if (mr->type == MR_TYPE_DMA)
2315 return 0;
2316
2317 ret = set_mtpt_pbl(mpt_entry, mr);
2318
2319 return ret;
2320 }
2321
2322 static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
2323 struct hns_roce_mr *mr, int flags,
2324 u32 pdn, int mr_access_flags, u64 iova,
2325 u64 size, void *mb_buf)
2326 {
2327 struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
2328 int ret = 0;
2329
2330 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2331 V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
2332
2333 if (flags & IB_MR_REREG_PD) {
2334 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2335 V2_MPT_BYTE_4_PD_S, pdn);
2336 mr->pd = pdn;
2337 }
2338
2339 if (flags & IB_MR_REREG_ACCESS) {
2340 roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
2341 V2_MPT_BYTE_8_BIND_EN_S,
2342 (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
2343 roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
2344 V2_MPT_BYTE_8_ATOMIC_EN_S,
2345 mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
2346 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
2347 mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
2348 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
2349 mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
2350 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
2351 mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
2352 }
2353
2354 if (flags & IB_MR_REREG_TRANS) {
2355 mpt_entry->va_l = cpu_to_le32(lower_32_bits(iova));
2356 mpt_entry->va_h = cpu_to_le32(upper_32_bits(iova));
2357 mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
2358 mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));
2359
2360 mr->iova = iova;
2361 mr->size = size;
2362
2363 ret = set_mtpt_pbl(mpt_entry, mr);
2364 }
2365
2366 return ret;
2367 }
2368
2369 static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
2370 {
2371 struct hns_roce_v2_mpt_entry *mpt_entry;
2372
2373 mpt_entry = mb_buf;
2374 memset(mpt_entry, 0, sizeof(*mpt_entry));
2375
2376 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2377 V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
2378 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
2379 V2_MPT_BYTE_4_PBL_HOP_NUM_S, 1);
2380 roce_set_field(mpt_entry->byte_4_pd_hop_st,
2381 V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
2382 V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
2383 mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
2384 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2385 V2_MPT_BYTE_4_PD_S, mr->pd);
2386
2387 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 1);
2388 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
2389 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
2390
2391 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_FRE_S, 1);
2392 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
2393 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 0);
2394 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
2395
2396 mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
2397
2398 mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
2399 roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
2400 V2_MPT_BYTE_48_PBL_BA_H_S,
2401 upper_32_bits(mr->pbl_ba >> 3));
2402
2403 roce_set_field(mpt_entry->byte_64_buf_pa1,
2404 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
2405 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
2406 mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
2407
2408 return 0;
2409 }
2410
2411 static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
2412 {
2413 struct hns_roce_v2_mpt_entry *mpt_entry;
2414
2415 mpt_entry = mb_buf;
2416 memset(mpt_entry, 0, sizeof(*mpt_entry));
2417
2418 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2419 V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
2420 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2421 V2_MPT_BYTE_4_PD_S, mw->pdn);
2422 roce_set_field(mpt_entry->byte_4_pd_hop_st,
2423 V2_MPT_BYTE_4_PBL_HOP_NUM_M,
2424 V2_MPT_BYTE_4_PBL_HOP_NUM_S,
2425 mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ?
2426 0 : mw->pbl_hop_num);
2427 roce_set_field(mpt_entry->byte_4_pd_hop_st,
2428 V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
2429 V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
2430 mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
2431
2432 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
2433 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
2434
2435 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
2436 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
2437 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
2438 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BQP_S,
2439 mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1);
2440
2441 roce_set_field(mpt_entry->byte_64_buf_pa1,
2442 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
2443 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
2444 mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
2445
2446 mpt_entry->lkey = cpu_to_le32(mw->rkey);
2447
2448 return 0;
2449 }
2450
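/*
 * CQE ownership in the helpers below is a classic owner-bit scheme: the
 * consumer index n keeps counting past the ring size, n & ib_cq.cqe picks
 * the slot, and n & (ib_cq.cqe + 1) gives the current lap parity. A CQE
 * belongs to software when its owner bit differs from that parity.
 * Worked example with a depth of 256 (ib_cq.cqe == 255): for n == 300 the
 * slot is 300 & 255 == 44 and the parity bit is set, so the CQE is ready
 * only once hardware has written it with the owner bit cleared.
 */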
2451 static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
2452 {
2453 return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
2454 n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
2455 }
2456
2457 static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
2458 {
2459 struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
2460
2461 /* Get the cqe only when the owner bit has flipped for the current lap */
2462 return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
2463 !!(n & (hr_cq->ib_cq.cqe + 1))) ? cqe : NULL;
2464 }
2465
2466 static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
2467 {
2468 return get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
2469 }
2470
2471 static void *get_srq_wqe(struct hns_roce_srq *srq, int n)
2472 {
2473 return hns_roce_buf_offset(&srq->buf, n << srq->wqe_shift);
2474 }
2475
2476 static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
2477 {
2478 /* Always called with interrupts disabled. */
2479 spin_lock(&srq->lock);
2480
2481 bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
2482 srq->tail++;
2483
2484 spin_unlock(&srq->lock);
2485 }
2486
2487 static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
2488 {
2489 *hr_cq->set_ci_db = cons_index & 0xffffff;
2490 }
2491
2492 static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
2493 struct hns_roce_srq *srq)
2494 {
2495 struct hns_roce_v2_cqe *cqe, *dest;
2496 u32 prod_index;
2497 int nfreed = 0;
2498 int wqe_index;
2499 u8 owner_bit;
2500
2501 for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
2502 ++prod_index) {
2503 if (prod_index > hr_cq->cons_index + hr_cq->ib_cq.cqe)
2504 break;
2505 }
2506
2507 /*
2508 * Now go backwards through the CQ, removing CQ entries
2509 * that match our QP by overwriting them with next entries.
2510 */
2511 while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
2512 cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
2513 if ((roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
2514 V2_CQE_BYTE_16_LCL_QPN_S) &
2515 HNS_ROCE_V2_CQE_QPN_MASK) == qpn) {
2516 if (srq &&
2517 roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S)) {
2518 wqe_index = roce_get_field(cqe->byte_4,
2519 V2_CQE_BYTE_4_WQE_INDX_M,
2520 V2_CQE_BYTE_4_WQE_INDX_S);
2521 hns_roce_free_srq_wqe(srq, wqe_index);
2522 }
2523 ++nfreed;
2524 } else if (nfreed) {
2525 dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
2526 hr_cq->ib_cq.cqe);
2527 owner_bit = roce_get_bit(dest->byte_4,
2528 V2_CQE_BYTE_4_OWNER_S);
2529 memcpy(dest, cqe, sizeof(*cqe));
2530 roce_set_bit(dest->byte_4, V2_CQE_BYTE_4_OWNER_S,
2531 owner_bit);
2532 }
2533 }
2534
2535 if (nfreed) {
2536 hr_cq->cons_index += nfreed;
2537
2538 /* Make sure the update of the buffer contents is done
2539 * before updating the consumer index.
2540 */
2541 wmb();
2542 hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
2543 }
2544 }
2545
2546 static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
2547 struct hns_roce_srq *srq)
2548 {
2549 spin_lock_irq(&hr_cq->lock);
2550 __hns_roce_v2_cq_clean(hr_cq, qpn, srq);
2551 spin_unlock_irq(&hr_cq->lock);
2552 }
2553
2554 static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
2555 struct hns_roce_cq *hr_cq, void *mb_buf,
2556 u64 *mtts, dma_addr_t dma_handle, int nent,
2557 u32 vector)
2558 {
2559 struct hns_roce_v2_cq_context *cq_context;
2560
2561 cq_context = mb_buf;
2562 memset(cq_context, 0, sizeof(*cq_context));
2563
2564 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M,
2565 V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID);
2566 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
2567 V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
2568 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
2569 V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent));
2570 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
2571 V2_CQC_BYTE_4_CEQN_S, vector);
2572
2573 roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
2574 V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
2575
2576 cq_context->cqe_cur_blk_addr = cpu_to_le32(mtts[0] >> PAGE_ADDR_SHIFT);
2577
2578 roce_set_field(cq_context->byte_16_hop_addr,
2579 V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
2580 V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
2581 mtts[0] >> (32 + PAGE_ADDR_SHIFT));
2582 roce_set_field(cq_context->byte_16_hop_addr,
2583 V2_CQC_BYTE_16_CQE_HOP_NUM_M,
2584 V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
2585 HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
2586
2587 cq_context->cqe_nxt_blk_addr = cpu_to_le32(mtts[1] >> PAGE_ADDR_SHIFT);
2588 roce_set_field(cq_context->byte_24_pgsz_addr,
2589 V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
2590 V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
2591 mtts[1] >> (32 + PAGE_ADDR_SHIFT));
2592 roce_set_field(cq_context->byte_24_pgsz_addr,
2593 V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
2594 V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
2595 hr_dev->caps.cqe_ba_pg_sz + PG_SHIFT_OFFSET);
2596 roce_set_field(cq_context->byte_24_pgsz_addr,
2597 V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
2598 V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
2599 hr_dev->caps.cqe_buf_pg_sz + PG_SHIFT_OFFSET);
2600
2601 cq_context->cqe_ba = cpu_to_le32(dma_handle >> 3);
2602
2603 roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
2604 V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));
2605
2606 if (hr_cq->db_en)
2607 roce_set_bit(cq_context->byte_44_db_record,
2608 V2_CQC_BYTE_44_DB_RECORD_EN_S, 1);
2609
2610 roce_set_field(cq_context->byte_44_db_record,
2611 V2_CQC_BYTE_44_DB_RECORD_ADDR_M,
2612 V2_CQC_BYTE_44_DB_RECORD_ADDR_S,
2613 ((u32)hr_cq->db.dma) >> 1);
2614 cq_context->db_record_addr = cpu_to_le32(hr_cq->db.dma >> 32);
2615
2616 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
2617 V2_CQC_BYTE_56_CQ_MAX_CNT_M,
2618 V2_CQC_BYTE_56_CQ_MAX_CNT_S,
2619 HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
2620 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
2621 V2_CQC_BYTE_56_CQ_PERIOD_M,
2622 V2_CQC_BYTE_56_CQ_PERIOD_S,
2623 HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
2624 }
2625
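/*
 * The notify request below is a 64-bit CQ doorbell assembled in two words:
 * word 0 holds the CQN tag and the HNS_ROCE_V2_CQ_DB_NTR command, word 1
 * holds the consumer index, the 2-bit arm sequence number and the notify
 * bit (next vs. solicited completion).
 */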
2626 static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
2627 enum ib_cq_notify_flags flags)
2628 {
2629 struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
2630 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2631 u32 notification_flag;
2632 __le32 doorbell[2];
2633
2634 doorbell[0] = 0;
2635 doorbell[1] = 0;
2636
2637 notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
2638 V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
2639
2640 /* The notify type follows the request: IB_CQ_SOLICITED selects
2641 * V2_CQ_DB_REQ_NOT, any other flag selects V2_CQ_DB_REQ_NOT_SOL.
2642 */
2643 roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_TAG_M, V2_DB_BYTE_4_TAG_S,
2644 hr_cq->cqn);
2645 roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_CMD_M, V2_DB_BYTE_4_CMD_S,
2646 HNS_ROCE_V2_CQ_DB_NTR);
2647 roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CONS_IDX_M,
2648 V2_CQ_DB_PARAMETER_CONS_IDX_S,
2649 hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
2650 roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CMD_SN_M,
2651 V2_CQ_DB_PARAMETER_CMD_SN_S, hr_cq->arm_sn & 0x3);
2652 roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
2653 notification_flag);
2654
2655 hns_roce_write64(hr_dev, doorbell, hr_cq->cq_db_l);
2656
2657 return 0;
2658 }
2659
2660 static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
2661 struct hns_roce_qp **cur_qp,
2662 struct ib_wc *wc)
2663 {
2664 struct hns_roce_rinl_sge *sge_list;
2665 u32 wr_num, wr_cnt, sge_num;
2666 u32 sge_cnt, data_len, size;
2667 void *wqe_buf;
2668
2669 wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
2670 V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
2671 wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);
2672
2673 sge_list = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
2674 sge_num = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
2675 wqe_buf = get_recv_wqe(*cur_qp, wr_cnt);
2676 data_len = wc->byte_len;
2677
2678 for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
2679 size = min(sge_list[sge_cnt].len, data_len);
2680 memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);
2681
2682 data_len -= size;
2683 wqe_buf += size;
2684 }
2685
2686 if (data_len) {
2687 wc->status = IB_WC_LOC_LEN_ERR;
2688 return -EAGAIN;
2689 }
2690
2691 return 0;
2692 }
2693
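/*
 * hns_roce_v2_poll_one() consumes one CQE: fetch the next software-owned
 * CQE, look up the QP it belongs to (caching it in *cur_qp across calls),
 * retire the matching SQ/RQ/SRQ WQE, then translate the hardware status
 * and opcode into the ib_wc fields. On a non-flush error the QP is moved
 * to the error state before returning.
 */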
2694 static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
2695 struct hns_roce_qp **cur_qp, struct ib_wc *wc)
2696 {
2697 struct hns_roce_srq *srq = NULL;
2698 struct hns_roce_dev *hr_dev;
2699 struct hns_roce_v2_cqe *cqe;
2700 struct hns_roce_qp *hr_qp;
2701 struct hns_roce_wq *wq;
2702 struct ib_qp_attr attr;
2703 int attr_mask;
2704 int is_send;
2705 u16 wqe_ctr;
2706 u32 opcode;
2707 u32 status;
2708 int qpn;
2709 int ret;
2710
2711 /* Find the cqe according to the consumer index */
2712 cqe = next_cqe_sw_v2(hr_cq);
2713 if (!cqe)
2714 return -EAGAIN;
2715
2716 ++hr_cq->cons_index;
2717 /* Memory barrier */
2718 rmb();
2719
2720 /* S_R bit: 0 -> SQ (send), 1 -> RQ (receive) */
2721 is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);
2722
2723 qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
2724 V2_CQE_BYTE_16_LCL_QPN_S);
2725
2726 if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
2727 hr_dev = to_hr_dev(hr_cq->ib_cq.device);
2728 hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
2729 if (unlikely(!hr_qp)) {
2730 dev_err(hr_dev->dev, "CQ %06lx with entry for unknown QPN %06x\n",
2731 hr_cq->cqn, (qpn & HNS_ROCE_V2_CQE_QPN_MASK));
2732 return -EINVAL;
2733 }
2734 *cur_qp = hr_qp;
2735 }
2736
2737 wc->qp = &(*cur_qp)->ibqp;
2738 wc->vendor_err = 0;
2739
2740 if (is_send) {
2741 wq = &(*cur_qp)->sq;
2742 if ((*cur_qp)->sq_signal_bits) {
2743 /*
2744 * If not every send WQE is signaled (sq_signal_bits is
2745 * set), first advance the tail pointer to the WQE that
2746 * the current CQE corresponds to.
2747 */
2748 wqe_ctr = (u16)roce_get_field(cqe->byte_4,
2749 V2_CQE_BYTE_4_WQE_INDX_M,
2750 V2_CQE_BYTE_4_WQE_INDX_S);
2751 wq->tail += (wqe_ctr - (u16)wq->tail) &
2752 (wq->wqe_cnt - 1);
2753 }
2754
2755 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2756 ++wq->tail;
2757 } else if ((*cur_qp)->ibqp.srq) {
2758 srq = to_hr_srq((*cur_qp)->ibqp.srq);
2759 wqe_ctr = (u16)roce_get_field(cqe->byte_4,
2760 V2_CQE_BYTE_4_WQE_INDX_M,
2761 V2_CQE_BYTE_4_WQE_INDX_S);
2762 wc->wr_id = srq->wrid[wqe_ctr];
2763 hns_roce_free_srq_wqe(srq, wqe_ctr);
2764 } else {
2765 /* The RQ corresponds to this CQE */
2766 wq = &(*cur_qp)->rq;
2767 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2768 ++wq->tail;
2769 }
2770
2771 status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
2772 V2_CQE_BYTE_4_STATUS_S);
2773 switch (status & HNS_ROCE_V2_CQE_STATUS_MASK) {
2774 case HNS_ROCE_CQE_V2_SUCCESS:
2775 wc->status = IB_WC_SUCCESS;
2776 break;
2777 case HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR:
2778 wc->status = IB_WC_LOC_LEN_ERR;
2779 break;
2780 case HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR:
2781 wc->status = IB_WC_LOC_QP_OP_ERR;
2782 break;
2783 case HNS_ROCE_CQE_V2_LOCAL_PROT_ERR:
2784 wc->status = IB_WC_LOC_PROT_ERR;
2785 break;
2786 case HNS_ROCE_CQE_V2_WR_FLUSH_ERR:
2787 wc->status = IB_WC_WR_FLUSH_ERR;
2788 break;
2789 case HNS_ROCE_CQE_V2_MW_BIND_ERR:
2790 wc->status = IB_WC_MW_BIND_ERR;
2791 break;
2792 case HNS_ROCE_CQE_V2_BAD_RESP_ERR:
2793 wc->status = IB_WC_BAD_RESP_ERR;
2794 break;
2795 case HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR:
2796 wc->status = IB_WC_LOC_ACCESS_ERR;
2797 break;
2798 case HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR:
2799 wc->status = IB_WC_REM_INV_REQ_ERR;
2800 break;
2801 case HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR:
2802 wc->status = IB_WC_REM_ACCESS_ERR;
2803 break;
2804 case HNS_ROCE_CQE_V2_REMOTE_OP_ERR:
2805 wc->status = IB_WC_REM_OP_ERR;
2806 break;
2807 case HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR:
2808 wc->status = IB_WC_RETRY_EXC_ERR;
2809 break;
2810 case HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR:
2811 wc->status = IB_WC_RNR_RETRY_EXC_ERR;
2812 break;
2813 case HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR:
2814 wc->status = IB_WC_REM_ABORT_ERR;
2815 break;
2816 default:
2817 wc->status = IB_WC_GENERAL_ERR;
2818 break;
2819 }
2820
2821 /* Move the QP to the error state on any error other than a flush error */
2822 if ((wc->status != IB_WC_SUCCESS) &&
2823 (wc->status != IB_WC_WR_FLUSH_ERR)) {
2824 attr_mask = IB_QP_STATE;
2825 attr.qp_state = IB_QPS_ERR;
2826 return hns_roce_v2_modify_qp(&(*cur_qp)->ibqp,
2827 &attr, attr_mask,
2828 (*cur_qp)->state, IB_QPS_ERR);
2829 }
2830
2831 if (wc->status == IB_WC_WR_FLUSH_ERR)
2832 return 0;
2833
2834 if (is_send) {
2835 wc->wc_flags = 0;
2836
2837 switch (roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
2838 V2_CQE_BYTE_4_OPCODE_S) & 0x1f) {
2839 case HNS_ROCE_SQ_OPCODE_SEND:
2840 wc->opcode = IB_WC_SEND;
2841 break;
2842 case HNS_ROCE_SQ_OPCODE_SEND_WITH_INV:
2843 wc->opcode = IB_WC_SEND;
2844 break;
2845 case HNS_ROCE_SQ_OPCODE_SEND_WITH_IMM:
2846 wc->opcode = IB_WC_SEND;
2847 wc->wc_flags |= IB_WC_WITH_IMM;
2848 break;
2849 case HNS_ROCE_SQ_OPCODE_RDMA_READ:
2850 wc->opcode = IB_WC_RDMA_READ;
2851 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2852 break;
2853 case HNS_ROCE_SQ_OPCODE_RDMA_WRITE:
2854 wc->opcode = IB_WC_RDMA_WRITE;
2855 break;
2856 case HNS_ROCE_SQ_OPCODE_RDMA_WRITE_WITH_IMM:
2857 wc->opcode = IB_WC_RDMA_WRITE;
2858 wc->wc_flags |= IB_WC_WITH_IMM;
2859 break;
2860 case HNS_ROCE_SQ_OPCODE_LOCAL_INV:
2861 wc->opcode = IB_WC_LOCAL_INV;
2862 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2863 break;
2864 case HNS_ROCE_SQ_OPCODE_ATOMIC_COMP_AND_SWAP:
2865 wc->opcode = IB_WC_COMP_SWAP;
2866 wc->byte_len = 8;
2867 break;
2868 case HNS_ROCE_SQ_OPCODE_ATOMIC_FETCH_AND_ADD:
2869 wc->opcode = IB_WC_FETCH_ADD;
2870 wc->byte_len = 8;
2871 break;
2872 case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_COMP_AND_SWAP:
2873 wc->opcode = IB_WC_MASKED_COMP_SWAP;
2874 wc->byte_len = 8;
2875 break;
2876 case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_FETCH_AND_ADD:
2877 wc->opcode = IB_WC_MASKED_FETCH_ADD;
2878 wc->byte_len = 8;
2879 break;
2880 case HNS_ROCE_SQ_OPCODE_FAST_REG_WR:
2881 wc->opcode = IB_WC_REG_MR;
2882 break;
2883 case HNS_ROCE_SQ_OPCODE_BIND_MW:
2884 wc->opcode = IB_WC_REG_MR;
2885 break;
2886 default:
2887 wc->status = IB_WC_GENERAL_ERR;
2888 break;
2889 }
2890 } else {
2891 /* The RQ corresponds to this CQE */
2892 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2893
2894 opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
2895 V2_CQE_BYTE_4_OPCODE_S);
2896 switch (opcode & 0x1f) {
2897 case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
2898 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2899 wc->wc_flags = IB_WC_WITH_IMM;
2900 wc->ex.imm_data =
2901 cpu_to_be32(le32_to_cpu(cqe->immtdata));
2902 break;
2903 case HNS_ROCE_V2_OPCODE_SEND:
2904 wc->opcode = IB_WC_RECV;
2905 wc->wc_flags = 0;
2906 break;
2907 case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
2908 wc->opcode = IB_WC_RECV;
2909 wc->wc_flags = IB_WC_WITH_IMM;
2910 wc->ex.imm_data =
2911 cpu_to_be32(le32_to_cpu(cqe->immtdata));
2912 break;
2913 case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
2914 wc->opcode = IB_WC_RECV;
2915 wc->wc_flags = IB_WC_WITH_INVALIDATE;
2916 wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
2917 break;
2918 default:
2919 wc->status = IB_WC_GENERAL_ERR;
2920 break;
2921 }
2922
2923 if ((wc->qp->qp_type == IB_QPT_RC ||
2924 wc->qp->qp_type == IB_QPT_UC) &&
2925 (opcode == HNS_ROCE_V2_OPCODE_SEND ||
2926 opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
2927 opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
2928 (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
2929 ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
2930 if (ret)
2931 return -EAGAIN;
2932 }
2933
2934 wc->sl = (u8)roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
2935 V2_CQE_BYTE_32_SL_S);
2936 wc->src_qp = (u8)roce_get_field(cqe->byte_32,
2937 V2_CQE_BYTE_32_RMT_QPN_M,
2938 V2_CQE_BYTE_32_RMT_QPN_S);
2939 wc->slid = 0;
2940 wc->wc_flags |= (roce_get_bit(cqe->byte_32,
2941 V2_CQE_BYTE_32_GRH_S) ?
2942 IB_WC_GRH : 0);
2943 wc->port_num = roce_get_field(cqe->byte_32,
2944 V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S);
2945 wc->pkey_index = 0;
2946 memcpy(wc->smac, cqe->smac, 4);
2947 wc->smac[4] = roce_get_field(cqe->byte_28,
2948 V2_CQE_BYTE_28_SMAC_4_M,
2949 V2_CQE_BYTE_28_SMAC_4_S);
2950 wc->smac[5] = roce_get_field(cqe->byte_28,
2951 V2_CQE_BYTE_28_SMAC_5_M,
2952 V2_CQE_BYTE_28_SMAC_5_S);
2953 wc->wc_flags |= IB_WC_WITH_SMAC;
2954 if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
2955 wc->vlan_id = (u16)roce_get_field(cqe->byte_28,
2956 V2_CQE_BYTE_28_VID_M,
2957 V2_CQE_BYTE_28_VID_S);
2958 wc->wc_flags |= IB_WC_WITH_VLAN;
2959 } else {
2960 wc->vlan_id = 0xffff;
2961 }
2962
2963 wc->network_hdr_type = roce_get_field(cqe->byte_28,
2964 V2_CQE_BYTE_28_PORT_TYPE_M,
2965 V2_CQE_BYTE_28_PORT_TYPE_S);
2966 }
2967
2968 return 0;
2969 }
2970
2971 static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
2972 struct ib_wc *wc)
2973 {
2974 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2975 struct hns_roce_qp *cur_qp = NULL;
2976 unsigned long flags;
2977 int npolled;
2978
2979 spin_lock_irqsave(&hr_cq->lock, flags);
2980
2981 for (npolled = 0; npolled < num_entries; ++npolled) {
2982 if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
2983 break;
2984 }
2985
2986 if (npolled) {
2987 /* Memory barrier */
2988 wmb();
2989 hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
2990 }
2991
2992 spin_unlock_irqrestore(&hr_cq->lock, flags);
2993
2994 return npolled;
2995 }
2996
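/*
 * Each WRITE_*_BT0 opcode below is the first of a consecutive opcode
 * range, one opcode per base-address-table level, so the mailbox opcode
 * for a given level is simply "op + step_idx" (e.g. step_idx 1 selects
 * BT1). SCCC only has a single level, hence the step_idx check up front.
 */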
2997 static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type,
2998 int step_idx)
2999 {
3000 int op;
3001
3002 if (type == HEM_TYPE_SCCC && step_idx)
3003 return -EINVAL;
3004
3005 switch (type) {
3006 case HEM_TYPE_QPC:
3007 op = HNS_ROCE_CMD_WRITE_QPC_BT0;
3008 break;
3009 case HEM_TYPE_MTPT:
3010 op = HNS_ROCE_CMD_WRITE_MPT_BT0;
3011 break;
3012 case HEM_TYPE_CQC:
3013 op = HNS_ROCE_CMD_WRITE_CQC_BT0;
3014 break;
3015 case HEM_TYPE_SRQC:
3016 op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
3017 break;
3018 case HEM_TYPE_SCCC:
3019 op = HNS_ROCE_CMD_WRITE_SCCC_BT0;
3020 break;
3021 case HEM_TYPE_QPC_TIMER:
3022 op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
3023 break;
3024 case HEM_TYPE_CQC_TIMER:
3025 op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
3026 break;
3027 default:
3028 dev_warn(hr_dev->dev,
3029 "Table %d is not written through the mailbox!\n", type);
3030 return -EINVAL;
3031 }
3032
3033 return op + step_idx;
3034 }
3035
3036 static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
3037 struct hns_roce_hem_table *table, int obj,
3038 int step_idx)
3039 {
3040 struct hns_roce_cmd_mailbox *mailbox;
3041 struct hns_roce_hem_iter iter;
3042 struct hns_roce_hem_mhop mhop;
3043 struct hns_roce_hem *hem;
3044 unsigned long mhop_obj = obj;
3045 int i, j, k;
3046 int ret = 0;
3047 u64 hem_idx = 0;
3048 u64 l1_idx = 0;
3049 u64 bt_ba = 0;
3050 u32 chunk_ba_num;
3051 u32 hop_num;
3052 int op;
3053
3054 if (!hns_roce_check_whether_mhop(hr_dev, table->type))
3055 return 0;
3056
3057 hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
3058 i = mhop.l0_idx;
3059 j = mhop.l1_idx;
3060 k = mhop.l2_idx;
3061 hop_num = mhop.hop_num;
3062 chunk_ba_num = mhop.bt_chunk_size / 8;
3063
3064 if (hop_num == 2) {
3065 hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
3066 k;
3067 l1_idx = i * chunk_ba_num + j;
3068 } else if (hop_num == 1) {
3069 hem_idx = i * chunk_ba_num + j;
3070 } else if (hop_num == HNS_ROCE_HOP_NUM_0) {
3071 hem_idx = i;
3072 }
3073
3074 op = get_op_for_set_hem(hr_dev, table->type, step_idx);
3075 if (op == -EINVAL)
3076 return 0;
3077
3078 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3079 if (IS_ERR(mailbox))
3080 return PTR_ERR(mailbox);
3081
3082 if (table->type == HEM_TYPE_SCCC)
3083 obj = mhop.l0_idx;
3084
3085 if (check_whether_last_step(hop_num, step_idx)) {
3086 hem = table->hem[hem_idx];
3087 for (hns_roce_hem_first(hem, &iter);
3088 !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
3089 bt_ba = hns_roce_hem_addr(&iter);
3090
3091 /* configure the ba, tag, and op */
3092 ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma,
3093 obj, 0, op,
3094 HNS_ROCE_CMD_TIMEOUT_MSECS);
3095 }
3096 } else {
3097 if (step_idx == 0)
3098 bt_ba = table->bt_l0_dma_addr[i];
3099 else if (step_idx == 1 && hop_num == 2)
3100 bt_ba = table->bt_l1_dma_addr[l1_idx];
3101
3102 /* configure the ba, tag, and op */
3103 ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma, obj,
3104 0, op, HNS_ROCE_CMD_TIMEOUT_MSECS);
3105 }
3106
3107 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3108 return ret;
3109 }
3110
3111 static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
3112 struct hns_roce_hem_table *table, int obj,
3113 int step_idx)
3114 {
3115 struct device *dev = hr_dev->dev;
3116 struct hns_roce_cmd_mailbox *mailbox;
3117 int ret;
3118 u16 op = 0xff;
3119
3120 if (!hns_roce_check_whether_mhop(hr_dev, table->type))
3121 return 0;
3122
3123 switch (table->type) {
3124 case HEM_TYPE_QPC:
3125 op = HNS_ROCE_CMD_DESTROY_QPC_BT0;
3126 break;
3127 case HEM_TYPE_MTPT:
3128 op = HNS_ROCE_CMD_DESTROY_MPT_BT0;
3129 break;
3130 case HEM_TYPE_CQC:
3131 op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
3132 break;
3133 case HEM_TYPE_SCCC:
3134 case HEM_TYPE_QPC_TIMER:
3135 case HEM_TYPE_CQC_TIMER:
3136 break;
3137 case HEM_TYPE_SRQC:
3138 op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
3139 break;
3140 default:
3141 dev_warn(dev, "Table %d is not destroyed through the mailbox!\n",
3142 table->type);
3143 return 0;
3144 }
3145
3146 if (table->type == HEM_TYPE_SCCC ||
3147 table->type == HEM_TYPE_QPC_TIMER ||
3148 table->type == HEM_TYPE_CQC_TIMER)
3149 return 0;
3150
3151 op += step_idx;
3152
3153 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3154 if (IS_ERR(mailbox))
3155 return PTR_ERR(mailbox);
3156
3157 /* configure the tag and op */
3158 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op,
3159 HNS_ROCE_CMD_TIMEOUT_MSECS);
3160
3161 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3162 return ret;
3163 }
3164
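/*
 * The MODIFY_QPC mailbox buffer carries two contexts back to back: the
 * new QP context followed by its mask (hence the "sizeof(*context) * 2"
 * copy below), so callers are expected to pass the pair in one
 * contiguous allocation, as hns_roce_v2_modify_qp() in this file does.
 */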
3165 static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
3166 enum ib_qp_state cur_state,
3167 enum ib_qp_state new_state,
3168 struct hns_roce_v2_qp_context *context,
3169 struct hns_roce_qp *hr_qp)
3170 {
3171 struct hns_roce_cmd_mailbox *mailbox;
3172 int ret;
3173
3174 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3175 if (IS_ERR(mailbox))
3176 return PTR_ERR(mailbox);
3177
3178 memcpy(mailbox->buf, context, sizeof(*context) * 2);
3179
3180 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
3181 HNS_ROCE_CMD_MODIFY_QPC,
3182 HNS_ROCE_CMD_TIMEOUT_MSECS);
3183
3184 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3185
3186 return ret;
3187 }
3188
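/*
 * When the destination can accept no RDMA reads/atomics (dest_rd_atomic
 * == 0), only IB_ACCESS_REMOTE_WRITE is kept, mirroring the IB
 * convention that responder resources gate remote read and atomic
 * access. Each context bit that is set also has its mask bit cleared,
 * marking the field as "to be modified" for the hardware.
 */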
3189 static void set_access_flags(struct hns_roce_qp *hr_qp,
3190 struct hns_roce_v2_qp_context *context,
3191 struct hns_roce_v2_qp_context *qpc_mask,
3192 const struct ib_qp_attr *attr, int attr_mask)
3193 {
3194 u8 dest_rd_atomic;
3195 u32 access_flags;
3196
3197 dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
3198 attr->max_dest_rd_atomic : hr_qp->resp_depth;
3199
3200 access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ?
3201 attr->qp_access_flags : hr_qp->atomic_rd_en;
3202
3203 if (!dest_rd_atomic)
3204 access_flags &= IB_ACCESS_REMOTE_WRITE;
3205
3206 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3207 !!(access_flags & IB_ACCESS_REMOTE_READ));
3208 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
3209
3210 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3211 !!(access_flags & IB_ACCESS_REMOTE_WRITE));
3212 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
3213
3214 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3215 !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
3216 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
3217 }
3218
3219 static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
3220 struct hns_roce_v2_qp_context *context,
3221 struct hns_roce_v2_qp_context *qpc_mask)
3222 {
3223 if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
3224 roce_set_field(context->byte_4_sqpn_tst,
3225 V2_QPC_BYTE_4_SGE_SHIFT_M,
3226 V2_QPC_BYTE_4_SGE_SHIFT_S,
3227 ilog2((unsigned int)hr_qp->sge.sge_cnt));
3228 else
3229 roce_set_field(context->byte_4_sqpn_tst,
3230 V2_QPC_BYTE_4_SGE_SHIFT_M,
3231 V2_QPC_BYTE_4_SGE_SHIFT_S,
3232 hr_qp->sq.max_gs >
3233 HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE ?
3234 ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
3235
3236 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
3237 V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
3238
3239 roce_set_field(context->byte_20_smac_sgid_idx,
3240 V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
3241 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
3242 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3243 V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
3244
3245 roce_set_field(context->byte_20_smac_sgid_idx,
3246 V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
3247 (hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
3248 hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT ||
3249 hr_qp->ibqp.srq) ? 0 :
3250 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
3251
3252 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3253 V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
3254 }
3255
3256 static void modify_qp_reset_to_init(struct ib_qp *ibqp,
3257 const struct ib_qp_attr *attr,
3258 int attr_mask,
3259 struct hns_roce_v2_qp_context *context,
3260 struct hns_roce_v2_qp_context *qpc_mask)
3261 {
3262 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3263 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3264
3265 /*
3266 * In the v2 engine, software passes the context and context mask to
3267 * the hardware when modifying a QP. If software needs to modify some
3268 * fields of the context, it should set all bits of the relevant
3269 * fields in the context mask to 1, and otherwise clear them to 0.
3270 */
3271 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3272 V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
3273 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3274 V2_QPC_BYTE_4_TST_S, 0);
3275
3276 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3277 V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
3278 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3279 V2_QPC_BYTE_4_SQPN_S, 0);
3280
3281 roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3282 V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
3283 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3284 V2_QPC_BYTE_16_PD_S, 0);
3285
3286 roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
3287 V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
3288 roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
3289 V2_QPC_BYTE_20_RQWS_S, 0);
3290
3291 set_qpc_wqe_cnt(hr_qp, context, qpc_mask);
3292
3293 /* No VLAN: set the VLAN ID field to 0xFFF */
3294 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
3295 V2_QPC_BYTE_24_VLAN_ID_S, 0xfff);
3296 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
3297 V2_QPC_BYTE_24_VLAN_ID_S, 0);
3298
3299 /*
3300 * Set certain fields to zero: the default value of every context
3301 * field is already zero, so the fields need not be written again,
3302 * but the relevant context mask bits must be cleared to 0.
3303 */
3304 roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_TX_ERR_S, 0);
3305 roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_RX_ERR_S, 0);
3306 roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_TX_ERR_S, 0);
3307 roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_RX_ERR_S, 0);
3308
3309 roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_TEMPID_M,
3310 V2_QPC_BYTE_60_TEMPID_S, 0);
3311
3312 roce_set_field(qpc_mask->byte_60_qpst_tempid,
3313 V2_QPC_BYTE_60_SCC_TOKEN_M, V2_QPC_BYTE_60_SCC_TOKEN_S,
3314 0);
3315 roce_set_bit(qpc_mask->byte_60_qpst_tempid,
3316 V2_QPC_BYTE_60_SQ_DB_DOING_S, 0);
3317 roce_set_bit(qpc_mask->byte_60_qpst_tempid,
3318 V2_QPC_BYTE_60_RQ_DB_DOING_S, 0);
3319 roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
3320 roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
3321
3322 if (hr_qp->rdb_en) {
3323 roce_set_bit(context->byte_68_rq_db,
3324 V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
3325 roce_set_bit(qpc_mask->byte_68_rq_db,
3326 V2_QPC_BYTE_68_RQ_RECORD_EN_S, 0);
3327 }
3328
3329 roce_set_field(context->byte_68_rq_db,
3330 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
3331 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S,
3332 ((u32)hr_qp->rdb.dma) >> 1);
3333 roce_set_field(qpc_mask->byte_68_rq_db,
3334 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
3335 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S, 0);
3336 context->rq_db_record_addr = cpu_to_le32(hr_qp->rdb.dma >> 32);
3337 qpc_mask->rq_db_record_addr = 0;
3338
3339 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
3340 (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0);
3341 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
3342
3343 roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3344 V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
3345 roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3346 V2_QPC_BYTE_80_RX_CQN_S, 0);
3347 if (ibqp->srq) {
3348 roce_set_field(context->byte_76_srqn_op_en,
3349 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
3350 to_hr_srq(ibqp->srq)->srqn);
3351 roce_set_field(qpc_mask->byte_76_srqn_op_en,
3352 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
3353 roce_set_bit(context->byte_76_srqn_op_en,
3354 V2_QPC_BYTE_76_SRQ_EN_S, 1);
3355 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
3356 V2_QPC_BYTE_76_SRQ_EN_S, 0);
3357 }
3358
3359 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3360 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3361 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
3362 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3363 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
3364 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
3365
3366 roce_set_field(qpc_mask->byte_92_srq_info, V2_QPC_BYTE_92_SRQ_INFO_M,
3367 V2_QPC_BYTE_92_SRQ_INFO_S, 0);
3368
3369 roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
3370 V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
3371
3372 roce_set_field(qpc_mask->byte_104_rq_sge,
3373 V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_M,
3374 V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_S, 0);
3375
3376 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
3377 V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
3378 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
3379 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
3380 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
3381 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
3382 V2_QPC_BYTE_108_RX_REQ_RNR_S, 0);
3383
3384 qpc_mask->rq_rnr_timer = 0;
3385 qpc_mask->rx_msg_len = 0;
3386 qpc_mask->rx_rkey_pkt_info = 0;
3387 qpc_mask->rx_va = 0;
3388
3389 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
3390 V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
3391 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
3392 V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
3393
3394 roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RQ_RTY_WAIT_DO_S,
3395 0);
3396 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M,
3397 V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S, 0);
3398 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M,
3399 V2_QPC_BYTE_140_RAQ_TRRL_TAIL_S, 0);
3400
3401 roce_set_field(qpc_mask->byte_144_raq,
3402 V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M,
3403 V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S, 0);
3404 roce_set_field(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_CREDIT_M,
3405 V2_QPC_BYTE_144_RAQ_CREDIT_S, 0);
3406 roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RESP_RTY_FLG_S, 0);
3407
3408 roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RQ_MSN_M,
3409 V2_QPC_BYTE_148_RQ_MSN_S, 0);
3410 roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RAQ_SYNDROME_M,
3411 V2_QPC_BYTE_148_RAQ_SYNDROME_S, 0);
3412
3413 roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
3414 V2_QPC_BYTE_152_RAQ_PSN_S, 0);
3415 roce_set_field(qpc_mask->byte_152_raq,
3416 V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M,
3417 V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S, 0);
3418
3419 roce_set_field(qpc_mask->byte_156_raq, V2_QPC_BYTE_156_RAQ_USE_PKTN_M,
3420 V2_QPC_BYTE_156_RAQ_USE_PKTN_S, 0);
3421
3422 roce_set_field(qpc_mask->byte_160_sq_ci_pi,
3423 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
3424 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
3425 roce_set_field(qpc_mask->byte_160_sq_ci_pi,
3426 V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M,
3427 V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S, 0);
3428
3429 roce_set_bit(qpc_mask->byte_168_irrl_idx,
3430 V2_QPC_BYTE_168_POLL_DB_WAIT_DO_S, 0);
3431 roce_set_bit(qpc_mask->byte_168_irrl_idx,
3432 V2_QPC_BYTE_168_SCC_TOKEN_FORBID_SQ_DEQ_S, 0);
3433 roce_set_bit(qpc_mask->byte_168_irrl_idx,
3434 V2_QPC_BYTE_168_WAIT_ACK_TIMEOUT_S, 0);
3435 roce_set_bit(qpc_mask->byte_168_irrl_idx,
3436 V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S, 0);
3437 roce_set_bit(qpc_mask->byte_168_irrl_idx,
3438 V2_QPC_BYTE_168_SQ_INVLD_FLG_S, 0);
3439 roce_set_field(qpc_mask->byte_168_irrl_idx,
3440 V2_QPC_BYTE_168_IRRL_IDX_LSB_M,
3441 V2_QPC_BYTE_168_IRRL_IDX_LSB_S, 0);
3442
3443 roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
3444 V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 4);
3445 roce_set_field(qpc_mask->byte_172_sq_psn,
3446 V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
3447 V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);
3448
3449 roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_MSG_RNR_FLG_S,
3450 0);
3451
3452 roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1);
3453 roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 0);
3454
3455 roce_set_field(qpc_mask->byte_176_msg_pktn,
3456 V2_QPC_BYTE_176_MSG_USE_PKTN_M,
3457 V2_QPC_BYTE_176_MSG_USE_PKTN_S, 0);
3458 roce_set_field(qpc_mask->byte_176_msg_pktn,
3459 V2_QPC_BYTE_176_IRRL_HEAD_PRE_M,
3460 V2_QPC_BYTE_176_IRRL_HEAD_PRE_S, 0);
3461
3462 roce_set_field(qpc_mask->byte_184_irrl_idx,
3463 V2_QPC_BYTE_184_IRRL_IDX_MSB_M,
3464 V2_QPC_BYTE_184_IRRL_IDX_MSB_S, 0);
3465
3466 qpc_mask->cur_sge_offset = 0;
3467
3468 roce_set_field(qpc_mask->byte_192_ext_sge,
3469 V2_QPC_BYTE_192_CUR_SGE_IDX_M,
3470 V2_QPC_BYTE_192_CUR_SGE_IDX_S, 0);
3471 roce_set_field(qpc_mask->byte_192_ext_sge,
3472 V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_M,
3473 V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_S, 0);
3474
3475 roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
3476 V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
3477
3478 roce_set_field(qpc_mask->byte_200_sq_max, V2_QPC_BYTE_200_SQ_MAX_IDX_M,
3479 V2_QPC_BYTE_200_SQ_MAX_IDX_S, 0);
3480 roce_set_field(qpc_mask->byte_200_sq_max,
3481 V2_QPC_BYTE_200_LCL_OPERATED_CNT_M,
3482 V2_QPC_BYTE_200_LCL_OPERATED_CNT_S, 0);
3483
3484 roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RNR_FLG_S, 0);
3485 roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RTY_FLG_S, 0);
3486
3487 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
3488 V2_QPC_BYTE_212_CHECK_FLG_S, 0);
3489
3490 qpc_mask->sq_timer = 0;
3491
3492 roce_set_field(qpc_mask->byte_220_retry_psn_msn,
3493 V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
3494 V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
3495 roce_set_field(qpc_mask->byte_232_irrl_sge,
3496 V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
3497 V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
3498
3499 roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_SO_LP_VLD_S,
3500 0);
3501 roce_set_bit(qpc_mask->byte_232_irrl_sge,
3502 V2_QPC_BYTE_232_FENCE_LP_VLD_S, 0);
3503 roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_IRRL_LP_VLD_S,
3504 0);
3505
3506 qpc_mask->irrl_cur_sge_offset = 0;
3507
3508 roce_set_field(qpc_mask->byte_240_irrl_tail,
3509 V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
3510 V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
3511 roce_set_field(qpc_mask->byte_240_irrl_tail,
3512 V2_QPC_BYTE_240_IRRL_TAIL_RD_M,
3513 V2_QPC_BYTE_240_IRRL_TAIL_RD_S, 0);
3514 roce_set_field(qpc_mask->byte_240_irrl_tail,
3515 V2_QPC_BYTE_240_RX_ACK_MSN_M,
3516 V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
3517
3518 roce_set_field(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_M,
3519 V2_QPC_BYTE_248_IRRL_PSN_S, 0);
3520 roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_ACK_PSN_ERR_S,
3521 0);
3522 roce_set_field(qpc_mask->byte_248_ack_psn,
3523 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
3524 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
3525 roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_VLD_S,
3526 0);
3527 roce_set_bit(qpc_mask->byte_248_ack_psn,
3528 V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
3529 roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_CQ_ERR_IND_S,
3530 0);
3531
3532 hr_qp->access_flags = attr->qp_access_flags;
3533 roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3534 V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
3535 roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3536 V2_QPC_BYTE_252_TX_CQN_S, 0);
3537
3538 roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_ERR_TYPE_M,
3539 V2_QPC_BYTE_252_ERR_TYPE_S, 0);
3540
3541 roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
3542 V2_QPC_BYTE_256_RQ_CQE_IDX_M,
3543 V2_QPC_BYTE_256_RQ_CQE_IDX_S, 0);
3544 roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
3545 V2_QPC_BYTE_256_SQ_FLUSH_IDX_M,
3546 V2_QPC_BYTE_256_SQ_FLUSH_IDX_S, 0);
3547 }
3548
3549 static void modify_qp_init_to_init(struct ib_qp *ibqp,
3550 const struct ib_qp_attr *attr, int attr_mask,
3551 struct hns_roce_v2_qp_context *context,
3552 struct hns_roce_v2_qp_context *qpc_mask)
3553 {
3554 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3555
3556 /*
3557 * In the v2 engine, software passes the context and context mask to
3558 * the hardware when modifying a QP. If software needs to modify some
3559 * fields of the context, it should set all bits of the relevant
3560 * fields in the context mask to 1, and otherwise clear them to 0.
3561 */
3562 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3563 V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
3564 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3565 V2_QPC_BYTE_4_TST_S, 0);
3566
3567 if (attr_mask & IB_QP_ACCESS_FLAGS) {
3568 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3569 !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
3570 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3571 0);
3572
3573 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3574 !!(attr->qp_access_flags &
3575 IB_ACCESS_REMOTE_WRITE));
3576 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3577 0);
3578
3579 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3580 !!(attr->qp_access_flags &
3581 IB_ACCESS_REMOTE_ATOMIC));
3582 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3583 0);
3584 } else {
3585 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3586 !!(hr_qp->access_flags & IB_ACCESS_REMOTE_READ));
3587 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3588 0);
3589
3590 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3591 !!(hr_qp->access_flags & IB_ACCESS_REMOTE_WRITE));
3592 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3593 0);
3594
3595 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3596 !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
3597 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3598 0);
3599 }
3600
3601 roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3602 V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
3603 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3604 V2_QPC_BYTE_16_PD_S, 0);
3605
3606 roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3607 V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
3608 roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3609 V2_QPC_BYTE_80_RX_CQN_S, 0);
3610
3611 roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3612 V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
3613 roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3614 V2_QPC_BYTE_252_TX_CQN_S, 0);
3615
3616 if (ibqp->srq) {
3617 roce_set_bit(context->byte_76_srqn_op_en,
3618 V2_QPC_BYTE_76_SRQ_EN_S, 1);
3619 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
3620 V2_QPC_BYTE_76_SRQ_EN_S, 0);
3621 roce_set_field(context->byte_76_srqn_op_en,
3622 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
3623 to_hr_srq(ibqp->srq)->srqn);
3624 roce_set_field(qpc_mask->byte_76_srqn_op_en,
3625 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
3626 }
3627
3628 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3629 V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
3630 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3631 V2_QPC_BYTE_4_SQPN_S, 0);
3632
3633 if (attr_mask & IB_QP_DEST_QPN) {
3634 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
3635 V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn);
3636 roce_set_field(qpc_mask->byte_56_dqpn_err,
3637 V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
3638 }
3639 }
3640
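/*
 * Editor's note: validates the MTT lookup for the RQ WQE buffer. With
 * no RQ (wqe_cnt < 1) there is nothing to check; otherwise at least one
 * base address must have been found, and when the RQ buffer extends
 * past the first page, MTT_MIN_COUNT of them.
 */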
3641 static bool check_wqe_rq_mtt_count(struct hns_roce_dev *hr_dev,
3642 struct hns_roce_qp *hr_qp, int mtt_cnt,
3643 u32 page_size)
3644 {
3645 struct device *dev = hr_dev->dev;
3646
3647 if (hr_qp->rq.wqe_cnt < 1)
3648 return true;
3649
3650 if (mtt_cnt < 1) {
3651 dev_err(dev, "qp(0x%lx) rqwqe buf ba find failed\n",
3652 hr_qp->qpn);
3653 return false;
3654 }
3655
3656 if (mtt_cnt < MTT_MIN_COUNT &&
3657 (hr_qp->rq.offset + page_size) < hr_qp->buff_size) {
3658 dev_err(dev, "qp(0x%lx) next rqwqe buf ba find failed\n",
3659 hr_qp->qpn);
3660 return false;
3661 }
3662
3663 return true;
3664 }
3665
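/*
 * INIT -> RTR programs the receive side of the QPC: the WQE/SGE buffer
 * base address and hop numbers, the per-QP IRRL and TRRL table
 * addresses, the destination MAC and SGID index, the path MTU and the
 * RQ producer index. Note the base addresses are stored right-shifted
 * (e.g. wqe_sge_ba >> 3), so they must be suitably aligned.
 */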
3666 static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
3667 const struct ib_qp_attr *attr, int attr_mask,
3668 struct hns_roce_v2_qp_context *context,
3669 struct hns_roce_v2_qp_context *qpc_mask)
3670 {
3671 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
3672 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3673 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3674 struct device *dev = hr_dev->dev;
3675 u64 mtts[MTT_MIN_COUNT] = { 0 };
3676 dma_addr_t dma_handle_3;
3677 dma_addr_t dma_handle_2;
3678 u64 wqe_sge_ba;
3679 u32 page_size;
3680 u8 port_num;
3681 u64 *mtts_3;
3682 u64 *mtts_2;
3683 int count;
3684 u8 *dmac;
3685 u8 *smac;
3686 int port;
3687
3688 /* look up the mtts of the RQ WQE buffer */
3689 page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
3690 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
3691 hr_qp->rq.offset / page_size, mtts,
3692 MTT_MIN_COUNT, &wqe_sge_ba);
3693 if (!ibqp->srq)
3694 if (!check_wqe_rq_mtt_count(hr_dev, hr_qp, count, page_size))
3695 return -EINVAL;
3696
3697 /* look up this QP's entry in the IRRL table */
3698 mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
3699 hr_qp->qpn, &dma_handle_2);
3700 if (!mtts_2) {
3701 dev_err(dev, "qp irrl_table find failed\n");
3702 return -EINVAL;
3703 }
3704
3705 /* look up this QP's entry in the TRRL table */
3706 mtts_3 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
3707 hr_qp->qpn, &dma_handle_3);
3708 if (!mtts_3) {
3709 dev_err(dev, "qp trrl_table find failed\n");
3710 return -EINVAL;
3711 }
3712
3713 if (attr_mask & IB_QP_ALT_PATH) {
3714 dev_err(dev, "INIT2RTR attr_mask (0x%x) error\n", attr_mask);
3715 return -EINVAL;
3716 }
3717
3718 dmac = (u8 *)attr->ah_attr.roce.dmac;
3719 context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3);
3720 qpc_mask->wqe_sge_ba = 0;
3721
3722 /* see the context/qpc_mask convention noted in modify_qp_init_to_init() */
3728 roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
3729 V2_QPC_BYTE_12_WQE_SGE_BA_S, wqe_sge_ba >> (32 + 3));
3730 roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
3731 V2_QPC_BYTE_12_WQE_SGE_BA_S, 0);
3732
3733 roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
3734 V2_QPC_BYTE_12_SQ_HOP_NUM_S,
3735 hr_dev->caps.wqe_sq_hop_num == HNS_ROCE_HOP_NUM_0 ?
3736 0 : hr_dev->caps.wqe_sq_hop_num);
3737 roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
3738 V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0);
3739
3740 roce_set_field(context->byte_20_smac_sgid_idx,
3741 V2_QPC_BYTE_20_SGE_HOP_NUM_M,
3742 V2_QPC_BYTE_20_SGE_HOP_NUM_S,
3743 ((ibqp->qp_type == IB_QPT_GSI) ||
3744 hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
3745 hr_dev->caps.wqe_sge_hop_num : 0);
3746 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3747 V2_QPC_BYTE_20_SGE_HOP_NUM_M,
3748 V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);
3749
3750 roce_set_field(context->byte_20_smac_sgid_idx,
3751 V2_QPC_BYTE_20_RQ_HOP_NUM_M,
3752 V2_QPC_BYTE_20_RQ_HOP_NUM_S,
3753 hr_dev->caps.wqe_rq_hop_num == HNS_ROCE_HOP_NUM_0 ?
3754 0 : hr_dev->caps.wqe_rq_hop_num);
3755 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3756 V2_QPC_BYTE_20_RQ_HOP_NUM_M,
3757 V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0);
3758
3759 roce_set_field(context->byte_16_buf_ba_pg_sz,
3760 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
3761 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S,
3762 hr_qp->wqe_bt_pg_shift + PG_SHIFT_OFFSET);
3763 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
3764 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
3765 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0);
3766
3767 roce_set_field(context->byte_16_buf_ba_pg_sz,
3768 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
3769 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S,
3770 hr_dev->caps.mtt_buf_pg_sz + PG_SHIFT_OFFSET);
3771 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
3772 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
3773 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
3774
3775 context->rq_cur_blk_addr = cpu_to_le32(mtts[0] >> PAGE_ADDR_SHIFT);
3776 qpc_mask->rq_cur_blk_addr = 0;
3777
3778 roce_set_field(context->byte_92_srq_info,
3779 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
3780 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S,
3781 mtts[0] >> (32 + PAGE_ADDR_SHIFT));
3782 roce_set_field(qpc_mask->byte_92_srq_info,
3783 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
3784 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0);
3785
3786 context->rq_nxt_blk_addr = cpu_to_le32(mtts[1] >> PAGE_ADDR_SHIFT);
3787 qpc_mask->rq_nxt_blk_addr = 0;
3788
3789 roce_set_field(context->byte_104_rq_sge,
3790 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
3791 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S,
3792 mtts[1] >> (32 + PAGE_ADDR_SHIFT));
3793 roce_set_field(qpc_mask->byte_104_rq_sge,
3794 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
3795 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
3796
3797 roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
3798 V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4);
3799 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
3800 V2_QPC_BYTE_132_TRRL_BA_S, 0);
3801 context->trrl_ba = cpu_to_le32(dma_handle_3 >> (16 + 4));
3802 qpc_mask->trrl_ba = 0;
3803 roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
3804 V2_QPC_BYTE_140_TRRL_BA_S,
3805 (u32)(dma_handle_3 >> (32 + 16 + 4)));
3806 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
3807 V2_QPC_BYTE_140_TRRL_BA_S, 0);
3808
3809 context->irrl_ba = cpu_to_le32(dma_handle_2 >> 6);
3810 qpc_mask->irrl_ba = 0;
3811 roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
3812 V2_QPC_BYTE_208_IRRL_BA_S,
3813 dma_handle_2 >> (32 + 6));
3814 roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
3815 V2_QPC_BYTE_208_IRRL_BA_S, 0);
3816
3817 roce_set_bit(context->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 1);
3818 roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 0);
3819
3820 roce_set_bit(context->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
3821 hr_qp->sq_signal_bits);
3822 roce_set_bit(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
3823 0);
3824
3825 port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
3826
3827 smac = (u8 *)hr_dev->dev_addr[port];
3828
3829 if (ether_addr_equal_unaligned(dmac, smac) ||
3830 hr_dev->loop_idc == 0x1) {
3831 roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 1);
3832 roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
3833 }
3834
3835 if (attr_mask & IB_QP_DEST_QPN) {
3836 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
3837 V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
3838 roce_set_field(qpc_mask->byte_56_dqpn_err,
3839 V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
3840 }
3841
3842 /* configure the SGID index from the AH attribute */
3843 port_num = rdma_ah_get_port_num(&attr->ah_attr);
3844 roce_set_field(context->byte_20_smac_sgid_idx,
3845 V2_QPC_BYTE_20_SGID_IDX_M,
3846 V2_QPC_BYTE_20_SGID_IDX_S,
3847 hns_get_gid_index(hr_dev, port_num - 1,
3848 grh->sgid_index));
3849 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3850 V2_QPC_BYTE_20_SGID_IDX_M,
3851 V2_QPC_BYTE_20_SGID_IDX_S, 0);
3852 memcpy(&(context->dmac), dmac, sizeof(u32));
3853 roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
3854 V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
3855 qpc_mask->dmac = 0;
3856 roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
3857 V2_QPC_BYTE_52_DMAC_S, 0);
3858
3859
3860 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
3861 V2_QPC_BYTE_56_LP_PKTN_INI_S, 4);
3862 roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
3863 V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
3864
3865 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
3866 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3867 V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
3868 else if (attr_mask & IB_QP_PATH_MTU)
3869 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3870 V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
3871
3872 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3873 V2_QPC_BYTE_24_MTU_S, 0);
3874
3875 roce_set_field(context->byte_84_rq_ci_pi,
3876 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3877 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
3878 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3879 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3880 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
3881
3882 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3883 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
3884 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
3885 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
3886 V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
3887 roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
3888 V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
3889 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
3890 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
3891 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
3892
3893 context->rq_rnr_timer = 0;
3894 qpc_mask->rq_rnr_timer = 0;
3895
3896 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
3897 V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
3898 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
3899 V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
3900
3901
3902 roce_set_field(context->byte_168_irrl_idx,
3903 V2_QPC_BYTE_168_LP_SGEN_INI_M,
3904 V2_QPC_BYTE_168_LP_SGEN_INI_S, 3);
3905 roce_set_field(qpc_mask->byte_168_irrl_idx,
3906 V2_QPC_BYTE_168_LP_SGEN_INI_M,
3907 V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
3908
3909 return 0;
3910 }
3911
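/*
 * RTR -> RTS programs the send side: the current SQ buffer block (and,
 * for GSI QPs or QPs with more than HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE
 * SGEs, the extended SGE block), then clears the retry/ack bookkeeping
 * fields in the mask so hardware starts from a clean send state.
 */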
3912 static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
3913 const struct ib_qp_attr *attr, int attr_mask,
3914 struct hns_roce_v2_qp_context *context,
3915 struct hns_roce_v2_qp_context *qpc_mask)
3916 {
3917 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3918 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3919 struct device *dev = hr_dev->dev;
3920 u64 sge_cur_blk = 0;
3921 u64 sq_cur_blk = 0;
3922 u32 page_size;
3923 int count;
3924
3925 /* look up the mtts of the SQ buffer */
3926 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
3927 if (count < 1) {
3928 dev_err(dev, "qp(0x%lx) buf pa find failed\n", hr_qp->qpn);
3929 return -EINVAL;
3930 }
3931
3932 if (hr_qp->sge.offset) {
3933 page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
3934 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
3935 hr_qp->sge.offset / page_size,
3936 &sge_cur_blk, 1, NULL);
3937 if (count < 1) {
3938 dev_err(dev, "qp(0x%lx) sge pa find failed\n",
3939 hr_qp->qpn);
3940 return -EINVAL;
3941 }
3942 }
3943
3944 /* alternate path and path migration are not supported */
3945 if ((attr_mask & IB_QP_ALT_PATH) ||
3946 (attr_mask & IB_QP_PATH_MIG_STATE)) {
3947 dev_err(dev, "RTR2RTS attr_mask (0x%x) error\n", attr_mask);
3948 return -EINVAL;
3949 }
3950
3951 /* see the context/qpc_mask convention noted in modify_qp_init_to_init() */
3957 context->sq_cur_blk_addr = cpu_to_le32(sq_cur_blk >> PAGE_ADDR_SHIFT);
3958 roce_set_field(context->byte_168_irrl_idx,
3959 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
3960 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
3961 sq_cur_blk >> (32 + PAGE_ADDR_SHIFT));
3962 qpc_mask->sq_cur_blk_addr = 0;
3963 roce_set_field(qpc_mask->byte_168_irrl_idx,
3964 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
3965 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
3966
3967 context->sq_cur_sge_blk_addr = ((ibqp->qp_type == IB_QPT_GSI) ||
3968 hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
3969 cpu_to_le32(sge_cur_blk >>
3970 PAGE_ADDR_SHIFT) : 0;
3971 roce_set_field(context->byte_184_irrl_idx,
3972 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
3973 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
3974 ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs >
3975 HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
3976 (sge_cur_blk >>
3977 (32 + PAGE_ADDR_SHIFT)) : 0);
3978 qpc_mask->sq_cur_sge_blk_addr = 0;
3979 roce_set_field(qpc_mask->byte_184_irrl_idx,
3980 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
3981 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);
3982
3983 context->rx_sq_cur_blk_addr =
3984 cpu_to_le32(sq_cur_blk >> PAGE_ADDR_SHIFT);
3985 roce_set_field(context->byte_232_irrl_sge,
3986 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
3987 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
3988 sq_cur_blk >> (32 + PAGE_ADDR_SHIFT));
3989 qpc_mask->rx_sq_cur_blk_addr = 0;
3990 roce_set_field(qpc_mask->byte_232_irrl_sge,
3991 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
3992 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);
3993
3994 /* The fields below are cleared in the mask only; the new context is
3995  * already all zero, so the values need not be written again.
3996  */
3999 roce_set_field(qpc_mask->byte_232_irrl_sge,
4000 V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
4001 V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
4002
4003 roce_set_field(qpc_mask->byte_240_irrl_tail,
4004 V2_QPC_BYTE_240_RX_ACK_MSN_M,
4005 V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
4006
4007 roce_set_field(qpc_mask->byte_248_ack_psn,
4008 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
4009 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
4010 roce_set_bit(qpc_mask->byte_248_ack_psn,
4011 V2_QPC_BYTE_248_IRRL_PSN_VLD_S, 0);
4012 roce_set_field(qpc_mask->byte_248_ack_psn,
4013 V2_QPC_BYTE_248_IRRL_PSN_M,
4014 V2_QPC_BYTE_248_IRRL_PSN_S, 0);
4015
4016 roce_set_field(qpc_mask->byte_240_irrl_tail,
4017 V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
4018 V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
4019
4020 roce_set_field(qpc_mask->byte_220_retry_psn_msn,
4021 V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
4022 V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
4023
4024 roce_set_bit(qpc_mask->byte_248_ack_psn,
4025 V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
4026
4027 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
4028 V2_QPC_BYTE_212_CHECK_FLG_S, 0);
4029
4030 roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
4031 V2_QPC_BYTE_212_LSN_S, 0x100);
4032 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
4033 V2_QPC_BYTE_212_LSN_S, 0);
4034
4035 roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
4036 V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
4037
4038 return 0;
4039 }
4040
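/*
 * Transitions accepted here (any non-RESET state -> ERR or RESET,
 * RTS/SQD -> RTS/SQD, and SQE -> RTS) need no extra QPC programming
 * beyond the common fields set by the caller.
 */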
4041 static inline bool hns_roce_v2_check_qp_stat(enum ib_qp_state cur_state,
4042 enum ib_qp_state new_state)
4043 {
4044
4045 if ((cur_state != IB_QPS_RESET &&
4046 (new_state == IB_QPS_ERR || new_state == IB_QPS_RESET)) ||
4047 ((cur_state == IB_QPS_RTS || cur_state == IB_QPS_SQD) &&
4048 (new_state == IB_QPS_RTS || new_state == IB_QPS_SQD)) ||
4049 (cur_state == IB_QPS_SQE && new_state == IB_QPS_RTS))
4050 return true;
4051
4052 return false;
4053
4054 }
4055
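/*
 * Programs the path (address vector) fields of the QPC: VLAN enable
 * and ID taken from the GID's L2 fields, the UDP source port for
 * RoCEv2 (fixed at 0x12b7 here), SGID index, hop limit, traffic
 * class, flow label and service level.
 */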
4056 static int hns_roce_v2_set_path(struct ib_qp *ibqp,
4057 const struct ib_qp_attr *attr,
4058 int attr_mask,
4059 struct hns_roce_v2_qp_context *context,
4060 struct hns_roce_v2_qp_context *qpc_mask)
4061 {
4062 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
4063 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4064 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4065 const struct ib_gid_attr *gid_attr = NULL;
4066 int is_roce_protocol;
4067 bool is_udp = false;
4068 u16 vlan = 0xffff;
4069 u8 ib_port;
4070 u8 hr_port;
4071 int ret;
4072
4073 ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num : hr_qp->port + 1;
4074 hr_port = ib_port - 1;
4075 is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
4076 rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
4077
4078 if (is_roce_protocol) {
4079 gid_attr = attr->ah_attr.grh.sgid_attr;
4080 ret = rdma_read_gid_l2_fields(gid_attr, &vlan, NULL);
4081 if (ret)
4082 return ret;
4083
4084 if (gid_attr)
4085 is_udp = (gid_attr->gid_type ==
4086 IB_GID_TYPE_ROCE_UDP_ENCAP);
4087 }
4088
4089 if (vlan < VLAN_CFI_MASK) {
4090 roce_set_bit(context->byte_76_srqn_op_en,
4091 V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1);
4092 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
4093 V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0);
4094 roce_set_bit(context->byte_168_irrl_idx,
4095 V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1);
4096 roce_set_bit(qpc_mask->byte_168_irrl_idx,
4097 V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0);
4098 }
4099
4100 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
4101 V2_QPC_BYTE_24_VLAN_ID_S, vlan);
4102 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
4103 V2_QPC_BYTE_24_VLAN_ID_S, 0);
4104
4105 if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
4106 dev_err(hr_dev->dev, "sgid_index(%u) too large. max is %d\n",
4107 grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]);
4108 return -EINVAL;
4109 }
4110
4111 if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
4112 dev_err(hr_dev->dev, "ah attr is not RDMA roce type\n");
4113 return -EINVAL;
4114 }
4115
4116 roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M,
4117 V2_QPC_BYTE_52_UDPSPN_S,
4118 is_udp ? 0x12b7 : 0);
4119
4120 roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M,
4121 V2_QPC_BYTE_52_UDPSPN_S, 0);
4122
4123 roce_set_field(context->byte_20_smac_sgid_idx,
4124 V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S,
4125 grh->sgid_index);
4126
4127 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
4128 V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0);
4129
4130 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
4131 V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
4132 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
4133 V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
4134
4135 if (hr_dev->pci_dev->revision == 0x21 && is_udp)
4136 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
4137 V2_QPC_BYTE_24_TC_S, grh->traffic_class >> 2);
4138 else
4139 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
4140 V2_QPC_BYTE_24_TC_S, grh->traffic_class);
4141 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
4142 V2_QPC_BYTE_24_TC_S, 0);
4143 roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
4144 V2_QPC_BYTE_28_FL_S, grh->flow_label);
4145 roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
4146 V2_QPC_BYTE_28_FL_S, 0);
4147 memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
4148 memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
4149 roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
4150 V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
4151 roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
4152 V2_QPC_BYTE_28_SL_S, 0);
4153 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
4154
4155 return 0;
4156 }
4157
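/*
 * Dispatches the state transition to its helper; only RESET->INIT,
 * INIT->INIT, INIT->RTR, RTR->RTS and the transitions accepted by
 * hns_roce_v2_check_qp_stat() are legal.
 */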
4158 static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
4159 const struct ib_qp_attr *attr,
4160 int attr_mask,
4161 enum ib_qp_state cur_state,
4162 enum ib_qp_state new_state,
4163 struct hns_roce_v2_qp_context *context,
4164 struct hns_roce_v2_qp_context *qpc_mask)
4165 {
4166 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4167 int ret = 0;
4168
4169 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
4170 memset(qpc_mask, 0, sizeof(*qpc_mask));
4171 modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
4172 qpc_mask);
4173 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
4174 modify_qp_init_to_init(ibqp, attr, attr_mask, context,
4175 qpc_mask);
4176 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
4177 ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
4178 qpc_mask);
4179 if (ret)
4180 goto out;
4181 } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
4182 ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
4183 qpc_mask);
4184 if (ret)
4185 goto out;
4186 } else if (hns_roce_v2_check_qp_stat(cur_state, new_state)) {
4187 /* nothing to program for these transitions */
4188 ;
4189 } else {
4190 dev_err(hr_dev->dev, "Illegal state for QP!\n");
4191 ret = -EINVAL;
4192 goto out;
4193 }
4194
4195 out:
4196 return ret;
4197 }
4198
4199 static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
4200 const struct ib_qp_attr *attr,
4201 int attr_mask,
4202 struct hns_roce_v2_qp_context *context,
4203 struct hns_roce_v2_qp_context *qpc_mask)
4204 {
4205 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4206 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4207 int ret = 0;
4208
4209 if (attr_mask & IB_QP_AV) {
4210 ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context,
4211 qpc_mask);
4212 if (ret)
4213 return ret;
4214 }
4215
4216 if (attr_mask & IB_QP_TIMEOUT) {
4217 if (attr->timeout < 31) {
4218 roce_set_field(context->byte_28_at_fl,
4219 V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
4220 attr->timeout);
4221 roce_set_field(qpc_mask->byte_28_at_fl,
4222 V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
4223 0);
4224 } else {
4225 dev_warn(hr_dev->dev,
4226 "Local ACK timeout shall be 0 to 30.\n");
4227 }
4228 }
4229
4230 if (attr_mask & IB_QP_RETRY_CNT) {
4231 roce_set_field(context->byte_212_lsn,
4232 V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
4233 V2_QPC_BYTE_212_RETRY_NUM_INIT_S,
4234 attr->retry_cnt);
4235 roce_set_field(qpc_mask->byte_212_lsn,
4236 V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
4237 V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);
4238
4239 roce_set_field(context->byte_212_lsn,
4240 V2_QPC_BYTE_212_RETRY_CNT_M,
4241 V2_QPC_BYTE_212_RETRY_CNT_S,
4242 attr->retry_cnt);
4243 roce_set_field(qpc_mask->byte_212_lsn,
4244 V2_QPC_BYTE_212_RETRY_CNT_M,
4245 V2_QPC_BYTE_212_RETRY_CNT_S, 0);
4246 }
4247
4248 if (attr_mask & IB_QP_RNR_RETRY) {
4249 roce_set_field(context->byte_244_rnr_rxack,
4250 V2_QPC_BYTE_244_RNR_NUM_INIT_M,
4251 V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
4252 roce_set_field(qpc_mask->byte_244_rnr_rxack,
4253 V2_QPC_BYTE_244_RNR_NUM_INIT_M,
4254 V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);
4255
4256 roce_set_field(context->byte_244_rnr_rxack,
4257 V2_QPC_BYTE_244_RNR_CNT_M,
4258 V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
4259 roce_set_field(qpc_mask->byte_244_rnr_rxack,
4260 V2_QPC_BYTE_244_RNR_CNT_M,
4261 V2_QPC_BYTE_244_RNR_CNT_S, 0);
4262 }
4263
4264
4265 if (attr_mask & IB_QP_SQ_PSN) {
4266 roce_set_field(context->byte_172_sq_psn,
4267 V2_QPC_BYTE_172_SQ_CUR_PSN_M,
4268 V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
4269 roce_set_field(qpc_mask->byte_172_sq_psn,
4270 V2_QPC_BYTE_172_SQ_CUR_PSN_M,
4271 V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);
4272
4273 roce_set_field(context->byte_196_sq_psn,
4274 V2_QPC_BYTE_196_SQ_MAX_PSN_M,
4275 V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
4276 roce_set_field(qpc_mask->byte_196_sq_psn,
4277 V2_QPC_BYTE_196_SQ_MAX_PSN_M,
4278 V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
4279
4280 roce_set_field(context->byte_220_retry_psn_msn,
4281 V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
4282 V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
4283 roce_set_field(qpc_mask->byte_220_retry_psn_msn,
4284 V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
4285 V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);
4286
4287 roce_set_field(context->byte_224_retry_msg,
4288 V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
4289 V2_QPC_BYTE_224_RETRY_MSG_PSN_S,
4290 attr->sq_psn >> V2_QPC_BYTE_220_RETRY_MSG_PSN_S);
4291 roce_set_field(qpc_mask->byte_224_retry_msg,
4292 V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
4293 V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
4294
4295 roce_set_field(context->byte_224_retry_msg,
4296 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
4297 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S,
4298 attr->sq_psn);
4299 roce_set_field(qpc_mask->byte_224_retry_msg,
4300 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
4301 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);
4302
4303 roce_set_field(context->byte_244_rnr_rxack,
4304 V2_QPC_BYTE_244_RX_ACK_EPSN_M,
4305 V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
4306 roce_set_field(qpc_mask->byte_244_rnr_rxack,
4307 V2_QPC_BYTE_244_RX_ACK_EPSN_M,
4308 V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
4309 }
4310
4311 if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
4312 attr->max_dest_rd_atomic) {
4313 roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
4314 V2_QPC_BYTE_140_RR_MAX_S,
4315 fls(attr->max_dest_rd_atomic - 1));
4316 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
4317 V2_QPC_BYTE_140_RR_MAX_S, 0);
4318 }
4319
4320 if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
4321 roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
4322 V2_QPC_BYTE_208_SR_MAX_S,
4323 fls(attr->max_rd_atomic - 1));
4324 roce_set_field(qpc_mask->byte_208_irrl,
4325 V2_QPC_BYTE_208_SR_MAX_M,
4326 V2_QPC_BYTE_208_SR_MAX_S, 0);
4327 }
4328
4329 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
4330 set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
4331
4332 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
4333 roce_set_field(context->byte_80_rnr_rx_cqn,
4334 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
4335 V2_QPC_BYTE_80_MIN_RNR_TIME_S,
4336 attr->min_rnr_timer);
4337 roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
4338 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
4339 V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
4340 }
4341
4342
4343 if (attr_mask & IB_QP_RQ_PSN) {
4344 roce_set_field(context->byte_108_rx_reqepsn,
4345 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
4346 V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
4347 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
4348 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
4349 V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
4350
4351 roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
4352 V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
4353 roce_set_field(qpc_mask->byte_152_raq,
4354 V2_QPC_BYTE_152_RAQ_PSN_M,
4355 V2_QPC_BYTE_152_RAQ_PSN_S, 0);
4356 }
4357
4358 if (attr_mask & IB_QP_QKEY) {
4359 context->qkey_xrcd = cpu_to_le32(attr->qkey);
4360 qpc_mask->qkey_xrcd = 0;
4361 hr_qp->qkey = attr->qkey;
4362 }
4363
4364 return ret;
4365 }
4366
4367 static void hns_roce_v2_record_opt_fields(struct ib_qp *ibqp,
4368 const struct ib_qp_attr *attr,
4369 int attr_mask)
4370 {
4371 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4372 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4373
4374 if (attr_mask & IB_QP_ACCESS_FLAGS)
4375 hr_qp->atomic_rd_en = attr->qp_access_flags;
4376
4377 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
4378 hr_qp->resp_depth = attr->max_dest_rd_atomic;
4379 if (attr_mask & IB_QP_PORT) {
4380 hr_qp->port = attr->port_num - 1;
4381 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
4382 }
4383 }
4384
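/*
 * Editor's note: ctx[2] keeps the new context and its mask contiguous,
 * which is the layout hns_roce_v2_qp_modify() hands to hardware in one
 * mailbox. A minimal sketch of the convention used below:
 *
 *	memset(context, 0, sizeof(*context));	    - values to write
 *	memset(qpc_mask, 0xff, sizeof(*qpc_mask)); - 0 bits mark fields
 *						      to be modified
 */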
4385 static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
4386 const struct ib_qp_attr *attr,
4387 int attr_mask, enum ib_qp_state cur_state,
4388 enum ib_qp_state new_state)
4389 {
4390 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4391 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4392 struct hns_roce_v2_qp_context ctx[2];
4393 struct hns_roce_v2_qp_context *context = ctx;
4394 struct hns_roce_v2_qp_context *qpc_mask = ctx + 1;
4395 struct device *dev = hr_dev->dev;
4396 int ret;
4397
4398 /* Build a zeroed context and an all-ones mask; each helper clears the
4399  * mask bits of every field it modifies (see modify_qp_init_to_init()).
4400  */
4404 memset(context, 0, sizeof(*context));
4405 memset(qpc_mask, 0xff, sizeof(*qpc_mask));
4406 ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
4407 new_state, context, qpc_mask);
4408 if (ret)
4409 goto out;
4410
4411 /* a QP entering ERR has its SQ/RQ producer indices recorded for flush */
4412 if (new_state == IB_QPS_ERR) {
4413 roce_set_field(context->byte_160_sq_ci_pi,
4414 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
4415 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
4416 hr_qp->sq.head);
4417 roce_set_field(qpc_mask->byte_160_sq_ci_pi,
4418 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
4419 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
4420
4421 if (!ibqp->srq) {
4422 roce_set_field(context->byte_84_rq_ci_pi,
4423 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
4424 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
4425 hr_qp->rq.head);
4426 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
4427 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
4428 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
4429 }
4430 }
4431
4432
4433 ret = hns_roce_v2_set_opt_fields(ibqp, attr, attr_mask, context,
4434 qpc_mask);
4435 if (ret)
4436 goto out;
4437
4438 roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S,
4439 ibqp->srq ? 1 : 0);
4440 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
4441 V2_QPC_BYTE_108_INV_CREDIT_S, 0);
4442
4443
4444 roce_set_field(context->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
4445 V2_QPC_BYTE_60_QP_ST_S, new_state);
4446 roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
4447 V2_QPC_BYTE_60_QP_ST_S, 0);
4448
4449 /* pass the context and mask to hardware via the mailbox */
4450 ret = hns_roce_v2_qp_modify(hr_dev, cur_state, new_state, ctx, hr_qp);
4451 if (ret) {
4452 dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret);
4453 goto out;
4454 }
4455
4456 hr_qp->state = new_state;
4457
4458 hns_roce_v2_record_opt_fields(ibqp, attr, attr_mask);
4459
4460 if (new_state == IB_QPS_RESET && !ibqp->uobject) {
4461 hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
4462 ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
4463 if (ibqp->send_cq != ibqp->recv_cq)
4464 hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
4465 hr_qp->qpn, NULL);
4466
4467 hr_qp->rq.head = 0;
4468 hr_qp->rq.tail = 0;
4469 hr_qp->sq.head = 0;
4470 hr_qp->sq.tail = 0;
4471 hr_qp->next_sge = 0;
4472 if (hr_qp->rq.wqe_cnt)
4473 *hr_qp->rdb.db_record = 0;
4474 }
4475
4476 out:
4477 return ret;
4478 }
4479
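/* Map the hardware QP state to the IB state; -1 flags an unknown state. */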
4480 static inline enum ib_qp_state to_ib_qp_st(enum hns_roce_v2_qp_state state)
4481 {
4482 switch (state) {
4483 case HNS_ROCE_QP_ST_RST: return IB_QPS_RESET;
4484 case HNS_ROCE_QP_ST_INIT: return IB_QPS_INIT;
4485 case HNS_ROCE_QP_ST_RTR: return IB_QPS_RTR;
4486 case HNS_ROCE_QP_ST_RTS: return IB_QPS_RTS;
4487 case HNS_ROCE_QP_ST_SQ_DRAINING:
4488 case HNS_ROCE_QP_ST_SQD: return IB_QPS_SQD;
4489 case HNS_ROCE_QP_ST_SQER: return IB_QPS_SQE;
4490 case HNS_ROCE_QP_ST_ERR: return IB_QPS_ERR;
4491 default: return -1;
4492 }
4493 }
4494
4495 static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
4496 struct hns_roce_qp *hr_qp,
4497 struct hns_roce_v2_qp_context *hr_context)
4498 {
4499 struct hns_roce_cmd_mailbox *mailbox;
4500 int ret;
4501
4502 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4503 if (IS_ERR(mailbox))
4504 return PTR_ERR(mailbox);
4505
4506 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
4507 HNS_ROCE_CMD_QUERY_QPC,
4508 HNS_ROCE_CMD_TIMEOUT_MSECS);
4509 if (ret) {
4510 dev_err(hr_dev->dev, "QUERY QP cmd process error\n");
4511 goto out;
4512 }
4513
4514 memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
4515
4516 out:
4517 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4518 return ret;
4519 }
4520
4521 static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
4522 int qp_attr_mask,
4523 struct ib_qp_init_attr *qp_init_attr)
4524 {
4525 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4526 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4527 struct hns_roce_v2_qp_context context = {};
4528 struct device *dev = hr_dev->dev;
4529 int tmp_qp_state;
4530 int state;
4531 int ret;
4532
4533 memset(qp_attr, 0, sizeof(*qp_attr));
4534 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
4535
4536 mutex_lock(&hr_qp->mutex);
4537
4538 if (hr_qp->state == IB_QPS_RESET) {
4539 qp_attr->qp_state = IB_QPS_RESET;
4540 ret = 0;
4541 goto done;
4542 }
4543
4544 ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context);
4545 if (ret) {
4546 dev_err(dev, "query qpc error\n");
4547 ret = -EINVAL;
4548 goto out;
4549 }
4550
4551 state = roce_get_field(context.byte_60_qpst_tempid,
4552 V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
4553 tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
4554 if (tmp_qp_state == -1) {
4555 dev_err(dev, "Illegal ib_qp_state\n");
4556 ret = -EINVAL;
4557 goto out;
4558 }
4559 hr_qp->state = (u8)tmp_qp_state;
4560 qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
4561 qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context.byte_24_mtu_tc,
4562 V2_QPC_BYTE_24_MTU_M,
4563 V2_QPC_BYTE_24_MTU_S);
4564 qp_attr->path_mig_state = IB_MIG_ARMED;
4565 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
4566 if (hr_qp->ibqp.qp_type == IB_QPT_UD)
4567 qp_attr->qkey = V2_QKEY_VAL;
4568
4569 qp_attr->rq_psn = roce_get_field(context.byte_108_rx_reqepsn,
4570 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
4571 V2_QPC_BYTE_108_RX_REQ_EPSN_S);
4572 qp_attr->sq_psn = (u32)roce_get_field(context.byte_172_sq_psn,
4573 V2_QPC_BYTE_172_SQ_CUR_PSN_M,
4574 V2_QPC_BYTE_172_SQ_CUR_PSN_S);
4575 qp_attr->dest_qp_num = roce_get_field(context.byte_56_dqpn_err,
4576 V2_QPC_BYTE_56_DQPN_M,
4577 V2_QPC_BYTE_56_DQPN_S);
4578 qp_attr->qp_access_flags = ((roce_get_bit(context.byte_76_srqn_op_en,
4579 V2_QPC_BYTE_76_RRE_S)) << V2_QP_RRE_S) |
4580 ((roce_get_bit(context.byte_76_srqn_op_en,
4581 V2_QPC_BYTE_76_RWE_S)) << V2_QP_RWE_S) |
4582 ((roce_get_bit(context.byte_76_srqn_op_en,
4583 V2_QPC_BYTE_76_ATE_S)) << V2_QP_ATE_S);
4584
4585 if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
4586 hr_qp->ibqp.qp_type == IB_QPT_UC) {
4587 struct ib_global_route *grh =
4588 rdma_ah_retrieve_grh(&qp_attr->ah_attr);
4589
4590 rdma_ah_set_sl(&qp_attr->ah_attr,
4591 roce_get_field(context.byte_28_at_fl,
4592 V2_QPC_BYTE_28_SL_M,
4593 V2_QPC_BYTE_28_SL_S));
4594 grh->flow_label = roce_get_field(context.byte_28_at_fl,
4595 V2_QPC_BYTE_28_FL_M,
4596 V2_QPC_BYTE_28_FL_S);
4597 grh->sgid_index = roce_get_field(context.byte_20_smac_sgid_idx,
4598 V2_QPC_BYTE_20_SGID_IDX_M,
4599 V2_QPC_BYTE_20_SGID_IDX_S);
4600 grh->hop_limit = roce_get_field(context.byte_24_mtu_tc,
4601 V2_QPC_BYTE_24_HOP_LIMIT_M,
4602 V2_QPC_BYTE_24_HOP_LIMIT_S);
4603 grh->traffic_class = roce_get_field(context.byte_24_mtu_tc,
4604 V2_QPC_BYTE_24_TC_M,
4605 V2_QPC_BYTE_24_TC_S);
4606
4607 memcpy(grh->dgid.raw, context.dgid, sizeof(grh->dgid.raw));
4608 }
4609
4610 qp_attr->port_num = hr_qp->port + 1;
4611 qp_attr->sq_draining = 0;
4612 qp_attr->max_rd_atomic = 1 << roce_get_field(context.byte_208_irrl,
4613 V2_QPC_BYTE_208_SR_MAX_M,
4614 V2_QPC_BYTE_208_SR_MAX_S);
4615 qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context.byte_140_raq,
4616 V2_QPC_BYTE_140_RR_MAX_M,
4617 V2_QPC_BYTE_140_RR_MAX_S);
4618 qp_attr->min_rnr_timer = (u8)roce_get_field(context.byte_80_rnr_rx_cqn,
4619 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
4620 V2_QPC_BYTE_80_MIN_RNR_TIME_S);
4621 qp_attr->timeout = (u8)roce_get_field(context.byte_28_at_fl,
4622 V2_QPC_BYTE_28_AT_M,
4623 V2_QPC_BYTE_28_AT_S);
4624 qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn,
4625 V2_QPC_BYTE_212_RETRY_CNT_M,
4626 V2_QPC_BYTE_212_RETRY_CNT_S);
4627 qp_attr->rnr_retry = le32_to_cpu(context.rq_rnr_timer);
4628
4629 done:
4630 qp_attr->cur_qp_state = qp_attr->qp_state;
4631 qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
4632 qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
4633
4634 if (!ibqp->uobject) {
4635 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
4636 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
4637 } else {
4638 qp_attr->cap.max_send_wr = 0;
4639 qp_attr->cap.max_send_sge = 0;
4640 }
4641
4642 qp_init_attr->cap = qp_attr->cap;
4643
4644 out:
4645 mutex_unlock(&hr_qp->mutex);
4646 return ret;
4647 }
4648
4649 static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
4650 struct hns_roce_qp *hr_qp,
4651 struct ib_udata *udata)
4652 {
4653 struct hns_roce_cq *send_cq, *recv_cq;
4654 struct ib_device *ibdev = &hr_dev->ib_dev;
4655 int ret = 0;
4656
4657 if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
4658 /* modify the QP to the Reset state before releasing it */
4659 ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
4660 hr_qp->state, IB_QPS_RESET);
4661 if (ret)
4662 ibdev_err(ibdev, "modify QP to Reset failed.\n");
4663 }
4664
4665 send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
4666 recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
4667
4668 hns_roce_lock_cqs(send_cq, recv_cq);
4669
4670 if (!udata) {
4671 __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
4672 to_hr_srq(hr_qp->ibqp.srq) : NULL);
4673 if (send_cq != recv_cq)
4674 __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
4675 }
4676
4677 hns_roce_qp_remove(hr_dev, hr_qp);
4678
4679 hns_roce_unlock_cqs(send_cq, recv_cq);
4680
4681 hns_roce_qp_free(hr_dev, hr_qp);
4682
4683
4684 if ((hr_qp->ibqp.qp_type == IB_QPT_RC) ||
4685 (hr_qp->ibqp.qp_type == IB_QPT_UC) ||
4686 (hr_qp->ibqp.qp_type == IB_QPT_UD))
4687 hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
4688
4689 hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr);
4690
4691 if (udata) {
4692 struct hns_roce_ucontext *context =
4693 rdma_udata_to_drv_context(
4694 udata,
4695 struct hns_roce_ucontext,
4696 ibucontext);
4697
4698 if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1))
4699 hns_roce_db_unmap_user(context, &hr_qp->sdb);
4700
4701 if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1))
4702 hns_roce_db_unmap_user(context, &hr_qp->rdb);
4703 } else {
4704 kfree(hr_qp->sq.wrid);
4705 kfree(hr_qp->rq.wrid);
4706 hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
4707 if (hr_qp->rq.wqe_cnt)
4708 hns_roce_free_db(hr_dev, &hr_qp->rdb);
4709 }
4710 ib_umem_release(hr_qp->umem);
4711
4712 if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
4713 hr_qp->rq.wqe_cnt) {
4714 kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
4715 kfree(hr_qp->rq_inl_buf.wqe_list);
4716 }
4717
4718 return ret;
4719 }
4720
4721 static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
4722 {
4723 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4724 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4725 int ret;
4726
4727 ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
4728 if (ret)
4729 ibdev_err(&hr_dev->ib_dev, "Destroy qp 0x%06lx failed(%d)\n",
4730 hr_qp->qpn, ret);
4731
4732 if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
4733 kfree(hr_to_hr_sqp(hr_qp));
4734 else
4735 kfree(hr_qp);
4736
4737 return 0;
4738 }
4739
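/*
 * SCC (congestion control) context init is a three-step command
 * sequence under scc_mutex: reset, per-QP clear, then poll the
 * clr_done flag every 20ms, giving up with -ETIMEDOUT after
 * HNS_ROCE_CMQ_SCC_CLR_DONE_CNT tries.
 */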
4740 static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
4741 struct hns_roce_qp *hr_qp)
4742 {
4743 struct hns_roce_sccc_clr_done *resp;
4744 struct hns_roce_sccc_clr *clr;
4745 struct hns_roce_cmq_desc desc;
4746 int ret, i;
4747
4748 mutex_lock(&hr_dev->qp_table.scc_mutex);
4749
4750 /* reset the SCC context */
4751 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
4752 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
4753 if (ret) {
4754 dev_err(hr_dev->dev, "Reset SCC ctx failed(%d)\n", ret);
4755 goto out;
4756 }
4757
4758 /* clear the SCC context of this QP */
4759 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLR_SCCC, false);
4760 clr = (struct hns_roce_sccc_clr *)desc.data;
4761 clr->qpn = cpu_to_le32(hr_qp->qpn);
4762 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
4763 if (ret) {
4764 dev_err(hr_dev->dev, "Clear SCC ctx failed(%d)\n", ret);
4765 goto out;
4766 }
4767
4768 /* poll until the SCC context clear is done */
4769 resp = (struct hns_roce_sccc_clr_done *)desc.data;
4770 for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) {
4771 hns_roce_cmq_setup_basic_desc(&desc,
4772 HNS_ROCE_OPC_QUERY_SCCC, true);
4773 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
4774 if (ret) {
4775 dev_err(hr_dev->dev, "Query clr cmq failed(%d)\n", ret);
4776 goto out;
4777 }
4778
4779 if (resp->clr_done)
4780 goto out;
4781
4782 msleep(20);
4783 }
4784
4785 dev_err(hr_dev->dev, "Query SCC clr done flag timed out.\n");
4786 ret = -ETIMEDOUT;
4787
4788 out:
4789 mutex_unlock(&hr_dev->qp_table.scc_mutex);
4790 return ret;
4791 }
4792
4793 static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
4794 {
4795 struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
4796 struct hns_roce_v2_cq_context *cq_context;
4797 struct hns_roce_cq *hr_cq = to_hr_cq(cq);
4798 struct hns_roce_v2_cq_context *cqc_mask;
4799 struct hns_roce_cmd_mailbox *mailbox;
4800 int ret;
4801
4802 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4803 if (IS_ERR(mailbox))
4804 return PTR_ERR(mailbox);
4805
4806 cq_context = mailbox->buf;
4807 cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;
4808
4809 memset(cqc_mask, 0xff, sizeof(*cqc_mask));
4810
4811 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
4812 V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
4813 cq_count);
4814 roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
4815 V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
4816 0);
4817 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
4818 V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
4819 cq_period);
4820 roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
4821 V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
4822 0);
4823
4824 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 1,
4825 HNS_ROCE_CMD_MODIFY_CQC,
4826 HNS_ROCE_CMD_TIMEOUT_MSECS);
4827 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4828 if (ret)
4829 dev_err(hr_dev->dev, "MODIFY CQ failed to send cmd mailbox.\n");
4830
4831 return ret;
4832 }
4833
4834 static void hns_roce_set_qps_to_err(struct hns_roce_dev *hr_dev, u32 qpn)
4835 {
4836 struct hns_roce_qp *hr_qp;
4837 struct ib_qp_attr attr;
4838 int attr_mask;
4839 int ret;
4840
4841 hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
4842 if (!hr_qp) {
4843 dev_warn(hr_dev->dev, "no hr_qp can be found!\n");
4844 return;
4845 }
4846
4847 if (hr_qp->ibqp.uobject) {
4848 if (hr_qp->sdb_en == 1) {
4849 hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
4850 if (hr_qp->rdb_en == 1)
4851 hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
4852 } else {
4853 dev_warn(hr_dev->dev, "flush cqe is unsupported in userspace!\n");
4854 return;
4855 }
4856 }
4857
4858 attr_mask = IB_QP_STATE;
4859 attr.qp_state = IB_QPS_ERR;
4860 ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr, attr_mask,
4861 hr_qp->state, IB_QPS_ERR);
4862 if (ret)
4863 dev_err(hr_dev->dev, "failed to modify qp %d to err state.\n",
4864 qpn);
4865 }
4866
4867 static void hns_roce_irq_work_handle(struct work_struct *work)
4868 {
4869 struct hns_roce_work *irq_work =
4870 container_of(work, struct hns_roce_work, work);
4871 struct device *dev = irq_work->hr_dev->dev;
4872 u32 qpn = irq_work->qpn;
4873 u32 cqn = irq_work->cqn;
4874
4875 switch (irq_work->event_type) {
4876 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
4877 dev_info(dev, "Path migration succeeded.\n");
4878 break;
4879 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
4880 dev_warn(dev, "Path migration failed.\n");
4881 break;
4882 case HNS_ROCE_EVENT_TYPE_COMM_EST:
4883 break;
4884 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
4885 dev_warn(dev, "Send queue drained.\n");
4886 break;
4887 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
4888 dev_err(dev, "Local work queue 0x%x catas error, sub_type:%d\n",
4889 qpn, irq_work->sub_type);
4890 hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
4891 break;
4892 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
4893 dev_err(dev, "Invalid request local work queue 0x%x error.\n",
4894 qpn);
4895 hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
4896 break;
4897 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
4898 dev_err(dev, "Local access violation work queue 0x%x error, sub_type:%d\n",
4899 qpn, irq_work->sub_type);
4900 hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
4901 break;
4902 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
4903 dev_warn(dev, "SRQ limit reached.\n");
4904 break;
4905 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
4906 dev_warn(dev, "SRQ last wqe reached.\n");
4907 break;
4908 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
4909 dev_err(dev, "SRQ catas error.\n");
4910 break;
4911 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
4912 dev_err(dev, "CQ 0x%x access err.\n", cqn);
4913 break;
4914 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
4915 dev_warn(dev, "CQ 0x%x overflow\n", cqn);
4916 break;
4917 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
4918 dev_warn(dev, "DB overflow.\n");
4919 break;
4920 case HNS_ROCE_EVENT_TYPE_FLR:
4921 dev_warn(dev, "Function level reset.\n");
4922 break;
4923 default:
4924 break;
4925 }
4926
4927 kfree(irq_work);
4928 }
4929
4930 static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
4931 struct hns_roce_eq *eq,
4932 u32 qpn, u32 cqn)
4933 {
4934 struct hns_roce_work *irq_work;
4935
4936 irq_work = kzalloc(sizeof(struct hns_roce_work), GFP_ATOMIC);
4937 if (!irq_work)
4938 return;
4939
4940 INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
4941 irq_work->hr_dev = hr_dev;
4942 irq_work->qpn = qpn;
4943 irq_work->cqn = cqn;
4944 irq_work->event_type = eq->event_type;
4945 irq_work->sub_type = eq->sub_type;
4946 queue_work(hr_dev->irq_workq, &(irq_work->work));
4947 }
4948
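/*
 * Ring the EQ doorbell: word 0 carries the doorbell command (armed or
 * not, AEQ vs CEQ) plus the EQ number tag for CEQs; word 1 carries the
 * masked consumer index.
 */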
4949 static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
4950 {
4951 struct hns_roce_dev *hr_dev = eq->hr_dev;
4952 __le32 doorbell[2];
4953
4954 doorbell[0] = 0;
4955 doorbell[1] = 0;
4956
4957 if (eq->type_flag == HNS_ROCE_AEQ) {
4958 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
4959 HNS_ROCE_V2_EQ_DB_CMD_S,
4960 eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
4961 HNS_ROCE_EQ_DB_CMD_AEQ :
4962 HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
4963 } else {
4964 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_TAG_M,
4965 HNS_ROCE_V2_EQ_DB_TAG_S, eq->eqn);
4966
4967 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
4968 HNS_ROCE_V2_EQ_DB_CMD_S,
4969 eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
4970 HNS_ROCE_EQ_DB_CMD_CEQ :
4971 HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
4972 }
4973
4974 roce_set_field(doorbell[1], HNS_ROCE_V2_EQ_DB_PARA_M,
4975 HNS_ROCE_V2_EQ_DB_PARA_S,
4976 (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));
4977
4978 hns_roce_write64(hr_dev, doorbell, eq->doorbell);
4979 }
4980
4981 static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry)
4982 {
4983 u32 buf_chk_sz;
4984 unsigned long off;
4985
4986 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4987 off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
4988
4989 return (struct hns_roce_aeqe *)((char *)(eq->buf_list->buf) +
4990 off % buf_chk_sz);
4991 }
4992
4993 static struct hns_roce_aeqe *mhop_get_aeqe(struct hns_roce_eq *eq, u32 entry)
4994 {
4995 u32 buf_chk_sz;
4996 unsigned long off;
4997
4998 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4999
5000 off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
5001
5002 if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
5003 return (struct hns_roce_aeqe *)((u8 *)(eq->bt_l0) +
5004 off % buf_chk_sz);
5005 else
5006 return (struct hns_roce_aeqe *)((u8 *)
5007 (eq->buf[off / buf_chk_sz]) + off % buf_chk_sz);
5008 }
5009
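/*
 * Owner-bit polling: cons_index runs over twice the queue depth, so
 * (cons_index & entries) flips on every wrap. An entry belongs to
 * software when its owner bit differs from that parity:
 *
 *	valid = owner_bit ^ !!(cons_index & entries);
 */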
5010 static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
5011 {
5012 struct hns_roce_aeqe *aeqe;
5013
5014 if (!eq->hop_num)
5015 aeqe = get_aeqe_v2(eq, eq->cons_index);
5016 else
5017 aeqe = mhop_get_aeqe(eq, eq->cons_index);
5018
5019 return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
5020 !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
5021 }
5022
5023 static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
5024 struct hns_roce_eq *eq)
5025 {
5026 struct device *dev = hr_dev->dev;
5027 struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq);
5028 int aeqe_found = 0;
5029 int event_type;
5030 int sub_type;
5031 u32 srqn;
5032 u32 qpn;
5033 u32 cqn;
5034
5035 while (aeqe) {
5036 /* Make sure we read the AEQE payload only after we have checked
5037  * the ownership bit of that entry (hence the dma_rmb() below).
5038  */
5039 dma_rmb();
5040
5041 event_type = roce_get_field(aeqe->asyn,
5042 HNS_ROCE_V2_AEQE_EVENT_TYPE_M,
5043 HNS_ROCE_V2_AEQE_EVENT_TYPE_S);
5044 sub_type = roce_get_field(aeqe->asyn,
5045 HNS_ROCE_V2_AEQE_SUB_TYPE_M,
5046 HNS_ROCE_V2_AEQE_SUB_TYPE_S);
5047 qpn = roce_get_field(aeqe->event.qp_event.qp,
5048 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
5049 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
5050 cqn = roce_get_field(aeqe->event.cq_event.cq,
5051 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
5052 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
5053 srqn = roce_get_field(aeqe->event.srq_event.srq,
5054 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
5055 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
5056
5057 switch (event_type) {
5058 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
5059 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
5060 case HNS_ROCE_EVENT_TYPE_COMM_EST:
5061 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
5062 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
5063 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
5064 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
5065 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
5066 hns_roce_qp_event(hr_dev, qpn, event_type);
5067 break;
5068 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
5069 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
5070 hns_roce_srq_event(hr_dev, srqn, event_type);
5071 break;
5072 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
5073 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
5074 hns_roce_cq_event(hr_dev, cqn, event_type);
5075 break;
5076 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
5077 break;
5078 case HNS_ROCE_EVENT_TYPE_MB:
5079 hns_roce_cmd_event(hr_dev,
5080 le16_to_cpu(aeqe->event.cmd.token),
5081 aeqe->event.cmd.status,
5082 le64_to_cpu(aeqe->event.cmd.out_param));
5083 break;
5084 case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
5085 break;
5086 case HNS_ROCE_EVENT_TYPE_FLR:
5087 break;
5088 default:
5089 dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
5090 event_type, eq->eqn, eq->cons_index);
5091 break;
5092 }
5093
5094 eq->event_type = event_type;
5095 eq->sub_type = sub_type;
5096 ++eq->cons_index;
5097 aeqe_found = 1;
5098
5099 if (eq->cons_index > (2 * eq->entries - 1))
5100 eq->cons_index = 0;
5101
5102 hns_roce_v2_init_irq_work(hr_dev, eq, qpn, cqn);
5103
5104 aeqe = next_aeqe_sw_v2(eq);
5105 }
5106
5107 set_eq_cons_index_v2(eq);
5108 return aeqe_found;
5109 }
5110
5111 static struct hns_roce_ceqe *get_ceqe_v2(struct hns_roce_eq *eq, u32 entry)
5112 {
5113 u32 buf_chk_sz;
5114 unsigned long off;
5115
5116 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
5117 off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
5118
5119 return (struct hns_roce_ceqe *)((char *)(eq->buf_list->buf) +
5120 off % buf_chk_sz);
5121 }
5122
5123 static struct hns_roce_ceqe *mhop_get_ceqe(struct hns_roce_eq *eq, u32 entry)
5124 {
5125 u32 buf_chk_sz;
5126 unsigned long off;
5127
5128 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
5129
5130 off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
5131
5132 if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
5133 return (struct hns_roce_ceqe *)((u8 *)(eq->bt_l0) +
5134 off % buf_chk_sz);
5135 else
5136 return (struct hns_roce_ceqe *)((u8 *)(eq->buf[off /
5137 buf_chk_sz]) + off % buf_chk_sz);
5138 }
5139
5140 static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
5141 {
5142 struct hns_roce_ceqe *ceqe;
5143
5144 if (!eq->hop_num)
5145 ceqe = get_ceqe_v2(eq, eq->cons_index);
5146 else
5147 ceqe = mhop_get_ceqe(eq, eq->cons_index);
5148
5149 return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
5150 (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
5151 }
5152
5153 static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
5154 struct hns_roce_eq *eq)
5155 {
5156 struct device *dev = hr_dev->dev;
5157 struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq);
5158 int ceqe_found = 0;
5159 u32 cqn;
5160
5161 while (ceqe) {
5162 /* Make sure we read the CEQE payload only after we have checked
5163  * the ownership bit of that entry (hence the dma_rmb() below).
5164  */
5165 dma_rmb();
5166
5167 cqn = roce_get_field(ceqe->comp,
5168 HNS_ROCE_V2_CEQE_COMP_CQN_M,
5169 HNS_ROCE_V2_CEQE_COMP_CQN_S);
5170
5171 hns_roce_cq_completion(hr_dev, cqn);
5172
5173 ++eq->cons_index;
5174 ceqe_found = 1;
5175
5176 if (eq->cons_index > (EQ_DEPTH_COEFF * eq->entries - 1)) {
5177 dev_warn(dev, "cons_index overflow, set back to 0.\n");
5178 eq->cons_index = 0;
5179 }
5180
5181 ceqe = next_ceqe_sw_v2(eq);
5182 }
5183
5184 set_eq_cons_index_v2(eq);
5185
5186 return ceqe_found;
5187 }
5188
5189 static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
5190 {
5191 struct hns_roce_eq *eq = eq_ptr;
5192 struct hns_roce_dev *hr_dev = eq->hr_dev;
5193 int int_work = 0;
5194
5195 if (eq->type_flag == HNS_ROCE_CEQ)
5196 /* completion event interrupt */
5197 int_work = hns_roce_v2_ceq_int(hr_dev, eq);
5198 else
5199 /* asynchronous event interrupt */
5200 int_work = hns_roce_v2_aeq_int(hr_dev, eq);
5201
5202 return IRQ_RETVAL(int_work);
5203 }
5204
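/*
 * Abnormal interrupt handler: the status bits are write-1-to-clear.
 * An AEQ overflow is treated as fatal and a function-level reset is
 * requested through the hnae3 ops before the interrupt is re-enabled.
 */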
5205 static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
5206 {
5207 struct hns_roce_dev *hr_dev = dev_id;
5208 struct device *dev = hr_dev->dev;
5209 int int_work = 0;
5210 u32 int_st;
5211 u32 int_en;
5212
5213 /* read the abnormal interrupt status and enable bits */
5214 int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
5215 int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
5216
5217 if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
5218 struct pci_dev *pdev = hr_dev->pci_dev;
5219 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
5220 const struct hnae3_ae_ops *ops = ae_dev->ops;
5221
5222 dev_err(dev, "AEQ overflow!\n");
5223
5224 int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S;
5225 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5226
5227 /* request a function-level reset through the hnae3 framework */
5228 if (ops->set_default_reset_request)
5229 ops->set_default_reset_request(ae_dev,
5230 HNAE3_FUNC_RESET);
5231 if (ops->reset_event)
5232 ops->reset_event(pdev, NULL);
5233
5234 int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
5235 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5236
5237 int_work = 1;
5238 } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) {
5239 dev_err(dev, "BUS ERR!\n");
5240
5241 int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S;
5242 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5243
5244 int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
5245 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5246
5247 int_work = 1;
5248 } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) {
5249 dev_err(dev, "OTHER ERR!\n");
5250
5251 int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S;
5252 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5253
5254 int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
5255 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5256
5257 int_work = 1;
5258 } else
5259 dev_err(dev, "No abnormal irq found!\n");
5260
5261 return IRQ_RETVAL(int_work);
5262 }
5263
5264 static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
5265 int eq_num, int enable_flag)
5266 {
5267 int i;
5268
5269 if (enable_flag == EQ_ENABLE) {
5270 for (i = 0; i < eq_num; i++)
5271 roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
5272 i * EQ_REG_OFFSET,
5273 HNS_ROCE_V2_VF_EVENT_INT_EN_M);
5274
5275 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
5276 HNS_ROCE_V2_VF_ABN_INT_EN_M);
5277 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
5278 HNS_ROCE_V2_VF_ABN_INT_CFG_M);
5279 } else {
5280 for (i = 0; i < eq_num; i++)
5281 roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
5282 i * EQ_REG_OFFSET,
5283 HNS_ROCE_V2_VF_EVENT_INT_EN_M & 0x0);
5284
5285 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
5286 HNS_ROCE_V2_VF_ABN_INT_EN_M & 0x0);
5287 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
5288 HNS_ROCE_V2_VF_ABN_INT_CFG_M & 0x0);
5289 }
5290 }
5291
5292 static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
5293 {
5294 struct device *dev = hr_dev->dev;
5295 int ret;
5296
5297 if (eqn < hr_dev->caps.num_comp_vectors)
5298 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
5299 0, HNS_ROCE_CMD_DESTROY_CEQC,
5300 HNS_ROCE_CMD_TIMEOUT_MSECS);
5301 else
5302 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
5303 0, HNS_ROCE_CMD_DESTROY_AEQC,
5304 HNS_ROCE_CMD_TIMEOUT_MSECS);
5305 if (ret)
5306 dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
5307 }
5308
5309 static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev,
5310 struct hns_roce_eq *eq)
5311 {
5312 struct device *dev = hr_dev->dev;
5313 u64 idx;
5314 u64 size;
5315 u32 buf_chk_sz;
5316 u32 bt_chk_sz;
5317 u32 mhop_num;
5318 int eqe_alloc;
5319 int i = 0;
5320 int j = 0;
5321
5322 mhop_num = hr_dev->caps.eqe_hop_num;
5323 buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
5324 bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
5325
5326 if (mhop_num == HNS_ROCE_HOP_NUM_0) {
5327 dma_free_coherent(dev, (unsigned int)(eq->entries *
5328 eq->eqe_size), eq->bt_l0, eq->l0_dma);
5329 return;
5330 }
5331
5332 dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
5333 if (mhop_num == 1) {
5334 for (i = 0; i < eq->l0_last_num; i++) {
5335 if (i == eq->l0_last_num - 1) {
5336 eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
5337 size = (eq->entries - eqe_alloc) * eq->eqe_size;
5338 dma_free_coherent(dev, size, eq->buf[i],
5339 eq->buf_dma[i]);
5340 break;
5341 }
5342 dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
5343 eq->buf_dma[i]);
5344 }
5345 } else if (mhop_num == 2) {
5346 for (i = 0; i < eq->l0_last_num; i++) {
5347 dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
5348 eq->l1_dma[i]);
5349
5350 for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
5351 idx = i * (bt_chk_sz / BA_BYTE_LEN) + j;
5352 if ((i == eq->l0_last_num - 1)
5353 && j == eq->l1_last_num - 1) {
5354 eqe_alloc = (buf_chk_sz / eq->eqe_size)
5355 * idx;
5356 size = (eq->entries - eqe_alloc)
5357 * eq->eqe_size;
5358 dma_free_coherent(dev, size,
5359 eq->buf[idx],
5360 eq->buf_dma[idx]);
5361 break;
5362 }
5363 dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
5364 eq->buf_dma[idx]);
5365 }
5366 }
5367 }
5368 kfree(eq->buf_dma);
5369 kfree(eq->buf);
5370 kfree(eq->l1_dma);
5371 kfree(eq->bt_l1);
5372 eq->buf_dma = NULL;
5373 eq->buf = NULL;
5374 eq->l1_dma = NULL;
5375 eq->bt_l1 = NULL;
5376 }
5377
5378 static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev,
5379 struct hns_roce_eq *eq)
5380 {
5381 u32 buf_chk_sz;
5382
5383 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
5384
5385 if (hr_dev->caps.eqe_hop_num) {
5386 hns_roce_mhop_free_eq(hr_dev, eq);
5387 return;
5388 }
5389
5390 dma_free_coherent(hr_dev->dev, buf_chk_sz, eq->buf_list->buf,
5391 eq->buf_list->map);
5392 kfree(eq->buf_list);
5393 }
5394
5395 static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
5396 struct hns_roce_eq *eq,
5397 void *mb_buf)
5398 {
5399 struct hns_roce_eq_context *eqc;
5400
5401 eqc = mb_buf;
5402 memset(eqc, 0, sizeof(struct hns_roce_eq_context));
5403
5404 /* init eqc */
5405 eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
5406 eq->hop_num = hr_dev->caps.eqe_hop_num;
5407 eq->cons_index = 0;
5408 eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
5409 eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
5410 eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
5411 eq->eqe_ba_pg_sz = hr_dev->caps.eqe_ba_pg_sz;
5412 eq->eqe_buf_pg_sz = hr_dev->caps.eqe_buf_pg_sz;
5413 eq->shift = ilog2((unsigned int)eq->entries);
5414
5415 if (!eq->hop_num)
5416 eq->eqe_ba = eq->buf_list->map;
5417 else
5418 eq->eqe_ba = eq->l0_dma;
5419
5420 /* set eqc state */
5421 roce_set_field(eqc->byte_4,
5422 HNS_ROCE_EQC_EQ_ST_M,
5423 HNS_ROCE_EQC_EQ_ST_S,
5424 HNS_ROCE_V2_EQ_STATE_VALID);
5425
5426 /* set eqe hop num */
5427 roce_set_field(eqc->byte_4,
5428 HNS_ROCE_EQC_HOP_NUM_M,
5429 HNS_ROCE_EQC_HOP_NUM_S, eq->hop_num);
5430
5431 /* set eqc over_ignore */
5432 roce_set_field(eqc->byte_4,
5433 HNS_ROCE_EQC_OVER_IGNORE_M,
5434 HNS_ROCE_EQC_OVER_IGNORE_S, eq->over_ignore);
5435
5436 /* set eqc coalesce */
5437 roce_set_field(eqc->byte_4,
5438 HNS_ROCE_EQC_COALESCE_M,
5439 HNS_ROCE_EQC_COALESCE_S, eq->coalesce);
5440
5441 /* set eqc arm_st */
5442 roce_set_field(eqc->byte_4,
5443 HNS_ROCE_EQC_ARM_ST_M,
5444 HNS_ROCE_EQC_ARM_ST_S, eq->arm_st);
5445
5446 /* set eqn */
5447 roce_set_field(eqc->byte_4,
5448 HNS_ROCE_EQC_EQN_M,
5449 HNS_ROCE_EQC_EQN_S, eq->eqn);
5450
5451 /* set eqe_cnt */
5452 roce_set_field(eqc->byte_4,
5453 HNS_ROCE_EQC_EQE_CNT_M,
5454 HNS_ROCE_EQC_EQE_CNT_S,
5455 HNS_ROCE_EQ_INIT_EQE_CNT);
5456
5457 /* set eqe_ba_pg_sz */
5458 roce_set_field(eqc->byte_8,
5459 HNS_ROCE_EQC_BA_PG_SZ_M,
5460 HNS_ROCE_EQC_BA_PG_SZ_S,
5461 eq->eqe_ba_pg_sz + PG_SHIFT_OFFSET);
5462
5463 /* set eqe_buf_pg_sz */
5464 roce_set_field(eqc->byte_8,
5465 HNS_ROCE_EQC_BUF_PG_SZ_M,
5466 HNS_ROCE_EQC_BUF_PG_SZ_S,
5467 eq->eqe_buf_pg_sz + PG_SHIFT_OFFSET);
5468
5469 /* set eq_producer_idx */
5470 roce_set_field(eqc->byte_8,
5471 HNS_ROCE_EQC_PROD_INDX_M,
5472 HNS_ROCE_EQC_PROD_INDX_S,
5473 HNS_ROCE_EQ_INIT_PROD_IDX);
5474
5475 /* set eq_max_cnt */
5476 roce_set_field(eqc->byte_12,
5477 HNS_ROCE_EQC_MAX_CNT_M,
5478 HNS_ROCE_EQC_MAX_CNT_S, eq->eq_max_cnt);
5479
5480 /* set eq_period */
5481 roce_set_field(eqc->byte_12,
5482 HNS_ROCE_EQC_PERIOD_M,
5483 HNS_ROCE_EQC_PERIOD_S, eq->eq_period);
5484
5485 /* set eqe_report_timer */
5486 roce_set_field(eqc->eqe_report_timer,
5487 HNS_ROCE_EQC_REPORT_TIMER_M,
5488 HNS_ROCE_EQC_REPORT_TIMER_S,
5489 HNS_ROCE_EQ_INIT_REPORT_TIMER);
5490
5491 /* set eqe_ba [34:3] */
5492 roce_set_field(eqc->eqe_ba0,
5493 HNS_ROCE_EQC_EQE_BA_L_M,
5494 HNS_ROCE_EQC_EQE_BA_L_S, eq->eqe_ba >> 3);
5495
5496 /* set eqe_ba [63:35] */
5497 roce_set_field(eqc->eqe_ba1,
5498 HNS_ROCE_EQC_EQE_BA_H_M,
5499 HNS_ROCE_EQC_EQE_BA_H_S, eq->eqe_ba >> 35);
5500
5501 /* set eq shift */
5502 roce_set_field(eqc->byte_28,
5503 HNS_ROCE_EQC_SHIFT_M,
5504 HNS_ROCE_EQC_SHIFT_S, eq->shift);
5505
5506 /* set eq MSI_IDX */
5507 roce_set_field(eqc->byte_28,
5508 HNS_ROCE_EQC_MSI_INDX_M,
5509 HNS_ROCE_EQC_MSI_INDX_S,
5510 HNS_ROCE_EQ_INIT_MSI_IDX);
5511
5512 /* set cur_eqe_ba [27:12] */
5513 roce_set_field(eqc->byte_28,
5514 HNS_ROCE_EQC_CUR_EQE_BA_L_M,
5515 HNS_ROCE_EQC_CUR_EQE_BA_L_S, eq->cur_eqe_ba >> 12);
5516
5517 /* set cur_eqe_ba [43:28] */
5518 roce_set_field(eqc->byte_32,
5519 HNS_ROCE_EQC_CUR_EQE_BA_M_M,
5520 HNS_ROCE_EQC_CUR_EQE_BA_M_S, eq->cur_eqe_ba >> 28);
5521
5522 /* set cur_eqe_ba [59:44] */
5523 roce_set_field(eqc->byte_36,
5524 HNS_ROCE_EQC_CUR_EQE_BA_H_M,
5525 HNS_ROCE_EQC_CUR_EQE_BA_H_S, eq->cur_eqe_ba >> 60);
5526
5527 /* set eq consumer idx */
5528 roce_set_field(eqc->byte_36,
5529 HNS_ROCE_EQC_CONS_INDX_M,
5530 HNS_ROCE_EQC_CONS_INDX_S,
5531 HNS_ROCE_EQ_INIT_CONS_IDX);
5532
5533 /* set nxt_eqe_ba [43:12] */
5534 roce_set_field(eqc->nxt_eqe_ba0,
5535 HNS_ROCE_EQC_NXT_EQE_BA_L_M,
5536 HNS_ROCE_EQC_NXT_EQE_BA_L_S, eq->nxt_eqe_ba >> 12);
5537
5538 /* set nxt_eqe_ba [63:44] */
5539 roce_set_field(eqc->nxt_eqe_ba1,
5540 HNS_ROCE_EQC_NXT_EQE_BA_H_M,
5541 HNS_ROCE_EQC_NXT_EQE_BA_H_S, eq->nxt_eqe_ba >> 44);
5542 }
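/*
 * The shift amounts in hns_roce_config_eqc() encode how the EQ context
 * splits a 64-bit DMA address across fields: eqe_ba >> 3 supplies
 * address bits [34:3] (the hardware assumes 8-byte alignment) and
 * eqe_ba >> 35 supplies bits [63:35]. Worked example: if
 * eqe_ba == (1ULL << 35) | 0x1000, the low field stores 0x200 and the
 * high field stores 1 after roce_set_field() masks each value.
 * cur_eqe_ba and nxt_eqe_ba are split the same way, but at page
 * granularity (>> 12 and up).
 */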
5543
5544 static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
5545 struct hns_roce_eq *eq)
5546 {
5547 struct device *dev = hr_dev->dev;
5548 int eq_alloc_done = 0;
5549 int eq_buf_cnt = 0;
5550 int eqe_alloc;
5551 u32 buf_chk_sz;
5552 u32 bt_chk_sz;
5553 u32 mhop_num;
5554 u64 size;
5555 u64 idx;
5556 int ba_num;
5557 int bt_num;
5558 int record_i;
5559 int record_j;
5560 int i = 0;
5561 int j = 0;
5562
5563 mhop_num = hr_dev->caps.eqe_hop_num;
5564 buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
5565 bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
5566
5567 ba_num = DIV_ROUND_UP(PAGE_ALIGN(eq->entries * eq->eqe_size),
5568 buf_chk_sz);
5569 bt_num = DIV_ROUND_UP(ba_num, bt_chk_sz / BA_BYTE_LEN);
5570
5571 if (mhop_num == HNS_ROCE_HOP_NUM_0) {
5572 if (eq->entries > buf_chk_sz / eq->eqe_size) {
5573 dev_err(dev, "eq entries %d is larger than buf_pg_sz!\n",
5574 eq->entries);
5575 return -EINVAL;
5576 }
5577 eq->bt_l0 = dma_alloc_coherent(dev, eq->entries * eq->eqe_size,
5578 &(eq->l0_dma), GFP_KERNEL);
5579 if (!eq->bt_l0)
5580 return -ENOMEM;
5581
5582 eq->cur_eqe_ba = eq->l0_dma;
5583 eq->nxt_eqe_ba = 0;
5584
5585 return 0;
5586 }
5587
5588 eq->buf_dma = kcalloc(ba_num, sizeof(*eq->buf_dma), GFP_KERNEL);
5589 if (!eq->buf_dma)
5590 return -ENOMEM;
5591 eq->buf = kcalloc(ba_num, sizeof(*eq->buf), GFP_KERNEL);
5592 if (!eq->buf)
5593 goto err_kcalloc_buf;
5594
5595 if (mhop_num == 2) {
5596 eq->l1_dma = kcalloc(bt_num, sizeof(*eq->l1_dma), GFP_KERNEL);
5597 if (!eq->l1_dma)
5598 goto err_kcalloc_l1_dma;
5599
5600 eq->bt_l1 = kcalloc(bt_num, sizeof(*eq->bt_l1), GFP_KERNEL);
5601 if (!eq->bt_l1)
5602 goto err_kcalloc_bt_l1;
5603 }
5604
5605 /* alloc L0 BT */
5606 eq->bt_l0 = dma_alloc_coherent(dev, bt_chk_sz, &eq->l0_dma, GFP_KERNEL);
5607 if (!eq->bt_l0)
5608 goto err_dma_alloc_l0;
5609
5610 if (mhop_num == 1) {
5611 if (ba_num > (bt_chk_sz / BA_BYTE_LEN))
5612 dev_err(dev, "ba_num %d is too large for 1 hop\n",
5613 ba_num);
5614
5615 /* alloc buf */
5616 for (i = 0; i < bt_chk_sz / BA_BYTE_LEN; i++) {
5617 if (eq_buf_cnt + 1 < ba_num) {
5618 size = buf_chk_sz;
5619 } else {
5620 eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
5621 size = (eq->entries - eqe_alloc) * eq->eqe_size;
5622 }
5623 eq->buf[i] = dma_alloc_coherent(dev, size,
5624 &(eq->buf_dma[i]),
5625 GFP_KERNEL);
5626 if (!eq->buf[i])
5627 goto err_dma_alloc_buf;
5628
5629 *(eq->bt_l0 + i) = eq->buf_dma[i];
5630
5631 eq_buf_cnt++;
5632 if (eq_buf_cnt >= ba_num)
5633 break;
5634 }
5635 eq->cur_eqe_ba = eq->buf_dma[0];
5636 if (ba_num > 1)
5637 eq->nxt_eqe_ba = eq->buf_dma[1];
5638
5639 } else if (mhop_num == 2) {
5640 /* alloc L1 BT and buf */
5641 for (i = 0; i < bt_chk_sz / BA_BYTE_LEN; i++) {
5642 eq->bt_l1[i] = dma_alloc_coherent(dev, bt_chk_sz,
5643 &(eq->l1_dma[i]),
5644 GFP_KERNEL);
5645 if (!eq->bt_l1[i])
5646 goto err_dma_alloc_l1;
5647 *(eq->bt_l0 + i) = eq->l1_dma[i];
5648
5649 for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
5650 idx = i * bt_chk_sz / BA_BYTE_LEN + j;
5651 if (eq_buf_cnt + 1 < ba_num) {
5652 size = buf_chk_sz;
5653 } else {
5654 eqe_alloc = (buf_chk_sz / eq->eqe_size)
5655 * idx;
5656 size = (eq->entries - eqe_alloc)
5657 * eq->eqe_size;
5658 }
5659 eq->buf[idx] = dma_alloc_coherent(dev, size,
5660 &(eq->buf_dma[idx]),
5661 GFP_KERNEL);
5662 if (!eq->buf[idx])
5663 goto err_dma_alloc_buf;
5664
5665 *(eq->bt_l1[i] + j) = eq->buf_dma[idx];
5666
5667 eq_buf_cnt++;
5668 if (eq_buf_cnt >= ba_num) {
5669 eq_alloc_done = 1;
5670 break;
5671 }
5672 }
5673
5674 if (eq_alloc_done)
5675 break;
5676 }
5677 eq->cur_eqe_ba = eq->buf_dma[0];
5678 if (ba_num > 1)
5679 eq->nxt_eqe_ba = eq->buf_dma[1];
5680 }
5681
5682 eq->l0_last_num = i + 1;
5683 if (mhop_num == 2)
5684 eq->l1_last_num = j + 1;
5685
5686 return 0;
5687
5688 err_dma_alloc_l1:
5689 dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
5690 eq->bt_l0 = NULL;
5691 eq->l0_dma = 0;
5692 for (i -= 1; i >= 0; i--) {
5693 dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
5694 eq->l1_dma[i]);
5695
5696 for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
5697 idx = i * bt_chk_sz / BA_BYTE_LEN + j;
5698 dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
5699 eq->buf_dma[idx]);
5700 }
5701 }
5702 goto err_dma_alloc_l0;
5703
5704 err_dma_alloc_buf:
5705 dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
5706 eq->bt_l0 = NULL;
5707 eq->l0_dma = 0;
5708
5709 if (mhop_num == 1)
5710 for (i -= 1; i >= 0; i--)
5711 dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
5712 eq->buf_dma[i]);
5713 else if (mhop_num == 2) {
5714 record_i = i;
5715 record_j = j;
5716 for (; i >= 0; i--) {
5717 dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
5718 eq->l1_dma[i]);
5719
5720 for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
5721 if (i == record_i && j >= record_j)
5722 break;
5723
5724 idx = i * bt_chk_sz / BA_BYTE_LEN + j;
5725 dma_free_coherent(dev, buf_chk_sz,
5726 eq->buf[idx],
5727 eq->buf_dma[idx]);
5728 }
5729 }
5730 }
5731
5732 err_dma_alloc_l0:
5733 kfree(eq->bt_l1);
5734 eq->bt_l1 = NULL;
5735
5736 err_kcalloc_bt_l1:
5737 kfree(eq->l1_dma);
5738 eq->l1_dma = NULL;
5739
5740 err_kcalloc_l1_dma:
5741 kfree(eq->buf);
5742 eq->buf = NULL;
5743
5744 err_kcalloc_buf:
5745 kfree(eq->buf_dma);
5746 eq->buf_dma = NULL;
5747
5748 return -ENOMEM;
5749 }
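/*
 * Buffer layout built by hns_roce_mhop_alloc_eq(), by hop count:
 *   hop 0: bt_l0 is itself the EQE buffer (no base-address table).
 *   1 hop: bt_l0 holds base addresses (BAs), one per buffer chunk.
 *   2 hop: bt_l0 points at L1 tables (bt_l1[i]); each L1 entry points
 *          at a buffer chunk.
 * As a worked example, assuming 4 KB chunks (eqe_ba_pg_sz ==
 * eqe_buf_pg_sz == 0): one BT chunk holds 4096 / BA_BYTE_LEN = 512 BAs
 * and one buffer chunk holds 4096 / 4 = 1024 CEQEs, so a single-hop
 * CEQ can address up to 512 * 1024 entries.
 */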
5750
5751 static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
5752 struct hns_roce_eq *eq,
5753 unsigned int eq_cmd)
5754 {
5755 struct device *dev = hr_dev->dev;
5756 struct hns_roce_cmd_mailbox *mailbox;
5757 u32 buf_chk_sz = 0;
5758 int ret;
5759
5760 /* Allocate mailbox memory */
5761 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5762 if (IS_ERR(mailbox))
5763 return PTR_ERR(mailbox);
5764
5765 if (!hr_dev->caps.eqe_hop_num) {
5766 buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
5767
5768 eq->buf_list = kzalloc(sizeof(struct hns_roce_buf_list),
5769 GFP_KERNEL);
5770 if (!eq->buf_list) {
5771 ret = -ENOMEM;
5772 goto free_cmd_mbox;
5773 }
5774
5775 eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz,
5776 &(eq->buf_list->map),
5777 GFP_KERNEL);
5778 if (!eq->buf_list->buf) {
5779 ret = -ENOMEM;
5780 goto err_alloc_buf;
5781 }
5782
5783 } else {
5784 ret = hns_roce_mhop_alloc_eq(hr_dev, eq);
5785 if (ret) {
5786 /* pass the allocator's error code back to the caller */
5787 goto free_cmd_mbox;
5788 }
5789 }
5790
5791 hns_roce_config_eqc(hr_dev, eq, mailbox->buf);
5792
5793 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
5794 eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
5795 if (ret) {
5796 dev_err(dev, "[mailbox cmd] create eqc failed.\n");
5797 goto err_cmd_mbox;
5798 }
5799
5800 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5801
5802 return 0;
5803
5804 err_cmd_mbox:
5805 if (!hr_dev->caps.eqe_hop_num)
5806 dma_free_coherent(dev, buf_chk_sz, eq->buf_list->buf,
5807 eq->buf_list->map);
5808 else {
5809 hns_roce_mhop_free_eq(hr_dev, eq);
5810 goto free_cmd_mbox;
5811 }
5812
5813 err_alloc_buf:
5814 kfree(eq->buf_list);
5815
5816 free_cmd_mbox:
5817 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5818
5819 return ret;
5820 }
5821
5822 static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
5823 int comp_num, int aeq_num, int other_num)
5824 {
5825 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
5826 int i, j;
5827 int ret;
5828
5829 for (i = 0; i < irq_num; i++) {
5830 hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
5831 GFP_KERNEL);
5832 if (!hr_dev->irq_names[i]) {
5833 ret = -ENOMEM;
5834 goto err_kzalloc_failed;
5835 }
5836 }
5837
5838 /* irq contains: abnormal + AEQ + CEQ */
5839 for (j = 0; j < other_num; j++)
5840 snprintf((char *)hr_dev->irq_names[j],
5841 HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", j);
5842
5843 for (j = other_num; j < (other_num + aeq_num); j++)
5844 snprintf((char *)hr_dev->irq_names[j],
5845 HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d",
5846 j - other_num);
5847
5848 for (j = (other_num + aeq_num); j < irq_num; j++)
5849 snprintf((char *)hr_dev->irq_names[j],
5850 HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d",
5851 j - other_num - aeq_num);
5852
5853 for (j = 0; j < irq_num; j++) {
5854 if (j < other_num)
5855 ret = request_irq(hr_dev->irq[j],
5856 hns_roce_v2_msix_interrupt_abn,
5857 0, hr_dev->irq_names[j], hr_dev);
5858
5859 else if (j < (other_num + comp_num))
5860 ret = request_irq(eq_table->eq[j - other_num].irq,
5861 hns_roce_v2_msix_interrupt_eq,
5862 0, hr_dev->irq_names[j + aeq_num],
5863 &eq_table->eq[j - other_num]);
5864 else
5865 ret = request_irq(eq_table->eq[j - other_num].irq,
5866 hns_roce_v2_msix_interrupt_eq,
5867 0, hr_dev->irq_names[j - comp_num],
5868 &eq_table->eq[j - other_num]);
5869 if (ret) {
5870 dev_err(hr_dev->dev, "Request irq error!\n");
5871 goto err_request_failed;
5872 }
5873 }
5874
5875 return 0;
5876
5877 err_request_failed:
5878 for (j -= 1; j >= 0; j--)
5879 if (j < other_num)
5880 free_irq(hr_dev->irq[j], hr_dev);
5881 else
5882 free_irq(eq_table->eq[j - other_num].irq,
5883 &eq_table->eq[j - other_num]);
5884
5885 err_kzalloc_failed:
5886 for (i -= 1; i >= 0; i--)
5887 kfree(hr_dev->irq_names[i]);
5888
5889 return ret;
5890 }
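/*
 * irq_names[] is laid out as abnormal, then AEQ, then CEQ names, while
 * eq_table->eq[] stores completion EQs first. The request_irq() calls
 * above therefore remap indices: j + aeq_num picks the CEQ name for a
 * completion vector and j - comp_num picks the AEQ name for an async
 * vector, so each handler is registered under the matching name.
 */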
5891
5892 static void __hns_roce_free_irq(struct hns_roce_dev *hr_dev)
5893 {
5894 int irq_num;
5895 int eq_num;
5896 int i;
5897
5898 eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
5899 irq_num = eq_num + hr_dev->caps.num_other_vectors;
5900
5901 for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
5902 free_irq(hr_dev->irq[i], hr_dev);
5903
5904 for (i = 0; i < eq_num; i++)
5905 free_irq(hr_dev->eq_table.eq[i].irq, &hr_dev->eq_table.eq[i]);
5906
5907 for (i = 0; i < irq_num; i++)
5908 kfree(hr_dev->irq_names[i]);
5909 }
5910
5911 static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
5912 {
5913 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
5914 struct device *dev = hr_dev->dev;
5915 struct hns_roce_eq *eq;
5916 unsigned int eq_cmd;
5917 int irq_num;
5918 int eq_num;
5919 int other_num;
5920 int comp_num;
5921 int aeq_num;
5922 int i;
5923 int ret;
5924
5925 other_num = hr_dev->caps.num_other_vectors;
5926 comp_num = hr_dev->caps.num_comp_vectors;
5927 aeq_num = hr_dev->caps.num_aeq_vectors;
5928
5929 eq_num = comp_num + aeq_num;
5930 irq_num = eq_num + other_num;
5931
5932 eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
5933 if (!eq_table->eq)
5934 return -ENOMEM;
5935
5936 /* create eq */
5937 for (i = 0; i < eq_num; i++) {
5938 eq = &eq_table->eq[i];
5939 eq->hr_dev = hr_dev;
5940 eq->eqn = i;
5941 if (i < comp_num) {
5942 /* CEQ */
5943 eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
5944 eq->type_flag = HNS_ROCE_CEQ;
5945 eq->entries = hr_dev->caps.ceqe_depth;
5946 eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
5947 eq->irq = hr_dev->irq[i + other_num + aeq_num];
5948 eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
5949 eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
5950 } else {
5951 /* AEQ */
5952 eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
5953 eq->type_flag = HNS_ROCE_AEQ;
5954 eq->entries = hr_dev->caps.aeqe_depth;
5955 eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
5956 eq->irq = hr_dev->irq[i - comp_num + other_num];
5957 eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
5958 eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
5959 }
5960
5961 ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
5962 if (ret) {
5963 dev_err(dev, "eq create failed.\n");
5964 goto err_create_eq_fail;
5965 }
5966 }
5967
5968 /* enable irq */
5969 hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);
5970
5971 ret = __hns_roce_request_irq(hr_dev, irq_num, comp_num,
5972 aeq_num, other_num);
5973 if (ret) {
5974 dev_err(dev, "Request irq failed.\n");
5975 goto err_request_irq_fail;
5976 }
5977
5978 hr_dev->irq_workq =
5979 create_singlethread_workqueue("hns_roce_irq_workqueue");
5980 if (!hr_dev->irq_workq) {
5981 dev_err(dev, "Create irq workqueue failed!\n");
5982 ret = -ENOMEM;
5983 goto err_create_wq_fail;
5984 }
5985
5986 return 0;
5987
5988 err_create_wq_fail:
5989 __hns_roce_free_irq(hr_dev);
5990
5991 err_request_irq_fail:
5992 hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
5993
5994 err_create_eq_fail:
5995 for (i -= 1; i >= 0; i--)
5996 hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
5997 kfree(eq_table->eq);
5998
5999 return ret;
6000 }
6001
6002 static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
6003 {
6004 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
6005 int eq_num;
6006 int i;
6007
6008 eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
6009
6010 /* Disable irq */
6011 hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
6012
6013 __hns_roce_free_irq(hr_dev);
6014
6015 for (i = 0; i < eq_num; i++) {
6016 hns_roce_v2_destroy_eqc(hr_dev, i);
6017
6018 hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
6019 }
6020
6021 kfree(eq_table->eq);
6022
6023 flush_workqueue(hr_dev->irq_workq);
6024 destroy_workqueue(hr_dev->irq_workq);
6025 }
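/*
 * Teardown above is the reverse of hns_roce_v2_init_eq_table(): mask
 * all vectors first so no new events fire, free the IRQs, destroy each
 * EQ context in hardware before freeing its backing memory, and only
 * then drain and destroy the workqueue that services deferred events.
 */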
6026
6027 static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
6028 struct hns_roce_srq *srq, u32 pdn, u16 xrcd,
6029 u32 cqn, void *mb_buf, u64 *mtts_wqe,
6030 u64 *mtts_idx, dma_addr_t dma_handle_wqe,
6031 dma_addr_t dma_handle_idx)
6032 {
6033 struct hns_roce_srq_context *srq_context;
6034
6035 srq_context = mb_buf;
6036 memset(srq_context, 0, sizeof(*srq_context));
6037
6038 roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQ_ST_M,
6039 SRQC_BYTE_4_SRQ_ST_S, 1);
6040
6041 roce_set_field(srq_context->byte_4_srqn_srqst,
6042 SRQC_BYTE_4_SRQ_WQE_HOP_NUM_M,
6043 SRQC_BYTE_4_SRQ_WQE_HOP_NUM_S,
6044 (hr_dev->caps.srqwqe_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
6045 hr_dev->caps.srqwqe_hop_num));
6046 roce_set_field(srq_context->byte_4_srqn_srqst,
6047 SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
6048 ilog2(srq->max));
6049
6050 roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
6051 SRQC_BYTE_4_SRQN_S, srq->srqn);
6052
6053 roce_set_field(srq_context->byte_8_limit_wl, SRQC_BYTE_8_SRQ_LIMIT_WL_M,
6054 SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
6055
6056 roce_set_field(srq_context->byte_12_xrcd, SRQC_BYTE_12_SRQ_XRCD_M,
6057 SRQC_BYTE_12_SRQ_XRCD_S, xrcd);
6058
6059 srq_context->wqe_bt_ba = cpu_to_le32((u32)(dma_handle_wqe >> 3));
6060
6061 roce_set_field(srq_context->byte_24_wqe_bt_ba,
6062 SRQC_BYTE_24_SRQ_WQE_BT_BA_M,
6063 SRQC_BYTE_24_SRQ_WQE_BT_BA_S,
6064 dma_handle_wqe >> 35);
6065
6066 roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_PD_M,
6067 SRQC_BYTE_28_PD_S, pdn);
6068 roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_RQWS_M,
6069 SRQC_BYTE_28_RQWS_S, srq->max_gs <= 0 ? 0 :
6070 fls(srq->max_gs - 1));
6071
6072 srq_context->idx_bt_ba = cpu_to_le32(dma_handle_idx >> 3);
6073 roce_set_field(srq_context->rsv_idx_bt_ba,
6074 SRQC_BYTE_36_SRQ_IDX_BT_BA_M,
6075 SRQC_BYTE_36_SRQ_IDX_BT_BA_S,
6076 dma_handle_idx >> 35);
6077
6078 srq_context->idx_cur_blk_addr =
6079 cpu_to_le32(mtts_idx[0] >> PAGE_ADDR_SHIFT);
6080 roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
6081 SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M,
6082 SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S,
6083 mtts_idx[0] >> (32 + PAGE_ADDR_SHIFT));
6084 roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
6085 SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M,
6086 SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S,
6087 hr_dev->caps.idx_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
6088 hr_dev->caps.idx_hop_num);
6089
6090 roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
6091 SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,
6092 SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S,
6093 hr_dev->caps.idx_ba_pg_sz + PG_SHIFT_OFFSET);
6094 roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
6095 SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M,
6096 SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S,
6097 hr_dev->caps.idx_buf_pg_sz + PG_SHIFT_OFFSET);
6098
6099 srq_context->idx_nxt_blk_addr =
6100 cpu_to_le32(mtts_idx[1] >> PAGE_ADDR_SHIFT);
6101 roce_set_field(srq_context->rsv_idxnxtblkaddr,
6102 SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M,
6103 SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S,
6104 mtts_idx[1] >> (32 + PAGE_ADDR_SHIFT));
6105 roce_set_field(srq_context->byte_56_xrc_cqn,
6106 SRQC_BYTE_56_SRQ_XRC_CQN_M, SRQC_BYTE_56_SRQ_XRC_CQN_S,
6107 cqn);
6108 roce_set_field(srq_context->byte_56_xrc_cqn,
6109 SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_M,
6110 SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_S,
6111 hr_dev->caps.srqwqe_ba_pg_sz + PG_SHIFT_OFFSET);
6112 roce_set_field(srq_context->byte_56_xrc_cqn,
6113 SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_M,
6114 SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_S,
6115 hr_dev->caps.srqwqe_buf_pg_sz + PG_SHIFT_OFFSET);
6116
6117 roce_set_bit(srq_context->db_record_addr_record_en,
6118 SRQC_BYTE_60_SRQ_RECORD_EN_S, 0);
6119 }
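/*
 * The SRQ context above uses the same address-splitting convention as
 * the EQ context: base-table addresses are stored as bits [34:3]
 * (addr >> 3) plus bits [63:35] (addr >> 35), while current/next block
 * addresses are stored at page granularity via PAGE_ADDR_SHIFT. The
 * doorbell record is explicitly disabled (SRQ_RECORD_EN = 0).
 */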
6120
6121 static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
6122 struct ib_srq_attr *srq_attr,
6123 enum ib_srq_attr_mask srq_attr_mask,
6124 struct ib_udata *udata)
6125 {
6126 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
6127 struct hns_roce_srq *srq = to_hr_srq(ibsrq);
6128 struct hns_roce_srq_context *srq_context;
6129 struct hns_roce_srq_context *srqc_mask;
6130 struct hns_roce_cmd_mailbox *mailbox;
6131 int ret;
6132
6133 if (srq_attr_mask & IB_SRQ_LIMIT) {
6134 if (srq_attr->srq_limit >= srq->max)
6135 return -EINVAL;
6136
6137 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
6138 if (IS_ERR(mailbox))
6139 return PTR_ERR(mailbox);
6140
6141 srq_context = mailbox->buf;
6142 srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;
6143
6144 memset(srqc_mask, 0xff, sizeof(*srqc_mask));
6145
6146 roce_set_field(srq_context->byte_8_limit_wl,
6147 SRQC_BYTE_8_SRQ_LIMIT_WL_M,
6148 SRQC_BYTE_8_SRQ_LIMIT_WL_S, srq_attr->srq_limit);
6149 roce_set_field(srqc_mask->byte_8_limit_wl,
6150 SRQC_BYTE_8_SRQ_LIMIT_WL_M,
6151 SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
6152
6153 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0,
6154 HNS_ROCE_CMD_MODIFY_SRQC,
6155 HNS_ROCE_CMD_TIMEOUT_MSECS);
6156 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
6157 if (ret) {
6158 dev_err(hr_dev->dev,
6159 "MODIFY SRQ Failed to cmd mailbox.\n");
6160 return ret;
6161 }
6162 }
6163
6164 return 0;
6165 }
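/*
 * The MODIFY_SRQC mailbox buffer carries two contexts back to back:
 * the new values followed by a mask context. The mask is primed to all
 * ones and only the SRQ_LIMIT_WL field is cleared, which presumably
 * tells the hardware to update that field alone and leave the rest of
 * the context untouched.
 */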
6166
6167 static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
6168 {
6169 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
6170 struct hns_roce_srq *srq = to_hr_srq(ibsrq);
6171 struct hns_roce_srq_context *srq_context;
6172 struct hns_roce_cmd_mailbox *mailbox;
6173 int limit_wl;
6174 int ret;
6175
6176 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
6177 if (IS_ERR(mailbox))
6178 return PTR_ERR(mailbox);
6179
6180 srq_context = mailbox->buf;
6181 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srq->srqn, 0,
6182 HNS_ROCE_CMD_QUERY_SRQC,
6183 HNS_ROCE_CMD_TIMEOUT_MSECS);
6184 if (ret) {
6185 dev_err(hr_dev->dev, "QUERY SRQ cmd process error\n");
6186 goto out;
6187 }
6188
6189 limit_wl = roce_get_field(srq_context->byte_8_limit_wl,
6190 SRQC_BYTE_8_SRQ_LIMIT_WL_M,
6191 SRQC_BYTE_8_SRQ_LIMIT_WL_S);
6192
6193 attr->srq_limit = limit_wl;
6194 attr->max_wr = srq->max - 1;
6195 attr->max_sge = srq->max_gs;
6198
6199 out:
6200 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
6201 return ret;
6202 }
6203
6204 static int find_empty_entry(struct hns_roce_idx_que *idx_que,
6205 unsigned long size)
6206 {
6207 int wqe_idx;
6208
6209 if (unlikely(bitmap_full(idx_que->bitmap, size)))
6210 return -ENOSPC;
6211
6212 wqe_idx = find_first_zero_bit(idx_que->bitmap, size);
6213
6214 bitmap_set(idx_que->bitmap, wqe_idx, 1);
6215
6216 return wqe_idx;
6217 }
6218
6219 static void fill_idx_queue(struct hns_roce_idx_que *idx_que,
6220 int cur_idx, int wqe_idx)
6221 {
6222 __le32 *addr;
6223 
6224 addr = (__le32 *)hns_roce_buf_offset(&idx_que->idx_buf,
6225 cur_idx * idx_que->entry_sz);
6226 *addr = cpu_to_le32(wqe_idx);
6227 }
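/*
 * SRQ bookkeeping used by the receive path below: idx_que->bitmap
 * tracks which WQE slots are free, find_empty_entry() claims one, and
 * fill_idx_queue() publishes the claimed WQE index (stored
 * little-endian for the device) into the ring slot the hardware
 * consumes next. This decouples WQE slot reuse from ring order, since
 * SRQ WQEs can complete out of order.
 */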
6228
6229 static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
6230 const struct ib_recv_wr *wr,
6231 const struct ib_recv_wr **bad_wr)
6232 {
6233 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
6234 struct hns_roce_srq *srq = to_hr_srq(ibsrq);
6235 struct hns_roce_v2_wqe_data_seg *dseg;
6236 struct hns_roce_v2_db srq_db;
6237 unsigned long flags;
6238 int ret = 0;
6239 int wqe_idx;
6240 void *wqe;
6241 int nreq;
6242 int ind;
6243 int i;
6244
6245 spin_lock_irqsave(&srq->lock, flags);
6246
6247 ind = srq->head & (srq->max - 1);
6248
6249 for (nreq = 0; wr; ++nreq, wr = wr->next) {
6250 if (unlikely(wr->num_sge > srq->max_gs)) {
6251 ret = -EINVAL;
6252 *bad_wr = wr;
6253 break;
6254 }
6255
6256 if (unlikely(srq->head == srq->tail)) {
6257 ret = -ENOMEM;
6258 *bad_wr = wr;
6259 break;
6260 }
6261
6262 wqe_idx = find_empty_entry(&srq->idx_que, srq->max);
6263 if (wqe_idx < 0) {
6264 ret = -ENOMEM;
6265 *bad_wr = wr;
6266 break;
6267 }
6268
6269 fill_idx_queue(&srq->idx_que, ind, wqe_idx);
6270 wqe = get_srq_wqe(srq, wqe_idx);
6271 dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
6272
6273 for (i = 0; i < wr->num_sge; ++i) {
6274 dseg[i].len = cpu_to_le32(wr->sg_list[i].length);
6275 dseg[i].lkey = cpu_to_le32(wr->sg_list[i].lkey);
6276 dseg[i].addr = cpu_to_le64(wr->sg_list[i].addr);
6277 }
6278
6279 if (i < srq->max_gs) {
6280 dseg[i].len = 0;
6281 dseg[i].lkey = cpu_to_le32(0x100);
6282 dseg[i].addr = 0;
6283 }
6284
6285 srq->wrid[wqe_idx] = wr->wr_id;
6286 ind = (ind + 1) & (srq->max - 1);
6287 }
6288
6289 if (likely(nreq)) {
6290 srq->head += nreq;
6291
6292 /*
6293  * Make sure that descriptors are written before
6294  * the doorbell is rung.
6295  */
6296 wmb();
6297
6298 srq_db.byte_4 =
6299 cpu_to_le32(HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
6300 (srq->srqn & V2_DB_BYTE_4_TAG_M));
6301 srq_db.parameter = cpu_to_le32(srq->head);
6302
6303 hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
6304
6305 }
6306
6307 spin_unlock_irqrestore(&srq->lock, flags);
6308
6309 return ret;
6310 }
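/*
 * SRQ doorbell format used above: byte_4 packs the doorbell command
 * (HNS_ROCE_V2_SRQ_DB shifted into the CMD field) with the SRQ number
 * in the TAG field, and parameter carries the new head (producer)
 * index. The wmb() ensures all WQE and index-queue stores are visible
 * to the device before the 64-bit doorbell write lands.
 */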
6311
6312 static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
6313 .query_cqc_info = hns_roce_v2_query_cqc_info,
6314 };
6315
6316 static const struct ib_device_ops hns_roce_v2_dev_ops = {
6317 .destroy_qp = hns_roce_v2_destroy_qp,
6318 .modify_cq = hns_roce_v2_modify_cq,
6319 .poll_cq = hns_roce_v2_poll_cq,
6320 .post_recv = hns_roce_v2_post_recv,
6321 .post_send = hns_roce_v2_post_send,
6322 .query_qp = hns_roce_v2_query_qp,
6323 .req_notify_cq = hns_roce_v2_req_notify_cq,
6324 };
6325
6326 static const struct ib_device_ops hns_roce_v2_dev_srq_ops = {
6327 .modify_srq = hns_roce_v2_modify_srq,
6328 .post_srq_recv = hns_roce_v2_post_srq_recv,
6329 .query_srq = hns_roce_v2_query_srq,
6330 };
6331
6332 static const struct hns_roce_hw hns_roce_hw_v2 = {
6333 .cmq_init = hns_roce_v2_cmq_init,
6334 .cmq_exit = hns_roce_v2_cmq_exit,
6335 .hw_profile = hns_roce_v2_profile,
6336 .hw_init = hns_roce_v2_init,
6337 .hw_exit = hns_roce_v2_exit,
6338 .post_mbox = hns_roce_v2_post_mbox,
6339 .chk_mbox = hns_roce_v2_chk_mbox,
6340 .rst_prc_mbox = hns_roce_v2_rst_process_cmd,
6341 .set_gid = hns_roce_v2_set_gid,
6342 .set_mac = hns_roce_v2_set_mac,
6343 .write_mtpt = hns_roce_v2_write_mtpt,
6344 .rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
6345 .frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt,
6346 .mw_write_mtpt = hns_roce_v2_mw_write_mtpt,
6347 .write_cqc = hns_roce_v2_write_cqc,
6348 .set_hem = hns_roce_v2_set_hem,
6349 .clear_hem = hns_roce_v2_clear_hem,
6350 .modify_qp = hns_roce_v2_modify_qp,
6351 .query_qp = hns_roce_v2_query_qp,
6352 .destroy_qp = hns_roce_v2_destroy_qp,
6353 .qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
6354 .modify_cq = hns_roce_v2_modify_cq,
6355 .post_send = hns_roce_v2_post_send,
6356 .post_recv = hns_roce_v2_post_recv,
6357 .req_notify_cq = hns_roce_v2_req_notify_cq,
6358 .poll_cq = hns_roce_v2_poll_cq,
6359 .init_eq = hns_roce_v2_init_eq_table,
6360 .cleanup_eq = hns_roce_v2_cleanup_eq_table,
6361 .write_srqc = hns_roce_v2_write_srqc,
6362 .modify_srq = hns_roce_v2_modify_srq,
6363 .query_srq = hns_roce_v2_query_srq,
6364 .post_srq_recv = hns_roce_v2_post_srq_recv,
6365 .hns_roce_dev_ops = &hns_roce_v2_dev_ops,
6366 .hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
6367 };
6368
6369 static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
6370 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
6371 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
6372 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
6373 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
6374 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
6375 /* required last entry */
6376 {0, }
6377 };
6378
6379 MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
6380
6381 static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
6382 struct hnae3_handle *handle)
6383 {
6384 struct hns_roce_v2_priv *priv = hr_dev->priv;
6385 int i;
6386
6387 hr_dev->hw = &hns_roce_hw_v2;
6388 hr_dev->dfx = &hns_roce_dfx_hw_v2;
6389 hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
6390 hr_dev->odb_offset = hr_dev->sdb_offset;
6391
6392 /* Get info from NIC driver. */
6393 hr_dev->reg_base = handle->rinfo.roce_io_base;
6394 hr_dev->caps.num_ports = 1;
6395 hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
6396 hr_dev->iboe.phy_port[0] = 0;
6397
6398 addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
6399 hr_dev->iboe.netdevs[0]->dev_addr);
6400
6401 for (i = 0; i < HNS_ROCE_V2_MAX_IRQ_NUM; i++)
6402 hr_dev->irq[i] = pci_irq_vector(handle->pdev,
6403 i + handle->rinfo.base_vector);
6404
6405 /* cmd issue mode: 0 is poll, 1 is event */
6406 hr_dev->cmd_mod = 1;
6407 hr_dev->loop_idc = 0;
6408
6409 hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
6410 priv->handle = handle;
6411
6412 return 0;
6413 }
6414
6415 static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
6416 {
6417 struct hns_roce_dev *hr_dev;
6418 int ret;
6419
6420 hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
6421 if (!hr_dev)
6422 return -ENOMEM;
6423
6424 hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
6425 if (!hr_dev->priv) {
6426 ret = -ENOMEM;
6427 goto error_failed_kzalloc;
6428 }
6429
6430 hr_dev->pci_dev = handle->pdev;
6431 hr_dev->dev = &handle->pdev->dev;
6432
6433 ret = hns_roce_hw_v2_get_cfg(hr_dev, handle);
6434 if (ret) {
6435 dev_err(hr_dev->dev, "Get Configuration failed!\n");
6436 goto error_failed_get_cfg;
6437 }
6438
6439 ret = hns_roce_init(hr_dev);
6440 if (ret) {
6441 dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
6442 goto error_failed_get_cfg;
6443 }
6444
6445 handle->priv = hr_dev;
6446
6447 return 0;
6448
6449 error_failed_get_cfg:
6450 kfree(hr_dev->priv);
6451
6452 error_failed_kzalloc:
6453 ib_dealloc_device(&hr_dev->ib_dev);
6454
6455 return ret;
6456 }
6457
6458 static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
6459 bool reset)
6460 {
6461 struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
6462
6463 if (!hr_dev)
6464 return;
6465
6466 handle->priv = NULL;
6467 hns_roce_exit(hr_dev);
6468 kfree(hr_dev->priv);
6469 ib_dealloc_device(&hr_dev->ib_dev);
6470 }
6471
6472 static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
6473 {
6474 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
6475 const struct pci_device_id *id;
6476 struct device *dev = &handle->pdev->dev;
6477 int ret;
6478
6479 handle->rinfo.instance_state = HNS_ROCE_STATE_INIT;
6480
6481 if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) {
6482 handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
6483 goto reset_chk_err;
6484 }
6485
6486 id = pci_match_id(hns_roce_hw_v2_pci_tbl, handle->pdev);
6487 if (!id)
6488 return 0;
6489
6490 ret = __hns_roce_hw_v2_init_instance(handle);
6491 if (ret) {
6492 handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
6493 dev_err(dev, "RoCE instance init failed! ret = %d\n", ret);
6494 if (ops->ae_dev_resetting(handle) ||
6495 ops->get_hw_reset_stat(handle))
6496 goto reset_chk_err;
6497 else
6498 return ret;
6499 }
6500
6501 handle->rinfo.instance_state = HNS_ROCE_STATE_INITED;
6502
6503
6504 return 0;
6505
6506 reset_chk_err:
6507 dev_err(dev, "Device is busy in resetting state.\n"
6508 "please retry later.\n");
6509
6510 return -EBUSY;
6511 }
6512
6513 static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
6514 bool reset)
6515 {
6516 if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED)
6517 return;
6518
6519 handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT;
6520
6521 __hns_roce_hw_v2_uninit_instance(handle, reset);
6522
6523 handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
6524 }

6525 static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
6526 {
6527 struct hns_roce_dev *hr_dev;
6528 struct ib_event event;
6529
6530 if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
6531 set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
6532 return 0;
6533 }
6534
6535 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
6536 clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
6537
6538 hr_dev = (struct hns_roce_dev *)handle->priv;
6539 if (!hr_dev)
6540 return 0;
6541
6542 hr_dev->is_reset = true;
6543 hr_dev->active = false;
6544 hr_dev->dis_db = true;
6545
6546 event.event = IB_EVENT_DEVICE_FATAL;
6547 event.device = &hr_dev->ib_dev;
6548 event.element.port_num = 1;
6549 ib_dispatch_event(&event);
6550
6551 return 0;
6552 }
6553
6554 static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
6555 {
6556 struct device *dev = &handle->pdev->dev;
6557 int ret;
6558
6559 if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN,
6560 &handle->rinfo.state)) {
6561 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
6562 return 0;
6563 }
6564
6565 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT;
6566
6567 dev_info(&handle->pdev->dev, "In reset process RoCE client reinit.\n");
6568 ret = __hns_roce_hw_v2_init_instance(handle);
6569 if (ret) {
6570 /* When the reset notify type is HNAE3_INIT_CLIENT, the RoCE
6571  * engine is reinitialized here; if the reinit fails, clear
6572  * handle->priv to inform the NIC driver.
6573  */
6574 handle->priv = NULL;
6575 dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret);
6576 } else {
6577 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
6578 dev_info(dev, "Reset done, RoCE client reinit finished.\n");
6579 }
6580
6581 return ret;
6582 }
6583
6584 static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
6585 {
6586 if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state))
6587 return 0;
6588
6589 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT;
6590 dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n");
6591 msleep(HNS_ROCE_V2_HW_RST_UNINT_DELAY);
6592 __hns_roce_hw_v2_uninit_instance(handle, false);
6593
6594 return 0;
6595 }
6596
6597 static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
6598 enum hnae3_reset_notify_type type)
6599 {
6600 int ret = 0;
6601
6602 switch (type) {
6603 case HNAE3_DOWN_CLIENT:
6604 ret = hns_roce_hw_v2_reset_notify_down(handle);
6605 break;
6606 case HNAE3_INIT_CLIENT:
6607 ret = hns_roce_hw_v2_reset_notify_init(handle);
6608 break;
6609 case HNAE3_UNINIT_CLIENT:
6610 ret = hns_roce_hw_v2_reset_notify_uninit(handle);
6611 break;
6612 default:
6613 break;
6614 }
6615
6616 return ret;
6617 }
6618
6619 static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
6620 .init_instance = hns_roce_hw_v2_init_instance,
6621 .uninit_instance = hns_roce_hw_v2_uninit_instance,
6622 .reset_notify = hns_roce_hw_v2_reset_notify,
6623 };
6624
6625 static struct hnae3_client hns_roce_hw_v2_client = {
6626 .name = "hns_roce_hw_v2",
6627 .type = HNAE3_CLIENT_ROCE,
6628 .ops = &hns_roce_hw_v2_ops,
6629 };
6630
6631 static int __init hns_roce_hw_v2_init(void)
6632 {
6633 return hnae3_register_client(&hns_roce_hw_v2_client);
6634 }
6635
6636 static void __exit hns_roce_hw_v2_exit(void)
6637 {
6638 hnae3_unregister_client(&hns_roce_hw_v2_client);
6639 }
6640
6641 module_init(hns_roce_hw_v2_init);
6642 module_exit(hns_roce_hw_v2_exit);
6643
6644 MODULE_LICENSE("Dual BSD/GPL");
6645 MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
6646 MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
6647 MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
6648 MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");