This source file includes the following definitions:
- __from_ib_access_flags
- __to_ib_access_flags
- bnxt_re_build_sgl
- bnxt_re_query_device
- bnxt_re_modify_device
- bnxt_re_query_port
- bnxt_re_get_port_immutable
- bnxt_re_query_fw_str
- bnxt_re_query_pkey
- bnxt_re_query_gid
- bnxt_re_del_gid
- bnxt_re_add_gid
- bnxt_re_get_link_layer
- bnxt_re_create_fence_wqe
- bnxt_re_bind_fence_mw
- bnxt_re_destroy_fence_mr
- bnxt_re_create_fence_mr
- bnxt_re_dealloc_pd
- bnxt_re_alloc_pd
- bnxt_re_destroy_ah
- bnxt_re_stack_to_dev_nw_type
- bnxt_re_create_ah
- bnxt_re_modify_ah
- bnxt_re_query_ah
- bnxt_re_lock_cqs
- bnxt_re_unlock_cqs
- bnxt_re_destroy_qp
- __from_ib_qp_type
- bnxt_re_init_user_qp
- bnxt_re_create_shadow_qp_ah
- bnxt_re_create_shadow_qp
- bnxt_re_create_qp
- __from_ib_qp_state
- __to_ib_qp_state
- __from_ib_mtu
- __to_ib_mtu
- bnxt_re_destroy_srq
- bnxt_re_init_user_srq
- bnxt_re_create_srq
- bnxt_re_modify_srq
- bnxt_re_query_srq
- bnxt_re_post_srq_recv
- bnxt_re_modify_shadow_qp
- bnxt_re_modify_qp
- bnxt_re_query_qp
- bnxt_re_build_qp1_send_v2
- bnxt_re_build_qp1_shadow_qp_recv
- is_ud_qp
- bnxt_re_build_send_wqe
- bnxt_re_build_rdma_wqe
- bnxt_re_build_atomic_wqe
- bnxt_re_build_inv_wqe
- bnxt_re_build_reg_wqe
- bnxt_re_copy_inline_data
- bnxt_re_copy_wr_payload
- bnxt_ud_qp_hw_stall_workaround
- bnxt_re_post_send_shadow_qp
- bnxt_re_post_send
- bnxt_re_post_recv_shadow_qp
- bnxt_re_post_recv
- bnxt_re_destroy_cq
- bnxt_re_create_cq
- __req_to_ib_wc_status
- __rawqp1_to_ib_wc_status
- __rc_to_ib_wc_status
- bnxt_re_process_req_wc
- bnxt_re_check_packet_type
- bnxt_re_to_ib_nw_type
- bnxt_re_is_loopback_packet
- bnxt_re_process_raw_qp_pkt_rx
- bnxt_re_process_res_rawqp1_wc
- bnxt_re_is_vlan_pkt
- bnxt_re_process_res_rc_wc
- bnxt_re_process_res_shadow_qp_wc
- bnxt_re_process_res_ud_wc
- send_phantom_wqe
- bnxt_re_poll_cq
- bnxt_re_req_notify_cq
- bnxt_re_get_dma_mr
- bnxt_re_dereg_mr
- bnxt_re_set_page
- bnxt_re_map_mr_sg
- bnxt_re_alloc_mr
- bnxt_re_alloc_mw
- bnxt_re_dealloc_mw
- bnxt_re_page_size_ok
- fill_umem_pbl_tbl
- bnxt_re_reg_user_mr
- bnxt_re_alloc_ucontext
- bnxt_re_dealloc_ucontext
- bnxt_re_mmap
39 #include <linux/interrupt.h>
40 #include <linux/types.h>
41 #include <linux/pci.h>
42 #include <linux/netdevice.h>
43 #include <linux/if_ether.h>
44
45 #include <rdma/ib_verbs.h>
46 #include <rdma/ib_user_verbs.h>
47 #include <rdma/ib_umem.h>
48 #include <rdma/ib_addr.h>
49 #include <rdma/ib_mad.h>
50 #include <rdma/ib_cache.h>
51 #include <rdma/uverbs_ioctl.h>
52
53 #include "bnxt_ulp.h"
54
55 #include "roce_hsi.h"
56 #include "qplib_res.h"
57 #include "qplib_sp.h"
58 #include "qplib_fp.h"
59 #include "qplib_rcfw.h"
60
61 #include "bnxt_re.h"
62 #include "ib_verbs.h"
63 #include <rdma/bnxt_re-abi.h>
64
65 static int __from_ib_access_flags(int iflags)
66 {
67 int qflags = 0;
68
69 if (iflags & IB_ACCESS_LOCAL_WRITE)
70 qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
71 if (iflags & IB_ACCESS_REMOTE_READ)
72 qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
73 if (iflags & IB_ACCESS_REMOTE_WRITE)
74 qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
75 if (iflags & IB_ACCESS_REMOTE_ATOMIC)
76 qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
77 if (iflags & IB_ACCESS_MW_BIND)
78 qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
79 if (iflags & IB_ZERO_BASED)
80 qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
81 if (iflags & IB_ACCESS_ON_DEMAND)
82 qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
83 return qflags;
84 };
85
86 static enum ib_access_flags __to_ib_access_flags(int qflags)
87 {
88 enum ib_access_flags iflags = 0;
89
90 if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
91 iflags |= IB_ACCESS_LOCAL_WRITE;
92 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
93 iflags |= IB_ACCESS_REMOTE_WRITE;
94 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
95 iflags |= IB_ACCESS_REMOTE_READ;
96 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
97 iflags |= IB_ACCESS_REMOTE_ATOMIC;
98 if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
99 iflags |= IB_ACCESS_MW_BIND;
100 if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
101 iflags |= IB_ZERO_BASED;
102 if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
103 iflags |= IB_ACCESS_ON_DEMAND;
104 return iflags;
105 };
106
107 static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
108 struct bnxt_qplib_sge *sg_list, int num)
109 {
110 int i, total = 0;
111
112 for (i = 0; i < num; i++) {
113 sg_list[i].addr = ib_sg_list[i].addr;
114 sg_list[i].lkey = ib_sg_list[i].lkey;
115 sg_list[i].size = ib_sg_list[i].length;
116 total += sg_list[i].size;
117 }
118 return total;
119 }
120
121
122 int bnxt_re_query_device(struct ib_device *ibdev,
123 struct ib_device_attr *ib_attr,
124 struct ib_udata *udata)
125 {
126 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
127 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
128
129 memset(ib_attr, 0, sizeof(*ib_attr));
130 memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
131 min(sizeof(dev_attr->fw_ver),
132 sizeof(ib_attr->fw_ver)));
133 bnxt_qplib_get_guid(rdev->netdev->dev_addr,
134 (u8 *)&ib_attr->sys_image_guid);
135 ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
136 ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M;
137
138 ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
139 ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
140 ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
141 ib_attr->max_qp = dev_attr->max_qp;
142 ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
143 ib_attr->device_cap_flags =
144 IB_DEVICE_CURR_QP_STATE_MOD
145 | IB_DEVICE_RC_RNR_NAK_GEN
146 | IB_DEVICE_SHUTDOWN_PORT
147 | IB_DEVICE_SYS_IMAGE_GUID
148 | IB_DEVICE_LOCAL_DMA_LKEY
149 | IB_DEVICE_RESIZE_MAX_WR
150 | IB_DEVICE_PORT_ACTIVE_EVENT
151 | IB_DEVICE_N_NOTIFY_CQ
152 | IB_DEVICE_MEM_WINDOW
153 | IB_DEVICE_MEM_WINDOW_TYPE_2B
154 | IB_DEVICE_MEM_MGT_EXTENSIONS;
155 ib_attr->max_send_sge = dev_attr->max_qp_sges;
156 ib_attr->max_recv_sge = dev_attr->max_qp_sges;
157 ib_attr->max_sge_rd = dev_attr->max_qp_sges;
158 ib_attr->max_cq = dev_attr->max_cq;
159 ib_attr->max_cqe = dev_attr->max_cq_wqes;
160 ib_attr->max_mr = dev_attr->max_mr;
161 ib_attr->max_pd = dev_attr->max_pd;
162 ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
163 ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
164 ib_attr->atomic_cap = IB_ATOMIC_NONE;
165 ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;
166
167 ib_attr->max_ee_rd_atom = 0;
168 ib_attr->max_res_rd_atom = 0;
169 ib_attr->max_ee_init_rd_atom = 0;
170 ib_attr->max_ee = 0;
171 ib_attr->max_rdd = 0;
172 ib_attr->max_mw = dev_attr->max_mw;
173 ib_attr->max_raw_ipv6_qp = 0;
174 ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
175 ib_attr->max_mcast_grp = 0;
176 ib_attr->max_mcast_qp_attach = 0;
177 ib_attr->max_total_mcast_qp_attach = 0;
178 ib_attr->max_ah = dev_attr->max_ah;
179
180 ib_attr->max_fmr = 0;
181 ib_attr->max_map_per_fmr = 0;
182
183 ib_attr->max_srq = dev_attr->max_srq;
184 ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
185 ib_attr->max_srq_sge = dev_attr->max_srq_sges;
186
187 ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
188
189 ib_attr->max_pkeys = 1;
190 ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
191 return 0;
192 }
193
194 int bnxt_re_modify_device(struct ib_device *ibdev,
195 int device_modify_mask,
196 struct ib_device_modify *device_modify)
197 {
198 switch (device_modify_mask) {
199 case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
200 /* modifying the system image GUID is not supported; accepted as a no-op */
201
202 break;
203 case IB_DEVICE_MODIFY_NODE_DESC:
204 /* node description updates are likewise ignored */
205 break;
206 default:
207 break;
208 }
209 return 0;
210 }
211
212
213 int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
214 struct ib_port_attr *port_attr)
215 {
216 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
217 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
218
219 memset(port_attr, 0, sizeof(*port_attr));
220
221 if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
222 port_attr->state = IB_PORT_ACTIVE;
223 port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
224 } else {
225 port_attr->state = IB_PORT_DOWN;
226 port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
227 }
228 port_attr->max_mtu = IB_MTU_4096;
229 port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
230 port_attr->gid_tbl_len = dev_attr->max_sgid;
231 port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
232 IB_PORT_DEVICE_MGMT_SUP |
233 IB_PORT_VENDOR_CLASS_SUP;
234 port_attr->ip_gids = true;
235
236 port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
237 port_attr->bad_pkey_cntr = 0;
238 port_attr->qkey_viol_cntr = 0;
239 port_attr->pkey_tbl_len = dev_attr->max_pkey;
240 port_attr->lid = 0;
241 port_attr->sm_lid = 0;
242 port_attr->lmc = 0;
243 port_attr->max_vl_num = 4;
244 port_attr->sm_sl = 0;
245 port_attr->subnet_timeout = 0;
246 port_attr->init_type_reply = 0;
247 port_attr->active_speed = rdev->active_speed;
248 port_attr->active_width = rdev->active_width;
249
250 return 0;
251 }
252
253 int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
254 struct ib_port_immutable *immutable)
255 {
256 struct ib_port_attr port_attr;
257
258 if (bnxt_re_query_port(ibdev, port_num, &port_attr))
259 return -EINVAL;
260
261 immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
262 immutable->gid_tbl_len = port_attr.gid_tbl_len;
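/* advertise both RoCE v1 and RoCE v2 (UDP-encapsulated) support */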
263 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
264 immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
265 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
266 return 0;
267 }
268
269 void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
270 {
271 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
272
273 snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
274 rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
275 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
276 }
277
278 int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
279 u16 index, u16 *pkey)
280 {
281 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
282
283
284
285 memset(pkey, 0, sizeof(*pkey));
286 return bnxt_qplib_get_pkey(&rdev->qplib_res,
287 &rdev->qplib_res.pkey_tbl, index, pkey);
288 }
289
290 int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
291 int index, union ib_gid *gid)
292 {
293 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
294 int rc = 0;
295
296
297 memset(gid, 0, sizeof(*gid));
298 rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
299 &rdev->qplib_res.sgid_tbl, index,
300 (struct bnxt_qplib_gid *)gid);
301 return rc;
302 }
303
304 int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
305 {
306 int rc = 0;
307 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
308 struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
309 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
310 struct bnxt_qplib_gid *gid_to_del;
311 u16 vlan_id = 0xFFFF;
312
313
314 ctx = *context;
315 if (!ctx)
316 return -EINVAL;
317
318 if (sgid_tbl && sgid_tbl->active) {
319 if (ctx->idx >= sgid_tbl->max)
320 return -EINVAL;
321 gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
322 vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
323
324 /*
325 * GID entry 0 (the default, link-local GID) is referenced by QP1.
326 * If QP1 is still alive and this is the last reference to the
327 * entry, refuse the delete; the removal is retried once QP1 has
328 * been destroyed.
329 */
330
331 if (ctx->idx == 0 &&
332 rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
333 ctx->refcnt == 1 && rdev->qp1_sqp) {
334 dev_dbg(rdev_to_dev(rdev),
335 "Trying to delete GID0 while QP1 is alive\n");
336 return -EFAULT;
337 }
338 ctx->refcnt--;
339 if (!ctx->refcnt) {
340 rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
341 vlan_id, true);
342 if (rc) {
343 dev_err(rdev_to_dev(rdev),
344 "Failed to remove GID: %#x", rc);
345 } else {
346 ctx_tbl = sgid_tbl->ctx;
347 ctx_tbl[ctx->idx] = NULL;
348 kfree(ctx);
349 }
350 }
351 } else {
352 return -EINVAL;
353 }
354 return rc;
355 }
356
357 int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
358 {
359 int rc;
360 u32 tbl_idx = 0;
361 u16 vlan_id = 0xFFFF;
362 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
363 struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
364 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
365
366 rc = rdma_read_gid_l2_fields(attr, &vlan_id, NULL);
367 if (rc)
368 return rc;
369
370 rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
371 rdev->qplib_res.netdev->dev_addr,
372 vlan_id, true, &tbl_idx);
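/* the HW GID table may already hold this entry (the stack can present RoCE v1 and v2 variants of one GID); just take another reference */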
373 if (rc == -EALREADY) {
374 ctx_tbl = sgid_tbl->ctx;
375 ctx_tbl[tbl_idx]->refcnt++;
376 *context = ctx_tbl[tbl_idx];
377 return 0;
378 }
379
380 if (rc < 0) {
381 dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc);
382 return rc;
383 }
384
385 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
386 if (!ctx)
387 return -ENOMEM;
388 ctx_tbl = sgid_tbl->ctx;
389 ctx->idx = tbl_idx;
390 ctx->refcnt = 1;
391 ctx_tbl[tbl_idx] = ctx;
392 *context = ctx;
393
394 return rc;
395 }
396
397 enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
398 u8 port_num)
399 {
400 return IB_LINK_LAYER_ETHERNET;
401 }
402
403 #define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
404
405 static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
406 {
407 struct bnxt_re_fence_data *fence = &pd->fence;
408 struct ib_mr *ib_mr = &fence->mr->ib_mr;
409 struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
410
411 memset(wqe, 0, sizeof(*wqe));
412 wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
413 wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
414 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
415 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
416 wqe->bind.zero_based = false;
417 wqe->bind.parent_l_key = ib_mr->lkey;
418 wqe->bind.va = (u64)(unsigned long)fence->va;
419 wqe->bind.length = fence->size;
420 wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
421 wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
422
423
424 /* seed the bind rkey from the fence MW; it is advanced on every bind */
425
426 fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
427 }
428
429 static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
430 {
431 struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
432 qplib_qp);
433 struct ib_pd *ib_pd = qp->ib_qp.pd;
434 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
435 struct bnxt_re_fence_data *fence = &pd->fence;
436 struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
437 struct bnxt_qplib_swqe wqe;
438 int rc;
439
440 memcpy(&wqe, fence_wqe, sizeof(wqe));
441 wqe.bind.r_key = fence->bind_rkey;
442 fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
443
444 dev_dbg(rdev_to_dev(qp->rdev),
445 "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
446 wqe.bind.r_key, qp->qplib_qp.id, pd);
447 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
448 if (rc) {
449 dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
450 return rc;
451 }
452 bnxt_qplib_post_send_db(&qp->qplib_qp);
453
454 return rc;
455 }
456
457 static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
458 {
459 struct bnxt_re_fence_data *fence = &pd->fence;
460 struct bnxt_re_dev *rdev = pd->rdev;
461 struct device *dev = &rdev->en_dev->pdev->dev;
462 struct bnxt_re_mr *mr = fence->mr;
463
464 if (fence->mw) {
465 bnxt_re_dealloc_mw(fence->mw);
466 fence->mw = NULL;
467 }
468 if (mr) {
469 if (mr->ib_mr.rkey)
470 bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
471 true);
472 if (mr->ib_mr.lkey)
473 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
474 kfree(mr);
475 fence->mr = NULL;
476 }
477 if (fence->dma_addr) {
478 dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
479 DMA_BIDIRECTIONAL);
480 fence->dma_addr = 0;
481 }
482 }
483
484 static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
485 {
486 int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
487 struct bnxt_re_fence_data *fence = &pd->fence;
488 struct bnxt_re_dev *rdev = pd->rdev;
489 struct device *dev = &rdev->en_dev->pdev->dev;
490 struct bnxt_re_mr *mr = NULL;
491 dma_addr_t dma_addr = 0;
492 struct ib_mw *mw;
493 u64 pbl_tbl;
494 int rc;
495
496 dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
497 DMA_BIDIRECTIONAL);
498 rc = dma_mapping_error(dev, dma_addr);
499 if (rc) {
500 dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
501 rc = -EIO;
502 fence->dma_addr = 0;
503 goto fail;
504 }
505 fence->dma_addr = dma_addr;
506
507 /* allocate and set up the fence MR */
508 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
509 if (!mr) {
510 rc = -ENOMEM;
511 goto fail;
512 }
513 fence->mr = mr;
514 mr->rdev = rdev;
515 mr->qplib_mr.pd = &pd->qplib_pd;
516 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
517 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
518 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
519 if (rc) {
520 dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
521 goto fail;
522 }
523
524
525 mr->ib_mr.lkey = mr->qplib_mr.lkey;
526 mr->qplib_mr.va = (u64)(unsigned long)fence->va;
527 mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
528 pbl_tbl = dma_addr;
529 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
530 BNXT_RE_FENCE_PBL_SIZE, false, PAGE_SIZE);
531 if (rc) {
532 dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
533 goto fail;
534 }
535 mr->ib_mr.rkey = mr->qplib_mr.rkey;
536
537 /* create a type-1 memory window to bind over the fence MR */
538 mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
539 if (IS_ERR(mw)) {
540 dev_err(rdev_to_dev(rdev),
541 "Failed to create fence-MW for PD: %p\n", pd);
542 rc = PTR_ERR(mw);
543 goto fail;
544 }
545 fence->mw = mw;
546
547 bnxt_re_create_fence_wqe(pd);
548 return 0;
549
550 fail:
551 bnxt_re_destroy_fence_mr(pd);
552 return rc;
553 }
554
555
556 void bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
557 {
558 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
559 struct bnxt_re_dev *rdev = pd->rdev;
560
561 bnxt_re_destroy_fence_mr(pd);
562
563 if (pd->qplib_pd.id)
564 bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
565 &pd->qplib_pd);
566 }
567
568 int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
569 {
570 struct ib_device *ibdev = ibpd->device;
571 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
572 struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
573 udata, struct bnxt_re_ucontext, ib_uctx);
574 struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
575 int rc;
576
577 pd->rdev = rdev;
578 if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
579 dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
580 rc = -ENOMEM;
581 goto fail;
582 }
583
584 if (udata) {
585 struct bnxt_re_pd_resp resp;
586
587 if (!ucntx->dpi.dbr) {
588 /*
589 * allocate a doorbell page (DPI) for this user context the
590 * first time a PD is allocated on it
591 */
592 if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
593 &ucntx->dpi, ucntx)) {
594 rc = -ENOMEM;
595 goto dbfail;
596 }
597 }
598
599 resp.pdid = pd->qplib_pd.id;
600
601 resp.dpi = ucntx->dpi.dpi;
602 resp.dbr = (u64)ucntx->dpi.umdbr;
603
604 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
605 if (rc) {
606 dev_err(rdev_to_dev(rdev),
607 "Failed to copy user response\n");
608 goto dbfail;
609 }
610 }
611
612 if (!udata)
613 if (bnxt_re_create_fence_mr(pd))
614 dev_warn(rdev_to_dev(rdev),
615 "Failed to create Fence-MR\n");
616 return 0;
617 dbfail:
618 bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
619 &pd->qplib_pd);
620 fail:
621 return rc;
622 }
623
624
625 void bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
626 {
627 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
628 struct bnxt_re_dev *rdev = ah->rdev;
629
630 bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah,
631 !(flags & RDMA_DESTROY_AH_SLEEPABLE));
632 }
633
634 static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
635 {
636 u8 nw_type;
637
638 switch (ntype) {
639 case RDMA_NETWORK_IPV4:
640 nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
641 break;
642 case RDMA_NETWORK_IPV6:
643 nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
644 break;
645 default:
646 nw_type = CMDQ_CREATE_AH_TYPE_V1;
647 break;
648 }
649 return nw_type;
650 }
651
652 int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr,
653 u32 flags, struct ib_udata *udata)
654 {
655 struct ib_pd *ib_pd = ib_ah->pd;
656 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
657 const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
658 struct bnxt_re_dev *rdev = pd->rdev;
659 const struct ib_gid_attr *sgid_attr;
660 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
661 u8 nw_type;
662 int rc;
663
664 if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
665 dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
666 return -EINVAL;
667 }
668
669 ah->rdev = rdev;
670 ah->qplib_ah.pd = &pd->qplib_pd;
671
672
673 memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
674 sizeof(union ib_gid));
675
676 /*
677 * the stack keeps two GID entries (RoCE v1 and v2) for every
678 * entry in the HW SGID table, so halve the stack index
679 */
680 ah->qplib_ah.sgid_index = grh->sgid_index / 2;
681 ah->qplib_ah.host_sgid_index = grh->sgid_index;
682 ah->qplib_ah.traffic_class = grh->traffic_class;
683 ah->qplib_ah.flow_label = grh->flow_label;
684 ah->qplib_ah.hop_limit = grh->hop_limit;
685 ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
686
687 sgid_attr = grh->sgid_attr;
688
689 nw_type = rdma_gid_attr_network_type(sgid_attr);
690 ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type);
691
692 memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
693 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
694 !(flags & RDMA_CREATE_AH_SLEEPABLE));
695 if (rc) {
696 dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
697 return rc;
698 }
699
700 /* write the AH id (AVID) to the shared page so user space can read it */
701 if (udata) {
702 struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
703 udata, struct bnxt_re_ucontext, ib_uctx);
704 unsigned long flag;
705 u32 *wrptr;
706
707 spin_lock_irqsave(&uctx->sh_lock, flag);
708 wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
709 *wrptr = ah->qplib_ah.id;
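/* write barrier: make sure the AH id reaches the shared page before the lock is dropped */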
710 wmb();
711 spin_unlock_irqrestore(&uctx->sh_lock, flag);
712 }
713
714 return 0;
715 }
716
717 int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
718 {
719 return 0;
720 }
721
722 int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
723 {
724 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
725
726 ah_attr->type = ib_ah->type;
727 rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
728 memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
729 rdma_ah_set_grh(ah_attr, NULL, 0,
730 ah->qplib_ah.host_sgid_index,
731 0, ah->qplib_ah.traffic_class);
732 rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
733 rdma_ah_set_port_num(ah_attr, 1);
734 rdma_ah_set_static_rate(ah_attr, 0);
735 return 0;
736 }
737
738 unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
739 __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
740 {
741 unsigned long flags;
742
743 spin_lock_irqsave(&qp->scq->cq_lock, flags);
744 if (qp->rcq != qp->scq)
745 spin_lock(&qp->rcq->cq_lock);
746 else
747 __acquire(&qp->rcq->cq_lock);
748
749 return flags;
750 }
751
752 void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
753 unsigned long flags)
754 __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
755 {
756 if (qp->rcq != qp->scq)
757 spin_unlock(&qp->rcq->cq_lock);
758 else
759 __release(&qp->rcq->cq_lock);
760 spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
761 }
762
763
764 int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
765 {
766 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
767 struct bnxt_re_dev *rdev = qp->rdev;
768 unsigned int flags;
769 int rc;
770
771 bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
772 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
773 if (rc) {
774 dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
775 return rc;
776 }
777
778 if (rdma_is_kernel_res(&qp->ib_qp.res)) {
779 flags = bnxt_re_lock_cqs(qp);
780 bnxt_qplib_clean_qp(&qp->qplib_qp);
781 bnxt_re_unlock_cqs(qp, flags);
782 }
783
784 bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
785
786 if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
787 bnxt_qplib_destroy_ah(&rdev->qplib_res, &rdev->sqp_ah->qplib_ah,
788 false);
789
790 bnxt_qplib_clean_qp(&qp->qplib_qp);
791 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
792 &rdev->qp1_sqp->qplib_qp);
793 if (rc) {
794 dev_err(rdev_to_dev(rdev),
795 "Failed to destroy Shadow QP");
796 return rc;
797 }
798 bnxt_qplib_free_qp_res(&rdev->qplib_res,
799 &rdev->qp1_sqp->qplib_qp);
800 mutex_lock(&rdev->qp_lock);
801 list_del(&rdev->qp1_sqp->list);
802 atomic_dec(&rdev->qp_count);
803 mutex_unlock(&rdev->qp_lock);
804
805 kfree(rdev->sqp_ah);
806 kfree(rdev->qp1_sqp);
807 rdev->qp1_sqp = NULL;
808 rdev->sqp_ah = NULL;
809 }
810
811 ib_umem_release(qp->rumem);
812 ib_umem_release(qp->sumem);
813
814 mutex_lock(&rdev->qp_lock);
815 list_del(&qp->list);
816 atomic_dec(&rdev->qp_count);
817 mutex_unlock(&rdev->qp_lock);
818 kfree(qp);
819 return 0;
820 }
821
822 static u8 __from_ib_qp_type(enum ib_qp_type type)
823 {
824 switch (type) {
825 case IB_QPT_GSI:
826 return CMDQ_CREATE_QP1_TYPE_GSI;
827 case IB_QPT_RC:
828 return CMDQ_CREATE_QP_TYPE_RC;
829 case IB_QPT_UD:
830 return CMDQ_CREATE_QP_TYPE_UD;
831 default:
832 return IB_QPT_MAX;
833 }
834 }
835
836 static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
837 struct bnxt_re_qp *qp, struct ib_udata *udata)
838 {
839 struct bnxt_re_qp_req ureq;
840 struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
841 struct ib_umem *umem;
842 int bytes = 0, psn_sz;
843 struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context(
844 udata, struct bnxt_re_ucontext, ib_uctx);
845
846 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
847 return -EFAULT;
848
849 bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
850 /* PSN search memory is mapped only for RC QPs */
851 if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
852 psn_sz = bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx) ?
853 sizeof(struct sq_psn_search_ext) :
854 sizeof(struct sq_psn_search);
855 bytes += (qplib_qp->sq.max_wqe * psn_sz);
856 }
857 bytes = PAGE_ALIGN(bytes);
858 umem = ib_umem_get(udata, ureq.qpsva, bytes, IB_ACCESS_LOCAL_WRITE, 1);
859 if (IS_ERR(umem))
860 return PTR_ERR(umem);
861
862 qp->sumem = umem;
863 qplib_qp->sq.sg_info.sglist = umem->sg_head.sgl;
864 qplib_qp->sq.sg_info.npages = ib_umem_num_pages(umem);
865 qplib_qp->sq.sg_info.nmap = umem->nmap;
866 qplib_qp->qp_handle = ureq.qp_handle;
867
868 if (!qp->qplib_qp.srq) {
869 bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
870 bytes = PAGE_ALIGN(bytes);
871 umem = ib_umem_get(udata, ureq.qprva, bytes,
872 IB_ACCESS_LOCAL_WRITE, 1);
873 if (IS_ERR(umem))
874 goto rqfail;
875 qp->rumem = umem;
876 qplib_qp->rq.sg_info.sglist = umem->sg_head.sgl;
877 qplib_qp->rq.sg_info.npages = ib_umem_num_pages(umem);
878 qplib_qp->rq.sg_info.nmap = umem->nmap;
879 }
880
881 qplib_qp->dpi = &cntx->dpi;
882 return 0;
883 rqfail:
884 ib_umem_release(qp->sumem);
885 qp->sumem = NULL;
886 memset(&qplib_qp->sq.sg_info, 0, sizeof(qplib_qp->sq.sg_info));
887
888 return PTR_ERR(umem);
889 }
890
891 static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
892 (struct bnxt_re_pd *pd,
893 struct bnxt_qplib_res *qp1_res,
894 struct bnxt_qplib_qp *qp1_qp)
895 {
896 struct bnxt_re_dev *rdev = pd->rdev;
897 struct bnxt_re_ah *ah;
898 union ib_gid sgid;
899 int rc;
900
901 ah = kzalloc(sizeof(*ah), GFP_KERNEL);
902 if (!ah)
903 return NULL;
904
905 ah->rdev = rdev;
906 ah->qplib_ah.pd = &pd->qplib_pd;
907
908 rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
909 if (rc)
910 goto fail;
911
912 /* use the port's own GID (sgid) as the destination GID of the shadow QP's AH */
913 memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
914 sizeof(union ib_gid));
915 ah->qplib_ah.sgid_index = 0;
916
917 ah->qplib_ah.traffic_class = 0;
918 ah->qplib_ah.flow_label = 0;
919 ah->qplib_ah.hop_limit = 1;
920 ah->qplib_ah.sl = 0;
921
922 ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);
923
924 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, false);
925 if (rc) {
926 dev_err(rdev_to_dev(rdev),
927 "Failed to allocate HW AH for Shadow QP");
928 goto fail;
929 }
930
931 return ah;
932
933 fail:
934 kfree(ah);
935 return NULL;
936 }
937
938 static struct bnxt_re_qp *bnxt_re_create_shadow_qp
939 (struct bnxt_re_pd *pd,
940 struct bnxt_qplib_res *qp1_res,
941 struct bnxt_qplib_qp *qp1_qp)
942 {
943 struct bnxt_re_dev *rdev = pd->rdev;
944 struct bnxt_re_qp *qp;
945 int rc;
946
947 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
948 if (!qp)
949 return NULL;
950
951 qp->rdev = rdev;
952
953
954 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
955
956 qp->qplib_qp.pd = &pd->qplib_pd;
957 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
958 qp->qplib_qp.type = IB_QPT_UD;
959
960 qp->qplib_qp.max_inline_data = 0;
961 qp->qplib_qp.sig_type = true;
962
963 /* the shadow SQ is sized to match the QP1 RQ it services */
964 qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
965 qp->qplib_qp.sq.max_sge = 2;
966 /* a q_full_delta of 1 is enough for this driver-internal QP */
967 qp->qplib_qp.sq.q_full_delta = 1;
968
969 qp->qplib_qp.scq = qp1_qp->scq;
970 qp->qplib_qp.rcq = qp1_qp->rcq;
971
972 qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
973 qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
974
975 qp->qplib_qp.rq.q_full_delta = 1;
976
977 qp->qplib_qp.mtu = qp1_qp->mtu;
978
979 qp->qplib_qp.sq_hdr_buf_size = 0;
980 qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
981 qp->qplib_qp.dpi = &rdev->dpi_privileged;
982
983 rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
984 if (rc)
985 goto fail;
986
987 rdev->sqp_id = qp->qplib_qp.id;
988
989 spin_lock_init(&qp->sq_lock);
990 INIT_LIST_HEAD(&qp->list);
991 mutex_lock(&rdev->qp_lock);
992 list_add_tail(&qp->list, &rdev->qp_list);
993 atomic_inc(&rdev->qp_count);
994 mutex_unlock(&rdev->qp_lock);
995 return qp;
996 fail:
997 kfree(qp);
998 return NULL;
999 }
1000
1001 struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
1002 struct ib_qp_init_attr *qp_init_attr,
1003 struct ib_udata *udata)
1004 {
1005 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1006 struct bnxt_re_dev *rdev = pd->rdev;
1007 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1008 struct bnxt_re_qp *qp;
1009 struct bnxt_re_cq *cq;
1010 struct bnxt_re_srq *srq;
1011 int rc, entries;
1012
1013 if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
1014 (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
1015 (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
1016 (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
1017 (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
1018 return ERR_PTR(-EINVAL);
1019
1020 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1021 if (!qp)
1022 return ERR_PTR(-ENOMEM);
1023
1024 qp->rdev = rdev;
1025 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1026 qp->qplib_qp.pd = &pd->qplib_pd;
1027 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1028 qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
1029 /* gen-P5 devices create the GSI QP as a native GSI type rather than the legacy QP1 type */
1030 if (qp_init_attr->qp_type == IB_QPT_GSI &&
1031 bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))
1032 qp->qplib_qp.type = CMDQ_CREATE_QP_TYPE_GSI;
1033 if (qp->qplib_qp.type == IB_QPT_MAX) {
1034 dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
1035 qp->qplib_qp.type);
1036 rc = -EINVAL;
1037 goto fail;
1038 }
1039
1040 qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
1041 qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
1042 IB_SIGNAL_ALL_WR) ? true : false);
1043
1044 qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
1045 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1046 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1047
1048 if (qp_init_attr->send_cq) {
1049 cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
1050 ib_cq);
1051 if (!cq) {
1052 dev_err(rdev_to_dev(rdev), "Send CQ not found");
1053 rc = -EINVAL;
1054 goto fail;
1055 }
1056 qp->qplib_qp.scq = &cq->qplib_cq;
1057 qp->scq = cq;
1058 }
1059
1060 if (qp_init_attr->recv_cq) {
1061 cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
1062 ib_cq);
1063 if (!cq) {
1064 dev_err(rdev_to_dev(rdev), "Receive CQ not found");
1065 rc = -EINVAL;
1066 goto fail;
1067 }
1068 qp->qplib_qp.rcq = &cq->qplib_cq;
1069 qp->rcq = cq;
1070 }
1071
1072 if (qp_init_attr->srq) {
1073 srq = container_of(qp_init_attr->srq, struct bnxt_re_srq,
1074 ib_srq);
1075 if (!srq) {
1076 dev_err(rdev_to_dev(rdev), "SRQ not found");
1077 rc = -EINVAL;
1078 goto fail;
1079 }
1080 qp->qplib_qp.srq = &srq->qplib_srq;
1081 qp->qplib_qp.rq.max_wqe = 0;
1082 } else {
1083 /* allocate one more RQ entry than requested so that posting the
1084 * advertised maximum never makes the queue look empty
1085 */
1086 entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
1087 qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
1088 dev_attr->max_qp_wqes + 1);
1089
1090 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1091 qp_init_attr->cap.max_recv_wr;
1092
1093 qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
1094 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1095 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1096 }
1097
1098 qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1099
1100 if (qp_init_attr->qp_type == IB_QPT_GSI &&
1101 !(bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))) {
1102 /* allocate one more SQ entry than requested (see the RQ sizing note above) */
1103 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
1104 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1105 dev_attr->max_qp_wqes + 1);
1106 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1107 qp_init_attr->cap.max_send_wr;
1108 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1109 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1110 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
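/* reserve one extra send SGE for the software-built QP1 header (see bnxt_re_build_qp1_send_v2) */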
1111 qp->qplib_qp.sq.max_sge++;
1112 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1113 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1114
1115 qp->qplib_qp.rq_hdr_buf_size =
1116 BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1117
1118 qp->qplib_qp.sq_hdr_buf_size =
1119 BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
1120 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1121 rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
1122 if (rc) {
1123 dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
1124 goto fail;
1125 }
1126
1127 rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
1128 &qp->qplib_qp);
1129 if (!rdev->qp1_sqp) {
1130 rc = -EINVAL;
1131 dev_err(rdev_to_dev(rdev),
1132 "Failed to create Shadow QP for QP1");
1133 goto qp_destroy;
1134 }
1135 rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1136 &qp->qplib_qp);
1137 if (!rdev->sqp_ah) {
1138 bnxt_qplib_destroy_qp(&rdev->qplib_res,
1139 &rdev->qp1_sqp->qplib_qp);
1140 rc = -EINVAL;
1141 dev_err(rdev_to_dev(rdev),
1142 "Failed to create AH entry for ShadowQP");
1143 goto qp_destroy;
1144 }
1145
1146 } else {
1147 /* allocate BNXT_QPLIB_RESERVED_QP_WRS + 1 entries on top of what was requested */
1148 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
1149 BNXT_QPLIB_RESERVED_QP_WRS + 1);
1150 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1151 dev_attr->max_qp_wqes +
1152 BNXT_QPLIB_RESERVED_QP_WRS + 1);
1153 qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
1154
1155 /*
1156 * reserve one more slot for the phantom WQE used by the UD
1157 * HW stall workaround; an application posting one extra WR
1158 * is tolerated rather than rejected
1159 */
1160
1161 qp->qplib_qp.sq.q_full_delta -= 1;
1162
1163 qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
1164 qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
1165 if (udata) {
1166 rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
1167 if (rc)
1168 goto fail;
1169 } else {
1170 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1171 }
1172
1173 rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
1174 if (rc) {
1175 dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
1176 goto free_umem;
1177 }
1178 }
1179
1180 qp->ib_qp.qp_num = qp->qplib_qp.id;
1181 spin_lock_init(&qp->sq_lock);
1182 spin_lock_init(&qp->rq_lock);
1183
1184 if (udata) {
1185 struct bnxt_re_qp_resp resp;
1186
1187 resp.qpid = qp->ib_qp.qp_num;
1188 resp.rsvd = 0;
1189 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1190 if (rc) {
1191 dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
1192 goto qp_destroy;
1193 }
1194 }
1195 INIT_LIST_HEAD(&qp->list);
1196 mutex_lock(&rdev->qp_lock);
1197 list_add_tail(&qp->list, &rdev->qp_list);
1198 atomic_inc(&rdev->qp_count);
1199 mutex_unlock(&rdev->qp_lock);
1200
1201 return &qp->ib_qp;
1202 qp_destroy:
1203 bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1204 free_umem:
1205 ib_umem_release(qp->rumem);
1206 ib_umem_release(qp->sumem);
1207 fail:
1208 kfree(qp);
1209 return ERR_PTR(rc);
1210 }
1211
1212 static u8 __from_ib_qp_state(enum ib_qp_state state)
1213 {
1214 switch (state) {
1215 case IB_QPS_RESET:
1216 return CMDQ_MODIFY_QP_NEW_STATE_RESET;
1217 case IB_QPS_INIT:
1218 return CMDQ_MODIFY_QP_NEW_STATE_INIT;
1219 case IB_QPS_RTR:
1220 return CMDQ_MODIFY_QP_NEW_STATE_RTR;
1221 case IB_QPS_RTS:
1222 return CMDQ_MODIFY_QP_NEW_STATE_RTS;
1223 case IB_QPS_SQD:
1224 return CMDQ_MODIFY_QP_NEW_STATE_SQD;
1225 case IB_QPS_SQE:
1226 return CMDQ_MODIFY_QP_NEW_STATE_SQE;
1227 case IB_QPS_ERR:
1228 default:
1229 return CMDQ_MODIFY_QP_NEW_STATE_ERR;
1230 }
1231 }
1232
1233 static enum ib_qp_state __to_ib_qp_state(u8 state)
1234 {
1235 switch (state) {
1236 case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1237 return IB_QPS_RESET;
1238 case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1239 return IB_QPS_INIT;
1240 case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1241 return IB_QPS_RTR;
1242 case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1243 return IB_QPS_RTS;
1244 case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1245 return IB_QPS_SQD;
1246 case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1247 return IB_QPS_SQE;
1248 case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1249 default:
1250 return IB_QPS_ERR;
1251 }
1252 }
1253
1254 static u32 __from_ib_mtu(enum ib_mtu mtu)
1255 {
1256 switch (mtu) {
1257 case IB_MTU_256:
1258 return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
1259 case IB_MTU_512:
1260 return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
1261 case IB_MTU_1024:
1262 return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
1263 case IB_MTU_2048:
1264 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1265 case IB_MTU_4096:
1266 return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
1267 default:
1268 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1269 }
1270 }
1271
1272 static enum ib_mtu __to_ib_mtu(u32 mtu)
1273 {
1274 switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
1275 case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
1276 return IB_MTU_256;
1277 case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
1278 return IB_MTU_512;
1279 case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
1280 return IB_MTU_1024;
1281 case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
1282 return IB_MTU_2048;
1283 case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
1284 return IB_MTU_4096;
1285 default:
1286 return IB_MTU_2048;
1287 }
1288 }
1289
1290
1291 void bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
1292 {
1293 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1294 ib_srq);
1295 struct bnxt_re_dev *rdev = srq->rdev;
1296 struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1297 struct bnxt_qplib_nq *nq = NULL;
1298
1299 if (qplib_srq->cq)
1300 nq = qplib_srq->cq->nq;
1301 bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
1302 ib_umem_release(srq->umem);
1303 atomic_dec(&rdev->srq_count);
1304 if (nq)
1305 nq->budget--;
1306 }
1307
1308 static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
1309 struct bnxt_re_pd *pd,
1310 struct bnxt_re_srq *srq,
1311 struct ib_udata *udata)
1312 {
1313 struct bnxt_re_srq_req ureq;
1314 struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1315 struct ib_umem *umem;
1316 int bytes = 0;
1317 struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context(
1318 udata, struct bnxt_re_ucontext, ib_uctx);
1319
1320 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1321 return -EFAULT;
1322
1323 bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
1324 bytes = PAGE_ALIGN(bytes);
1325 umem = ib_umem_get(udata, ureq.srqva, bytes, IB_ACCESS_LOCAL_WRITE, 1);
1326 if (IS_ERR(umem))
1327 return PTR_ERR(umem);
1328
1329 srq->umem = umem;
1330 qplib_srq->sg_info.sglist = umem->sg_head.sgl;
1331 qplib_srq->sg_info.npages = ib_umem_num_pages(umem);
1332 qplib_srq->sg_info.nmap = umem->nmap;
1333 qplib_srq->srq_handle = ureq.srq_handle;
1334 qplib_srq->dpi = &cntx->dpi;
1335
1336 return 0;
1337 }
1338
1339 int bnxt_re_create_srq(struct ib_srq *ib_srq,
1340 struct ib_srq_init_attr *srq_init_attr,
1341 struct ib_udata *udata)
1342 {
1343 struct ib_pd *ib_pd = ib_srq->pd;
1344 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1345 struct bnxt_re_dev *rdev = pd->rdev;
1346 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1347 struct bnxt_re_srq *srq =
1348 container_of(ib_srq, struct bnxt_re_srq, ib_srq);
1349 struct bnxt_qplib_nq *nq = NULL;
1350 int rc, entries;
1351
1352 if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
1353 dev_err(rdev_to_dev(rdev), "Create CQ failed - max exceeded");
1354 rc = -EINVAL;
1355 goto exit;
1356 }
1357
1358 if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
1359 rc = -EOPNOTSUPP;
1360 goto exit;
1361 }
1362
1363 srq->rdev = rdev;
1364 srq->qplib_srq.pd = &pd->qplib_pd;
1365 srq->qplib_srq.dpi = &rdev->dpi_privileged;
1366
1367 /* allocate one more entry than requested so posting the
1368 * advertised maximum never makes the SRQ look empty */
1369 entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
1370 if (entries > dev_attr->max_srq_wqes + 1)
1371 entries = dev_attr->max_srq_wqes + 1;
1372
1373 srq->qplib_srq.max_wqe = entries;
1374 srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
1375 srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
1376 srq->srq_limit = srq_init_attr->attr.srq_limit;
1377 srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
1378 nq = &rdev->nq[0];
1379
1380 if (udata) {
1381 rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
1382 if (rc)
1383 goto fail;
1384 }
1385
1386 rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
1387 if (rc) {
1388 dev_err(rdev_to_dev(rdev), "Create HW SRQ failed!");
1389 goto fail;
1390 }
1391
1392 if (udata) {
1393 struct bnxt_re_srq_resp resp;
1394
1395 resp.srqid = srq->qplib_srq.id;
1396 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1397 if (rc) {
1398 dev_err(rdev_to_dev(rdev), "SRQ copy to udata failed!");
1399 bnxt_qplib_destroy_srq(&rdev->qplib_res,
1400 &srq->qplib_srq);
1401 goto fail;
1402 }
1403 }
1404 if (nq)
1405 nq->budget++;
1406 atomic_inc(&rdev->srq_count);
1407
1408 return 0;
1409
1410 fail:
1411 ib_umem_release(srq->umem);
1412 exit:
1413 return rc;
1414 }
1415
1416 int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
1417 enum ib_srq_attr_mask srq_attr_mask,
1418 struct ib_udata *udata)
1419 {
1420 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1421 ib_srq);
1422 struct bnxt_re_dev *rdev = srq->rdev;
1423 int rc;
1424
1425 switch (srq_attr_mask) {
1426 case IB_SRQ_MAX_WR:
1427 /* SRQ resize is not supported */
1428 break;
1429 case IB_SRQ_LIMIT:
1430
1431 if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
1432 return -EINVAL;
1433
1434 srq->qplib_srq.threshold = srq_attr->srq_limit;
1435 rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
1436 if (rc) {
1437 dev_err(rdev_to_dev(rdev), "Modify HW SRQ failed!");
1438 return rc;
1439 }
1440
1441 srq->srq_limit = srq_attr->srq_limit;
1442
1443 break;
1444 default:
1445 dev_err(rdev_to_dev(rdev),
1446 "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
1447 return -EINVAL;
1448 }
1449 return 0;
1450 }
1451
1452 int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
1453 {
1454 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1455 ib_srq);
1456 struct bnxt_re_srq tsrq;
1457 struct bnxt_re_dev *rdev = srq->rdev;
1458 int rc;
1459
1460
1461 tsrq.qplib_srq.id = srq->qplib_srq.id;
1462 rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
1463 if (rc) {
1464 dev_err(rdev_to_dev(rdev), "Query HW SRQ failed!");
1465 return rc;
1466 }
1467 srq_attr->max_wr = srq->qplib_srq.max_wqe;
1468 srq_attr->max_sge = srq->qplib_srq.max_sge;
1469 srq_attr->srq_limit = tsrq.qplib_srq.threshold;
1470
1471 return 0;
1472 }
1473
1474 int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,
1475 const struct ib_recv_wr **bad_wr)
1476 {
1477 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1478 ib_srq);
1479 struct bnxt_qplib_swqe wqe;
1480 unsigned long flags;
1481 int rc = 0;
1482
1483 spin_lock_irqsave(&srq->lock, flags);
1484 while (wr) {
1485
1486 wqe.num_sge = wr->num_sge;
1487 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
1488 wqe.wr_id = wr->wr_id;
1489 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
1490
1491 rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
1492 if (rc) {
1493 *bad_wr = wr;
1494 break;
1495 }
1496 wr = wr->next;
1497 }
1498 spin_unlock_irqrestore(&srq->lock, flags);
1499
1500 return rc;
1501 }
1502 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
1503 struct bnxt_re_qp *qp1_qp,
1504 int qp_attr_mask)
1505 {
1506 struct bnxt_re_qp *qp = rdev->qp1_sqp;
1507 int rc = 0;
1508
1509 if (qp_attr_mask & IB_QP_STATE) {
1510 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1511 qp->qplib_qp.state = qp1_qp->qplib_qp.state;
1512 }
1513 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1514 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1515 qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
1516 }
1517
1518 if (qp_attr_mask & IB_QP_QKEY) {
1519 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1520 /* the shadow QP uses a fixed, driver-chosen QKEY */
1521 qp->qplib_qp.qkey = 0x81818181;
1522 }
1523 if (qp_attr_mask & IB_QP_SQ_PSN) {
1524 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1525 qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
1526 }
1527
1528 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1529 if (rc)
1530 dev_err(rdev_to_dev(rdev),
1531 "Failed to modify Shadow QP for QP1");
1532 return rc;
1533 }
1534
1535 int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1536 int qp_attr_mask, struct ib_udata *udata)
1537 {
1538 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1539 struct bnxt_re_dev *rdev = qp->rdev;
1540 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1541 enum ib_qp_state curr_qp_state, new_qp_state;
1542 int rc, entries;
1543 unsigned int flags;
1544 u8 nw_type;
1545
1546 qp->qplib_qp.modify_flags = 0;
1547 if (qp_attr_mask & IB_QP_STATE) {
1548 curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
1549 new_qp_state = qp_attr->qp_state;
1550 if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
1551 ib_qp->qp_type, qp_attr_mask)) {
1552 dev_err(rdev_to_dev(rdev),
1553 "Invalid attribute mask: %#x specified ",
1554 qp_attr_mask);
1555 dev_err(rdev_to_dev(rdev),
1556 "for qpn: %#x type: %#x",
1557 ib_qp->qp_num, ib_qp->qp_type);
1558 dev_err(rdev_to_dev(rdev),
1559 "curr_qp_state=0x%x, new_qp_state=0x%x\n",
1560 curr_qp_state, new_qp_state);
1561 return -EINVAL;
1562 }
1563 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1564 qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
1565
1566 if (!qp->sumem &&
1567 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1568 dev_dbg(rdev_to_dev(rdev),
1569 "Move QP = %p to flush list\n",
1570 qp);
1571 flags = bnxt_re_lock_cqs(qp);
1572 bnxt_qplib_add_flush_qp(&qp->qplib_qp);
1573 bnxt_re_unlock_cqs(qp, flags);
1574 }
1575 if (!qp->sumem &&
1576 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1577 dev_dbg(rdev_to_dev(rdev),
1578 "Move QP = %p out of flush list\n",
1579 qp);
1580 flags = bnxt_re_lock_cqs(qp);
1581 bnxt_qplib_clean_qp(&qp->qplib_qp);
1582 bnxt_re_unlock_cqs(qp, flags);
1583 }
1584 }
1585 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
1586 qp->qplib_qp.modify_flags |=
1587 CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
1588 qp->qplib_qp.en_sqd_async_notify = true;
1589 }
1590 if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
1591 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
1592 qp->qplib_qp.access =
1593 __from_ib_access_flags(qp_attr->qp_access_flags);
1594 /* LOCAL_WRITE must always be set so that RC receives can land */
1595 qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
1596 /* remote read/write access is force-enabled on the QP for now */
1597 qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE;
1598 qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ;
1599 }
1600 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1601 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1602 qp->qplib_qp.pkey_index = qp_attr->pkey_index;
1603 }
1604 if (qp_attr_mask & IB_QP_QKEY) {
1605 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1606 qp->qplib_qp.qkey = qp_attr->qkey;
1607 }
1608 if (qp_attr_mask & IB_QP_AV) {
1609 const struct ib_global_route *grh =
1610 rdma_ah_read_grh(&qp_attr->ah_attr);
1611 const struct ib_gid_attr *sgid_attr;
1612
1613 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1614 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1615 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1616 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1617 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1618 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1619 CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
1620 memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
1621 sizeof(qp->qplib_qp.ah.dgid.data));
1622 qp->qplib_qp.ah.flow_label = grh->flow_label;
1623 /*
1624 * the stack keeps two GID entries (RoCE v1 and v2) for every
1625 * entry in the HW SGID table, so halve the stack index
1626 */
1627 qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2;
1628 qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
1629 qp->qplib_qp.ah.hop_limit = grh->hop_limit;
1630 qp->qplib_qp.ah.traffic_class = grh->traffic_class;
1631 qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
1632 ether_addr_copy(qp->qplib_qp.ah.dmac,
1633 qp_attr->ah_attr.roce.dmac);
1634
1635 sgid_attr = qp_attr->ah_attr.grh.sgid_attr;
1636 rc = rdma_read_gid_l2_fields(sgid_attr, NULL,
1637 &qp->qplib_qp.smac[0]);
1638 if (rc)
1639 return rc;
1640
1641 nw_type = rdma_gid_attr_network_type(sgid_attr);
1642 switch (nw_type) {
1643 case RDMA_NETWORK_IPV4:
1644 qp->qplib_qp.nw_type =
1645 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
1646 break;
1647 case RDMA_NETWORK_IPV6:
1648 qp->qplib_qp.nw_type =
1649 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
1650 break;
1651 default:
1652 qp->qplib_qp.nw_type =
1653 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
1654 break;
1655 }
1656 }
1657
1658 if (qp_attr_mask & IB_QP_PATH_MTU) {
1659 qp->qplib_qp.modify_flags |=
1660 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1661 qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
1662 qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
1663 } else if (qp_attr->qp_state == IB_QPS_RTR) {
1664 qp->qplib_qp.modify_flags |=
1665 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1666 qp->qplib_qp.path_mtu =
1667 __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
1668 qp->qplib_qp.mtu =
1669 ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1670 }
1671
1672 if (qp_attr_mask & IB_QP_TIMEOUT) {
1673 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
1674 qp->qplib_qp.timeout = qp_attr->timeout;
1675 }
1676 if (qp_attr_mask & IB_QP_RETRY_CNT) {
1677 qp->qplib_qp.modify_flags |=
1678 CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
1679 qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
1680 }
1681 if (qp_attr_mask & IB_QP_RNR_RETRY) {
1682 qp->qplib_qp.modify_flags |=
1683 CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
1684 qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
1685 }
1686 if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
1687 qp->qplib_qp.modify_flags |=
1688 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
1689 qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
1690 }
1691 if (qp_attr_mask & IB_QP_RQ_PSN) {
1692 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
1693 qp->qplib_qp.rq.psn = qp_attr->rq_psn;
1694 }
1695 if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1696 qp->qplib_qp.modify_flags |=
1697 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
1698
1699 qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
1700 dev_attr->max_qp_rd_atom);
1701 }
1702 if (qp_attr_mask & IB_QP_SQ_PSN) {
1703 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1704 qp->qplib_qp.sq.psn = qp_attr->sq_psn;
1705 }
1706 if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1707 if (qp_attr->max_dest_rd_atomic >
1708 dev_attr->max_qp_init_rd_atom) {
1709 dev_err(rdev_to_dev(rdev),
1710 "max_dest_rd_atomic requested%d is > dev_max%d",
1711 qp_attr->max_dest_rd_atomic,
1712 dev_attr->max_qp_init_rd_atom);
1713 return -EINVAL;
1714 }
1715
1716 qp->qplib_qp.modify_flags |=
1717 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
1718 qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
1719 }
1720 if (qp_attr_mask & IB_QP_CAP) {
1721 qp->qplib_qp.modify_flags |=
1722 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
1723 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
1724 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
1725 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
1726 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
1727 if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
1728 (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
1729 (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
1730 (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
1731 (qp_attr->cap.max_inline_data >=
1732 dev_attr->max_inline_data)) {
1733 dev_err(rdev_to_dev(rdev),
1734 "Create QP failed - max exceeded");
1735 return -EINVAL;
1736 }
1737 entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
1738 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1739 dev_attr->max_qp_wqes + 1);
1740 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1741 qp_attr->cap.max_send_wr;
1742 /*
1743 * reserve one more slot for the phantom WQE used by the UD
1744 * HW stall workaround (matching the reservation made at QP
1745 * create time)
1746 */
1747 qp->qplib_qp.sq.q_full_delta -= 1;
1748 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
1749 if (qp->qplib_qp.rq.max_wqe) {
1750 entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
1751 qp->qplib_qp.rq.max_wqe =
1752 min_t(u32, entries, dev_attr->max_qp_wqes + 1);
1753 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1754 qp_attr->cap.max_recv_wr;
1755 qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
1756 } else {
1757 /* the QP uses an SRQ; ignore the RQ caps */
1758 }
1759 }
1760 if (qp_attr_mask & IB_QP_DEST_QPN) {
1761 qp->qplib_qp.modify_flags |=
1762 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
1763 qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
1764 }
1765 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1766 if (rc) {
1767 dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
1768 return rc;
1769 }
1770 if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
1771 rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
1772 return rc;
1773 }
1774
1775 int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1776 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
1777 {
1778 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1779 struct bnxt_re_dev *rdev = qp->rdev;
1780 struct bnxt_qplib_qp *qplib_qp;
1781 int rc;
1782
1783 qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
1784 if (!qplib_qp)
1785 return -ENOMEM;
1786
1787 qplib_qp->id = qp->qplib_qp.id;
1788 qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
1789
1790 rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
1791 if (rc) {
1792 dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
1793 goto out;
1794 }
1795 qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
1796 qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
1797 qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
1798 qp_attr->pkey_index = qplib_qp->pkey_index;
1799 qp_attr->qkey = qplib_qp->qkey;
1800 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
1801 rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
1802 qplib_qp->ah.host_sgid_index,
1803 qplib_qp->ah.hop_limit,
1804 qplib_qp->ah.traffic_class);
1805 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
1806 rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
1807 ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
1808 qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
1809 qp_attr->timeout = qplib_qp->timeout;
1810 qp_attr->retry_cnt = qplib_qp->retry_cnt;
1811 qp_attr->rnr_retry = qplib_qp->rnr_retry;
1812 qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
1813 qp_attr->rq_psn = qplib_qp->rq.psn;
1814 qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
1815 qp_attr->sq_psn = qplib_qp->sq.psn;
1816 qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
1817 qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
1818 IB_SIGNAL_REQ_WR;
1819 qp_attr->dest_qp_num = qplib_qp->dest_qpn;
1820
1821 qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
1822 qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
1823 qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
1824 qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
1825 qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
1826 qp_init_attr->cap = qp_attr->cap;
1827
1828 out:
1829 kfree(qplib_qp);
1830 return rc;
1831 }
1832
1833
1834
1835 static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
1836 const struct ib_send_wr *wr,
1837 struct bnxt_qplib_swqe *wqe,
1838 int payload_size)
1839 {
1840 struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
1841 ib_ah);
1842 struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
1843 const struct ib_gid_attr *sgid_attr = ah->ib_ah.sgid_attr;
1844 struct bnxt_qplib_sge sge;
1845 u8 nw_type;
1846 u16 ether_type;
1847 union ib_gid dgid;
1848 bool is_eth = false;
1849 bool is_vlan = false;
1850 bool is_grh = false;
1851 bool is_udp = false;
1852 u8 ip_version = 0;
1853 u16 vlan_id = 0xFFFF;
1854 void *buf;
1855 int i, rc = 0;
1856
1857 memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
1858
1859 rc = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, NULL);
1860 if (rc)
1861 return rc;
1862
1863
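/* Get the network header type for this GID */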
1864 nw_type = rdma_gid_attr_network_type(sgid_attr);
1865 switch (nw_type) {
1866 case RDMA_NETWORK_IPV4:
1867 nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
1868 break;
1869 case RDMA_NETWORK_IPV6:
1870 nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
1871 break;
1872 default:
1873 nw_type = BNXT_RE_ROCE_V1_PACKET;
1874 break;
1875 }
1876 memcpy(&dgid.raw, &qplib_ah->dgid, 16);
1877 is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
1878 if (is_udp) {
1879 if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
1880 ip_version = 4;
1881 ether_type = ETH_P_IP;
1882 } else {
1883 ip_version = 6;
1884 ether_type = ETH_P_IPV6;
1885 }
1886 is_grh = false;
1887 } else {
1888 ether_type = ETH_P_IBOE;
1889 is_grh = true;
1890 }
1891
1892 is_eth = true;
1893 is_vlan = (vlan_id && (vlan_id < 0x1000)) ? true : false;
1894
1895 ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
1896 ip_version, is_udp, 0, &qp->qp1_hdr);
1897
1898
1899 ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
1900 ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
1901
1902
1903
1904 if (!is_vlan) {
1905 qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
1906 } else {
1907 qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
1908 qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
1909 }
1910
1911 if (is_grh || (ip_version == 6)) {
1912 memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw,
1913 sizeof(sgid_attr->gid));
1914 memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
1915 sizeof(sgid_attr->gid));
1916 qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
1917 }
1918
1919 if (ip_version == 4) {
1920 qp->qp1_hdr.ip4.tos = 0;
1921 qp->qp1_hdr.ip4.id = 0;
1922 qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
1923 qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
1924
1925 memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4);
1926 memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
1927 qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
1928 }
1929
1930 if (is_udp) {
1931 qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
1932 qp->qp1_hdr.udp.sport = htons(0x8CD1);
1933 qp->qp1_hdr.udp.csum = 0;
1934 }
1935
1936
1937 if (wr->opcode == IB_WR_SEND_WITH_IMM) {
1938 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
1939 qp->qp1_hdr.immediate_present = 1;
1940 } else {
1941 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
1942 }
1943 if (wr->send_flags & IB_SEND_SOLICITED)
1944 qp->qp1_hdr.bth.solicited_event = 1;
1945
1946 qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
1947
1948
1949 qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
1950 qp->qp1_hdr.bth.destination_qpn = IB_QP1;
1951 qp->qp1_hdr.bth.ack_req = 0;
1952 qp->send_psn++;
1953 qp->send_psn &= BTH_PSN_MASK;
1954 qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
1955
1956
1957 qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
1958 qp->qp1_hdr.deth.source_qpn = IB_QP1;
1959
1960
1961 buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
1962 if (buf) {
1963 ib_ud_header_pack(&qp->qp1_hdr, buf);
1964 for (i = wqe->num_sge; i; i--) {
1965 wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
1966 wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
1967 wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
1968 }
1969
1970
1971
1972
1973
1974
1975
1976
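/* The header SGE returned by bnxt_qplib_get_qp1_sq_buf() presumably covers
 * the largest header format (RoCE v2 over IPv6). Trim it for smaller
 * formats: 20 bytes when a 20-byte IPv4 header replaces the 40-byte
 * IPv6/GRH header, 8 bytes when there is no UDP header (RoCE v1), and
 * 4 bytes when no VLAN tag is present.
 */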
1977 if (is_udp && ip_version == 4)
1978 sge.size -= 20;
1979
1980
1981
1982
1983
1984 if (!is_udp)
1985 sge.size -= 8;
1986
1987
1988 if (!is_vlan)
1989 sge.size -= 4;
1990
1991 wqe->sg_list[0].addr = sge.addr;
1992 wqe->sg_list[0].lkey = sge.lkey;
1993 wqe->sg_list[0].size = sge.size;
1994 wqe->num_sge++;
1995
1996 } else {
1997 dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!");
1998 rc = -ENOMEM;
1999 }
2000 return rc;
2001 }
2002
2003
2004
2005
2006
2007
2008
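/* For GSI receives, point the hardware RQ WQE at the driver's QP1 header
 * buffer and park the consumer's SGE and wr_id in the SQP table; they are
 * used later when the packet is relayed through the shadow QP.
 */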
2009 static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
2010 const struct ib_recv_wr *wr,
2011 struct bnxt_qplib_swqe *wqe,
2012 int payload_size)
2013 {
2014 struct bnxt_qplib_sge ref, sge;
2015 u32 rq_prod_index;
2016 struct bnxt_re_sqp_entries *sqp_entry;
2017
2018 rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
2019
2020 if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
2021 return -ENOMEM;
2022
2023
2024
2025
2026
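/* Save the consumer's original SGE and wr_id in the SQP slot indexed by
 * the RQ producer index.
 */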
2027 ref.addr = wqe->sg_list[0].addr;
2028 ref.lkey = wqe->sg_list[0].lkey;
2029 ref.size = wqe->sg_list[0].size;
2030
2031 sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];
2032
2033
2034 wqe->sg_list[0].addr = sge.addr;
2035 wqe->sg_list[0].lkey = sge.lkey;
2036 wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
2037 sge.size -= wqe->sg_list[0].size;
2038
2039 sqp_entry->sge.addr = ref.addr;
2040 sqp_entry->sge.lkey = ref.lkey;
2041 sqp_entry->sge.size = ref.size;
2042
2043 sqp_entry->wrid = wqe->wr_id;
2044
2045 wqe->wr_id = rq_prod_index;
2046 return 0;
2047 }
2048
2049 static int is_ud_qp(struct bnxt_re_qp *qp)
2050 {
2051 return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD ||
2052 qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI);
2053 }
2054
2055 static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
2056 const struct ib_send_wr *wr,
2057 struct bnxt_qplib_swqe *wqe)
2058 {
2059 struct bnxt_re_ah *ah = NULL;
2060
2061 if (is_ud_qp(qp)) {
2062 ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
2063 wqe->send.q_key = ud_wr(wr)->remote_qkey;
2064 wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
2065 wqe->send.avid = ah->qplib_ah.id;
2066 }
2067 switch (wr->opcode) {
2068 case IB_WR_SEND:
2069 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
2070 break;
2071 case IB_WR_SEND_WITH_IMM:
2072 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
2073 wqe->send.imm_data = wr->ex.imm_data;
2074 break;
2075 case IB_WR_SEND_WITH_INV:
2076 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
2077 wqe->send.inv_key = wr->ex.invalidate_rkey;
2078 break;
2079 default:
2080 return -EINVAL;
2081 }
2082 if (wr->send_flags & IB_SEND_SIGNALED)
2083 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2084 if (wr->send_flags & IB_SEND_FENCE)
2085 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2086 if (wr->send_flags & IB_SEND_SOLICITED)
2087 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2088 if (wr->send_flags & IB_SEND_INLINE)
2089 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2090
2091 return 0;
2092 }
2093
2094 static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
2095 struct bnxt_qplib_swqe *wqe)
2096 {
2097 switch (wr->opcode) {
2098 case IB_WR_RDMA_WRITE:
2099 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
2100 break;
2101 case IB_WR_RDMA_WRITE_WITH_IMM:
2102 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
2103 wqe->rdma.imm_data = wr->ex.imm_data;
2104 break;
2105 case IB_WR_RDMA_READ:
2106 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
2107 wqe->rdma.inv_key = wr->ex.invalidate_rkey;
2108 break;
2109 default:
2110 return -EINVAL;
2111 }
2112 wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
2113 wqe->rdma.r_key = rdma_wr(wr)->rkey;
2114 if (wr->send_flags & IB_SEND_SIGNALED)
2115 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2116 if (wr->send_flags & IB_SEND_FENCE)
2117 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2118 if (wr->send_flags & IB_SEND_SOLICITED)
2119 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2120 if (wr->send_flags & IB_SEND_INLINE)
2121 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2122
2123 return 0;
2124 }
2125
2126 static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr,
2127 struct bnxt_qplib_swqe *wqe)
2128 {
2129 switch (wr->opcode) {
2130 case IB_WR_ATOMIC_CMP_AND_SWP:
2131 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
2132 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2133 wqe->atomic.swap_data = atomic_wr(wr)->swap;
2134 break;
2135 case IB_WR_ATOMIC_FETCH_AND_ADD:
2136 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
2137 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2138 break;
2139 default:
2140 return -EINVAL;
2141 }
2142 wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
2143 wqe->atomic.r_key = atomic_wr(wr)->rkey;
2144 if (wr->send_flags & IB_SEND_SIGNALED)
2145 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2146 if (wr->send_flags & IB_SEND_FENCE)
2147 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2148 if (wr->send_flags & IB_SEND_SOLICITED)
2149 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2150 return 0;
2151 }
2152
2153 static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
2154 struct bnxt_qplib_swqe *wqe)
2155 {
2156 wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
2157 wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
2158
2159
2160
2161
2162 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2163
2164 if (wr->send_flags & IB_SEND_SIGNALED)
2165 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2166 if (wr->send_flags & IB_SEND_SOLICITED)
2167 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2168
2169 return 0;
2170 }
2171
2172 static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
2173 struct bnxt_qplib_swqe *wqe)
2174 {
2175 struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
2176 struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
2177 int access = wr->access;
2178
2179 wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
2180 wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
2181 wqe->frmr.page_list = mr->pages;
2182 wqe->frmr.page_list_len = mr->npages;
2183 wqe->frmr.levels = qplib_frpl->hwq.level + 1;
2184 wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
2185
2186
2187
2188
2189
2190 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2191
2192 if (wr->wr.send_flags & IB_SEND_SIGNALED)
2193 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2194
2195 if (access & IB_ACCESS_LOCAL_WRITE)
2196 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
2197 if (access & IB_ACCESS_REMOTE_READ)
2198 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
2199 if (access & IB_ACCESS_REMOTE_WRITE)
2200 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
2201 if (access & IB_ACCESS_REMOTE_ATOMIC)
2202 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
2203 if (access & IB_ACCESS_MW_BIND)
2204 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
2205
2206 wqe->frmr.l_key = wr->key;
2207 wqe->frmr.length = wr->mr->length;
2208 wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
2209 wqe->frmr.va = wr->mr->iova;
2210 return 0;
2211 }
2212
2213 static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
2214 const struct ib_send_wr *wr,
2215 struct bnxt_qplib_swqe *wqe)
2216 {
2217
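/* Copy the inline payload from each SGE into the WQE's inline data area */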
2218 u8 *in_data;
2219 u32 i, sge_len;
2220 void *sge_addr;
2221
2222 in_data = wqe->inline_data;
2223 for (i = 0; i < wr->num_sge; i++) {
2224 sge_addr = (void *)(unsigned long)
2225 wr->sg_list[i].addr;
2226 sge_len = wr->sg_list[i].length;
2227
2228 if ((sge_len + wqe->inline_len) >
2229 BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2230 dev_err(rdev_to_dev(rdev),
2231 "Inline data size requested > supported value");
2232 return -EINVAL;
2233 }
2234
2235
2236 memcpy(in_data, sge_addr, sge_len);
2237 in_data += wr->sg_list[i].length;
2238 wqe->inline_len += wr->sg_list[i].length;
2239 }
2240 return wqe->inline_len;
2241 }
2242
2243 static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2244 const struct ib_send_wr *wr,
2245 struct bnxt_qplib_swqe *wqe)
2246 {
2247 int payload_sz = 0;
2248
2249 if (wr->send_flags & IB_SEND_INLINE)
2250 payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2251 else
2252 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
2253 wqe->num_sge);
2254
2255 return payload_sz;
2256 }
2257
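/* Work around a hardware SQ stall on UD, GSI and raw-Ethertype QPs: after
 * BNXT_RE_UD_QP_HW_STALL WQEs have been posted, issue a state-preserving
 * modify_qp to RTS and reset the WQE counter.
 */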
2258 static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2259 {
2260 if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2261 qp->ib_qp.qp_type == IB_QPT_GSI ||
2262 qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2263 qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2264 int qp_attr_mask;
2265 struct ib_qp_attr qp_attr;
2266
2267 qp_attr_mask = IB_QP_STATE;
2268 qp_attr.qp_state = IB_QPS_RTS;
2269 bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2270 qp->qplib_qp.wqe_cnt = 0;
2271 }
2272 }
2273
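/* Post sends on the driver-owned shadow GSI QP used to relay raw QP1
 * receives; this path only issues plain SEND WQEs.
 */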
2274 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2275 struct bnxt_re_qp *qp,
2276 const struct ib_send_wr *wr)
2277 {
2278 struct bnxt_qplib_swqe wqe;
2279 int rc = 0, payload_sz = 0;
2280 unsigned long flags;
2281
2282 spin_lock_irqsave(&qp->sq_lock, flags);
2283 memset(&wqe, 0, sizeof(wqe));
2284 while (wr) {
2285
2286 memset(&wqe, 0, sizeof(wqe));
2287
2288
2289 wqe.num_sge = wr->num_sge;
2290 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2291 dev_err(rdev_to_dev(rdev),
2292 "Limit exceeded for Send SGEs");
2293 rc = -EINVAL;
2294 goto bad;
2295 }
2296
2297 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2298 if (payload_sz < 0) {
2299 rc = -EINVAL;
2300 goto bad;
2301 }
2302 wqe.wr_id = wr->wr_id;
2303
2304 wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2305
2306 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2307 if (!rc)
2308 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2309 bad:
2310 if (rc) {
2311 dev_err(rdev_to_dev(rdev),
2312 "Post send failed opcode = %#x rc = %d",
2313 wr->opcode, rc);
2314 break;
2315 }
2316 wr = wr->next;
2317 }
2318 bnxt_qplib_post_send_db(&qp->qplib_qp);
2319 bnxt_ud_qp_hw_stall_workaround(qp);
2320 spin_unlock_irqrestore(&qp->sq_lock, flags);
2321 return rc;
2322 }
2323
2324 int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
2325 const struct ib_send_wr **bad_wr)
2326 {
2327 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2328 struct bnxt_qplib_swqe wqe;
2329 int rc = 0, payload_sz = 0;
2330 unsigned long flags;
2331
2332 spin_lock_irqsave(&qp->sq_lock, flags);
2333 while (wr) {
2334
2335 memset(&wqe, 0, sizeof(wqe));
2336
2337
2338 wqe.num_sge = wr->num_sge;
2339 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2340 dev_err(rdev_to_dev(qp->rdev),
2341 "Limit exceeded for Send SGEs");
2342 rc = -EINVAL;
2343 goto bad;
2344 }
2345
2346 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2347 if (payload_sz < 0) {
2348 rc = -EINVAL;
2349 goto bad;
2350 }
2351 wqe.wr_id = wr->wr_id;
2352
2353 switch (wr->opcode) {
2354 case IB_WR_SEND:
2355 case IB_WR_SEND_WITH_IMM:
2356 if (qp->qplib_qp.type == CMDQ_CREATE_QP1_TYPE_GSI) {
2357 rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2358 payload_sz);
2359 if (rc)
2360 goto bad;
2361 wqe.rawqp1.lflags |=
2362 SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2363 }
2364 switch (wr->send_flags) {
2365 case IB_SEND_IP_CSUM:
2366 wqe.rawqp1.lflags |=
2367 SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2368 break;
2369 default:
2370 break;
2371 }
2372
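/* fall through - the common send WQE build is shared with SEND_WITH_INV */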
2373 case IB_WR_SEND_WITH_INV:
2374 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2375 break;
2376 case IB_WR_RDMA_WRITE:
2377 case IB_WR_RDMA_WRITE_WITH_IMM:
2378 case IB_WR_RDMA_READ:
2379 rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2380 break;
2381 case IB_WR_ATOMIC_CMP_AND_SWP:
2382 case IB_WR_ATOMIC_FETCH_AND_ADD:
2383 rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2384 break;
2385 case IB_WR_RDMA_READ_WITH_INV:
2386 dev_err(rdev_to_dev(qp->rdev),
2387 "RDMA Read with Invalidate is not supported");
2388 rc = -EINVAL;
2389 goto bad;
2390 case IB_WR_LOCAL_INV:
2391 rc = bnxt_re_build_inv_wqe(wr, &wqe);
2392 break;
2393 case IB_WR_REG_MR:
2394 rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2395 break;
2396 default:
2397
2398 dev_err(rdev_to_dev(qp->rdev),
2399 "WR (%#x) is not supported", wr->opcode);
2400 rc = -EINVAL;
2401 goto bad;
2402 }
2403 if (!rc)
2404 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2405 bad:
2406 if (rc) {
2407 dev_err(rdev_to_dev(qp->rdev),
2408 "post_send failed op:%#x qps = %#x rc = %d\n",
2409 wr->opcode, qp->qplib_qp.state, rc);
2410 *bad_wr = wr;
2411 break;
2412 }
2413 wr = wr->next;
2414 }
2415 bnxt_qplib_post_send_db(&qp->qplib_qp);
2416 bnxt_ud_qp_hw_stall_workaround(qp);
2417 spin_unlock_irqrestore(&qp->sq_lock, flags);
2418
2419 return rc;
2420 }
2421
2422 static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2423 struct bnxt_re_qp *qp,
2424 const struct ib_recv_wr *wr)
2425 {
2426 struct bnxt_qplib_swqe wqe;
2427 int rc = 0;
2428
2429 memset(&wqe, 0, sizeof(wqe));
2430 while (wr) {
2431
2432 memset(&wqe, 0, sizeof(wqe));
2433
2434
2435 wqe.num_sge = wr->num_sge;
2436 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2437 dev_err(rdev_to_dev(rdev),
2438 "Limit exceeded for Receive SGEs");
2439 rc = -EINVAL;
2440 break;
2441 }
2442 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
2443 wqe.wr_id = wr->wr_id;
2444 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2445
2446 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2447 if (rc)
2448 break;
2449
2450 wr = wr->next;
2451 }
2452 if (!rc)
2453 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2454 return rc;
2455 }
2456
2457 int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
2458 const struct ib_recv_wr **bad_wr)
2459 {
2460 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2461 struct bnxt_qplib_swqe wqe;
2462 int rc = 0, payload_sz = 0;
2463 unsigned long flags;
2464 u32 count = 0;
2465
2466 spin_lock_irqsave(&qp->rq_lock, flags);
2467 while (wr) {
2468
2469 memset(&wqe, 0, sizeof(wqe));
2470
2471
2472 wqe.num_sge = wr->num_sge;
2473 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2474 dev_err(rdev_to_dev(qp->rdev),
2475 "Limit exceeded for Receive SGEs");
2476 rc = -EINVAL;
2477 *bad_wr = wr;
2478 break;
2479 }
2480
2481 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2482 wr->num_sge);
2483 wqe.wr_id = wr->wr_id;
2484 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2485
2486 if (ib_qp->qp_type == IB_QPT_GSI &&
2487 qp->qplib_qp.type != CMDQ_CREATE_QP_TYPE_GSI)
2488 rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2489 payload_sz);
2490 if (!rc)
2491 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2492 if (rc) {
2493 *bad_wr = wr;
2494 break;
2495 }
2496
2497
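/* Ring the RQ doorbell once a batch of BNXT_RE_RQ_WQE_THRESHOLD WQEs has
 * been queued.
 */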
2498 if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2499 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2500 count = 0;
2501 }
2502
2503 wr = wr->next;
2504 }
2505
2506 if (count)
2507 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2508
2509 spin_unlock_irqrestore(&qp->rq_lock, flags);
2510
2511 return rc;
2512 }
2513
2514
2515 void bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
2516 {
2517 struct bnxt_re_cq *cq;
2518 struct bnxt_qplib_nq *nq;
2519 struct bnxt_re_dev *rdev;
2520
2521 cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2522 rdev = cq->rdev;
2523 nq = cq->qplib_cq.nq;
2524
2525 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2526 ib_umem_release(cq->umem);
2527
2528 atomic_dec(&rdev->cq_count);
2529 nq->budget--;
2530 kfree(cq->cql);
2531 }
2532
2533 int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
2534 struct ib_udata *udata)
2535 {
2536 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
2537 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2538 struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
2539 int rc, entries;
2540 int cqe = attr->cqe;
2541 struct bnxt_qplib_nq *nq = NULL;
2542 unsigned int nq_alloc_cnt;
2543
2544
2545 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2546 dev_err(rdev_to_dev(rdev), "Failed to create CQ - max exceeded");
2547 return -EINVAL;
2548 }
2549
2550 cq->rdev = rdev;
2551 cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
2552
2553 entries = roundup_pow_of_two(cqe + 1);
2554 if (entries > dev_attr->max_cq_wqes + 1)
2555 entries = dev_attr->max_cq_wqes + 1;
2556
2557 if (udata) {
2558 struct bnxt_re_cq_req req;
2559 struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
2560 udata, struct bnxt_re_ucontext, ib_uctx);
2561 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2562 rc = -EFAULT;
2563 goto fail;
2564 }
2565
2566 cq->umem = ib_umem_get(udata, req.cq_va,
2567 entries * sizeof(struct cq_base),
2568 IB_ACCESS_LOCAL_WRITE, 1);
2569 if (IS_ERR(cq->umem)) {
2570 rc = PTR_ERR(cq->umem);
2571 goto fail;
2572 }
2573 cq->qplib_cq.sg_info.sglist = cq->umem->sg_head.sgl;
2574 cq->qplib_cq.sg_info.npages = ib_umem_num_pages(cq->umem);
2575 cq->qplib_cq.sg_info.nmap = cq->umem->nmap;
2576 cq->qplib_cq.dpi = &uctx->dpi;
2577 } else {
2578 cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
2579 cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
2580 GFP_KERNEL);
2581 if (!cq->cql) {
2582 rc = -ENOMEM;
2583 goto fail;
2584 }
2585
2586 cq->qplib_cq.dpi = &rdev->dpi_privileged;
2587 }
2588
2589
2590
2591
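/* Spread CQs across the available notification queues (NQs) in
 * round-robin fashion using nq_alloc_cnt.
 */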
2592 nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
2593 nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
2594 cq->qplib_cq.max_wqe = entries;
2595 cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
2596 cq->qplib_cq.nq = nq;
2597
2598 rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
2599 if (rc) {
2600 dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
2601 goto fail;
2602 }
2603
2604 cq->ib_cq.cqe = entries;
2605 cq->cq_period = cq->qplib_cq.period;
2606 nq->budget++;
2607
2608 atomic_inc(&rdev->cq_count);
2609 spin_lock_init(&cq->cq_lock);
2610
2611 if (udata) {
2612 struct bnxt_re_cq_resp resp;
2613
2614 resp.cqid = cq->qplib_cq.id;
2615 resp.tail = cq->qplib_cq.hwq.cons;
2616 resp.phase = cq->qplib_cq.period;
2617 resp.rsvd = 0;
2618 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
2619 if (rc) {
2620 dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
2621 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2622 goto c2fail;
2623 }
2624 }
2625
2626 return 0;
2627
2628 c2fail:
2629 ib_umem_release(cq->umem);
2630 fail:
2631 kfree(cq->cql);
2632 return rc;
2633 }
2634
2635 static u8 __req_to_ib_wc_status(u8 qstatus)
2636 {
2637 switch (qstatus) {
2638 case CQ_REQ_STATUS_OK:
2639 return IB_WC_SUCCESS;
2640 case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
2641 return IB_WC_BAD_RESP_ERR;
2642 case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
2643 return IB_WC_LOC_LEN_ERR;
2644 case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
2645 return IB_WC_LOC_QP_OP_ERR;
2646 case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
2647 return IB_WC_LOC_PROT_ERR;
2648 case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
2649 return IB_WC_GENERAL_ERR;
2650 case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
2651 return IB_WC_REM_INV_REQ_ERR;
2652 case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
2653 return IB_WC_REM_ACCESS_ERR;
2654 case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
2655 return IB_WC_REM_OP_ERR;
2656 case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
2657 return IB_WC_RNR_RETRY_EXC_ERR;
2658 case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
2659 return IB_WC_RETRY_EXC_ERR;
2660 case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
2661 return IB_WC_WR_FLUSH_ERR;
2662 default:
2663 return IB_WC_GENERAL_ERR;
2664 }
2665
2666 }
2667
2668 static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
2669 {
2670 switch (qstatus) {
2671 case CQ_RES_RAWETH_QP1_STATUS_OK:
2672 return IB_WC_SUCCESS;
2673 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
2674 return IB_WC_LOC_ACCESS_ERR;
2675 case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
2676 return IB_WC_LOC_LEN_ERR;
2677 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
2678 return IB_WC_LOC_PROT_ERR;
2679 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
2680 return IB_WC_LOC_QP_OP_ERR;
2681 case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
2682 return IB_WC_GENERAL_ERR;
2683 case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
2684 return IB_WC_WR_FLUSH_ERR;
2685 case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
2686 return IB_WC_WR_FLUSH_ERR;
2687 default:
2688 return IB_WC_GENERAL_ERR;
2689 }
2690 }
2691
2692 static u8 __rc_to_ib_wc_status(u8 qstatus)
2693 {
2694 switch (qstatus) {
2695 case CQ_RES_RC_STATUS_OK:
2696 return IB_WC_SUCCESS;
2697 case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
2698 return IB_WC_LOC_ACCESS_ERR;
2699 case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
2700 return IB_WC_LOC_LEN_ERR;
2701 case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
2702 return IB_WC_LOC_PROT_ERR;
2703 case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
2704 return IB_WC_LOC_QP_OP_ERR;
2705 case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
2706 return IB_WC_GENERAL_ERR;
2707 case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
2708 return IB_WC_REM_INV_REQ_ERR;
2709 case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
2710 return IB_WC_WR_FLUSH_ERR;
2711 case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
2712 return IB_WC_WR_FLUSH_ERR;
2713 default:
2714 return IB_WC_GENERAL_ERR;
2715 }
2716 }
2717
2718 static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
2719 {
2720 switch (cqe->type) {
2721 case BNXT_QPLIB_SWQE_TYPE_SEND:
2722 wc->opcode = IB_WC_SEND;
2723 break;
2724 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
2725 wc->opcode = IB_WC_SEND;
2726 wc->wc_flags |= IB_WC_WITH_IMM;
2727 break;
2728 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
2729 wc->opcode = IB_WC_SEND;
2730 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2731 break;
2732 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
2733 wc->opcode = IB_WC_RDMA_WRITE;
2734 break;
2735 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
2736 wc->opcode = IB_WC_RDMA_WRITE;
2737 wc->wc_flags |= IB_WC_WITH_IMM;
2738 break;
2739 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
2740 wc->opcode = IB_WC_RDMA_READ;
2741 break;
2742 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
2743 wc->opcode = IB_WC_COMP_SWAP;
2744 break;
2745 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
2746 wc->opcode = IB_WC_FETCH_ADD;
2747 break;
2748 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
2749 wc->opcode = IB_WC_LOCAL_INV;
2750 break;
2751 case BNXT_QPLIB_SWQE_TYPE_REG_MR:
2752 wc->opcode = IB_WC_REG_MR;
2753 break;
2754 default:
2755 wc->opcode = IB_WC_SEND;
2756 break;
2757 }
2758
2759 wc->status = __req_to_ib_wc_status(cqe->status);
2760 }
2761
2762 static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
2763 u16 raweth_qp1_flags2)
2764 {
2765 bool is_ipv6 = false, is_ipv4 = false;
2766
2767
2768 if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2769 != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2770 return -1;
2771
2772 if (raweth_qp1_flags2 &
2773 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
2774 raweth_qp1_flags2 &
2775 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
2776
2777 (raweth_qp1_flags2 &
2778 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
2779 (is_ipv6 = true) : (is_ipv4 = true);
2780 return ((is_ipv6) ?
2781 BNXT_RE_ROCEV2_IPV6_PACKET :
2782 BNXT_RE_ROCEV2_IPV4_PACKET);
2783 } else {
2784 return BNXT_RE_ROCE_V1_PACKET;
2785 }
2786 }
2787
2788 static int bnxt_re_to_ib_nw_type(int nw_type)
2789 {
2790 u8 nw_hdr_type = 0xFF;
2791
2792 switch (nw_type) {
2793 case BNXT_RE_ROCE_V1_PACKET:
2794 nw_hdr_type = RDMA_NETWORK_ROCE_V1;
2795 break;
2796 case BNXT_RE_ROCEV2_IPV4_PACKET:
2797 nw_hdr_type = RDMA_NETWORK_IPV4;
2798 break;
2799 case BNXT_RE_ROCEV2_IPV6_PACKET:
2800 nw_hdr_type = RDMA_NETWORK_IPV6;
2801 break;
2802 }
2803 return nw_hdr_type;
2804 }
2805
2806 static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
2807 void *rq_hdr_buf)
2808 {
2809 u8 *tmp_buf = NULL;
2810 struct ethhdr *eth_hdr;
2811 u16 eth_type;
2812 bool rc = false;
2813
2814 tmp_buf = (u8 *)rq_hdr_buf;
2815
2816
2817
2818
2819
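/* If the first bytes of the header buffer do not match the netdev's MAC,
 * this may be a looped-back frame carrying a 4-byte internal header ahead
 * of the Ethernet header; skip it and check the Ethertype or the RoCE v2
 * UDP destination port.
 */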
2820 if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
2821 tmp_buf += 4;
2822
2823 eth_hdr = (struct ethhdr *)tmp_buf;
2824 eth_type = ntohs(eth_hdr->h_proto);
2825 switch (eth_type) {
2826 case ETH_P_IBOE:
2827 rc = true;
2828 break;
2829 case ETH_P_IP:
2830 case ETH_P_IPV6: {
2831 u32 len;
2832 struct udphdr *udp_hdr;
2833
2834 len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
2835 sizeof(struct ipv6hdr));
2836 tmp_buf += sizeof(struct ethhdr) + len;
2837 udp_hdr = (struct udphdr *)tmp_buf;
2838 if (ntohs(udp_hdr->dest) ==
2839 ROCE_V2_UDP_DPORT)
2840 rc = true;
2841 break;
2842 }
2843 default:
2844 break;
2845 }
2846 }
2847
2848 return rc;
2849 }
2850
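/* A raw packet arrived on the GSI (QP1) queue. Relay it to the shadow QP:
 * save the CQE, post the consumer's saved buffer on the shadow RQ and loop
 * the header and payload back with a SEND on the shadow SQ, skipping the
 * Ethernet (and any internal) bytes.
 */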
2851 static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
2852 struct bnxt_qplib_cqe *cqe)
2853 {
2854 struct bnxt_re_dev *rdev = qp1_qp->rdev;
2855 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2856 struct bnxt_re_qp *qp = rdev->qp1_sqp;
2857 struct ib_send_wr *swr;
2858 struct ib_ud_wr udwr;
2859 struct ib_recv_wr rwr;
2860 int pkt_type = 0;
2861 u32 tbl_idx;
2862 void *rq_hdr_buf;
2863 dma_addr_t rq_hdr_buf_map;
2864 dma_addr_t shrq_hdr_buf_map;
2865 u32 offset = 0;
2866 u32 skip_bytes = 0;
2867 struct ib_sge s_sge[2];
2868 struct ib_sge r_sge[2];
2869 int rc;
2870
2871 memset(&udwr, 0, sizeof(udwr));
2872 memset(&rwr, 0, sizeof(rwr));
2873 memset(&s_sge, 0, sizeof(s_sge));
2874 memset(&r_sge, 0, sizeof(r_sge));
2875
2876 swr = &udwr.wr;
2877 tbl_idx = cqe->wr_id;
2878
2879 rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
2880 (tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
2881 rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
2882 tbl_idx);
2883
2884
2885 shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
2886 tbl_idx);
2887 sqp_entry = &rdev->sqp_tbl[tbl_idx];
2888
2889
2890 memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
2891 sqp_entry->qp1_qp = qp1_qp;
2892
2893
2894
2895 pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
2896 cqe->raweth_qp1_flags2);
2897 if (pkt_type < 0) {
2898 dev_err(rdev_to_dev(rdev), "Invalid packet\n");
2899 return -EINVAL;
2900 }
2901
2902
2903
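/* RoCE v2 over IPv4 uses a 20-byte IP header instead of the 40-byte
 * GRH/IPv6 header; adjust the consumer-buffer offset accordingly.
 */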
2904 if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
2905 offset = 20;
2906
2907
2908
2909
2910
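/* Looped-back QP1 frames carry 4 extra bytes of internal header before
 * the Ethernet header; skip them.
 */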
2911 if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
2912 skip_bytes = 4;
2913
2914
2915 s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
2916 + skip_bytes;
2917 s_sge[0].lkey = 0xFFFFFFFF;
2918 s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
2919 BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
2920
2921
2922 s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
2923 BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
2924 if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
2925 s_sge[1].addr += 8;
2926 s_sge[1].lkey = 0xFFFFFFFF;
2927 s_sge[1].length = 256;
2928
2929
2930
2931 r_sge[0].addr = shrq_hdr_buf_map;
2932 r_sge[0].lkey = 0xFFFFFFFF;
2933 r_sge[0].length = 40;
2934
2935 r_sge[1].addr = sqp_entry->sge.addr + offset;
2936 r_sge[1].lkey = sqp_entry->sge.lkey;
2937 r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
2938
2939
2940 rwr.num_sge = 2;
2941 rwr.sg_list = r_sge;
2942 rwr.wr_id = tbl_idx;
2943 rwr.next = NULL;
2944
2945 rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
2946 if (rc) {
2947 dev_err(rdev_to_dev(rdev),
2948 "Failed to post Rx buffers to shadow QP");
2949 return -ENOMEM;
2950 }
2951
2952 swr->num_sge = 2;
2953 swr->sg_list = s_sge;
2954 swr->wr_id = tbl_idx;
2955 swr->opcode = IB_WR_SEND;
2956 swr->next = NULL;
2957
2958 udwr.ah = &rdev->sqp_ah->ib_ah;
2959 udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
2960 udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;
2961
2962
2963 rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);
2964
2965 return 0;
2966 }
2967
2968 static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
2969 struct bnxt_qplib_cqe *cqe)
2970 {
2971 wc->opcode = IB_WC_RECV;
2972 wc->status = __rawqp1_to_ib_wc_status(cqe->status);
2973 wc->wc_flags |= IB_WC_GRH;
2974 }
2975
2976 static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
2977 u16 *vid, u8 *sl)
2978 {
2979 bool ret = false;
2980 u32 metadata;
2981 u16 tpid;
2982
2983 metadata = orig_cqe->raweth_qp1_metadata;
2984 if (orig_cqe->raweth_qp1_flags2 &
2985 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
2986 tpid = ((metadata &
2987 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
2988 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
2989 if (tpid == ETH_P_8021Q) {
2990 *vid = metadata &
2991 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
2992 *sl = (metadata &
2993 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
2994 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
2995 ret = true;
2996 }
2997 }
2998
2999 return ret;
3000 }
3001
3002 static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
3003 struct bnxt_qplib_cqe *cqe)
3004 {
3005 wc->opcode = IB_WC_RECV;
3006 wc->status = __rc_to_ib_wc_status(cqe->status);
3007
3008 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3009 wc->wc_flags |= IB_WC_WITH_IMM;
3010 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3011 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3012 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3013 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3014 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3015 }
3016
3017 static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
3018 struct ib_wc *wc,
3019 struct bnxt_qplib_cqe *cqe)
3020 {
3021 struct bnxt_re_dev *rdev = qp->rdev;
3022 struct bnxt_re_qp *qp1_qp = NULL;
3023 struct bnxt_qplib_cqe *orig_cqe = NULL;
3024 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3025 int nw_type;
3026 u32 tbl_idx;
3027 u16 vlan_id;
3028 u8 sl;
3029
3030 tbl_idx = cqe->wr_id;
3031
3032 sqp_entry = &rdev->sqp_tbl[tbl_idx];
3033 qp1_qp = sqp_entry->qp1_qp;
3034 orig_cqe = &sqp_entry->cqe;
3035
3036 wc->wr_id = sqp_entry->wrid;
3037 wc->byte_len = orig_cqe->length;
3038 wc->qp = &qp1_qp->ib_qp;
3039
3040 wc->ex.imm_data = orig_cqe->immdata;
3041 wc->src_qp = orig_cqe->src_qp;
3042 memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
3043 if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
3044 wc->vlan_id = vlan_id;
3045 wc->sl = sl;
3046 wc->wc_flags |= IB_WC_WITH_VLAN;
3047 }
3048 wc->port_num = 1;
3049 wc->vendor_err = orig_cqe->status;
3050
3051 wc->opcode = IB_WC_RECV;
3052 wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
3053 wc->wc_flags |= IB_WC_GRH;
3054
3055 nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
3056 orig_cqe->raweth_qp1_flags2);
3057 if (nw_type >= 0) {
3058 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3059 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3060 }
3061 }
3062
3063 static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp,
3064 struct ib_wc *wc,
3065 struct bnxt_qplib_cqe *cqe)
3066 {
3067 u8 nw_type;
3068
3069 wc->opcode = IB_WC_RECV;
3070 wc->status = __rc_to_ib_wc_status(cqe->status);
3071
3072 if (cqe->flags & CQ_RES_UD_FLAGS_IMM)
3073 wc->wc_flags |= IB_WC_WITH_IMM;
3074
3075 if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI) {
3076 wc->wc_flags |= IB_WC_GRH;
3077 memcpy(wc->smac, cqe->smac, ETH_ALEN);
3078 wc->wc_flags |= IB_WC_WITH_SMAC;
3079 if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) {
3080 wc->vlan_id = (cqe->cfa_meta & 0xFFF);
3081 if (wc->vlan_id < 0x1000)
3082 wc->wc_flags |= IB_WC_WITH_VLAN;
3083 }
3084 nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >>
3085 CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT;
3086 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3087 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3088 }
3089
3090 }
3091
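/* Post a phantom WQE (a fence MW bind) on the SQ. The poll path sets
 * sq->send_phantom when the qplib layer needs such an entry; see
 * bnxt_re_poll_cq().
 */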
3092 static int send_phantom_wqe(struct bnxt_re_qp *qp)
3093 {
3094 struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
3095 unsigned long flags;
3096 int rc = 0;
3097
3098 spin_lock_irqsave(&qp->sq_lock, flags);
3099
3100 rc = bnxt_re_bind_fence_mw(lib_qp);
3101 if (!rc) {
3102 lib_qp->sq.phantom_wqe_cnt++;
3103 dev_dbg(&lib_qp->sq.hwq.pdev->dev,
3104 "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
3105 lib_qp->id, lib_qp->sq.hwq.prod,
3106 HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
3107 lib_qp->sq.phantom_wqe_cnt);
3108 }
3109
3110 spin_unlock_irqrestore(&qp->sq_lock, flags);
3111 return rc;
3112 }
3113
3114 int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
3115 {
3116 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3117 struct bnxt_re_qp *qp;
3118 struct bnxt_qplib_cqe *cqe;
3119 int i, ncqe, budget;
3120 struct bnxt_qplib_q *sq;
3121 struct bnxt_qplib_qp *lib_qp;
3122 u32 tbl_idx;
3123 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3124 unsigned long flags;
3125
3126 spin_lock_irqsave(&cq->cq_lock, flags);
3127 budget = min_t(u32, num_entries, cq->max_cql);
3128 num_entries = budget;
3129 if (!cq->cql) {
3130 dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
3131 goto exit;
3132 }
3133 cqe = &cq->cql[0];
3134 while (budget) {
3135 lib_qp = NULL;
3136 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
3137 if (lib_qp) {
3138 sq = &lib_qp->sq;
3139 if (sq->send_phantom) {
3140 qp = container_of(lib_qp,
3141 struct bnxt_re_qp, qplib_qp);
3142 if (send_phantom_wqe(qp) == -ENOMEM)
3143 dev_err(rdev_to_dev(cq->rdev),
3144 "Phantom failed! Scheduled to send again\n");
3145 else
3146 sq->send_phantom = false;
3147 }
3148 }
3149 if (ncqe < budget)
3150 ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
3151 cqe + ncqe,
3152 budget - ncqe);
3153
3154 if (!ncqe)
3155 break;
3156
3157 for (i = 0; i < ncqe; i++, cqe++) {
3158
3159 memset(wc, 0, sizeof(*wc));
3160
3161 wc->wr_id = cqe->wr_id;
3162 wc->byte_len = cqe->length;
3163 qp = container_of
3164 ((struct bnxt_qplib_qp *)
3165 (unsigned long)(cqe->qp_handle),
3166 struct bnxt_re_qp, qplib_qp);
3167 if (!qp) {
3168 dev_err(rdev_to_dev(cq->rdev),
3169 "POLL CQ : bad QP handle");
3170 continue;
3171 }
3172 wc->qp = &qp->ib_qp;
3173 wc->ex.imm_data = cqe->immdata;
3174 wc->src_qp = cqe->src_qp;
3175 memcpy(wc->smac, cqe->smac, ETH_ALEN);
3176 wc->port_num = 1;
3177 wc->vendor_err = cqe->status;
3178
3179 switch (cqe->opcode) {
3180 case CQ_BASE_CQE_TYPE_REQ:
3181 if (qp->rdev->qp1_sqp && qp->qplib_qp.id ==
3182 qp->rdev->qp1_sqp->qplib_qp.id) {
3183
3184
3185
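/* Send completions on the shadow GSI QP are internal to the driver;
 * do not report them to the consumer.
 */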
3186 memset(wc, 0, sizeof(*wc));
3187 continue;
3188 }
3189 bnxt_re_process_req_wc(wc, cqe);
3190 break;
3191 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3192 if (!cqe->status) {
3193 int rc = 0;
3194
3195 rc = bnxt_re_process_raw_qp_pkt_rx
3196 (qp, cqe);
3197 if (!rc) {
3198 memset(wc, 0, sizeof(*wc));
3199 continue;
3200 }
3201 cqe->status = -1;
3202 }
3203
3204
3205
3206
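/* Relay failed or the CQE carried an error: report it to the consumer
 * using the wr_id saved in the SQP table.
 */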
3207 tbl_idx = cqe->wr_id;
3208 sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
3209 wc->wr_id = sqp_entry->wrid;
3210 bnxt_re_process_res_rawqp1_wc(wc, cqe);
3211 break;
3212 case CQ_BASE_CQE_TYPE_RES_RC:
3213 bnxt_re_process_res_rc_wc(wc, cqe);
3214 break;
3215 case CQ_BASE_CQE_TYPE_RES_UD:
3216 if (qp->rdev->qp1_sqp && qp->qplib_qp.id ==
3217 qp->rdev->qp1_sqp->qplib_qp.id) {
3218
3219
3220
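/* Receive completion on the shadow QP: drop errored ones, otherwise
 * rebuild the work completion from the CQE stored when the raw QP1
 * packet was relayed.
 */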
3221 if (cqe->status) {
3222 continue;
3223 } else {
3224 bnxt_re_process_res_shadow_qp_wc
3225 (qp, wc, cqe);
3226 break;
3227 }
3228 }
3229 bnxt_re_process_res_ud_wc(qp, wc, cqe);
3230 break;
3231 default:
3232 dev_err(rdev_to_dev(cq->rdev),
3233 "POLL CQ : type 0x%x not handled",
3234 cqe->opcode);
3235 continue;
3236 }
3237 wc++;
3238 budget--;
3239 }
3240 }
3241 exit:
3242 spin_unlock_irqrestore(&cq->cq_lock, flags);
3243 return num_entries - budget;
3244 }
3245
3246 int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
3247 enum ib_cq_notify_flags ib_cqn_flags)
3248 {
3249 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3250 int type = 0, rc = 0;
3251 unsigned long flags;
3252
3253 spin_lock_irqsave(&cq->cq_lock, flags);
3254
3255 if (ib_cqn_flags & IB_CQ_NEXT_COMP)
3256 type = DBC_DBC_TYPE_CQ_ARMALL;
3257
3258 else if (ib_cqn_flags & IB_CQ_SOLICITED)
3259 type = DBC_DBC_TYPE_CQ_ARMSE;
3260
3261
3262 if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3263 !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
3264 rc = 1;
3265 goto exit;
3266 }
3267 bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
3268
3269 exit:
3270 spin_unlock_irqrestore(&cq->cq_lock, flags);
3271 return rc;
3272 }
3273
3274
3275 struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
3276 {
3277 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3278 struct bnxt_re_dev *rdev = pd->rdev;
3279 struct bnxt_re_mr *mr;
3280 u64 pbl = 0;
3281 int rc;
3282
3283 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3284 if (!mr)
3285 return ERR_PTR(-ENOMEM);
3286
3287 mr->rdev = rdev;
3288 mr->qplib_mr.pd = &pd->qplib_pd;
3289 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3290 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3291
3292
3293 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3294 if (rc)
3295 goto fail;
3296
3297 mr->qplib_mr.hwq.level = PBL_LVL_MAX;
3298 mr->qplib_mr.total_size = -1;
3299 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false,
3300 PAGE_SIZE);
3301 if (rc)
3302 goto fail_mr;
3303
3304 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3305 if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
3306 IB_ACCESS_REMOTE_ATOMIC))
3307 mr->ib_mr.rkey = mr->ib_mr.lkey;
3308 atomic_inc(&rdev->mr_count);
3309
3310 return &mr->ib_mr;
3311
3312 fail_mr:
3313 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3314 fail:
3315 kfree(mr);
3316 return ERR_PTR(rc);
3317 }
3318
3319 int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
3320 {
3321 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3322 struct bnxt_re_dev *rdev = mr->rdev;
3323 int rc;
3324
3325 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3326 if (rc) {
3327 dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
3328 return rc;
3329 }
3330
3331 if (mr->pages) {
3332 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
3333 &mr->qplib_frpl);
3334 kfree(mr->pages);
3335 mr->npages = 0;
3336 mr->pages = NULL;
3337 }
3338 ib_umem_release(mr->ib_umem);
3339
3340 kfree(mr);
3341 atomic_dec(&rdev->mr_count);
3342 return rc;
3343 }
3344
3345 static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
3346 {
3347 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3348
3349 if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
3350 return -ENOMEM;
3351
3352 mr->pages[mr->npages++] = addr;
3353 return 0;
3354 }
3355
3356 int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
3357 unsigned int *sg_offset)
3358 {
3359 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3360
3361 mr->npages = 0;
3362 return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
3363 }
3364
3365 struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
3366 u32 max_num_sg, struct ib_udata *udata)
3367 {
3368 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3369 struct bnxt_re_dev *rdev = pd->rdev;
3370 struct bnxt_re_mr *mr = NULL;
3371 int rc;
3372
3373 if (type != IB_MR_TYPE_MEM_REG) {
3374 dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type);
3375 return ERR_PTR(-EINVAL);
3376 }
3377 if (max_num_sg > MAX_PBL_LVL_1_PGS)
3378 return ERR_PTR(-EINVAL);
3379
3380 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3381 if (!mr)
3382 return ERR_PTR(-ENOMEM);
3383
3384 mr->rdev = rdev;
3385 mr->qplib_mr.pd = &pd->qplib_pd;
3386 mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
3387 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3388
3389 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3390 if (rc)
3391 goto bail;
3392
3393 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3394 mr->ib_mr.rkey = mr->ib_mr.lkey;
3395
3396 mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
3397 if (!mr->pages) {
3398 rc = -ENOMEM;
3399 goto fail;
3400 }
3401 rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
3402 &mr->qplib_frpl, max_num_sg);
3403 if (rc) {
3404 dev_err(rdev_to_dev(rdev),
3405 "Failed to allocate HW FR page list");
3406 goto fail_mr;
3407 }
3408
3409 atomic_inc(&rdev->mr_count);
3410 return &mr->ib_mr;
3411
3412 fail_mr:
3413 kfree(mr->pages);
3414 fail:
3415 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3416 bail:
3417 kfree(mr);
3418 return ERR_PTR(rc);
3419 }
3420
3421 struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
3422 struct ib_udata *udata)
3423 {
3424 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3425 struct bnxt_re_dev *rdev = pd->rdev;
3426 struct bnxt_re_mw *mw;
3427 int rc;
3428
3429 mw = kzalloc(sizeof(*mw), GFP_KERNEL);
3430 if (!mw)
3431 return ERR_PTR(-ENOMEM);
3432 mw->rdev = rdev;
3433 mw->qplib_mw.pd = &pd->qplib_pd;
3434
3435 mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
3436 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
3437 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
3438 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
3439 if (rc) {
3440 dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
3441 goto fail;
3442 }
3443 mw->ib_mw.rkey = mw->qplib_mw.rkey;
3444
3445 atomic_inc(&rdev->mw_count);
3446 return &mw->ib_mw;
3447
3448 fail:
3449 kfree(mw);
3450 return ERR_PTR(rc);
3451 }
3452
3453 int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
3454 {
3455 struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
3456 struct bnxt_re_dev *rdev = mw->rdev;
3457 int rc;
3458
3459 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
3460 if (rc) {
3461 dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
3462 return rc;
3463 }
3464
3465 kfree(mw);
3466 atomic_dec(&rdev->mw_count);
3467 return rc;
3468 }
3469
3470 static int bnxt_re_page_size_ok(int page_shift)
3471 {
3472 switch (page_shift) {
3473 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K:
3474 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K:
3475 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K:
3476 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M:
3477 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K:
3478 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M:
3479 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M:
3480 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G:
3481 return 1;
3482 default:
3483 return 0;
3484 }
3485 }
3486
3487 static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
3488 int page_shift)
3489 {
3490 u64 *pbl_tbl = pbl_tbl_orig;
3491 u64 page_size = BIT_ULL(page_shift);
3492 struct ib_block_iter biter;
3493
3494 rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, page_size)
3495 *pbl_tbl++ = rdma_block_iter_dma_address(&biter);
3496
3497 return pbl_tbl - pbl_tbl_orig;
3498 }
3499
3500
3501 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
3502 u64 virt_addr, int mr_access_flags,
3503 struct ib_udata *udata)
3504 {
3505 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3506 struct bnxt_re_dev *rdev = pd->rdev;
3507 struct bnxt_re_mr *mr;
3508 struct ib_umem *umem;
3509 u64 *pbl_tbl = NULL;
3510 int umem_pgs, page_shift, rc;
3511
3512 if (length > BNXT_RE_MAX_MR_SIZE) {
3513 dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%lld\n",
3514 length, BNXT_RE_MAX_MR_SIZE);
3515 return ERR_PTR(-ENOMEM);
3516 }
3517
3518 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3519 if (!mr)
3520 return ERR_PTR(-ENOMEM);
3521
3522 mr->rdev = rdev;
3523 mr->qplib_mr.pd = &pd->qplib_pd;
3524 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3525 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
3526
3527 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3528 if (rc) {
3529 dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
3530 goto free_mr;
3531 }
3532
3533 mr->ib_mr.rkey = mr->qplib_mr.rkey;
3534
3535 umem = ib_umem_get(udata, start, length, mr_access_flags, 0);
3536 if (IS_ERR(umem)) {
3537 dev_err(rdev_to_dev(rdev), "Failed to get umem");
3538 rc = -EFAULT;
3539 goto free_mrw;
3540 }
3541 mr->ib_umem = umem;
3542
3543 mr->qplib_mr.va = virt_addr;
3544 umem_pgs = ib_umem_page_count(umem);
3545 if (!umem_pgs) {
3546 dev_err(rdev_to_dev(rdev), "umem is invalid!");
3547 rc = -EINVAL;
3548 goto free_umem;
3549 }
3550 mr->qplib_mr.total_size = length;
3551
3552 pbl_tbl = kcalloc(umem_pgs, sizeof(u64), GFP_KERNEL);
3553 if (!pbl_tbl) {
3554 rc = -ENOMEM;
3555 goto free_umem;
3556 }
3557
3558 page_shift = __ffs(ib_umem_find_best_pgsz(umem,
3559 BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M,
3560 virt_addr));
3561
3562 if (!bnxt_re_page_size_ok(page_shift)) {
3563 dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
3564 rc = -EFAULT;
3565 goto fail;
3566 }
3567
3568 if (page_shift == BNXT_RE_PAGE_SHIFT_4K &&
3569 length > BNXT_RE_MAX_MR_SIZE_LOW) {
3570 dev_err(rdev_to_dev(rdev), "Requested MR Sz:%llu Max sup:%llu",
3571 length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
3572 rc = -EINVAL;
3573 goto fail;
3574 }
3575
3576
3577 umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift);
3578 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl,
3579 umem_pgs, false, 1 << page_shift);
3580 if (rc) {
3581 dev_err(rdev_to_dev(rdev), "Failed to register user MR");
3582 goto fail;
3583 }
3584
3585 kfree(pbl_tbl);
3586
3587 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3588 mr->ib_mr.rkey = mr->qplib_mr.lkey;
3589 atomic_inc(&rdev->mr_count);
3590
3591 return &mr->ib_mr;
3592 fail:
3593 kfree(pbl_tbl);
3594 free_umem:
3595 ib_umem_release(umem);
3596 free_mrw:
3597 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3598 free_mr:
3599 kfree(mr);
3600 return ERR_PTR(rc);
3601 }
3602
3603 int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
3604 {
3605 struct ib_device *ibdev = ctx->device;
3606 struct bnxt_re_ucontext *uctx =
3607 container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
3608 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
3609 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
3610 struct bnxt_re_uctx_resp resp;
3611 u32 chip_met_rev_num = 0;
3612 int rc;
3613
3614 dev_dbg(rdev_to_dev(rdev), "ABI version requested %u",
3615 ibdev->ops.uverbs_abi_ver);
3616
3617 if (ibdev->ops.uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
3618 dev_dbg(rdev_to_dev(rdev), "Requested ABI version differs from the device's %d",
3619 BNXT_RE_ABI_VERSION);
3620 return -EPERM;
3621 }
3622
3623 uctx->rdev = rdev;
3624
3625 uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
3626 if (!uctx->shpg) {
3627 rc = -ENOMEM;
3628 goto fail;
3629 }
3630 spin_lock_init(&uctx->sh_lock);
3631
3632 resp.comp_mask = BNXT_RE_UCNTX_CMASK_HAVE_CCTX;
3633 chip_met_rev_num = rdev->chip_ctx.chip_num;
3634 chip_met_rev_num |= ((u32)rdev->chip_ctx.chip_rev & 0xFF) <<
3635 BNXT_RE_CHIP_ID0_CHIP_REV_SFT;
3636 chip_met_rev_num |= ((u32)rdev->chip_ctx.chip_metal & 0xFF) <<
3637 BNXT_RE_CHIP_ID0_CHIP_MET_SFT;
3638 resp.chip_id0 = chip_met_rev_num;
3639
3640 resp.chip_id1 = 0;
3641
3642 resp.dev_id = rdev->en_dev->pdev->devfn;
3643 resp.max_qp = rdev->qplib_ctx.qpc_count;
3644 resp.pg_size = PAGE_SIZE;
3645 resp.cqe_sz = sizeof(struct cq_base);
3646 resp.max_cqd = dev_attr->max_cq_wqes;
3647 resp.rsvd = 0;
3648
3649 rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
3650 if (rc) {
3651 dev_err(rdev_to_dev(rdev), "Failed to copy user context");
3652 rc = -EFAULT;
3653 goto cfail;
3654 }
3655
3656 return 0;
3657 cfail:
3658 free_page((unsigned long)uctx->shpg);
3659 uctx->shpg = NULL;
3660 fail:
3661 return rc;
3662 }
3663
3664 void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
3665 {
3666 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3667 struct bnxt_re_ucontext,
3668 ib_uctx);
3669
3670 struct bnxt_re_dev *rdev = uctx->rdev;
3671
3672 if (uctx->shpg)
3673 free_page((unsigned long)uctx->shpg);
3674
3675 if (uctx->dpi.dbr) {
3676
3677
3678
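/* Release the doorbell page (DPI) still held by this context */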
3679 bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
3680 &rdev->qplib_res.dpi_tbl, &uctx->dpi);
3681 uctx->dpi.dbr = NULL;
3682 }
3683 }
3684
3685
3686 int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
3687 {
3688 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3689 struct bnxt_re_ucontext,
3690 ib_uctx);
3691 struct bnxt_re_dev *rdev = uctx->rdev;
3692 u64 pfn;
3693
3694 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
3695 return -EINVAL;
3696
3697 if (vma->vm_pgoff) {
3698 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3699 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
3700 PAGE_SIZE, vma->vm_page_prot)) {
3701 dev_err(rdev_to_dev(rdev), "Failed to map DPI");
3702 return -EAGAIN;
3703 }
3704 } else {
3705 pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
3706 if (remap_pfn_range(vma, vma->vm_start,
3707 pfn, PAGE_SIZE, vma->vm_page_prot)) {
3708 dev_err(rdev_to_dev(rdev),
3709 "Failed to map shared page");
3710 return -EAGAIN;
3711 }
3712 }
3713
3714 return 0;
3715 }