This source file includes the following definitions:
- hns_roce_ib_cq_comp
- hns_roce_ib_cq_event
- hns_roce_sw2hw_cq
- hns_roce_cq_alloc
- hns_roce_hw2sw_cq
- hns_roce_free_cq
- hns_roce_ib_get_cq_umem
- hns_roce_ib_alloc_cq_buf
- hns_roce_ib_free_cq_buf
- create_user_cq
- create_kernel_cq
- destroy_user_cq
- destroy_kernel_cq
- hns_roce_ib_create_cq
- hns_roce_ib_destroy_cq
- hns_roce_cq_completion
- hns_roce_cq_event
- hns_roce_init_cq_table
- hns_roce_cleanup_cq_table
#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include <rdma/hns-abi.h>
#include "hns_roce_common.h"

static void hns_roce_ib_cq_comp(struct hns_roce_cq *hr_cq)
{
	struct ib_cq *ibcq = &hr_cq->ib_cq;

	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq,
				 enum hns_roce_event event_type)
{
	struct hns_roce_dev *hr_dev;
	struct ib_event event;
	struct ib_cq *ibcq;

	ibcq = &hr_cq->ib_cq;
	hr_dev = to_hr_dev(ibcq->device);

	if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
	    event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
	    event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
		dev_err(hr_dev->dev,
			"hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n",
			event_type, hr_cq->cqn);
		return;
	}

	if (ibcq->event_handler) {
		event.device = ibcq->device;
		event.event = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev,
			     struct hns_roce_cmd_mailbox *mailbox,
			     unsigned long cq_num)
{
	return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cq_num, 0,
				 HNS_ROCE_CMD_SW2HW_CQ,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
			     struct hns_roce_mtt *hr_mtt,
			     struct hns_roce_cq *hr_cq, int vector)
{
	struct hns_roce_cmd_mailbox *mailbox;
	struct hns_roce_hem_table *mtt_table;
	struct hns_roce_cq_table *cq_table;
	struct device *dev = hr_dev->dev;
	dma_addr_t dma_handle;
	u64 *mtts;
	int ret;

	cq_table = &hr_dev->cq_table;

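	/* Find the MTT entries and DMA address of the CQ buffer */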
	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
		mtt_table = &hr_dev->mr_table.mtt_cqe_table;
	else
		mtt_table = &hr_dev->mr_table.mtt_table;

	mtts = hns_roce_table_find(hr_dev, mtt_table,
				   hr_mtt->first_seg, &dma_handle);
	if (!mtts) {
		dev_err(dev, "CQ alloc: failed to find CQ buf addr.\n");
		return -EINVAL;
	}

	if (vector >= hr_dev->caps.num_comp_vectors) {
		dev_err(dev, "CQ alloc: invalid vector.\n");
		return -EINVAL;
	}
	hr_cq->vector = vector;

	ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
	if (ret == -1) {
		dev_err(dev, "CQ alloc: failed to alloc index.\n");
		return -ENOMEM;
	}

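	/* Get CQ context memory from the CQC HEM table */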
	ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
	if (ret) {
		dev_err(dev, "CQ alloc: failed to get context mem.\n");
		goto err_out;
	}

	ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
	if (ret) {
		dev_err(dev, "CQ alloc: xa_store failed.\n");
		goto err_put;
	}

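	/* Allocate a command mailbox and fill in the CQ context */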
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_xa;
	}

	hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle,
			      nent, vector);

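	/* Post the SW2HW_CQ mailbox command to hand the CQ over to hardware */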
	ret = hns_roce_sw2hw_cq(hr_dev, mailbox, hr_cq->cqn);
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	if (ret) {
		dev_err(dev, "CQ alloc: SW2HW_CQ mailbox cmd failed.\n");
		goto err_xa;
	}

	hr_cq->cons_index = 0;
	hr_cq->arm_sn = 1;

	atomic_set(&hr_cq->refcount, 1);
	init_completion(&hr_cq->free);

	return 0;

err_xa:
	xa_erase(&cq_table->array, hr_cq->cqn);

err_put:
	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);

err_out:
	hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
	return ret;
}

static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev,
			     struct hns_roce_cmd_mailbox *mailbox,
			     unsigned long cq_num)
{
	return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
				 mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_CQ,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct device *dev = hr_dev->dev;
	int ret;

	ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn);
	if (ret)
		dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret,
			hr_cq->cqn);

	xa_erase(&cq_table->array, hr_cq->cqn);

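	/* Wait for any completion IRQ handler still using this CQ to finish */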
	synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);

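	/* Drop the initial reference and wait until all users have released the CQ */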
	if (atomic_dec_and_test(&hr_cq->refcount))
		complete(&hr_cq->free);
	wait_for_completion(&hr_cq->free);

	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
	hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
}

static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
				   struct ib_udata *udata,
				   struct hns_roce_cq_buf *buf,
				   struct ib_umem **umem, u64 buf_addr, int cqe)
{
	int ret;
	u32 page_shift;
	u32 npages;

	*umem = ib_umem_get(udata, buf_addr, cqe * hr_dev->caps.cq_entry_sz,
			    IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
		buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
	else
		buf->hr_mtt.mtt_type = MTT_TYPE_WQE;

	if (hr_dev->caps.cqe_buf_pg_sz) {
		npages = (ib_umem_page_count(*umem) +
			  (1 << hr_dev->caps.cqe_buf_pg_sz) - 1) /
			 (1 << hr_dev->caps.cqe_buf_pg_sz);
		page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
		ret = hns_roce_mtt_init(hr_dev, npages, page_shift,
					&buf->hr_mtt);
	} else {
		ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
					PAGE_SHIFT, &buf->hr_mtt);
	}
	if (ret)
		goto err_buf;

	ret = hns_roce_ib_umem_write_mtt(hr_dev, &buf->hr_mtt, *umem);
	if (ret)
		goto err_mtt;

	return 0;

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);

err_buf:
	ib_umem_release(*umem);
	return ret;
}

static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
				    struct hns_roce_cq_buf *buf, u32 nent)
{
	int ret;
	u32 page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;

	ret = hns_roce_buf_alloc(hr_dev, nent * hr_dev->caps.cq_entry_sz,
				 (1 << page_shift) * 2, &buf->hr_buf,
				 page_shift);
	if (ret)
		goto out;

	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
		buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
	else
		buf->hr_mtt.mtt_type = MTT_TYPE_WQE;

	ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages,
				buf->hr_buf.page_shift, &buf->hr_mtt);
	if (ret)
		goto err_buf;

	ret = hns_roce_buf_write_mtt(hr_dev, &buf->hr_mtt, &buf->hr_buf);
	if (ret)
		goto err_mtt;

	return 0;

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);

err_buf:
	hns_roce_buf_free(hr_dev, nent * hr_dev->caps.cq_entry_sz,
			  &buf->hr_buf);
out:
	return ret;
}

static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev,
				    struct hns_roce_cq_buf *buf, int cqe)
{
	hns_roce_buf_free(hr_dev, (cqe + 1) * hr_dev->caps.cq_entry_sz,
			  &buf->hr_buf);
}

static int create_user_cq(struct hns_roce_dev *hr_dev,
			  struct hns_roce_cq *hr_cq,
			  struct ib_udata *udata,
			  struct hns_roce_ib_create_cq_resp *resp,
			  int cq_entries)
{
	struct hns_roce_ib_create_cq ucmd;
	struct device *dev = hr_dev->dev;
	int ret;
	struct hns_roce_ucontext *context = rdma_udata_to_drv_context(
				udata, struct hns_roce_ucontext, ibucontext);

	if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
		dev_err(dev, "Failed to copy_from_udata.\n");
		return -EFAULT;
	}

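	/* Pin the user-space CQ buffer and write its pages into the MTT */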
	ret = hns_roce_ib_get_cq_umem(hr_dev, udata, &hr_cq->hr_buf,
				      &hr_cq->umem, ucmd.buf_addr,
				      cq_entries);
	if (ret) {
		dev_err(dev, "Failed to get_cq_umem.\n");
		return ret;
	}

	if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
	    (udata->outlen >= sizeof(*resp))) {
		ret = hns_roce_db_map_user(context, udata, ucmd.db_addr,
					   &hr_cq->db);
		if (ret) {
			dev_err(dev, "cq record doorbell map failed!\n");
			goto err_mtt;
		}
		hr_cq->db_en = 1;
		resp->cap_flags |= HNS_ROCE_SUPPORT_CQ_RECORD_DB;
	}

	return 0;

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
	ib_umem_release(hr_cq->umem);

	return ret;
}

static int create_kernel_cq(struct hns_roce_dev *hr_dev,
			    struct hns_roce_cq *hr_cq, int cq_entries)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_uar *uar;
	int ret;

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
		ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
		if (ret)
			return ret;

		hr_cq->set_ci_db = hr_cq->db.db_record;
		*hr_cq->set_ci_db = 0;
		hr_cq->db_en = 1;
	}

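	/* Allocate the kernel CQ buffer and set up its MTT */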
	ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf, cq_entries);
	if (ret) {
		dev_err(dev, "Failed to alloc_cq_buf.\n");
		goto err_db;
	}

	uar = &hr_dev->priv_uar;
	hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
			 DB_REG_OFFSET * uar->index;

	return 0;

err_db:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
		hns_roce_free_db(hr_dev, &hr_cq->db);

	return ret;
}

static void destroy_user_cq(struct hns_roce_dev *hr_dev,
			    struct hns_roce_cq *hr_cq,
			    struct ib_udata *udata,
			    struct hns_roce_ib_create_cq_resp *resp)
{
	struct hns_roce_ucontext *context = rdma_udata_to_drv_context(
				udata, struct hns_roce_ucontext, ibucontext);

	if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
	    (udata->outlen >= sizeof(*resp)))
		hns_roce_db_unmap_user(context, &hr_cq->db);

	hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
	ib_umem_release(hr_cq->umem);
}

static void destroy_kernel_cq(struct hns_roce_dev *hr_dev,
			      struct hns_roce_cq *hr_cq)
{
	hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
	hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, hr_cq->ib_cq.cqe);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
		hns_roce_free_db(hr_dev, &hr_cq->db);
}

int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
			  const struct ib_cq_init_attr *attr,
			  struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
	struct device *dev = hr_dev->dev;
	struct hns_roce_ib_create_cq_resp resp = {};
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
	int vector = attr->comp_vector;
	int cq_entries = attr->cqe;
	int ret;

	if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
		dev_err(dev, "Create CQ failed: entries=%d, max=%d\n",
			cq_entries, hr_dev->caps.max_cqes);
		return -EINVAL;
	}

	if (hr_dev->caps.min_cqes)
		cq_entries = max(cq_entries, hr_dev->caps.min_cqes);

	cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
	hr_cq->ib_cq.cqe = cq_entries - 1;
	spin_lock_init(&hr_cq->lock);

	if (udata) {
		ret = create_user_cq(hr_dev, hr_cq, udata, &resp, cq_entries);
		if (ret) {
			dev_err(dev, "Create CQ failed in user mode!\n");
			goto err_cq;
		}
	} else {
		ret = create_kernel_cq(hr_dev, hr_cq, cq_entries);
		if (ret) {
			dev_err(dev, "Create CQ failed in kernel mode!\n");
			goto err_cq;
		}
	}

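	/* Allocate a CQN and program the CQ context into hardware */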
	ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt,
				hr_cq, vector);
	if (ret) {
		dev_err(dev, "Create CQ failed: cq_alloc failed.\n");
		goto err_dbmap;
	}

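	/*
	 * Kernel-mode CQs initialize the tptr record to zero here;
	 * user-mode CQs set it up in user space instead.
	 */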
	if (!udata && hr_cq->tptr_addr)
		*hr_cq->tptr_addr = 0;

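	/* Register completion and async event handlers for this CQ */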
	hr_cq->comp = hns_roce_ib_cq_comp;
	hr_cq->event = hns_roce_ib_cq_event;
	hr_cq->cq_depth = cq_entries;

	if (udata) {
		resp.cqn = hr_cq->cqn;
		ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (ret)
			goto err_cqc;
	}

	return 0;

err_cqc:
	hns_roce_free_cq(hr_dev, hr_cq);

err_dbmap:
	if (udata)
		destroy_user_cq(hr_dev, hr_cq, udata, &resp);
	else
		destroy_kernel_cq(hr_dev, hr_cq);

err_cq:
	return ret;
}

void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);

	if (hr_dev->hw->destroy_cq) {
		hr_dev->hw->destroy_cq(ib_cq, udata);
		return;
	}

	hns_roce_free_cq(hr_dev, hr_cq);
	hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);

	ib_umem_release(hr_cq->umem);
	if (udata) {
		if (hr_cq->db_en == 1)
			hns_roce_db_unmap_user(rdma_udata_to_drv_context(
						       udata,
						       struct hns_roce_ucontext,
						       ibucontext),
					       &hr_cq->db);
	} else {
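		/* Free the CQ buffer allocated for a kernel-mode CQ */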
		hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, ib_cq->cqe);
		if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
			hns_roce_free_db(hr_dev, &hr_cq->db);
	}
}

void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_cq *cq;

	cq = xa_load(&hr_dev->cq_table.array, cqn & (hr_dev->caps.num_cqs - 1));
	if (!cq) {
		dev_warn(dev, "Completion event for bogus CQ 0x%08x\n", cqn);
		return;
	}

	++cq->arm_sn;
	cq->comp(cq);
}

void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct device *dev = hr_dev->dev;
	struct hns_roce_cq *cq;

	cq = xa_load(&cq_table->array, cqn & (hr_dev->caps.num_cqs - 1));
	if (cq)
		atomic_inc(&cq->refcount);

	if (!cq) {
		dev_warn(dev, "Async event for bogus CQ %08x\n", cqn);
		return;
	}

	cq->event(cq, (enum hns_roce_event)event_type);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
}

int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;

	xa_init(&cq_table->array);

	return hns_roce_bitmap_init(&cq_table->bitmap, hr_dev->caps.num_cqs,
				    hr_dev->caps.num_cqs - 1,
				    hr_dev->caps.reserved_cqs, 0);
}

void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->cq_table.bitmap);
}