This source file includes the following definitions:
- rvt_cq_enter
- send_complete
- rvt_create_cq
- rvt_destroy_cq
- rvt_req_notify_cq
- rvt_resize_cq
- rvt_poll_cq
- rvt_driver_cq_init
- rvt_cq_exit
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "cq.h"
#include "vt.h"
#include "trace.h"

static struct workqueue_struct *comp_vector_wq;

/**
 * rvt_cq_enter - add a new entry to the completion queue
 * @cq: completion queue
 * @entry: work completion entry to add
 * @solicited: true if @entry is solicited
 *
 * This may be called with qp->s_lock held.
 *
 * Return: true on success, else false if the cq is full.
 */
bool rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
{
        struct ib_uverbs_wc *uqueue = NULL;
        struct ib_wc *kqueue = NULL;
        struct rvt_cq_wc *u_wc = NULL;
        struct rvt_k_cq_wc *k_wc = NULL;
        unsigned long flags;
        u32 head;
        u32 next;
        u32 tail;

        spin_lock_irqsave(&cq->lock, flags);

        if (cq->ip) {
                u_wc = cq->queue;
                uqueue = &u_wc->uqueue[0];
                head = RDMA_READ_UAPI_ATOMIC(u_wc->head);
                tail = RDMA_READ_UAPI_ATOMIC(u_wc->tail);
        } else {
                k_wc = cq->kqueue;
                kqueue = &k_wc->kqueue[0];
                head = k_wc->head;
                tail = k_wc->tail;
        }

        /*
         * Note that the head pointer might be writable by
         * user processes.  Take care to verify it is a sane value.
         */
        if (head >= (unsigned)cq->ibcq.cqe) {
                head = cq->ibcq.cqe;
                next = 0;
        } else {
                next = head + 1;
        }

        if (unlikely(next == tail || cq->cq_full)) {
                struct rvt_dev_info *rdi = cq->rdi;

                if (!cq->cq_full)
                        rvt_pr_err_ratelimited(rdi, "CQ is full!\n");
                cq->cq_full = true;
                spin_unlock_irqrestore(&cq->lock, flags);
                if (cq->ibcq.event_handler) {
                        struct ib_event ev;

                        ev.device = cq->ibcq.device;
                        ev.element.cq = &cq->ibcq;
                        ev.event = IB_EVENT_CQ_ERR;
                        cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
                }
                return false;
        }
        trace_rvt_cq_enter(cq, entry, head);
        if (uqueue) {
                uqueue[head].wr_id = entry->wr_id;
                uqueue[head].status = entry->status;
                uqueue[head].opcode = entry->opcode;
                uqueue[head].vendor_err = entry->vendor_err;
                uqueue[head].byte_len = entry->byte_len;
                uqueue[head].ex.imm_data = entry->ex.imm_data;
                uqueue[head].qp_num = entry->qp->qp_num;
                uqueue[head].src_qp = entry->src_qp;
                uqueue[head].wc_flags = entry->wc_flags;
                uqueue[head].pkey_index = entry->pkey_index;
                uqueue[head].slid = ib_lid_cpu16(entry->slid);
                uqueue[head].sl = entry->sl;
                uqueue[head].dlid_path_bits = entry->dlid_path_bits;
                uqueue[head].port_num = entry->port_num;
                /* Make sure entry is written before the head index. */
                RDMA_WRITE_UAPI_ATOMIC(u_wc->head, next);
        } else {
                kqueue[head] = *entry;
                k_wc->head = next;
        }

        if (cq->notify == IB_CQ_NEXT_COMP ||
            (cq->notify == IB_CQ_SOLICITED &&
             (solicited || entry->status != IB_WC_SUCCESS))) {
                /*
                 * This will cause send_complete() to be called in
                 * another thread.
                 */
                cq->notify = RVT_CQ_NONE;
                cq->triggered++;
                queue_work_on(cq->comp_vector_cpu, comp_vector_wq,
                              &cq->comptask);
        }

        spin_unlock_irqrestore(&cq->lock, flags);
        return true;
}
EXPORT_SYMBOL(rvt_cq_enter);
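
/*
 * Illustrative sketch, not part of rdmavt: a minimal caller of
 * rvt_cq_enter() as a driver might use it when a receive finishes.
 * example_complete_recv() and its parameters are hypothetical; real
 * callers (e.g. rdmavt's QP code) build the ib_wc from their own state.
 */
static void example_complete_recv(struct rvt_qp *qp, u64 wr_id, u32 byte_len)
{
        struct ib_wc wc = { };

        wc.wr_id = wr_id;
        wc.status = IB_WC_SUCCESS;
        wc.opcode = IB_WC_RECV;
        wc.byte_len = byte_len;
        wc.qp = &qp->ibqp;

        /* Solicited entries can satisfy an IB_CQ_SOLICITED notification. */
        if (!rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, true))
                return; /* ring was full; an IB_EVENT_CQ_ERR was raised */
}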

static void send_complete(struct work_struct *work)
{
        struct rvt_cq *cq = container_of(work, struct rvt_cq, comptask);

        /*
         * The completion handler will most likely rearm the notification
         * and poll for all pending entries.  If a new completion entry
         * is added while we are in this routine, queue_work()
         * won't call us again until we return so we check triggered to
         * see if we need to call the handler again.
         */
        for (;;) {
                u8 triggered = cq->triggered;

                /*
                 * IPoIB connected mode assumes the callback is from a
                 * soft IRQ. We simulate this by blocking "bottom halves".
                 * See the implementation for ipoib_cm_handle_tx_wc(),
                 * netif_tx_lock_bh() and netif_tx_lock().
                 */
                local_bh_disable();
                cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
                local_bh_enable();

                if (cq->triggered == triggered)
                        return;
        }
}

/**
 * rvt_create_cq - create a completion queue
 * @ibcq: Allocated CQ
 * @attr: creation attributes
 * @udata: user data for libibverbs.so
 *
 * Called by ib_create_cq() in the generic verbs code.
 *
 * Return: 0 on success.
 */
int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                  struct ib_udata *udata)
{
        struct ib_device *ibdev = ibcq->device;
        struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
        struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
        struct rvt_cq_wc *u_wc = NULL;
        struct rvt_k_cq_wc *k_wc = NULL;
        u32 sz;
        unsigned int entries = attr->cqe;
        int comp_vector = attr->comp_vector;
        int err;

        if (attr->flags)
                return -EINVAL;

        if (entries < 1 || entries > rdi->dparms.props.max_cqe)
                return -EINVAL;

        if (comp_vector < 0)
                comp_vector = 0;

        comp_vector = comp_vector % rdi->ibdev.num_comp_vectors;

        /*
         * Allocate the completion queue entries and head/tail pointers.
         * This is allocated separately so that it can be resized and
         * also mapped into user space.
         * We need to use vmalloc() in order to support mmap and large
         * numbers of entries.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                sz = sizeof(struct ib_uverbs_wc) * (entries + 1);
                sz += sizeof(*u_wc);
                u_wc = vmalloc_user(sz);
                if (!u_wc)
                        return -ENOMEM;
        } else {
                sz = sizeof(struct ib_wc) * (entries + 1);
                sz += sizeof(*k_wc);
                k_wc = vzalloc_node(sz, rdi->dparms.node);
                if (!k_wc)
                        return -ENOMEM;
        }

        /*
         * Return the address of the WC as the offset to mmap.
         * See rvt_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                cq->ip = rvt_create_mmap_info(rdi, sz, udata, u_wc);
                if (IS_ERR(cq->ip)) {
                        err = PTR_ERR(cq->ip);
                        goto bail_wc;
                }

                err = ib_copy_to_udata(udata, &cq->ip->offset,
                                       sizeof(cq->ip->offset));
                if (err)
                        goto bail_ip;
        }

        spin_lock_irq(&rdi->n_cqs_lock);
        if (rdi->n_cqs_allocated == rdi->dparms.props.max_cq) {
                spin_unlock_irq(&rdi->n_cqs_lock);
                err = -ENOMEM;
                goto bail_ip;
        }

        rdi->n_cqs_allocated++;
        spin_unlock_irq(&rdi->n_cqs_lock);

        if (cq->ip) {
                spin_lock_irq(&rdi->pending_lock);
                list_add(&cq->ip->pending_mmaps, &rdi->pending_mmaps);
                spin_unlock_irq(&rdi->pending_lock);
        }

        /*
         * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
         * The number of entries should be >= the number requested or return
         * an error.
         */
        cq->rdi = rdi;
        if (rdi->driver_f.comp_vect_cpu_lookup)
                cq->comp_vector_cpu =
                        rdi->driver_f.comp_vect_cpu_lookup(rdi, comp_vector);
        else
                cq->comp_vector_cpu =
                        cpumask_first(cpumask_of_node(rdi->dparms.node));

        cq->ibcq.cqe = entries;
        cq->notify = RVT_CQ_NONE;
        spin_lock_init(&cq->lock);
        INIT_WORK(&cq->comptask, send_complete);
        if (u_wc)
                cq->queue = u_wc;
        else
                cq->kqueue = k_wc;

        trace_rvt_create_cq(cq, attr);
        return 0;

bail_ip:
        kfree(cq->ip);
bail_wc:
        vfree(u_wc);
        vfree(k_wc);
        return err;
}
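
/*
 * Illustrative sketch, not part of rdmavt: rvt_create_cq() is reached
 * through the generic verbs layer, so a kernel consumer creates a CQ
 * roughly as below. example_comp_handler() and example_create() are
 * hypothetical names.
 */
static void example_comp_handler(struct ib_cq *cq, void *ctx)
{
        /* typically: rearm with ib_req_notify_cq() and poll */
}

static struct ib_cq *example_create(struct ib_device *ibdev)
{
        struct ib_cq_init_attr attr = {
                .cqe = 128,       /* must be >= 1 and <= props.max_cqe */
                .comp_vector = 0, /* reduced modulo num_comp_vectors */
                /* .flags must stay 0; rvt_create_cq() rejects any flag */
        };

        return ib_create_cq(ibdev, example_comp_handler, NULL, NULL, &attr);
}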

/**
 * rvt_destroy_cq - destroy a completion queue
 * @ibcq: the completion queue to destroy
 * @udata: user data or NULL for kernel object
 *
 * Called by ib_destroy_cq() in the generic verbs code.
 */
void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
        struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
        struct rvt_dev_info *rdi = cq->rdi;

        flush_work(&cq->comptask);
        spin_lock_irq(&rdi->n_cqs_lock);
        rdi->n_cqs_allocated--;
        spin_unlock_irq(&rdi->n_cqs_lock);
        if (cq->ip)
                kref_put(&cq->ip->ref, rvt_release_mmap_info);
        else
                vfree(cq->kqueue);
}

/**
 * rvt_req_notify_cq - change the notification type for a completion queue
 * @ibcq: the completion queue
 * @notify_flags: the type of notification to request
 *
 * This may be called from interrupt context.  Also called by
 * ib_req_notify_cq() in the generic verbs code.
 *
 * Return: 0 for success.
 */
int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
        struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&cq->lock, flags);

        /*
         * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
         * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
         */
        if (cq->notify != IB_CQ_NEXT_COMP)
                cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;

        if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
                if (cq->queue) {
                        if (RDMA_READ_UAPI_ATOMIC(cq->queue->head) !=
                            RDMA_READ_UAPI_ATOMIC(cq->queue->tail))
                                ret = 1;
                } else {
                        if (cq->kqueue->head != cq->kqueue->tail)
                                ret = 1;
                }
        }

        spin_unlock_irqrestore(&cq->lock, flags);

        return ret;
}
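
/*
 * Illustrative sketch, not part of rdmavt: the IB_CQ_REPORT_MISSED_EVENTS
 * return value above enables the standard "drain, rearm, re-check" loop
 * that kernel consumers use to avoid losing completions that race with
 * the rearm. example_drain() is a hypothetical name.
 */
static void example_drain(struct ib_cq *ibcq)
{
        struct ib_wc wc;

        do {
                while (ib_poll_cq(ibcq, 1, &wc) > 0)
                        ; /* handle the completion in wc */
        } while (ib_req_notify_cq(ibcq, IB_CQ_NEXT_COMP |
                                  IB_CQ_REPORT_MISSED_EVENTS) > 0);
}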

/**
 * rvt_resize_cq - change the size of the CQ
 * @ibcq: the completion queue to resize
 * @cqe: the new size of the CQ
 * @udata: user data for libibverbs.so
 *
 * Return: 0 for success.
 */
int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
        struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
        u32 head, tail, n;
        int ret;
        u32 sz;
        struct rvt_dev_info *rdi = cq->rdi;
        struct rvt_cq_wc *u_wc = NULL;
        struct rvt_cq_wc *old_u_wc = NULL;
        struct rvt_k_cq_wc *k_wc = NULL;
        struct rvt_k_cq_wc *old_k_wc = NULL;

        if (cqe < 1 || cqe > rdi->dparms.props.max_cqe)
                return -EINVAL;

        /*
         * Need to use vmalloc() if we want to support large #s of entries.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                sz = sizeof(struct ib_uverbs_wc) * (cqe + 1);
                sz += sizeof(*u_wc);
                u_wc = vmalloc_user(sz);
                if (!u_wc)
                        return -ENOMEM;
        } else {
                sz = sizeof(struct ib_wc) * (cqe + 1);
                sz += sizeof(*k_wc);
                k_wc = vzalloc_node(sz, rdi->dparms.node);
                if (!k_wc)
                        return -ENOMEM;
        }
        /* Check that we can write the offset to mmap. */
        if (udata && udata->outlen >= sizeof(__u64)) {
                __u64 offset = 0;

                ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
                if (ret)
                        goto bail_free;
        }

        spin_lock_irq(&cq->lock);
        /*
         * Make sure head and tail are sane since they
         * might be user writable.
         */
        if (u_wc) {
                old_u_wc = cq->queue;
                head = RDMA_READ_UAPI_ATOMIC(old_u_wc->head);
                tail = RDMA_READ_UAPI_ATOMIC(old_u_wc->tail);
        } else {
                old_k_wc = cq->kqueue;
                head = old_k_wc->head;
                tail = old_k_wc->tail;
        }

        if (head > (u32)cq->ibcq.cqe)
                head = (u32)cq->ibcq.cqe;
        if (tail > (u32)cq->ibcq.cqe)
                tail = (u32)cq->ibcq.cqe;
        if (head < tail)
                n = cq->ibcq.cqe + 1 + head - tail;
        else
                n = head - tail;
        if (unlikely((u32)cqe < n)) {
                ret = -EINVAL;
                goto bail_unlock;
        }
        for (n = 0; tail != head; n++) {
                if (u_wc)
                        u_wc->uqueue[n] = old_u_wc->uqueue[tail];
                else
                        k_wc->kqueue[n] = old_k_wc->kqueue[tail];
                if (tail == (u32)cq->ibcq.cqe)
                        tail = 0;
                else
                        tail++;
        }
        cq->ibcq.cqe = cqe;
        if (u_wc) {
                RDMA_WRITE_UAPI_ATOMIC(u_wc->head, n);
                RDMA_WRITE_UAPI_ATOMIC(u_wc->tail, 0);
                cq->queue = u_wc;
        } else {
                k_wc->head = n;
                k_wc->tail = 0;
                cq->kqueue = k_wc;
        }
        spin_unlock_irq(&cq->lock);

        if (u_wc)
                vfree(old_u_wc);
        else
                vfree(old_k_wc);

        if (cq->ip) {
                struct rvt_mmap_info *ip = cq->ip;

                rvt_update_mmap_info(rdi, ip, sz, u_wc);

                /*
                 * Return the offset to mmap.
                 * See rvt_mmap() for details.
                 */
                if (udata && udata->outlen >= sizeof(__u64)) {
                        ret = ib_copy_to_udata(udata, &ip->offset,
                                               sizeof(ip->offset));
                        if (ret)
                                return ret;
                }

                spin_lock_irq(&rdi->pending_lock);
                if (list_empty(&ip->pending_mmaps))
                        list_add(&ip->pending_mmaps, &rdi->pending_mmaps);
                spin_unlock_irq(&rdi->pending_lock);
        }

        return 0;

bail_unlock:
        spin_unlock_irq(&cq->lock);
bail_free:
        vfree(u_wc);
        vfree(k_wc);

        return ret;
}
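
/*
 * Illustrative sketch, not part of rdmavt: a kernel consumer resizes a CQ
 * through the generic verbs wrapper. If more completions are queued than
 * the new size can hold, the code above fails with -EINVAL, so a caller
 * may want to drain first. example_grow() and the doubling policy are
 * hypothetical.
 */
static int example_grow(struct ib_cq *ibcq)
{
        /* doubling is an arbitrary policy; max_cqe still bounds the size */
        return ib_resize_cq(ibcq, 2 * ibcq->cqe);
}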

/**
 * rvt_poll_cq - poll for work completion entries
 * @ibcq: the completion queue to poll
 * @num_entries: the maximum number of entries to return
 * @entry: pointer to array where work completions are placed
 *
 * This may be called from interrupt context.  Also called by
 * ib_poll_cq() in the generic verbs code.
 *
 * Return: the number of completion entries polled.
 */
int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
        struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
        struct rvt_k_cq_wc *wc;
        unsigned long flags;
        int npolled;
        u32 tail;

        /* The kernel can only poll a kernel completion queue */
        if (cq->ip)
                return -EINVAL;

        spin_lock_irqsave(&cq->lock, flags);

        wc = cq->kqueue;
        tail = wc->tail;
        if (tail > (u32)cq->ibcq.cqe)
                tail = (u32)cq->ibcq.cqe;
        for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
                if (tail == wc->head)
                        break;
                /* No memory barrier needed: we hold cq->lock. */
                trace_rvt_cq_poll(cq, &wc->kqueue[tail], npolled);
                *entry = wc->kqueue[tail];
                if (tail >= cq->ibcq.cqe)
                        tail = 0;
                else
                        tail++;
        }
        wc->tail = tail;

        spin_unlock_irqrestore(&cq->lock, flags);

        return npolled;
}
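
/*
 * Illustrative sketch, not part of rdmavt: batch polling from a kernel
 * consumer. Note the cq->ip test above: a CQ created with a user-space
 * mapping cannot be polled from the kernel, since user space consumes
 * its ring directly via mmap. example_poll() is a hypothetical name.
 */
static void example_poll(struct ib_cq *ibcq)
{
        struct ib_wc wc[16];
        int n, i;

        while ((n = ib_poll_cq(ibcq, ARRAY_SIZE(wc), wc)) > 0)
                for (i = 0; i < n; i++)
                        pr_debug("wr_id %llu status %d\n",
                                 wc[i].wr_id, wc[i].status);
}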

/**
 * rvt_driver_cq_init - initialize CQ resources on behalf of the driver
 *
 * Return: 0 on success.
 */
int rvt_driver_cq_init(void)
{
        comp_vector_wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_CPU_INTENSIVE,
                                         0, "rdmavt_cq");
        if (!comp_vector_wq)
                return -ENOMEM;

        return 0;
}

/**
 * rvt_cq_exit - tear down CQ resources
 */
void rvt_cq_exit(void)
{
        destroy_workqueue(comp_vector_wq);
        comp_vector_wq = NULL;
}