/*
 * This source file includes the following definitions:
 * - restart_sge
 * - qib_make_rc_ack
 * - qib_make_rc_req
 * - qib_send_rc_ack
 * - reset_psn
 * - qib_restart_rc
 * - reset_sending_psn
 * - qib_rc_send_complete
 * - update_last_psn
 * - do_rc_completion
 * - do_rc_ack
 * - rdma_seq_err
 * - qib_rc_rcv_resp
 * - qib_rc_rcv_error
 * - qib_update_ack_queue
 * - qib_rc_rcv
 */

#include <linux/io.h>

#include "qib.h"

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_RC_##x

static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
		       u32 psn, u32 pmtu)
{
	u32 len;

	len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
	return rvt_restart_sge(ss, wqe, len);
}
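
/**
 * qib_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * @dev: the device for this QP
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @pmtu: the path MTU
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note that we are in the responder's side of the QP context.
 * Note the QP s_lock must be held.
 */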
static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
			   struct ib_other_headers *ohdr, u32 pmtu)
{
	struct rvt_ack_entry *e;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;

	/* Don't send an ACK if we aren't supposed to. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto bail;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;

	switch (qp->s_ack_state) {
	case OP(RDMA_READ_RESPONSE_LAST):
	case OP(RDMA_READ_RESPONSE_ONLY):
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		/* FALLTHROUGH */
	case OP(ATOMIC_ACKNOWLEDGE):
		/*
		 * We can increment the tail pointer now that the last
		 * response has been sent instead of only being
		 * constructed.
		 */
		if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC)
			qp->s_tail_ack_queue = 0;
		/* FALLTHROUGH */
	case OP(SEND_ONLY):
	case OP(ACKNOWLEDGE):
		/* Check for no next entry in the queue. */
		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
			if (qp->s_flags & RVT_S_ACK_PENDING)
				goto normal;
			goto bail;
		}

		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST)) {
			/*
			 * If a RDMA read response is being resent and
			 * we haven't seen the duplicate request yet,
			 * then stop sending the remaining responses the
			 * responder has seen until the requester resends it.
			 */
			len = e->rdma_sge.sge_length;
			if (len && !e->rdma_sge.mr) {
				qp->s_tail_ack_queue = qp->r_head_ack_queue;
				goto bail;
			}
			/* Copy SGE state in case we need to resend */
			qp->s_rdma_mr = e->rdma_sge.mr;
			if (qp->s_rdma_mr)
				rvt_get_mr(qp->s_rdma_mr);
			qp->s_ack_rdma_sge.sge = e->rdma_sge;
			qp->s_ack_rdma_sge.num_sge = 1;
			qp->s_cur_sge = &qp->s_ack_rdma_sge;
			if (len > pmtu) {
				len = pmtu;
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
			} else {
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
				e->sent = 1;
			}
			ohdr->u.aeth = rvt_compute_aeth(qp);
			hwords++;
			qp->s_ack_rdma_psn = e->psn;
			bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
		} else {
			/* COMPARE_SWAP or FETCH_ADD */
			qp->s_cur_sge = NULL;
			len = 0;
			qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
			ohdr->u.at.aeth = rvt_compute_aeth(qp);
			ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
			hwords += sizeof(ohdr->u.at) / sizeof(u32);
			bth2 = e->psn & QIB_PSN_MASK;
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		qp->s_cur_sge = &qp->s_ack_rdma_sge;
		qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
		if (qp->s_rdma_mr)
			rvt_get_mr(qp->s_rdma_mr);
		len = qp->s_ack_rdma_sge.sge.sge_length;
		if (len > pmtu)
			len = pmtu;
		else {
			ohdr->u.aeth = rvt_compute_aeth(qp);
			hwords++;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
			e = &qp->s_ack_queue[qp->s_tail_ack_queue];
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
		break;

	default:
normal:
		/*
		 * Send a regular ACK.
		 * Set the s_ack_state so we wait until after sending
		 * the ACK before setting s_ack_state to ACKNOWLEDGE
		 * (see above).
		 */
		qp->s_ack_state = OP(SEND_ONLY);
		qp->s_flags &= ~RVT_S_ACK_PENDING;
		qp->s_cur_sge = NULL;
		if (qp->s_nak_state)
			ohdr->u.aeth =
				cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
					    (qp->s_nak_state <<
					     IB_AETH_CREDIT_SHIFT));
		else
			ohdr->u.aeth = rvt_compute_aeth(qp);
		hwords++;
		len = 0;
		bth0 = OP(ACKNOWLEDGE) << 24;
		bth2 = qp->s_ack_psn & QIB_PSN_MASK;
	}
	qp->s_rdma_ack_cnt++;
	qp->s_hdrwords = hwords;
	qp->s_cur_size = len;
	qib_make_ruc_header(qp, ohdr, bth0, bth2);
	return 1;

bail:
	qp->s_ack_state = OP(ACKNOWLEDGE);
	qp->s_flags &= ~(RVT_S_RESP_PENDING | RVT_S_ACK_PENDING);
	return 0;
}
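
/**
 * qib_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * @qp: a pointer to the QP
 * @flags: unused
 *
 * Assumes the s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */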
int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_other_headers *ohdr;
	struct rvt_sge_state *ss;
	struct rvt_swqe *wqe;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;
	u32 pmtu = qp->pmtu;
	char newreq;
	int ret = 0;
	int delta;

	ohdr = &priv->s_hdr->u.oth;
	if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
		ohdr = &priv->s_hdr->u.l.oth;

	/* Sending responses has higher priority over sending requests. */
	if ((qp->s_flags & RVT_S_RESP_PENDING) &&
	    qib_make_rc_ack(dev, qp, ohdr, pmtu))
		goto done;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == READ_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&priv->s_dma_busy)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		rvt_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
			IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
		/* will get called again */
		goto done;
	}

	if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK))
		goto bail;

	if (qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) {
		if (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
			qp->s_flags |= RVT_S_WAIT_PSN;
			goto bail;
		}
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
	}

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;
	bth0 = 0;

	/* Send a request. */
	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	switch (qp->s_state) {
	default:
		if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
			goto bail;
		/*
		 * Resend an old request or start a new one.
		 *
		 * We keep track of the current SWQE so that
		 * we don't reset the "furthest progress" state
		 * if we need to back up.
		 */
		newreq = 0;
		if (qp->s_cur == qp->s_tail) {
			/* Check if send work queue is empty. */
			if (qp->s_tail == READ_ONCE(qp->s_head))
				goto bail;
			/*
			 * If a fence is requested, wait for previous
			 * RDMA read and atomic operations to finish.
			 */
			if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
			    qp->s_num_rd_atomic) {
				qp->s_flags |= RVT_S_WAIT_FENCE;
				goto bail;
			}
			newreq = 1;
			qp->s_psn = wqe->psn;
		}
		/*
		 * Note that we have to be careful not to modify the
		 * original work request since we may need to resend
		 * it.
		 */
		len = wqe->length;
		ss = &qp->s_sge;
		bth2 = qp->s_psn & QIB_PSN_MASK;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			/* If no credit, return. */
			if (!rvt_rc_credit_avail(qp, wqe))
				goto bail;
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND)
				qp->s_state = OP(SEND_ONLY);
			else {
				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
			if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
				qp->s_lsn++;
			goto no_flow_control;
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* If no credit, return. */
			if (!rvt_rc_credit_avail(qp, wqe))
				goto bail;
no_flow_control:
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->rdma_wr.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / sizeof(u32);
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->rdma_wr.wr.opcode == IB_WR_RDMA_WRITE)
				qp->s_state = OP(RDMA_WRITE_ONLY);
			else {
				qp->s_state = OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after RETH */
				ohdr->u.rc.imm_data =
					wqe->rdma_wr.wr.ex.imm_data;
				hwords += 1;
				if (wqe->rdma_wr.wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_READ:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}

			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->rdma_wr.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			qp->s_state = OP(RDMA_READ_REQUEST);
			hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}
			if (wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
				qp->s_state = OP(COMPARE_SWAP);
				put_ib_ateth_swap(wqe->atomic_wr.swap,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(wqe->atomic_wr.compare_add,
						     &ohdr->u.atomic_eth);
			} else {
				qp->s_state = OP(FETCH_ADD);
				put_ib_ateth_swap(wqe->atomic_wr.compare_add,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
			}
			put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
					   &ohdr->u.atomic_eth);
			ohdr->u.atomic_eth.rkey = cpu_to_be32(
				wqe->atomic_wr.rkey);
			hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto bail;
		}
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		qp->s_len = wqe->length;
		if (newreq) {
			qp->s_tail++;
			if (qp->s_tail >= qp->s_size)
				qp->s_tail = 0;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_READ)
			qp->s_psn = wqe->lpsn + 1;
		else
			qp->s_psn++;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
		 * thread to indicate a SEND needs to be restarted from an
		 * earlier PSN without interfering with the sending thread.
		 * See qib_restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		bth2 = qp->s_psn++ & QIB_PSN_MASK;
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND)
			qp->s_state = OP(SEND_LAST);
		else {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_LAST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
		 * thread to indicate a RDMA write needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See qib_restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		bth2 = qp->s_psn++ & QIB_PSN_MASK;
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
			qp->s_state = OP(RDMA_WRITE_LAST);
		else {
			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
		 * thread to indicate a RDMA read needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See qib_restart_rc().
		 */
		len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
		ohdr->u.rc.reth.vaddr =
			cpu_to_be64(wqe->rdma_wr.remote_addr + len);
		ohdr->u.rc.reth.rkey =
			cpu_to_be32(wqe->rdma_wr.rkey);
		ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
		qp->s_state = OP(RDMA_READ_REQUEST);
		hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
		bth2 = (qp->s_psn & QIB_PSN_MASK) | IB_BTH_REQ_ACK;
		qp->s_psn = wqe->lpsn + 1;
		ss = NULL;
		len = 0;
		qp->s_cur++;
		if (qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		break;
	}
	qp->s_sending_hpsn = bth2;
	delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8;
	if (delta && delta % QIB_PSN_CREDIT == 0)
		bth2 |= IB_BTH_REQ_ACK;
	if (qp->s_flags & RVT_S_SEND_ONE) {
		qp->s_flags &= ~RVT_S_SEND_ONE;
		qp->s_flags |= RVT_S_WAIT_ACK;
		bth2 |= IB_BTH_REQ_ACK;
	}
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	qp->s_cur_sge = ss;
	qp->s_cur_size = len;
	qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), bth2);
done:
	return 1;
bail:
	qp->s_flags &= ~RVT_S_BUSY;
	return ret;
}
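
/**
 * qib_send_rc_ack - Construct an ACK packet and send it
 * @qp: a pointer to the QP
 *
 * This is called from qib_rc_rcv() and qib_kreceive().
 * Note that RDMA reads and atomics are handled in the
 * send side QP state and tasklet.
 */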
void qib_send_rc_ack(struct rvt_qp *qp)
{
	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	u64 pbc;
	u16 lrh0;
	u32 bth0;
	u32 hwords;
	u32 pbufn;
	u32 __iomem *piobuf;
	struct ib_header hdr;
	struct ib_other_headers *ohdr;
	u32 control;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto unlock;

	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
	if ((qp->s_flags & RVT_S_RESP_PENDING) || qp->s_rdma_ack_cnt)
		goto queue_ack;

	/* Construct the header with s_lock held so APM doesn't change it. */
	ohdr = &hdr.u.oth;
	lrh0 = QIB_LRH_BTH;
	/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
	hwords = 6;
	if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
		     IB_AH_GRH)) {
		hwords += qib_make_grh(ibp, &hdr.u.l.grh,
				       rdma_ah_read_grh(&qp->remote_ah_attr),
				       hwords, 0);
		ohdr = &hdr.u.l.oth;
		lrh0 = QIB_LRH_GRH;
	}
	/* read pkey_index w/o lock (its atomic) */
	bth0 = qib_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	if (qp->r_nak_state)
		ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
					    (qp->r_nak_state <<
					     IB_AETH_CREDIT_SHIFT));
	else
		ohdr->u.aeth = rvt_compute_aeth(qp);
	lrh0 |= ibp->sl_to_vl[rdma_ah_get_sl(&qp->remote_ah_attr)] << 12 |
		rdma_ah_get_sl(&qp->remote_ah_attr) << 4;
	hdr.lrh[0] = cpu_to_be16(lrh0);
	hdr.lrh[1] = cpu_to_be16(rdma_ah_get_dlid(&qp->remote_ah_attr));
	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
	hdr.lrh[3] = cpu_to_be16(ppd->lid |
				 rdma_ah_get_path_bits(&qp->remote_ah_attr));
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & QIB_PSN_MASK);

	spin_unlock_irqrestore(&qp->s_lock, flags);

	/* Don't try to send ACKs if the link isn't ACTIVE */
	if (!(ppd->lflags & QIBL_LINKACTIVE))
		goto done;

	control = dd->f_setpbc_control(ppd, hwords + SIZE_OF_CRC,
				       qp->s_srate, lrh0 >> 12);

	pbc = ((u64) control << 32) | (hwords + 1);

	piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
	if (!piobuf) {
		/*
		 * We are out of PIO buffers at the moment.
		 * Pass responsibility for sending the ACK to the
		 * send tasklet so that when a PIO buffer becomes
		 * available, the ACK is sent ahead of other outgoing
		 * packets.
		 */
		spin_lock_irqsave(&qp->s_lock, flags);
		goto queue_ack;
	}

	/*
	 * Write the pbc.
	 * We have to flush after the PBC for correctness
	 * on some cpus or WC buffer can be written out of order.
	 */
	writeq(pbc, piobuf);

	if (dd->flags & QIB_PIO_FLUSH_WC) {
		u32 *hdrp = (u32 *) &hdr;

		qib_flush_wc();
		qib_pio_copy(piobuf + 2, hdrp, hwords - 1);
		qib_flush_wc();
		__raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
	} else
		qib_pio_copy(piobuf + 2, (u32 *) &hdr, hwords);

	if (dd->flags & QIB_USE_SPCL_TRIG) {
		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;

		qib_flush_wc();
		__raw_writel(0xaebecede, piobuf + spcl_off);
	}

	qib_flush_wc();
	qib_sendbuf_done(dd, pbufn);

	this_cpu_inc(ibp->pmastats->n_unicast_xmit);
	goto done;

queue_ack:
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		this_cpu_inc(*ibp->rvp.rc_qacks);
		qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
		qp->s_nak_state = qp->r_nak_state;
		qp->s_ack_psn = qp->r_ack_psn;

		/* Schedule the send tasklet. */
		qib_schedule_send(qp);
	}
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
done:
	return;
}
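
/**
 * reset_psn - reset the QP state to send starting from PSN
 * @qp: the QP
 * @psn: the packet sequence number to restart at
 *
 * This is called from qib_rc_rcv() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 */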
static void reset_psn(struct rvt_qp *qp, u32 psn)
{
	u32 n = qp->s_acked;
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
	u32 opcode;

	qp->s_cur = n;

	/*
	 * If we are starting the request from the beginning,
	 * let the normal send code handle initialization.
	 */
	if (qib_cmp24(psn, wqe->psn) <= 0) {
		qp->s_state = OP(SEND_LAST);
		goto done;
	}

	/* Find the work request opcode corresponding to the given PSN. */
	opcode = wqe->wr.opcode;
	for (;;) {
		int diff;

		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
		wqe = rvt_get_swqe_ptr(qp, n);
		diff = qib_cmp24(psn, wqe->psn);
		if (diff < 0)
			break;
		qp->s_cur = n;
		/*
		 * If we are starting the request from the beginning,
		 * let the normal send code handle initialization.
		 */
		if (diff == 0) {
			qp->s_state = OP(SEND_LAST);
			goto done;
		}
		opcode = wqe->wr.opcode;
	}

	/*
	 * Set the state to restart in the middle of a request.
	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
	 * See qib_make_rc_req().
	 */
	switch (opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
		break;

	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
		break;

	case IB_WR_RDMA_READ:
		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		break;

	default:
		/*
		 * This case shouldn't happen since its only
		 * one PSN per req.
		 */
		qp->s_state = OP(SEND_LAST);
	}
done:
	qp->s_psn = psn;
	/*
	 * Set RVT_S_WAIT_PSN as qib_rc_complete() may start the timer
	 * asynchronously before the send tasklet can get scheduled.
	 * Doing it in qib_make_rc_req() is too late.
	 */
	if ((qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
	    (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
		qp->s_flags |= RVT_S_WAIT_PSN;
}
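
/*
 * Back up requester to resend the last un-ACKed request.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 */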
void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
{
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	struct qib_ibport *ibp;

	if (qp->s_retry == 0) {
		if (qp->s_mig_state == IB_MIG_ARMED) {
			qib_migrate_qp(qp);
			qp->s_retry = qp->s_retry_cnt;
		} else if (qp->s_last == qp->s_acked) {
			rvt_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
			rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			return;
		} else /* XXX need to handle delayed completion */
			return;
	} else
		qp->s_retry--;

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	if (wqe->wr.opcode == IB_WR_RDMA_READ)
		ibp->rvp.n_rc_resends++;
	else
		ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;

	qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
			 RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
			 RVT_S_WAIT_ACK);
	if (wait)
		qp->s_flags |= RVT_S_SEND_ONE;
	reset_psn(qp, psn);
}
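
/*
 * Set qp->s_sending_psn to the next PSN after the given one.
 * This would be psn+1 except when RDMA reads are present.
 */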
static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
{
	struct rvt_swqe *wqe;
	u32 n = qp->s_last;

	/* Find the work request corresponding to the given PSN. */
	for (;;) {
		wqe = rvt_get_swqe_ptr(qp, n);
		if (qib_cmp24(psn, wqe->lpsn) <= 0) {
			if (wqe->wr.opcode == IB_WR_RDMA_READ)
				qp->s_sending_psn = wqe->lpsn + 1;
			else
				qp->s_sending_psn = psn + 1;
			break;
		}
		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
	}
}
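
/*
 * This should be called with the QP s_lock held and interrupts disabled.
 */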
void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
{
	struct ib_other_headers *ohdr;
	struct rvt_swqe *wqe;
	u32 opcode;
	u32 psn;

	if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
		return;

	/* Find out where the BTH is */
	if ((be16_to_cpu(hdr->lrh[0]) & 3) == QIB_LRH_BTH)
		ohdr = &hdr->u.oth;
	else
		ohdr = &hdr->u.l.oth;

	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		WARN_ON(!qp->s_rdma_ack_cnt);
		qp->s_rdma_ack_cnt--;
		return;
	}

	psn = be32_to_cpu(ohdr->bth[2]);
	reset_sending_psn(qp, psn);

	/*
	 * Start timer after a packet requesting an ACK has been sent and
	 * there are still requests that haven't been acked.
	 */
	if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
	    !(qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
	    (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		rvt_add_retry_timer(qp);

	while (qp->s_last != qp->s_acked) {
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
		    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
			break;
		rvt_qp_complete_swqe(qp,
				     wqe,
				     ib_qib_wc_opcode[wqe->wr.opcode],
				     IB_WC_SUCCESS);
	}

	/*
	 * If we were waiting for sends to complete before resending,
	 * and they are now complete, restart sending.
	 */
	if (qp->s_flags & RVT_S_WAIT_PSN &&
	    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		qp->s_flags &= ~RVT_S_WAIT_PSN;
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
		qib_schedule_send(qp);
	}
}

static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
{
	qp->s_last_psn = psn;
}
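
/*
 * Generate a SWQE completion.
 * This is similar to qib_send_complete but has to check to be sure
 * that the SGEs are not being referenced if the SWQE is being resent.
 */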
static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
					 struct rvt_swqe *wqe,
					 struct qib_ibport *ibp)
{
	/*
	 * Don't decrement refcount and don't generate a
	 * completion if the SWQE is being resent until the send
	 * is finished.
	 */
	if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
	    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0)
		rvt_qp_complete_swqe(qp,
				     wqe,
				     ib_qib_wc_opcode[wqe->wr.opcode],
				     IB_WC_SUCCESS);
	else
		this_cpu_inc(*ibp->rvp.rc_delayed_comp);

	qp->s_retry = qp->s_retry_cnt;
	update_last_psn(qp, wqe->lpsn);

	/*
	 * If we are completing a request which is in the process of
	 * being resent, we can stop resending it since we know the
	 * responder has already seen it.
	 */
	if (qp->s_acked == qp->s_cur) {
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		qp->s_acked = qp->s_cur;
		wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
		if (qp->s_acked != qp->s_tail) {
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = wqe->psn;
		}
	} else {
		if (++qp->s_acked >= qp->s_size)
			qp->s_acked = 0;
		if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
			qp->s_draining = 0;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	}
	return wqe;
}
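
/**
 * do_rc_ack - process an incoming RC ACK
 * @qp: the QP the ACK came in on
 * @psn: the packet sequence number of the ACK
 * @opcode: the opcode of the request that resulted in the ACK
 *
 * This is called from qib_rc_rcv_resp() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
 */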
static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
		     u64 val, struct qib_ctxtdata *rcd)
{
	struct qib_ibport *ibp;
	enum ib_wc_status status;
	struct rvt_swqe *wqe;
	int ret = 0;
	u32 ack_psn;
	int diff;

	/*
	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
	 * requests and implicitly NAK RDMA read and atomic requests issued
	 * before the NAK'ed request.  The MSN won't include the NAK'ed
	 * request but will include an ACK'ed request(s).
	 */
	ack_psn = psn;
	if (aeth >> IB_AETH_NAK_SHIFT)
		ack_psn--;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	ibp = to_iport(qp->ibqp.device, qp->port_num);

	/*
	 * The MSN might be for a later WQE than the PSN indicates so
	 * only complete WQEs that the PSN finishes.
	 */
	while ((diff = qib_cmp24(ack_psn, wqe->lpsn)) >= 0) {
		/*
		 * RDMA_READ_RESPONSE_ONLY is a special case since
		 * we want to generate completion events for everything
		 * before the RDMA read, copy the data, then generate
		 * the completion for the read.
		 */
		if (wqe->wr.opcode == IB_WR_RDMA_READ &&
		    opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
		    diff == 0) {
			ret = 1;
			goto bail;
		}
		/*
		 * If this request is a RDMA read or atomic, and the ACK is
		 * for a later operation, this ACK NAKs the RDMA read or
		 * atomic.  In other words, only a RDMA_READ_LAST or ONLY
		 * can ACK a RDMA read and likewise for atomic ops.  Note
		 * that the NAK case can only happen if relaxed ordering is
		 * used and requests are sent after an RDMA read or atomic
		 * is sent but before the response is received.
		 */
		if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
		     (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
		    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
		     (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
			/* Retry this request. */
			if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
				qp->r_flags |= RVT_R_RDMAR_SEQ;
				qib_restart_rc(qp, qp->s_last_psn + 1, 0);
				if (list_empty(&qp->rspwait)) {
					qp->r_flags |= RVT_R_RSP_SEND;
					rvt_get_qp(qp);
					list_add_tail(&qp->rspwait,
						      &rcd->qp_wait_list);
				}
			}
			/*
			 * No need to process the ACK/NAK since we are
			 * restarting an earlier request.
			 */
			goto bail;
		}
		if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			u64 *vaddr = wqe->sg_list[0].vaddr;
			*vaddr = val;
		}
		if (qp->s_num_rd_atomic &&
		    (wqe->wr.opcode == IB_WR_RDMA_READ ||
		     wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
			qp->s_num_rd_atomic--;
			/* Restart sending task if fence is complete */
			if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
			    !qp->s_num_rd_atomic) {
				qp->s_flags &= ~(RVT_S_WAIT_FENCE |
						 RVT_S_WAIT_ACK);
				qib_schedule_send(qp);
			} else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
				qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
						 RVT_S_WAIT_ACK);
				qib_schedule_send(qp);
			}
		}
		wqe = do_rc_completion(qp, wqe, ibp);
		if (qp->s_acked == qp->s_tail)
			break;
	}

	switch (aeth >> IB_AETH_NAK_SHIFT) {
	case 0:         /* ACK */
		this_cpu_inc(*ibp->rvp.rc_acks);
		if (qp->s_acked != qp->s_tail) {
			/*
			 * We are expecting more ACKs so
			 * mod the retry timer.
			 */
			rvt_mod_retry_timer(qp);
			/*
			 * We can stop resending the earlier packets and
			 * continue with the next packet the receiver wants.
			 */
			if (qib_cmp24(qp->s_psn, psn) <= 0)
				reset_psn(qp, psn + 1);
		} else {
			/* No more acks - kill all timers */
			rvt_stop_rc_timers(qp);
			if (qib_cmp24(qp->s_psn, psn) <= 0) {
				qp->s_state = OP(SEND_LAST);
				qp->s_psn = psn + 1;
			}
		}
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			qib_schedule_send(qp);
		}
		rvt_get_credit(qp, aeth);
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		qp->s_retry = qp->s_retry_cnt;
		update_last_psn(qp, psn);
		return 1;

	case 1:         /* RNR NAK */
		ibp->rvp.n_rnr_naks++;
		if (qp->s_acked == qp->s_tail)
			goto bail;
		if (qp->s_flags & RVT_S_WAIT_RNR)
			goto bail;
		if (qp->s_rnr_retry == 0) {
			status = IB_WC_RNR_RETRY_EXC_ERR;
			goto class_b;
		}
		if (qp->s_rnr_retry_cnt < 7)
			qp->s_rnr_retry--;

		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);

		ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;

		reset_psn(qp, psn);

		qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
		rvt_stop_rc_timers(qp);
		rvt_add_rnr_timer(qp, aeth);
		return 0;

	case 3:         /* NAK */
		if (qp->s_acked == qp->s_tail)
			goto bail;
		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);
		switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
			IB_AETH_CREDIT_MASK) {
		case 0: /* PSN sequence error */
			ibp->rvp.n_seq_naks++;
			/*
			 * Back up to the responder's expected PSN.
			 * Note that we might get a NAK in the middle of an
			 * RDMA READ response which terminates the RDMA
			 * READ.
			 */
			qib_restart_rc(qp, psn, 0);
			qib_schedule_send(qp);
			break;

		case 1: /* Invalid Request */
			status = IB_WC_REM_INV_REQ_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 2: /* Remote Access Error */
			status = IB_WC_REM_ACCESS_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 3: /* Remote Operation Error */
			status = IB_WC_REM_OP_ERR;
			ibp->rvp.n_other_naks++;
class_b:
			if (qp->s_last == qp->s_acked) {
				rvt_send_complete(qp, wqe, status);
				rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			}
			break;

		default:
			/* Ignore other reserved NAK error codes */
			goto reserved;
		}
		qp->s_retry = qp->s_retry_cnt;
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		goto bail;

	default:                /* 2: reserved */
reserved:
		/* Ignore reserved NAK codes. */
		goto bail;
	}

bail:
	rvt_stop_rc_timers(qp);
	return ret;
}
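
/*
 * We have seen an out of sequence RDMA read middle or last packet.
 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
 */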
static void rdma_seq_err(struct rvt_qp *qp, struct qib_ibport *ibp, u32 psn,
			 struct qib_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;

	/* Remove QP from retry timer */
	rvt_stop_rc_timers(qp);

	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);

	while (qib_cmp24(psn, wqe->lpsn) > 0) {
		if (wqe->wr.opcode == IB_WR_RDMA_READ ||
		    wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
			break;
		wqe = do_rc_completion(qp, wqe, ibp);
	}

	ibp->rvp.n_rdma_seq++;
	qp->r_flags |= RVT_R_RDMAR_SEQ;
	qib_restart_rc(qp, qp->s_last_psn + 1, 0);
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_SEND;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}
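
/**
 * qib_rc_rcv_resp - process an incoming RC response packet
 * @ibp: the port this packet came in on
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @hdrsize: the header length
 * @pmtu: the path MTU
 * @rcd: the context pointer
 *
 * This is called from qib_rc_rcv() to process an incoming RC response
 * packet for the given QP.
 * Called at interrupt level.
 */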
static void qib_rc_rcv_resp(struct qib_ibport *ibp,
			    struct ib_other_headers *ohdr,
			    void *data, u32 tlen,
			    struct rvt_qp *qp,
			    u32 opcode,
			    u32 psn, u32 hdrsize, u32 pmtu,
			    struct qib_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	enum ib_wc_status status;
	unsigned long flags;
	int diff;
	u32 pad;
	u32 aeth;
	u64 val;

	if (opcode != OP(RDMA_READ_RESPONSE_MIDDLE)) {
		/*
		 * If ACK'd PSN on SDMA busy list try to make progress to
		 * reclaim SDMA credits.
		 */
		if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) &&
		    (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) {

			/*
			 * If send tasklet not running attempt to progress
			 * SDMA queue.
			 */
			if (!(qp->s_flags & RVT_S_BUSY)) {
				/* Acquire SDMA Lock */
				spin_lock_irqsave(&ppd->sdma_lock, flags);
				/* Invoke sdma make progress */
				qib_sdma_make_progress(ppd);
				/* Release SDMA Lock */
				spin_unlock_irqrestore(&ppd->sdma_lock, flags);
			}
		}
	}

	spin_lock_irqsave(&qp->s_lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto ack_done;

	/* Ignore invalid responses. */
	if (qib_cmp24(psn, READ_ONCE(qp->s_next_psn)) >= 0)
		goto ack_done;

	/* Ignore duplicate responses. */
	diff = qib_cmp24(psn, qp->s_last_psn);
	if (unlikely(diff <= 0)) {
		/* Update credits for "ghost" ACKs */
		if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
			aeth = be32_to_cpu(ohdr->u.aeth);
			if ((aeth >> IB_AETH_NAK_SHIFT) == 0)
				rvt_get_credit(qp, aeth);
		}
		goto ack_done;
	}

	/*
	 * Skip everything other than the PSN we expect, if we are waiting
	 * for a reply to a restarted RDMA read or atomic op.
	 */
	if (qp->r_flags & RVT_R_RDMAR_SEQ) {
		if (qib_cmp24(psn, qp->s_last_psn + 1) != 0)
			goto ack_done;
		qp->r_flags &= ~RVT_R_RDMAR_SEQ;
	}

	if (unlikely(qp->s_acked == qp->s_tail))
		goto ack_done;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	status = IB_WC_SUCCESS;

	switch (opcode) {
	case OP(ACKNOWLEDGE):
	case OP(ATOMIC_ACKNOWLEDGE):
	case OP(RDMA_READ_RESPONSE_FIRST):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (opcode == OP(ATOMIC_ACKNOWLEDGE))
			val = ib_u64_get(&ohdr->u.at.atomic_ack_eth);
		else
			val = 0;
		if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
		    opcode != OP(RDMA_READ_RESPONSE_FIRST))
			goto ack_done;
		hdrsize += 4;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_middle;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/* no AETH, no ACK */
		if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
read_middle:
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto ack_len_err;
		if (unlikely(pmtu >= qp->s_rdma_read_len))
			goto ack_len_err;

		/*
		 * We got a response so update the timeout.
		 * 4.096 usec. * (1 << qp->timeout)
		 */
		rvt_mod_retry_timer(qp);
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			qib_schedule_send(qp);
		}

		if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
			qp->s_retry = qp->s_retry_cnt;

		/*
		 * Update the RDMA receive state but do the copy w/o
		 * holding the locks and blocking interrupts.
		 */
		qp->s_rdma_read_len -= pmtu;
		update_last_psn(qp, psn);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		rvt_copy_sge(qp, &qp->s_rdma_read_sge,
			     data, pmtu, false, false);
		goto bail;

	case OP(RDMA_READ_RESPONSE_ONLY):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
			goto ack_done;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 0 && <= pmtu.
		 * Remember to account for the AETH header (4) and
		 * ICRC (4).
		 */
		if (unlikely(tlen < (hdrsize + pad + 8)))
			goto ack_len_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_last;

	case OP(RDMA_READ_RESPONSE_LAST):
		/* ACKs READ req. */
		if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 1 && <= pmtu.
		 * Remember to account for the AETH header (4) and
		 * ICRC (4).
		 */
		if (unlikely(tlen <= (hdrsize + pad + 8)))
			goto ack_len_err;
read_last:
		tlen -= hdrsize + pad + 8;
		if (unlikely(tlen != qp->s_rdma_read_len))
			goto ack_len_err;
		aeth = be32_to_cpu(ohdr->u.aeth);
		rvt_copy_sge(qp, &qp->s_rdma_read_sge,
			     data, tlen, false, false);
		WARN_ON(qp->s_rdma_read_sge.num_sge);
		(void) do_rc_ack(qp, aeth, psn,
				 OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
		goto ack_done;
	}

ack_op_err:
	status = IB_WC_LOC_QP_OP_ERR;
	goto ack_err;

ack_seq_err:
	rdma_seq_err(qp, ibp, psn, rcd);
	goto ack_done;

ack_len_err:
	status = IB_WC_LOC_LEN_ERR;
ack_err:
	if (qp->s_last == qp->s_acked) {
		rvt_send_complete(qp, wqe, status);
		rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
	}
ack_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
bail:
	return;
}
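
/**
 * qib_rc_rcv_error - process an incoming duplicate or error RC packet
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @diff: the difference between the PSN and the expected PSN
 * @rcd: the context pointer
 *
 * This is called from qib_rc_rcv() to process an unexpected
 * incoming RC packet for the given QP.
 * Called at interrupt level.
 * Return 1 if no more processing is needed; otherwise return 0 to
 * schedule a response to be sent.
 */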
static int qib_rc_rcv_error(struct ib_other_headers *ohdr,
			    void *data,
			    struct rvt_qp *qp,
			    u32 opcode,
			    u32 psn,
			    int diff,
			    struct qib_ctxtdata *rcd)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct rvt_ack_entry *e;
	unsigned long flags;
	u8 i, prev;
	int old_req;

	if (diff > 0) {
		/*
		 * Packet sequence error.
		 * A NAK will ACK earlier sends and RDMA writes.
		 * Don't queue the NAK if we already sent one.
		 */
		if (!qp->r_nak_state) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence NAK until all packets
			 * in the receive queue have been processed.
			 * Otherwise, we end up propagating congestion.
			 */
			if (list_empty(&qp->rspwait)) {
				qp->r_flags |= RVT_R_RSP_NAK;
				rvt_get_qp(qp);
				list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
			}
		}
		goto done;
	}

	/*
	 * Handle a duplicate request.  Don't re-execute SEND, RDMA
	 * write or atomic op.  Don't NAK errors, just silently drop
	 * the duplicate request.  Note that r_sge, r_len, and
	 * r_rcv_len may be in use so don't modify them.
	 *
	 * We are supposed to ACK the earliest duplicate PSN but we
	 * can coalesce an outstanding duplicate ACK.  We have to
	 * send the earliest so that RDMA reads can be restarted at
	 * the requester's expected PSN.
	 *
	 * First, find where this duplicate PSN falls within the
	 * ACKs previously sent.
	 * old_req is true if there is an older response that is scheduled
	 * to be sent before sending this one.
	 */
	e = NULL;
	old_req = 1;
	ibp->rvp.n_rc_dupreq++;

	spin_lock_irqsave(&qp->s_lock, flags);

	for (i = qp->r_head_ack_queue; ; i = prev) {
		if (i == qp->s_tail_ack_queue)
			old_req = 0;
		if (i)
			prev = i - 1;
		else
			prev = QIB_MAX_RDMA_ATOMIC;
		if (prev == qp->r_head_ack_queue) {
			e = NULL;
			break;
		}
		e = &qp->s_ack_queue[prev];
		if (!e->opcode) {
			e = NULL;
			break;
		}
		if (qib_cmp24(psn, e->psn) >= 0) {
			if (prev == qp->s_tail_ack_queue &&
			    qib_cmp24(psn, e->lpsn) <= 0)
				old_req = 0;
			break;
		}
	}
	switch (opcode) {
	case OP(RDMA_READ_REQUEST): {
		struct ib_reth *reth;
		u32 offset;
		u32 len;

		/*
		 * If we didn't find the RDMA read request in the ack queue,
		 * we can ignore this request.
		 */
		if (!e || e->opcode != OP(RDMA_READ_REQUEST))
			goto unlock_done;

		reth = &ohdr->u.rc.reth;
		/*
		 * Address range must be a subset of the original
		 * request and start on pmtu boundaries.
		 * We reuse the old ack_queue slot since the requester
		 * should not back up and request an earlier PSN for the
		 * same request.
		 */
		offset = ((psn - e->psn) & QIB_PSN_MASK) *
			qp->pmtu;
		len = be32_to_cpu(reth->length);
		if (unlikely(offset + len != e->rdma_sge.sge_length))
			goto unlock_done;
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		if (len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
					 IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto unlock_done;
		} else {
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->psn = psn;
		if (old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		/*
		 * If we didn't find the atomic request in the ack queue
		 * or the send tasklet is already backed up to send an
		 * earlier entry, we can ignore this request.
		 */
		if (!e || e->opcode != (u8) opcode || old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}

	default:
		/*
		 * Ignore this operation if it doesn't request an ACK
		 * or an earlier RDMA read or atomic is going to be resent.
		 */
		if (!(psn & IB_BTH_REQ_ACK) || old_req)
			goto unlock_done;
		/*
		 * Resend the most recent ACK if this request is
		 * after all the previous RDMA reads and atomics.
		 */
		if (i == qp->r_head_ack_queue) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->r_psn - 1;
			goto send_ack;
		}
		/*
		 * Try to send a simple ACK to work around a Mellanox bug
		 * which doesn't accept a RDMA read response or atomic
		 * response as an ACK for earlier SENDs or RDMA writes.
		 */
		if (!(qp->s_flags & RVT_S_RESP_PENDING)) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
			goto send_ack;
		}
		/*
		 * Resend the RDMA read or atomic op which
		 * ACKs this duplicate request.
		 */
		qp->s_tail_ack_queue = i;
		break;
	}
	qp->s_ack_state = OP(ACKNOWLEDGE);
	qp->s_flags |= RVT_S_RESP_PENDING;
	qp->r_nak_state = 0;
	qib_schedule_send(qp);

unlock_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
done:
	return 1;

send_ack:
	return 0;
}

static inline void qib_update_ack_queue(struct rvt_qp *qp, unsigned n)
{
	unsigned next;

	next = n + 1;
	if (next > QIB_MAX_RDMA_ATOMIC)
		next = 0;
	qp->s_tail_ack_queue = next;
	qp->s_ack_state = OP(ACKNOWLEDGE);
}
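
/**
 * qib_rc_rcv - process an incoming RC packet
 * @rcd: the context pointer
 * @hdr: the header of this packet
 * @has_grh: true if the header has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 *
 * This is called from qib_qp_rcv() to process an incoming RC packet
 * for the given QP.
 * Called at interrupt level.
 */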
void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
	struct qib_ibport *ibp = &rcd->ppd->ibport_data;
	struct ib_other_headers *ohdr;
	u32 opcode;
	u32 hdrsize;
	u32 psn;
	u32 pad;
	struct ib_wc wc;
	u32 pmtu = qp->pmtu;
	int diff;
	struct ib_reth *reth;
	unsigned long flags;
	int ret;

	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12;       /* LRH + BTH */
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12;  /* LRH + GRH + BTH */
	}

	opcode = be32_to_cpu(ohdr->bth[0]);
	if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
		return;

	psn = be32_to_cpu(ohdr->bth[2]);
	opcode >>= 24;

	/*
	 * Process responses (ACKs) before anything else.  Note that the
	 * packet sequence number will be for something in the send work
	 * queue rather than the expected receive packet sequence number.
	 * In other words, this QP is the requester.
	 */
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
				hdrsize, pmtu, rcd);
		return;
	}

	/* Compute 24 bits worth of difference. */
	diff = qib_cmp24(psn, qp->r_psn);
	if (unlikely(diff)) {
		if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
			return;
		goto send_ack;
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
			break;
		goto nack_inv;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto nack_inv;

	default:
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			goto nack_inv;
		/*
		 * Note that it is up to the requester to not send a new
		 * RDMA read or atomic operation before receiving an ACK
		 * for the previous operation.
		 */
		break;
	}

	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
		rvt_comm_est(qp);

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
		ret = rvt_get_rwqe(qp, false);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		qp->r_rcv_len = 0;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
	case OP(RDMA_WRITE_MIDDLE):
send_middle:
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto nack_inv;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto nack_inv;
		rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		/* consume RWQE */
		ret = rvt_get_rwqe(qp, true);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		goto send_last_imm;

	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
		ret = rvt_get_rwqe(qp, false);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto no_immediate_data;
		/* FALLTHROUGH -- for SEND_ONLY_WITH_IMMEDIATE */
	case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
		wc.ex.imm_data = ohdr->u.imm_data;
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;
	case OP(SEND_LAST):
	case OP(RDMA_WRITE_LAST):
no_immediate_data:
		wc.wc_flags = 0;
		wc.ex.imm_data = 0;
send_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto nack_inv;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto nack_inv;
		rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
		rvt_put_ss(&qp->r_sge);
		qp->r_msn++;
		if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
			break;
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		else
			wc.opcode = IB_WC_RECV;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
		wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
		/* zero fields that are N/A */
		wc.vendor_err = 0;
		wc.pkey_index = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr));
		break;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto nack_inv;

		reth = &ohdr->u.rc.reth;
		hdrsize += sizeof(*reth);
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
					 rkey, IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto nack_acc;
			qp->r_sge.num_sge = 1;
		} else {
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_FIRST))
			goto send_middle;
		else if (opcode == OP(RDMA_WRITE_ONLY))
			goto no_immediate_data;
		ret = rvt_get_rwqe(qp, true);
		if (ret < 0)
			goto nack_op_err;
		if (!ret) {
			rvt_put_ss(&qp->r_sge);
			goto rnr_nak;
		}
		wc.ex.imm_data = ohdr->u.rc.imm_data;
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;

	case OP(RDMA_READ_REQUEST): {
		struct rvt_ack_entry *e;
		u32 len;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		/* s_ack_queue is size QIB_MAX_RDMA_ATOMIC+1 so use > not >= */
		if (next > QIB_MAX_RDMA_ATOMIC)
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			qib_update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		reth = &ohdr->u.rc.reth;
		len = be32_to_cpu(reth->length);
		if (len) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
					 rkey, IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto nack_acc_unlck;
			/*
			 * Update the next expected PSN.  We add 1 later
			 * below, so only add the remainder here.
			 */
			qp->r_psn += rvt_div_mtu(qp, len - 1);
		} else {
			e->rdma_sge.mr = NULL;
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = qp->r_psn;
		/*
		 * We need to increment the MSN here instead of when we
		 * finish sending the result since a duplicate request would
		 * increment it more than once.
		 */
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send tasklet. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		qib_schedule_send(qp);

		goto sunlock;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		struct ib_atomic_eth *ateth;
		struct rvt_ack_entry *e;
		u64 vaddr;
		atomic64_t *maddr;
		u64 sdata;
		u32 rkey;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		if (next > QIB_MAX_RDMA_ATOMIC)
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			qib_update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		ateth = &ohdr->u.atomic_eth;
		vaddr = get_ib_ateth_vaddr(ateth);
		if (unlikely(vaddr & (sizeof(u64) - 1)))
			goto nack_inv_unlck;
		rkey = be32_to_cpu(ateth->rkey);
		/* Check rkey & NAK */
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  vaddr, rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc_unlck;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
		sdata = get_ib_ateth_swap(ateth);
		e->atomic_data = (opcode == OP(FETCH_ADD)) ?
			(u64) atomic64_add_return(sdata, maddr) - sdata :
			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
				      get_ib_ateth_compare(ateth),
				      sdata);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = psn;
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send tasklet. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		qib_schedule_send(qp);

		goto sunlock;
	}

	default:
		/* NAK unknown opcodes. */
		goto nack_inv;
	}
	qp->r_psn++;
	qp->r_state = opcode;
	qp->r_ack_psn = psn;
	qp->r_nak_state = 0;
	/* Send an ACK if requested or required. */
	if (psn & (1 << 31))
		goto send_ack;
	return;

rnr_nak:
	qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
	qp->r_ack_psn = qp->r_psn;
	/* Queue RNR NAK for later */
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_NAK;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
	return;

nack_op_err:
	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_NAK;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
	return;

nack_inv_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_inv:
	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_INVALID_REQUEST;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_NAK;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
	return;

nack_acc_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_acc:
	rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
	qp->r_ack_psn = qp->r_psn;
send_ack:
	qib_send_rc_ack(qp);
	return;

sunlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
}