/*
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "hfi.h"
#include "sdma.h"
#include "qp.h"

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_UC_##x
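/* e.g. OP(SEND_FIRST) expands to IB_OPCODE_UC_SEND_FIRST */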

/**
 * hfi1_make_uc_req - construct a request packet (SEND, RDMA write)
 * @qp: a pointer to the QP
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int hfi1_make_uc_req(struct hfi1_qp *qp)
{
	struct hfi1_other_headers *ohdr;
	struct hfi1_swqe *wqe;
	unsigned long flags;
	u32 hwords = 5;
	u32 bth0 = 0;
	u32 len;
	u32 pmtu = qp->pmtu;
	int ret = 0;
	int middle = 0;

	spin_lock_irqsave(&qp->s_lock, flags);

	if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_SEND_OK)) {
		if (!(ib_hfi1_state_ops[qp->state] & HFI1_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == qp->s_head)
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&qp->s_iowait.sdma_busy)) {
			qp->s_flags |= HFI1_S_WAIT_DMA;
			goto bail;
		}
		clear_ahg(qp);
		wqe = get_swqe_ptr(qp, qp->s_last);
		hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done;
	}

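	/*
	 * Locate the "other headers" (BTH onward): they follow the LRH
	 * directly, or follow the GRH when the destination requires a
	 * global route header.
	 */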
	ohdr = &qp->s_hdr->ibh.u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &qp->s_hdr->ibh.u.l.oth;

	/* Get the next send request. */
	wqe = get_swqe_ptr(qp, qp->s_cur);
	qp->s_wqe = NULL;
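	/*
	 * The state machine below fragments a SEND or RDMA WRITE into
	 * FIRST/MIDDLE/LAST packets whenever the payload exceeds the
	 * path MTU; qp->s_state records the opcode of the packet being
	 * built so the next call knows where to resume.
	 */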
	switch (qp->s_state) {
	default:
		if (!(ib_hfi1_state_ops[qp->state] &
		      HFI1_PROCESS_NEXT_SEND_OK))
			goto bail;
		/* Check if send work queue is empty. */
		if (qp->s_cur == qp->s_head) {
			clear_ahg(qp);
			goto bail;
		}
		/*
		 * Start a new request.
		 */
		wqe->psn = qp->s_next_psn;
		qp->s_psn = qp->s_next_psn;
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		len = wqe->length;
		qp->s_len = len;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND)
				qp->s_state = OP(SEND_ONLY);
			else {
				qp->s_state =
					OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			qp->s_wqe = wqe;
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
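			/*
			 * Build the RETH (RDMA extended transport header)
			 * carrying the remote address, rkey, and total
			 * length of the write.
			 */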
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->rdma_wr.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / 4;
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
				qp->s_state = OP(RDMA_WRITE_ONLY);
			else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the RETH */
				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			qp->s_wqe = wqe;
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto bail;
		}
		break;

	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
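			/*
			 * MIDDLE packets change little from one to the
			 * next, so SDMA can reuse most of the header via
			 * AHG when that capability is enabled.
			 */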
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND)
			qp->s_state = OP(SEND_LAST);
		else {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		qp->s_wqe = wqe;
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
			qp->s_state = OP(RDMA_WRITE_LAST);
		else {
			qp->s_state =
				OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		qp->s_wqe = wqe;
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;
	}
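	/*
	 * Common tail: account for this packet's payload, record the
	 * header size and payload pointers for the lower layers, and
	 * build the LRH/BTH through the shared RUC helper.
	 */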
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	qp->s_cur_sge = &qp->s_sge;
	qp->s_cur_size = len;
	hfi1_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
			     mask_psn(qp->s_next_psn++), middle);
done:
	ret = 1;
	goto unlock;

bail:
	qp->s_flags &= ~HFI1_S_BUSY;
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}

/**
 * hfi1_uc_rcv - handle an incoming UC packet
 * @packet: the packet structure, which carries the receive context,
 *	    packet header, receive flags, payload, length, and the QP
 *	    for this packet
 *
 * This is called from qp_rcv() to process an incoming UC packet
 * for the given QP.
 * Called at interrupt level.
 */
void hfi1_uc_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
	struct hfi1_ib_header *hdr = packet->hdr;
	u32 rcv_flags = packet->rcv_flags;
	void *data = packet->ebuf;
	u32 tlen = packet->tlen;
	struct hfi1_qp *qp = packet->qp;
	struct hfi1_other_headers *ohdr = packet->ohdr;
	u32 opcode;
	u32 hdrsize = packet->hlen;
	u32 psn;
	u32 pad;
	struct ib_wc wc;
	u32 pmtu = qp->pmtu;
	struct ib_reth *reth;
	int has_grh = rcv_flags & HFI1_HAS_GRH;
	int ret;
	u32 bth1;
	struct ib_grh *grh = NULL;

	opcode = be32_to_cpu(ohdr->bth[0]);
	if (hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
		return;

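	/*
	 * Handle any explicit congestion notification bits carried in
	 * BTH[1]: a BECN feeds the local congestion-control machinery,
	 * and a FECN is answered with a CNP back to the sender.
	 */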
	bth1 = be32_to_cpu(ohdr->bth[1]);
	if (unlikely(bth1 & (HFI1_BECN_SMASK | HFI1_FECN_SMASK))) {
		if (bth1 & HFI1_BECN_SMASK) {
			struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
			u32 rqpn, lqpn;
			u16 rlid = be16_to_cpu(hdr->lrh[3]);
			u8 sl, sc5;

			lqpn = bth1 & HFI1_QPN_MASK;
			rqpn = qp->remote_qpn;

			sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
			sl = ibp->sc_to_sl[sc5];

			process_becn(ppd, sl, rlid, lqpn, rqpn,
				     IB_CC_SVCTYPE_UC);
		}

		if (bth1 & HFI1_FECN_SMASK) {
			u16 pkey = (u16)be32_to_cpu(ohdr->bth[0]);
			u16 slid = be16_to_cpu(hdr->lrh[3]);
			u16 dlid = be16_to_cpu(hdr->lrh[1]);
			u32 src_qp = qp->remote_qpn;
			u8 sc5;

			sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];

			return_cnp(ibp, qp, src_qp, pkey, dlid, slid, sc5, grh);
		}
	}

	psn = be32_to_cpu(ohdr->bth[2]);
	opcode >>= 24;

	/* Compare the PSN versus the expected PSN. */
	if (unlikely(cmp_psn(psn, qp->r_psn) != 0)) {
		/*
		 * Handle a sequence error.
		 * Silently drop any current message.
		 */
		qp->r_psn = psn;
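		/*
		 * UC has no retransmission, so resynchronize the expected
		 * PSN and try to resume at the next message boundary: a
		 * new "first" or "only" packet restarts reception.
		 */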
inv:
		if (qp->r_state == OP(SEND_FIRST) ||
		    qp->r_state == OP(SEND_MIDDLE)) {
			set_bit(HFI1_R_REWIND_SGE, &qp->r_aflags);
			qp->r_sge.num_sge = 0;
		} else
			hfi1_put_ss(&qp->r_sge);
		qp->r_state = OP(SEND_LAST);
		switch (opcode) {
		case OP(SEND_FIRST):
		case OP(SEND_ONLY):
		case OP(SEND_ONLY_WITH_IMMEDIATE):
			goto send_first;

		case OP(RDMA_WRITE_FIRST):
		case OP(RDMA_WRITE_ONLY):
		case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
			goto rdma_first;

		default:
			goto drop;
		}
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
			break;
		goto inv;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto inv;

	default:
		if (opcode == OP(SEND_FIRST) ||
		    opcode == OP(SEND_ONLY) ||
		    opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_FIRST) ||
		    opcode == OP(RDMA_WRITE_ONLY) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			break;
		goto inv;
	}

	if (qp->state == IB_QPS_RTR && !(qp->r_flags & HFI1_R_COMM_EST))
		qp_comm_est(qp);

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
send_first:
		if (test_and_clear_bit(HFI1_R_REWIND_SGE, &qp->r_aflags))
			qp->r_sge = qp->s_rdma_read_sge;
		else {
			ret = hfi1_get_rwqe(qp, 0);
			if (ret < 0)
				goto op_err;
			if (!ret)
				goto drop;
			/*
			 * qp->s_rdma_read_sge will be the owner
			 * of the mr references.
			 */
			qp->s_rdma_read_sge = qp->r_sge;
		}
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto no_immediate_data;
		else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
			goto send_last_imm;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		/*
		 * Check for an invalid length: a MIDDLE packet must carry
		 * exactly one PMTU of payload and must not overrun the
		 * posted RWQE.
		 */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto rewind;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto rewind;
		hfi1_copy_sge(&qp->r_sge, data, pmtu, 0);
		break;

	case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
		wc.ex.imm_data = ohdr->u.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;
	case OP(SEND_LAST):
no_immediate_data:
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
send_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
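		/* (The 2-bit pad count sits at bits 21:20 of BTH[0].) */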
		/* Check for invalid length. */
		/* LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto rewind;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto rewind;
		wc.opcode = IB_WC_RECV;
		hfi1_copy_sge(&qp->r_sge, data, tlen, 0);
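		/* Release the MR references held for a possible rewind. */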
		hfi1_put_ss(&qp->s_rdma_read_sge);
last_imm:
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = qp->remote_ah_attr.dlid;
		/*
		 * It seems that IB mandates the presence of an SL in a
		 * work completion only for the UD transport (see section
		 * 11.4.2 of IBTA Vol. 1).
		 *
		 * However, the way the SL is chosen below is consistent
		 * with the way that IB/qib works and is trying to avoid
		 * introducing incompatibilities.
		 *
		 * See also OPA Vol. 1, section 9.7.6, and table 9-17.
		 */
		wc.sl = qp->remote_ah_attr.sl;
		/* zero fields that are N/A */
		wc.vendor_err = 0;
		wc.pkey_index = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
			      (ohdr->bth[0] &
			       cpu_to_be32(IB_BTH_SOLICITED)) != 0);
		break;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
rdma_first:
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_WRITE))) {
			goto drop;
		}
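		/* Parse the RETH to find where the remote write lands. */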
		reth = &ohdr->u.rc.reth;
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey */
			ok = hfi1_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
					  vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto drop;
			qp->r_sge.num_sge = 1;
		} else {
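			/* A zero-length write needs no rkey check. */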
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_ONLY))
			goto rdma_last;
		else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) {
			wc.ex.imm_data = ohdr->u.rc.imm_data;
			goto rdma_last_imm;
		}
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		/*
		 * Check for an invalid length: a MIDDLE packet must carry
		 * exactly one PMTU of payload and must not overrun the
		 * length given in the RETH.
		 */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto drop;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto drop;
		hfi1_copy_sge(&qp->r_sge, data, pmtu, 1);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		wc.ex.imm_data = ohdr->u.imm_data;
rdma_last_imm:
		wc.wc_flags = IB_WC_WITH_IMM;

		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto drop;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
			goto drop;
		if (test_and_clear_bit(HFI1_R_REWIND_SGE, &qp->r_aflags))
			hfi1_put_ss(&qp->s_rdma_read_sge);
		else {
			ret = hfi1_get_rwqe(qp, 1);
			if (ret < 0)
				goto op_err;
			if (!ret)
				goto drop;
		}
		wc.byte_len = qp->r_len;
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		hfi1_copy_sge(&qp->r_sge, data, tlen, 1);
		hfi1_put_ss(&qp->r_sge);
		goto last_imm;

	case OP(RDMA_WRITE_LAST):
rdma_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto drop;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
			goto drop;
		hfi1_copy_sge(&qp->r_sge, data, tlen, 1);
		hfi1_put_ss(&qp->r_sge);
		break;

	default:
		/* Drop packet for unknown opcodes. */
		goto drop;
	}
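	/* Packet accepted: advance the expected PSN and record the state. */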
	qp->r_psn++;
	qp->r_state = opcode;
	return;

rewind:
	set_bit(HFI1_R_REWIND_SGE, &qp->r_aflags);
	qp->r_sge.num_sge = 0;
drop:
	ibp->n_pkt_drops++;
	return;

op_err:
	hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	return;
}