This source file includes the following definitions:
- lpfc_nvme_cmd_template
- lpfc_nvme_create_queue
- lpfc_nvme_delete_queue
- lpfc_nvme_localport_delete
- lpfc_nvme_remoteport_delete
- lpfc_nvme_cmpl_gen_req
- lpfc_nvme_gen_req
- lpfc_nvme_ls_req
- lpfc_nvme_ls_abort
- lpfc_nvme_adj_fcp_sgls
- lpfc_nvme_ktime
- lpfc_nvme_io_cmd_wqe_cmpl
- lpfc_nvme_prep_io_cmd
- lpfc_nvme_prep_io_dma
- lpfc_nvme_fcp_io_submit
- lpfc_nvme_abort_fcreq_cmpl
- lpfc_nvme_fcp_abort
- lpfc_get_nvme_buf
- lpfc_release_nvme_buf
- lpfc_nvme_create_localport
- lpfc_nvme_lport_unreg_wait
- lpfc_nvme_destroy_localport
- lpfc_nvme_update_localport
- lpfc_nvme_register_port
- lpfc_nvme_rescan_port
- lpfc_nvme_unregister_port
- lpfc_sli4_nvme_xri_aborted
- lpfc_nvme_wait_for_io_drain
- lpfc_nvme_cancel_iocb
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/delay.h>
27 #include <asm/unaligned.h>
28 #include <linux/crc-t10dif.h>
29 #include <net/checksum.h>
30
31 #include <scsi/scsi.h>
32 #include <scsi/scsi_device.h>
33 #include <scsi/scsi_eh.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi_tcq.h>
36 #include <scsi/scsi_transport_fc.h>
37 #include <scsi/fc/fc_fs.h>
38
39 #include <linux/nvme.h>
40 #include <linux/nvme-fc-driver.h>
41 #include <linux/nvme-fc.h>
42 #include "lpfc_version.h"
43 #include "lpfc_hw4.h"
44 #include "lpfc_hw.h"
45 #include "lpfc_sli.h"
46 #include "lpfc_sli4.h"
47 #include "lpfc_nl.h"
48 #include "lpfc_disc.h"
49 #include "lpfc.h"
50 #include "lpfc_nvme.h"
51 #include "lpfc_scsi.h"
52 #include "lpfc_logmsg.h"
53 #include "lpfc_crtn.h"
54 #include "lpfc_vport.h"
55 #include "lpfc_debugfs.h"
56
57
58
59 static struct lpfc_io_buf *
60 lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
61 int idx, int expedite);
62
63 static void
64 lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *);
65
66 static struct nvme_fc_port_template lpfc_nvme_template;
67
68 static union lpfc_wqe128 lpfc_iread_cmd_template;
69 static union lpfc_wqe128 lpfc_iwrite_cmd_template;
70 static union lpfc_wqe128 lpfc_icmnd_cmd_template;
71
72
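/*
 * lpfc_nvme_cmd_template - initialize the static fields of the IREAD, IWRITE
 * and ICMND WQE templates used for NVME FCP I/O.  Only fields that never
 * change per command are set here; per-I/O fields (RPI, XRI, request tag,
 * transfer lengths) are filled in later by lpfc_nvme_prep_io_cmd() and
 * lpfc_nvme_prep_io_dma().
 */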
73 void
74 lpfc_nvme_cmd_template(void)
75 {
76 union lpfc_wqe128 *wqe;
77
78
79 wqe = &lpfc_iread_cmd_template;
80 memset(wqe, 0, sizeof(union lpfc_wqe128));
81
82
83
84
85
86
87
88
89
90
91
92
93 bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
94 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
95 bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
96 bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);
97
98
99
100
101
102
103 bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
104 bf_set(wqe_nvme, &wqe->fcp_iread.wqe_com, 1);
105 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
106 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
107 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
108 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
109
110
111 bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, NVME_READ_CMD);
112 bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
113 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
114
115
116
117
118
119
120 wqe = &lpfc_iwrite_cmd_template;
121 memset(wqe, 0, sizeof(union lpfc_wqe128));
122
123
124
125
126
127
128
129
130
131
132
133
134 bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
135 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
136 bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
137 bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);
138
139
140
141
142
143
144 bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
145 bf_set(wqe_nvme, &wqe->fcp_iwrite.wqe_com, 1);
146 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
147 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
148 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
149 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
150
151
152 bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, NVME_WRITE_CMD);
153 bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
154 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
155
156
157
158
159
160
161 wqe = &lpfc_icmnd_cmd_template;
162 memset(wqe, 0, sizeof(union lpfc_wqe128));
163
164
165
166
167
168
169
170
171
172
173 bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
174 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
175 bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
176 bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);
177
178
179
180
181
182
183 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
184 bf_set(wqe_nvme, &wqe->fcp_icmd.wqe_com, 1);
185 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
186 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
187 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
188 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
189
190
191 bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, FCP_COMMAND);
192 bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
193 bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);
194
195
196 }
197
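/*
 * lpfc_nvme_create_queue - allocate an lpfc_nvme_qhandle for NVME queue @qidx
 * (0 is the admin queue) and bind it to one of the adapter hardware queues;
 * I/O queue indexes wrap modulo lpfc_nvme_template.max_hw_queues.  The handle
 * is returned to the transport through @handle and passed back on every I/O.
 * Returns 0 on success or -ENOMEM on allocation failure.
 */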
213 static int
214 lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
215 unsigned int qidx, u16 qsize,
216 void **handle)
217 {
218 struct lpfc_nvme_lport *lport;
219 struct lpfc_vport *vport;
220 struct lpfc_nvme_qhandle *qhandle;
221 char *str;
222
223 if (!pnvme_lport->private)
224 return -ENOMEM;
225
226 lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
227 vport = lport->vport;
228 qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
229 if (qhandle == NULL)
230 return -ENOMEM;
231
232 qhandle->cpu_id = raw_smp_processor_id();
233 qhandle->qidx = qidx;
234
235
236
237
238
239 if (qidx) {
240 str = "IO ";
241 qhandle->index = ((qidx - 1) %
242 lpfc_nvme_template.max_hw_queues);
243 } else {
244 str = "ADM";
245 qhandle->index = qidx;
246 }
247
248 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
249 "6073 Binding %s HdwQueue %d (cpu %d) to "
250 "hdw_queue %d qhandle x%px\n", str,
251 qidx, qhandle->cpu_id, qhandle->index, qhandle);
252 *handle = (void *)qhandle;
253 return 0;
254 }
255
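/*
 * lpfc_nvme_delete_queue - log the teardown of NVME queue @qidx and free the
 * qhandle allocated by lpfc_nvme_create_queue().
 */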
270 static void
271 lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
272 unsigned int qidx,
273 void *handle)
274 {
275 struct lpfc_nvme_lport *lport;
276 struct lpfc_vport *vport;
277
278 if (!pnvme_lport->private)
279 return;
280
281 lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
282 vport = lport->vport;
283
284 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
285 "6001 ENTER. lpfc_pnvme x%px, qidx x%x qhandle x%px\n",
286 lport, qidx, handle);
287 kfree(handle);
288 }
289
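/*
 * lpfc_nvme_localport_delete - transport callback invoked when the localport
 * unregister completes; wakes the waiter in lpfc_nvme_destroy_localport().
 */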
290 static void
291 lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
292 {
293 struct lpfc_nvme_lport *lport = localport->private;
294
295 lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
296 "6173 localport x%px delete complete\n",
297 lport);
298
299
300 if (lport->vport->localport)
301 complete(lport->lport_unreg_cmp);
302 }
303
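/*
 * lpfc_nvme_remoteport_delete - transport callback invoked when a remoteport
 * delete completes.  Under the hbalock, clears the ndlp's nrport back-pointer
 * and, if the node was flagged NLP_WAIT_FOR_UNREG, drops the node reference
 * taken when the remoteport was registered.
 */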
315 static void
316 lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
317 {
318 struct lpfc_nvme_rport *rport = remoteport->private;
319 struct lpfc_vport *vport;
320 struct lpfc_nodelist *ndlp;
321
322 ndlp = rport->ndlp;
323 if (!ndlp)
324 goto rport_err;
325
326 vport = ndlp->vport;
327 if (!vport)
328 goto rport_err;
329
330
331
332
333
334 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
335 "6146 remoteport delete of remoteport x%px\n",
336 remoteport);
337 spin_lock_irq(&vport->phba->hbalock);
338
339
340
341
342 if (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG) {
343 ndlp->nrport = NULL;
344 ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
345 spin_unlock_irq(&vport->phba->hbalock);
346
347
348
349
350 lpfc_nlp_put(ndlp);
351 } else {
352 spin_unlock_irq(&vport->phba->hbalock);
353 }
354
355 rport_err:
356 return;
357 }
358
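/*
 * lpfc_nvme_cmpl_gen_req - completion handler for an NVME LS GEN_REQUEST64
 * WQE.  Updates the lport LS completion counters, frees the BPL buffer,
 * invokes the transport's done() callback with the WQE status, drops the
 * ndlp reference and releases the iocbq.
 */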
359 static void
360 lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
361 struct lpfc_wcqe_complete *wcqe)
362 {
363 struct lpfc_vport *vport = cmdwqe->vport;
364 struct lpfc_nvme_lport *lport;
365 uint32_t status;
366 struct nvmefc_ls_req *pnvme_lsreq;
367 struct lpfc_dmabuf *buf_ptr;
368 struct lpfc_nodelist *ndlp;
369
370 pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
371 status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
372
373 if (vport->localport) {
374 lport = (struct lpfc_nvme_lport *)vport->localport->private;
375 if (lport) {
376 atomic_inc(&lport->fc4NvmeLsCmpls);
377 if (status) {
378 if (bf_get(lpfc_wcqe_c_xb, wcqe))
379 atomic_inc(&lport->cmpl_ls_xb);
380 atomic_inc(&lport->cmpl_ls_err);
381 }
382 }
383 }
384
385 ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
386 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
387 "6047 nvme cmpl Enter "
388 "Data %px DID %x Xri: %x status %x reason x%x "
389 "cmd:x%px lsreg:x%px bmp:x%px ndlp:x%px\n",
390 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
391 cmdwqe->sli4_xritag, status,
392 (wcqe->parameter & 0xffff),
393 cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);
394
395 lpfc_nvmeio_data(phba, "NVME LS CMPL: xri x%x stat x%x parm x%x\n",
396 cmdwqe->sli4_xritag, status, wcqe->parameter);
397
398 if (cmdwqe->context3) {
399 buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3;
400 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
401 kfree(buf_ptr);
402 cmdwqe->context3 = NULL;
403 }
404 if (pnvme_lsreq->done)
405 pnvme_lsreq->done(pnvme_lsreq, status);
406 else
407 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
408 "6046 nvme cmpl without done call back? "
409 "Data %px DID %x Xri: %x status %x\n",
410 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
411 cmdwqe->sli4_xritag, status);
412 if (ndlp) {
413 lpfc_nlp_put(ndlp);
414 cmdwqe->context1 = NULL;
415 }
416 lpfc_sli_release_iocbq(phba, cmdwqe);
417 }
418
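/*
 * lpfc_nvme_gen_req - build a GEN_REQUEST64 WQE from the caller's BPL and
 * issue it to hardware queue 0 to carry an NVME LS request to the remote
 * node.  Returns 0 if the WQE was queued, 1 on any failure.
 */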
419 static int
420 lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
421 struct lpfc_dmabuf *inp,
422 struct nvmefc_ls_req *pnvme_lsreq,
423 void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
424 struct lpfc_wcqe_complete *),
425 struct lpfc_nodelist *ndlp, uint32_t num_entry,
426 uint32_t tmo, uint8_t retry)
427 {
428 struct lpfc_hba *phba = vport->phba;
429 union lpfc_wqe128 *wqe;
430 struct lpfc_iocbq *genwqe;
431 struct ulp_bde64 *bpl;
432 struct ulp_bde64 bde;
433 int i, rc, xmit_len, first_len;
434
435
436 genwqe = lpfc_sli_get_iocbq(phba);
437 if (genwqe == NULL)
438 return 1;
439
440 wqe = &genwqe->wqe;
441
442 memset(wqe, 0, sizeof(union lpfc_wqe));
443
444 genwqe->context3 = (uint8_t *)bmp;
445 genwqe->iocb_flag |= LPFC_IO_NVME_LS;
446
447
448 genwqe->context1 = lpfc_nlp_get(ndlp);
449 genwqe->context2 = (uint8_t *)pnvme_lsreq;
450
451
452 if (!tmo)
453
454 tmo = (3 * phba->fc_ratov);
455
456
457 xmit_len = 0;
458 first_len = 0;
459 bpl = (struct ulp_bde64 *)bmp->virt;
460 for (i = 0; i < num_entry; i++) {
461 bde.tus.w = bpl[i].tus.w;
462 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
463 break;
464 xmit_len += bde.tus.f.bdeSize;
465 if (i == 0)
466 first_len = xmit_len;
467 }
468
469 genwqe->rsvd2 = num_entry;
470 genwqe->hba_wqidx = 0;
471
472
473 wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
474 wqe->generic.bde.tus.f.bdeSize = first_len;
475 wqe->generic.bde.addrLow = bpl[0].addrLow;
476 wqe->generic.bde.addrHigh = bpl[0].addrHigh;
477
478
479 wqe->gen_req.request_payload_len = first_len;
480
481
482
483
484 bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
485 bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
486 bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
487 bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
488 bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);
489
490
491 bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com,
492 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
493 bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag);
494
495
496 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, (vport->phba->fc_ratov-1));
497 bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3);
498 bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
499 bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI);
500
501
502 wqe->gen_req.wqe_com.abort_tag = genwqe->iotag;
503
504
505 bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag);
506
507
508 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
509 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
510 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
511 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
512 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
513
514
515 bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
516 bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND);
517
518
519
520 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
521 "6050 Issue GEN REQ WQE to NPORT x%x "
522 "Data: x%x x%x wq:x%px lsreq:x%px bmp:x%px "
523 "xmit:%d 1st:%d\n",
524 ndlp->nlp_DID, genwqe->iotag,
525 vport->port_state,
526 genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
527 genwqe->wqe_cmpl = cmpl;
528 genwqe->iocb_cmpl = NULL;
529 genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
530 genwqe->vport = vport;
531 genwqe->retry = retry;
532
533 lpfc_nvmeio_data(phba, "NVME LS XMIT: xri x%x iotag x%x to x%06x\n",
534 genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);
535
536 rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe);
537 if (rc) {
538 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
539 "6045 Issue GEN REQ WQE to NPORT x%x "
540 "Data: x%x x%x\n",
541 ndlp->nlp_DID, genwqe->iotag,
542 vport->port_state);
543 lpfc_sli_release_iocbq(phba, genwqe);
544 return 1;
545 }
546 return 0;
547 }
548
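/*
 * lpfc_nvme_ls_req - FC-NVME transport entry point for issuing a Link Service
 * request.  Validates the lport/rport/ndlp state, builds a two-entry BPL
 * describing the request and response buffers, and sends it with
 * lpfc_nvme_gen_req().  Returns 0 on success, -EINVAL/-ENODEV on validation
 * failures, or a nonzero status if the WQE could not be issued.
 */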
562 static int
563 lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
564 struct nvme_fc_remote_port *pnvme_rport,
565 struct nvmefc_ls_req *pnvme_lsreq)
566 {
567 int ret = 0;
568 struct lpfc_nvme_lport *lport;
569 struct lpfc_nvme_rport *rport;
570 struct lpfc_vport *vport;
571 struct lpfc_nodelist *ndlp;
572 struct ulp_bde64 *bpl;
573 struct lpfc_dmabuf *bmp;
574 uint16_t ntype, nstate;
575
576
577
578
579
580
581
582
583
584
585
586
587 lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
588 rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
589 if (unlikely(!lport) || unlikely(!rport))
590 return -EINVAL;
591
592 vport = lport->vport;
593
594 if (vport->load_flag & FC_UNLOADING)
595 return -ENODEV;
596
597
598 ndlp = rport->ndlp;
599 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
600 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
601 "6051 Remoteport x%px, rport has invalid ndlp. "
602 "Failing LS Req\n", pnvme_rport);
603 return -ENODEV;
604 }
605
606
607
608
609 ntype = ndlp->nlp_type;
610 nstate = ndlp->nlp_state;
611 if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
612 (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
613 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
614 "6088 DID x%06x not ready for "
615 "IO. State x%x, Type x%x\n",
616 pnvme_rport->port_id,
617 ndlp->nlp_state, ndlp->nlp_type);
618 return -ENODEV;
619 }
620 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
621 if (!bmp) {
622
623 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
624 "6044 Could not find node for DID %x\n",
625 pnvme_rport->port_id);
626 return 2;
627 }
628 INIT_LIST_HEAD(&bmp->list);
629 bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
630 if (!bmp->virt) {
631 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
632 "6042 Could not find node for DID %x\n",
633 pnvme_rport->port_id);
634 kfree(bmp);
635 return 3;
636 }
637 bpl = (struct ulp_bde64 *)bmp->virt;
638 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
639 bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
640 bpl->tus.f.bdeFlags = 0;
641 bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen;
642 bpl->tus.w = le32_to_cpu(bpl->tus.w);
643 bpl++;
644
645 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma));
646 bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma));
647 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
648 bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
649 bpl->tus.w = le32_to_cpu(bpl->tus.w);
650
651
652 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
653 "6149 Issue LS Req to DID 0x%06x lport x%px, "
654 "rport x%px lsreq x%px rqstlen:%d rsplen:%d "
655 "%pad %pad\n",
656 ndlp->nlp_DID, pnvme_lport, pnvme_rport,
657 pnvme_lsreq, pnvme_lsreq->rqstlen,
658 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
659 &pnvme_lsreq->rspdma);
660
661 atomic_inc(&lport->fc4NvmeLsRequests);
662
663
664
665
666 ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
667 pnvme_lsreq, lpfc_nvme_cmpl_gen_req,
668 ndlp, 2, 30, 0);
669 if (ret != WQE_SUCCESS) {
670 atomic_inc(&lport->xmt_ls_err);
671 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
672 "6052 EXIT. issue ls wqe failed lport x%px, "
673 "rport x%px lsreq x%px Status %x DID %x\n",
674 pnvme_lport, pnvme_rport, pnvme_lsreq,
675 ret, ndlp->nlp_DID);
676 lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
677 kfree(bmp);
678 return ret;
679 }
680
681
682 return ret;
683 }
684
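/*
 * lpfc_nvme_ls_abort - FC-NVME transport entry point for aborting an
 * outstanding LS request.  Walks the NVME LS ring txcmplq for WQEs that
 * belong to the remote node, marks them driver-aborted and issues an abort
 * for each one found.
 */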
698 static void
699 lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
700 struct nvme_fc_remote_port *pnvme_rport,
701 struct nvmefc_ls_req *pnvme_lsreq)
702 {
703 struct lpfc_nvme_lport *lport;
704 struct lpfc_vport *vport;
705 struct lpfc_hba *phba;
706 struct lpfc_nodelist *ndlp;
707 LIST_HEAD(abort_list);
708 struct lpfc_sli_ring *pring;
709 struct lpfc_iocbq *wqe, *next_wqe;
710
711 lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
712 if (unlikely(!lport))
713 return;
714 vport = lport->vport;
715 phba = vport->phba;
716
717 if (vport->load_flag & FC_UNLOADING)
718 return;
719
720 ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
721 if (!ndlp) {
722 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
723 "6049 Could not find node for DID %x\n",
724 pnvme_rport->port_id);
725 return;
726 }
727
728
729 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
730 "6040 ENTER. lport x%px, rport x%px lsreq x%px rqstlen:%d "
731 "rsplen:%d %pad %pad\n",
732 pnvme_lport, pnvme_rport,
733 pnvme_lsreq, pnvme_lsreq->rqstlen,
734 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
735 &pnvme_lsreq->rspdma);
736
737
738
739
740
741
742 pring = phba->sli4_hba.nvmels_wq->pring;
743 spin_lock_irq(&phba->hbalock);
744 spin_lock(&pring->ring_lock);
745 list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
746
747 if (lpfc_check_sli_ndlp(phba, pring, wqe, ndlp)) {
748 wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
749 list_add_tail(&wqe->dlist, &abort_list);
750 }
751 }
752 spin_unlock(&pring->ring_lock);
753 spin_unlock_irq(&phba->hbalock);
754
755
756 list_for_each_entry_safe(wqe, next_wqe, &abort_list, dlist) {
757 atomic_inc(&lport->xmt_ls_abort);
758 spin_lock_irq(&phba->hbalock);
759 list_del_init(&wqe->dlist);
760 lpfc_sli_issue_abort_iotag(phba, pring, wqe);
761 spin_unlock_irq(&phba->hbalock);
762 }
763 }
764
765
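/*
 * lpfc_nvme_adj_fcp_sgls - set up SGE 0 (NVME command IU) and SGE 1 (response
 * IU) for an FCP I/O.  When cfg_nvme_embed_cmd is set, the SQE is copied
 * directly into the WQE starting at word 16 instead of being referenced
 * through a 64-bit BDE.
 */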
766 static inline void
767 lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
768 struct lpfc_io_buf *lpfc_ncmd,
769 struct nvmefc_fcp_req *nCmd)
770 {
771 struct lpfc_hba *phba = vport->phba;
772 struct sli4_sge *sgl;
773 union lpfc_wqe128 *wqe;
774 uint32_t *wptr, *dptr;
775
776
777
778
779
780
781
782 wqe = &lpfc_ncmd->cur_iocbq.wqe;
783
784
785
786
787
788
789
790
791 sgl = lpfc_ncmd->dma_sgl;
792 sgl->sge_len = cpu_to_le32(nCmd->cmdlen);
793 if (phba->cfg_nvme_embed_cmd) {
794 sgl->addr_hi = 0;
795 sgl->addr_lo = 0;
796
797
798 wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
799 wqe->generic.bde.tus.f.bdeSize = 56;
800 wqe->generic.bde.addrHigh = 0;
801 wqe->generic.bde.addrLow = 64;
802
803
804
805
806
807
808
809
810
811
812
813 wptr = &wqe->words[16];
814 dptr = (uint32_t *)nCmd->cmdaddr;
815 dptr++;
816
817 *wptr++ = *dptr++;
818 *wptr++ = *dptr++;
819 *wptr++ = *dptr++;
820 *wptr++ = *dptr++;
821 dptr++;
822 *wptr++ = *dptr++;
823 *wptr++ = *dptr++;
824 dptr += 8;
825 *wptr++ = *dptr++;
826 *wptr++ = *dptr++;
827 *wptr++ = *dptr++;
828 *wptr++ = *dptr++;
829 *wptr++ = *dptr++;
830 *wptr++ = *dptr++;
831 *wptr++ = *dptr++;
832 *wptr = *dptr;
833 } else {
834 sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->cmddma));
835 sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->cmddma));
836
837
838 wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
839 wqe->generic.bde.tus.f.bdeSize = nCmd->cmdlen;
840 wqe->generic.bde.addrHigh = sgl->addr_hi;
841 wqe->generic.bde.addrLow = sgl->addr_lo;
842
843
844 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
845 bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
846 }
847
848 sgl++;
849
850
851 sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma));
852 sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma));
853 sgl->word2 = le32_to_cpu(sgl->word2);
854 if (nCmd->sg_cnt)
855 bf_set(lpfc_sli4_sge_last, sgl, 0);
856 else
857 bf_set(lpfc_sli4_sge_last, sgl, 1);
858 sgl->word2 = cpu_to_le32(sgl->word2);
859 sgl->sge_len = cpu_to_le32(nCmd->rsplen);
860 }
861
862 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
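/*
 * lpfc_nvme_ktime - accumulate per-I/O latency statistics (debugfs builds
 * only).  The four segments cover last-cmd to start, start to WQ put, WQ put
 * to ISR completion, and ISR completion to upper-layer completion; samples
 * with out-of-order timestamps are discarded.
 */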
863 static void
864 lpfc_nvme_ktime(struct lpfc_hba *phba,
865 struct lpfc_io_buf *lpfc_ncmd)
866 {
867 uint64_t seg1, seg2, seg3, seg4;
868 uint64_t segsum;
869
870 if (!lpfc_ncmd->ts_last_cmd ||
871 !lpfc_ncmd->ts_cmd_start ||
872 !lpfc_ncmd->ts_cmd_wqput ||
873 !lpfc_ncmd->ts_isr_cmpl ||
874 !lpfc_ncmd->ts_data_nvme)
875 return;
876
877 if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_cmd_start)
878 return;
879 if (lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd)
880 return;
881 if (lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start)
882 return;
883 if (lpfc_ncmd->ts_isr_cmpl < lpfc_ncmd->ts_cmd_wqput)
884 return;
885 if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_isr_cmpl)
886 return;
887
888
889
890
891
892
893
894
895
896
897 seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd;
898 if (seg1 > 5000000)
899 seg1 = 0;
900
901
902 seg2 = (lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start);
903 segsum = seg2;
904 seg3 = lpfc_ncmd->ts_isr_cmpl - lpfc_ncmd->ts_cmd_start;
905 if (segsum > seg3)
906 return;
907 seg3 -= segsum;
908 segsum += seg3;
909
910 seg4 = lpfc_ncmd->ts_data_nvme - lpfc_ncmd->ts_cmd_start;
911 if (segsum > seg4)
912 return;
913 seg4 -= segsum;
914
915 phba->ktime_data_samples++;
916 phba->ktime_seg1_total += seg1;
917 if (seg1 < phba->ktime_seg1_min)
918 phba->ktime_seg1_min = seg1;
919 else if (seg1 > phba->ktime_seg1_max)
920 phba->ktime_seg1_max = seg1;
921 phba->ktime_seg2_total += seg2;
922 if (seg2 < phba->ktime_seg2_min)
923 phba->ktime_seg2_min = seg2;
924 else if (seg2 > phba->ktime_seg2_max)
925 phba->ktime_seg2_max = seg2;
926 phba->ktime_seg3_total += seg3;
927 if (seg3 < phba->ktime_seg3_min)
928 phba->ktime_seg3_min = seg3;
929 else if (seg3 > phba->ktime_seg3_max)
930 phba->ktime_seg3_max = seg3;
931 phba->ktime_seg4_total += seg4;
932 if (seg4 < phba->ktime_seg4_min)
933 phba->ktime_seg4_min = seg4;
934 else if (seg4 > phba->ktime_seg4_max)
935 phba->ktime_seg4_max = seg4;
936
937 lpfc_ncmd->ts_last_cmd = 0;
938 lpfc_ncmd->ts_cmd_start = 0;
939 lpfc_ncmd->ts_cmd_wqput = 0;
940 lpfc_ncmd->ts_isr_cmpl = 0;
941 lpfc_ncmd->ts_data_nvme = 0;
942 }
943 #endif
944
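/*
 * lpfc_nvme_io_cmd_wqe_cmpl - completion handler for an NVME FCP I/O WQE.
 * Builds the ERSP status for the upper-layer request (or maps the WQE error
 * to NVME_SC_INTERNAL), records exchange-busy (XB) state, updates per-hdwq
 * statistics and optional latency samples, calls nCmd->done() unless the
 * exchange is still marked busy, and releases the lpfc_io_buf.
 */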
959 static void
960 lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
961 struct lpfc_wcqe_complete *wcqe)
962 {
963 struct lpfc_io_buf *lpfc_ncmd =
964 (struct lpfc_io_buf *)pwqeIn->context1;
965 struct lpfc_vport *vport = pwqeIn->vport;
966 struct nvmefc_fcp_req *nCmd;
967 struct nvme_fc_ersp_iu *ep;
968 struct nvme_fc_cmd_iu *cp;
969 struct lpfc_nodelist *ndlp;
970 struct lpfc_nvme_fcpreq_priv *freqpriv;
971 struct lpfc_nvme_lport *lport;
972 uint32_t code, status, idx;
973 uint16_t cid, sqhd, data;
974 uint32_t *ptr;
975
976
977 if (!lpfc_ncmd) {
978 lpfc_printf_vlog(vport, KERN_ERR,
979 LOG_NODE | LOG_NVME_IOERR,
980 "6071 Null lpfc_ncmd pointer. No "
981 "release, skip completion\n");
982 return;
983 }
984
985
986 spin_lock(&lpfc_ncmd->buf_lock);
987
988 if (!lpfc_ncmd->nvmeCmd) {
989 spin_unlock(&lpfc_ncmd->buf_lock);
990 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
991 "6066 Missing cmpl ptrs: lpfc_ncmd x%px, "
992 "nvmeCmd x%px\n",
993 lpfc_ncmd, lpfc_ncmd->nvmeCmd);
994
995
996 lpfc_release_nvme_buf(phba, lpfc_ncmd);
997 return;
998 }
999 nCmd = lpfc_ncmd->nvmeCmd;
1000 status = bf_get(lpfc_wcqe_c_status, wcqe);
1001
1002 idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
1003 phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++;
1004
1005 if (unlikely(status && vport->localport)) {
1006 lport = (struct lpfc_nvme_lport *)vport->localport->private;
1007 if (lport) {
1008 if (bf_get(lpfc_wcqe_c_xb, wcqe))
1009 atomic_inc(&lport->cmpl_fcp_xb);
1010 atomic_inc(&lport->cmpl_fcp_err);
1011 }
1012 }
1013
1014 lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
1015 lpfc_ncmd->cur_iocbq.sli4_xritag,
1016 status, wcqe->parameter);
1017
1018
1019
1020
1021 ndlp = lpfc_ncmd->ndlp;
1022 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1023 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
1024 "6062 Ignoring NVME cmpl. No ndlp\n");
1025 goto out_err;
1026 }
1027
1028 code = bf_get(lpfc_wcqe_c_code, wcqe);
1029 if (code == CQE_CODE_NVME_ERSP) {
1030
1031 ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;
1032
1033
1034
1035
1036
1037 cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
1038 cid = cp->sqe.common.command_id;
1039
1040
1041
1042
1043
1044
1045
1046 sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe);
1047
1048
1049 ep->iu_len = cpu_to_be16(8);
1050 ep->rsn = wcqe->parameter;
1051 ep->xfrd_len = cpu_to_be32(nCmd->payload_length);
1052 ep->rsvd12 = 0;
1053 ptr = (uint32_t *)&ep->cqe.result.u64;
1054 *ptr++ = wcqe->total_data_placed;
1055 data = bf_get(lpfc_wcqe_c_ersp0, wcqe);
1056 *ptr = (uint32_t)data;
1057 ep->cqe.sq_head = sqhd;
1058 ep->cqe.sq_id = nCmd->sqid;
1059 ep->cqe.command_id = cid;
1060 ep->cqe.status = 0;
1061
1062 lpfc_ncmd->status = IOSTAT_SUCCESS;
1063 lpfc_ncmd->result = 0;
1064 nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
1065 nCmd->transferred_length = nCmd->payload_length;
1066 } else {
1067 lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK);
1068 lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081 switch (lpfc_ncmd->status) {
1082 case IOSTAT_SUCCESS:
1083 nCmd->transferred_length = wcqe->total_data_placed;
1084 nCmd->rcv_rsplen = 0;
1085 nCmd->status = 0;
1086 break;
1087 case IOSTAT_FCP_RSP_ERROR:
1088 nCmd->transferred_length = wcqe->total_data_placed;
1089 nCmd->rcv_rsplen = wcqe->parameter;
1090 nCmd->status = 0;
1091
1092 if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN)
1093 break;
1094 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
1095 "6081 NVME Completion Protocol Error: "
1096 "xri %x status x%x result x%x "
1097 "placed x%x\n",
1098 lpfc_ncmd->cur_iocbq.sli4_xritag,
1099 lpfc_ncmd->status, lpfc_ncmd->result,
1100 wcqe->total_data_placed);
1101 break;
1102 case IOSTAT_LOCAL_REJECT:
1103
1104 if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
1105 lpfc_printf_vlog(vport, KERN_INFO,
1106 LOG_NVME_IOERR,
1107 "6032 Delay Aborted cmd x%px "
1108 "nvme cmd x%px, xri x%x, "
1109 "xb %d\n",
1110 lpfc_ncmd, nCmd,
1111 lpfc_ncmd->cur_iocbq.sli4_xritag,
1112 bf_get(lpfc_wcqe_c_xb, wcqe));
1113
1114 default:
1115 out_err:
1116 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1117 "6072 NVME Completion Error: xri %x "
1118 "status x%x result x%x [x%x] "
1119 "placed x%x\n",
1120 lpfc_ncmd->cur_iocbq.sli4_xritag,
1121 lpfc_ncmd->status, lpfc_ncmd->result,
1122 wcqe->parameter,
1123 wcqe->total_data_placed);
1124 nCmd->transferred_length = 0;
1125 nCmd->rcv_rsplen = 0;
1126 nCmd->status = NVME_SC_INTERNAL;
1127 }
1128 }
1129
1130
1131 if (bf_get(lpfc_wcqe_c_xb, wcqe))
1132 lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
1133 else
1134 lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
1135
1136
1137
1138
1139
1140 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1141 if (lpfc_ncmd->ts_cmd_start) {
1142 lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
1143 lpfc_ncmd->ts_data_nvme = ktime_get_ns();
1144 phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
1145 lpfc_nvme_ktime(phba, lpfc_ncmd);
1146 }
1147 if (unlikely(phba->cpucheck_on & LPFC_CHECK_NVME_IO)) {
1148 uint32_t cpu;
1149 idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
1150 cpu = raw_smp_processor_id();
1151 if (cpu < LPFC_CHECK_CPU_CNT) {
1152 if (lpfc_ncmd->cpu != cpu)
1153 lpfc_printf_vlog(vport,
1154 KERN_INFO, LOG_NVME_IOERR,
1155 "6701 CPU Check cmpl: "
1156 "cpu %d expect %d\n",
1157 cpu, lpfc_ncmd->cpu);
1158 phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
1159 }
1160 }
1161 #endif
1162
1163
1164
1165
1166
1167 if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
1168 freqpriv = nCmd->private;
1169 freqpriv->nvme_buf = NULL;
1170 lpfc_ncmd->nvmeCmd = NULL;
1171 spin_unlock(&lpfc_ncmd->buf_lock);
1172 nCmd->done(nCmd);
1173 } else
1174 spin_unlock(&lpfc_ncmd->buf_lock);
1175
1176
1177 lpfc_release_nvme_buf(phba, lpfc_ncmd);
1178 }
1179
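/*
 * lpfc_nvme_prep_io_cmd - fill in the per-command portion of the FCP WQE.
 * Copies the static IREAD/IWRITE/ICMND template words according to the data
 * direction, applies the first-burst length when enabled for the node, and
 * sets the per-I/O RPI, XRI, request tag and payload length fields.
 * Returns 0 on success or -EINVAL if the node is not active.
 */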
1197 static int
1198 lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
1199 struct lpfc_io_buf *lpfc_ncmd,
1200 struct lpfc_nodelist *pnode,
1201 struct lpfc_fc4_ctrl_stat *cstat)
1202 {
1203 struct lpfc_hba *phba = vport->phba;
1204 struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
1205 struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
1206 union lpfc_wqe128 *wqe = &pwqeq->wqe;
1207 uint32_t req_len;
1208
1209 if (!NLP_CHK_NODE_ACT(pnode))
1210 return -EINVAL;
1211
1212
1213
1214
1215
1216 if (nCmd->sg_cnt) {
1217 if (nCmd->io_dir == NVMEFC_FCP_WRITE) {
1218
1219 memcpy(&wqe->words[7],
1220 &lpfc_iwrite_cmd_template.words[7],
1221 sizeof(uint32_t) * 5);
1222
1223
1224 wqe->fcp_iwrite.total_xfer_len = nCmd->payload_length;
1225
1226
1227 if ((phba->cfg_nvme_enable_fb) &&
1228 (pnode->nlp_flag & NLP_FIRSTBURST)) {
1229 req_len = lpfc_ncmd->nvmeCmd->payload_length;
1230 if (req_len < pnode->nvme_fb_size)
1231 wqe->fcp_iwrite.initial_xfer_len =
1232 req_len;
1233 else
1234 wqe->fcp_iwrite.initial_xfer_len =
1235 pnode->nvme_fb_size;
1236 } else {
1237 wqe->fcp_iwrite.initial_xfer_len = 0;
1238 }
1239 cstat->output_requests++;
1240 } else {
1241
1242 memcpy(&wqe->words[7],
1243 &lpfc_iread_cmd_template.words[7],
1244 sizeof(uint32_t) * 5);
1245
1246
1247 wqe->fcp_iread.total_xfer_len = nCmd->payload_length;
1248
1249
1250 wqe->fcp_iread.rsrvd5 = 0;
1251
1252 cstat->input_requests++;
1253 }
1254 } else {
1255
1256 memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
1257 sizeof(uint32_t) * 8);
1258 cstat->control_requests++;
1259 }
1260
1261 if (pnode->nlp_nvme_info & NLP_NVME_NSLER)
1262 bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
1263
1264
1265
1266
1267
1268
1269 bf_set(payload_offset_len, &wqe->fcp_icmd,
1270 (nCmd->rsplen + nCmd->cmdlen));
1271
1272
1273 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
1274 phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
1275 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);
1276
1277
1278 wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
1279
1280
1281 bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
1282
1283
1284
1285 pwqeq->vport = vport;
1286 return 0;
1287 }
1288
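/*
 * lpfc_nvme_prep_io_dma - build the data SGL for an NVME FCP I/O.  Converts
 * the transport scatterlist into SLI-4 SGEs following the two fixed
 * command/response SGEs, chaining additional SGL pages (LSP entries) when
 * needed, and programs the PBDE in WQE words 13-15 when cfg_enable_pbde is
 * set.  Returns 0 on success or 1 on SGL setup failure.
 */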
1306 static int
1307 lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
1308 struct lpfc_io_buf *lpfc_ncmd)
1309 {
1310 struct lpfc_hba *phba = vport->phba;
1311 struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
1312 union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
1313 struct sli4_sge *sgl = lpfc_ncmd->dma_sgl;
1314 struct sli4_hybrid_sgl *sgl_xtra = NULL;
1315 struct scatterlist *data_sg;
1316 struct sli4_sge *first_data_sgl;
1317 struct ulp_bde64 *bde;
1318 dma_addr_t physaddr = 0;
1319 uint32_t num_bde = 0;
1320 uint32_t dma_len = 0;
1321 uint32_t dma_offset = 0;
1322 int nseg, i, j;
1323 bool lsp_just_set = false;
1324
1325
1326 lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);
1327
1328
1329
1330
1331
1332 if (nCmd->sg_cnt) {
1333
1334
1335
1336
1337 sgl += 2;
1338
1339 first_data_sgl = sgl;
1340 lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
1341 if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
1342 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1343 "6058 Too many sg segments from "
1344 "NVME Transport. Max %d, "
1345 "nvmeIO sg_cnt %d\n",
1346 phba->cfg_nvme_seg_cnt + 1,
1347 lpfc_ncmd->seg_cnt);
1348 lpfc_ncmd->seg_cnt = 0;
1349 return 1;
1350 }
1351
1352
1353
1354
1355
1356
1357
1358 nseg = nCmd->sg_cnt;
1359 data_sg = nCmd->first_sgl;
1360
1361
1362 j = 2;
1363 for (i = 0; i < nseg; i++) {
1364 if (data_sg == NULL) {
1365 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1366 "6059 dptr err %d, nseg %d\n",
1367 i, nseg);
1368 lpfc_ncmd->seg_cnt = 0;
1369 return 1;
1370 }
1371
1372 sgl->word2 = 0;
1373 if ((num_bde + 1) == nseg) {
1374 bf_set(lpfc_sli4_sge_last, sgl, 1);
1375 bf_set(lpfc_sli4_sge_type, sgl,
1376 LPFC_SGE_TYPE_DATA);
1377 } else {
1378 bf_set(lpfc_sli4_sge_last, sgl, 0);
1379
1380
1381 if (!lsp_just_set &&
1382 !((j + 1) % phba->border_sge_num) &&
1383 ((nseg - 1) != i)) {
1384
1385 bf_set(lpfc_sli4_sge_type, sgl,
1386 LPFC_SGE_TYPE_LSP);
1387
1388 sgl_xtra = lpfc_get_sgl_per_hdwq(
1389 phba, lpfc_ncmd);
1390
1391 if (unlikely(!sgl_xtra)) {
1392 lpfc_ncmd->seg_cnt = 0;
1393 return 1;
1394 }
1395 sgl->addr_lo = cpu_to_le32(putPaddrLow(
1396 sgl_xtra->dma_phys_sgl));
1397 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
1398 sgl_xtra->dma_phys_sgl));
1399
1400 } else {
1401 bf_set(lpfc_sli4_sge_type, sgl,
1402 LPFC_SGE_TYPE_DATA);
1403 }
1404 }
1405
1406 if (!(bf_get(lpfc_sli4_sge_type, sgl) &
1407 LPFC_SGE_TYPE_LSP)) {
1408 if ((nseg - 1) == i)
1409 bf_set(lpfc_sli4_sge_last, sgl, 1);
1410
1411 physaddr = data_sg->dma_address;
1412 dma_len = data_sg->length;
1413 sgl->addr_lo = cpu_to_le32(
1414 putPaddrLow(physaddr));
1415 sgl->addr_hi = cpu_to_le32(
1416 putPaddrHigh(physaddr));
1417
1418 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
1419 sgl->word2 = cpu_to_le32(sgl->word2);
1420 sgl->sge_len = cpu_to_le32(dma_len);
1421
1422 dma_offset += dma_len;
1423 data_sg = sg_next(data_sg);
1424
1425 sgl++;
1426
1427 lsp_just_set = false;
1428 } else {
1429 sgl->word2 = cpu_to_le32(sgl->word2);
1430
1431 sgl->sge_len = cpu_to_le32(
1432 phba->cfg_sg_dma_buf_size);
1433
1434 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
1435 i = i - 1;
1436
1437 lsp_just_set = true;
1438 }
1439
1440 j++;
1441 }
1442 if (phba->cfg_enable_pbde) {
1443
1444
1445 bde = (struct ulp_bde64 *)
1446 &wqe->words[13];
1447 bde->addrLow = first_data_sgl->addr_lo;
1448 bde->addrHigh = first_data_sgl->addr_hi;
1449 bde->tus.f.bdeSize =
1450 le32_to_cpu(first_data_sgl->sge_len);
1451 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1452 bde->tus.w = cpu_to_le32(bde->tus.w);
1453
1454 } else {
1455 memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
1456 bf_set(wqe_pbde, &wqe->generic.wqe_com, 0);
1457 }
1458
1459 } else {
1460 lpfc_ncmd->seg_cnt = 0;
1461
1462
1463
1464
1465 if (nCmd->payload_length != 0) {
1466 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1467 "6063 NVME DMA Prep Err: sg_cnt %d "
1468 "payload_length x%x\n",
1469 nCmd->sg_cnt, nCmd->payload_length);
1470 return 1;
1471 }
1472 }
1473 return 0;
1474 }
1475
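/*
 * lpfc_nvme_fcp_io_submit - FC-NVME transport entry point for issuing an FCP
 * I/O.  Validates lport/rport/node state, enforces the node queue depth
 * (admin keep-alive commands are expedited), selects a hardware queue, gets
 * a driver I/O buffer, prepares the WQE and SGL, and posts the WQE.
 * Returns 0 on success or -EBUSY/-ENODEV/-EINVAL/-ENOMEM on failure.
 */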
1493 static int
1494 lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
1495 struct nvme_fc_remote_port *pnvme_rport,
1496 void *hw_queue_handle,
1497 struct nvmefc_fcp_req *pnvme_fcreq)
1498 {
1499 int ret = 0;
1500 int expedite = 0;
1501 int idx, cpu;
1502 struct lpfc_nvme_lport *lport;
1503 struct lpfc_fc4_ctrl_stat *cstat;
1504 struct lpfc_vport *vport;
1505 struct lpfc_hba *phba;
1506 struct lpfc_nodelist *ndlp;
1507 struct lpfc_io_buf *lpfc_ncmd;
1508 struct lpfc_nvme_rport *rport;
1509 struct lpfc_nvme_qhandle *lpfc_queue_info;
1510 struct lpfc_nvme_fcpreq_priv *freqpriv;
1511 struct nvme_common_command *sqe;
1512 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1513 uint64_t start = 0;
1514 #endif
1515
1516
1517
1518
1519 lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
1520 if (unlikely(!lport)) {
1521 ret = -EINVAL;
1522 goto out_fail;
1523 }
1524
1525 vport = lport->vport;
1526
1527 if (unlikely(!hw_queue_handle)) {
1528 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1529 "6117 Fail IO, NULL hw_queue_handle\n");
1530 atomic_inc(&lport->xmt_fcp_err);
1531 ret = -EBUSY;
1532 goto out_fail;
1533 }
1534
1535 phba = vport->phba;
1536
1537 if (vport->load_flag & FC_UNLOADING) {
1538 ret = -ENODEV;
1539 goto out_fail;
1540 }
1541
1542 if (unlikely(vport->load_flag & FC_UNLOADING)) {
1543 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1544 "6124 Fail IO, Driver unload\n");
1545 atomic_inc(&lport->xmt_fcp_err);
1546 ret = -ENODEV;
1547 goto out_fail;
1548 }
1549
1550 freqpriv = pnvme_fcreq->private;
1551 if (unlikely(!freqpriv)) {
1552 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1553 "6158 Fail IO, NULL request data\n");
1554 atomic_inc(&lport->xmt_fcp_err);
1555 ret = -EINVAL;
1556 goto out_fail;
1557 }
1558
1559 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1560 if (phba->ktime_on)
1561 start = ktime_get_ns();
1562 #endif
1563 rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
1564 lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle;
1565
1566
1567
1568
1569
1570 ndlp = rport->ndlp;
1571 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1572 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
1573 "6053 Busy IO, ndlp not ready: rport x%px "
1574 "ndlp x%px, DID x%06x\n",
1575 rport, ndlp, pnvme_rport->port_id);
1576 atomic_inc(&lport->xmt_fcp_err);
1577 ret = -EBUSY;
1578 goto out_fail;
1579 }
1580
1581
1582 if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
1583 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
1584 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
1585 "6036 Fail IO, DID x%06x not ready for "
1586 "IO. State x%x, Type x%x Flg x%x\n",
1587 pnvme_rport->port_id,
1588 ndlp->nlp_state, ndlp->nlp_type,
1589 ndlp->upcall_flags);
1590 atomic_inc(&lport->xmt_fcp_bad_ndlp);
1591 ret = -EBUSY;
1592 goto out_fail;
1593
1594 }
1595
1596
1597
1598
1599
1600 if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) {
1601 sqe = &((struct nvme_fc_cmd_iu *)
1602 pnvme_fcreq->cmdaddr)->sqe.common;
1603 if (sqe->opcode == nvme_admin_keep_alive)
1604 expedite = 1;
1605 }
1606
1607
1608
1609
1610 if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
1611 if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
1612 !expedite) {
1613 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1614 "6174 Fail IO, ndlp qdepth exceeded: "
1615 "idx %d DID %x pend %d qdepth %d\n",
1616 lpfc_queue_info->index, ndlp->nlp_DID,
1617 atomic_read(&ndlp->cmd_pending),
1618 ndlp->cmd_qdepth);
1619 atomic_inc(&lport->xmt_fcp_qdepth);
1620 ret = -EBUSY;
1621 goto out_fail;
1622 }
1623 }
1624
1625
1626 if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
1627 idx = lpfc_queue_info->index;
1628 } else {
1629 cpu = raw_smp_processor_id();
1630 idx = phba->sli4_hba.cpu_map[cpu].hdwq;
1631 }
1632
1633 lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite);
1634 if (lpfc_ncmd == NULL) {
1635 atomic_inc(&lport->xmt_fcp_noxri);
1636 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1637 "6065 Fail IO, driver buffer pool is empty: "
1638 "idx %d DID %x\n",
1639 lpfc_queue_info->index, ndlp->nlp_DID);
1640 ret = -EBUSY;
1641 goto out_fail;
1642 }
1643 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1644 if (start) {
1645 lpfc_ncmd->ts_cmd_start = start;
1646 lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
1647 } else {
1648 lpfc_ncmd->ts_cmd_start = 0;
1649 }
1650 #endif
1651
1652
1653
1654
1655
1656
1657
1658 freqpriv->nvme_buf = lpfc_ncmd;
1659 lpfc_ncmd->nvmeCmd = pnvme_fcreq;
1660 lpfc_ncmd->ndlp = ndlp;
1661 lpfc_ncmd->qidx = lpfc_queue_info->qidx;
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671 lpfc_ncmd->cur_iocbq.hba_wqidx = idx;
1672 cstat = &phba->sli4_hba.hdwq[idx].nvme_cstat;
1673
1674 lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat);
1675 ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
1676 if (ret) {
1677 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1678 "6175 Fail IO, Prep DMA: "
1679 "idx %d DID %x\n",
1680 lpfc_queue_info->index, ndlp->nlp_DID);
1681 atomic_inc(&lport->xmt_fcp_err);
1682 ret = -ENOMEM;
1683 goto out_free_nvme_buf;
1684 }
1685
1686 lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
1687 lpfc_ncmd->cur_iocbq.sli4_xritag,
1688 lpfc_queue_info->index, ndlp->nlp_DID);
1689
1690 ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq);
1691 if (ret) {
1692 atomic_inc(&lport->xmt_fcp_wqerr);
1693 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1694 "6113 Fail IO, Could not issue WQE err %x "
1695 "sid: x%x did: x%x oxid: x%x\n",
1696 ret, vport->fc_myDID, ndlp->nlp_DID,
1697 lpfc_ncmd->cur_iocbq.sli4_xritag);
1698 goto out_free_nvme_buf;
1699 }
1700
1701 if (phba->cfg_xri_rebalancing)
1702 lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_ncmd->hdwq_no);
1703
1704 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1705 if (lpfc_ncmd->ts_cmd_start)
1706 lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
1707
1708 if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
1709 cpu = raw_smp_processor_id();
1710 if (cpu < LPFC_CHECK_CPU_CNT) {
1711 lpfc_ncmd->cpu = cpu;
1712 if (idx != cpu)
1713 lpfc_printf_vlog(vport,
1714 KERN_INFO, LOG_NVME_IOERR,
1715 "6702 CPU Check cmd: "
1716 "cpu %d wq %d\n",
1717 lpfc_ncmd->cpu,
1718 lpfc_queue_info->index);
1719 phba->sli4_hba.hdwq[idx].cpucheck_xmt_io[cpu]++;
1720 }
1721 }
1722 #endif
1723 return 0;
1724
1725 out_free_nvme_buf:
1726 if (lpfc_ncmd->nvmeCmd->sg_cnt) {
1727 if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
1728 cstat->output_requests--;
1729 else
1730 cstat->input_requests--;
1731 } else
1732 cstat->control_requests--;
1733 lpfc_release_nvme_buf(phba, lpfc_ncmd);
1734 out_fail:
1735 return ret;
1736 }
1737
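/*
 * lpfc_nvme_abort_fcreq_cmpl - completion handler for an NVME FCP abort WQE.
 * Logs the abort completion status and releases the abort iocbq.
 */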
1749 void
1750 lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1751 struct lpfc_wcqe_complete *abts_cmpl)
1752 {
1753 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
1754 "6145 ABORT_XRI_CN completing on rpi x%x "
1755 "original iotag x%x, abort cmd iotag x%x "
1756 "req_tag x%x, status x%x, hwstatus x%x\n",
1757 cmdiocb->iocb.un.acxri.abortContextTag,
1758 cmdiocb->iocb.un.acxri.abortIoTag,
1759 cmdiocb->iotag,
1760 bf_get(lpfc_wcqe_c_request_tag, abts_cmpl),
1761 bf_get(lpfc_wcqe_c_status, abts_cmpl),
1762 bf_get(lpfc_wcqe_c_hw_status, abts_cmpl));
1763 lpfc_sli_release_iocbq(phba, cmdiocb);
1764 }
1765
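/*
 * lpfc_nvme_fcp_abort - FC-NVME transport entry point for aborting an
 * outstanding FCP I/O.  Locates the driver I/O buffer for the request,
 * verifies it is still on the txcmplq and not already being aborted, builds
 * an ABORT_XRI_CX WQE for the exchange and issues it on the same hardware
 * queue as the original command.
 */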
1783 static void
1784 lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
1785 struct nvme_fc_remote_port *pnvme_rport,
1786 void *hw_queue_handle,
1787 struct nvmefc_fcp_req *pnvme_fcreq)
1788 {
1789 struct lpfc_nvme_lport *lport;
1790 struct lpfc_vport *vport;
1791 struct lpfc_hba *phba;
1792 struct lpfc_io_buf *lpfc_nbuf;
1793 struct lpfc_iocbq *abts_buf;
1794 struct lpfc_iocbq *nvmereq_wqe;
1795 struct lpfc_nvme_fcpreq_priv *freqpriv;
1796 union lpfc_wqe128 *abts_wqe;
1797 unsigned long flags;
1798 int ret_val;
1799
1800
1801
1802
1803 lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
1804 if (unlikely(!lport))
1805 return;
1806
1807 vport = lport->vport;
1808
1809 if (unlikely(!hw_queue_handle)) {
1810 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
1811 "6129 Fail Abort, HW Queue Handle NULL.\n");
1812 return;
1813 }
1814
1815 phba = vport->phba;
1816 freqpriv = pnvme_fcreq->private;
1817
1818 if (unlikely(!freqpriv))
1819 return;
1820 if (vport->load_flag & FC_UNLOADING)
1821 return;
1822
1823
1824 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
1825 "6002 Abort Request to rport DID x%06x "
1826 "for nvme_fc_req x%px\n",
1827 pnvme_rport->port_id,
1828 pnvme_fcreq);
1829
1830
1831
1832
1833 spin_lock_irqsave(&phba->hbalock, flags);
1834
1835 if (phba->hba_flag & HBA_IOQ_FLUSH) {
1836 spin_unlock_irqrestore(&phba->hbalock, flags);
1837 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1838 "6139 Driver in reset cleanup - flushing "
1839 "NVME Req now. hba_flag x%x\n",
1840 phba->hba_flag);
1841 return;
1842 }
1843
1844 lpfc_nbuf = freqpriv->nvme_buf;
1845 if (!lpfc_nbuf) {
1846 spin_unlock_irqrestore(&phba->hbalock, flags);
1847 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1848 "6140 NVME IO req has no matching lpfc nvme "
1849 "io buffer. Skipping abort req.\n");
1850 return;
1851 } else if (!lpfc_nbuf->nvmeCmd) {
1852 spin_unlock_irqrestore(&phba->hbalock, flags);
1853 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1854 "6141 lpfc NVME IO req has no nvme_fcreq "
1855 "io buffer. Skipping abort req.\n");
1856 return;
1857 }
1858 nvmereq_wqe = &lpfc_nbuf->cur_iocbq;
1859
1860
1861 spin_lock(&lpfc_nbuf->buf_lock);
1862
1863
1864
1865
1866
1867
1868
1869
1870 if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
1871 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1872 "6143 NVME req mismatch: "
1873 "lpfc_nbuf x%px nvmeCmd x%px, "
1874 "pnvme_fcreq x%px. Skipping Abort xri x%x\n",
1875 lpfc_nbuf, lpfc_nbuf->nvmeCmd,
1876 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
1877 goto out_unlock;
1878 }
1879
1880
1881 if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
1882 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1883 "6142 NVME IO req x%px not queued - skipping "
1884 "abort req xri x%x\n",
1885 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
1886 goto out_unlock;
1887 }
1888
1889 atomic_inc(&lport->xmt_fcp_abort);
1890 lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
1891 nvmereq_wqe->sli4_xritag,
1892 nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);
1893
1894
1895 if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
1896 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1897 "6144 Outstanding NVME I/O Abort Request "
1898 "still pending on nvme_fcreq x%px, "
1899 "lpfc_ncmd %px xri x%x\n",
1900 pnvme_fcreq, lpfc_nbuf,
1901 nvmereq_wqe->sli4_xritag);
1902 goto out_unlock;
1903 }
1904
1905 abts_buf = __lpfc_sli_get_iocbq(phba);
1906 if (!abts_buf) {
1907 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1908 "6136 No available abort wqes. Skipping "
1909 "Abts req for nvme_fcreq x%px xri x%x\n",
1910 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
1911 goto out_unlock;
1912 }
1913
1914
1915 nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
1916
1917
1918 abts_wqe = &abts_buf->wqe;
1919
1920
1921
1922
1923 memset(abts_wqe, 0, sizeof(*abts_wqe));
1924 bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
1925
1926
1927 bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
1928 bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
1929 nvmereq_wqe->iocb.ulpClass);
1930
1931
1932
1933
1934 abts_wqe->abort_cmd.wqe_com.abort_tag = nvmereq_wqe->sli4_xritag;
1935
1936
1937 bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
1938 abts_buf->iotag);
1939
1940
1941 bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
1942 bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
1943
1944
1945 bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
1946 bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
1947 bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
1948
1949
1950 abts_buf->iocb_flag |= LPFC_IO_NVME;
1951 abts_buf->hba_wqidx = nvmereq_wqe->hba_wqidx;
1952 abts_buf->vport = vport;
1953 abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
1954 ret_val = lpfc_sli4_issue_wqe(phba, lpfc_nbuf->hdwq, abts_buf);
1955 spin_unlock(&lpfc_nbuf->buf_lock);
1956 spin_unlock_irqrestore(&phba->hbalock, flags);
1957 if (ret_val) {
1958 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1959 "6137 Failed abts issue_wqe with status x%x "
1960 "for nvme_fcreq x%px.\n",
1961 ret_val, pnvme_fcreq);
1962 lpfc_sli_release_iocbq(phba, abts_buf);
1963 return;
1964 }
1965
1966 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
1967 "6138 Transport Abort NVME Request Issued for "
1968 "ox_id x%x on reqtag x%x\n",
1969 nvmereq_wqe->sli4_xritag,
1970 abts_buf->iotag);
1971 return;
1972
1973 out_unlock:
1974 spin_unlock(&lpfc_nbuf->buf_lock);
1975 spin_unlock_irqrestore(&phba->hbalock, flags);
1976 return;
1977 }
1978
1979
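/*
 * lpfc_nvme_template - operations and limits registered with the FC-NVME
 * transport via nvme_fc_register_localport() in lpfc_nvme_create_localport().
 * max_hw_queues and max_sgl_segments are adjusted from the adapter
 * configuration at localport creation time.
 */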
1980 static struct nvme_fc_port_template lpfc_nvme_template = {
1981
1982 .localport_delete = lpfc_nvme_localport_delete,
1983 .remoteport_delete = lpfc_nvme_remoteport_delete,
1984 .create_queue = lpfc_nvme_create_queue,
1985 .delete_queue = lpfc_nvme_delete_queue,
1986 .ls_req = lpfc_nvme_ls_req,
1987 .fcp_io = lpfc_nvme_fcp_io_submit,
1988 .ls_abort = lpfc_nvme_ls_abort,
1989 .fcp_abort = lpfc_nvme_fcp_abort,
1990
1991 .max_hw_queues = 1,
1992 .max_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
1993 .max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
1994 .dma_boundary = 0xFFFFFFFF,
1995
1996
1997
1998
1999 .local_priv_sz = sizeof(struct lpfc_nvme_lport),
2000 .remote_priv_sz = sizeof(struct lpfc_nvme_rport),
2001 .lsrqst_priv_sz = 0,
2002 .fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
2003 };
2004
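/*
 * lpfc_get_nvme_buf - allocate a driver I/O buffer from hardware queue @idx
 * for an NVME FCP command; @expedite is passed through to lpfc_get_io_buf()
 * to allow allocation under low-buffer conditions (e.g. admin keep-alive).
 * The buffer's iocbq, first (skip) SGE and WQE are initialized for NVME use,
 * and the per-hdwq empty_io_bufs counter is bumped when the pool is empty.
 */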
2016 static struct lpfc_io_buf *
2017 lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
2018 int idx, int expedite)
2019 {
2020 struct lpfc_io_buf *lpfc_ncmd;
2021 struct lpfc_sli4_hdw_queue *qp;
2022 struct sli4_sge *sgl;
2023 struct lpfc_iocbq *pwqeq;
2024 union lpfc_wqe128 *wqe;
2025
2026 lpfc_ncmd = lpfc_get_io_buf(phba, NULL, idx, expedite);
2027
2028 if (lpfc_ncmd) {
2029 pwqeq = &(lpfc_ncmd->cur_iocbq);
2030 wqe = &pwqeq->wqe;
2031
2032
2033
2034
2035 pwqeq->iocb_flag = LPFC_IO_NVME;
2036 pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
2037 lpfc_ncmd->start_time = jiffies;
2038 lpfc_ncmd->flags = 0;
2039
2040
2041
2042
2043
2044 sgl = lpfc_ncmd->dma_sgl;
2045 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2046 bf_set(lpfc_sli4_sge_last, sgl, 0);
2047 sgl->word2 = cpu_to_le32(sgl->word2);
2048
2049
2050
2051 memset(wqe, 0, sizeof(union lpfc_wqe));
2052
2053 if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
2054 atomic_inc(&ndlp->cmd_pending);
2055 lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
2056 }
2057
2058 } else {
2059 qp = &phba->sli4_hba.hdwq[idx];
2060 qp->empty_io_bufs++;
2061 }
2062
2063 return lpfc_ncmd;
2064 }
2065
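/*
 * lpfc_release_nvme_buf - return an NVME I/O buffer to the driver pool.
 * Drops the node queue-depth count if it was bumped at allocation.  Buffers
 * whose exchange is still busy (LPFC_SBUF_XBUSY) are parked on the hdwq
 * aborted-I/O list until the XRI abort completes; otherwise the buffer goes
 * straight back to the free pool.
 */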
2076 static void
2077 lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
2078 {
2079 struct lpfc_sli4_hdw_queue *qp;
2080 unsigned long iflag = 0;
2081
2082 if ((lpfc_ncmd->flags & LPFC_SBUF_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
2083 atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);
2084
2085 lpfc_ncmd->ndlp = NULL;
2086 lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
2087
2088 qp = lpfc_ncmd->hdwq;
2089 if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
2090 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2091 "6310 XB release deferred for "
2092 "ox_id x%x on reqtag x%x\n",
2093 lpfc_ncmd->cur_iocbq.sli4_xritag,
2094 lpfc_ncmd->cur_iocbq.iotag);
2095
2096 spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
2097 list_add_tail(&lpfc_ncmd->list,
2098 &qp->lpfc_abts_io_buf_list);
2099 qp->abts_nvme_io_bufs++;
2100 spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
2101 } else
2102 lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp);
2103 }
2104
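/*
 * lpfc_nvme_create_localport - register the vport as an FC-NVME local port.
 * Builds the nvme_fc_port_info from the vport WWNN/WWPN, sizes the template's
 * SGL and hardware-queue limits from the adapter configuration, and calls
 * nvme_fc_register_localport().  On success the lport private area is bound
 * to the vport and its statistics counters are zeroed.
 */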
2121 int
2122 lpfc_nvme_create_localport(struct lpfc_vport *vport)
2123 {
2124 int ret = 0;
2125 struct lpfc_hba *phba = vport->phba;
2126 struct nvme_fc_port_info nfcp_info;
2127 struct nvme_fc_local_port *localport;
2128 struct lpfc_nvme_lport *lport;
2129
2130
2131
2132
2133 memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info));
2134 nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR;
2135 nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
2136 nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
2137
2138
2139
2140
2141
2142 lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
2143
2144
2145 if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
2146 lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;
2147 else
2148 lpfc_nvme_template.max_hw_queues =
2149 phba->sli4_hba.num_present_cpu;
2150
2151 if (!IS_ENABLED(CONFIG_NVME_FC))
2152 return ret;
2153
2154
2155
2156
2157
2158 ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
2159 &vport->phba->pcidev->dev, &localport);
2160 if (!ret) {
2161 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
2162 "6005 Successfully registered local "
2163 "NVME port num %d, localP x%px, private "
2164 "x%px, sg_seg %d\n",
2165 localport->port_num, localport,
2166 localport->private,
2167 lpfc_nvme_template.max_sgl_segments);
2168
2169
2170 lport = (struct lpfc_nvme_lport *)localport->private;
2171 vport->localport = localport;
2172 lport->vport = vport;
2173 vport->nvmei_support = 1;
2174
2175 atomic_set(&lport->xmt_fcp_noxri, 0);
2176 atomic_set(&lport->xmt_fcp_bad_ndlp, 0);
2177 atomic_set(&lport->xmt_fcp_qdepth, 0);
2178 atomic_set(&lport->xmt_fcp_err, 0);
2179 atomic_set(&lport->xmt_fcp_wqerr, 0);
2180 atomic_set(&lport->xmt_fcp_abort, 0);
2181 atomic_set(&lport->xmt_ls_abort, 0);
2182 atomic_set(&lport->xmt_ls_err, 0);
2183 atomic_set(&lport->cmpl_fcp_xb, 0);
2184 atomic_set(&lport->cmpl_fcp_err, 0);
2185 atomic_set(&lport->cmpl_ls_xb, 0);
2186 atomic_set(&lport->cmpl_ls_err, 0);
2187 atomic_set(&lport->fc4NvmeLsRequests, 0);
2188 atomic_set(&lport->fc4NvmeLsCmpls, 0);
2189 }
2190
2191 return ret;
2192 }
2193
2194 #if (IS_ENABLED(CONFIG_NVME_FC))
2195
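/*
 * lpfc_nvme_lport_unreg_wait - wait for the localport unregister to complete.
 * Re-arms the wait every LPFC_NVME_WAIT_TMO seconds, logging how many WQEs
 * are still outstanding on the hardware queues, until the transport signals
 * lport_unreg_cmp from lpfc_nvme_localport_delete().
 */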
2205 static void
2206 lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
2207 struct lpfc_nvme_lport *lport,
2208 struct completion *lport_unreg_cmp)
2209 {
2210 u32 wait_tmo;
2211 int ret, i, pending = 0;
2212 struct lpfc_sli_ring *pring;
2213 struct lpfc_hba *phba = vport->phba;
2214
2215
2216
2217
2218
2219 wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
2220 while (true) {
2221 ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
2222 if (unlikely(!ret)) {
2223 pending = 0;
2224 for (i = 0; i < phba->cfg_hdw_queue; i++) {
2225 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
2226 if (!pring)
2227 continue;
2228 if (pring->txcmplq_cnt)
2229 pending += pring->txcmplq_cnt;
2230 }
2231 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
2232 "6176 Lport x%px Localport x%px wait "
2233 "timed out. Pending %d. Renewing.\n",
2234 lport, vport->localport, pending);
2235 continue;
2236 }
2237 break;
2238 }
2239 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
2240 "6177 Lport x%px Localport x%px Complete Success\n",
2241 lport, vport->localport);
2242 }
2243 #endif
2244
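/*
 * lpfc_nvme_destroy_localport - unregister the vport's FC-NVME local port.
 * Calls nvme_fc_unregister_localport(), blocks in
 * lpfc_nvme_lport_unreg_wait() until the transport's delete callback runs,
 * then clears vport->localport and NVME initiator support for the vport.
 */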
2255 void
2256 lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
2257 {
2258 #if (IS_ENABLED(CONFIG_NVME_FC))
2259 struct nvme_fc_local_port *localport;
2260 struct lpfc_nvme_lport *lport;
2261 int ret;
2262 DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);
2263
2264 if (vport->nvmei_support == 0)
2265 return;
2266
2267 localport = vport->localport;
2268 lport = (struct lpfc_nvme_lport *)localport->private;
2269
2270 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
2271 "6011 Destroying NVME localport x%px\n",
2272 localport);
2273
2274
2275
2276
2277 lport->lport_unreg_cmp = &lport_unreg_cmp;
2278 ret = nvme_fc_unregister_localport(localport);
2279
2280
2281
2282
2283 lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
2284 vport->localport = NULL;
2285
2286
2287
2288
2289
2290 vport->nvmei_support = 0;
2291 if (ret == 0) {
2292 lpfc_printf_vlog(vport,
2293 KERN_INFO, LOG_NVME_DISC,
2294 "6009 Unregistered lport Success\n");
2295 } else {
2296 lpfc_printf_vlog(vport,
2297 KERN_INFO, LOG_NVME_DISC,
2298 "6010 Unregistered lport "
2299 "Failed, status x%x\n",
2300 ret);
2301 }
2302 #endif
2303 }
2304
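/*
 * lpfc_nvme_update_localport - refresh the localport port_id and role after
 * a link or address change; a port_id of 0 leaves the port in discovery role.
 */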
2305 void
2306 lpfc_nvme_update_localport(struct lpfc_vport *vport)
2307 {
2308 #if (IS_ENABLED(CONFIG_NVME_FC))
2309 struct nvme_fc_local_port *localport;
2310 struct lpfc_nvme_lport *lport;
2311
2312 localport = vport->localport;
2313 if (!localport) {
2314 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
2315 "6710 Update NVME fail. No localport\n");
2316 return;
2317 }
2318 lport = (struct lpfc_nvme_lport *)localport->private;
2319 if (!lport) {
2320 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
2321 "6171 Update NVME fail. localP x%px, No lport\n",
2322 localport);
2323 return;
2324 }
2325 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
2326 "6012 Update NVME lport x%px did x%x\n",
2327 localport, vport->fc_myDID);
2328
2329 localport->port_id = vport->fc_myDID;
2330 if (localport->port_id == 0)
2331 localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY;
2332 else
2333 localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;
2334
2335 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2336 "6030 bound lport x%px to DID x%06x\n",
2337 lport, localport->port_id);
2338 #endif
2339 }
2340
2341 int
2342 lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2343 {
2344 #if (IS_ENABLED(CONFIG_NVME_FC))
2345 int ret = 0;
2346 struct nvme_fc_local_port *localport;
2347 struct lpfc_nvme_lport *lport;
2348 struct lpfc_nvme_rport *rport;
2349 struct lpfc_nvme_rport *oldrport;
2350 struct nvme_fc_remote_port *remote_port;
2351 struct nvme_fc_port_info rpinfo;
2352 struct lpfc_nodelist *prev_ndlp = NULL;
2353
2354 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
2355 "6006 Register NVME PORT. DID x%06x nlptype x%x\n",
2356 ndlp->nlp_DID, ndlp->nlp_type);
2357
2358 localport = vport->localport;
2359 if (!localport)
2360 return 0;
2361
2362 lport = (struct lpfc_nvme_lport *)localport->private;
2363
2364
2365 /* Build the transport's view of the remote port from the ndlp:
2366  * its fabric DID, the NVME roles discovered by PRLI, and its
2367  * port and node names.
2368  */
2369
2370 memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info));
2371 rpinfo.port_id = ndlp->nlp_DID;
2372 if (ndlp->nlp_type & NLP_NVME_TARGET)
2373 rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
2374 if (ndlp->nlp_type & NLP_NVME_INITIATOR)
2375 rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;
2376
2377 if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
2378 rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;
2379
2380 rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
2381 rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
2382
2383 spin_lock_irq(&vport->phba->hbalock);
2384 oldrport = lpfc_ndlp_get_nrport(ndlp);
2385 if (oldrport) {
2386 prev_ndlp = oldrport->ndlp;
2387 spin_unlock_irq(&vport->phba->hbalock);
2388 } else {
2389 spin_unlock_irq(&vport->phba->hbalock);
2390 lpfc_nlp_get(ndlp);
2391 }
2392
2393 ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
2394 if (!ret) {
2395
2396 /* If the ndlp already had an nrport, this registration is a rebind
2397  * of the existing remoteport; otherwise the transport allocated a
2398  * new one.  Clear the unreg wait flag to guard against an
2399  * unregister/reregister race.
2400  */
2401
2402 spin_lock_irq(&vport->phba->hbalock);
2403 ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
2404 spin_unlock_irq(&vport->phba->hbalock);
2405 rport = remote_port->private;
2406 if (oldrport) {
2407
2408
2409
2410 if (oldrport == remote_port->private) {
2411
2412
2413
2414 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
2415 LOG_NVME_DISC,
2416 "6014 Rebind lport to current "
2417 "remoteport x%px wwpn 0x%llx, "
2418 "Data: x%x x%x x%px x%px x%x "
2419 " x%06x\n",
2420 remote_port,
2421 remote_port->port_name,
2422 remote_port->port_id,
2423 remote_port->port_role,
2424 oldrport->ndlp,
2425 ndlp,
2426 ndlp->nlp_type,
2427 ndlp->nlp_DID);
2428
2429 /* It is a complete rebind only when the driver is registering with
2430  * the same ndlp; otherwise a node swap happened before this
2431  * registration and the stale binding must be cleaned up below.
2432  */
2433
2434
2435 if (prev_ndlp == ndlp)
2436 return 0;
2437
2438 }
2439
2440
2441 /* Sever the ndlp<->rport association before dropping the ndlp
2442  * reference taken for the prior registration.
2443  */
2444 spin_lock_irq(&vport->phba->hbalock);
2445 ndlp->nrport = NULL;
2446 ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
2447 spin_unlock_irq(&vport->phba->hbalock);
2448 rport->ndlp = NULL;
2449 rport->remoteport = NULL;
2450
2451 /* Drop the reference held on the replaced ndlp unless it is still
2452  * an active node with its own nrport binding.
2453  */
2454
2455 if (prev_ndlp && prev_ndlp != ndlp) {
2456 if ((!NLP_CHK_NODE_ACT(prev_ndlp)) ||
2457 (!prev_ndlp->nrport))
2458 lpfc_nlp_put(prev_ndlp);
2459 }
2460 }
2461
2462
2463 rport->remoteport = remote_port;
2464 rport->lport = lport;
2465 rport->ndlp = ndlp;
2466 spin_lock_irq(&vport->phba->hbalock);
2467 ndlp->nrport = rport;
2468 spin_unlock_irq(&vport->phba->hbalock);
2469 lpfc_printf_vlog(vport, KERN_INFO,
2470 LOG_NVME_DISC | LOG_NODE,
2471 "6022 Bind lport x%px to remoteport x%px "
2472 "rport x%px WWNN 0x%llx, "
2473 "Rport WWPN 0x%llx DID "
2474 "x%06x Role x%x, ndlp %p prev_ndlp x%px\n",
2475 lport, remote_port, rport,
2476 rpinfo.node_name, rpinfo.port_name,
2477 rpinfo.port_id, rpinfo.port_role,
2478 ndlp, prev_ndlp);
2479 } else {
2480 lpfc_printf_vlog(vport, KERN_ERR,
2481 LOG_NVME_DISC | LOG_NODE,
2482 "6031 RemotePort Registration failed "
2483 "err: %d, DID x%06x\n",
2484 ret, ndlp->nlp_DID);
2485 }
2486
2487 return ret;
2488 #else
2489 return 0;
2490 #endif
2491 }
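
The rport pointer recovered from remote_port->private above relies on the driver-private area that the port template asks the transport to reserve behind every remoteport. A hedged sketch of that arrangement follows; the template name is illustrative, only the field relevant here is shown, and sizing it with struct lpfc_nvme_rport is an assumption about how this file configures it.

/* Sketch: the FC-NVMe transport allocates remote_priv_sz extra bytes
 * with each nvme_fc_remote_port and exposes them through ->private,
 * so the driver can treat that area as its struct lpfc_nvme_rport.
 */
static struct nvme_fc_port_template example_port_template = {
	.remote_priv_sz = sizeof(struct lpfc_nvme_rport),
	/* LS/FCP entry points and the other private sizes omitted */
};
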
2492
2493
2494 /* lpfc_nvme_rescan_port - Check whether this remoteport should be rescanned.
2495  *
2496  * If the ndlp represents an NVME discovery subsystem the driver is
2497  * logged into, ping the NVME FC transport layer to initiate a rescan
2498  * of this remote NPort.
2499  */
2500 void
2501 lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2502 {
2503 #if (IS_ENABLED(CONFIG_NVME_FC))
2504 struct lpfc_nvme_rport *nrport;
2505 struct nvme_fc_remote_port *remoteport = NULL;
2506
2507 spin_lock_irq(&vport->phba->hbalock);
2508 nrport = lpfc_ndlp_get_nrport(ndlp);
2509 if (nrport)
2510 remoteport = nrport->remoteport;
2511 spin_unlock_irq(&vport->phba->hbalock);
2512
2513 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2514 "6170 Rescan NPort DID x%06x type x%x "
2515 "state x%x nrport x%px remoteport x%px\n",
2516 ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state,
2517 nrport, remoteport);
2518
2519 if (!nrport || !remoteport)
2520 goto rescan_exit;
2521
2522
2523 if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY &&
2524 ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
2525 nvme_fc_rescan_remoteport(remoteport);
2526
2527 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
2528 "6172 NVME rescanned DID x%06x "
2529 "port_state x%x\n",
2530 ndlp->nlp_DID, remoteport->port_state);
2531 }
2532 return;
2533 rescan_exit:
2534 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2535 "6169 Skip NVME Rport Rescan, NVME remoteport "
2536 "unregistered\n");
2537 #endif
2538 }
2539
2540
2541 /* lpfc_nvme_unregister_port - Unbind the DID and port_role from this rport.
2542  *
2543  * There is no notion of devloss or rport recovery from the current
2544  * nvme transport perspective.  Loss of an rport just means IO cannot
2545  * be sent and recovery is completely up to the initiator.
2546  * For now, the driver just unbinds the DID and port_role so that no
2547  * further IO can be issued.  A subsequent registration renews the
2548  * binding.
2549  */
2550
2551
2552 void
2553 lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2554 {
2555 #if (IS_ENABLED(CONFIG_NVME_FC))
2556 int ret;
2557 struct nvme_fc_local_port *localport;
2558 struct lpfc_nvme_lport *lport;
2559 struct lpfc_nvme_rport *rport;
2560 struct nvme_fc_remote_port *remoteport = NULL;
2561
2562 localport = vport->localport;
2563
2564
2565
2566
2567 if (!localport)
2568 return;
2569
2570 lport = (struct lpfc_nvme_lport *)localport->private;
2571 if (!lport)
2572 goto input_err;
2573
2574 spin_lock_irq(&vport->phba->hbalock);
2575 rport = lpfc_ndlp_get_nrport(ndlp);
2576 if (rport)
2577 remoteport = rport->remoteport;
2578 spin_unlock_irq(&vport->phba->hbalock);
2579 if (!remoteport)
2580 goto input_err;
2581
2582 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2583 "6033 Unreg nvme remoteport x%px, portname x%llx, "
2584 "port_id x%06x, portstate x%x port type x%x\n",
2585 remoteport, remoteport->port_name,
2586 remoteport->port_id, remoteport->port_state,
2587 ndlp->nlp_type);
2588
2589
2590 /* Only remoteports that were registered for NVME target traffic
2591  * are unregistered here.
2592  */
2593 if (ndlp->nlp_type & NLP_NVME_TARGET) {
2594 /* A register rebind may race with the transport's delete downcall;
2595  * flag the node so that race can be detected.
2596  */
2597 ndlp->upcall_flags |= NLP_WAIT_FOR_UNREG;
2598
2599 /* Don't let the host nvme transport keep sending keep-alives on
2600  * this remoteport while the vport is unloading.  Dropping devloss
2601  * to zero makes the unregister take effect immediately; the return
2602  * value of the devloss call is ignored.
2603  */
2604 if (vport->load_flag & FC_UNLOADING)
2605 (void)nvme_fc_set_remoteport_devloss(remoteport, 0);
2606
2607 ret = nvme_fc_unregister_remoteport(remoteport);
2608 if (ret != 0) {
2609 lpfc_nlp_put(ndlp);
2610 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
2611 "6167 NVME unregister failed %d "
2612 "port_state x%x\n",
2613 ret, remoteport->port_state);
2614 }
2615 }
2616 return;
2617
2618 input_err:
2619 #endif
2620 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
2621 "6168 State error: lport x%px, rport x%px FCID x%06x\n",
2622 vport->localport, ndlp->rport, ndlp->nlp_DID);
2623 }
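
Note that the lpfc_nlp_put() above only runs when nvme_fc_unregister_remoteport() fails; on success the node reference taken at registration is expected to be dropped by the transport's remoteport delete upcall instead. A heavily hedged sketch of that upcall's reference handling follows (the driver's real handler is lpfc_nvme_remoteport_delete, earlier in this file; the name and body below are illustrative, not the actual implementation).

static void
example_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct lpfc_nvme_rport *rport = remoteport->private;
	struct lpfc_nodelist *ndlp = rport->ndlp;

	/* Assumed behavior: sever the rport<->ndlp binding and drop the
	 * node reference taken when the remoteport was registered.
	 */
	if (ndlp) {
		rport->ndlp = NULL;
		lpfc_nlp_put(ndlp);
	}
}
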
2624
2625 /**
2626  * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
2627  * @phba: pointer to lpfc hba data structure.
2628  * @axri: pointer to the nvme xri abort wcqe structure.
2629  * @lpfc_ncmd: the nvme job structure for the request being aborted.
2630  *
2631  * This routine is invoked by the worker thread to process a SLI4
2632  * fast-path NVME aborted xri.  Aborted NVME IO commands are completed
2633  * to the transport here.
2634  **/
2635 void
2636 lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
2637 struct sli4_wcqe_xri_aborted *axri,
2638 struct lpfc_io_buf *lpfc_ncmd)
2639 {
2640 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
2641 struct nvmefc_fcp_req *nvme_cmd = NULL;
2642 struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp;
2643
2644
2645 if (ndlp)
2646 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
2647
2648 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2649 "6311 nvme_cmd %p xri x%x tag x%x abort complete and "
2650 "xri released\n",
2651 lpfc_ncmd->nvmeCmd, xri,
2652 lpfc_ncmd->cur_iocbq.iotag);
2653
2654
2655 /* An aborted IO is completed back to the transport only now, after
2656  * the abort exchange has finished and the XRI has been released.
2657  */
2658 if (lpfc_ncmd->nvmeCmd) {
2659 nvme_cmd = lpfc_ncmd->nvmeCmd;
2660 nvme_cmd->done(nvme_cmd);
2661 lpfc_ncmd->nvmeCmd = NULL;
2662 }
2663 lpfc_release_nvme_buf(phba, lpfc_ncmd);
2664 }
2665
2666 /**
2667  * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete
2668  * @phba: Pointer to HBA context object.
2669  *
2670  * Polls every hardware queue's txcmplq until it is empty, sleeping
2671  * between checks.  No abort wqes are issued; the routine simply
2672  * waits for outstanding NVME IO to drain, logging periodically if
2673  * the queues do not empty.
2674  **/
2675
2676 void
2677 lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
2678 {
2679 struct lpfc_sli_ring *pring;
2680 u32 i, wait_cnt = 0;
2681
2682 if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq)
2683 return;
2684
2685 /* Cycle through all hardware queues and wait until every
2686  * outstanding WQE on each txcmplq has completed.
2687  */
2688 for (i = 0; i < phba->cfg_hdw_queue; i++) {
2689 if (!phba->sli4_hba.hdwq[i].io_wq)
2690 continue;
2691 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
2692
2693 if (!pring)
2694 continue;
2695
2696
2697 while (!list_empty(&pring->txcmplq)) {
2698 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
2699 wait_cnt++;
2700
2701
2702
2703
2704 if ((wait_cnt % 1000) == 0) {
2705 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2706 "6178 NVME IO not empty, "
2707 "cnt %d\n", wait_cnt);
2708 }
2709 }
2710 }
2711 }
2712
2713 void
2714 lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn)
2715 {
2716 #if (IS_ENABLED(CONFIG_NVME_FC))
2717 struct lpfc_io_buf *lpfc_ncmd;
2718 struct nvmefc_fcp_req *nCmd;
2719 struct lpfc_nvme_fcpreq_priv *freqpriv;
2720
2721 if (!pwqeIn->context1) {
2722 lpfc_sli_release_iocbq(phba, pwqeIn);
2723 return;
2724 }
2725
2726 if (bf_get(wqe_cmnd, &pwqeIn->wqe.gen_req.wqe_com) ==
2727 CMD_ABORT_XRI_CX) {
2728 lpfc_sli_release_iocbq(phba, pwqeIn);
2729 return;
2730 }
2731 lpfc_ncmd = (struct lpfc_io_buf *)pwqeIn->context1;
2732
2733 spin_lock(&lpfc_ncmd->buf_lock);
2734 if (!lpfc_ncmd->nvmeCmd) {
2735 spin_unlock(&lpfc_ncmd->buf_lock);
2736 lpfc_release_nvme_buf(phba, lpfc_ncmd);
2737 return;
2738 }
2739
2740 nCmd = lpfc_ncmd->nvmeCmd;
2741 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2742 "6194 NVME Cancel xri %x\n",
2743 lpfc_ncmd->cur_iocbq.sli4_xritag);
2744
2745 nCmd->transferred_length = 0;
2746 nCmd->rcv_rsplen = 0;
2747 nCmd->status = NVME_SC_INTERNAL;
2748 freqpriv = nCmd->private;
2749 freqpriv->nvme_buf = NULL;
2750 lpfc_ncmd->nvmeCmd = NULL;
2751
2752 spin_unlock(&lpfc_ncmd->buf_lock);
2753 nCmd->done(nCmd);
2754
2755
2756 lpfc_release_nvme_buf(phba, lpfc_ncmd);
2757 #endif
2758 }
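
For reference, the fields filled in by the cancel path above are the standard ones every FCP completion hands back to the FC-NVMe transport. A small, illustrative helper (not a driver function) showing the same internal-error completion pattern:

static void
example_fail_fcp_req(struct nvmefc_fcp_req *nCmd)
{
	/* Nothing was transferred and no FCP response payload was
	 * received; NVME_SC_INTERNAL tells the host-side transport to
	 * fail the command.
	 */
	nCmd->transferred_length = 0;
	nCmd->rcv_rsplen = 0;
	nCmd->status = NVME_SC_INTERNAL;
	nCmd->done(nCmd);
}
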