root/drivers/scsi/lpfc/lpfc_nvme.c


DEFINITIONS

This source file includes the following definitions.
  1. lpfc_nvme_cmd_template
  2. lpfc_nvme_create_queue
  3. lpfc_nvme_delete_queue
  4. lpfc_nvme_localport_delete
  5. lpfc_nvme_remoteport_delete
  6. lpfc_nvme_cmpl_gen_req
  7. lpfc_nvme_gen_req
  8. lpfc_nvme_ls_req
  9. lpfc_nvme_ls_abort
  10. lpfc_nvme_adj_fcp_sgls
  11. lpfc_nvme_ktime
  12. lpfc_nvme_io_cmd_wqe_cmpl
  13. lpfc_nvme_prep_io_cmd
  14. lpfc_nvme_prep_io_dma
  15. lpfc_nvme_fcp_io_submit
  16. lpfc_nvme_abort_fcreq_cmpl
  17. lpfc_nvme_fcp_abort
  18. lpfc_get_nvme_buf
  19. lpfc_release_nvme_buf
  20. lpfc_nvme_create_localport
  21. lpfc_nvme_lport_unreg_wait
  22. lpfc_nvme_destroy_localport
  23. lpfc_nvme_update_localport
  24. lpfc_nvme_register_port
  25. lpfc_nvme_rescan_port
  26. lpfc_nvme_unregister_port
  27. lpfc_sli4_nvme_xri_aborted
  28. lpfc_nvme_wait_for_io_drain
  29. lpfc_nvme_cancel_iocb

   1 /*******************************************************************
   2  * This file is part of the Emulex Linux Device Driver for         *
   3  * Fibre Channel Host Bus Adapters.                                *
   4  * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
   5  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
   6  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
   7  * EMULEX and SLI are trademarks of Emulex.                        *
   8  * www.broadcom.com                                                *
   9  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
  10  *                                                                 *
  11  * This program is free software; you can redistribute it and/or   *
  12  * modify it under the terms of version 2 of the GNU General       *
  13  * Public License as published by the Free Software Foundation.    *
  14  * This program is distributed in the hope that it will be useful. *
  15  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
  16  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
  17  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
  18  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
  19  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
  20  * more details, a copy of which can be found in the file COPYING  *
  21  * included with this package.                                     *
  22  ********************************************************************/
  23 #include <linux/pci.h>
  24 #include <linux/slab.h>
  25 #include <linux/interrupt.h>
  26 #include <linux/delay.h>
  27 #include <asm/unaligned.h>
  28 #include <linux/crc-t10dif.h>
  29 #include <net/checksum.h>
  30 
  31 #include <scsi/scsi.h>
  32 #include <scsi/scsi_device.h>
  33 #include <scsi/scsi_eh.h>
  34 #include <scsi/scsi_host.h>
  35 #include <scsi/scsi_tcq.h>
  36 #include <scsi/scsi_transport_fc.h>
  37 #include <scsi/fc/fc_fs.h>
  38 
  39 #include <linux/nvme.h>
  40 #include <linux/nvme-fc-driver.h>
  41 #include <linux/nvme-fc.h>
  42 #include "lpfc_version.h"
  43 #include "lpfc_hw4.h"
  44 #include "lpfc_hw.h"
  45 #include "lpfc_sli.h"
  46 #include "lpfc_sli4.h"
  47 #include "lpfc_nl.h"
  48 #include "lpfc_disc.h"
  49 #include "lpfc.h"
  50 #include "lpfc_nvme.h"
  51 #include "lpfc_scsi.h"
  52 #include "lpfc_logmsg.h"
  53 #include "lpfc_crtn.h"
  54 #include "lpfc_vport.h"
  55 #include "lpfc_debugfs.h"
  56 
  57 /* NVME initiator-based functions */
  58 
  59 static struct lpfc_io_buf *
  60 lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
  61                   int idx, int expedite);
  62 
  63 static void
  64 lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *);
  65 
  66 static struct nvme_fc_port_template lpfc_nvme_template;
  67 
  68 static union lpfc_wqe128 lpfc_iread_cmd_template;
  69 static union lpfc_wqe128 lpfc_iwrite_cmd_template;
  70 static union lpfc_wqe128 lpfc_icmnd_cmd_template;
  71 
  72 /* Setup WQE templates for NVME IOs */
  73 void
  74 lpfc_nvme_cmd_template(void)
  75 {
  76         union lpfc_wqe128 *wqe;
  77 
  78         /* IREAD template */
  79         wqe = &lpfc_iread_cmd_template;
  80         memset(wqe, 0, sizeof(union lpfc_wqe128));
  81 
  82         /* Word 0, 1, 2 - BDE is variable */
  83 
  84         /* Word 3 - cmd_buff_len, payload_offset_len is zero */
  85 
  86         /* Word 4 - total_xfer_len is variable */
  87 
  88         /* Word 5 - is zero */
  89 
  90         /* Word 6 - ctxt_tag, xri_tag is variable */
  91 
  92         /* Word 7 */
  93         bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
  94         bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
  95         bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
  96         bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);
  97 
  98         /* Word 8 - abort_tag is variable */
  99 
 100         /* Word 9  - reqtag is variable */
 101 
 102         /* Word 10 - dbde, wqes is variable */
 103         bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
 104         bf_set(wqe_nvme, &wqe->fcp_iread.wqe_com, 1);
 105         bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
 106         bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
 107         bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
 108         bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
 109 
 110         /* Word 11 - pbde is variable */
 111         bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, NVME_READ_CMD);
 112         bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
 113         bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
 114 
 115         /* Word 12 - is zero */
 116 
 117         /* Word 13, 14, 15 - PBDE is variable */
 118 
 119         /* IWRITE template */
 120         wqe = &lpfc_iwrite_cmd_template;
 121         memset(wqe, 0, sizeof(union lpfc_wqe128));
 122 
 123         /* Word 0, 1, 2 - BDE is variable */
 124 
 125         /* Word 3 - cmd_buff_len, payload_offset_len is zero */
 126 
 127         /* Word 4 - total_xfer_len is variable */
 128 
 129         /* Word 5 - initial_xfer_len is variable */
 130 
 131         /* Word 6 - ctxt_tag, xri_tag is variable */
 132 
 133         /* Word 7 */
 134         bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
 135         bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
 136         bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
 137         bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);
 138 
 139         /* Word 8 - abort_tag is variable */
 140 
 141         /* Word 9  - reqtag is variable */
 142 
 143         /* Word 10 - dbde, wqes is variable */
 144         bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
 145         bf_set(wqe_nvme, &wqe->fcp_iwrite.wqe_com, 1);
 146         bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
 147         bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
 148         bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
 149         bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
 150 
 151         /* Word 11 - pbde is variable */
 152         bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, NVME_WRITE_CMD);
 153         bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
 154         bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
 155 
 156         /* Word 12 - is zero */
 157 
 158         /* Word 13, 14, 15 - PBDE is variable */
 159 
 160         /* ICMND template */
 161         wqe = &lpfc_icmnd_cmd_template;
 162         memset(wqe, 0, sizeof(union lpfc_wqe128));
 163 
 164         /* Word 0, 1, 2 - BDE is variable */
 165 
 166         /* Word 3 - payload_offset_len is variable */
 167 
 168         /* Word 4, 5 - is zero */
 169 
 170         /* Word 6 - ctxt_tag, xri_tag is variable */
 171 
 172         /* Word 7 */
 173         bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
 174         bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
 175         bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
 176         bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);
 177 
 178         /* Word 8 - abort_tag is variable */
 179 
 180         /* Word 9  - reqtag is variable */
 181 
 182         /* Word 10 - dbde, wqes is variable */
 183         bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
 184         bf_set(wqe_nvme, &wqe->fcp_icmd.wqe_com, 1);
 185         bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
 186         bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
 187         bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
 188         bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
 189 
 190         /* Word 11 */
 191         bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, FCP_COMMAND);
 192         bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
 193         bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);
 194 
 195         /* Word 12, 13, 14, 15 - is zero */
 196 }
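      /*
       * The templates above carry the per-command-type constants: WQE
       * words 7-11 for IREAD/IWRITE and words 4-11 for ICMND.
       * lpfc_nvme_prep_io_cmd() copies those words into each IO's WQE
       * and then fills in only the per-request fields (BDE, transfer
       * lengths, tags) on the fast path.
       */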
 197 
 198 /**
 199  * lpfc_nvme_create_queue -
 200  * @lpfc_pnvme: Pointer to the driver's nvme instance data
  201  * @qidx: A cpu index used to affinitize IO queues and MSI-X vectors.
 202  * @handle: An opaque driver handle used in follow-up calls.
 203  *
 204  * Driver registers this routine to preallocate and initialize any
 205  * internal data structures to bind the @qidx to its internal IO queues.
 206  * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ.
 207  *
 208  * Return value :
 209  *   0 - Success
 210  *   -EINVAL - Unsupported input value.
 211  *   -ENOMEM - Could not alloc necessary memory
 212  **/
 213 static int
 214 lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
 215                        unsigned int qidx, u16 qsize,
 216                        void **handle)
 217 {
 218         struct lpfc_nvme_lport *lport;
 219         struct lpfc_vport *vport;
 220         struct lpfc_nvme_qhandle *qhandle;
 221         char *str;
 222 
 223         if (!pnvme_lport->private)
 224                 return -ENOMEM;
 225 
 226         lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
 227         vport = lport->vport;
 228         qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
 229         if (qhandle == NULL)
 230                 return -ENOMEM;
 231 
 232         qhandle->cpu_id = raw_smp_processor_id();
 233         qhandle->qidx = qidx;
 234         /*
 235          * NVME qidx == 0 is the admin queue, so both admin queue
 236          * and first IO queue will use MSI-X vector and associated
 237          * EQ/CQ/WQ at index 0. After that they are sequentially assigned.
 238          */
 239         if (qidx) {
 240                 str = "IO ";  /* IO queue */
 241                 qhandle->index = ((qidx - 1) %
 242                         lpfc_nvme_template.max_hw_queues);
 243         } else {
 244                 str = "ADM";  /* Admin queue */
 245                 qhandle->index = qidx;
 246         }
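              /*
               * Illustrative mapping: if max_hw_queues were 4, qidx 0
               * (ADM) and qidx 1 (first IO queue) would both use index 0,
               * qidx 2-4 would use index 1-3, and qidx 5 would wrap back
               * to index 0.
               */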
 247 
 248         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
 249                          "6073 Binding %s HdwQueue %d  (cpu %d) to "
 250                          "hdw_queue %d qhandle x%px\n", str,
 251                          qidx, qhandle->cpu_id, qhandle->index, qhandle);
 252         *handle = (void *)qhandle;
 253         return 0;
 254 }
 255 
 256 /**
  257  * lpfc_nvme_delete_queue -
  258  * @pnvme_lport: Pointer to the driver's local port data
  259  * @qidx: A cpu index used to affinitize IO queues and MSI-X vectors.
  260  * @handle: An opaque driver handle from lpfc_nvme_create_queue
  261  *
  262  * Driver registers this routine to free any internal data structures
  263  * allocated by lpfc_nvme_create_queue to bind the @qidx to its
  264  * internal IO queues.
  265  *
  266  * Return value :
  267  *   None.  The qhandle allocated by lpfc_nvme_create_queue is
  268  *   freed here.
 269  **/
 270 static void
 271 lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
 272                        unsigned int qidx,
 273                        void *handle)
 274 {
 275         struct lpfc_nvme_lport *lport;
 276         struct lpfc_vport *vport;
 277 
 278         if (!pnvme_lport->private)
 279                 return;
 280 
 281         lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
 282         vport = lport->vport;
 283 
 284         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
 285                         "6001 ENTER.  lpfc_pnvme x%px, qidx x%x qhandle x%px\n",
 286                         lport, qidx, handle);
 287         kfree(handle);
 288 }
 289 
 290 static void
 291 lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
 292 {
 293         struct lpfc_nvme_lport *lport = localport->private;
 294 
 295         lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
 296                          "6173 localport x%px delete complete\n",
 297                          lport);
 298 
 299         /* release any threads waiting for the unreg to complete */
 300         if (lport->vport->localport)
 301                 complete(lport->lport_unreg_cmp);
 302 }
 303 
 304 /* lpfc_nvme_remoteport_delete
 305  *
 306  * @remoteport: Pointer to an nvme transport remoteport instance.
 307  *
 308  * This is a template downcall.  NVME transport calls this function
 309  * when it has completed the unregistration of a previously
 310  * registered remoteport.
 311  *
 312  * Return value :
 313  * None
 314  */
 315 static void
 316 lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
 317 {
 318         struct lpfc_nvme_rport *rport = remoteport->private;
 319         struct lpfc_vport *vport;
 320         struct lpfc_nodelist *ndlp;
 321 
 322         ndlp = rport->ndlp;
 323         if (!ndlp)
 324                 goto rport_err;
 325 
 326         vport = ndlp->vport;
 327         if (!vport)
 328                 goto rport_err;
 329 
 330         /* Remove this rport from the lport's list - memory is owned by the
 331          * transport. Remove the ndlp reference for the NVME transport before
 332          * calling state machine to remove the node.
 333          */
 334         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
 335                         "6146 remoteport delete of remoteport x%px\n",
 336                         remoteport);
 337         spin_lock_irq(&vport->phba->hbalock);
 338 
 339         /* The register rebind might have occurred before the delete
 340          * downcall.  Guard against this race.
 341          */
 342         if (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG) {
 343                 ndlp->nrport = NULL;
 344                 ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
 345                 spin_unlock_irq(&vport->phba->hbalock);
 346 
 347                 /* Remove original register reference. The host transport
 348                  * won't reference this rport/remoteport any further.
 349                  */
 350                 lpfc_nlp_put(ndlp);
 351         } else {
 352                 spin_unlock_irq(&vport->phba->hbalock);
 353         }
 354 
 355  rport_err:
 356         return;
 357 }
 358 
 359 static void
 360 lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
 361                        struct lpfc_wcqe_complete *wcqe)
 362 {
 363         struct lpfc_vport *vport = cmdwqe->vport;
 364         struct lpfc_nvme_lport *lport;
 365         uint32_t status;
 366         struct nvmefc_ls_req *pnvme_lsreq;
 367         struct lpfc_dmabuf *buf_ptr;
 368         struct lpfc_nodelist *ndlp;
 369 
 370         pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
 371         status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
 372 
 373         if (vport->localport) {
 374                 lport = (struct lpfc_nvme_lport *)vport->localport->private;
 375                 if (lport) {
 376                         atomic_inc(&lport->fc4NvmeLsCmpls);
 377                         if (status) {
 378                                 if (bf_get(lpfc_wcqe_c_xb, wcqe))
 379                                         atomic_inc(&lport->cmpl_ls_xb);
 380                                 atomic_inc(&lport->cmpl_ls_err);
 381                         }
 382                 }
 383         }
 384 
 385         ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
 386         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
 387                          "6047 nvme cmpl Enter "
 388                          "Data %px DID %x Xri: %x status %x reason x%x "
 389                          "cmd:x%px lsreg:x%px bmp:x%px ndlp:x%px\n",
 390                          pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
 391                          cmdwqe->sli4_xritag, status,
 392                          (wcqe->parameter & 0xffff),
 393                          cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);
 394 
 395         lpfc_nvmeio_data(phba, "NVME LS  CMPL: xri x%x stat x%x parm x%x\n",
 396                          cmdwqe->sli4_xritag, status, wcqe->parameter);
 397 
 398         if (cmdwqe->context3) {
 399                 buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3;
 400                 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
 401                 kfree(buf_ptr);
 402                 cmdwqe->context3 = NULL;
 403         }
 404         if (pnvme_lsreq->done)
 405                 pnvme_lsreq->done(pnvme_lsreq, status);
 406         else
 407                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
 408                                  "6046 nvme cmpl without done call back? "
 409                                  "Data %px DID %x Xri: %x status %x\n",
 410                                 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
 411                                 cmdwqe->sli4_xritag, status);
 412         if (ndlp) {
 413                 lpfc_nlp_put(ndlp);
 414                 cmdwqe->context1 = NULL;
 415         }
 416         lpfc_sli_release_iocbq(phba, cmdwqe);
 417 }
 418 
 419 static int
 420 lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
 421                   struct lpfc_dmabuf *inp,
 422                   struct nvmefc_ls_req *pnvme_lsreq,
 423                   void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
 424                                struct lpfc_wcqe_complete *),
 425                   struct lpfc_nodelist *ndlp, uint32_t num_entry,
 426                   uint32_t tmo, uint8_t retry)
 427 {
 428         struct lpfc_hba *phba = vport->phba;
 429         union lpfc_wqe128 *wqe;
 430         struct lpfc_iocbq *genwqe;
 431         struct ulp_bde64 *bpl;
 432         struct ulp_bde64 bde;
 433         int i, rc, xmit_len, first_len;
 434 
 435         /* Allocate buffer for  command WQE */
 436         genwqe = lpfc_sli_get_iocbq(phba);
 437         if (genwqe == NULL)
 438                 return 1;
 439 
 440         wqe = &genwqe->wqe;
 441         /* Initialize only 64 bytes */
 442         memset(wqe, 0, sizeof(union lpfc_wqe));
 443 
 444         genwqe->context3 = (uint8_t *)bmp;
 445         genwqe->iocb_flag |= LPFC_IO_NVME_LS;
 446 
 447         /* Save for completion so we can release these resources */
 448         genwqe->context1 = lpfc_nlp_get(ndlp);
 449         genwqe->context2 = (uint8_t *)pnvme_lsreq;
 450         /* Fill in payload, bp points to frame payload */
 451 
 452         if (!tmo)
 453                 /* FC spec states we need 3 * ratov for CT requests */
 454                 tmo = (3 * phba->fc_ratov);
 455 
 456         /* For this command calculate the xmit length of the request bde. */
 457         xmit_len = 0;
 458         first_len = 0;
 459         bpl = (struct ulp_bde64 *)bmp->virt;
 460         for (i = 0; i < num_entry; i++) {
 461                 bde.tus.w = bpl[i].tus.w;
 462                 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
 463                         break;
 464                 xmit_len += bde.tus.f.bdeSize;
 465                 if (i == 0)
 466                         first_len = xmit_len;
 467         }
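              /*
               * For the two-entry BPL built by lpfc_nvme_ls_req() below,
               * entry 0 (the LS request) is a 64-bit BDE while entry 1
               * (the LS response) is flagged BUFF_TYPE_BDE_64I, so the
               * walk stops there and xmit_len == first_len == the LS
               * request length.
               */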
 468 
 469         genwqe->rsvd2 = num_entry;
 470         genwqe->hba_wqidx = 0;
 471 
 472         /* Words 0 - 2 */
 473         wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
 474         wqe->generic.bde.tus.f.bdeSize = first_len;
 475         wqe->generic.bde.addrLow = bpl[0].addrLow;
 476         wqe->generic.bde.addrHigh = bpl[0].addrHigh;
 477 
 478         /* Word 3 */
 479         wqe->gen_req.request_payload_len = first_len;
 480 
 481         /* Word 4 */
 482 
 483         /* Word 5 */
 484         bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
 485         bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
 486         bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
 487         bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
 488         bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);
 489 
 490         /* Word 6 */
 491         bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com,
 492                phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
 493         bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag);
 494 
 495         /* Word 7 */
 496         bf_set(wqe_tmo, &wqe->gen_req.wqe_com, (vport->phba->fc_ratov-1));
 497         bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3);
 498         bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
 499         bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI);
 500 
 501         /* Word 8 */
 502         wqe->gen_req.wqe_com.abort_tag = genwqe->iotag;
 503 
 504         /* Word 9 */
 505         bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag);
 506 
 507         /* Word 10 */
 508         bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
 509         bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
 510         bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
 511         bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
 512         bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
 513 
 514         /* Word 11 */
 515         bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
 516         bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND);
 517 
 518 
 519         /* Issue GEN REQ WQE for NPORT <did> */
 520         lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 521                          "6050 Issue GEN REQ WQE to NPORT x%x "
 522                          "Data: x%x x%x wq:x%px lsreq:x%px bmp:x%px "
 523                          "xmit:%d 1st:%d\n",
 524                          ndlp->nlp_DID, genwqe->iotag,
 525                          vport->port_state,
 526                         genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
 527         genwqe->wqe_cmpl = cmpl;
 528         genwqe->iocb_cmpl = NULL;
 529         genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
 530         genwqe->vport = vport;
 531         genwqe->retry = retry;
 532 
 533         lpfc_nvmeio_data(phba, "NVME LS  XMIT: xri x%x iotag x%x to x%06x\n",
 534                          genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);
 535 
 536         rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe);
 537         if (rc) {
 538                 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
 539                                  "6045 Issue GEN REQ WQE to NPORT x%x "
 540                                  "Data: x%x x%x\n",
 541                                  ndlp->nlp_DID, genwqe->iotag,
 542                                  vport->port_state);
 543                 lpfc_sli_release_iocbq(phba, genwqe);
 544                 return 1;
 545         }
 546         return 0;
 547 }
 548 
 549 /**
  550  * lpfc_nvme_ls_req - Issue a Link Service request
 551  * @lpfc_pnvme: Pointer to the driver's nvme instance data
 552  * @lpfc_nvme_lport: Pointer to the driver's local port data
 553  * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
 554  *
 555  * Driver registers this routine to handle any link service request
 556  * from the nvme_fc transport to a remote nvme-aware port.
 557  *
 558  * Return value :
 559  *   0 - Success
 560  *   TODO: What are the failure codes.
 561  **/
 562 static int
 563 lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
 564                  struct nvme_fc_remote_port *pnvme_rport,
 565                  struct nvmefc_ls_req *pnvme_lsreq)
 566 {
 567         int ret = 0;
 568         struct lpfc_nvme_lport *lport;
 569         struct lpfc_nvme_rport *rport;
 570         struct lpfc_vport *vport;
 571         struct lpfc_nodelist *ndlp;
 572         struct ulp_bde64 *bpl;
 573         struct lpfc_dmabuf *bmp;
 574         uint16_t ntype, nstate;
 575 
  576         /* there are two dma bufs in the request, actually there is one and
 577          * the second one is just the start address + cmd size.
 578          * Before calling lpfc_nvme_gen_req these buffers need to be wrapped
 579          * in a lpfc_dmabuf struct. When freeing we just free the wrapper
 580          * because the nvem layer owns the data bufs.
 581          * We do not have to break these packets open, we don't care what is in
 582          * them. And we do not have to look at the resonse data, we only care
 583          * that we got a response. All of the caring is going to happen in the
 584          * nvme-fc layer.
 585          */
 586 
 587         lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
 588         rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
 589         if (unlikely(!lport) || unlikely(!rport))
 590                 return -EINVAL;
 591 
 592         vport = lport->vport;
 593 
 594         if (vport->load_flag & FC_UNLOADING)
 595                 return -ENODEV;
 596 
 597         /* Need the ndlp.  It is stored in the driver's rport. */
 598         ndlp = rport->ndlp;
 599         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
 600                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
 601                                  "6051 Remoteport x%px, rport has invalid ndlp. "
 602                                  "Failing LS Req\n", pnvme_rport);
 603                 return -ENODEV;
 604         }
 605 
 606         /* The remote node has to be a mapped nvme target or an
 607          * unmapped nvme initiator or it's an error.
 608          */
 609         ntype = ndlp->nlp_type;
 610         nstate = ndlp->nlp_state;
 611         if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
 612             (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
 613                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
 614                                  "6088 DID x%06x not ready for "
 615                                  "IO. State x%x, Type x%x\n",
 616                                  pnvme_rport->port_id,
 617                                  ndlp->nlp_state, ndlp->nlp_type);
 618                 return -ENODEV;
 619         }
 620         bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
 621         if (!bmp) {
 622 
 623                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
 624                                  "6044 Could not find node for DID %x\n",
 625                                  pnvme_rport->port_id);
 626                 return 2;
 627         }
 628         INIT_LIST_HEAD(&bmp->list);
 629         bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
 630         if (!bmp->virt) {
 631                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
 632                                  "6042 Could not find node for DID %x\n",
 633                                  pnvme_rport->port_id);
 634                 kfree(bmp);
 635                 return 3;
 636         }
 637         bpl = (struct ulp_bde64 *)bmp->virt;
 638         bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
 639         bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
 640         bpl->tus.f.bdeFlags = 0;
 641         bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen;
 642         bpl->tus.w = le32_to_cpu(bpl->tus.w);
 643         bpl++;
 644 
 645         bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma));
 646         bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma));
 647         bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
 648         bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
 649         bpl->tus.w = le32_to_cpu(bpl->tus.w);
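              /*
               * The BPL now has two entries: entry 0 maps the LS request
               * payload (rqstdma/rqstlen) and entry 1 maps the LS
               * response buffer (rspdma/rsplen).
               */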
 650 
 651         /* Expand print to include key fields. */
 652         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
 653                          "6149 Issue LS Req to DID 0x%06x lport x%px, "
 654                          "rport x%px lsreq x%px rqstlen:%d rsplen:%d "
 655                          "%pad %pad\n",
 656                          ndlp->nlp_DID, pnvme_lport, pnvme_rport,
 657                          pnvme_lsreq, pnvme_lsreq->rqstlen,
 658                          pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
 659                          &pnvme_lsreq->rspdma);
 660 
 661         atomic_inc(&lport->fc4NvmeLsRequests);
 662 
 663         /* Hardcode the wait to 30 seconds.  Connections are failing otherwise.
 664          * This code allows it all to work.
 665          */
 666         ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
 667                                 pnvme_lsreq, lpfc_nvme_cmpl_gen_req,
 668                                 ndlp, 2, 30, 0);
 669         if (ret != WQE_SUCCESS) {
 670                 atomic_inc(&lport->xmt_ls_err);
 671                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
 672                                  "6052 EXIT. issue ls wqe failed lport x%px, "
 673                                  "rport x%px lsreq x%px Status %x DID %x\n",
 674                                  pnvme_lport, pnvme_rport, pnvme_lsreq,
 675                                  ret, ndlp->nlp_DID);
 676                 lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
 677                 kfree(bmp);
 678                 return ret;
 679         }
 680 
  681         /* LS request issued; completion is handled in lpfc_nvme_cmpl_gen_req. */
 682         return ret;
 683 }
 684 
 685 /**
  686  * lpfc_nvme_ls_abort - Abort a prior Link Service request
  687  * @pnvme_lport: Pointer to the driver's local port data
  688  * @pnvme_rport: Pointer to the rport the LS request was sent to
  689  * @pnvme_lsreq: Pointer to the LS request being aborted
  690  *
  691  * Driver registers this routine to abort a link service request
  692  * that was previously issued to a remote nvme-aware port and has
  693  * not yet completed.
  694  *
  695  * Return value :
  696  *   None
 697  **/
 698 static void
 699 lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
 700                    struct nvme_fc_remote_port *pnvme_rport,
 701                    struct nvmefc_ls_req *pnvme_lsreq)
 702 {
 703         struct lpfc_nvme_lport *lport;
 704         struct lpfc_vport *vport;
 705         struct lpfc_hba *phba;
 706         struct lpfc_nodelist *ndlp;
 707         LIST_HEAD(abort_list);
 708         struct lpfc_sli_ring *pring;
 709         struct lpfc_iocbq *wqe, *next_wqe;
 710 
 711         lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
 712         if (unlikely(!lport))
 713                 return;
 714         vport = lport->vport;
 715         phba = vport->phba;
 716 
 717         if (vport->load_flag & FC_UNLOADING)
 718                 return;
 719 
 720         ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
 721         if (!ndlp) {
 722                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
 723                                  "6049 Could not find node for DID %x\n",
 724                                  pnvme_rport->port_id);
 725                 return;
 726         }
 727 
 728         /* Expand print to include key fields. */
 729         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
 730                          "6040 ENTER.  lport x%px, rport x%px lsreq x%px rqstlen:%d "
 731                          "rsplen:%d %pad %pad\n",
 732                          pnvme_lport, pnvme_rport,
 733                          pnvme_lsreq, pnvme_lsreq->rqstlen,
 734                          pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
 735                          &pnvme_lsreq->rspdma);
 736 
 737         /*
 738          * Lock the ELS ring txcmplq and build a local list of all ELS IOs
 739          * that need an ABTS.  The IOs need to stay on the txcmplq so that
 740          * the abort operation completes them successfully.
 741          */
 742         pring = phba->sli4_hba.nvmels_wq->pring;
 743         spin_lock_irq(&phba->hbalock);
 744         spin_lock(&pring->ring_lock);
 745         list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
  746                 /* Add to abort_list on NDLP match. */
 747                 if (lpfc_check_sli_ndlp(phba, pring, wqe, ndlp)) {
 748                         wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
 749                         list_add_tail(&wqe->dlist, &abort_list);
 750                 }
 751         }
 752         spin_unlock(&pring->ring_lock);
 753         spin_unlock_irq(&phba->hbalock);
 754 
 755         /* Abort the targeted IOs and remove them from the abort list. */
 756         list_for_each_entry_safe(wqe, next_wqe, &abort_list, dlist) {
 757                 atomic_inc(&lport->xmt_ls_abort);
 758                 spin_lock_irq(&phba->hbalock);
 759                 list_del_init(&wqe->dlist);
 760                 lpfc_sli_issue_abort_iotag(phba, pring, wqe);
 761                 spin_unlock_irq(&phba->hbalock);
 762         }
 763 }
 764 
 765 /* Fix up the existing sgls for NVME IO. */
 766 static inline void
 767 lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
 768                        struct lpfc_io_buf *lpfc_ncmd,
 769                        struct nvmefc_fcp_req *nCmd)
 770 {
 771         struct lpfc_hba  *phba = vport->phba;
 772         struct sli4_sge *sgl;
 773         union lpfc_wqe128 *wqe;
 774         uint32_t *wptr, *dptr;
 775 
 776         /*
 777          * Get a local pointer to the built-in wqe and correct
 778          * the cmd size to match NVME's 96 bytes and fix
 779          * the dma address.
 780          */
 781 
 782         wqe = &lpfc_ncmd->cur_iocbq.wqe;
 783 
 784         /*
 785          * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to
 786          * match NVME.  NVME sends 96 bytes. Also, use the
 787          * nvme commands command and response dma addresses
 788          * rather than the virtual memory to ease the restore
 789          * operation.
 790          */
 791         sgl = lpfc_ncmd->dma_sgl;
 792         sgl->sge_len = cpu_to_le32(nCmd->cmdlen);
 793         if (phba->cfg_nvme_embed_cmd) {
 794                 sgl->addr_hi = 0;
 795                 sgl->addr_lo = 0;
 796 
 797                 /* Word 0-2 - NVME CMND IU (embedded payload) */
 798                 wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
 799                 wqe->generic.bde.tus.f.bdeSize = 56;
 800                 wqe->generic.bde.addrHigh = 0;
 801                 wqe->generic.bde.addrLow =  64;  /* Word 16 */
 802 
 803                 /* Word 10  - dbde is 0, wqes is 1 in template */
 804 
 805                 /*
 806                  * Embed the payload in the last half of the WQE
 807                  * WQE words 16-30 get the NVME CMD IU payload
 808                  *
 809                  * WQE words 16-19 get payload Words 1-4
 810                  * WQE words 20-21 get payload Words 6-7
 811                  * WQE words 22-29 get payload Words 16-23
 812                  */
 813                 wptr = &wqe->words[16];  /* WQE ptr */
 814                 dptr = (uint32_t *)nCmd->cmdaddr;  /* payload ptr */
 815                 dptr++;                 /* Skip Word 0 in payload */
 816 
 817                 *wptr++ = *dptr++;      /* Word 1 */
 818                 *wptr++ = *dptr++;      /* Word 2 */
 819                 *wptr++ = *dptr++;      /* Word 3 */
 820                 *wptr++ = *dptr++;      /* Word 4 */
 821                 dptr++;                 /* Skip Word 5 in payload */
 822                 *wptr++ = *dptr++;      /* Word 6 */
 823                 *wptr++ = *dptr++;      /* Word 7 */
 824                 dptr += 8;              /* Skip Words 8-15 in payload */
 825                 *wptr++ = *dptr++;      /* Word 16 */
 826                 *wptr++ = *dptr++;      /* Word 17 */
 827                 *wptr++ = *dptr++;      /* Word 18 */
 828                 *wptr++ = *dptr++;      /* Word 19 */
 829                 *wptr++ = *dptr++;      /* Word 20 */
 830                 *wptr++ = *dptr++;      /* Word 21 */
 831                 *wptr++ = *dptr++;      /* Word 22 */
 832                 *wptr   = *dptr;        /* Word 23 */
 833         } else {
 834                 sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->cmddma));
 835                 sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->cmddma));
 836 
 837                 /* Word 0-2 - NVME CMND IU Inline BDE */
 838                 wqe->generic.bde.tus.f.bdeFlags =  BUFF_TYPE_BDE_64;
 839                 wqe->generic.bde.tus.f.bdeSize = nCmd->cmdlen;
 840                 wqe->generic.bde.addrHigh = sgl->addr_hi;
 841                 wqe->generic.bde.addrLow =  sgl->addr_lo;
 842 
 843                 /* Word 10 */
 844                 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
 845                 bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
 846         }
 847 
 848         sgl++;
 849 
 850         /* Setup the physical region for the FCP RSP */
 851         sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma));
 852         sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma));
 853         sgl->word2 = le32_to_cpu(sgl->word2);
 854         if (nCmd->sg_cnt)
 855                 bf_set(lpfc_sli4_sge_last, sgl, 0);
 856         else
 857                 bf_set(lpfc_sli4_sge_last, sgl, 1);
 858         sgl->word2 = cpu_to_le32(sgl->word2);
 859         sgl->sge_len = cpu_to_le32(nCmd->rsplen);
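              /*
               * At this point SGE 0 describes the NVME CMD IU (or is
               * zeroed when the command is embedded in the WQE) and
               * SGE 1 describes the response IU; lpfc_nvme_prep_io_dma()
               * appends any data SGEs after these two.
               */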
 860 }
 861 
 862 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 863 static void
 864 lpfc_nvme_ktime(struct lpfc_hba *phba,
 865                 struct lpfc_io_buf *lpfc_ncmd)
 866 {
 867         uint64_t seg1, seg2, seg3, seg4;
 868         uint64_t segsum;
 869 
 870         if (!lpfc_ncmd->ts_last_cmd ||
 871             !lpfc_ncmd->ts_cmd_start ||
 872             !lpfc_ncmd->ts_cmd_wqput ||
 873             !lpfc_ncmd->ts_isr_cmpl ||
 874             !lpfc_ncmd->ts_data_nvme)
 875                 return;
 876 
 877         if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_cmd_start)
 878                 return;
 879         if (lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd)
 880                 return;
 881         if (lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start)
 882                 return;
 883         if (lpfc_ncmd->ts_isr_cmpl < lpfc_ncmd->ts_cmd_wqput)
 884                 return;
 885         if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_isr_cmpl)
 886                 return;
 887         /*
 888          * Segment 1 - Time from Last FCP command cmpl is handed
 889          * off to NVME Layer to start of next command.
 890          * Segment 2 - Time from Driver receives a IO cmd start
 891          * from NVME Layer to WQ put is done on IO cmd.
 892          * Segment 3 - Time from Driver WQ put is done on IO cmd
 893          * to MSI-X ISR for IO cmpl.
 894          * Segment 4 - Time from MSI-X ISR for IO cmpl to when
  895          * cmpl is handed off to the NVME Layer.
 896          */
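              /*
               * Worked example (hypothetical timestamps, in ns):
               * ts_last_cmd=100, ts_cmd_start=150, ts_cmd_wqput=170,
               * ts_isr_cmpl=250, ts_data_nvme=270 yields seg1=50,
               * seg2=20, seg3=80 and seg4=20.
               */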
 897         seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd;
 898         if (seg1 > 5000000)  /* 5 ms - for sequential IOs only */
 899                 seg1 = 0;
 900 
 901         /* Calculate times relative to start of IO */
 902         seg2 = (lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start);
 903         segsum = seg2;
 904         seg3 = lpfc_ncmd->ts_isr_cmpl - lpfc_ncmd->ts_cmd_start;
 905         if (segsum > seg3)
 906                 return;
 907         seg3 -= segsum;
 908         segsum += seg3;
 909 
 910         seg4 = lpfc_ncmd->ts_data_nvme - lpfc_ncmd->ts_cmd_start;
 911         if (segsum > seg4)
 912                 return;
 913         seg4 -= segsum;
 914 
 915         phba->ktime_data_samples++;
 916         phba->ktime_seg1_total += seg1;
 917         if (seg1 < phba->ktime_seg1_min)
 918                 phba->ktime_seg1_min = seg1;
 919         else if (seg1 > phba->ktime_seg1_max)
 920                 phba->ktime_seg1_max = seg1;
 921         phba->ktime_seg2_total += seg2;
 922         if (seg2 < phba->ktime_seg2_min)
 923                 phba->ktime_seg2_min = seg2;
 924         else if (seg2 > phba->ktime_seg2_max)
 925                 phba->ktime_seg2_max = seg2;
 926         phba->ktime_seg3_total += seg3;
 927         if (seg3 < phba->ktime_seg3_min)
 928                 phba->ktime_seg3_min = seg3;
 929         else if (seg3 > phba->ktime_seg3_max)
 930                 phba->ktime_seg3_max = seg3;
 931         phba->ktime_seg4_total += seg4;
 932         if (seg4 < phba->ktime_seg4_min)
 933                 phba->ktime_seg4_min = seg4;
 934         else if (seg4 > phba->ktime_seg4_max)
 935                 phba->ktime_seg4_max = seg4;
 936 
 937         lpfc_ncmd->ts_last_cmd = 0;
 938         lpfc_ncmd->ts_cmd_start = 0;
 939         lpfc_ncmd->ts_cmd_wqput  = 0;
 940         lpfc_ncmd->ts_isr_cmpl = 0;
 941         lpfc_ncmd->ts_data_nvme = 0;
 942 }
 943 #endif
 944 
 945 /**
 946  * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO
  947  * @phba: Pointer to the driver's hba instance data
  948  * @pwqeIn: Pointer to the driver WQE that has completed
  949  * @wcqe: Pointer to the work-queue completion queue entry
  950  *
  951  * Driver registers this routine as the completion handler for NVME
  952  * FCP IO WQEs.  It translates the WCQE completion status into the
  953  * nvme_fc request's completion fields, hands the request back to
  954  * the transport, and releases the driver's IO buffer.
  955  *
  956  * Return value :
  957  *   None
  958  **/
 959 static void
 960 lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
 961                           struct lpfc_wcqe_complete *wcqe)
 962 {
 963         struct lpfc_io_buf *lpfc_ncmd =
 964                 (struct lpfc_io_buf *)pwqeIn->context1;
 965         struct lpfc_vport *vport = pwqeIn->vport;
 966         struct nvmefc_fcp_req *nCmd;
 967         struct nvme_fc_ersp_iu *ep;
 968         struct nvme_fc_cmd_iu *cp;
 969         struct lpfc_nodelist *ndlp;
 970         struct lpfc_nvme_fcpreq_priv *freqpriv;
 971         struct lpfc_nvme_lport *lport;
 972         uint32_t code, status, idx;
 973         uint16_t cid, sqhd, data;
 974         uint32_t *ptr;
 975 
 976         /* Sanity check on return of outstanding command */
 977         if (!lpfc_ncmd) {
 978                 lpfc_printf_vlog(vport, KERN_ERR,
 979                                  LOG_NODE | LOG_NVME_IOERR,
 980                                  "6071 Null lpfc_ncmd pointer. No "
 981                                  "release, skip completion\n");
 982                 return;
 983         }
 984 
 985         /* Guard against abort handler being called at same time */
 986         spin_lock(&lpfc_ncmd->buf_lock);
 987 
 988         if (!lpfc_ncmd->nvmeCmd) {
 989                 spin_unlock(&lpfc_ncmd->buf_lock);
 990                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
 991                                  "6066 Missing cmpl ptrs: lpfc_ncmd x%px, "
 992                                  "nvmeCmd x%px\n",
 993                                  lpfc_ncmd, lpfc_ncmd->nvmeCmd);
 994 
 995                 /* Release the lpfc_ncmd regardless of the missing elements. */
 996                 lpfc_release_nvme_buf(phba, lpfc_ncmd);
 997                 return;
 998         }
 999         nCmd = lpfc_ncmd->nvmeCmd;
1000         status = bf_get(lpfc_wcqe_c_status, wcqe);
1001 
1002         idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
1003         phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++;
1004 
1005         if (unlikely(status && vport->localport)) {
1006                 lport = (struct lpfc_nvme_lport *)vport->localport->private;
1007                 if (lport) {
1008                         if (bf_get(lpfc_wcqe_c_xb, wcqe))
1009                                 atomic_inc(&lport->cmpl_fcp_xb);
1010                         atomic_inc(&lport->cmpl_fcp_err);
1011                 }
1012         }
1013 
1014         lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
1015                          lpfc_ncmd->cur_iocbq.sli4_xritag,
1016                          status, wcqe->parameter);
1017         /*
1018          * Catch race where our node has transitioned, but the
1019          * transport is still transitioning.
1020          */
1021         ndlp = lpfc_ncmd->ndlp;
1022         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1023                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
1024                                  "6062 Ignoring NVME cmpl.  No ndlp\n");
1025                 goto out_err;
1026         }
1027 
1028         code = bf_get(lpfc_wcqe_c_code, wcqe);
1029         if (code == CQE_CODE_NVME_ERSP) {
1030                 /* For this type of CQE, we need to rebuild the rsp */
1031                 ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;
1032 
1033                 /*
1034                  * Get Command Id from cmd to plug into response. This
1035                  * code is not needed in the next NVME Transport drop.
1036                  */
1037                 cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
1038                 cid = cp->sqe.common.command_id;
1039 
1040                 /*
1041                  * RSN is in CQE word 2
1042                  * SQHD is in CQE Word 3 bits 15:0
1043                  * Cmd Specific info is in CQE Word 1
1044                  * and in CQE Word 0 bits 15:0
1045                  */
1046                 sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe);
1047 
 1048                 /* Now let's build the NVME ERSP IU */
1049                 ep->iu_len = cpu_to_be16(8);
1050                 ep->rsn = wcqe->parameter;
1051                 ep->xfrd_len = cpu_to_be32(nCmd->payload_length);
1052                 ep->rsvd12 = 0;
1053                 ptr = (uint32_t *)&ep->cqe.result.u64;
1054                 *ptr++ = wcqe->total_data_placed;
1055                 data = bf_get(lpfc_wcqe_c_ersp0, wcqe);
1056                 *ptr = (uint32_t)data;
1057                 ep->cqe.sq_head = sqhd;
1058                 ep->cqe.sq_id =  nCmd->sqid;
1059                 ep->cqe.command_id = cid;
1060                 ep->cqe.status = 0;
1061 
1062                 lpfc_ncmd->status = IOSTAT_SUCCESS;
1063                 lpfc_ncmd->result = 0;
1064                 nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
1065                 nCmd->transferred_length = nCmd->payload_length;
1066         } else {
1067                 lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK);
1068                 lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
1069 
1070                 /* For NVME, the only failure path that results in an
1071                  * IO error is when the adapter rejects it.  All other
1072                  * conditions are a success case and resolved by the
1073                  * transport.
1074                  * IOSTAT_FCP_RSP_ERROR means:
1075                  * 1. Length of data received doesn't match total
1076                  *    transfer length in WQE
1077                  * 2. If the RSP payload does NOT match these cases:
1078                  *    a. RSP length 12/24 bytes and all zeros
1079                  *    b. NVME ERSP
1080                  */
1081                 switch (lpfc_ncmd->status) {
1082                 case IOSTAT_SUCCESS:
1083                         nCmd->transferred_length = wcqe->total_data_placed;
1084                         nCmd->rcv_rsplen = 0;
1085                         nCmd->status = 0;
1086                         break;
1087                 case IOSTAT_FCP_RSP_ERROR:
1088                         nCmd->transferred_length = wcqe->total_data_placed;
1089                         nCmd->rcv_rsplen = wcqe->parameter;
1090                         nCmd->status = 0;
1091                         /* Sanity check */
1092                         if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN)
1093                                 break;
1094                         lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
1095                                          "6081 NVME Completion Protocol Error: "
1096                                          "xri %x status x%x result x%x "
1097                                          "placed x%x\n",
1098                                          lpfc_ncmd->cur_iocbq.sli4_xritag,
1099                                          lpfc_ncmd->status, lpfc_ncmd->result,
1100                                          wcqe->total_data_placed);
1101                         break;
1102                 case IOSTAT_LOCAL_REJECT:
1103                         /* Let fall through to set command final state. */
1104                         if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
1105                                 lpfc_printf_vlog(vport, KERN_INFO,
1106                                          LOG_NVME_IOERR,
1107                                          "6032 Delay Aborted cmd x%px "
1108                                          "nvme cmd x%px, xri x%x, "
1109                                          "xb %d\n",
1110                                          lpfc_ncmd, nCmd,
1111                                          lpfc_ncmd->cur_iocbq.sli4_xritag,
1112                                          bf_get(lpfc_wcqe_c_xb, wcqe));
1113                         /* fall through */
1114                 default:
1115 out_err:
1116                         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1117                                          "6072 NVME Completion Error: xri %x "
1118                                          "status x%x result x%x [x%x] "
1119                                          "placed x%x\n",
1120                                          lpfc_ncmd->cur_iocbq.sli4_xritag,
1121                                          lpfc_ncmd->status, lpfc_ncmd->result,
1122                                          wcqe->parameter,
1123                                          wcqe->total_data_placed);
1124                         nCmd->transferred_length = 0;
1125                         nCmd->rcv_rsplen = 0;
1126                         nCmd->status = NVME_SC_INTERNAL;
1127                 }
1128         }
1129 
 1130         /* pick up SLI4 exchange busy condition */
1131         if (bf_get(lpfc_wcqe_c_xb, wcqe))
1132                 lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
1133         else
1134                 lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
1135 
1136         /* Update stats and complete the IO.  There is
1137          * no need for dma unprep because the nvme_transport
1138          * owns the dma address.
1139          */
1140 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1141         if (lpfc_ncmd->ts_cmd_start) {
1142                 lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
1143                 lpfc_ncmd->ts_data_nvme = ktime_get_ns();
1144                 phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
1145                 lpfc_nvme_ktime(phba, lpfc_ncmd);
1146         }
1147         if (unlikely(phba->cpucheck_on & LPFC_CHECK_NVME_IO)) {
1148                 uint32_t cpu;
1149                 idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
1150                 cpu = raw_smp_processor_id();
1151                 if (cpu < LPFC_CHECK_CPU_CNT) {
1152                         if (lpfc_ncmd->cpu != cpu)
1153                                 lpfc_printf_vlog(vport,
1154                                                  KERN_INFO, LOG_NVME_IOERR,
1155                                                  "6701 CPU Check cmpl: "
1156                                                  "cpu %d expect %d\n",
1157                                                  cpu, lpfc_ncmd->cpu);
1158                         phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
1159                 }
1160         }
1161 #endif
1162 
1163         /* NVME targets need completion held off until the abort exchange
1164          * completes unless the NVME Rport is getting unregistered.
1165          */
1166 
1167         if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
1168                 freqpriv = nCmd->private;
1169                 freqpriv->nvme_buf = NULL;
1170                 lpfc_ncmd->nvmeCmd = NULL;
1171                 spin_unlock(&lpfc_ncmd->buf_lock);
1172                 nCmd->done(nCmd);
1173         } else
1174                 spin_unlock(&lpfc_ncmd->buf_lock);
1175 
1176         /* Call release with XB=1 to queue the IO into the abort list. */
1177         lpfc_release_nvme_buf(phba, lpfc_ncmd);
1178 }
1179 
1180 
1181 /**
 1182  * lpfc_nvme_prep_io_cmd - Prepare the WQE for an NVME-over-FCP IO
 1183  * @vport: Pointer to the driver's vport instance data
 1184  * @lpfc_ncmd: Pointer to the driver's IO buffer for this command
 1185  * @pnode: Pointer to the nodelist entry for the IO's rport
 1186  * @cstat: Pointer to the FC4 control statistics for this hardware queue
 1187  *
 1188  * Initializes the IO's WQE from the command-type template built by
 1189  * lpfc_nvme_cmd_template() and fills in the fields that do not depend
 1190  * on the request's DMA mappings (transfer lengths, first-burst length,
 1191  * context and request tags); the SGL is built by lpfc_nvme_prep_io_dma().
 1192  *
 1193  * Return value :
 1194  *   0 - Success
 1195  *   -EINVAL - The IO's node is no longer active.
1196  **/
1197 static int
1198 lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
1199                       struct lpfc_io_buf *lpfc_ncmd,
1200                       struct lpfc_nodelist *pnode,
1201                       struct lpfc_fc4_ctrl_stat *cstat)
1202 {
1203         struct lpfc_hba *phba = vport->phba;
1204         struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
1205         struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
1206         union lpfc_wqe128 *wqe = &pwqeq->wqe;
1207         uint32_t req_len;
1208 
1209         if (!NLP_CHK_NODE_ACT(pnode))
1210                 return -EINVAL;
1211 
1212         /*
1213          * There are three possibilities here - use scatter-gather segment, use
1214          * the single mapping, or neither.
1215          */
1216         if (nCmd->sg_cnt) {
1217                 if (nCmd->io_dir == NVMEFC_FCP_WRITE) {
1218                         /* From the iwrite template, initialize words 7 - 11 */
1219                         memcpy(&wqe->words[7],
1220                                &lpfc_iwrite_cmd_template.words[7],
1221                                sizeof(uint32_t) * 5);
1222 
1223                         /* Word 4 */
1224                         wqe->fcp_iwrite.total_xfer_len = nCmd->payload_length;
1225 
1226                         /* Word 5 */
1227                         if ((phba->cfg_nvme_enable_fb) &&
1228                             (pnode->nlp_flag & NLP_FIRSTBURST)) {
1229                                 req_len = lpfc_ncmd->nvmeCmd->payload_length;
1230                                 if (req_len < pnode->nvme_fb_size)
1231                                         wqe->fcp_iwrite.initial_xfer_len =
1232                                                 req_len;
1233                                 else
1234                                         wqe->fcp_iwrite.initial_xfer_len =
1235                                                 pnode->nvme_fb_size;
1236                         } else {
1237                                 wqe->fcp_iwrite.initial_xfer_len = 0;
1238                         }
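                              /*
                               * First-burst sizing above (illustrative):
                               * with nvme_fb_size of 2048, a 1024-byte
                               * write uses initial_xfer_len 1024 while an
                               * 8192-byte write is capped at 2048.
                               */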
1239                         cstat->output_requests++;
1240                 } else {
1241                         /* From the iread template, initialize words 7 - 11 */
1242                         memcpy(&wqe->words[7],
1243                                &lpfc_iread_cmd_template.words[7],
1244                                sizeof(uint32_t) * 5);
1245 
1246                         /* Word 4 */
1247                         wqe->fcp_iread.total_xfer_len = nCmd->payload_length;
1248 
1249                         /* Word 5 */
1250                         wqe->fcp_iread.rsrvd5 = 0;
1251 
1252                         cstat->input_requests++;
1253                 }
1254         } else {
1255                 /* From the icmnd template, initialize words 4 - 11 */
1256                 memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
1257                        sizeof(uint32_t) * 8);
1258                 cstat->control_requests++;
1259         }
1260 
1261         if (pnode->nlp_nvme_info & NLP_NVME_NSLER)
1262                 bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
1263         /*
1264          * Finish initializing those WQE fields that are independent
1265          * of the nvme_cmnd request_buffer
1266          */
1267 
1268         /* Word 3 */
1269         bf_set(payload_offset_len, &wqe->fcp_icmd,
1270                (nCmd->rsplen + nCmd->cmdlen));
1271 
1272         /* Word 6 */
1273         bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
1274                phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
1275         bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);
1276 
1277         /* Word 8 */
1278         wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
1279 
1280         /* Word 9 */
1281         bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
1282 
1283         /* Words 13 14 15 are for PBDE support */
1284 
1285         pwqeq->vport = vport;
1286         return 0;
1287 }
1288 
1289 
1290 /**
1291  * lpfc_nvme_prep_io_dma - Prepare the data SGL for an NVME-over-FCP IO
1292  * @vport: Pointer to the vport issuing the IO
1293  * @lpfc_ncmd: Pointer to the driver's per-IO buffer
1294  *
1295  * This routine walks the scatter-gather list supplied by the NVME
1296  * transport in the nvmefc_fcp_req attached to @lpfc_ncmd and builds the
1297  * corresponding SLI4 SGEs, chaining to extra per-hardware-queue SGL
1298  * chunks when the embedded SGL fills up.  For an IO carrying no data,
1299  * it verifies that the payload length is also zero.
1300  *
1301  * Return value :
1302  *   0 - Success
1303  *   1 - Failure (too many segments, bad SG pointer, no extra SGL
1304  *       available, or a nonzero payload length with no SG entries)
1305  **/
1306 static int
1307 lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
1308                       struct lpfc_io_buf *lpfc_ncmd)
1309 {
1310         struct lpfc_hba *phba = vport->phba;
1311         struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
1312         union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
1313         struct sli4_sge *sgl = lpfc_ncmd->dma_sgl;
1314         struct sli4_hybrid_sgl *sgl_xtra = NULL;
1315         struct scatterlist *data_sg;
1316         struct sli4_sge *first_data_sgl;
1317         struct ulp_bde64 *bde;
1318         dma_addr_t physaddr = 0;
1319         uint32_t num_bde = 0;
1320         uint32_t dma_len = 0;
1321         uint32_t dma_offset = 0;
1322         int nseg, i, j;
1323         bool lsp_just_set = false;
1324 
1325         /* Fix up the command and response DMA stuff. */
1326         lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);
1327 
1328         /*
1329          * There are three possibilities here - use scatter-gather segment, use
1330          * the single mapping, or neither.
1331          */
1332         if (nCmd->sg_cnt) {
1333                 /*
1334                  * Jump over the cmd and rsp SGEs.  The fix routine
1335                  * has already adjusted for this.
1336                  */
1337                 sgl += 2;
1338 
1339                 first_data_sgl = sgl;
1340                 lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
1341                 if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
1342                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1343                                         "6058 Too many sg segments from "
1344                                         "NVME Transport.  Max %d, "
1345                                         "nvmeIO sg_cnt %d\n",
1346                                         phba->cfg_nvme_seg_cnt + 1,
1347                                         lpfc_ncmd->seg_cnt);
1348                         lpfc_ncmd->seg_cnt = 0;
1349                         return 1;
1350                 }
1351 
1352                 /*
1353                  * The driver established a maximum scatter-gather segment count
1354                  * during probe that limits the number of sg elements in any
1355                  * single nvme command.  Just run through the seg_cnt and format
1356                  * the sge's.
1357                  */
1358                 nseg = nCmd->sg_cnt;
1359                 data_sg = nCmd->first_sgl;
1360 
1361                 /* for tracking the segment boundaries */
1362                 j = 2;
1363                 for (i = 0; i < nseg; i++) {
1364                         if (data_sg == NULL) {
1365                                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1366                                                 "6059 dptr err %d, nseg %d\n",
1367                                                 i, nseg);
1368                                 lpfc_ncmd->seg_cnt = 0;
1369                                 return 1;
1370                         }
1371 
1372                         sgl->word2 = 0;
1373                         if ((num_bde + 1) == nseg) {
1374                                 bf_set(lpfc_sli4_sge_last, sgl, 1);
1375                                 bf_set(lpfc_sli4_sge_type, sgl,
1376                                        LPFC_SGE_TYPE_DATA);
1377                         } else {
1378                                 bf_set(lpfc_sli4_sge_last, sgl, 0);
1379 
1380                                 /* expand the segment */
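                                     /* The embedded SGL is about to fill up:
                                      * emit an LSP-type SGE that chains to an
                                      * extra SGL chunk allocated from the
                                      * per-hardware-queue pool, then continue
                                      * filling SGEs in that chunk.
                                      */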
1381                                 if (!lsp_just_set &&
1382                                     !((j + 1) % phba->border_sge_num) &&
1383                                     ((nseg - 1) != i)) {
1384                                         /* set LSP type */
1385                                         bf_set(lpfc_sli4_sge_type, sgl,
1386                                                LPFC_SGE_TYPE_LSP);
1387 
1388                                         sgl_xtra = lpfc_get_sgl_per_hdwq(
1389                                                         phba, lpfc_ncmd);
1390 
1391                                         if (unlikely(!sgl_xtra)) {
1392                                                 lpfc_ncmd->seg_cnt = 0;
1393                                                 return 1;
1394                                         }
1395                                         sgl->addr_lo = cpu_to_le32(putPaddrLow(
1396                                                        sgl_xtra->dma_phys_sgl));
1397                                         sgl->addr_hi = cpu_to_le32(putPaddrHigh(
1398                                                        sgl_xtra->dma_phys_sgl));
1399 
1400                                 } else {
1401                                         bf_set(lpfc_sli4_sge_type, sgl,
1402                                                LPFC_SGE_TYPE_DATA);
1403                                 }
1404                         }
1405 
1406                         if (!(bf_get(lpfc_sli4_sge_type, sgl) &
1407                                      LPFC_SGE_TYPE_LSP)) {
1408                                 if ((nseg - 1) == i)
1409                                         bf_set(lpfc_sli4_sge_last, sgl, 1);
1410 
1411                                 physaddr = data_sg->dma_address;
1412                                 dma_len = data_sg->length;
1413                                 sgl->addr_lo = cpu_to_le32(
1414                                                          putPaddrLow(physaddr));
1415                                 sgl->addr_hi = cpu_to_le32(
1416                                                         putPaddrHigh(physaddr));
1417 
1418                                 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
1419                                 sgl->word2 = cpu_to_le32(sgl->word2);
1420                                 sgl->sge_len = cpu_to_le32(dma_len);
1421 
1422                                 dma_offset += dma_len;
1423                                 data_sg = sg_next(data_sg);
1424 
1425                                 sgl++;
1426 
1427                                 lsp_just_set = false;
1428                         } else {
1429                                 sgl->word2 = cpu_to_le32(sgl->word2);
1430 
1431                                 sgl->sge_len = cpu_to_le32(
1432                                                      phba->cfg_sg_dma_buf_size);
1433 
1434                                 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
1435                                 i = i - 1;
1436 
1437                                 lsp_just_set = true;
1438                         }
1439 
1440                         j++;
1441                 }
1442                 if (phba->cfg_enable_pbde) {
1443                         /* Use PBDE support for first SGL only, offset == 0 */
1444                         /* Words 13-15 */
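                             /* Mirror the first data SGE's address and length
                              * into a 64-bit BDE in WQE words 13-15; the
                              * command templates already set wqe_pbde.
                              */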
1445                         bde = (struct ulp_bde64 *)
1446                                 &wqe->words[13];
1447                         bde->addrLow = first_data_sgl->addr_lo;
1448                         bde->addrHigh = first_data_sgl->addr_hi;
1449                         bde->tus.f.bdeSize =
1450                                 le32_to_cpu(first_data_sgl->sge_len);
1451                         bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1452                         bde->tus.w = cpu_to_le32(bde->tus.w);
1453                         /* wqe_pbde is 1 in template */
1454                 } else {
1455                         memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
1456                         bf_set(wqe_pbde, &wqe->generic.wqe_com, 0);
1457                 }
1458 
1459         } else {
1460                 lpfc_ncmd->seg_cnt = 0;
1461 
1462                 /* For this clause to be valid, the payload_length
1463                  * and sg_cnt must both be zero.
1464                  */
1465                 if (nCmd->payload_length != 0) {
1466                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1467                                         "6063 NVME DMA Prep Err: sg_cnt %d "
1468                                         "payload_length x%x\n",
1469                                         nCmd->sg_cnt, nCmd->payload_length);
1470                         return 1;
1471                 }
1472         }
1473         return 0;
1474 }
1475 
1476 /**
1477  * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO
1478  * @pnvme_lport: Pointer to the NVME transport local port
1479  * @pnvme_rport: Pointer to the NVME transport remote port receiving the IO
1480  * @hw_queue_handle: Driver-returned handle from lpfc_nvme_create_queue
1481  * @pnvme_fcreq: IO request from the NVME transport to the driver
1482  *
1483  * Driver registers this routine as its io request handler.  This
1484  * routine prepares and issues an fcp WQE built from the data in
1485  * @pnvme_fcreq to the rport indicated in @pnvme_rport.
1486  *
1487  * Return value :
1488  *   0 - Success
1489  *   Negative errno on a validation or resource failure, or the
1490  *   status returned by lpfc_sli4_issue_wqe if the WQE could not
1491  *   be posted.
1492  **/
1493 static int
1494 lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
1495                         struct nvme_fc_remote_port *pnvme_rport,
1496                         void *hw_queue_handle,
1497                         struct nvmefc_fcp_req *pnvme_fcreq)
1498 {
1499         int ret = 0;
1500         int expedite = 0;
1501         int idx, cpu;
1502         struct lpfc_nvme_lport *lport;
1503         struct lpfc_fc4_ctrl_stat *cstat;
1504         struct lpfc_vport *vport;
1505         struct lpfc_hba *phba;
1506         struct lpfc_nodelist *ndlp;
1507         struct lpfc_io_buf *lpfc_ncmd;
1508         struct lpfc_nvme_rport *rport;
1509         struct lpfc_nvme_qhandle *lpfc_queue_info;
1510         struct lpfc_nvme_fcpreq_priv *freqpriv;
1511         struct nvme_common_command *sqe;
1512 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1513         uint64_t start = 0;
1514 #endif
1515 
1516         /* Validate pointers. LLDD fault handling with transport does
1517          * have timing races.
1518          */
1519         lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
1520         if (unlikely(!lport)) {
1521                 ret = -EINVAL;
1522                 goto out_fail;
1523         }
1524 
1525         vport = lport->vport;
1526 
1527         if (unlikely(!hw_queue_handle)) {
1528                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1529                                  "6117 Fail IO, NULL hw_queue_handle\n");
1530                 atomic_inc(&lport->xmt_fcp_err);
1531                 ret = -EBUSY;
1532                 goto out_fail;
1533         }
1534 
1535         phba = vport->phba;
1536 
1537         if (unlikely(vport->load_flag & FC_UNLOADING)) {
1538                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1539                                  "6124 Fail IO, Driver unload\n");
1540                 atomic_inc(&lport->xmt_fcp_err);
1541                 ret = -ENODEV;
1542                 goto out_fail;
1543         }
1549 
1550         freqpriv = pnvme_fcreq->private;
1551         if (unlikely(!freqpriv)) {
1552                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1553                                  "6158 Fail IO, NULL request data\n");
1554                 atomic_inc(&lport->xmt_fcp_err);
1555                 ret = -EINVAL;
1556                 goto out_fail;
1557         }
1558 
1559 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1560         if (phba->ktime_on)
1561                 start = ktime_get_ns();
1562 #endif
1563         rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
1564         lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle;
1565 
1566         /*
1567          * Catch race where our node has transitioned, but the
1568          * transport is still transitioning.
1569          */
1570         ndlp = rport->ndlp;
1571         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1572                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
1573                                  "6053 Busy IO, ndlp not ready: rport x%px "
1574                                   "ndlp x%px, DID x%06x\n",
1575                                  rport, ndlp, pnvme_rport->port_id);
1576                 atomic_inc(&lport->xmt_fcp_err);
1577                 ret = -EBUSY;
1578                 goto out_fail;
1579         }
1580 
1581         /* The remote node has to be a mapped target or it's an error. */
1582         if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
1583             (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
1584                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
1585                                  "6036 Fail IO, DID x%06x not ready for "
1586                                  "IO. State x%x, Type x%x Flg x%x\n",
1587                                  pnvme_rport->port_id,
1588                                  ndlp->nlp_state, ndlp->nlp_type,
1589                                  ndlp->upcall_flags);
1590                 atomic_inc(&lport->xmt_fcp_bad_ndlp);
1591                 ret = -EBUSY;
1592                 goto out_fail;
1593 
1594         }
1595 
1596         /* Currently only NVME Keep alive commands should be expedited
1597          * if the driver runs out of a resource. These should only be
1598          * issued on the admin queue, qidx 0
1599          */
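             /* A keep-alive is recognized by qidx 0 (admin queue), no data
              * (sg_cnt == 0), and the nvme_admin_keep_alive opcode in the
              * SQE embedded in the command IU.
              */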
1600         if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) {
1601                 sqe = &((struct nvme_fc_cmd_iu *)
1602                         pnvme_fcreq->cmdaddr)->sqe.common;
1603                 if (sqe->opcode == nvme_admin_keep_alive)
1604                         expedite = 1;
1605         }
1606 
1607         /* The node is shared with FCP IO, make sure the IO pending count does
1608          * not exceed the programmed depth.
1609          */
1610         if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
1611                 if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
1612                     !expedite) {
1613                         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1614                                          "6174 Fail IO, ndlp qdepth exceeded: "
1615                                          "idx %d DID %x pend %d qdepth %d\n",
1616                                          lpfc_queue_info->index, ndlp->nlp_DID,
1617                                          atomic_read(&ndlp->cmd_pending),
1618                                          ndlp->cmd_qdepth);
1619                         atomic_inc(&lport->xmt_fcp_qdepth);
1620                         ret = -EBUSY;
1621                         goto out_fail;
1622                 }
1623         }
1624 
1625         /* Lookup Hardware Queue index based on fcp_io_sched module parameter */
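             /* SCHED_BY_HDWQ trusts the transport-supplied queue index;
              * otherwise the submitting CPU is mapped to its assigned
              * hardware queue through the driver's cpu_map.
              */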
1626         if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
1627                 idx = lpfc_queue_info->index;
1628         } else {
1629                 cpu = raw_smp_processor_id();
1630                 idx = phba->sli4_hba.cpu_map[cpu].hdwq;
1631         }
1632 
1633         lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite);
1634         if (lpfc_ncmd == NULL) {
1635                 atomic_inc(&lport->xmt_fcp_noxri);
1636                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1637                                  "6065 Fail IO, driver buffer pool is empty: "
1638                                  "idx %d DID %x\n",
1639                                  lpfc_queue_info->index, ndlp->nlp_DID);
1640                 ret = -EBUSY;
1641                 goto out_fail;
1642         }
1643 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1644         if (start) {
1645                 lpfc_ncmd->ts_cmd_start = start;
1646                 lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
1647         } else {
1648                 lpfc_ncmd->ts_cmd_start = 0;
1649         }
1650 #endif
1651 
1652         /*
1653          * Store the data needed by the driver to issue, abort, and complete
1654          * an IO.
1655          * Do not let the IO hang out forever.  There is no midlayer issuing
1656          * an abort so inform the FW of the maximum IO pending time.
1657          */
1658         freqpriv->nvme_buf = lpfc_ncmd;
1659         lpfc_ncmd->nvmeCmd = pnvme_fcreq;
1660         lpfc_ncmd->ndlp = ndlp;
1661         lpfc_ncmd->qidx = lpfc_queue_info->qidx;
1662 
1663         /*
1664          * Issue the IO on the WQ indicated by index in the hw_queue_handle.
1665          * This identifier was created in our hardware queue create callback
1666          * routine. The driver now depends on the IO queue steering from
1667          * the transport.  We are trusting that the upper NVME layers know which
1668          * index to use and that they have affinitized a CPU to this hardware
1669          * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ.
1670          */
1671         lpfc_ncmd->cur_iocbq.hba_wqidx = idx;
1672         cstat = &phba->sli4_hba.hdwq[idx].nvme_cstat;
1673 
1674         lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat);
1675         ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
1676         if (ret) {
1677                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1678                                  "6175 Fail IO, Prep DMA: "
1679                                  "idx %d DID %x\n",
1680                                  lpfc_queue_info->index, ndlp->nlp_DID);
1681                 atomic_inc(&lport->xmt_fcp_err);
1682                 ret = -ENOMEM;
1683                 goto out_free_nvme_buf;
1684         }
1685 
1686         lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
1687                          lpfc_ncmd->cur_iocbq.sli4_xritag,
1688                          lpfc_queue_info->index, ndlp->nlp_DID);
1689 
1690         ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq);
1691         if (ret) {
1692                 atomic_inc(&lport->xmt_fcp_wqerr);
1693                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1694                                  "6113 Fail IO, Could not issue WQE err %x "
1695                                  "sid: x%x did: x%x oxid: x%x\n",
1696                                  ret, vport->fc_myDID, ndlp->nlp_DID,
1697                                  lpfc_ncmd->cur_iocbq.sli4_xritag);
1698                 goto out_free_nvme_buf;
1699         }
1700 
1701         if (phba->cfg_xri_rebalancing)
1702                 lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_ncmd->hdwq_no);
1703 
1704 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1705         if (lpfc_ncmd->ts_cmd_start)
1706                 lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
1707 
1708         if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
1709                 cpu = raw_smp_processor_id();
1710                 if (cpu < LPFC_CHECK_CPU_CNT) {
1711                         lpfc_ncmd->cpu = cpu;
1712                         if (idx != cpu)
1713                                 lpfc_printf_vlog(vport,
1714                                                  KERN_INFO, LOG_NVME_IOERR,
1715                                                 "6702 CPU Check cmd: "
1716                                                 "cpu %d wq %d\n",
1717                                                 lpfc_ncmd->cpu,
1718                                                 lpfc_queue_info->index);
1719                         phba->sli4_hba.hdwq[idx].cpucheck_xmt_io[cpu]++;
1720                 }
1721         }
1722 #endif
1723         return 0;
1724 
1725  out_free_nvme_buf:
1726         if (lpfc_ncmd->nvmeCmd->sg_cnt) {
1727                 if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
1728                         cstat->output_requests--;
1729                 else
1730                         cstat->input_requests--;
1731         } else
1732                 cstat->control_requests--;
1733         lpfc_release_nvme_buf(phba, lpfc_ncmd);
1734  out_fail:
1735         return ret;
1736 }
1737 
1738 /**
1739  * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
1740  * @phba: Pointer to HBA context object
1741  * @cmdiocb: Pointer to command iocb object.
1742  * @abts_cmpl: Pointer to the abort WCQE completion object.
1743  *
1744  * This is the callback function for any NVME FCP IO that was aborted.
1745  *
1746  * Return value:
1747  *   None
1748  **/
1749 void
1750 lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1751                            struct lpfc_wcqe_complete *abts_cmpl)
1752 {
1753         lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
1754                         "6145 ABORT_XRI_CN completing on rpi x%x "
1755                         "original iotag x%x, abort cmd iotag x%x "
1756                         "req_tag x%x, status x%x, hwstatus x%x\n",
1757                         cmdiocb->iocb.un.acxri.abortContextTag,
1758                         cmdiocb->iocb.un.acxri.abortIoTag,
1759                         cmdiocb->iotag,
1760                         bf_get(lpfc_wcqe_c_request_tag, abts_cmpl),
1761                         bf_get(lpfc_wcqe_c_status, abts_cmpl),
1762                         bf_get(lpfc_wcqe_c_hw_status, abts_cmpl));
1763         lpfc_sli_release_iocbq(phba, cmdiocb);
1764 }
1765 
1766 /**
1767  * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS
1768  * @pnvme_lport: Pointer to the NVME transport local port
1769  * @pnvme_rport: Pointer to the NVME transport remote port that owns the IO
1770  * @hw_queue_handle: Driver-returned handle from lpfc_nvme_create_queue
1771  * @pnvme_fcreq: IO request to be aborted
1772  *
1773  * Driver registers this routine as its nvme request io abort handler.  This
1774  * routine issues an fcp Abort WQE for the outstanding IO described by
1775  * @pnvme_fcreq to the rport indicated in @pnvme_rport.  This routine is
1776  * executed asynchronously - once the outstanding IO is validated as still
1777  * pending, the driver issues the abort request and returns without
1778  * waiting for the abort to complete.
1779  *
1780  * Return value:
1781  *   None
1782  **/
1783 static void
1784 lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
1785                     struct nvme_fc_remote_port *pnvme_rport,
1786                     void *hw_queue_handle,
1787                     struct nvmefc_fcp_req *pnvme_fcreq)
1788 {
1789         struct lpfc_nvme_lport *lport;
1790         struct lpfc_vport *vport;
1791         struct lpfc_hba *phba;
1792         struct lpfc_io_buf *lpfc_nbuf;
1793         struct lpfc_iocbq *abts_buf;
1794         struct lpfc_iocbq *nvmereq_wqe;
1795         struct lpfc_nvme_fcpreq_priv *freqpriv;
1796         union lpfc_wqe128 *abts_wqe;
1797         unsigned long flags;
1798         int ret_val;
1799 
1800         /* Validate pointers. LLDD fault handling with transport does
1801          * have timing races.
1802          */
1803         lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
1804         if (unlikely(!lport))
1805                 return;
1806 
1807         vport = lport->vport;
1808 
1809         if (unlikely(!hw_queue_handle)) {
1810                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
1811                                  "6129 Fail Abort, HW Queue Handle NULL.\n");
1812                 return;
1813         }
1814 
1815         phba = vport->phba;
1816         freqpriv = pnvme_fcreq->private;
1817 
1818         if (unlikely(!freqpriv))
1819                 return;
1820         if (vport->load_flag & FC_UNLOADING)
1821                 return;
1822 
1823         /* Announce entry to the abort request handler. */
1824         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
1825                          "6002 Abort Request to rport DID x%06x "
1826                          "for nvme_fc_req x%px\n",
1827                          pnvme_rport->port_id,
1828                          pnvme_fcreq);
1829 
1830         /* If the hba is getting reset, this flag is set.  It is
1831          * cleared when the reset is complete and rings reestablished.
1832          */
1833         spin_lock_irqsave(&phba->hbalock, flags);
1834         /* driver queued commands are in process of being flushed */
1835         if (phba->hba_flag & HBA_IOQ_FLUSH) {
1836                 spin_unlock_irqrestore(&phba->hbalock, flags);
1837                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1838                                  "6139 Driver in reset cleanup - flushing "
1839                                  "NVME Req now.  hba_flag x%x\n",
1840                                  phba->hba_flag);
1841                 return;
1842         }
1843 
1844         lpfc_nbuf = freqpriv->nvme_buf;
1845         if (!lpfc_nbuf) {
1846                 spin_unlock_irqrestore(&phba->hbalock, flags);
1847                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1848                                  "6140 NVME IO req has no matching lpfc nvme "
1849                                  "io buffer.  Skipping abort req.\n");
1850                 return;
1851         } else if (!lpfc_nbuf->nvmeCmd) {
1852                 spin_unlock_irqrestore(&phba->hbalock, flags);
1853                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1854                                  "6141 lpfc NVME IO req has no nvme_fcreq "
1855                                  "io buffer.  Skipping abort req.\n");
1856                 return;
1857         }
1858         nvmereq_wqe = &lpfc_nbuf->cur_iocbq;
1859 
1860         /* Guard against IO completion being called at same time */
1861         spin_lock(&lpfc_nbuf->buf_lock);
1862 
1863         /*
1864          * The lpfc_nbuf and the mapped nvme_fcreq in the driver's
1865          * state must match the nvme_fcreq passed by the nvme
1866          * transport.  If they don't match, it is likely the driver
1867          * has already completed the NVME IO and the nvme transport
1868          * has not seen it yet.
1869          */
1870         if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
1871                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1872                                  "6143 NVME req mismatch: "
1873                                  "lpfc_nbuf x%px nvmeCmd x%px, "
1874                                  "pnvme_fcreq x%px.  Skipping Abort xri x%x\n",
1875                                  lpfc_nbuf, lpfc_nbuf->nvmeCmd,
1876                                  pnvme_fcreq, nvmereq_wqe->sli4_xritag);
1877                 goto out_unlock;
1878         }
1879 
1880         /* Don't abort IOs no longer on the pending queue. */
1881         if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
1882                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1883                                  "6142 NVME IO req x%px not queued - skipping "
1884                                  "abort req xri x%x\n",
1885                                  pnvme_fcreq, nvmereq_wqe->sli4_xritag);
1886                 goto out_unlock;
1887         }
1888 
1889         atomic_inc(&lport->xmt_fcp_abort);
1890         lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
1891                          nvmereq_wqe->sli4_xritag,
1892                          nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);
1893 
1894         /* Outstanding abort is in progress */
1895         if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
1896                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1897                                  "6144 Outstanding NVME I/O Abort Request "
1898                                  "still pending on nvme_fcreq x%px, "
1899                                  "lpfc_ncmd %px xri x%x\n",
1900                                  pnvme_fcreq, lpfc_nbuf,
1901                                  nvmereq_wqe->sli4_xritag);
1902                 goto out_unlock;
1903         }
1904 
1905         abts_buf = __lpfc_sli_get_iocbq(phba);
1906         if (!abts_buf) {
1907                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1908                                  "6136 No available abort wqes. Skipping "
1909                                  "Abts req for nvme_fcreq x%px xri x%x\n",
1910                                  pnvme_fcreq, nvmereq_wqe->sli4_xritag);
1911                 goto out_unlock;
1912         }
1913 
1914         /* Ready - mark outstanding as aborted by driver. */
1915         nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
1916 
1917         /* Complete prepping the abort wqe and issue to the FW. */
1918         abts_wqe = &abts_buf->wqe;
1919 
1920         /* WQEs are reused.  Clear stale data and set key fields to
1921          * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
1922          */
1923         memset(abts_wqe, 0, sizeof(*abts_wqe));
1924         bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
1925 
1926         /* word 7 */
1927         bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
1928         bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
1929                nvmereq_wqe->iocb.ulpClass);
1930 
1931         /* word 8 - tell the FW to abort the IO associated with this
1932          * outstanding exchange ID.
1933          */
1934         abts_wqe->abort_cmd.wqe_com.abort_tag = nvmereq_wqe->sli4_xritag;
1935 
1936         /* word 9 - this is the iotag for the abts_wqe completion. */
1937         bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
1938                abts_buf->iotag);
1939 
1940         /* word 10 */
1941         bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
1942         bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
1943 
1944         /* word 11 */
1945         bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
1946         bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
1947         bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
1948 
1949         /* ABTS WQE must go to the same WQ as the WQE to be aborted */
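             /* Tag the abort as an NVME IO and point wqe_cmpl at
              * lpfc_nvme_abort_fcreq_cmpl, which logs the abort completion
              * and releases the abort iocbq.
              */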
1950         abts_buf->iocb_flag |= LPFC_IO_NVME;
1951         abts_buf->hba_wqidx = nvmereq_wqe->hba_wqidx;
1952         abts_buf->vport = vport;
1953         abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
1954         ret_val = lpfc_sli4_issue_wqe(phba, lpfc_nbuf->hdwq, abts_buf);
1955         spin_unlock(&lpfc_nbuf->buf_lock);
1956         spin_unlock_irqrestore(&phba->hbalock, flags);
1957         if (ret_val) {
1958                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1959                                  "6137 Failed abts issue_wqe with status x%x "
1960                                  "for nvme_fcreq x%px.\n",
1961                                  ret_val, pnvme_fcreq);
1962                 lpfc_sli_release_iocbq(phba, abts_buf);
1963                 return;
1964         }
1965 
1966         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
1967                          "6138 Transport Abort NVME Request Issued for "
1968                          "ox_id x%x on reqtag x%x\n",
1969                          nvmereq_wqe->sli4_xritag,
1970                          abts_buf->iotag);
1971         return;
1972 
1973 out_unlock:
1974         spin_unlock(&lpfc_nbuf->buf_lock);
1975         spin_unlock_irqrestore(&phba->hbalock, flags);
1976         return;
1977 }
1978 
1979 /* Declare and initialize an instance of the FC NVME template. */
1980 static struct nvme_fc_port_template lpfc_nvme_template = {
1981         /* initiator-based functions */
1982         .localport_delete  = lpfc_nvme_localport_delete,
1983         .remoteport_delete = lpfc_nvme_remoteport_delete,
1984         .create_queue = lpfc_nvme_create_queue,
1985         .delete_queue = lpfc_nvme_delete_queue,
1986         .ls_req       = lpfc_nvme_ls_req,
1987         .fcp_io       = lpfc_nvme_fcp_io_submit,
1988         .ls_abort     = lpfc_nvme_ls_abort,
1989         .fcp_abort    = lpfc_nvme_fcp_abort,
1990 
1991         .max_hw_queues = 1,
1992         .max_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
1993         .max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
1994         .dma_boundary = 0xFFFFFFFF,
1995 
1996         /* Sizes of additional private data for data structures.
1997          * Only the LS request private area is unused at this time.
1998          */
1999         .local_priv_sz = sizeof(struct lpfc_nvme_lport),
2000         .remote_priv_sz = sizeof(struct lpfc_nvme_rport),
2001         .lsrqst_priv_sz = 0,
2002         .fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
2003 };
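
     /* Note: max_hw_queues and max_sgl_segments above are initial defaults;
      * both are recomputed in lpfc_nvme_create_localport before this template
      * is passed to nvme_fc_register_localport.
      */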
2004 
2005 /**
2006  * lpfc_get_nvme_buf - Get a nvme buffer from io_buf_list of the HBA
2007  * @phba: The HBA for which this call is being executed.
2008  *
2009  * This routine removes an nvme buffer from the head of the io_buf_list
2010  * of the hardware queue selected by @idx and returns it to the caller.
2011  *
2012  * Return codes:
2013  *   NULL - Error
2014  *   Pointer to lpfc_io_buf - Success
2015  **/
2016 static struct lpfc_io_buf *
2017 lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
2018                   int idx, int expedite)
2019 {
2020         struct lpfc_io_buf *lpfc_ncmd;
2021         struct lpfc_sli4_hdw_queue *qp;
2022         struct sli4_sge *sgl;
2023         struct lpfc_iocbq *pwqeq;
2024         union lpfc_wqe128 *wqe;
2025 
2026         lpfc_ncmd = lpfc_get_io_buf(phba, NULL, idx, expedite);
2027 
2028         if (lpfc_ncmd) {
2029                 pwqeq = &(lpfc_ncmd->cur_iocbq);
2030                 wqe = &pwqeq->wqe;
2031 
2032                 /* Setup key fields in buffer that may have been changed
2033                  * if other protocols used this buffer.
2034                  */
2035                 pwqeq->iocb_flag = LPFC_IO_NVME;
2036                 pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
2037                 lpfc_ncmd->start_time = jiffies;
2038                 lpfc_ncmd->flags = 0;
2039 
2040                 /* Rsp SGE will be filled in when we rcv an IO
2041                  * from the NVME Layer to be sent.
2042                  * The cmd is going to be embedded so we need a SKIP SGE.
2043                  */
2044                 sgl = lpfc_ncmd->dma_sgl;
2045                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2046                 bf_set(lpfc_sli4_sge_last, sgl, 0);
2047                 sgl->word2 = cpu_to_le32(sgl->word2);
2048                 /* Fill in word 3 / sgl_len during cmd submission */
2049 
2050                 /* Initialize 64 bytes only */
2051                 memset(wqe, 0, sizeof(union lpfc_wqe));
2052 
2053                 if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
2054                         atomic_inc(&ndlp->cmd_pending);
2055                         lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
2056                 }
2057 
2058         } else {
2059                 qp = &phba->sli4_hba.hdwq[idx];
2060                 qp->empty_io_bufs++;
2061         }
2062 
2063         return  lpfc_ncmd;
2064 }
2065 
2066 /**
2067  * lpfc_release_nvme_buf - Return an nvme buffer back to the hba buffer list.
2068  * @phba: The Hba for which this call is being executed.
2069  * @lpfc_ncmd: The nvme buffer which is being released.
2070  *
2071  * This routine releases the @lpfc_ncmd nvme buffer by adding it to the
2072  * tail of the @phba lpfc_io_buf_list.  For SLI4, XRIs are tied to the
2073  * nvme buffer and the buffer cannot be reused for at least RA_TOV if
2074  * the exchange was aborted.
2075  **/
2076 static void
2077 lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
2078 {
2079         struct lpfc_sli4_hdw_queue *qp;
2080         unsigned long iflag = 0;
2081 
2082         if ((lpfc_ncmd->flags & LPFC_SBUF_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
2083                 atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);
2084 
2085         lpfc_ncmd->ndlp = NULL;
2086         lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
2087 
2088         qp = lpfc_ncmd->hdwq;
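             /* If the exchange is still busy on the adapter (XB set in the
              * WCQE), park the buffer on the hardware queue's abort list;
              * it is freed later when the aborted XRI is released (see
              * lpfc_sli4_nvme_xri_aborted).
              */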
2089         if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
2090                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2091                                 "6310 XB release deferred for "
2092                                 "ox_id x%x on reqtag x%x\n",
2093                                 lpfc_ncmd->cur_iocbq.sli4_xritag,
2094                                 lpfc_ncmd->cur_iocbq.iotag);
2095 
2096                 spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
2097                 list_add_tail(&lpfc_ncmd->list,
2098                         &qp->lpfc_abts_io_buf_list);
2099                 qp->abts_nvme_io_bufs++;
2100                 spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
2101         } else
2102                 lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp);
2103 }
2104 
2105 /**
2106  * lpfc_nvme_create_localport - Create/Bind an nvme localport instance.
2107  * @vport: the lpfc_vport instance requesting a localport.
2108  *
2109  * This routine is invoked to create an nvme localport instance to bind
2110  * to the nvme_fc_transport.  It is called once during driver load
2111  * like lpfc_create_shost after all other services are initialized.
2112  * It requires a vport, vpi, and wwns at call time.  Other localport
2113  * parameters are modified as the driver's FCID and the Fabric WWN
2114  * are established.
2115  *
2116  * Return codes
2117  *      0 - successful
2118  *      -ENOMEM - no heap memory available
2119  *      other values - from nvme registration upcall
2120  **/
2121 int
2122 lpfc_nvme_create_localport(struct lpfc_vport *vport)
2123 {
2124         int ret = 0;
2125         struct lpfc_hba  *phba = vport->phba;
2126         struct nvme_fc_port_info nfcp_info;
2127         struct nvme_fc_local_port *localport;
2128         struct lpfc_nvme_lport *lport;
2129 
2130         /* Initialize this localport instance.  The vport wwn usage ensures
2131          * that NPIV is accounted for.
2132          */
2133         memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info));
2134         nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR;
2135         nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
2136         nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
2137 
2138         /* We need to tell the transport layer + 1 because it takes page
2139          * alignment into account. When space for the SGL is allocated we
2140          * allocate + 3, one for cmd, one for rsp and one for this alignment
2141          */
2142         lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
2143 
2144         /* Advertise how many hw queues we support based on fcp_io_sched */
2145         if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
2146                 lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;
2147         else
2148                 lpfc_nvme_template.max_hw_queues =
2149                         phba->sli4_hba.num_present_cpu;
2150 
2151         if (!IS_ENABLED(CONFIG_NVME_FC))
2152                 return ret;
2153 
2154         /* The port info is built on the stack; the registration call
2155          * allocates the localport from the heap, including the private area.
2156          */
2157 
2158         ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
2159                                          &vport->phba->pcidev->dev, &localport);
2160         if (!ret) {
2161                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
2162                                  "6005 Successfully registered local "
2163                                  "NVME port num %d, localP x%px, private "
2164                                  "x%px, sg_seg %d\n",
2165                                  localport->port_num, localport,
2166                                  localport->private,
2167                                  lpfc_nvme_template.max_sgl_segments);
2168 
2169                 /* The private area holds our lport, sized as declared in the template. */
2170                 lport = (struct lpfc_nvme_lport *)localport->private;
2171                 vport->localport = localport;
2172                 lport->vport = vport;
2173                 vport->nvmei_support = 1;
2174 
2175                 atomic_set(&lport->xmt_fcp_noxri, 0);
2176                 atomic_set(&lport->xmt_fcp_bad_ndlp, 0);
2177                 atomic_set(&lport->xmt_fcp_qdepth, 0);
2178                 atomic_set(&lport->xmt_fcp_err, 0);
2179                 atomic_set(&lport->xmt_fcp_wqerr, 0);
2180                 atomic_set(&lport->xmt_fcp_abort, 0);
2181                 atomic_set(&lport->xmt_ls_abort, 0);
2182                 atomic_set(&lport->xmt_ls_err, 0);
2183                 atomic_set(&lport->cmpl_fcp_xb, 0);
2184                 atomic_set(&lport->cmpl_fcp_err, 0);
2185                 atomic_set(&lport->cmpl_ls_xb, 0);
2186                 atomic_set(&lport->cmpl_ls_err, 0);
2187                 atomic_set(&lport->fc4NvmeLsRequests, 0);
2188                 atomic_set(&lport->fc4NvmeLsCmpls, 0);
2189         }
2190 
2191         return ret;
2192 }
2193 
2194 #if (IS_ENABLED(CONFIG_NVME_FC))
2195 /* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg.
2196  *
2197  * The driver has to wait for the host nvme transport to callback
2198  * indicating the localport has successfully unregistered all
2199  * resources.  Since this is an uninterruptible wait, loop every ten
2200  * seconds and print a message indicating no progress.
2201  *
2202  * An uninterruptible wait is used because of the risk of transport-to-
2203  * driver state mismatch.
2204  */
2205 static void
2206 lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
2207                            struct lpfc_nvme_lport *lport,
2208                            struct completion *lport_unreg_cmp)
2209 {
2210         u32 wait_tmo;
2211         int ret, i, pending = 0;
2212         struct lpfc_sli_ring  *pring;
2213         struct lpfc_hba  *phba = vport->phba;
2214 
2215         /* The host transport has to clean up and confirm, requiring an indefinite
2216          * wait. Print a message if a 10 second wait expires and renew the
2217          * wait. This is unexpected.
2218          */
2219         wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
2220         while (true) {
2221                 ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
2222                 if (unlikely(!ret)) {
2223                         pending = 0;
2224                         for (i = 0; i < phba->cfg_hdw_queue; i++) {
2225                                 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
2226                                 if (!pring)
2227                                         continue;
2228                                 if (pring->txcmplq_cnt)
2229                                         pending += pring->txcmplq_cnt;
2230                         }
2231                         lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
2232                                          "6176 Lport x%px Localport x%px wait "
2233                                          "timed out. Pending %d. Renewing.\n",
2234                                          lport, vport->localport, pending);
2235                         continue;
2236                 }
2237                 break;
2238         }
2239         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
2240                          "6177 Lport x%px Localport x%px Complete Success\n",
2241                          lport, vport->localport);
2242 }
2243 #endif
2244 
2245 /**
2246  * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
2247  * @vport: pointer to the lpfc vport data structure.
2248  *
2249  * This routine is invoked to destroy all lports bound to the phba.
2250  * The lport memory was allocated by the nvme fc transport and is
2251  * released there.  This routine ensures all rports bound to the
2252  * lport have been disconnected.
2253  *
2254  **/
2255 void
2256 lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
2257 {
2258 #if (IS_ENABLED(CONFIG_NVME_FC))
2259         struct nvme_fc_local_port *localport;
2260         struct lpfc_nvme_lport *lport;
2261         int ret;
2262         DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);
2263 
2264         if (vport->nvmei_support == 0)
2265                 return;
2266 
2267         localport = vport->localport;
2268         lport = (struct lpfc_nvme_lport *)localport->private;
2269 
2270         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
2271                          "6011 Destroying NVME localport x%px\n",
2272                          localport);
2273 
2274         /* lport's rport list is clear.  Unregister
2275          * lport and release resources.
2276          */
2277         lport->lport_unreg_cmp = &lport_unreg_cmp;
2278         ret = nvme_fc_unregister_localport(localport);
2279 
2280         /* Wait for completion.  This either blocks
2281          * indefinitely or succeeds
2282          */
2283         lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
2284         vport->localport = NULL;
2285 
2286         /* Regardless of the unregister upcall response, clear
2287          * nvmei_support.  All rports are unregistered and the
2288          * driver will clean up.
2289          */
2290         vport->nvmei_support = 0;
2291         if (ret == 0) {
2292                 lpfc_printf_vlog(vport,
2293                                  KERN_INFO, LOG_NVME_DISC,
2294                                  "6009 Unregistered lport Success\n");
2295         } else {
2296                 lpfc_printf_vlog(vport,
2297                                  KERN_INFO, LOG_NVME_DISC,
2298                                  "6010 Unregistered lport "
2299                                  "Failed, status x%x\n",
2300                                  ret);
2301         }
2302 #endif
2303 }
2304 
2305 void
2306 lpfc_nvme_update_localport(struct lpfc_vport *vport)
2307 {
2308 #if (IS_ENABLED(CONFIG_NVME_FC))
2309         struct nvme_fc_local_port *localport;
2310         struct lpfc_nvme_lport *lport;
2311 
2312         localport = vport->localport;
2313         if (!localport) {
2314                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
2315                                  "6710 Update NVME fail. No localport\n");
2316                 return;
2317         }
2318         lport = (struct lpfc_nvme_lport *)localport->private;
2319         if (!lport) {
2320                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
2321                                  "6171 Update NVME fail. localP x%px, No lport\n",
2322                                  localport);
2323                 return;
2324         }
2325         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
2326                          "6012 Update NVME lport x%px did x%x\n",
2327                          localport, vport->fc_myDID);
2328 
2329         localport->port_id = vport->fc_myDID;
2330         if (localport->port_id == 0)
2331                 localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY;
2332         else
2333                 localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;
2334 
2335         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2336                          "6030 bound lport x%px to DID x%06x\n",
2337                          lport, localport->port_id);
2338 #endif
2339 }
2340 
2341 int
2342 lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2343 {
2344 #if (IS_ENABLED(CONFIG_NVME_FC))
2345         int ret = 0;
2346         struct nvme_fc_local_port *localport;
2347         struct lpfc_nvme_lport *lport;
2348         struct lpfc_nvme_rport *rport;
2349         struct lpfc_nvme_rport *oldrport;
2350         struct nvme_fc_remote_port *remote_port;
2351         struct nvme_fc_port_info rpinfo;
2352         struct lpfc_nodelist *prev_ndlp = NULL;
2353 
2354         lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
2355                          "6006 Register NVME PORT. DID x%06x nlptype x%x\n",
2356                          ndlp->nlp_DID, ndlp->nlp_type);
2357 
2358         localport = vport->localport;
2359         if (!localport)
2360                 return 0;
2361 
2362         lport = (struct lpfc_nvme_lport *)localport->private;
2363 
2364         /* NVME rports are not preserved across devloss.
2365          * Just register this instance.  Note, rpinfo->dev_loss_tmo
2366          * is left 0 to indicate the transport defaults are accepted.  The
2367          * driver communicates port role capabilities consistent
2368          * with the PRLI response data.
2369          */
2370         memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info));
2371         rpinfo.port_id = ndlp->nlp_DID;
2372         if (ndlp->nlp_type & NLP_NVME_TARGET)
2373                 rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
2374         if (ndlp->nlp_type & NLP_NVME_INITIATOR)
2375                 rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;
2376 
2377         if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
2378                 rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;
2379 
2380         rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
2381         rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
2382 
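             /* If an nrport already exists, remember its ndlp for the rebind
              * checks below; otherwise take a node reference to cover the new
              * remoteport registration.
              */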
2383         spin_lock_irq(&vport->phba->hbalock);
2384         oldrport = lpfc_ndlp_get_nrport(ndlp);
2385         if (oldrport) {
2386                 prev_ndlp = oldrport->ndlp;
2387                 spin_unlock_irq(&vport->phba->hbalock);
2388         } else {
2389                 spin_unlock_irq(&vport->phba->hbalock);
2390                 lpfc_nlp_get(ndlp);
2391         }
2392 
2393         ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
2394         if (!ret) {
2395                 /* If the ndlp already has an nrport, this is just
2396                  * a resume of the existing rport.  Else this is a
2397                  * new rport.
2398                  */
2399                 /* Guard against an unregister/reregister
2400                  * race that leaves the WAIT flag set.
2401                  */
2402                 spin_lock_irq(&vport->phba->hbalock);
2403                 ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
2404                 spin_unlock_irq(&vport->phba->hbalock);
2405                 rport = remote_port->private;
2406                 if (oldrport) {
2407                         /* New remoteport record does not guarantee valid
2408                          * host private memory area.
2409                          */
2410                         if (oldrport == remote_port->private) {
2411                                 /* Same remoteport - ndlp should match.
2412                                  * Just reuse.
2413                                  */
2414                                 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
2415                                                  LOG_NVME_DISC,
2416                                                  "6014 Rebind lport to current "
2417                                                  "remoteport x%px wwpn 0x%llx, "
2418                                                  "Data: x%x x%x x%px x%px x%x "
2419                                                  " x%06x\n",
2420                                                  remote_port,
2421                                                  remote_port->port_name,
2422                                                  remote_port->port_id,
2423                                                  remote_port->port_role,
2424                                                  oldrport->ndlp,
2425                                                  ndlp,
2426                                                  ndlp->nlp_type,
2427                                                  ndlp->nlp_DID);
2428 
2429                                 /* It's a complete rebind only if the driver
2430                                  * is registering with the same ndlp. Otherwise
2431                                  * the driver likely executed a node swap
2432                                  * prior to this registration and the ndlp to
2433                                  * remoteport binding needs to be redone.
2434                                  */
2435                                 if (prev_ndlp == ndlp)
2436                                         return 0;
2437 
2438                         }
2439 
2440                         /* Sever the ndlp<->rport association
2441                          * before dropping the ndlp ref from
2442                          * register.
2443                          */
2444                         spin_lock_irq(&vport->phba->hbalock);
2445                         ndlp->nrport = NULL;
2446                         ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
2447                         spin_unlock_irq(&vport->phba->hbalock);
2448                         rport->ndlp = NULL;
2449                         rport->remoteport = NULL;
2450 
2451                         /* Reference only removed if previous NDLP is no longer
2452                          * active. It might be just a swap and removing the
2453                          * reference would cause a premature cleanup.
2454                          */
2455                         if (prev_ndlp && prev_ndlp != ndlp) {
2456                                 if ((!NLP_CHK_NODE_ACT(prev_ndlp)) ||
2457                                     (!prev_ndlp->nrport))
2458                                         lpfc_nlp_put(prev_ndlp);
2459                         }
2460                 }
2461 
2462                 /* Cleanly bind the rport to the ndlp. */
2463                 rport->remoteport = remote_port;
2464                 rport->lport = lport;
2465                 rport->ndlp = ndlp;
2466                 spin_lock_irq(&vport->phba->hbalock);
2467                 ndlp->nrport = rport;
2468                 spin_unlock_irq(&vport->phba->hbalock);
2469                 lpfc_printf_vlog(vport, KERN_INFO,
2470                                  LOG_NVME_DISC | LOG_NODE,
2471                                  "6022 Bind lport x%px to remoteport x%px "
2472                                  "rport x%px WWNN 0x%llx, "
2473                                  "Rport WWPN 0x%llx DID "
2474                                  "x%06x Role x%x, ndlp %p prev_ndlp x%px\n",
2475                                  lport, remote_port, rport,
2476                                  rpinfo.node_name, rpinfo.port_name,
2477                                  rpinfo.port_id, rpinfo.port_role,
2478                                  ndlp, prev_ndlp);
2479         } else {
2480                 lpfc_printf_vlog(vport, KERN_ERR,
2481                                  LOG_NVME_DISC | LOG_NODE,
2482                                  "6031 RemotePort Registration failed "
2483                                  "err: %d, DID x%06x\n",
2484                                  ret, ndlp->nlp_DID);
2485         }
2486 
2487         return ret;
2488 #else
2489         return 0;
2490 #endif
2491 }
2492 
2493 /**
2494  * lpfc_nvme_rescan_port - Check to see if we should rescan this remoteport
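      * @vport: Pointer to the lpfc vport that owns @ndlp.
      * @ndlp: Pointer to the node whose remoteport may be rescanned.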
2495  *
2496  * If the ndlp represents an NVME Target that we are logged into,
2497  * ping the NVME FC Transport layer to initiate a device rescan
2498  * on this remote NPort.
2499  */
2500 void
2501 lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2502 {
2503 #if (IS_ENABLED(CONFIG_NVME_FC))
2504         struct lpfc_nvme_rport *nrport;
2505         struct nvme_fc_remote_port *remoteport = NULL;
2506 
2507         spin_lock_irq(&vport->phba->hbalock);
2508         nrport = lpfc_ndlp_get_nrport(ndlp);
2509         if (nrport)
2510                 remoteport = nrport->remoteport;
2511         spin_unlock_irq(&vport->phba->hbalock);
2512 
2513         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2514                          "6170 Rescan NPort DID x%06x type x%x "
2515                          "state x%x nrport x%px remoteport x%px\n",
2516                          ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state,
2517                          nrport, remoteport);
2518 
2519         if (!nrport || !remoteport)
2520                 goto rescan_exit;
2521 
2522         /* Only rescan if we are an NVME target in the MAPPED state */
2523         if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY &&
2524             ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
2525                 nvme_fc_rescan_remoteport(remoteport);
2526 
2527                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
2528                                  "6172 NVME rescanned DID x%06x "
2529                                  "port_state x%x\n",
2530                                  ndlp->nlp_DID, remoteport->port_state);
2531         }
2532         return;
2533  rescan_exit:
2534         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2535                          "6169 Skip NVME Rport Rescan, NVME remoteport "
2536                          "unregistered\n");
2537 #endif
2538 }
2539 
2540 /* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
2541  *
2542  * There is no notion of Devloss or rport recovery from the current
2543  * nvme_transport perspective.  Loss of an rport just means IO cannot
2544  * be sent and recovery is completely up to the initiator.
2545  * For now, the driver just unbinds the DID and port_role so that
2546  * no further IO can be issued.  Changes are planned for later.
2547  *
2548  * Notes - the ndlp reference count is not decremented here
2549  * since there is no nvme_transport api for devloss.  Node ref count
2550  * is only adjusted in driver unload.
2551  */
2552 void
2553 lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2554 {
2555 #if (IS_ENABLED(CONFIG_NVME_FC))
2556         int ret;
2557         struct nvme_fc_local_port *localport;
2558         struct lpfc_nvme_lport *lport;
2559         struct lpfc_nvme_rport *rport;
2560         struct nvme_fc_remote_port *remoteport = NULL;
2561 
2562         localport = vport->localport;
2563 
2564         /* This is a fundamental error.  The localport is always
2565          * available until driver unload.  Just exit.
2566          */
2567         if (!localport)
2568                 return;
2569 
2570         lport = (struct lpfc_nvme_lport *)localport->private;
2571         if (!lport)
2572                 goto input_err;
2573 
2574         spin_lock_irq(&vport->phba->hbalock);
2575         rport = lpfc_ndlp_get_nrport(ndlp);
2576         if (rport)
2577                 remoteport = rport->remoteport;
2578         spin_unlock_irq(&vport->phba->hbalock);
2579         if (!remoteport)
2580                 goto input_err;
2581 
2582         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2583                          "6033 Unreg nvme remoteport x%px, portname x%llx, "
2584                          "port_id x%06x, portstate x%x port type x%x\n",
2585                          remoteport, remoteport->port_name,
2586                          remoteport->port_id, remoteport->port_state,
2587                          ndlp->nlp_type);
2588 
2589         /* Sanity check ndlp type.  Only call for NVME ports. Don't
2590          * clear any rport state until the transport calls back.
2591          */
2592 
2593         if (ndlp->nlp_type & NLP_NVME_TARGET) {
2594                 /* No concern about the role change on the nvme remoteport.
2595                  * The transport will update it.
2596                  */
2597                 ndlp->upcall_flags |= NLP_WAIT_FOR_UNREG;
2598 
2599                 /* Don't let the host nvme transport keep sending keep-alives
2600                  * on this remoteport. Vport is unloading, no recovery. The
2601          * return value is ignored.  The upcall is a courtesy to the
2602                  * transport.
2603                  */
2604                 if (vport->load_flag & FC_UNLOADING)
2605                         (void)nvme_fc_set_remoteport_devloss(remoteport, 0);
2606 
2607                 ret = nvme_fc_unregister_remoteport(remoteport);
2608                 if (ret != 0) {
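                             /* The node reference taken at registration
                              * is normally dropped by the remoteport_delete
                              * callback.  A failed unregister means that
                              * callback will not run, so drop the
                              * reference here instead.
                              */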
2609                         lpfc_nlp_put(ndlp);
2610                         lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
2611                                          "6167 NVME unregister failed %d "
2612                                          "port_state x%x\n",
2613                                          ret, remoteport->port_state);
2614                 }
2615         }
2616         return;
2617 
2618  input_err:
2619 #endif
2620         lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
2621                          "6168 State error: lport x%px, rport x%px FCID x%06x\n",
2622                          vport->localport, ndlp->rport, ndlp->nlp_DID);
2623 }
2624 
2625 /**
2626  * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
2627  * @phba: pointer to lpfc hba data structure.
2628  * @axri: pointer to the fcp xri abort wcqe structure.
2629  * @lpfc_ncmd: The nvme job structure for the request being aborted.
2630  *
2631  * This routine is invoked by the worker thread to process a SLI4 fast-path
2632  * NVME aborted xri.  Aborted NVME IO commands are completed to the transport
2633  * here.
2634  **/
2635 void
2636 lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
2637                            struct sli4_wcqe_xri_aborted *axri,
2638                            struct lpfc_io_buf *lpfc_ncmd)
2639 {
2640         uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
2641         struct nvmefc_fcp_req *nvme_cmd = NULL;
2642         struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp;
2643 
2644 
2645         if (ndlp)
2646                 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
2647 
2648         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2649                         "6311 nvme_cmd x%px xri x%x tag x%x abort complete and "
2650                         "xri released\n",
2651                         lpfc_ncmd->nvmeCmd, xri,
2652                         lpfc_ncmd->cur_iocbq.iotag);
2653 
2654         /* Aborted NVME commands must not be completed to the transport
2655          * until the abort exchange fully completes.  Once it does, the
2656          * IO buffer is returned via the put list.
2657          */
2658         if (lpfc_ncmd->nvmeCmd) {
2659                 nvme_cmd = lpfc_ncmd->nvmeCmd;
2660                 nvme_cmd->done(nvme_cmd);
2661                 lpfc_ncmd->nvmeCmd = NULL;
2662         }
2663         lpfc_release_nvme_buf(phba, lpfc_ncmd);
2664 }
2665 
2666 /**
2667  * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete
2668  * @phba: Pointer to HBA context object.
2669  *
2670  * This function flushes all wqes in the nvme rings and frees all resources
2671  * in the txcmplq. This function does not issue abort wqes for the IO
2672  * commands in the txcmplq; they will just be returned with
2673  * IOERR_SLI_DOWN. This function is invoked during EEH handling when the
2674  * device's PCI slot has been permanently disabled.
2675  **/
2676 void
2677 lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
2678 {
2679         struct lpfc_sli_ring  *pring;
2680         u32 i, wait_cnt = 0;
2681 
2682         if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq)
2683                 return;
2684 
2685         /* Cycle through all IO rings and make sure all outstanding
2686          * WQEs have been removed from the txcmplqs.
2687          */
2688         for (i = 0; i < phba->cfg_hdw_queue; i++) {
2689                 if (!phba->sli4_hba.hdwq[i].io_wq)
2690                         continue;
2691                 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
2692 
2693                 if (!pring)
2694                         continue;
2695 
2696                 /* Retrieve everything on the txcmplq */
2697                 while (!list_empty(&pring->txcmplq)) {
2698                         msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
2699                         wait_cnt++;
2700 
2701                         /* The sleep is 10 ms.  Every ten seconds,
2702                          * dump a message.  Something is wrong.
2703                          */
2704                         if ((wait_cnt % 1000) == 0) {
2705                                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2706                                                 "6178 NVME IO not empty, "
2707                                                 "cnt %d\n", wait_cnt);
2708                         }
2709                 }
2710         }
2711 }
2712 
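     /**
      * lpfc_nvme_cancel_iocb - Cancel an outstanding NVME fcp wqe
      * @phba: Pointer to HBA context object.
      * @pwqeIn: Pointer to the driver iocb being cancelled.
      *
      * Fails the NVME command bound to @pwqeIn back to the transport with
      * NVME_SC_INTERNAL status and no data transferred, then releases the
      * IO buffer.  Abort wqes, and wqes with no IO context, are simply
      * released; the IO wqe an abort references makes its own done call.
      **/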
2713 void
2714 lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn)
2715 {
2716 #if (IS_ENABLED(CONFIG_NVME_FC))
2717         struct lpfc_io_buf *lpfc_ncmd;
2718         struct nvmefc_fcp_req *nCmd;
2719         struct lpfc_nvme_fcpreq_priv *freqpriv;
2720 
2721         if (!pwqeIn->context1) {
2722                 lpfc_sli_release_iocbq(phba, pwqeIn);
2723                 return;
2724         }
2725         /* For an abort iocb just return; the IO iocb will do the done call */
2726         if (bf_get(wqe_cmnd, &pwqeIn->wqe.gen_req.wqe_com) ==
2727             CMD_ABORT_XRI_CX) {
2728                 lpfc_sli_release_iocbq(phba, pwqeIn);
2729                 return;
2730         }
2731         lpfc_ncmd = (struct lpfc_io_buf *)pwqeIn->context1;
2732 
2733         spin_lock(&lpfc_ncmd->buf_lock);
2734         if (!lpfc_ncmd->nvmeCmd) {
2735                 spin_unlock(&lpfc_ncmd->buf_lock);
2736                 lpfc_release_nvme_buf(phba, lpfc_ncmd);
2737                 return;
2738         }
2739 
2740         nCmd = lpfc_ncmd->nvmeCmd;
2741         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2742                         "6194 NVME Cancel xri %x\n",
2743                         lpfc_ncmd->cur_iocbq.sli4_xritag);
2744 
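             /* Complete the IO back to the transport with no data
              * transferred and NVME_SC_INTERNAL status.
              */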
2745         nCmd->transferred_length = 0;
2746         nCmd->rcv_rsplen = 0;
2747         nCmd->status = NVME_SC_INTERNAL;
2748         freqpriv = nCmd->private;
2749         freqpriv->nvme_buf = NULL;
2750         lpfc_ncmd->nvmeCmd = NULL;
2751 
2752         spin_unlock(&lpfc_ncmd->buf_lock);
2753         nCmd->done(nCmd);
2754 
2755         /* Call release with XB=1 to queue the IO into the abort list. */
2756         lpfc_release_nvme_buf(phba, lpfc_ncmd);
2757 #endif
2758 }
