root/drivers/scsi/lpfc/lpfc_nvmet.c

DEFINITIONS

This source file includes the following definitions.
  1. lpfc_nvmet_cmd_template
  2. lpfc_nvmet_get_ctx_for_xri
  3. lpfc_nvmet_get_ctx_for_oxid
  4. lpfc_nvmet_defer_release
  5. lpfc_nvmet_xmt_ls_rsp_cmp
  6. lpfc_nvmet_ctxbuf_post
  7. lpfc_nvmet_ktime
  8. lpfc_nvmet_xmt_fcp_op_cmp
  9. lpfc_nvmet_xmt_ls_rsp
  10. lpfc_nvmet_xmt_fcp_op
  11. lpfc_nvmet_targetport_delete
  12. lpfc_nvmet_xmt_fcp_abort
  13. lpfc_nvmet_xmt_fcp_release
  14. lpfc_nvmet_defer_rcv
  15. lpfc_nvmet_discovery_event
  16. __lpfc_nvmet_clean_io_for_cpu
  17. lpfc_nvmet_cleanup_io_context
  18. lpfc_nvmet_setup_io_context
  19. lpfc_nvmet_create_targetport
  20. lpfc_nvmet_update_targetport
  21. lpfc_sli4_nvmet_xri_aborted
  22. lpfc_nvmet_rcv_unsol_abort
  23. lpfc_nvmet_wqfull_flush
  24. lpfc_nvmet_wqfull_process
  25. lpfc_nvmet_destroy_targetport
  26. lpfc_nvmet_unsol_ls_buffer
  27. lpfc_nvmet_process_rcv_fcp_req
  28. lpfc_nvmet_fcp_rqst_defer_work
  29. lpfc_nvmet_replenish_context
  30. lpfc_nvmet_unsol_fcp_buffer
  31. lpfc_nvmet_unsol_ls_event
  32. lpfc_nvmet_unsol_fcp_event
  33. lpfc_nvmet_prep_ls_wqe
  34. lpfc_nvmet_prep_fcp_wqe
  35. lpfc_nvmet_sol_fcp_abort_cmp
  36. lpfc_nvmet_unsol_fcp_abort_cmp
  37. lpfc_nvmet_xmt_ls_abort_cmp
  38. lpfc_nvmet_unsol_issue_abort
  39. lpfc_nvmet_sol_fcp_issue_abort
  40. lpfc_nvmet_unsol_fcp_issue_abort
  41. lpfc_nvmet_unsol_ls_issue_abort

   1 /*******************************************************************
   2  * This file is part of the Emulex Linux Device Driver for         *
   3  * Fibre Channel Host Bus Adapters.                                *
   4  * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
   5  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
   6  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
   7  * EMULEX and SLI are trademarks of Emulex.                        *
   8  * www.broadcom.com                                                *
   9  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
  10  *                                                                 *
  11  * This program is free software; you can redistribute it and/or   *
  12  * modify it under the terms of version 2 of the GNU General       *
  13  * Public License as published by the Free Software Foundation.    *
  14  * This program is distributed in the hope that it will be useful. *
  15  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
  16  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
  17  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
  18  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
  19  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
  20  * more details, a copy of which can be found in the file COPYING  *
  21  * included with this package.                                     *
  22  ********************************************************************/
  23 #include <linux/pci.h>
  24 #include <linux/slab.h>
  25 #include <linux/interrupt.h>
  26 #include <linux/delay.h>
  27 #include <asm/unaligned.h>
  28 #include <linux/crc-t10dif.h>
  29 #include <net/checksum.h>
  30 
  31 #include <scsi/scsi.h>
  32 #include <scsi/scsi_device.h>
  33 #include <scsi/scsi_eh.h>
  34 #include <scsi/scsi_host.h>
  35 #include <scsi/scsi_tcq.h>
  36 #include <scsi/scsi_transport_fc.h>
  37 #include <scsi/fc/fc_fs.h>
  38 
  39 #include <linux/nvme.h>
  40 #include <linux/nvme-fc-driver.h>
  41 #include <linux/nvme-fc.h>
  42 
  43 #include "lpfc_version.h"
  44 #include "lpfc_hw4.h"
  45 #include "lpfc_hw.h"
  46 #include "lpfc_sli.h"
  47 #include "lpfc_sli4.h"
  48 #include "lpfc_nl.h"
  49 #include "lpfc_disc.h"
  50 #include "lpfc.h"
  51 #include "lpfc_scsi.h"
  52 #include "lpfc_nvme.h"
  53 #include "lpfc_nvmet.h"
  54 #include "lpfc_logmsg.h"
  55 #include "lpfc_crtn.h"
  56 #include "lpfc_vport.h"
  57 #include "lpfc_debugfs.h"
  58 
  59 static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
  60                                                  struct lpfc_nvmet_rcv_ctx *,
  61                                                  dma_addr_t rspbuf,
  62                                                  uint16_t rspsize);
  63 static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
  64                                                   struct lpfc_nvmet_rcv_ctx *);
  65 static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
  66                                           struct lpfc_nvmet_rcv_ctx *,
  67                                           uint32_t, uint16_t);
  68 static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
  69                                             struct lpfc_nvmet_rcv_ctx *,
  70                                             uint32_t, uint16_t);
  71 static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
  72                                            struct lpfc_nvmet_rcv_ctx *,
  73                                            uint32_t, uint16_t);
  74 static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
  75                                     struct lpfc_nvmet_rcv_ctx *);
  76 static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);
  77 
  78 static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);
  79 
  80 static union lpfc_wqe128 lpfc_tsend_cmd_template;
  81 static union lpfc_wqe128 lpfc_treceive_cmd_template;
  82 static union lpfc_wqe128 lpfc_trsp_cmd_template;
  83 
  84 /* Setup WQE templates for NVME IOs */
  85 void
  86 lpfc_nvmet_cmd_template(void)
  87 {
  88         union lpfc_wqe128 *wqe;
  89 
  90         /* TSEND template */
  91         wqe = &lpfc_tsend_cmd_template;
  92         memset(wqe, 0, sizeof(union lpfc_wqe128));
  93 
  94         /* Word 0, 1, 2 - BDE is variable */
  95 
  96         /* Word 3 - payload_offset_len is zero */
  97 
  98         /* Word 4 - relative_offset is variable */
  99 
 100         /* Word 5 - is zero */
 101 
 102         /* Word 6 - ctxt_tag, xri_tag is variable */
 103 
 104         /* Word 7 - wqe_ar is variable */
 105         bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
 106         bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
 107         bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
 108         bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
 109         bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
 110 
 111         /* Word 8 - abort_tag is variable */
 112 
 113         /* Word 9  - reqtag, rcvoxid is variable */
 114 
 115         /* Word 10 - wqes, xc is variable */
 116         bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
 117         bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
 118         bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
 119         bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
 120         bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
 121         bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);
 122 
 123         /* Word 11 - sup, irsp, irsplen is variable */
 124         bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
 125         bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
 126         bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
 127         bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
 128         bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
 129         bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);
 130 
 131         /* Word 12 - fcp_data_len is variable */
 132 
 133         /* Word 13, 14, 15 - PBDE is zero */
 134 
 135         /* TRECEIVE template */
 136         wqe = &lpfc_treceive_cmd_template;
 137         memset(wqe, 0, sizeof(union lpfc_wqe128));
 138 
 139         /* Word 0, 1, 2 - BDE is variable */
 140 
 141         /* Word 3 */
 142         wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;
 143 
 144         /* Word 4 - relative_offset is variable */
 145 
 146         /* Word 5 - is zero */
 147 
 148         /* Word 6 - ctxt_tag, xri_tag is variable */
 149 
 150         /* Word 7 */
 151         bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
 152         bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
 153         bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
 154         bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
 155         bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
 156 
 157         /* Word 8 - abort_tag is variable */
 158 
 159         /* Word 9  - reqtag, rcvoxid is variable */
 160 
 161         /* Word 10 - xc is variable */
 162         bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
 163         bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
 164         bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
 165         bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
 166         bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
  167         bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 1);
 168 
 169         /* Word 11 - pbde is variable */
 170         bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
 171         bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
 172         bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
 173         bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
 174         bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
 175         bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);
 176 
 177         /* Word 12 - fcp_data_len is variable */
 178 
 179         /* Word 13, 14, 15 - PBDE is variable */
 180 
 181         /* TRSP template */
 182         wqe = &lpfc_trsp_cmd_template;
 183         memset(wqe, 0, sizeof(union lpfc_wqe128));
 184 
 185         /* Word 0, 1, 2 - BDE is variable */
 186 
 187         /* Word 3 - response_len is variable */
 188 
 189         /* Word 4, 5 - is zero */
 190 
 191         /* Word 6 - ctxt_tag, xri_tag is variable */
 192 
 193         /* Word 7 */
 194         bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
 195         bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
 196         bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
 197         bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
 198         bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */
 199 
 200         /* Word 8 - abort_tag is variable */
 201 
 202         /* Word 9  - reqtag is variable */
 203 
 204         /* Word 10 wqes, xc is variable */
 205         bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
 206         bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
 207         bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
 208         bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
 209         bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
 210         bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);
 211 
 212         /* Word 11 irsp, irsplen is variable */
 213         bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
 214         bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
 215         bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
 216         bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
 217         bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
 218         bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);
 219 
 220         /* Word 12, 13, 14, 15 - is zero */
 221 }
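
/*
 * For illustration only: the prep routines later in this file start from one
 * of the templates above and fill in just the words marked "variable" in the
 * comments.  A minimal, assumed sketch (xri, iotag and transfer_len are
 * placeholder names; see lpfc_nvmet_prep_fcp_wqe() for the real per-IO setup):
 *
 *        memcpy(wqe, &lpfc_tsend_cmd_template, sizeof(union lpfc_wqe128));
 *        bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com, xri);
 *        bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, iotag);
 *        wqe->fcp_tsend.fcp_data_len = transfer_len;
 */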
 222 
 223 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
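/*
 * Context lookup helpers for the abort/XRI-aborted paths: walk the
 * t_active_ctx_list under t_active_list_lock and return the receive context
 * matching either the exchange XRI or the OXID/SID pair, or NULL if no
 * matching context is active.
 */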
 224 static struct lpfc_nvmet_rcv_ctx *
 225 lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
 226 {
 227         struct lpfc_nvmet_rcv_ctx *ctxp;
 228         unsigned long iflag;
 229         bool found = false;
 230 
 231         spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
 232         list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
 233                 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
 234                         continue;
 235 
 236                 found = true;
 237                 break;
 238         }
 239         spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
 240         if (found)
 241                 return ctxp;
 242 
 243         return NULL;
 244 }
 245 
 246 static struct lpfc_nvmet_rcv_ctx *
 247 lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
 248 {
 249         struct lpfc_nvmet_rcv_ctx *ctxp;
 250         unsigned long iflag;
 251         bool found = false;
 252 
 253         spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
 254         list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
 255                 if (ctxp->oxid != oxid || ctxp->sid != sid)
 256                         continue;
 257 
 258                 found = true;
 259                 break;
 260         }
 261         spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
 262         if (found)
 263                 return ctxp;
 264 
 265         return NULL;
 266 }
 267 #endif
 268 
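/**
 * lpfc_nvmet_defer_release - Defer the final release of a receive context
 * @phba: Pointer to HBA context object.
 * @ctxp: Receive context whose release is being deferred.
 *
 * Moves the context from the active context list to the aborted-context
 * list so that the ABTS/XRI-aborted completion path performs the final
 * release.  Caller must hold ctxp->ctxlock.
 **/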
 269 static void
 270 lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
 271 {
 272         lockdep_assert_held(&ctxp->ctxlock);
 273 
 274         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
 275                         "6313 NVMET Defer ctx release oxid x%x flg x%x\n",
 276                         ctxp->oxid, ctxp->flag);
 277 
 278         if (ctxp->flag & LPFC_NVMET_CTX_RLS)
 279                 return;
 280 
 281         ctxp->flag |= LPFC_NVMET_CTX_RLS;
 282         spin_lock(&phba->sli4_hba.t_active_list_lock);
 283         list_del(&ctxp->list);
 284         spin_unlock(&phba->sli4_hba.t_active_list_lock);
 285         spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 286         list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
 287         spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 288 }
 289 
 290 /**
 291  * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
 292  * @phba: Pointer to HBA context object.
 293  * @cmdwqe: Pointer to driver command WQE object.
 294  * @wcqe: Pointer to driver response CQE object.
 295  *
 296  * The function is called from SLI ring event handler with no
  297  * lock held. This function is the completion handler for NVME LS commands.
 298  * The function frees memory resources used for the NVME commands.
 299  **/
 300 static void
 301 lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
 302                           struct lpfc_wcqe_complete *wcqe)
 303 {
 304         struct lpfc_nvmet_tgtport *tgtp;
 305         struct nvmefc_tgt_ls_req *rsp;
 306         struct lpfc_nvmet_rcv_ctx *ctxp;
 307         uint32_t status, result;
 308 
 309         status = bf_get(lpfc_wcqe_c_status, wcqe);
 310         result = wcqe->parameter;
 311         ctxp = cmdwqe->context2;
 312 
 313         if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
 314                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
 315                                 "6410 NVMET LS cmpl state mismatch IO x%x: "
 316                                 "%d %d\n",
 317                                 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
 318         }
 319 
 320         if (!phba->targetport)
 321                 goto out;
 322 
 323         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
 324 
 325         if (tgtp) {
 326                 if (status) {
 327                         atomic_inc(&tgtp->xmt_ls_rsp_error);
 328                         if (result == IOERR_ABORT_REQUESTED)
 329                                 atomic_inc(&tgtp->xmt_ls_rsp_aborted);
 330                         if (bf_get(lpfc_wcqe_c_xb, wcqe))
 331                                 atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
 332                 } else {
 333                         atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
 334                 }
 335         }
 336 
 337 out:
 338         rsp = &ctxp->ctx.ls_req;
 339 
 340         lpfc_nvmeio_data(phba, "NVMET LS  CMPL: xri x%x stat x%x result x%x\n",
 341                          ctxp->oxid, status, result);
 342 
 343         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
 344                         "6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
 345                         status, result, ctxp->oxid);
 346 
 347         lpfc_nlp_put(cmdwqe->context1);
 348         cmdwqe->context2 = NULL;
 349         cmdwqe->context3 = NULL;
 350         lpfc_sli_release_iocbq(phba, cmdwqe);
 351         rsp->done(rsp);
 352         kfree(ctxp);
 353 }
 354 
  355 /**
  356  * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
  357  * @phba: Pointer to HBA the buffer is associated with.
  358  * @ctx_buf: Context buffer to clean up and repost.
  359  *
  360  * Description: Frees the given context buffer in the appropriate way:
  361  * either by handing it to a deferred unsolicited command waiting on the
  362  * nvmet_io_wait_list or by returning it to its per-CPU context list for reuse.
  363  *
  364  * Notes: Takes the context lock and several sli4_hba list locks internally.
  365  *
  366  * Returns: None
  367  **/
 368 void
 369 lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
 370 {
 371 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
 372         struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
 373         struct lpfc_nvmet_tgtport *tgtp;
 374         struct fc_frame_header *fc_hdr;
 375         struct rqb_dmabuf *nvmebuf;
 376         struct lpfc_nvmet_ctx_info *infop;
 377         uint32_t size, oxid, sid;
 378         int cpu;
 379         unsigned long iflag;
 380 
 381         if (ctxp->txrdy) {
 382                 dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
 383                               ctxp->txrdy_phys);
 384                 ctxp->txrdy = NULL;
 385                 ctxp->txrdy_phys = 0;
 386         }
 387 
 388         if (ctxp->state == LPFC_NVMET_STE_FREE) {
 389                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
 390                                 "6411 NVMET free, already free IO x%x: %d %d\n",
 391                                 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
 392         }
 393 
 394         if (ctxp->rqb_buffer) {
 395                 spin_lock_irqsave(&ctxp->ctxlock, iflag);
 396                 nvmebuf = ctxp->rqb_buffer;
 397                 /* check if freed in another path whilst acquiring lock */
 398                 if (nvmebuf) {
 399                         ctxp->rqb_buffer = NULL;
 400                         if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
 401                                 ctxp->flag &= ~LPFC_NVMET_CTX_REUSE_WQ;
 402                                 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
 403                                 nvmebuf->hrq->rqbp->rqb_free_buffer(phba,
 404                                                                     nvmebuf);
 405                         } else {
 406                                 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
 407                                 /* repost */
 408                                 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
 409                         }
 410                 } else {
 411                         spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
 412                 }
 413         }
 414         ctxp->state = LPFC_NVMET_STE_FREE;
 415 
 416         spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
 417         if (phba->sli4_hba.nvmet_io_wait_cnt) {
 418                 list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
 419                                  nvmebuf, struct rqb_dmabuf,
 420                                  hbuf.list);
 421                 phba->sli4_hba.nvmet_io_wait_cnt--;
 422                 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
 423                                        iflag);
 424 
 425                 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
 426                 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
 427                 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
 428                 size = nvmebuf->bytes_recv;
 429                 sid = sli4_sid_from_fc_hdr(fc_hdr);
 430 
 431                 ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
 432                 ctxp->wqeq = NULL;
 433                 ctxp->txrdy = NULL;
 434                 ctxp->offset = 0;
 435                 ctxp->phba = phba;
 436                 ctxp->size = size;
 437                 ctxp->oxid = oxid;
 438                 ctxp->sid = sid;
 439                 ctxp->state = LPFC_NVMET_STE_RCV;
 440                 ctxp->entry_cnt = 1;
 441                 ctxp->flag = 0;
 442                 ctxp->ctxbuf = ctx_buf;
 443                 ctxp->rqb_buffer = (void *)nvmebuf;
 444                 spin_lock_init(&ctxp->ctxlock);
 445 
 446 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
  447                 /* NOTE: isr time stamp is stale when context is re-assigned */
 448                 if (ctxp->ts_isr_cmd) {
 449                         ctxp->ts_cmd_nvme = 0;
 450                         ctxp->ts_nvme_data = 0;
 451                         ctxp->ts_data_wqput = 0;
 452                         ctxp->ts_isr_data = 0;
 453                         ctxp->ts_data_nvme = 0;
 454                         ctxp->ts_nvme_status = 0;
 455                         ctxp->ts_status_wqput = 0;
 456                         ctxp->ts_isr_status = 0;
 457                         ctxp->ts_status_nvme = 0;
 458                 }
 459 #endif
 460                 atomic_inc(&tgtp->rcv_fcp_cmd_in);
 461 
 462                 /* Indicate that a replacement buffer has been posted */
 463                 spin_lock_irqsave(&ctxp->ctxlock, iflag);
 464                 ctxp->flag |= LPFC_NVMET_CTX_REUSE_WQ;
 465                 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
 466 
 467                 if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
 468                         atomic_inc(&tgtp->rcv_fcp_cmd_drop);
 469                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
 470                                         "6181 Unable to queue deferred work "
 471                                         "for oxid x%x. "
 472                                         "FCP Drop IO [x%x x%x x%x]\n",
 473                                         ctxp->oxid,
 474                                         atomic_read(&tgtp->rcv_fcp_cmd_in),
 475                                         atomic_read(&tgtp->rcv_fcp_cmd_out),
 476                                         atomic_read(&tgtp->xmt_fcp_release));
 477 
 478                         spin_lock_irqsave(&ctxp->ctxlock, iflag);
 479                         lpfc_nvmet_defer_release(phba, ctxp);
 480                         spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
 481                         lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
 482                 }
 483                 return;
 484         }
 485         spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
 486 
 487         /*
 488          * Use the CPU context list, from the MRQ the IO was received on
 489          * (ctxp->idx), to save context structure.
 490          */
 491         spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
 492         list_del_init(&ctxp->list);
 493         spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
 494         cpu = raw_smp_processor_id();
 495         infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
 496         spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
 497         list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
 498         infop->nvmet_ctx_list_cnt++;
 499         spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
 500 #endif
 501 }
 502 
 503 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 504 static void
 505 lpfc_nvmet_ktime(struct lpfc_hba *phba,
 506                  struct lpfc_nvmet_rcv_ctx *ctxp)
 507 {
 508         uint64_t seg1, seg2, seg3, seg4, seg5;
 509         uint64_t seg6, seg7, seg8, seg9, seg10;
 510         uint64_t segsum;
 511 
 512         if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
 513             !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
 514             !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
 515             !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
 516             !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
 517                 return;
 518 
 519         if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
 520                 return;
 521         if (ctxp->ts_isr_cmd  > ctxp->ts_cmd_nvme)
 522                 return;
 523         if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
 524                 return;
 525         if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
 526                 return;
 527         if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
 528                 return;
 529         if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
 530                 return;
 531         if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
 532                 return;
 533         if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
 534                 return;
 535         if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
 536                 return;
 537         if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
 538                 return;
 539         /*
 540          * Segment 1 - Time from FCP command received by MSI-X ISR
 541          * to FCP command is passed to NVME Layer.
 542          * Segment 2 - Time from FCP command payload handed
 543          * off to NVME Layer to Driver receives a Command op
 544          * from NVME Layer.
 545          * Segment 3 - Time from Driver receives a Command op
 546          * from NVME Layer to Command is put on WQ.
 547          * Segment 4 - Time from Driver WQ put is done
 548          * to MSI-X ISR for Command cmpl.
 549          * Segment 5 - Time from MSI-X ISR for Command cmpl to
 550          * Command cmpl is passed to NVME Layer.
 551          * Segment 6 - Time from Command cmpl is passed to NVME
 552          * Layer to Driver receives a RSP op from NVME Layer.
 553          * Segment 7 - Time from Driver receives a RSP op from
 554          * NVME Layer to WQ put is done on TRSP FCP Status.
 555          * Segment 8 - Time from Driver WQ put is done on TRSP
 556          * FCP Status to MSI-X ISR for TRSP cmpl.
 557          * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
 558          * TRSP cmpl is passed to NVME Layer.
 559          * Segment 10 - Time from FCP command received by
 560          * MSI-X ISR to command is completed on wire.
 561          * (Segments 1 thru 8) for READDATA / WRITEDATA
 562          * (Segments 1 thru 4) for READDATA_RSP
 563          */
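        /*
         * Worked example (values assumed for illustration, in ns): with
         * ts_isr_cmd = 0, ts_cmd_nvme = 10, ts_nvme_data = 25 and
         * ts_data_wqput = 32, the running sum below yields seg1 = 10,
         * seg2 = 25 - 10 = 15 and seg3 = 32 - (10 + 15) = 7, i.e. each
         * segment ends up as the delta between adjacent timestamps even
         * though every raw value is taken relative to ts_isr_cmd.
         */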
 564         seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
 565         segsum = seg1;
 566 
 567         seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
 568         if (segsum > seg2)
 569                 return;
 570         seg2 -= segsum;
 571         segsum += seg2;
 572 
 573         seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
 574         if (segsum > seg3)
 575                 return;
 576         seg3 -= segsum;
 577         segsum += seg3;
 578 
 579         seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
 580         if (segsum > seg4)
 581                 return;
 582         seg4 -= segsum;
 583         segsum += seg4;
 584 
 585         seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
 586         if (segsum > seg5)
 587                 return;
 588         seg5 -= segsum;
 589         segsum += seg5;
 590 
 591 
 592         /* For auto rsp commands seg6 thru seg10 will be 0 */
 593         if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
 594                 seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
 595                 if (segsum > seg6)
 596                         return;
 597                 seg6 -= segsum;
 598                 segsum += seg6;
 599 
 600                 seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
 601                 if (segsum > seg7)
 602                         return;
 603                 seg7 -= segsum;
 604                 segsum += seg7;
 605 
 606                 seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
 607                 if (segsum > seg8)
 608                         return;
 609                 seg8 -= segsum;
 610                 segsum += seg8;
 611 
 612                 seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
 613                 if (segsum > seg9)
 614                         return;
 615                 seg9 -= segsum;
 616                 segsum += seg9;
 617 
 618                 if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
 619                         return;
 620                 seg10 = (ctxp->ts_isr_status -
 621                         ctxp->ts_isr_cmd);
 622         } else {
 623                 if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
 624                         return;
 625                 seg6 =  0;
 626                 seg7 =  0;
 627                 seg8 =  0;
 628                 seg9 =  0;
 629                 seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
 630         }
 631 
 632         phba->ktime_seg1_total += seg1;
 633         if (seg1 < phba->ktime_seg1_min)
 634                 phba->ktime_seg1_min = seg1;
 635         else if (seg1 > phba->ktime_seg1_max)
 636                 phba->ktime_seg1_max = seg1;
 637 
 638         phba->ktime_seg2_total += seg2;
 639         if (seg2 < phba->ktime_seg2_min)
 640                 phba->ktime_seg2_min = seg2;
 641         else if (seg2 > phba->ktime_seg2_max)
 642                 phba->ktime_seg2_max = seg2;
 643 
 644         phba->ktime_seg3_total += seg3;
 645         if (seg3 < phba->ktime_seg3_min)
 646                 phba->ktime_seg3_min = seg3;
 647         else if (seg3 > phba->ktime_seg3_max)
 648                 phba->ktime_seg3_max = seg3;
 649 
 650         phba->ktime_seg4_total += seg4;
 651         if (seg4 < phba->ktime_seg4_min)
 652                 phba->ktime_seg4_min = seg4;
 653         else if (seg4 > phba->ktime_seg4_max)
 654                 phba->ktime_seg4_max = seg4;
 655 
 656         phba->ktime_seg5_total += seg5;
 657         if (seg5 < phba->ktime_seg5_min)
 658                 phba->ktime_seg5_min = seg5;
 659         else if (seg5 > phba->ktime_seg5_max)
 660                 phba->ktime_seg5_max = seg5;
 661 
 662         phba->ktime_data_samples++;
 663         if (!seg6)
 664                 goto out;
 665 
 666         phba->ktime_seg6_total += seg6;
 667         if (seg6 < phba->ktime_seg6_min)
 668                 phba->ktime_seg6_min = seg6;
 669         else if (seg6 > phba->ktime_seg6_max)
 670                 phba->ktime_seg6_max = seg6;
 671 
 672         phba->ktime_seg7_total += seg7;
 673         if (seg7 < phba->ktime_seg7_min)
 674                 phba->ktime_seg7_min = seg7;
 675         else if (seg7 > phba->ktime_seg7_max)
 676                 phba->ktime_seg7_max = seg7;
 677 
 678         phba->ktime_seg8_total += seg8;
 679         if (seg8 < phba->ktime_seg8_min)
 680                 phba->ktime_seg8_min = seg8;
 681         else if (seg8 > phba->ktime_seg8_max)
 682                 phba->ktime_seg8_max = seg8;
 683 
 684         phba->ktime_seg9_total += seg9;
 685         if (seg9 < phba->ktime_seg9_min)
 686                 phba->ktime_seg9_min = seg9;
 687         else if (seg9 > phba->ktime_seg9_max)
 688                 phba->ktime_seg9_max = seg9;
 689 out:
 690         phba->ktime_seg10_total += seg10;
 691         if (seg10 < phba->ktime_seg10_min)
 692                 phba->ktime_seg10_min = seg10;
 693         else if (seg10 > phba->ktime_seg10_max)
 694                 phba->ktime_seg10_max = seg10;
 695         phba->ktime_status_samples++;
 696 }
 697 #endif
 698 
 699 /**
 700  * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
 701  * @phba: Pointer to HBA context object.
 702  * @cmdwqe: Pointer to driver command WQE object.
 703  * @wcqe: Pointer to driver response CQE object.
 704  *
 705  * The function is called from SLI ring event handler with no
  706  * lock held. This function is the completion handler for NVME FCP commands.
 707  * The function frees memory resources used for the NVME commands.
 708  **/
 709 static void
 710 lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
 711                           struct lpfc_wcqe_complete *wcqe)
 712 {
 713         struct lpfc_nvmet_tgtport *tgtp;
 714         struct nvmefc_tgt_fcp_req *rsp;
 715         struct lpfc_nvmet_rcv_ctx *ctxp;
 716         uint32_t status, result, op, start_clean, logerr;
 717 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 718         uint32_t id;
 719 #endif
 720 
 721         ctxp = cmdwqe->context2;
 722         ctxp->flag &= ~LPFC_NVMET_IO_INP;
 723 
 724         rsp = &ctxp->ctx.fcp_req;
 725         op = rsp->op;
 726 
 727         status = bf_get(lpfc_wcqe_c_status, wcqe);
 728         result = wcqe->parameter;
 729 
 730         if (phba->targetport)
 731                 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
 732         else
 733                 tgtp = NULL;
 734 
 735         lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
 736                          ctxp->oxid, op, status);
 737 
 738         if (status) {
 739                 rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
 740                 rsp->transferred_length = 0;
 741                 if (tgtp) {
 742                         atomic_inc(&tgtp->xmt_fcp_rsp_error);
 743                         if (result == IOERR_ABORT_REQUESTED)
 744                                 atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
 745                 }
 746 
 747                 logerr = LOG_NVME_IOERR;
 748 
  749                 /* pick up SLI4 exchange busy condition */
 750                 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
 751                         ctxp->flag |= LPFC_NVMET_XBUSY;
 752                         logerr |= LOG_NVME_ABTS;
 753                         if (tgtp)
 754                                 atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
 755 
 756                 } else {
 757                         ctxp->flag &= ~LPFC_NVMET_XBUSY;
 758                 }
 759 
 760                 lpfc_printf_log(phba, KERN_INFO, logerr,
 761                                 "6315 IO Error Cmpl oxid: x%x xri: x%x %x/%x "
 762                                 "XBUSY:x%x\n",
 763                                 ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag,
 764                                 status, result, ctxp->flag);
 765 
 766         } else {
 767                 rsp->fcp_error = NVME_SC_SUCCESS;
 768                 if (op == NVMET_FCOP_RSP)
 769                         rsp->transferred_length = rsp->rsplen;
 770                 else
 771                         rsp->transferred_length = rsp->transfer_length;
 772                 if (tgtp)
 773                         atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
 774         }
 775 
 776         if ((op == NVMET_FCOP_READDATA_RSP) ||
 777             (op == NVMET_FCOP_RSP)) {
 778                 /* Sanity check */
 779                 ctxp->state = LPFC_NVMET_STE_DONE;
 780                 ctxp->entry_cnt++;
 781 
 782 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 783                 if (ctxp->ts_cmd_nvme) {
 784                         if (rsp->op == NVMET_FCOP_READDATA_RSP) {
 785                                 ctxp->ts_isr_data =
 786                                         cmdwqe->isr_timestamp;
 787                                 ctxp->ts_data_nvme =
 788                                         ktime_get_ns();
 789                                 ctxp->ts_nvme_status =
 790                                         ctxp->ts_data_nvme;
 791                                 ctxp->ts_status_wqput =
 792                                         ctxp->ts_data_nvme;
 793                                 ctxp->ts_isr_status =
 794                                         ctxp->ts_data_nvme;
 795                                 ctxp->ts_status_nvme =
 796                                         ctxp->ts_data_nvme;
 797                         } else {
 798                                 ctxp->ts_isr_status =
 799                                         cmdwqe->isr_timestamp;
 800                                 ctxp->ts_status_nvme =
 801                                         ktime_get_ns();
 802                         }
 803                 }
 804 #endif
 805                 rsp->done(rsp);
 806 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 807                 if (ctxp->ts_cmd_nvme)
 808                         lpfc_nvmet_ktime(phba, ctxp);
 809 #endif
 810                 /* lpfc_nvmet_xmt_fcp_release() will recycle the context */
 811         } else {
 812                 ctxp->entry_cnt++;
 813                 start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
 814                 memset(((char *)cmdwqe) + start_clean, 0,
 815                        (sizeof(struct lpfc_iocbq) - start_clean));
 816 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 817                 if (ctxp->ts_cmd_nvme) {
 818                         ctxp->ts_isr_data = cmdwqe->isr_timestamp;
 819                         ctxp->ts_data_nvme = ktime_get_ns();
 820                 }
 821 #endif
 822                 rsp->done(rsp);
 823         }
 824 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 825         if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
 826                 id = raw_smp_processor_id();
 827                 if (id < LPFC_CHECK_CPU_CNT) {
 828                         if (ctxp->cpu != id)
 829                                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
 830                                                 "6704 CPU Check cmdcmpl: "
 831                                                 "cpu %d expect %d\n",
 832                                                 id, ctxp->cpu);
 833                         phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_cmpl_io[id]++;
 834                 }
 835         }
 836 #endif
 837 }
 838 
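/**
 * lpfc_nvmet_xmt_ls_rsp - Transmit the response for a received NVME LS request
 * @tgtport: Pointer to the target port the LS request was received on.
 * @rsp: Pointer to the transport's LS response descriptor.
 *
 * The .xmt_ls_rsp entry point of lpfc_tgttemplate.  Builds an XMIT_SEQUENCE
 * WQE for the response payload via lpfc_nvmet_prep_ls_wqe() and issues it;
 * completion is reported through lpfc_nvmet_xmt_ls_rsp_cmp().  On failure the
 * exchange is aborted and the receive buffer is freed.
 *
 * Returns 0 on success, otherwise a negative errno.
 **/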
 839 static int
 840 lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
 841                       struct nvmefc_tgt_ls_req *rsp)
 842 {
 843         struct lpfc_nvmet_rcv_ctx *ctxp =
 844                 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
 845         struct lpfc_hba *phba = ctxp->phba;
 846         struct hbq_dmabuf *nvmebuf =
 847                 (struct hbq_dmabuf *)ctxp->rqb_buffer;
 848         struct lpfc_iocbq *nvmewqeq;
 849         struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
 850         struct lpfc_dmabuf dmabuf;
 851         struct ulp_bde64 bpl;
 852         int rc;
 853 
 854         if (phba->pport->load_flag & FC_UNLOADING)
 855                 return -ENODEV;
 856 
 857         if (phba->pport->load_flag & FC_UNLOADING)
 858                 return -ENODEV;
 859 
 860         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
 861                         "6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);
 862 
 863         if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
 864             (ctxp->entry_cnt != 1)) {
 865                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
 866                                 "6412 NVMET LS rsp state mismatch "
 867                                 "oxid x%x: %d %d\n",
 868                                 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
 869         }
 870         ctxp->state = LPFC_NVMET_STE_LS_RSP;
 871         ctxp->entry_cnt++;
 872 
 873         nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
 874                                       rsp->rsplen);
 875         if (nvmewqeq == NULL) {
 876                 atomic_inc(&nvmep->xmt_ls_drop);
 877                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
 878                                 "6150 LS Drop IO x%x: Prep\n",
 879                                 ctxp->oxid);
 880                 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
 881                 atomic_inc(&nvmep->xmt_ls_abort);
 882                 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
 883                                                 ctxp->sid, ctxp->oxid);
 884                 return -ENOMEM;
 885         }
 886 
 887         /* Save numBdes for bpl2sgl */
 888         nvmewqeq->rsvd2 = 1;
 889         nvmewqeq->hba_wqidx = 0;
 890         nvmewqeq->context3 = &dmabuf;
 891         dmabuf.virt = &bpl;
 892         bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
 893         bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
 894         bpl.tus.f.bdeSize = rsp->rsplen;
 895         bpl.tus.f.bdeFlags = 0;
 896         bpl.tus.w = le32_to_cpu(bpl.tus.w);
 897 
 898         nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
 899         nvmewqeq->iocb_cmpl = NULL;
 900         nvmewqeq->context2 = ctxp;
 901 
 902         lpfc_nvmeio_data(phba, "NVMET LS  RESP: xri x%x wqidx x%x len x%x\n",
 903                          ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);
 904 
 905         rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
 906         if (rc == WQE_SUCCESS) {
 907                 /*
 908                  * Okay to repost buffer here, but wait till cmpl
 909                  * before freeing ctxp and iocbq.
 910                  */
 911                 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
 912                 atomic_inc(&nvmep->xmt_ls_rsp);
 913                 return 0;
 914         }
 915         /* Give back resources */
 916         atomic_inc(&nvmep->xmt_ls_drop);
 917         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
 918                         "6151 LS Drop IO x%x: Issue %d\n",
 919                         ctxp->oxid, rc);
 920 
 921         lpfc_nlp_put(nvmewqeq->context1);
 922 
 923         lpfc_in_buf_free(phba, &nvmebuf->dbuf);
 924         atomic_inc(&nvmep->xmt_ls_abort);
 925         lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
 926         return -ENXIO;
 927 }
 928 
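/**
 * lpfc_nvmet_xmt_fcp_op - Issue a target FCP operation for an exchange
 * @tgtport: Pointer to the target port the IO was received on.
 * @rsp: Pointer to the transport's FCP request descriptor.
 *
 * The .fcp_op entry point of lpfc_tgttemplate.  Builds the TSEND, TRECEIVE or
 * TRSP WQE for the requested operation via lpfc_nvmet_prep_fcp_wqe() and posts
 * it to the IO's hardware queue.  If the WQ is full, the WQE is parked on the
 * wqfull list and re-issued after a WQE release CQE.
 *
 * Returns 0 if the WQE was issued or queued, otherwise a negative errno.
 **/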
 929 static int
 930 lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
 931                       struct nvmefc_tgt_fcp_req *rsp)
 932 {
 933         struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
 934         struct lpfc_nvmet_rcv_ctx *ctxp =
 935                 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
 936         struct lpfc_hba *phba = ctxp->phba;
 937         struct lpfc_queue *wq;
 938         struct lpfc_iocbq *nvmewqeq;
 939         struct lpfc_sli_ring *pring;
 940         unsigned long iflags;
 941         int rc;
 942 
 943         if (phba->pport->load_flag & FC_UNLOADING) {
 944                 rc = -ENODEV;
 945                 goto aerr;
 946         }
 947 
 948         if (phba->pport->load_flag & FC_UNLOADING) {
 949                 rc = -ENODEV;
 950                 goto aerr;
 951         }
 952 
 953 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 954         if (ctxp->ts_cmd_nvme) {
 955                 if (rsp->op == NVMET_FCOP_RSP)
 956                         ctxp->ts_nvme_status = ktime_get_ns();
 957                 else
 958                         ctxp->ts_nvme_data = ktime_get_ns();
 959         }
 960 
 961         /* Setup the hdw queue if not already set */
 962         if (!ctxp->hdwq)
 963                 ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];
 964 
 965         if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
 966                 int id = raw_smp_processor_id();
 967                 if (id < LPFC_CHECK_CPU_CNT) {
 968                         if (rsp->hwqid != id)
 969                                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
 970                                                 "6705 CPU Check OP: "
 971                                                 "cpu %d expect %d\n",
 972                                                 id, rsp->hwqid);
 973                         phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_xmt_io[id]++;
 974                 }
 975                 ctxp->cpu = id; /* Setup cpu for cmpl check */
 976         }
 977 #endif
 978 
 979         /* Sanity check */
 980         if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
 981             (ctxp->state == LPFC_NVMET_STE_ABORT)) {
 982                 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
 983                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
 984                                 "6102 IO oxid x%x aborted\n",
 985                                 ctxp->oxid);
 986                 rc = -ENXIO;
 987                 goto aerr;
 988         }
 989 
 990         nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
 991         if (nvmewqeq == NULL) {
 992                 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
 993                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
 994                                 "6152 FCP Drop IO x%x: Prep\n",
 995                                 ctxp->oxid);
 996                 rc = -ENXIO;
 997                 goto aerr;
 998         }
 999 
1000         nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
1001         nvmewqeq->iocb_cmpl = NULL;
1002         nvmewqeq->context2 = ctxp;
1003         nvmewqeq->iocb_flag |=  LPFC_IO_NVMET;
1004         ctxp->wqeq->hba_wqidx = rsp->hwqid;
1005 
1006         lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
1007                          ctxp->oxid, rsp->op, rsp->rsplen);
1008 
1009         ctxp->flag |= LPFC_NVMET_IO_INP;
1010         rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
1011         if (rc == WQE_SUCCESS) {
1012 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1013                 if (!ctxp->ts_cmd_nvme)
1014                         return 0;
1015                 if (rsp->op == NVMET_FCOP_RSP)
1016                         ctxp->ts_status_wqput = ktime_get_ns();
1017                 else
1018                         ctxp->ts_data_wqput = ktime_get_ns();
1019 #endif
1020                 return 0;
1021         }
1022 
1023         if (rc == -EBUSY) {
1024                 /*
1025                  * WQ was full, so queue nvmewqeq to be sent after
1026                  * WQE release CQE
1027                  */
1028                 ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
1029                 wq = ctxp->hdwq->io_wq;
1030                 pring = wq->pring;
1031                 spin_lock_irqsave(&pring->ring_lock, iflags);
1032                 list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
1033                 wq->q_flag |= HBA_NVMET_WQFULL;
1034                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
1035                 atomic_inc(&lpfc_nvmep->defer_wqfull);
1036                 return 0;
1037         }
1038 
1039         /* Give back resources */
1040         atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
1041         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1042                         "6153 FCP Drop IO x%x: Issue: %d\n",
1043                         ctxp->oxid, rc);
1044 
1045         ctxp->wqeq->hba_wqidx = 0;
1046         nvmewqeq->context2 = NULL;
1047         nvmewqeq->context3 = NULL;
1048         rc = -EBUSY;
1049 aerr:
1050         return rc;
1051 }
1052 
1053 static void
1054 lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
1055 {
1056         struct lpfc_nvmet_tgtport *tport = targetport->private;
1057 
1058         /* release any threads waiting for the unreg to complete */
1059         if (tport->phba->targetport)
1060                 complete(tport->tport_unreg_cmp);
1061 }
1062 
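/**
 * lpfc_nvmet_xmt_fcp_abort - Abort an outstanding target FCP operation
 * @tgtport: Pointer to the target port the IO was received on.
 * @req: Pointer to the transport's FCP request descriptor.
 *
 * The .fcp_abort entry point of lpfc_tgttemplate.  If no abort is already in
 * flight (neither XBUSY nor ABORT_OP set), the context is marked with
 * LPFC_NVMET_ABORT_OP and either an unsolicited abort (command received but
 * no WQEs issued yet, or the WQE still parked on the wqfull list) or a
 * solicited abort is issued for the exchange.
 **/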
1063 static void
1064 lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
1065                          struct nvmefc_tgt_fcp_req *req)
1066 {
1067         struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1068         struct lpfc_nvmet_rcv_ctx *ctxp =
1069                 container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
1070         struct lpfc_hba *phba = ctxp->phba;
1071         struct lpfc_queue *wq;
1072         unsigned long flags;
1073 
1074         if (phba->pport->load_flag & FC_UNLOADING)
1075                 return;
1076 
1077         if (phba->pport->load_flag & FC_UNLOADING)
1078                 return;
1079 
1080         if (!ctxp->hdwq)
1081                 ctxp->hdwq = &phba->sli4_hba.hdwq[0];
1082 
1083         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1084                         "6103 NVMET Abort op: oxid x%x flg x%x ste %d\n",
1085                         ctxp->oxid, ctxp->flag, ctxp->state);
1086 
1087         lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
1088                          ctxp->oxid, ctxp->flag, ctxp->state);
1089 
1090         atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
1091 
1092         spin_lock_irqsave(&ctxp->ctxlock, flags);
1093 
1094         /* Since iaab/iaar are NOT set, we need to check
1095          * if the firmware is in process of aborting IO
1096          */
1097         if (ctxp->flag & (LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP)) {
1098                 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1099                 return;
1100         }
1101         ctxp->flag |= LPFC_NVMET_ABORT_OP;
1102 
1103         if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
1104                 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1105                 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1106                                                  ctxp->oxid);
1107                 wq = ctxp->hdwq->io_wq;
1108                 lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
1109                 return;
1110         }
1111         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1112 
 1113         /* A state of LPFC_NVMET_STE_RCV means we have just received
 1114          * the NVME command and have not yet started processing it
 1115          * (no IO WQEs have been issued on this exchange).
1116          */
1117         if (ctxp->state == LPFC_NVMET_STE_RCV)
1118                 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1119                                                  ctxp->oxid);
1120         else
1121                 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1122                                                ctxp->oxid);
1123 }
1124 
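/**
 * lpfc_nvmet_xmt_fcp_release - Release the driver context for an FCP IO
 * @tgtport: Pointer to the target port the IO was received on.
 * @rsp: Pointer to the transport's FCP request descriptor.
 *
 * The .fcp_req_release entry point of lpfc_tgttemplate.  If an abort or an
 * exchange-busy condition is still outstanding, the final release is
 * deferred to the abort path; otherwise the context buffer is recycled
 * immediately via lpfc_nvmet_ctxbuf_post().
 **/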
1125 static void
1126 lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
1127                            struct nvmefc_tgt_fcp_req *rsp)
1128 {
1129         struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1130         struct lpfc_nvmet_rcv_ctx *ctxp =
1131                 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
1132         struct lpfc_hba *phba = ctxp->phba;
1133         unsigned long flags;
1134         bool aborting = false;
1135 
1136         spin_lock_irqsave(&ctxp->ctxlock, flags);
1137         if (ctxp->flag & LPFC_NVMET_XBUSY)
1138                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1139                                 "6027 NVMET release with XBUSY flag x%x"
1140                                 " oxid x%x\n",
1141                                 ctxp->flag, ctxp->oxid);
1142         else if (ctxp->state != LPFC_NVMET_STE_DONE &&
1143                  ctxp->state != LPFC_NVMET_STE_ABORT)
1144                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1145                                 "6413 NVMET release bad state %d %d oxid x%x\n",
1146                                 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
1147 
1148         if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
1149             (ctxp->flag & LPFC_NVMET_XBUSY)) {
1150                 aborting = true;
1151                 /* let the abort path do the real release */
1152                 lpfc_nvmet_defer_release(phba, ctxp);
1153         }
1154         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1155 
1156         lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid,
1157                          ctxp->state, aborting);
1158 
1159         atomic_inc(&lpfc_nvmep->xmt_fcp_release);
1160         ctxp->flag &= ~LPFC_NVMET_TNOTIFY;
1161 
1162         if (aborting)
1163                 return;
1164 
1165         lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1166 }
1167 
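/**
 * lpfc_nvmet_defer_rcv - Complete deferred delivery of an FCP command
 * @tgtport: Pointer to the target port the IO was received on.
 * @rsp: Pointer to the transport's FCP request descriptor.
 *
 * The .defer_rcv entry point of lpfc_tgttemplate.  Called once the transport
 * has consumed a command that was handed up while its RQ buffer was still
 * held; the buffer is returned to the hardware and ctxp->rqb_buffer is
 * cleared.
 **/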
1168 static void
1169 lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
1170                      struct nvmefc_tgt_fcp_req *rsp)
1171 {
1172         struct lpfc_nvmet_tgtport *tgtp;
1173         struct lpfc_nvmet_rcv_ctx *ctxp =
1174                 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
1175         struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
1176         struct lpfc_hba *phba = ctxp->phba;
1177         unsigned long iflag;
1178 
1179 
1180         lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
1181                          ctxp->oxid, ctxp->size, raw_smp_processor_id());
1182 
1183         if (!nvmebuf) {
1184                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1185                                 "6425 Defer rcv: no buffer oxid x%x: "
1186                                 "flg %x ste %x\n",
1187                                 ctxp->oxid, ctxp->flag, ctxp->state);
1188                 return;
1189         }
1190 
1191         tgtp = phba->targetport->private;
1192         if (tgtp)
1193                 atomic_inc(&tgtp->rcv_fcp_cmd_defer);
1194 
1195         /* Free the nvmebuf since a new buffer already replaced it */
1196         nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
1197         spin_lock_irqsave(&ctxp->ctxlock, iflag);
1198         ctxp->rqb_buffer = NULL;
1199         spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1200 }
1201 
1202 static void
1203 lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport)
1204 {
1205         struct lpfc_nvmet_tgtport *tgtp;
1206         struct lpfc_hba *phba;
1207         uint32_t rc;
1208 
1209         tgtp = tgtport->private;
1210         phba = tgtp->phba;
1211 
1212         rc = lpfc_issue_els_rscn(phba->pport, 0);
1213         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1214                         "6420 NVMET subsystem change: Notification %s\n",
1215                         (rc) ? "Failed" : "Sent");
1216 }
1217 
1218 static struct nvmet_fc_target_template lpfc_tgttemplate = {
1219         .targetport_delete = lpfc_nvmet_targetport_delete,
1220         .xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
1221         .fcp_op         = lpfc_nvmet_xmt_fcp_op,
1222         .fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
1223         .fcp_req_release = lpfc_nvmet_xmt_fcp_release,
1224         .defer_rcv      = lpfc_nvmet_defer_rcv,
1225         .discovery_event = lpfc_nvmet_discovery_event,
1226 
1227         .max_hw_queues  = 1,
1228         .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
1229         .max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
1230         .dma_boundary = 0xFFFFFFFF,
1231 
1232         /* optional features */
1233         .target_features = 0,
1234         /* sizes of additional private data for data structures */
1235         .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
1236 };
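
/*
 * lpfc_nvmet_create_targetport() (function 19 in the index above) hands this
 * template to the NVME FC transport.  A minimal sketch of that registration,
 * assuming the usual lpfc port-info fields (shown here only as placeholders):
 *
 *        struct nvmet_fc_port_info pinfo = {
 *                .node_name = wwn_to_u64(vport->fc_nodename.u.wwn),
 *                .port_name = wwn_to_u64(vport->fc_portname.u.wwn),
 *                .port_id   = vport->fc_myDID,
 *        };
 *        error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
 *                                             &phba->pcidev->dev,
 *                                             &phba->targetport);
 */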
1237 
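/*
 * Free every receive context queued on one per-CPU/MRQ list: return each
 * SGL to lpfc_nvmet_sgl_list, release the associated iocbq and free the
 * context memory itself.
 */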
1238 static void
1239 __lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
1240                 struct lpfc_nvmet_ctx_info *infop)
1241 {
1242         struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
1243         unsigned long flags;
1244 
1245         spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
1246         list_for_each_entry_safe(ctx_buf, next_ctx_buf,
1247                                 &infop->nvmet_ctx_list, list) {
1248                 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1249                 list_del_init(&ctx_buf->list);
1250                 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1251 
1252                 __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
1253                 ctx_buf->sglq->state = SGL_FREED;
1254                 ctx_buf->sglq->ndlp = NULL;
1255 
1256                 spin_lock(&phba->sli4_hba.sgl_list_lock);
1257                 list_add_tail(&ctx_buf->sglq->list,
1258                                 &phba->sli4_hba.lpfc_nvmet_sgl_list);
1259                 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1260 
1261                 lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1262                 kfree(ctx_buf->context);
1263         }
1264         spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
1265 }
1266 
1267 static void
1268 lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
1269 {
1270         struct lpfc_nvmet_ctx_info *infop;
1271         int i, j;
1272 
1273         /* The first context list, MRQ 0 CPU 0 */
1274         infop = phba->sli4_hba.nvmet_ctx_info;
1275         if (!infop)
1276                 return;
1277 
 1278         /* Cycle through the entire CPU context list for every MRQ */
1279         for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
1280                 for_each_present_cpu(j) {
1281                         infop = lpfc_get_ctx_list(phba, j, i);
1282                         __lpfc_nvmet_clean_io_for_cpu(phba, infop);
1283                 }
1284         }
1285         kfree(phba->sli4_hba.nvmet_ctx_info);
1286         phba->sli4_hba.nvmet_ctx_info = NULL;
1287 }
1288 
1289 static int
1290 lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
1291 {
1292         struct lpfc_nvmet_ctxbuf *ctx_buf;
1293         struct lpfc_iocbq *nvmewqe;
1294         union lpfc_wqe128 *wqe;
1295         struct lpfc_nvmet_ctx_info *last_infop;
1296         struct lpfc_nvmet_ctx_info *infop;
1297         int i, j, idx, cpu;
1298 
1299         lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
1300                         "6403 Allocate NVMET resources for %d XRIs\n",
1301                         phba->sli4_hba.nvmet_xri_cnt);
1302 
1303         phba->sli4_hba.nvmet_ctx_info = kcalloc(
1304                 phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
1305                 sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
1306         if (!phba->sli4_hba.nvmet_ctx_info) {
1307                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1308                                 "6419 Failed allocate memory for "
1309                                 "nvmet context lists\n");
1310                 return -ENOMEM;
1311         }
1312 
1313         /*
1314          * Assuming X CPUs in the system and Y MRQs, allocate an array of
1315          * X * Y lpfc_nvmet_ctx_info structures laid out as follows:
1316          *
1317          * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
1318          * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
1319          * ...
1320          * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
1321          *
1322          * Each line represents an MRQ "silo" containing an entry for
1323          * every CPU.
1324          *
1325          * MRQ X is initially assumed to be associated with CPU X, thus
1326          * contexts are initially distributed across all MRQs using
1327          * the MRQ index (N) as follows cpuN/mrqN. When contexts are
1328          * the MRQ index (N) as follows: cpuN/mrqN. When contexts are
1329          * freed, they are freed to the MRQ silo based on the CPU number
1330          * whose IO completed on CPU B will be freed to cpuB/mrqA.
1331          */
1332         for_each_possible_cpu(i) {
1333                 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1334                         infop = lpfc_get_ctx_list(phba, i, j);
1335                         INIT_LIST_HEAD(&infop->nvmet_ctx_list);
1336                         spin_lock_init(&infop->nvmet_ctx_list_lock);
1337                         infop->nvmet_ctx_list_cnt = 0;
1338                 }
1339         }
1340 
1341         /*
1342          * Setup the next CPU context info ptr for each MRQ.
1343          * MRQ 0 will cycle thru CPUs 0 - X separately from
1344          * MRQ 1 cycling thru CPUs 0 - X, and so on.
1345          */
1346         for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1347                 last_infop = lpfc_get_ctx_list(phba,
1348                                                cpumask_first(cpu_present_mask),
1349                                                j);
1350                 for (i = phba->sli4_hba.num_possible_cpu - 1;  i >= 0; i--) {
1351                         infop = lpfc_get_ctx_list(phba, i, j);
1352                         infop->nvmet_ctx_next_cpu = last_infop;
1353                         last_infop = infop;
1354                 }
1355         }
1356 
1357         /* For all nvmet xris, allocate resources needed to process a
1358          * received command on a per xri basis.
1359          */
1360         idx = 0;
1361         cpu = cpumask_first(cpu_present_mask);
1362         for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
1363                 ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
1364                 if (!ctx_buf) {
1365                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1366                                         "6404 Ran out of memory for NVMET\n");
1367                         return -ENOMEM;
1368                 }
1369 
1370                 ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
1371                                            GFP_KERNEL);
1372                 if (!ctx_buf->context) {
1373                         kfree(ctx_buf);
1374                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1375                                         "6405 Ran out of NVMET "
1376                                         "context memory\n");
1377                         return -ENOMEM;
1378                 }
1379                 ctx_buf->context->ctxbuf = ctx_buf;
1380                 ctx_buf->context->state = LPFC_NVMET_STE_FREE;
1381 
1382                 ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
1383                 if (!ctx_buf->iocbq) {
1384                         kfree(ctx_buf->context);
1385                         kfree(ctx_buf);
1386                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1387                                         "6406 Ran out of NVMET iocb/WQEs\n");
1388                         return -ENOMEM;
1389                 }
1390                 ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
1391                 nvmewqe = ctx_buf->iocbq;
1392                 wqe = &nvmewqe->wqe;
1393 
1394                 /* Initialize WQE */
1395                 memset(wqe, 0, sizeof(union lpfc_wqe));
1396 
1397                 ctx_buf->iocbq->context1 = NULL;
1398                 spin_lock(&phba->sli4_hba.sgl_list_lock);
1399                 ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
1400                 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1401                 if (!ctx_buf->sglq) {
1402                         lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1403                         kfree(ctx_buf->context);
1404                         kfree(ctx_buf);
1405                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1406                                         "6407 Ran out of NVMET XRIs\n");
1407                         return -ENOMEM;
1408                 }
1409                 INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);
1410 
1411                 /*
1412                  * Add ctx to MRQidx context list. Our initial assumption
1413                  * is MRQidx will be associated with CPUidx. This association
1414                  * can change on the fly.
1415                  */
1416                 infop = lpfc_get_ctx_list(phba, cpu, idx);
1417                 spin_lock(&infop->nvmet_ctx_list_lock);
1418                 list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
1419                 infop->nvmet_ctx_list_cnt++;
1420                 spin_unlock(&infop->nvmet_ctx_list_lock);
1421 
1422                 /* Spread ctx structures evenly across all MRQs */
1423                 idx++;
1424                 if (idx >= phba->cfg_nvmet_mrq) {
1425                         idx = 0;
1426                         cpu = cpumask_first(cpu_present_mask);
1427                         continue;
1428                 }
1429                 cpu = cpumask_next(cpu, cpu_present_mask);
1430                 if (cpu == nr_cpu_ids)
1431                         cpu = cpumask_first(cpu_present_mask);
1432 
1433         }
1434 
1435         for_each_present_cpu(i) {
1436                 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1437                         infop = lpfc_get_ctx_list(phba, i, j);
1438                         lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
1439                                         "6408 TOTAL NVMET ctx for CPU %d "
1440                                         "MRQ %d: cnt %d nextcpu x%px\n",
1441                                         i, j, infop->nvmet_ctx_list_cnt,
1442                                         infop->nvmet_ctx_next_cpu);
1443                 }
1444         }
1445         return 0;
1446 }
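
To make the distribution loop above easier to picture, here is a small self-contained user-space sketch of the same round-robin seeding over a flat cpu-by-mrq array. The get_ctx_list() helper and the fixed CPU/MRQ counts are assumptions for illustration only; the driver's real lookup is lpfc_get_ctx_list() and the real loop walks cpu_present_mask.

    #include <stdio.h>
    #include <stdlib.h>

    #define NUM_CPU 4
    #define NUM_MRQ 2

    struct ctx_info {
            int cnt;
    };

    /* assumed stand-in for lpfc_get_ctx_list(phba, cpu, mrq) */
    static struct ctx_info *get_ctx_list(struct ctx_info *base, int cpu, int mrq)
    {
            return base + (cpu * NUM_MRQ) + mrq;
    }

    int main(void)
    {
            struct ctx_info *info = calloc(NUM_CPU * NUM_MRQ, sizeof(*info));
            int xri_cnt = 10, idx = 0, cpu = 0, i;

            if (!info)
                    return 1;

            /* mirror of the distribution loop: advance MRQ, then CPU; restart
             * from CPU 0 whenever the MRQ index wraps
             */
            for (i = 0; i < xri_cnt; i++) {
                    get_ctx_list(info, cpu, idx)->cnt++;
                    idx++;
                    if (idx >= NUM_MRQ) {
                            idx = 0;
                            cpu = 0;
                            continue;
                    }
                    cpu = (cpu + 1) % NUM_CPU;
            }

            for (cpu = 0; cpu < NUM_CPU; cpu++)
                    for (idx = 0; idx < NUM_MRQ; idx++)
                            printf("cpu%d/mrq%d: %d contexts\n", cpu, idx,
                                   get_ctx_list(info, cpu, idx)->cnt);

            free(info);
            return 0;
    }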
1447 
1448 int
1449 lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
1450 {
1451         struct lpfc_vport  *vport = phba->pport;
1452         struct lpfc_nvmet_tgtport *tgtp;
1453         struct nvmet_fc_port_info pinfo;
1454         int error;
1455 
1456         if (phba->targetport)
1457                 return 0;
1458 
1459         error = lpfc_nvmet_setup_io_context(phba);
1460         if (error)
1461                 return error;
1462 
1463         memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
1464         pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
1465         pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
1466         pinfo.port_id = vport->fc_myDID;
1467 
1468         /* We need to tell the transport layer + 1 because it takes page
1469          * alignment into account. When space for the SGL is allocated, we
1470          * allocate + 3: one for cmd, one for rsp, and one for this alignment.
1471          */
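        /*
         * Worked example, assuming cfg_nvme_seg_cnt = 64: the transport is
         * advertised 64 + 1 = 65 SGL segments below, while the driver-side
         * SGL allocation referred to above reserves 64 + 3 entries (one for
         * cmd, one for rsp, one for alignment).
         */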
1472         lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
1473         lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
1474         lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
1475 
1476 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1477         error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
1478                                              &phba->pcidev->dev,
1479                                              &phba->targetport);
1480 #else
1481         error = -ENOENT;
1482 #endif
1483         if (error) {
1484                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1485                                 "6025 Cannot register NVME targetport x%x: "
1486                                 "portnm %llx nodenm %llx segs %d qs %d\n",
1487                                 error,
1488                                 pinfo.port_name, pinfo.node_name,
1489                                 lpfc_tgttemplate.max_sgl_segments,
1490                                 lpfc_tgttemplate.max_hw_queues);
1491                 phba->targetport = NULL;
1492                 phba->nvmet_support = 0;
1493 
1494                 lpfc_nvmet_cleanup_io_context(phba);
1495 
1496         } else {
1497                 tgtp = (struct lpfc_nvmet_tgtport *)
1498                         phba->targetport->private;
1499                 tgtp->phba = phba;
1500 
1501                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1502                                 "6026 Registered NVME "
1503                                 "targetport: x%px, private x%px "
1504                                 "portnm %llx nodenm %llx segs %d qs %d\n",
1505                                 phba->targetport, tgtp,
1506                                 pinfo.port_name, pinfo.node_name,
1507                                 lpfc_tgttemplate.max_sgl_segments,
1508                                 lpfc_tgttemplate.max_hw_queues);
1509 
1510                 atomic_set(&tgtp->rcv_ls_req_in, 0);
1511                 atomic_set(&tgtp->rcv_ls_req_out, 0);
1512                 atomic_set(&tgtp->rcv_ls_req_drop, 0);
1513                 atomic_set(&tgtp->xmt_ls_abort, 0);
1514                 atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
1515                 atomic_set(&tgtp->xmt_ls_rsp, 0);
1516                 atomic_set(&tgtp->xmt_ls_drop, 0);
1517                 atomic_set(&tgtp->xmt_ls_rsp_error, 0);
1518                 atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
1519                 atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
1520                 atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
1521                 atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
1522                 atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
1523                 atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
1524                 atomic_set(&tgtp->xmt_fcp_drop, 0);
1525                 atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
1526                 atomic_set(&tgtp->xmt_fcp_read, 0);
1527                 atomic_set(&tgtp->xmt_fcp_write, 0);
1528                 atomic_set(&tgtp->xmt_fcp_rsp, 0);
1529                 atomic_set(&tgtp->xmt_fcp_release, 0);
1530                 atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
1531                 atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
1532                 atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
1533                 atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
1534                 atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
1535                 atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
1536                 atomic_set(&tgtp->xmt_fcp_abort, 0);
1537                 atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
1538                 atomic_set(&tgtp->xmt_abort_unsol, 0);
1539                 atomic_set(&tgtp->xmt_abort_sol, 0);
1540                 atomic_set(&tgtp->xmt_abort_rsp, 0);
1541                 atomic_set(&tgtp->xmt_abort_rsp_error, 0);
1542                 atomic_set(&tgtp->defer_ctx, 0);
1543                 atomic_set(&tgtp->defer_fod, 0);
1544                 atomic_set(&tgtp->defer_wqfull, 0);
1545         }
1546         return error;
1547 }
1548 
1549 int
1550 lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
1551 {
1552         struct lpfc_vport  *vport = phba->pport;
1553 
1554         if (!phba->targetport)
1555                 return 0;
1556 
1557         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
1558                          "6007 Update NVMET port x%px did x%x\n",
1559                          phba->targetport, vport->fc_myDID);
1560 
1561         phba->targetport->port_id = vport->fc_myDID;
1562         return 0;
1563 }
1564 
1565 /**
1566  * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
1567  * @phba: pointer to lpfc hba data structure.
1568  * @axri: pointer to the nvmet xri abort wcqe structure.
1569  *
1570  * This routine is invoked by the worker thread to process a SLI4 fast-path
1571  * NVMET aborted xri.
1572  **/
1573 void
1574 lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
1575                             struct sli4_wcqe_xri_aborted *axri)
1576 {
1577 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1578         uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
1579         uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
1580         struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1581         struct lpfc_nvmet_tgtport *tgtp;
1582         struct nvmefc_tgt_fcp_req *req = NULL;
1583         struct lpfc_nodelist *ndlp;
1584         unsigned long iflag = 0;
1585         int rrq_empty = 0;
1586         bool released = false;
1587 
1588         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1589                         "6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
1590 
1591         if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
1592                 return;
1593 
1594         if (phba->targetport) {
1595                 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1596                 atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
1597         }
1598 
1599         spin_lock_irqsave(&phba->hbalock, iflag);
1600         spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1601         list_for_each_entry_safe(ctxp, next_ctxp,
1602                                  &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1603                                  list) {
1604                 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1605                         continue;
1606 
1607                 spin_lock(&ctxp->ctxlock);
1608                 /* Check if we already received a free context call
1609                  * and we have completed processing an abort situation.
1610                  */
1611                 if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
1612                     !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
1613                         list_del_init(&ctxp->list);
1614                         released = true;
1615                 }
1616                 ctxp->flag &= ~LPFC_NVMET_XBUSY;
1617                 spin_unlock(&ctxp->ctxlock);
1618                 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1619 
1620                 rrq_empty = list_empty(&phba->active_rrq_list);
1621                 spin_unlock_irqrestore(&phba->hbalock, iflag);
1622                 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1623                 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
1624                     (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
1625                      ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
1626                         lpfc_set_rrq_active(phba, ndlp,
1627                                 ctxp->ctxbuf->sglq->sli4_lxritag,
1628                                 rxid, 1);
1629                         lpfc_sli4_abts_err_handler(phba, ndlp, axri);
1630                 }
1631 
1632                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1633                                 "6318 XB aborted oxid x%x flg x%x (%x)\n",
1634                                 ctxp->oxid, ctxp->flag, released);
1635                 if (released)
1636                         lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1637 
1638                 if (rrq_empty)
1639                         lpfc_worker_wake_up(phba);
1640                 return;
1641         }
1642         spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1643         spin_unlock_irqrestore(&phba->hbalock, iflag);
1644 
1645         ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
1646         if (ctxp) {
1647                 /*
1648                  *  Abort already done by FW, so BA_ACC sent.
1649                  *  However, the transport may be unaware.
1650                  */
1651                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1652                                 "6323 NVMET Rcv ABTS xri x%x ctxp state x%x "
1653                                 "flag x%x oxid x%x rxid x%x\n",
1654                                 xri, ctxp->state, ctxp->flag, ctxp->oxid,
1655                                 rxid);
1656 
1657                 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1658                 ctxp->flag |= LPFC_NVMET_ABTS_RCV;
1659                 ctxp->state = LPFC_NVMET_STE_ABORT;
1660                 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1661 
1662                 lpfc_nvmeio_data(phba,
1663                                  "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1664                                  xri, raw_smp_processor_id(), 0);
1665 
1666                 req = &ctxp->ctx.fcp_req;
1667                 if (req)
1668                         nvmet_fc_rcv_fcp_abort(phba->targetport, req);
1669         }
1670 #endif
1671 }
1672 
1673 int
1674 lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
1675                            struct fc_frame_header *fc_hdr)
1676 {
1677 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1678         struct lpfc_hba *phba = vport->phba;
1679         struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1680         struct nvmefc_tgt_fcp_req *rsp;
1681         uint32_t sid;
1682         uint16_t oxid, xri;
1683         unsigned long iflag = 0;
1684 
1685         sid = sli4_sid_from_fc_hdr(fc_hdr);
1686         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1687 
1688         spin_lock_irqsave(&phba->hbalock, iflag);
1689         spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1690         list_for_each_entry_safe(ctxp, next_ctxp,
1691                                  &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1692                                  list) {
1693                 if (ctxp->oxid != oxid || ctxp->sid != sid)
1694                         continue;
1695 
1696                 xri = ctxp->ctxbuf->sglq->sli4_xritag;
1697 
1698                 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1699                 spin_unlock_irqrestore(&phba->hbalock, iflag);
1700 
1701                 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1702                 ctxp->flag |= LPFC_NVMET_ABTS_RCV;
1703                 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1704 
1705                 lpfc_nvmeio_data(phba,
1706                         "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1707                         xri, raw_smp_processor_id(), 0);
1708 
1709                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1710                                 "6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
1711 
1712                 rsp = &ctxp->ctx.fcp_req;
1713                 nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
1714 
1715                 /* Respond with BA_ACC accordingly */
1716                 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1717                 return 0;
1718         }
1719         spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1720         spin_unlock_irqrestore(&phba->hbalock, iflag);
1721 
1722         /* check the wait list */
1723         if (phba->sli4_hba.nvmet_io_wait_cnt) {
1724                 struct rqb_dmabuf *nvmebuf;
1725                 struct fc_frame_header *fc_hdr_tmp;
1726                 u32 sid_tmp;
1727                 u16 oxid_tmp;
1728                 bool found = false;
1729 
1730                 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
1731 
1732                 /* match by oxid and s_id */
1733                 list_for_each_entry(nvmebuf,
1734                                     &phba->sli4_hba.lpfc_nvmet_io_wait_list,
1735                                     hbuf.list) {
1736                         fc_hdr_tmp = (struct fc_frame_header *)
1737                                         (nvmebuf->hbuf.virt);
1738                         oxid_tmp = be16_to_cpu(fc_hdr_tmp->fh_ox_id);
1739                         sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp);
1740                         if (oxid_tmp != oxid || sid_tmp != sid)
1741                                 continue;
1742 
1743                         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1744                                         "6321 NVMET Rcv ABTS oxid x%x from x%x "
1745                                         "is waiting for a ctxp\n",
1746                                         oxid, sid);
1747 
1748                         list_del_init(&nvmebuf->hbuf.list);
1749                         phba->sli4_hba.nvmet_io_wait_cnt--;
1750                         found = true;
1751                         break;
1752                 }
1753                 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
1754                                        iflag);
1755 
1756                 /* free buffer since already posted a new DMA buffer to RQ */
1757                 if (found) {
1758                         nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
1759                         /* Respond with BA_ACC accordingly */
1760                         lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1761                         return 0;
1762                 }
1763         }
1764 
1765         /* check active list */
1766         ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid);
1767         if (ctxp) {
1768                 xri = ctxp->ctxbuf->sglq->sli4_xritag;
1769 
1770                 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1771                 ctxp->flag |= (LPFC_NVMET_ABTS_RCV | LPFC_NVMET_ABORT_OP);
1772                 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1773 
1774                 lpfc_nvmeio_data(phba,
1775                                  "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1776                                  xri, raw_smp_processor_id(), 0);
1777 
1778                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1779                                 "6322 NVMET Rcv ABTS:acc oxid x%x xri x%x "
1780                                 "flag x%x state x%x\n",
1781                                 ctxp->oxid, xri, ctxp->flag, ctxp->state);
1782 
1783                 if (ctxp->flag & LPFC_NVMET_TNOTIFY) {
1784                         /* Notify the transport */
1785                         nvmet_fc_rcv_fcp_abort(phba->targetport,
1786                                                &ctxp->ctx.fcp_req);
1787                 } else {
1788                         cancel_work_sync(&ctxp->ctxbuf->defer_work);
1789                         spin_lock_irqsave(&ctxp->ctxlock, iflag);
1790                         lpfc_nvmet_defer_release(phba, ctxp);
1791                         spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1792                 }
1793                 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1794                                                ctxp->oxid);
1795 
1796                 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1797                 return 0;
1798         }
1799 
1800         lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n",
1801                          oxid, raw_smp_processor_id(), 1);
1802 
1803         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1804                         "6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid);
1805 
1806         /* Respond with BA_RJT accordingly */
1807         lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
1808 #endif
1809         return 0;
1810 }
1811 
1812 static void
1813 lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
1814                         struct lpfc_nvmet_rcv_ctx *ctxp)
1815 {
1816         struct lpfc_sli_ring *pring;
1817         struct lpfc_iocbq *nvmewqeq;
1818         struct lpfc_iocbq *next_nvmewqeq;
1819         unsigned long iflags;
1820         struct lpfc_wcqe_complete wcqe;
1821         struct lpfc_wcqe_complete *wcqep;
1822 
1823         pring = wq->pring;
1824         wcqep = &wcqe;
1825 
1826         /* Fake an ABORT error code back to cmpl routine */
1827         memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
1828         bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
1829         wcqep->parameter = IOERR_ABORT_REQUESTED;
1830 
1831         spin_lock_irqsave(&pring->ring_lock, iflags);
1832         list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
1833                                  &wq->wqfull_list, list) {
1834                 if (ctxp) {
1835                         /* Checking for a specific IO to flush */
1836                         if (nvmewqeq->context2 == ctxp) {
1837                                 list_del(&nvmewqeq->list);
1838                                 spin_unlock_irqrestore(&pring->ring_lock,
1839                                                        iflags);
1840                                 lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
1841                                                           wcqep);
1842                                 return;
1843                         }
1844                         continue;
1845                 } else {
1846                         /* Flush all IOs */
1847                         list_del(&nvmewqeq->list);
1848                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
1849                         lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
1850                         spin_lock_irqsave(&pring->ring_lock, iflags);
1851                 }
1852         }
1853         if (!ctxp)
1854                 wq->q_flag &= ~HBA_NVMET_WQFULL;
1855         spin_unlock_irqrestore(&pring->ring_lock, iflags);
1856 }
1857 
1858 void
1859 lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
1860                           struct lpfc_queue *wq)
1861 {
1862 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1863         struct lpfc_sli_ring *pring;
1864         struct lpfc_iocbq *nvmewqeq;
1865         struct lpfc_nvmet_rcv_ctx *ctxp;
1866         unsigned long iflags;
1867         int rc;
1868 
1869         /*
1870          * Some WQE slots are available, so try to re-issue anything
1871          * on the WQ wqfull_list.
1872          */
1873         pring = wq->pring;
1874         spin_lock_irqsave(&pring->ring_lock, iflags);
1875         while (!list_empty(&wq->wqfull_list)) {
1876                 list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
1877                                  list);
1878                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
1879                 ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmewqeq->context2;
1880                 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
1881                 spin_lock_irqsave(&pring->ring_lock, iflags);
1882                 if (rc == -EBUSY) {
1883                         /* WQ was full again, so put it back on the list */
1884                         list_add(&nvmewqeq->list, &wq->wqfull_list);
1885                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
1886                         return;
1887                 }
1888                 if (rc == WQE_SUCCESS) {
1889 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1890                         if (ctxp->ts_cmd_nvme) {
1891                                 if (ctxp->ctx.fcp_req.op == NVMET_FCOP_RSP)
1892                                         ctxp->ts_status_wqput = ktime_get_ns();
1893                                 else
1894                                         ctxp->ts_data_wqput = ktime_get_ns();
1895                         }
1896 #endif
1897                 } else {
1898                         WARN_ON(rc);
1899                 }
1900         }
1901         wq->q_flag &= ~HBA_NVMET_WQFULL;
1902         spin_unlock_irqrestore(&pring->ring_lock, iflags);
1903 
1904 #endif
1905 }
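
The two wqfull routines above share a common deferred-resubmit idiom: pop an entry under the ring lock, drop the lock to (re)issue or complete it, and park it again and stop if the queue is still full. A generic, self-contained sketch of that idiom (example_req and issue_one are hypothetical names, not driver code; issue_one here always reports -EBUSY purely for illustration) is:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/errno.h>

    struct example_req {
            struct list_head list;
    };

    static int issue_one(struct example_req *req)
    {
            /* hypothetical submit hook; pretend the hardware queue is full */
            return -EBUSY;
    }

    static void example_drain(struct list_head *pending, spinlock_t *lock)
    {
            struct example_req *req;
            unsigned long flags;

            spin_lock_irqsave(lock, flags);
            while (!list_empty(pending)) {
                    req = list_first_entry(pending, struct example_req, list);
                    list_del(&req->list);
                    spin_unlock_irqrestore(lock, flags);

                    if (issue_one(req) == -EBUSY) {
                            /* queue filled up again; park the request and stop */
                            spin_lock_irqsave(lock, flags);
                            list_add(&req->list, pending);
                            break;
                    }
                    spin_lock_irqsave(lock, flags);
            }
            spin_unlock_irqrestore(lock, flags);
    }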
1906 
1907 void
1908 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
1909 {
1910 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1911         struct lpfc_nvmet_tgtport *tgtp;
1912         struct lpfc_queue *wq;
1913         uint32_t qidx;
1914         DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);
1915 
1916         if (phba->nvmet_support == 0)
1917                 return;
1918         if (phba->targetport) {
1919                 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1920                 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
1921                         wq = phba->sli4_hba.hdwq[qidx].io_wq;
1922                         lpfc_nvmet_wqfull_flush(phba, wq, NULL);
1923                 }
1924                 tgtp->tport_unreg_cmp = &tport_unreg_cmp;
1925                 nvmet_fc_unregister_targetport(phba->targetport);
1926                 if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp,
1927                                         msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
1928                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1929                                         "6179 Unreg targetport x%px timeout "
1930                                         "reached.\n", phba->targetport);
1931                 lpfc_nvmet_cleanup_io_context(phba);
1932         }
1933         phba->targetport = NULL;
1934 #endif
1935 }
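
The wait_for_completion_timeout() above pairs with the driver's .targetport_delete callback, which the transport invokes once the last reference to the targetport is dropped. A minimal sketch of that handshake, with illustrative function names standing in for the driver's actual unregister path and lpfc_nvmet_targetport_delete(), is:

    /* unregister side: arm a completion, then wait for the delete callback */
    static void example_unregister(struct nvmet_fc_target_port *tport,
                                   struct lpfc_nvmet_tgtport *tgtp)
    {
            DECLARE_COMPLETION_ONSTACK(unreg_done);

            tgtp->tport_unreg_cmp = &unreg_done;
            nvmet_fc_unregister_targetport(tport);
            if (!wait_for_completion_timeout(&unreg_done,
                                             msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
                    pr_warn("targetport unregister timed out\n");
    }

    /* .targetport_delete callback: signal the waiter */
    static void example_targetport_delete(struct nvmet_fc_target_port *tport)
    {
            struct lpfc_nvmet_tgtport *tgtp = tport->private;

            if (tgtp->tport_unreg_cmp)
                    complete(tgtp->tport_unreg_cmp);
    }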
1936 
1937 /**
1938  * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
1939  * @phba: pointer to lpfc hba data structure.
1940  * @pring: pointer to a SLI ring.
1941  * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
1942  *
1943  * This routine is used for processing the frame data associated with an
1944  * unsolicited NVME LS event. It allocates a receive context for the
1945  * exchange and passes the LS request payload to the NVME target
1946  * transport via nvmet_fc_rcv_ls_req(). If the transport cannot accept
1947  * the request, the receive buffer is freed and an abort is issued for
1948  * the exchange.
1949  **/
1950 static void
1951 lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1952                            struct hbq_dmabuf *nvmebuf)
1953 {
1954 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1955         struct lpfc_nvmet_tgtport *tgtp;
1956         struct fc_frame_header *fc_hdr;
1957         struct lpfc_nvmet_rcv_ctx *ctxp;
1958         uint32_t *payload;
1959         uint32_t size, oxid, sid, rc;
1960 
1961         fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1962         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1963 
1964         if (!phba->targetport) {
1965                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1966                                 "6154 LS Drop IO x%x\n", oxid);
1967                 oxid = 0;
1968                 size = 0;
1969                 sid = 0;
1970                 ctxp = NULL;
1971                 goto dropit;
1972         }
1973 
1974         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1975         payload = (uint32_t *)(nvmebuf->dbuf.virt);
1976         size = bf_get(lpfc_rcqe_length,  &nvmebuf->cq_event.cqe.rcqe_cmpl);
1977         sid = sli4_sid_from_fc_hdr(fc_hdr);
1978 
1979         ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
1980         if (ctxp == NULL) {
1981                 atomic_inc(&tgtp->rcv_ls_req_drop);
1982                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1983                                 "6155 LS Drop IO x%x: Alloc\n",
1984                                 oxid);
1985 dropit:
1986                 lpfc_nvmeio_data(phba, "NVMET LS  DROP: "
1987                                  "xri x%x sz %d from %06x\n",
1988                                  oxid, size, sid);
1989                 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1990                 return;
1991         }
1992         ctxp->phba = phba;
1993         ctxp->size = size;
1994         ctxp->oxid = oxid;
1995         ctxp->sid = sid;
1996         ctxp->wqeq = NULL;
1997         ctxp->state = LPFC_NVMET_STE_LS_RCV;
1998         ctxp->entry_cnt = 1;
1999         ctxp->rqb_buffer = (void *)nvmebuf;
2000         ctxp->hdwq = &phba->sli4_hba.hdwq[0];
2001 
2002         lpfc_nvmeio_data(phba, "NVMET LS   RCV: xri x%x sz %d from %06x\n",
2003                          oxid, size, sid);
2004         /*
2005          * The calling sequence should be:
2006          * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done
2007          * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
2008          */
2009         atomic_inc(&tgtp->rcv_ls_req_in);
2010         rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
2011                                  payload, size);
2012 
2013         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2014                         "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
2015                         "%08x %08x %08x\n", size, rc,
2016                         *payload, *(payload+1), *(payload+2),
2017                         *(payload+3), *(payload+4), *(payload+5));
2018 
2019         if (rc == 0) {
2020                 atomic_inc(&tgtp->rcv_ls_req_out);
2021                 return;
2022         }
2023 
2024         lpfc_nvmeio_data(phba, "NVMET LS  DROP: xri x%x sz %d from %06x\n",
2025                          oxid, size, sid);
2026 
2027         atomic_inc(&tgtp->rcv_ls_req_drop);
2028         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2029                         "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
2030                         ctxp->oxid, rc);
2031 
2032         /* We assume a rcv'ed cmd ALWAYS fits into 1 buffer */
2033         lpfc_in_buf_free(phba, &nvmebuf->dbuf);
2034 
2035         atomic_inc(&tgtp->xmt_ls_abort);
2036         lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
2037 #endif
2038 }
2039 
2040 static void
2041 lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
2042 {
2043 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2044         struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
2045         struct lpfc_hba *phba = ctxp->phba;
2046         struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
2047         struct lpfc_nvmet_tgtport *tgtp;
2048         uint32_t *payload, qno;
2049         uint32_t rc;
2050         unsigned long iflags;
2051 
2052         if (!nvmebuf) {
2053                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2054                         "6159 process_rcv_fcp_req, nvmebuf is NULL, "
2055                         "oxid: x%x flg: x%x state: x%x\n",
2056                         ctxp->oxid, ctxp->flag, ctxp->state);
2057                 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2058                 lpfc_nvmet_defer_release(phba, ctxp);
2059                 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2060                 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
2061                                                  ctxp->oxid);
2062                 return;
2063         }
2064 
2065         if (ctxp->flag & LPFC_NVMET_ABTS_RCV) {
2066                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2067                                 "6324 IO oxid x%x aborted\n",
2068                                 ctxp->oxid);
2069                 return;
2070         }
2071 
2072         payload = (uint32_t *)(nvmebuf->dbuf.virt);
2073         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2074         ctxp->flag |= LPFC_NVMET_TNOTIFY;
2075 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2076         if (ctxp->ts_isr_cmd)
2077                 ctxp->ts_cmd_nvme = ktime_get_ns();
2078 #endif
2079         /*
2080          * The calling sequence should be:
2081          * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
2082          * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
2083          * When we return from nvmet_fc_rcv_fcp_req, all relevant info from
2084          * the NVME command / FC header has been stored.
2085          * A buffer has already been reposted for this IO, so just free
2086          * the nvmebuf.
2087          */
2088         rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
2089                                   payload, ctxp->size);
2090         /* Process FCP command */
2091         if (rc == 0) {
2092                 atomic_inc(&tgtp->rcv_fcp_cmd_out);
2093                 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2094                 if ((ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) ||
2095                     (nvmebuf != ctxp->rqb_buffer)) {
2096                         spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2097                         return;
2098                 }
2099                 ctxp->rqb_buffer = NULL;
2100                 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2101                 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
2102                 return;
2103         }
2104 
2105         /* Processing of FCP command is deferred */
2106         if (rc == -EOVERFLOW) {
2107                 lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d "
2108                                  "from %06x\n",
2109                                  ctxp->oxid, ctxp->size, ctxp->sid);
2110                 atomic_inc(&tgtp->rcv_fcp_cmd_out);
2111                 atomic_inc(&tgtp->defer_fod);
2112                 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2113                 if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
2114                         spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2115                         return;
2116                 }
2117                 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2118                 /*
2119                  * Post a replacement DMA buffer to RQ and defer
2120                  * freeing rcv buffer till .defer_rcv callback
2121                  */
2122                 qno = nvmebuf->idx;
2123                 lpfc_post_rq_buffer(
2124                         phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2125                         phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2126                 return;
2127         }
2128         ctxp->flag &= ~LPFC_NVMET_TNOTIFY;
2129         atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2130         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2131                         "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
2132                         ctxp->oxid, rc,
2133                         atomic_read(&tgtp->rcv_fcp_cmd_in),
2134                         atomic_read(&tgtp->rcv_fcp_cmd_out),
2135                         atomic_read(&tgtp->xmt_fcp_release));
2136         lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
2137                          ctxp->oxid, ctxp->size, ctxp->sid);
2138         spin_lock_irqsave(&ctxp->ctxlock, iflags);
2139         lpfc_nvmet_defer_release(phba, ctxp);
2140         spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2141         lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
2142 #endif
2143 }
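
To summarize the three-way outcome handling above: rc == 0 means the transport now owns the command and the receive buffer can be reposted, rc == -EOVERFLOW means the buffer must be held until the .defer_rcv callback (lpfc_nvmet_defer_rcv) releases it, and anything else leads to a deferred context release plus an abort of the exchange. A compact classification helper expressing the same decision (hypothetical, not part of the driver) is:

    #include <linux/errno.h>

    enum rcv_fcp_action {
            RCV_FCP_REPOST_BUF,     /* accepted: repost/free the receive buffer */
            RCV_FCP_HOLD_BUF,       /* busy: keep buffer until .defer_rcv fires */
            RCV_FCP_ABORT,          /* rejected: defer release and abort the IO */
    };

    static enum rcv_fcp_action classify_rcv_fcp_rc(int rc)
    {
            if (rc == 0)
                    return RCV_FCP_REPOST_BUF;
            if (rc == -EOVERFLOW)
                    return RCV_FCP_HOLD_BUF;
            return RCV_FCP_ABORT;
    }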
2144 
2145 static void
2146 lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
2147 {
2148 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2149         struct lpfc_nvmet_ctxbuf *ctx_buf =
2150                 container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);
2151 
2152         lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
2153 #endif
2154 }
2155 
2156 static struct lpfc_nvmet_ctxbuf *
2157 lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
2158                              struct lpfc_nvmet_ctx_info *current_infop)
2159 {
2160 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2161         struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
2162         struct lpfc_nvmet_ctx_info *get_infop;
2163         int i;
2164 
2165         /*
2166          * The current_infop for the MRQ an NVME command IU was received
2167          * on is empty. Our goal is to replenish this MRQ's context
2168          * list from another CPU's list.
2169          *
2170          * First we need to pick a context list to start looking on.
2171          * nvmet_ctx_start_cpu had available context the last time
2172          * we needed to replenish this CPU, where nvmet_ctx_next_cpu
2173          * is just the next sequential CPU for this MRQ.
2174          */
2175         if (current_infop->nvmet_ctx_start_cpu)
2176                 get_infop = current_infop->nvmet_ctx_start_cpu;
2177         else
2178                 get_infop = current_infop->nvmet_ctx_next_cpu;
2179 
2180         for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) {
2181                 if (get_infop == current_infop) {
2182                         get_infop = get_infop->nvmet_ctx_next_cpu;
2183                         continue;
2184                 }
2185                 spin_lock(&get_infop->nvmet_ctx_list_lock);
2186 
2187                 /* Just take the entire context list, if there are any */
2188                 if (get_infop->nvmet_ctx_list_cnt) {
2189                         list_splice_init(&get_infop->nvmet_ctx_list,
2190                                     &current_infop->nvmet_ctx_list);
2191                         current_infop->nvmet_ctx_list_cnt =
2192                                 get_infop->nvmet_ctx_list_cnt - 1;
2193                         get_infop->nvmet_ctx_list_cnt = 0;
2194                         spin_unlock(&get_infop->nvmet_ctx_list_lock);
2195 
2196                         current_infop->nvmet_ctx_start_cpu = get_infop;
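                        /* One ctx_buf is removed and returned below; the -1
                         * in the count above accounts for it.
                         */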
2197                         list_remove_head(&current_infop->nvmet_ctx_list,
2198                                          ctx_buf, struct lpfc_nvmet_ctxbuf,
2199                                          list);
2200                         return ctx_buf;
2201                 }
2202 
2203                 /* Otherwise, move on to the next CPU for this MRQ */
2204                 spin_unlock(&get_infop->nvmet_ctx_list_lock);
2205                 get_infop = get_infop->nvmet_ctx_next_cpu;
2206         }
2207 
2208 #endif
2209         /* Nothing found, all contexts for the MRQ are in-flight */
2210         return NULL;
2211 }
2212 
2213 /**
2214  * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
2215  * @phba: pointer to lpfc hba data structure.
2216  * @idx: relative index of MRQ vector
2217  * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
2218  * @isr_timestamp: in jiffies.
2219  * @cqflag: cq processing information regarding workload.
2220  *
2221  * This routine is used for processing the frame data associated with an
2222  * unsolicited NVME FCP command event. It obtains a free receive context
2223  * buffer for the exchange (queueing the command on the io_wait list if
2224  * none is available), initializes the receive context, and passes the
2225  * command to the NVME target transport, either inline or via deferred
2226  * work depending on @cqflag.
2227  **/
2228 static void
2229 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
2230                             uint32_t idx,
2231                             struct rqb_dmabuf *nvmebuf,
2232                             uint64_t isr_timestamp,
2233                             uint8_t cqflag)
2234 {
2235         struct lpfc_nvmet_rcv_ctx *ctxp;
2236         struct lpfc_nvmet_tgtport *tgtp;
2237         struct fc_frame_header *fc_hdr;
2238         struct lpfc_nvmet_ctxbuf *ctx_buf;
2239         struct lpfc_nvmet_ctx_info *current_infop;
2240         uint32_t size, oxid, sid, qno;
2241         unsigned long iflag;
2242         int current_cpu;
2243 
2244         if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
2245                 return;
2246 
2247         ctx_buf = NULL;
2248         if (!nvmebuf || !phba->targetport) {
2249                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2250                                 "6157 NVMET FCP Drop IO\n");
2251                 if (nvmebuf)
2252                         lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2253                 return;
2254         }
2255 
2256         /*
2257          * Get a pointer to the context list for this MRQ based on
2258          * the CPU this MRQ IRQ is associated with. If the CPU association
2259          * changes from our initial assumption, the context list could
2260          * be empty, thus it would need to be replenished with the
2261          * context list from another CPU for this MRQ.
2262          */
2263         current_cpu = raw_smp_processor_id();
2264         current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
2265         spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
2266         if (current_infop->nvmet_ctx_list_cnt) {
2267                 list_remove_head(&current_infop->nvmet_ctx_list,
2268                                  ctx_buf, struct lpfc_nvmet_ctxbuf, list);
2269                 current_infop->nvmet_ctx_list_cnt--;
2270         } else {
2271                 ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
2272         }
2273         spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);
2274 
2275         fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
2276         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
2277         size = nvmebuf->bytes_recv;
2278 
2279 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2280         if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
2281                 if (current_cpu < LPFC_CHECK_CPU_CNT) {
2282                         if (idx != current_cpu)
2283                                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2284                                                 "6703 CPU Check rcv: "
2285                                                 "cpu %d expect %d\n",
2286                                                 current_cpu, idx);
2287                         phba->sli4_hba.hdwq[idx].cpucheck_rcv_io[current_cpu]++;
2288                 }
2289         }
2290 #endif
2291 
2292         lpfc_nvmeio_data(phba, "NVMET FCP  RCV: xri x%x sz %d CPU %02x\n",
2293                          oxid, size, raw_smp_processor_id());
2294 
2295         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2296 
2297         if (!ctx_buf) {
2298                 /* Queue this NVME IO to process later */
2299                 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
2300                 list_add_tail(&nvmebuf->hbuf.list,
2301                               &phba->sli4_hba.lpfc_nvmet_io_wait_list);
2302                 phba->sli4_hba.nvmet_io_wait_cnt++;
2303                 phba->sli4_hba.nvmet_io_wait_total++;
2304                 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
2305                                        iflag);
2306 
2307                 /* Post a brand new DMA buffer to RQ */
2308                 qno = nvmebuf->idx;
2309                 lpfc_post_rq_buffer(
2310                         phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2311                         phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2312 
2313                 atomic_inc(&tgtp->defer_ctx);
2314                 return;
2315         }
2316 
2317         sid = sli4_sid_from_fc_hdr(fc_hdr);
2318 
2319         ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
2320         spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
2321         list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
2322         spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
2323         if (ctxp->state != LPFC_NVMET_STE_FREE) {
2324                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2325                                 "6414 NVMET Context corrupt %d %d oxid x%x\n",
2326                                 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
2327         }
2328         ctxp->wqeq = NULL;
2329         ctxp->txrdy = NULL;
2330         ctxp->offset = 0;
2331         ctxp->phba = phba;
2332         ctxp->size = size;
2333         ctxp->oxid = oxid;
2334         ctxp->sid = sid;
2335         ctxp->idx = idx;
2336         ctxp->state = LPFC_NVMET_STE_RCV;
2337         ctxp->entry_cnt = 1;
2338         ctxp->flag = 0;
2339         ctxp->ctxbuf = ctx_buf;
2340         ctxp->rqb_buffer = (void *)nvmebuf;
2341         ctxp->hdwq = NULL;
2342         spin_lock_init(&ctxp->ctxlock);
2343 
2344 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2345         if (isr_timestamp)
2346                 ctxp->ts_isr_cmd = isr_timestamp;
2347         ctxp->ts_cmd_nvme = 0;
2348         ctxp->ts_nvme_data = 0;
2349         ctxp->ts_data_wqput = 0;
2350         ctxp->ts_isr_data = 0;
2351         ctxp->ts_data_nvme = 0;
2352         ctxp->ts_nvme_status = 0;
2353         ctxp->ts_status_wqput = 0;
2354         ctxp->ts_isr_status = 0;
2355         ctxp->ts_status_nvme = 0;
2356 #endif
2357 
2358         atomic_inc(&tgtp->rcv_fcp_cmd_in);
2359         /* check for cq processing load */
2360         if (!cqflag) {
2361                 lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
2362                 return;
2363         }
2364 
2365         if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
2366                 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2367                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
2368                                 "6325 Unable to queue work for oxid x%x. "
2369                                 "FCP Drop IO [x%x x%x x%x]\n",
2370                                 ctxp->oxid,
2371                                 atomic_read(&tgtp->rcv_fcp_cmd_in),
2372                                 atomic_read(&tgtp->rcv_fcp_cmd_out),
2373                                 atomic_read(&tgtp->xmt_fcp_release));
2374 
2375                 spin_lock_irqsave(&ctxp->ctxlock, iflag);
2376                 lpfc_nvmet_defer_release(phba, ctxp);
2377                 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
2378                 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
2379         }
2380 }
2381 
2382 /**
2383  * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
2384  * @phba: pointer to lpfc hba data structure.
2385  * @pring: pointer to a SLI ring.
2386  * @nvmebuf: pointer to received nvme data structure.
2387  *
2388  * This routine is used to process an unsolicited event received from a SLI
2389  * (Service Level Interface) ring. The actual processing of the data buffer
2390  * associated with the unsolicited event is done by invoking the routine
2391  * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from the
2392  * SLI RQ on which the unsolicited event was received.
2393  **/
2394 void
2395 lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2396                           struct lpfc_iocbq *piocb)
2397 {
2398         struct lpfc_dmabuf *d_buf;
2399         struct hbq_dmabuf *nvmebuf;
2400 
2401         d_buf = piocb->context2;
2402         nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2403 
2404         if (phba->nvmet_support == 0) {
2405                 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
2406                 return;
2407         }
2408         lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
2409 }
2410 
2411 /**
2412  * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
2413  * @phba: pointer to lpfc hba data structure.
2414  * @idx: relative index of MRQ vector
2415  * @nvmebuf: pointer to received nvme data structure.
2416  * @isr_timestamp: in jiffies.
2417  * @cqflag: cq processing information regarding workload.
2418  *
2419  * This routine is used to process an unsolicited event received from a SLI
2420  * (Service Level Interface) ring. The actual processing of the data buffer
2421  * associated with the unsolicited event is done by invoking the routine
2422  * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
2423  * SLI RQ on which the unsolicited event was received.
2424  **/
2425 void
2426 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
2427                            uint32_t idx,
2428                            struct rqb_dmabuf *nvmebuf,
2429                            uint64_t isr_timestamp,
2430                            uint8_t cqflag)
2431 {
2432         if (phba->nvmet_support == 0) {
2433                 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2434                 return;
2435         }
2436         lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag);
2437 }
2438 
2439 /**
2440  * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
2441  * @phba: pointer to a host N_Port data structure.
2442  * @ctxp: Context info for NVME LS Request
2443  * @rspbuf: DMA address of the NVME LS response payload.
2444  * @rspsize: size of the NVME LS response payload.
2445  *
2446  * This routine allocates an lpfc WQE data structure (iocbq) from the
2447  * driver's free-list and prepares it as an XMIT_SEQUENCE64 WQE used
2448  * to transmit an NVME LS response back to the initiator identified
2449  * by the receive context @ctxp. The response payload has already
2450  * been built by the NVME target transport; @rspbuf and @rspsize
2451  * describe its DMA buffer. The routine fills in the Buffer
2452  * Descriptor Entry (BDE) for the response payload and sets the RPI,
2453  * XRI, OX_ID, class and other sequence fields needed to complete
2454  * the exchange on which the LS request was received. The reference
2455  * count on the ndlp is incremented by 1 and the reference to the
2456  * ndlp is put into context1 of the WQE data structure for this WQE
2457  * to hold the ndlp reference for the command's callback function
2458  * to access later.
2459  *
2460  * Return code
2461  *   Pointer to the newly allocated/prepared nvme wqe data structure
2462  *   NULL - when nvme wqe data structure allocation/preparation failed
2463  **/
2464 static struct lpfc_iocbq *
2465 lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
2466                        struct lpfc_nvmet_rcv_ctx *ctxp,
2467                        dma_addr_t rspbuf, uint16_t rspsize)
2468 {
2469         struct lpfc_nodelist *ndlp;
2470         struct lpfc_iocbq *nvmewqe;
2471         union lpfc_wqe128 *wqe;
2472 
2473         if (!lpfc_is_link_up(phba)) {
2474                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2475                                 "6104 NVMET prep LS wqe: link err: "
2476                                 "NPORT x%x oxid:x%x ste %d\n",
2477                                 ctxp->sid, ctxp->oxid, ctxp->state);
2478                 return NULL;
2479         }
2480 
2481         /* Allocate buffer for  command wqe */
2482         nvmewqe = lpfc_sli_get_iocbq(phba);
2483         if (nvmewqe == NULL) {
2484                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2485                                 "6105 NVMET prep LS wqe: No WQE: "
2486                                 "NPORT x%x oxid x%x ste %d\n",
2487                                 ctxp->sid, ctxp->oxid, ctxp->state);
2488                 return NULL;
2489         }
2490 
2491         ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2492         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2493             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2494             (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2495                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2496                                 "6106 NVMET prep LS wqe: No ndlp: "
2497                                 "NPORT x%x oxid x%x ste %d\n",
2498                                 ctxp->sid, ctxp->oxid, ctxp->state);
2499                 goto nvme_wqe_free_wqeq_exit;
2500         }
2501         ctxp->wqeq = nvmewqe;
2502 
2503         /* prevent preparing wqe with NULL ndlp reference */
2504         nvmewqe->context1 = lpfc_nlp_get(ndlp);
2505         if (nvmewqe->context1 == NULL)
2506                 goto nvme_wqe_free_wqeq_exit;
2507         nvmewqe->context2 = ctxp;
2508 
2509         wqe = &nvmewqe->wqe;
2510         memset(wqe, 0, sizeof(union lpfc_wqe));
2511 
2512         /* Words 0 - 2 */
2513         wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2514         wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
2515         wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
2516         wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));
2517 
2518         /* Word 3 */
2519 
2520         /* Word 4 */
2521 
2522         /* Word 5 */
2523         bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
2524         bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
2525         bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
2526         bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
2527         bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
2528 
2529         /* Word 6 */
2530         bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
2531                phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2532         bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);
2533 
2534         /* Word 7 */
2535         bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
2536                CMD_XMIT_SEQUENCE64_WQE);
2537         bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
2538         bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
2539         bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
2540 
2541         /* Word 8 */
2542         wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;
2543 
2544         /* Word 9 */
2545         bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
2546         /* Needs to be set by caller */
2547         bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
2548 
2549         /* Word 10 */
2550         bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
2551         bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
2552         bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
2553                LPFC_WQE_LENLOC_WORD12);
2554         bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
2555 
2556         /* Word 11 */
2557         bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
2558                LPFC_WQE_CQ_ID_DEFAULT);
2559         bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
2560                OTHER_COMMAND);
2561 
2562         /* Word 12 */
2563         wqe->xmit_sequence.xmit_len = rspsize;
2564 
2565         nvmewqe->retry = 1;
2566         nvmewqe->vport = phba->pport;
2567         nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2568         nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
2569 
2570         /* Xmit NVMET response to remote NPORT <did> */
2571         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2572                         "6039 Xmit NVMET LS response to remote "
2573                         "NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
2574                         ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
2575                         rspsize);
2576         return nvmewqe;
2577 
2578 nvme_wqe_free_wqeq_exit:
2579         nvmewqe->context2 = NULL;
2580         nvmewqe->context3 = NULL;
2581         lpfc_sli_release_iocbq(phba, nvmewqe);
2582         return NULL;
2583 }
2584 
2585 
2586 static struct lpfc_iocbq *
2587 lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
2588                         struct lpfc_nvmet_rcv_ctx *ctxp)
2589 {
2590         struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
2591         struct lpfc_nvmet_tgtport *tgtp;
2592         struct sli4_sge *sgl;
2593         struct lpfc_nodelist *ndlp;
2594         struct lpfc_iocbq *nvmewqe;
2595         struct scatterlist *sgel;
2596         union lpfc_wqe128 *wqe;
2597         struct ulp_bde64 *bde;
2598         uint32_t *txrdy;
2599         dma_addr_t physaddr;
2600         int i, cnt;
2601         int do_pbde;
2602         int xc = 1;
2603 
2604         if (!lpfc_is_link_up(phba)) {
2605                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2606                                 "6107 NVMET prep FCP wqe: link err:"
2607                                 "NPORT x%x oxid x%x ste %d\n",
2608                                 ctxp->sid, ctxp->oxid, ctxp->state);
2609                 return NULL;
2610         }
2611 
2612         ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2613         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2614             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2615              (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2616                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2617                                 "6108 NVMET prep FCP wqe: no ndlp: "
2618                                 "NPORT x%x oxid x%x ste %d\n",
2619                                 ctxp->sid, ctxp->oxid, ctxp->state);
2620                 return NULL;
2621         }
2622 
2623         if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
2624                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2625                                 "6109 NVMET prep FCP wqe: seg cnt err: "
2626                                 "NPORT x%x oxid x%x ste %d cnt %d\n",
2627                                 ctxp->sid, ctxp->oxid, ctxp->state,
2628                                 phba->cfg_nvme_seg_cnt);
2629                 return NULL;
2630         }
2631 
2632         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2633         nvmewqe = ctxp->wqeq;
2634         if (nvmewqe == NULL) {
2635                 /* Use the iocbq pre-allocated with the receive context */
2636                 nvmewqe = ctxp->ctxbuf->iocbq;
2637                 if (nvmewqe == NULL) {
2638                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2639                                         "6110 NVMET prep FCP wqe: No "
2640                                         "WQE: NPORT x%x oxid x%x ste %d\n",
2641                                         ctxp->sid, ctxp->oxid, ctxp->state);
2642                         return NULL;
2643                 }
2644                 ctxp->wqeq = nvmewqe;
2645                 xc = 0; /* create new XRI */
2646                 nvmewqe->sli4_lxritag = NO_XRI;
2647                 nvmewqe->sli4_xritag = NO_XRI;
2648         }
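             /* xc == 1 means this I/O context already owns an XRI from a
              * previous WQE for the same command; it is cleared above when
              * a fresh XRI must be created, and the per-op code below
              * copies it into the WQE XC bit.
              */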
2649 
2650         /* Sanity check */
2651         if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
2652             (ctxp->entry_cnt == 1)) ||
2653             (ctxp->state == LPFC_NVMET_STE_DATA)) {
2654                 wqe = &nvmewqe->wqe;
2655         } else {
2656                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2657                                 "6111 Wrong state NVMET FCP: %d  cnt %d\n",
2658                                 ctxp->state, ctxp->entry_cnt);
2659                 return NULL;
2660         }
2661 
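             /* The SGL that came with the receive context is reused for
              * this WQE: the first two SGEs are filled in per-op below
              * (two SKIPs for TSEND, a TXRDY plus a SKIP for TRECEIVE),
              * and the data SGEs are appended from the request
              * scatterlist after the switch.
              */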
2662         sgl  = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
2663         switch (rsp->op) {
2664         case NVMET_FCOP_READDATA:
2665         case NVMET_FCOP_READDATA_RSP:
2666                 /* From the tsend template, initialize words 7 - 11 */
2667                 memcpy(&wqe->words[7],
2668                        &lpfc_tsend_cmd_template.words[7],
2669                        sizeof(uint32_t) * 5);
2670 
2671                 /* Words 0 - 2 : The first sg segment */
2672                 sgel = &rsp->sg[0];
2673                 physaddr = sg_dma_address(sgel);
2674                 wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2675                 wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
2676                 wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
2677                 wqe->fcp_tsend.bde.addrHigh =
2678                         cpu_to_le32(putPaddrHigh(physaddr));
2679 
2680                 /* Word 3 */
2681                 wqe->fcp_tsend.payload_offset_len = 0;
2682 
2683                 /* Word 4 */
2684                 wqe->fcp_tsend.relative_offset = ctxp->offset;
2685 
2686                 /* Word 5 */
2687                 wqe->fcp_tsend.reserved = 0;
2688 
2689                 /* Word 6 */
2690                 bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
2691                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2692                 bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
2693                        nvmewqe->sli4_xritag);
2694 
2695                 /* Word 7 - set ar later */
2696 
2697                 /* Word 8 */
2698                 wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
2699 
2700                 /* Word 9 */
2701                 bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
2702                 bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
2703 
2704                 /* Word 10 - set wqes later, in template xc=1 */
2705                 if (!xc)
2706                         bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);
2707 
2708                 /* Word 11 - set sup, irsp, irsplen later */
2709                 do_pbde = 0;
2710 
2711                 /* Word 12 */
2712                 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2713 
2714                 /* Setup 2 SKIP SGEs */
2715                 sgl->addr_hi = 0;
2716                 sgl->addr_lo = 0;
2717                 sgl->word2 = 0;
2718                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2719                 sgl->word2 = cpu_to_le32(sgl->word2);
2720                 sgl->sge_len = 0;
2721                 sgl++;
2722                 sgl->addr_hi = 0;
2723                 sgl->addr_lo = 0;
2724                 sgl->word2 = 0;
2725                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2726                 sgl->word2 = cpu_to_le32(sgl->word2);
2727                 sgl->sge_len = 0;
2728                 sgl++;
2729                 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
2730                         atomic_inc(&tgtp->xmt_fcp_read_rsp);
2731 
2732                         /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2733 
2734                         if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
2735                                 if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
2736                                         bf_set(wqe_sup,
2737                                                &wqe->fcp_tsend.wqe_com, 1);
2738                         } else {
2739                                 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
2740                                 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
2741                                 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
2742                                        ((rsp->rsplen >> 2) - 1));
2743                                 memcpy(&wqe->words[16], rsp->rspaddr,
2744                                        rsp->rsplen);
2745                         }
2746                 } else {
2747                         atomic_inc(&tgtp->xmt_fcp_read);
2748 
2749                         /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2750                         bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
2751                 }
2752                 break;
2753 
2754         case NVMET_FCOP_WRITEDATA:
2755                 /* From the treceive template, initialize words 3 - 11 */
2756                 memcpy(&wqe->words[3],
2757                        &lpfc_treceive_cmd_template.words[3],
2758                        sizeof(uint32_t) * 9);
2759 
2760                 /* Words 0 - 2 : The first sg segment */
2761                 txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
2762                                        GFP_KERNEL, &physaddr);
2763                 if (!txrdy) {
2764                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2765                                         "6041 Bad txrdy buffer: oxid x%x\n",
2766                                         ctxp->oxid);
2767                         return NULL;
2768                 }
2769                 ctxp->txrdy = txrdy;
2770                 ctxp->txrdy_phys = physaddr;
2771                 wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2772                 wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
2773                 wqe->fcp_treceive.bde.addrLow =
2774                         cpu_to_le32(putPaddrLow(physaddr));
2775                 wqe->fcp_treceive.bde.addrHigh =
2776                         cpu_to_le32(putPaddrHigh(physaddr));
2777 
2778                 /* Word 4 */
2779                 wqe->fcp_treceive.relative_offset = ctxp->offset;
2780 
2781                 /* Word 6 */
2782                 bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
2783                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2784                 bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
2785                        nvmewqe->sli4_xritag);
2786 
2787                 /* Word 7 */
2788 
2789                 /* Word 8 */
2790                 wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
2791 
2792                 /* Word 9 */
2793                 bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
2794                 bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
2795 
2796                 /* Word 10 - in template xc=1 */
2797                 if (!xc)
2798                         bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
2799 
2800                 /* Word 11 - set pbde later */
2801                 if (phba->cfg_enable_pbde) {
2802                         do_pbde = 1;
2803                 } else {
2804                         bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
2805                         do_pbde = 0;
2806                 }
2807 
2808                 /* Word 12 */
2809                 wqe->fcp_treceive.fcp_data_len = rsp->transfer_length;
2810 
2811                 /* Setup 1 TXRDY and 1 SKIP SGE */
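                     /* TXRDY is the FCP Transfer Ready (XFER_RDY) payload:
                      * word 0 holds the relative offset, word 1 the burst
                      * length (big endian) and word 2 is reserved.
                      */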
2812                 txrdy[0] = 0;
2813                 txrdy[1] = cpu_to_be32(rsp->transfer_length);
2814                 txrdy[2] = 0;
2815 
2816                 sgl->addr_hi = putPaddrHigh(physaddr);
2817                 sgl->addr_lo = putPaddrLow(physaddr);
2818                 sgl->word2 = 0;
2819                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2820                 sgl->word2 = cpu_to_le32(sgl->word2);
2821                 sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
2822                 sgl++;
2823                 sgl->addr_hi = 0;
2824                 sgl->addr_lo = 0;
2825                 sgl->word2 = 0;
2826                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2827                 sgl->word2 = cpu_to_le32(sgl->word2);
2828                 sgl->sge_len = 0;
2829                 sgl++;
2830                 atomic_inc(&tgtp->xmt_fcp_write);
2831                 break;
2832 
2833         case NVMET_FCOP_RSP:
2834                 /* From the trsp template, initialize words 4 - 11 */
2835                 memcpy(&wqe->words[4],
2836                        &lpfc_trsp_cmd_template.words[4],
2837                        sizeof(uint32_t) * 8);
2838 
2839                 /* Words 0 - 2 */
2840                 physaddr = rsp->rspdma;
2841                 wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2842                 wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
2843                 wqe->fcp_trsp.bde.addrLow =
2844                         cpu_to_le32(putPaddrLow(physaddr));
2845                 wqe->fcp_trsp.bde.addrHigh =
2846                         cpu_to_le32(putPaddrHigh(physaddr));
2847 
2848                 /* Word 3 */
2849                 wqe->fcp_trsp.response_len = rsp->rsplen;
2850 
2851                 /* Word 6 */
2852                 bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
2853                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2854                 bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
2855                        nvmewqe->sli4_xritag);
2856 
2857                 /* Word 7 */
2858 
2859                 /* Word 8 */
2860                 wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
2861 
2862                 /* Word 9 */
2863                 bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
2864                 bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
2865 
2866                 /* Word 10 */
2867                 if (xc)
2868                         bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);
2869 
2870                 /* Word 11 */
2871                 /* In template wqes=0 irsp=0 irsplen=0 - good response */
2872                 if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
2873                         /* Bad response - embed it */
2874                         bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
2875                         bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
2876                         bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
2877                                ((rsp->rsplen >> 2) - 1));
2878                         memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
2879                 }
2880                 do_pbde = 0;
2881 
2882                 /* Word 12 */
2883                 wqe->fcp_trsp.rsvd_12_15[0] = 0;
2884 
2885                 /* Use rspbuf, NOT sg list */
2886                 rsp->sg_cnt = 0;
2887                 sgl->word2 = 0;
2888                 atomic_inc(&tgtp->xmt_fcp_rsp);
2889                 break;
2890 
2891         default:
2892                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2893                                 "6064 Unknown Rsp Op %d\n",
2894                                 rsp->op);
2895                 return NULL;
2896         }
2897 
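             /* Finish the fields common to all ops, then walk the request
              * scatterlist to build the data SGEs.  ctxp->offset accumulates
              * the relative offset across successive data WQEs for the same
              * exchange.
              */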
2898         nvmewqe->retry = 1;
2899         nvmewqe->vport = phba->pport;
2900         nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2901         nvmewqe->context1 = ndlp;
2902 
2903         for_each_sg(rsp->sg, sgel, rsp->sg_cnt, i) {
2904                 physaddr = sg_dma_address(sgel);
2905                 cnt = sg_dma_len(sgel);
2906                 sgl->addr_hi = putPaddrHigh(physaddr);
2907                 sgl->addr_lo = putPaddrLow(physaddr);
2908                 sgl->word2 = 0;
2909                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2910                 bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
2911                 if ((i+1) == rsp->sg_cnt)
2912                         bf_set(lpfc_sli4_sge_last, sgl, 1);
2913                 sgl->word2 = cpu_to_le32(sgl->word2);
2914                 sgl->sge_len = cpu_to_le32(cnt);
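                     /* When PBDE is enabled, mirror the first data SGE into
                      * WQE words 13-15 as a Payload BDE; otherwise those
                      * words are cleared.
                      */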
2915                 if (i == 0) {
2916                         bde = (struct ulp_bde64 *)&wqe->words[13];
2917                         if (do_pbde) {
2918                                 /* Words 13-15  (PBDE) */
2919                                 bde->addrLow = sgl->addr_lo;
2920                                 bde->addrHigh = sgl->addr_hi;
2921                                 bde->tus.f.bdeSize =
2922                                         le32_to_cpu(sgl->sge_len);
2923                                 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2924                                 bde->tus.w = cpu_to_le32(bde->tus.w);
2925                         } else {
2926                                 memset(bde, 0, sizeof(struct ulp_bde64));
2927                         }
2928                 }
2929                 sgl++;
2930                 ctxp->offset += cnt;
2931         }
2932         ctxp->state = LPFC_NVMET_STE_DATA;
2933         ctxp->entry_cnt++;
2934         return nvmewqe;
2935 }
2936 
2937 /**
2938  * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
2939  * @phba: Pointer to HBA context object.
2940  * @cmdwqe: Pointer to driver command WQE object.
2941  * @wcqe: Pointer to driver response CQE object.
2942  *
2943  * The function is called from the SLI ring event handler with no
2944  * lock held. It is the completion handler for an NVME ABTS issued for
2945  * an FCP command and releases the resources used for the abort.
2946  **/
2947 static void
2948 lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2949                              struct lpfc_wcqe_complete *wcqe)
2950 {
2951         struct lpfc_nvmet_rcv_ctx *ctxp;
2952         struct lpfc_nvmet_tgtport *tgtp;
2953         uint32_t result;
2954         unsigned long flags;
2955         bool released = false;
2956 
2957         ctxp = cmdwqe->context2;
2958         result = wcqe->parameter;
2959 
2960         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2961         if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2962                 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
2963 
2964         spin_lock_irqsave(&ctxp->ctxlock, flags);
2965         ctxp->state = LPFC_NVMET_STE_DONE;
2966 
2967         /* Check if we already received a free context call
2968          * and we have completed processing an abort situation.
2969          */
2970         if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
2971             !(ctxp->flag & LPFC_NVMET_XBUSY)) {
2972                 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
2973                 list_del_init(&ctxp->list);
2974                 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
2975                 released = true;
2976         }
2977         ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2978         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2979         atomic_inc(&tgtp->xmt_abort_rsp);
2980 
2981         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2982                         "6165 ABORT cmpl: oxid x%x flg x%x (%d) "
2983                         "WCQE: %08x %08x %08x %08x\n",
2984                         ctxp->oxid, ctxp->flag, released,
2985                         wcqe->word0, wcqe->total_data_placed,
2986                         result, wcqe->word3);
2987 
2988         cmdwqe->context2 = NULL;
2989         cmdwqe->context3 = NULL;
2990         /*
2991          * if transport has released ctx, then can reuse it. Otherwise,
2992          * will be recycled by transport release call.
2993          */
2994         if (released)
2995                 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
2996 
2997         /* This is the iocbq for the abort, not the command */
2998         lpfc_sli_release_iocbq(phba, cmdwqe);
2999 
3000         /* Since iaab/iaar are NOT set, there is no work left.
3001          * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
3002          * should have been called already.
3003          */
3004 }
3005 
3006 /**
3007  * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
3008  * @phba: Pointer to HBA context object.
3009  * @cmdwqe: Pointer to driver command WQE object.
3010  * @wcqe: Pointer to driver response CQE object.
3011  *
3012  * The function is called from the SLI ring event handler with no
3013  * lock held. It is the completion handler for an NVME ABTS issued for
3014  * an FCP command and releases the resources used for the abort.
3015  **/
3016 static void
3017 lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
3018                                struct lpfc_wcqe_complete *wcqe)
3019 {
3020         struct lpfc_nvmet_rcv_ctx *ctxp;
3021         struct lpfc_nvmet_tgtport *tgtp;
3022         unsigned long flags;
3023         uint32_t result;
3024         bool released = false;
3025 
3026         ctxp = cmdwqe->context2;
3027         result = wcqe->parameter;
3028 
3029         if (!ctxp) {
3030                 /* if context is clear, the related I/O already completed */
3031                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3032                                 "6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
3033                                 wcqe->word0, wcqe->total_data_placed,
3034                                 result, wcqe->word3);
3035                 return;
3036         }
3037 
3038         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3039         spin_lock_irqsave(&ctxp->ctxlock, flags);
3040         if (ctxp->flag & LPFC_NVMET_ABORT_OP)
3041                 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
3042 
3043         /* Sanity check */
3044         if (ctxp->state != LPFC_NVMET_STE_ABORT) {
3045                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3046                                 "6112 ABTS Wrong state:%d oxid x%x\n",
3047                                 ctxp->state, ctxp->oxid);
3048         }
3049 
3050         /* Check if we already received a free context call
3051          * and we have completed processing an abort situation.
3052          */
3053         ctxp->state = LPFC_NVMET_STE_DONE;
3054         if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
3055             !(ctxp->flag & LPFC_NVMET_XBUSY)) {
3056                 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3057                 list_del_init(&ctxp->list);
3058                 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3059                 released = true;
3060         }
3061         ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3062         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3063         atomic_inc(&tgtp->xmt_abort_rsp);
3064 
3065         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3066                         "6316 ABTS cmpl oxid x%x flg x%x (%x) "
3067                         "WCQE: %08x %08x %08x %08x\n",
3068                         ctxp->oxid, ctxp->flag, released,
3069                         wcqe->word0, wcqe->total_data_placed,
3070                         result, wcqe->word3);
3071 
3072         cmdwqe->context2 = NULL;
3073         cmdwqe->context3 = NULL;
3074         /*
3075          * if transport has released ctx, then can reuse it. Otherwise,
3076          * will be recycled by transport release call.
3077          */
3078         if (released)
3079                 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3080 
3081         /* Since iaab/iaar are NOT set, there is no work left.
3082          * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
3083          * should have been called already.
3084          */
3085 }
3086 
3087 /**
3088  * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
3089  * @phba: Pointer to HBA context object.
3090  * @cmdwqe: Pointer to driver command WQE object.
3091  * @wcqe: Pointer to driver response CQE object.
3092  *
3093  * The function is called from the SLI ring event handler with no
3094  * lock held. It is the completion handler for an NVME ABTS issued for
3095  * an LS command and frees the resources used for the command.
3096  **/
3097 static void
3098 lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
3099                             struct lpfc_wcqe_complete *wcqe)
3100 {
3101         struct lpfc_nvmet_rcv_ctx *ctxp;
3102         struct lpfc_nvmet_tgtport *tgtp;
3103         uint32_t result;
3104 
3105         ctxp = cmdwqe->context2;
3106         result = wcqe->parameter;
3107 
3108         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3109         atomic_inc(&tgtp->xmt_ls_abort_cmpl);
3110 
3111         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3112                         "6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n",
3113                         ctxp, wcqe->word0, wcqe->total_data_placed,
3114                         result, wcqe->word3);
3115 
3116         if (!ctxp) {
3117                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3118                                 "6415 NVMET LS Abort No ctx: WCQE: "
3119                                  "%08x %08x %08x %08x\n",
3120                                 wcqe->word0, wcqe->total_data_placed,
3121                                 result, wcqe->word3);
3122 
3123                 lpfc_sli_release_iocbq(phba, cmdwqe);
3124                 return;
3125         }
3126 
3127         if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
3128                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3129                                 "6416 NVMET LS abort cmpl state mismatch: "
3130                                 "oxid x%x: %d %d\n",
3131                                 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3132         }
3133 
3134         cmdwqe->context2 = NULL;
3135         cmdwqe->context3 = NULL;
3136         lpfc_sli_release_iocbq(phba, cmdwqe);
3137         kfree(ctxp);
3138 }
3139 
3140 static int
3141 lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
3142                              struct lpfc_nvmet_rcv_ctx *ctxp,
3143                              uint32_t sid, uint16_t xri)
3144 {
3145         struct lpfc_nvmet_tgtport *tgtp;
3146         struct lpfc_iocbq *abts_wqeq;
3147         union lpfc_wqe128 *wqe_abts;
3148         struct lpfc_nodelist *ndlp;
3149 
3150         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3151                         "6067 ABTS: sid %x xri x%x/x%x\n",
3152                         sid, xri, ctxp->wqeq->sli4_xritag);
3153 
3154         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3155 
3156         ndlp = lpfc_findnode_did(phba->pport, sid);
3157         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
3158             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3159             (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3160                 atomic_inc(&tgtp->xmt_abort_rsp_error);
3161                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3162                                 "6134 Drop ABTS - wrong NDLP state x%x.\n",
3163                                 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
3164 
3165                 /* No failure to an ABTS request. */
3166                 return 0;
3167         }
3168 
3169         abts_wqeq = ctxp->wqeq;
3170         wqe_abts = &abts_wqeq->wqe;
3171 
3172         /*
3173          * Since we zero the whole WQE, we need to ensure we set the WQE fields
3174          * that were initialized in lpfc_sli4_nvmet_alloc.
3175          */
3176         memset(wqe_abts, 0, sizeof(union lpfc_wqe));
3177 
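             /* The abort goes on the wire as a BLS BA_ABTS frame built as an
              * XMIT_SEQUENCE64 WQE; contrast with
              * lpfc_nvmet_sol_fcp_issue_abort(), which aborts a driver-issued
              * WQE with an ABORT_XRI command.
              */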
3178         /* Word 5 */
3179         bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
3180         bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
3181         bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
3182         bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
3183         bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
3184 
3185         /* Word 6 */
3186         bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
3187                phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
3188         bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
3189                abts_wqeq->sli4_xritag);
3190 
3191         /* Word 7 */
3192         bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
3193                CMD_XMIT_SEQUENCE64_WQE);
3194         bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
3195         bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
3196         bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);
3197 
3198         /* Word 8 */
3199         wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;
3200 
3201         /* Word 9 */
3202         bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
3203         /* Needs to be set by caller */
3204         bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
3205 
3206         /* Word 10 */
3207         bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
3208         bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
3209         bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
3210                LPFC_WQE_LENLOC_WORD12);
3211         bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
3212         bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);
3213 
3214         /* Word 11 */
3215         bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
3216                LPFC_WQE_CQ_ID_DEFAULT);
3217         bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
3218                OTHER_COMMAND);
3219 
3220         abts_wqeq->vport = phba->pport;
3221         abts_wqeq->context1 = ndlp;
3222         abts_wqeq->context2 = ctxp;
3223         abts_wqeq->context3 = NULL;
3224         abts_wqeq->rsvd2 = 0;
3225         /* hba_wqidx should already be setup from command we are aborting */
3226         abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
3227         abts_wqeq->iocb.ulpLe = 1;
3228 
3229         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3230                         "6069 Issue ABTS to xri x%x reqtag x%x\n",
3231                         xri, abts_wqeq->iotag);
3232         return 1;
3233 }
3234 
3235 static int
3236 lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
3237                                struct lpfc_nvmet_rcv_ctx *ctxp,
3238                                uint32_t sid, uint16_t xri)
3239 {
3240         struct lpfc_nvmet_tgtport *tgtp;
3241         struct lpfc_iocbq *abts_wqeq;
3242         union lpfc_wqe128 *abts_wqe;
3243         struct lpfc_nodelist *ndlp;
3244         unsigned long flags;
3245         int rc;
3246 
3247         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3248         if (!ctxp->wqeq) {
3249                 ctxp->wqeq = ctxp->ctxbuf->iocbq;
3250                 ctxp->wqeq->hba_wqidx = 0;
3251         }
3252 
3253         ndlp = lpfc_findnode_did(phba->pport, sid);
3254         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
3255             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3256             (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3257                 atomic_inc(&tgtp->xmt_abort_rsp_error);
3258                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3259                                 "6160 Drop ABORT - wrong NDLP state x%x.\n",
3260                                 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
3261 
3262                 /* No failure to an ABTS request. */
3263                 spin_lock_irqsave(&ctxp->ctxlock, flags);
3264                 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3265                 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3266                 return 0;
3267         }
3268 
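             /* The command's own iocbq may still be outstanding, so a
              * separate iocbq is taken from the general pool to carry the
              * ABORT_XRI WQE.
              */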
3269         /* Issue ABTS for this WQE based on iotag */
3270         ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
3271         spin_lock_irqsave(&ctxp->ctxlock, flags);
3272         if (!ctxp->abort_wqeq) {
3273                 atomic_inc(&tgtp->xmt_abort_rsp_error);
3274                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3275                                 "6161 ABORT failed: No wqeqs: "
3276                                 "xri: x%x\n", ctxp->oxid);
3277                 /* No failure to an ABTS request. */
3278                 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3279                 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3280                 return 0;
3281         }
3282         abts_wqeq = ctxp->abort_wqeq;
3283         abts_wqe = &abts_wqeq->wqe;
3284         ctxp->state = LPFC_NVMET_STE_ABORT;
3285         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3286 
3287         /* Log the ABORT request before handing it to the adapter. */
3288         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3289                         "6162 ABORT Request to rport DID x%06x "
3290                         "for xri x%x x%x\n",
3291                         ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
3292 
3293         /* If the hba is getting reset, this flag is set.  It is
3294          * cleared when the reset is complete and rings reestablished.
3295          */
3296         spin_lock_irqsave(&phba->hbalock, flags);
3297         /* driver queued commands are in process of being flushed */
3298         if (phba->hba_flag & HBA_IOQ_FLUSH) {
3299                 spin_unlock_irqrestore(&phba->hbalock, flags);
3300                 atomic_inc(&tgtp->xmt_abort_rsp_error);
3301                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
3302                                 "6163 Driver in reset cleanup - flushing "
3303                                 "NVME Req now. hba_flag x%x oxid x%x\n",
3304                                 phba->hba_flag, ctxp->oxid);
3305                 lpfc_sli_release_iocbq(phba, abts_wqeq);
3306                 spin_lock_irqsave(&ctxp->ctxlock, flags);
3307                 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3308                 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3309                 return 0;
3310         }
3311 
3312         /* Outstanding abort is in progress */
3313         if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
3314                 spin_unlock_irqrestore(&phba->hbalock, flags);
3315                 atomic_inc(&tgtp->xmt_abort_rsp_error);
3316                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
3317                                 "6164 Outstanding NVME I/O Abort Request "
3318                                 "still pending on oxid x%x\n",
3319                                 ctxp->oxid);
3320                 lpfc_sli_release_iocbq(phba, abts_wqeq);
3321                 spin_lock_irqsave(&ctxp->ctxlock, flags);
3322                 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3323                 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3324                 return 0;
3325         }
3326 
3327         /* Ready - mark outstanding as aborted by driver. */
3328         abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
3329 
3330         /* WQEs are reused.  Clear stale data and set key fields to
3331          * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
3332          */
3333         memset(abts_wqe, 0, sizeof(*abts_wqe));
3334 
3335         /* word 3 */
3336         bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
3337 
3338         /* word 7 */
3339         bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
3340         bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
3341 
3342         /* word 8 - tell the FW to abort the IO associated with this
3343          * outstanding exchange ID.
3344          */
3345         abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;
3346 
3347         /* word 9 - this is the iotag for the abts_wqe completion. */
3348         bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
3349                abts_wqeq->iotag);
3350 
3351         /* word 10 */
3352         bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
3353         bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
3354 
3355         /* word 11 */
3356         bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
3357         bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
3358         bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
3359 
3360         /* ABTS WQE must go to the same WQ as the WQE to be aborted */
3361         abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
3362         abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
3363         abts_wqeq->iocb_cmpl = NULL;
3364         abts_wqeq->iocb_flag |= LPFC_IO_NVME;
3365         abts_wqeq->context2 = ctxp;
3366         abts_wqeq->vport = phba->pport;
3367         if (!ctxp->hdwq)
3368                 ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3369 
3370         rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3371         spin_unlock_irqrestore(&phba->hbalock, flags);
3372         if (rc == WQE_SUCCESS) {
3373                 atomic_inc(&tgtp->xmt_abort_sol);
3374                 return 0;
3375         }
3376 
3377         atomic_inc(&tgtp->xmt_abort_rsp_error);
3378         spin_lock_irqsave(&ctxp->ctxlock, flags);
3379         ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3380         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3381         lpfc_sli_release_iocbq(phba, abts_wqeq);
3382         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3383                         "6166 Failed ABORT issue_wqe with status x%x "
3384                         "for oxid x%x.\n",
3385                         rc, ctxp->oxid);
3386         return 1;
3387 }
3388 
3389 static int
3390 lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
3391                                  struct lpfc_nvmet_rcv_ctx *ctxp,
3392                                  uint32_t sid, uint16_t xri)
3393 {
3394         struct lpfc_nvmet_tgtport *tgtp;
3395         struct lpfc_iocbq *abts_wqeq;
3396         unsigned long flags;
3397         bool released = false;
3398         int rc;
3399 
3400         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3401         if (!ctxp->wqeq) {
3402                 ctxp->wqeq = ctxp->ctxbuf->iocbq;
3403                 ctxp->wqeq->hba_wqidx = 0;
3404         }
3405 
3406         if (ctxp->state == LPFC_NVMET_STE_FREE) {
3407                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3408                                 "6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
3409                                 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
3410                 rc = WQE_BUSY;
3411                 goto aerr;
3412         }
3413         ctxp->state = LPFC_NVMET_STE_ABORT;
3414         ctxp->entry_cnt++;
3415         rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
3416         if (rc == 0)
3417                 goto aerr;
3418 
3419         spin_lock_irqsave(&phba->hbalock, flags);
3420         abts_wqeq = ctxp->wqeq;
3421         abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
3422         abts_wqeq->iocb_cmpl = NULL;
3423         abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
3424         if (!ctxp->hdwq)
3425                 ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3426 
3427         rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3428         spin_unlock_irqrestore(&phba->hbalock, flags);
3429         if (rc == WQE_SUCCESS) {
3430                 return 0;
3431         }
3432 
3433 aerr:
3434         spin_lock_irqsave(&ctxp->ctxlock, flags);
3435         if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
3436                 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3437                 list_del_init(&ctxp->list);
3438                 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3439                 released = true;
3440         }
3441         ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS);
3442         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3443 
3444         atomic_inc(&tgtp->xmt_abort_rsp_error);
3445         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3446                         "6135 Failed to Issue ABTS for oxid x%x. Status x%x "
3447                         "(%x)\n",
3448                         ctxp->oxid, rc, released);
3449         if (released)
3450                 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3451         return 1;
3452 }
3453 
3454 static int
3455 lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
3456                                 struct lpfc_nvmet_rcv_ctx *ctxp,
3457                                 uint32_t sid, uint16_t xri)
3458 {
3459         struct lpfc_nvmet_tgtport *tgtp;
3460         struct lpfc_iocbq *abts_wqeq;
3461         unsigned long flags;
3462         int rc;
3463 
3464         if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
3465             (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
3466                 ctxp->state = LPFC_NVMET_STE_LS_ABORT;
3467                 ctxp->entry_cnt++;
3468         } else {
3469                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3470                                 "6418 NVMET LS abort state mismatch "
3471                                 "IO x%x: %d %d\n",
3472                                 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3473                 ctxp->state = LPFC_NVMET_STE_LS_ABORT;
3474         }
3475 
3476         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3477         if (!ctxp->wqeq) {
3478                 /* Issue ABTS for this WQE based on iotag */
3479                 ctxp->wqeq = lpfc_sli_get_iocbq(phba);
3480                 if (!ctxp->wqeq) {
3481                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3482                                         "6068 Abort failed: No wqeqs: "
3483                                         "xri: x%x\n", xri);
3484                         /* No failure to an ABTS request. */
3485                         kfree(ctxp);
3486                         return 0;
3487                 }
3488         }
3489         abts_wqeq = ctxp->wqeq;
3490 
3491         if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
3492                 rc = WQE_BUSY;
3493                 goto out;
3494         }
3495 
3496         spin_lock_irqsave(&phba->hbalock, flags);
3497         abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
3498         abts_wqeq->iocb_cmpl = NULL;
3499         abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
3500         rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3501         spin_unlock_irqrestore(&phba->hbalock, flags);
3502         if (rc == WQE_SUCCESS) {
3503                 atomic_inc(&tgtp->xmt_abort_unsol);
3504                 return 0;
3505         }
3506 out:
3507         atomic_inc(&tgtp->xmt_abort_rsp_error);
3508         abts_wqeq->context2 = NULL;
3509         abts_wqeq->context3 = NULL;
3510         lpfc_sli_release_iocbq(phba, abts_wqeq);
3511         kfree(ctxp);
3512         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3513                         "6056 Failed to Issue ABTS. Status x%x\n", rc);
3514         return 0;
3515 }

/* [<][>][^][v][top][bottom][index][help] */