drivers/scsi/lpfc/lpfc_init.c

DEFINITIONS

This source file includes the following definitions.
  1. lpfc_config_port_prep
  2. lpfc_config_async_cmpl
  3. lpfc_dump_wakeup_param_cmpl
  4. lpfc_update_vport_wwn
  5. lpfc_config_port_post
  6. lpfc_hba_init_link
  7. lpfc_hba_init_link_fc_topology
  8. lpfc_hba_down_link
  9. lpfc_hba_down_prep
  10. lpfc_sli4_free_sp_events
  11. lpfc_hba_free_post_buf
  12. lpfc_hba_clean_txcmplq
  13. lpfc_hba_down_post_s3
  14. lpfc_hba_down_post_s4
  15. lpfc_hba_down_post
  16. lpfc_hb_timeout
  17. lpfc_rrq_timeout
  18. lpfc_hb_mbox_cmpl
  19. lpfc_hb_eq_delay_work
  20. lpfc_hb_mxp_handler
  21. lpfc_hb_timeout_handler
  22. lpfc_offline_eratt
  23. lpfc_sli4_offline_eratt
  24. lpfc_handle_deferred_eratt
  25. lpfc_board_errevt_to_mgmt
  26. lpfc_handle_eratt_s3
  27. lpfc_sli4_port_sta_fn_reset
  28. lpfc_handle_eratt_s4
  29. lpfc_handle_eratt
  30. lpfc_handle_latt
  31. lpfc_parse_vpd
  32. lpfc_get_hba_model_desc
  33. lpfc_post_buffer
  34. lpfc_post_rcv_buf
  35. lpfc_sha_init
  36. lpfc_sha_iterate
  37. lpfc_challenge_key
  38. lpfc_hba_init
  39. lpfc_cleanup
  40. lpfc_stop_vport_timers
  41. __lpfc_sli4_stop_fcf_redisc_wait_timer
  42. lpfc_sli4_stop_fcf_redisc_wait_timer
  43. lpfc_stop_hba_timers
  44. lpfc_block_mgmt_io
  45. lpfc_sli4_node_prep
  46. lpfc_create_expedite_pool
  47. lpfc_destroy_expedite_pool
  48. lpfc_create_multixri_pools
  49. lpfc_destroy_multixri_pools
  50. lpfc_online
  51. lpfc_unblock_mgmt_io
  52. lpfc_offline_prep
  53. lpfc_offline
  54. lpfc_scsi_free
  55. lpfc_io_free
  56. lpfc_sli4_els_sgl_update
  57. lpfc_sli4_nvmet_sgl_update
  58. lpfc_io_buf_flush
  59. lpfc_io_buf_replenish
  60. lpfc_sli4_io_sgl_update
  61. lpfc_new_io_buf
  62. lpfc_get_wwpn
  63. lpfc_create_port
  64. destroy_port
  65. lpfc_get_instance
  66. lpfc_scan_finished
  67. lpfc_host_supported_speeds_set
  68. lpfc_host_attrib_init
  69. lpfc_stop_port_s3
  70. lpfc_stop_port_s4
  71. lpfc_stop_port
  72. lpfc_fcf_redisc_wait_start_timer
  73. lpfc_sli4_fcf_redisc_wait_tmo
  74. lpfc_sli4_parse_latt_fault
  75. lpfc_sli4_parse_latt_type
  76. lpfc_sli_port_speed_get
  77. lpfc_sli4_port_speed_parse
  78. lpfc_sli4_async_link_evt
  79. lpfc_async_link_speed_to_read_top
  80. lpfc_update_trunk_link_status
  81. lpfc_sli4_async_fc_evt
  82. lpfc_sli4_async_sli_evt
  83. lpfc_sli4_perform_vport_cvl
  84. lpfc_sli4_perform_all_vport_cvl
  85. lpfc_sli4_async_fip_evt
  86. lpfc_sli4_async_dcbx_evt
  87. lpfc_sli4_async_grp5_evt
  88. lpfc_sli4_async_event_proc
  89. lpfc_sli4_fcf_redisc_event_proc
  90. lpfc_api_table_setup
  91. lpfc_log_intr_mode
  92. lpfc_enable_pci_dev
  93. lpfc_disable_pci_dev
  94. lpfc_reset_hba
  95. lpfc_sli_sriov_nr_virtfn_get
  96. lpfc_sli_probe_sriov_nr_virtfn
  97. lpfc_setup_driver_resource_phase1
  98. lpfc_sli_driver_resource_setup
  99. lpfc_sli_driver_resource_unset
  100. lpfc_sli4_driver_resource_setup
  101. lpfc_sli4_driver_resource_unset
  102. lpfc_init_api_table_setup
  103. lpfc_setup_driver_resource_phase2
  104. lpfc_unset_driver_resource_phase2
  105. lpfc_free_iocb_list
  106. lpfc_init_iocb_list
  107. lpfc_free_sgl_list
  108. lpfc_free_els_sgl_list
  109. lpfc_free_nvmet_sgl_list
  110. lpfc_init_active_sgl_array
  111. lpfc_free_active_sgl
  112. lpfc_init_sgl_list
  113. lpfc_sli4_init_rpi_hdrs
  114. lpfc_sli4_create_rpi_hdr
  115. lpfc_sli4_remove_rpi_hdrs
  116. lpfc_hba_alloc
  117. lpfc_hba_free
  118. lpfc_create_shost
  119. lpfc_destroy_shost
  120. lpfc_setup_bg
  121. lpfc_post_init_setup
  122. lpfc_sli_pci_mem_setup
  123. lpfc_sli_pci_mem_unset
  124. lpfc_sli4_post_status_check
  125. lpfc_sli4_bar0_register_memmap
  126. lpfc_sli4_bar1_register_memmap
  127. lpfc_sli4_bar2_register_memmap
  128. lpfc_create_bootstrap_mbox
  129. lpfc_destroy_bootstrap_mbox
  130. lpfc_sli4_read_config
  131. lpfc_setup_endian_order
  132. lpfc_sli4_queue_verify
  133. lpfc_alloc_io_wq_cq
  134. lpfc_sli4_queue_create
  135. __lpfc_sli4_release_queue
  136. lpfc_sli4_release_queues
  137. lpfc_sli4_release_hdwq
  138. lpfc_sli4_queue_destroy
  139. lpfc_free_rq_buffer
  140. lpfc_create_wq_cq
  141. lpfc_setup_cq_lookup
  142. lpfc_sli4_queue_setup
  143. lpfc_sli4_queue_unset
  144. lpfc_sli4_cq_event_pool_create
  145. lpfc_sli4_cq_event_pool_destroy
  146. __lpfc_sli4_cq_event_alloc
  147. lpfc_sli4_cq_event_alloc
  148. __lpfc_sli4_cq_event_release
  149. lpfc_sli4_cq_event_release
  150. lpfc_sli4_cq_event_release_all
  151. lpfc_pci_function_reset
  152. lpfc_sli4_pci_mem_setup
  153. lpfc_sli4_pci_mem_unset
  154. lpfc_sli_enable_msix
  155. lpfc_sli_enable_msi
  156. lpfc_sli_enable_intr
  157. lpfc_sli_disable_intr
  158. lpfc_find_cpu_handle
  159. lpfc_find_hyper
  160. lpfc_cpu_affinity_check
  161. lpfc_cpuhp_get_eq
  162. __lpfc_cpuhp_remove
  163. lpfc_cpuhp_remove
  164. lpfc_cpuhp_add
  165. __lpfc_cpuhp_checks
  166. lpfc_cpu_offline
  167. lpfc_cpu_online
  168. lpfc_sli4_enable_msix
  169. lpfc_sli4_enable_msi
  170. lpfc_sli4_enable_intr
  171. lpfc_sli4_disable_intr
  172. lpfc_unset_hba
  173. lpfc_sli4_xri_exchange_busy_wait
  174. lpfc_sli4_hba_unset
  175. lpfc_pc_sli4_params_get
  176. lpfc_get_sli4_parameters
  177. lpfc_pci_probe_one_s3
  178. lpfc_pci_remove_one_s3
  179. lpfc_pci_suspend_one_s3
  180. lpfc_pci_resume_one_s3
  181. lpfc_sli_prep_dev_for_recover
  182. lpfc_sli_prep_dev_for_reset
  183. lpfc_sli_prep_dev_for_perm_failure
  184. lpfc_io_error_detected_s3
  185. lpfc_io_slot_reset_s3
  186. lpfc_io_resume_s3
  187. lpfc_sli4_get_els_iocb_cnt
  188. lpfc_sli4_get_iocb_cnt
  189. lpfc_log_write_firmware_error
  190. lpfc_write_firmware
  191. lpfc_sli4_request_firmware_update
  192. lpfc_pci_probe_one_s4
  193. lpfc_pci_remove_one_s4
  194. lpfc_pci_suspend_one_s4
  195. lpfc_pci_resume_one_s4
  196. lpfc_sli4_prep_dev_for_recover
  197. lpfc_sli4_prep_dev_for_reset
  198. lpfc_sli4_prep_dev_for_perm_failure
  199. lpfc_io_error_detected_s4
  200. lpfc_io_slot_reset_s4
  201. lpfc_io_resume_s4
  202. lpfc_pci_probe_one
  203. lpfc_pci_remove_one
  204. lpfc_pci_suspend_one
  205. lpfc_pci_resume_one
  206. lpfc_io_error_detected
  207. lpfc_io_slot_reset
  208. lpfc_io_resume
  209. lpfc_sli4_oas_verify
  210. lpfc_sli4_ras_init
  211. lpfc_init
  212. lpfc_exit

/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/cpuhotplug.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_ids.h"

static enum cpuhp_state lpfc_cpuhp_state;
/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;

static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
        lpfc_vpd_t *vp = &phba->vpd;
        int i = 0, rc;
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *mb;
        char *lpfc_vpd_data = NULL;
        uint16_t offset = 0;
        static char licensed[56] =
                    "key unlock for use with gnu public licensed code only\0";
        static int init_key = 1;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }

        mb = &pmb->u.mb;
        phba->link_state = LPFC_INIT_MBX_CMDS;

        if (lpfc_is_LC_HBA(phba->pcidev->device)) {
                if (init_key) {
                        uint32_t *ptext = (uint32_t *) licensed;

                        for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
                                *ptext = cpu_to_be32(*ptext);
                        init_key = 0;
                }

                lpfc_read_nv(phba, pmb);
                memset((char*)mb->un.varRDnvp.rsvd3, 0,
                        sizeof (mb->un.varRDnvp.rsvd3));
                memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
                         sizeof (licensed));

                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
                                        "0324 Config Port initialization "
                                        "error, mbxCmd x%x READ_NVPARM, "
                                        "mbxStatus x%x\n",
                                        mb->mbxCommand, mb->mbxStatus);
                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -ERESTART;
                }
                memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
                       sizeof(phba->wwnn));
                memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
                       sizeof(phba->wwpn));
        }

        /*
         * Clear all option bits except LPFC_SLI3_BG_ENABLED,
         * which was already set in lpfc_get_cfgparam()
         */
        phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

        /* Setup and issue mailbox READ REV command */
        lpfc_read_rev(phba, pmb);
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
        if (rc != MBX_SUCCESS) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0439 Adapter failed to init, mbxCmd x%x "
                                "READ_REV, mbxStatus x%x\n",
                                mb->mbxCommand, mb->mbxStatus);
                mempool_free(pmb, phba->mbox_mem_pool);
                return -ERESTART;
        }

        /*
         * The value of rr must be 1 since the driver sets the cv field to 1.
         * This setting requires the FW to set all revision fields.
         */
        if (mb->un.varRdRev.rr == 0) {
                vp->rev.rBit = 0;
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0440 Adapter failed to init, READ_REV has "
                                "missing revision information.\n");
                mempool_free(pmb, phba->mbox_mem_pool);
                return -ERESTART;
        }

        if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
                mempool_free(pmb, phba->mbox_mem_pool);
                return -EINVAL;
        }

        /* Save information as VPD data */
        vp->rev.rBit = 1;
        memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
        vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
        memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
        vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
        memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
        vp->rev.biuRev = mb->un.varRdRev.biuRev;
        vp->rev.smRev = mb->un.varRdRev.smRev;
        vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
        vp->rev.endecRev = mb->un.varRdRev.endecRev;
        vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
        vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
        vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
        vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
        vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
        vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

        /* If the sli feature level is less than 9, we must
         * tear down all RPIs and VPIs on link down if NPIV
         * is enabled.
         */
        if (vp->rev.feaLevelHigh < 9)
                phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

        if (lpfc_is_LC_HBA(phba->pcidev->device))
                memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
                                                sizeof (phba->RandomData));

        /* Get adapter VPD information */
        lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
        if (!lpfc_vpd_data)
                goto out_free_mbox;
        do {
                lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                        "0441 VPD not present on adapter, "
                                        "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
                                        mb->mbxCommand, mb->mbxStatus);
                        mb->un.varDmp.word_cnt = 0;
                }
                /* dump mem may return a zero when finished or we got a
                 * mailbox error, either way we are done.
                 */
                if (mb->un.varDmp.word_cnt == 0)
                        break;
                if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
                        mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
                lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
                                      lpfc_vpd_data + offset,
                                      mb->un.varDmp.word_cnt);
                offset += mb->un.varDmp.word_cnt;
        } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
        lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

        kfree(lpfc_vpd_data);
out_free_mbox:
        mempool_free(pmb, phba->mbox_mem_pool);
        return 0;
}
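
/*
 * Illustrative aside (not part of the driver): before copying the static
 * "licensed" key into the READ_NVPARM mailbox above, lpfc_config_port_prep
 * converts it in place to big-endian 32-bit words, exactly once (guarded by
 * init_key). A minimal userspace sketch of that conversion, using htonl()
 * in place of the kernel's cpu_to_be32() and memcpy() to avoid the unaligned
 * dereference; names prefixed "demo_" are hypothetical.
 */
#if 0   /* standalone example; compile separately, not with the driver */
#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

static void demo_buf_to_be32(char *buf, size_t len)
{
        uint32_t w;
        size_t i;

        /* rewrite the buffer one 32-bit word at a time in big-endian order */
        for (i = 0; i + sizeof(w) <= len; i += sizeof(w)) {
                memcpy(&w, buf + i, sizeof(w));
                w = htonl(w);
                memcpy(buf + i, &w, sizeof(w));
        }
}
#endif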

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's configure-asynchronous-event
 * mailbox command to the device. If the mailbox command returns successfully,
 * it will set the internal async event support flag to 1; otherwise, it will
 * set the flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
        if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
                phba->temp_sensor_support = 1;
        else
                phba->temp_sensor_support = 0;
        mempool_free(pmboxq, phba->mbox_mem_pool);
        return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * wake up parameters. When this command completes, the response contains the
 * Option ROM version of the HBA. This function translates the version number
 * into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
        struct prog_id *prg;
        uint32_t prog_id_word;
        char dist = ' ';
        /* character array used for decoding dist type. */
        char dist_char[] = "nabx";

        if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
                mempool_free(pmboxq, phba->mbox_mem_pool);
                return;
        }

        prg = (struct prog_id *) &prog_id_word;

        /* word 7 contains the option rom version */
        prog_id_word = pmboxq->u.mb.un.varWords[7];

        /* Decode the Option ROM version word to a readable string */
        if (prg->dist < 4)
                dist = dist_char[prg->dist];

        if ((prg->dist == 3) && (prg->num == 0))
                snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
                        prg->ver, prg->rev, prg->lev);
        else
                snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
                        prg->ver, prg->rev, prg->lev,
                        dist, prg->num);
        mempool_free(pmboxq, phba->mbox_mem_pool);
        return;
}
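
/*
 * Illustrative aside (not part of the driver): the handler above formats
 * word 7 of the mailbox response through struct prog_id, whose bitfield
 * layout lives in lpfc_hw.h and is not reproduced here. The sketch below
 * only demonstrates the formatting rule taken from the code: "ver.revlev"
 * when dist == 3 and num == 0, otherwise append the dist character decoded
 * through the "nabx" table plus num. The struct below is a hypothetical
 * stand-in; its field order and widths are assumptions, not the real layout.
 */
#if 0   /* standalone example; compile separately, not with the driver */
#include <stdio.h>

struct demo_prog_id {           /* hypothetical stand-in for struct prog_id */
        unsigned int num:8;     /* field order/widths are assumptions */
        unsigned int dist:2;
        unsigned int lev:4;
        unsigned int rev:4;
        unsigned int ver:8;
};

static void demo_decode_oprom(const struct demo_prog_id *prg,
                              char *buf, size_t len)
{
        static const char dist_char[] = "nabx";
        char dist = (prg->dist < 4) ? dist_char[prg->dist] : ' ';

        if (prg->dist == 3 && prg->num == 0)
                snprintf(buf, len, "%d.%d%d", prg->ver, prg->rev, prg->lev);
        else
                snprintf(buf, len, "%d.%d%d%c%d",
                         prg->ver, prg->rev, prg->lev, dist, prg->num);
}
#endif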

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *      cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *   None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
        uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
        u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];

        /* If the soft name exists then update it using the service params */
        if (vport->phba->cfg_soft_wwnn)
                u64_to_wwn(vport->phba->cfg_soft_wwnn,
                           vport->fc_sparam.nodeName.u.wwn);
        if (vport->phba->cfg_soft_wwpn)
                u64_to_wwn(vport->phba->cfg_soft_wwpn,
                           vport->fc_sparam.portName.u.wwn);

        /*
         * If the name is empty or there exists a soft name
         * then copy the service params name, otherwise use the fc name
         */
        if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
                memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
                        sizeof(struct lpfc_name));
        else
                memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
                        sizeof(struct lpfc_name));

        /*
         * If the port name has changed, then set the Param changes flag
         * to unreg the login
         */
        if (vport->fc_portname.u.wwn[0] != 0 &&
                memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
                        sizeof(struct lpfc_name)))
                vport->vport_flag |= FAWWPN_PARAM_CHG;

        if (vport->fc_portname.u.wwn[0] == 0 ||
            vport->phba->cfg_soft_wwpn ||
            (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
            vport->vport_flag & FAWWPN_SET) {
                memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
                        sizeof(struct lpfc_name));
                vport->vport_flag &= ~FAWWPN_SET;
                if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
                        vport->vport_flag |= FAWWPN_SET;
        } else
                memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
                        sizeof(struct lpfc_name));
}
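
/*
 * Illustrative aside (not part of the driver): the routine above moves names
 * between the 8-byte wire format (struct lpfc_name) and the u64 form used by
 * cfg_soft_wwnn/cfg_soft_wwpn. The FC transport helpers u64_to_wwn() and
 * wwn_to_u64() treat the name as a big-endian 64-bit value; a minimal sketch
 * of that round trip (demo_ names are hypothetical):
 */
#if 0   /* standalone example; compile separately, not with the driver */
#include <stdint.h>

static void demo_u64_to_wwn(uint64_t inm, uint8_t wwn[8])
{
        int i;

        /* most significant byte first, matching FC wire order */
        for (i = 0; i < 8; i++)
                wwn[i] = (uint8_t)(inm >> (56 - i * 8));
}

static uint64_t demo_wwn_to_u64(const uint8_t wwn[8])
{
        uint64_t v = 0;
        int i;

        for (i = 0; i < 8; i++)
                v = (v << 8) | wwn[i];
        return v;
}
#endif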

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport = phba->pport;
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *mb;
        struct lpfc_dmabuf *mp;
        struct lpfc_sli *psli = &phba->sli;
        uint32_t status, timeout;
        int i, j;
        int rc;

        spin_lock_irq(&phba->hbalock);
        /*
         * If the CONFIG_PORT completed correctly the HBA is not
         * overheated anymore.
         */
        if (phba->over_temp_state == HBA_OVER_TEMP)
                phba->over_temp_state = HBA_NORMAL_TEMP;
        spin_unlock_irq(&phba->hbalock);

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }
        mb = &pmb->u.mb;

        /* Get login parameters for NID.  */
        rc = lpfc_read_sparam(phba, pmb, 0);
        if (rc) {
                mempool_free(pmb, phba->mbox_mem_pool);
                return -ENOMEM;
        }

        pmb->vport = vport;
        if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0448 Adapter failed init, mbxCmd x%x "
                                "READ_SPARM mbxStatus x%x\n",
                                mb->mbxCommand, mb->mbxStatus);
                phba->link_state = LPFC_HBA_ERROR;
                mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
                mempool_free(pmb, phba->mbox_mem_pool);
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
                return -EIO;
        }

        mp = (struct lpfc_dmabuf *)pmb->ctx_buf;

        memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        pmb->ctx_buf = NULL;
        lpfc_update_vport_wwn(vport);

        /* Update the fc_host data structures with new wwn. */
        fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
        fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
        fc_host_max_npiv_vports(shost) = phba->max_vpi;

        /* If no serial number in VPD data, use low 6 bytes of WWNN */
        /* This should be consolidated into parse_vpd ? - mr */
        if (phba->SerialNumber[0] == 0) {
                uint8_t *outptr;

                outptr = &vport->fc_nodename.u.s.IEEE[0];
                for (i = 0; i < 12; i++) {
                        status = *outptr++;
                        j = ((status & 0xf0) >> 4);
                        if (j <= 9)
                                phba->SerialNumber[i] =
                                    (char)((uint8_t) 0x30 + (uint8_t) j);
                        else
                                phba->SerialNumber[i] =
                                    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
                        i++;
                        j = (status & 0xf);
                        if (j <= 9)
                                phba->SerialNumber[i] =
                                    (char)((uint8_t) 0x30 + (uint8_t) j);
                        else
                                phba->SerialNumber[i] =
                                    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
                }
        }

        lpfc_read_config(phba, pmb);
        pmb->vport = vport;
        if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0453 Adapter failed to init, mbxCmd x%x "
                                "READ_CONFIG, mbxStatus x%x\n",
                                mb->mbxCommand, mb->mbxStatus);
                phba->link_state = LPFC_HBA_ERROR;
                mempool_free(pmb, phba->mbox_mem_pool);
                return -EIO;
        }

        /* Check if the port is disabled */
        lpfc_sli_read_link_ste(phba);

        /* Reset the DFT_HBA_Q_DEPTH to the max xri  */
        i = (mb->un.varRdConfig.max_xri + 1);
        if (phba->cfg_hba_queue_depth > i) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                                "3359 HBA queue depth changed from %d to %d\n",
                                phba->cfg_hba_queue_depth, i);
                phba->cfg_hba_queue_depth = i;
        }

        /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3)  */
        i = (mb->un.varRdConfig.max_xri >> 3);
        if (phba->pport->cfg_lun_queue_depth > i) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                                "3360 LUN queue depth changed from %d to %d\n",
                                phba->pport->cfg_lun_queue_depth, i);
                phba->pport->cfg_lun_queue_depth = i;
        }

        phba->lmt = mb->un.varRdConfig.lmt;

        /* Get the default values for Model Name and Description */
        lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

        phba->link_state = LPFC_LINK_DOWN;

        /* Only process IOCBs on ELS ring till hba_state is READY */
        if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
                psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
        if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
                psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

        /* Post receive buffers for desired rings */
        if (phba->sli_rev != 3)
                lpfc_post_rcv_buf(phba);

        /*
         * Configure HBA MSI-X attention conditions to messages if MSI-X mode
         */
        if (phba->intr_type == MSIX) {
                rc = lpfc_config_msi(phba, pmb);
                if (rc) {
                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -EIO;
                }
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
                                        "0352 Config MSI mailbox command "
                                        "failed, mbxCmd x%x, mbxStatus x%x\n",
                                        pmb->u.mb.mbxCommand,
                                        pmb->u.mb.mbxStatus);
                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -EIO;
                }
        }

        spin_lock_irq(&phba->hbalock);
        /* Initialize ERATT handling flag */
        phba->hba_flag &= ~HBA_ERATT_HANDLED;

        /* Enable appropriate host interrupts */
        if (lpfc_readl(phba->HCregaddr, &status)) {
                spin_unlock_irq(&phba->hbalock);
                return -EIO;
        }
        status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
        if (psli->num_rings > 0)
                status |= HC_R0INT_ENA;
        if (psli->num_rings > 1)
                status |= HC_R1INT_ENA;
        if (psli->num_rings > 2)
                status |= HC_R2INT_ENA;
        if (psli->num_rings > 3)
                status |= HC_R3INT_ENA;

        if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
            (phba->cfg_poll & DISABLE_FCP_RING_INT))
                status &= ~(HC_R0INT_ENA);

        writel(status, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
        spin_unlock_irq(&phba->hbalock);

        /* Set up ring-0 (ELS) timer */
        timeout = phba->fc_ratov * 2;
        mod_timer(&vport->els_tmofunc,
                  jiffies + msecs_to_jiffies(1000 * timeout));
        /* Set up heart beat (HB) timer */
        mod_timer(&phba->hb_tmofunc,
                  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
        phba->hb_outstanding = 0;
        phba->last_completion_time = jiffies;
        /* Set up error attention (ERATT) polling timer */
        mod_timer(&phba->eratt_poll,
                  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

        if (phba->hba_flag & LINK_DISABLED) {
                lpfc_printf_log(phba,
                        KERN_ERR, LOG_INIT,
                        "2598 Adapter Link is disabled.\n");
                lpfc_down_link(phba, pmb);
                pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
                if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
                        lpfc_printf_log(phba,
                        KERN_ERR, LOG_INIT,
                        "2599 Adapter failed to issue DOWN_LINK"
                        " mbox command rc 0x%x\n", rc);

                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -EIO;
                }
        } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
                mempool_free(pmb, phba->mbox_mem_pool);
                rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
                if (rc)
                        return rc;
        }
        /* MBOX buffer will be freed in mbox compl */
        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }

        lpfc_config_async(phba, pmb, LPFC_ELS_RING);
        pmb->mbox_cmpl = lpfc_config_async_cmpl;
        pmb->vport = phba->pport;
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

        if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
                lpfc_printf_log(phba,
                                KERN_ERR,
                                LOG_INIT,
                                "0456 Adapter failed to issue "
                                "ASYNCEVT_ENABLE mbox status x%x\n",
                                rc);
                mempool_free(pmb, phba->mbox_mem_pool);
        }

        /* Get Option ROM version */
        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }

        lpfc_dump_wakeup_param(phba, pmb);
        pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
        pmb->vport = phba->pport;
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

        if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
                                "to get Option ROM version status x%x\n", rc);
                mempool_free(pmb, phba->mbox_mem_pool);
        }

        return 0;
}
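
/*
 * Illustrative aside (not part of the driver): when the VPD carries no
 * serial number, the routine above synthesizes one from the low six IEEE
 * bytes of the WWNN, emitting each nibble as '0'-'9' (0x30-based) or
 * 'a'-'f' (0x61-based). A standalone sketch of the same mapping with a
 * hypothetical helper name:
 */
#if 0   /* standalone example; compile separately, not with the driver */
#include <stdint.h>

static void demo_serial_from_ieee(const uint8_t ieee[6], char out[13])
{
        int i;

        for (i = 0; i < 6; i++) {
                uint8_t hi = ieee[i] >> 4;
                uint8_t lo = ieee[i] & 0xf;

                out[2 * i]     = hi <= 9 ? '0' + hi : 'a' + (hi - 10);
                out[2 * i + 1] = lo <= 9 ? '0' + lo : 'a' + (lo - 10);
        }
        out[12] = '\0';
}
#endif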

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *              0 - success
 *              Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
        return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *              0 - success
 *              Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
                               uint32_t flag)
{
        struct lpfc_vport *vport = phba->pport;
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *mb;
        int rc;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }
        mb = &pmb->u.mb;
        pmb->vport = vport;

        if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
             !(phba->lmt & LMT_1Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
             !(phba->lmt & LMT_2Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
             !(phba->lmt & LMT_4Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
             !(phba->lmt & LMT_8Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
             !(phba->lmt & LMT_10Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
             !(phba->lmt & LMT_16Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
             !(phba->lmt & LMT_32Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
             !(phba->lmt & LMT_64Gb))) {
                /* Reset link speed to auto */
                lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
                        "1302 Invalid speed for this board:%d "
                        "Reset link speed to auto.\n",
                        phba->cfg_link_speed);
                phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
        }
        lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
        pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
        if (phba->sli_rev < LPFC_SLI_REV4)
                lpfc_set_loopback_flag(phba);
        rc = lpfc_sli_issue_mbox(phba, pmb, flag);
        if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "0498 Adapter failed to init, mbxCmd x%x "
                        "INIT_LINK, mbxStatus x%x\n",
                        mb->mbxCommand, mb->mbxStatus);
                if (phba->sli_rev <= LPFC_SLI_REV3) {
                        /* Clear all interrupt enable conditions */
                        writel(0, phba->HCregaddr);
                        readl(phba->HCregaddr); /* flush */
                        /* Clear all pending interrupts */
                        writel(0xffffffff, phba->HAregaddr);
                        readl(phba->HAregaddr); /* flush */
                }
                phba->link_state = LPFC_HBA_ERROR;
                if (rc != MBX_BUSY || flag == MBX_POLL)
                        mempool_free(pmb, phba->mbox_mem_pool);
                return -EIO;
        }
        phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
        if (flag == MBX_POLL)
                mempool_free(pmb, phba->mbox_mem_pool);

        return 0;
}
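
/*
 * Illustrative aside (not part of the driver): the long conditional in
 * lpfc_hba_init_link_fc_topology pairs each user-requested link speed with
 * the LMT capability bit the adapter must advertise, falling back to
 * auto-negotiation when they disagree. The same check can be read as a
 * table walk; a sketch with hypothetical demo_ names (the real speed and
 * LMT constants come from lpfc_hw.h):
 */
#if 0   /* pattern sketch; demo_ names are hypothetical */
struct demo_speed_lmt {
        unsigned int user_speed;        /* e.g. LPFC_USER_LINK_SPEED_8G */
        unsigned int lmt_bit;           /* e.g. LMT_8Gb */
};

static int demo_speed_supported(unsigned int cfg_speed, unsigned int lmt,
                                const struct demo_speed_lmt *tbl, int n)
{
        int i;

        for (i = 0; i < n; i++)
                if (tbl[i].user_speed == cfg_speed)
                        return (lmt & tbl[i].lmt_bit) != 0;
        /* auto (or any unlisted value) is left for link negotiation */
        return 1;
}
#endif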

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *              0 - success
 *              Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
        LPFC_MBOXQ_t *pmb;
        int rc;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }

        lpfc_printf_log(phba,
                KERN_ERR, LOG_INIT,
                "0491 Adapter Link is disabled.\n");
        lpfc_down_link(phba, pmb);
        pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
        rc = lpfc_sli_issue_mbox(phba, pmb, flag);
        if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
                lpfc_printf_log(phba,
                KERN_ERR, LOG_INIT,
                "2522 Adapter failed to issue DOWN_LINK"
                " mbox command rc 0x%x\n", rc);

                mempool_free(pmb, phba->mbox_mem_pool);
                return -EIO;
        }
        if (flag == MBX_POLL)
                mempool_free(pmb, phba->mbox_mem_pool);

        return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
        struct lpfc_vport **vports;
        int i;

        if (phba->sli_rev <= LPFC_SLI_REV3) {
                /* Disable interrupts */
                writel(0, phba->HCregaddr);
                readl(phba->HCregaddr); /* flush */
        }

        if (phba->pport->load_flag & FC_UNLOADING)
                lpfc_cleanup_discovery_resources(phba->pport);
        else {
                vports = lpfc_create_vport_work_array(phba);
                if (vports != NULL)
                        for (i = 0; i <= phba->max_vports &&
                                vports[i] != NULL; i++)
                                lpfc_cleanup_discovery_resources(vports[i]);
                lpfc_destroy_vport_work_array(phba, vports);
        }
        return 0;
}

/**
 * lpfc_sli4_free_sp_events - Clean up sp_queue_events and free
 * rspiocbs which got deferred
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will clean up completed slow-path events after the HBA is
 * reset when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
        struct lpfc_iocbq *rspiocbq;
        struct hbq_dmabuf *dmabuf;
        struct lpfc_cq_event *cq_event;

        spin_lock_irq(&phba->hbalock);
        phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
        spin_unlock_irq(&phba->hbalock);

        while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
                /* Get the response iocb from the head of work queue */
                spin_lock_irq(&phba->hbalock);
                list_remove_head(&phba->sli4_hba.sp_queue_event,
                                 cq_event, struct lpfc_cq_event, list);
                spin_unlock_irq(&phba->hbalock);

                switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
                case CQE_CODE_COMPL_WQE:
                        rspiocbq = container_of(cq_event, struct lpfc_iocbq,
                                                 cq_event);
                        lpfc_sli_release_iocbq(phba, rspiocbq);
                        break;
                case CQE_CODE_RECEIVE:
                case CQE_CODE_RECEIVE_V1:
                        dmabuf = container_of(cq_event, struct hbq_dmabuf,
                                              cq_event);
                        lpfc_in_buf_free(phba, &dmabuf->dbuf);
                }
        }
}

/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will clean up posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_sli_ring *pring;
        struct lpfc_dmabuf *mp, *next_mp;
        LIST_HEAD(buflist);
        int count;

        if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
                lpfc_sli_hbqbuf_free_all(phba);
        else {
                /* Cleanup preposted buffers on the ELS ring */
                pring = &psli->sli3_ring[LPFC_ELS_RING];
                spin_lock_irq(&phba->hbalock);
                list_splice_init(&pring->postbufq, &buflist);
                spin_unlock_irq(&phba->hbalock);

                count = 0;
                list_for_each_entry_safe(mp, next_mp, &buflist, list) {
                        list_del(&mp->list);
                        count++;
                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
                        kfree(mp);
                }

                spin_lock_irq(&phba->hbalock);
                pring->postbufq_cnt -= count;
                spin_unlock_irq(&phba->hbalock);
        }
}

/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will clean up the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_queue *qp = NULL;
        struct lpfc_sli_ring *pring;
        LIST_HEAD(completions);
        int i;
        struct lpfc_iocbq *piocb, *next_iocb;

        if (phba->sli_rev != LPFC_SLI_REV4) {
                for (i = 0; i < psli->num_rings; i++) {
                        pring = &psli->sli3_ring[i];
                        spin_lock_irq(&phba->hbalock);
                        /* At this point in time the HBA is either reset or DOA
                         * Nothing should be on txcmplq as it will
                         * NEVER complete.
                         */
                        list_splice_init(&pring->txcmplq, &completions);
                        pring->txcmplq_cnt = 0;
                        spin_unlock_irq(&phba->hbalock);

                        lpfc_sli_abort_iocb_ring(phba, pring);
                }
                /* Cancel all the IOCBs from the completions list */
                lpfc_sli_cancel_iocbs(phba, &completions,
                                      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
                return;
        }
        list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
                pring = qp->pring;
                if (!pring)
                        continue;
                spin_lock_irq(&pring->ring_lock);
                list_for_each_entry_safe(piocb, next_iocb,
                                         &pring->txcmplq, list)
                        piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
                list_splice_init(&pring->txcmplq, &completions);
                pring->txcmplq_cnt = 0;
                spin_unlock_irq(&pring->ring_lock);
                lpfc_sli_abort_iocb_ring(phba, pring);
        }
        /* Cancel all the IOCBs from the completions list */
        lpfc_sli_cancel_iocbs(phba, &completions,
                              IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}
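
/*
 * Illustrative aside (not part of the driver): the cleanup helpers above
 * rely on the same drain idiom - splice the shared list onto a local list
 * head while holding the lock, then walk the now-private copy with the
 * lock dropped. A generic sketch of the pattern with a hypothetical
 * demo_ entry type:
 */
#if 0   /* pattern sketch; demo_ names are hypothetical */
struct demo_entry {
        struct list_head list;
};

static void demo_drain(spinlock_t *lock, struct list_head *shared)
{
        struct demo_entry *pos, *next;
        LIST_HEAD(local);

        spin_lock_irq(lock);
        list_splice_init(shared, &local);       /* shared is now empty */
        spin_unlock_irq(lock);

        /* entries are private here; free them without holding the lock */
        list_for_each_entry_safe(pos, next, &local, list) {
                list_del(&pos->list);
                kfree(pos);
        }
}
#endif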

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
        lpfc_hba_free_post_buf(phba);
        lpfc_hba_clean_txcmplq(phba);
        return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
        struct lpfc_io_buf *psb, *psb_next;
        struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next;
        struct lpfc_sli4_hdw_queue *qp;
        LIST_HEAD(aborts);
        LIST_HEAD(nvme_aborts);
        LIST_HEAD(nvmet_aborts);
        struct lpfc_sglq *sglq_entry = NULL;
        int cnt, idx;

        lpfc_sli_hbqbuf_free_all(phba);
        lpfc_hba_clean_txcmplq(phba);

        /* At this point in time the HBA is either reset or DOA. Either
         * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
         * on the lpfc_els_sgl_list so that it can either be freed if the
         * driver is unloading or reposted if the driver is restarting
         * the port.
         */
        spin_lock_irq(&phba->hbalock);  /* required for lpfc_els_sgl_list and */
                                        /* scsi_buf_list */
        /* sgl_list_lock required because worker thread uses this
         * list.
         */
        spin_lock(&phba->sli4_hba.sgl_list_lock);
        list_for_each_entry(sglq_entry,
                &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
                sglq_entry->state = SGL_FREED;

        list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
                        &phba->sli4_hba.lpfc_els_sgl_list);

        spin_unlock(&phba->sli4_hba.sgl_list_lock);

        /* abts_xxxx_buf_list_lock required because worker thread uses this
         * list.
         */
        cnt = 0;
        for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
                qp = &phba->sli4_hba.hdwq[idx];

                spin_lock(&qp->abts_io_buf_list_lock);
                list_splice_init(&qp->lpfc_abts_io_buf_list,
                                 &aborts);

                list_for_each_entry_safe(psb, psb_next, &aborts, list) {
                        psb->pCmd = NULL;
                        psb->status = IOSTAT_SUCCESS;
                        cnt++;
                }
                spin_lock(&qp->io_buf_list_put_lock);
                list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
                qp->put_io_bufs += qp->abts_scsi_io_bufs;
                qp->put_io_bufs += qp->abts_nvme_io_bufs;
                qp->abts_scsi_io_bufs = 0;
                qp->abts_nvme_io_bufs = 0;
                spin_unlock(&qp->io_buf_list_put_lock);
                spin_unlock(&qp->abts_io_buf_list_lock);
        }
        spin_unlock_irq(&phba->hbalock);

        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
                spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
                list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
                                 &nvmet_aborts);
                spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
                list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
                        ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
                        lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
                }
        }

        lpfc_sli4_free_sp_events(phba);
        return cnt;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
        return (*phba->lpfc_hba_down_post)(phba);
}
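
/*
 * Illustrative aside (not part of the driver): lpfc_hba_down_post is one
 * of several operations dispatched through per-revision function pointers
 * that lpfc_api_table_setup binds at probe time, so callers stay agnostic
 * of whether the port is SLI-3 or SLI-4. A minimal sketch of the pattern
 * with hypothetical demo_ names:
 */
#if 0   /* pattern sketch; demo_ names are hypothetical */
struct demo_hba {
        int (*down_post)(struct demo_hba *hba); /* revision-specific hook */
};

static int demo_down_post_s3(struct demo_hba *hba) { return 0; }
static int demo_down_post_s4(struct demo_hba *hba) { return 0; }

static void demo_api_setup(struct demo_hba *hba, int sli_rev)
{
        /* bind once at probe; every later call goes through the pointer */
        hba->down_post = (sli_rev == 4) ? demo_down_post_s4
                                        : demo_down_post_s3;
}
#endif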

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: pointer to the timer_list embedded in the lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(struct timer_list *t)
{
        struct lpfc_hba *phba;
        uint32_t tmo_posted;
        unsigned long iflag;

        phba = from_timer(phba, t, hb_tmofunc);

        /* Check for heart beat timeout conditions */
        spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
        tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
        if (!tmo_posted)
                phba->pport->work_port_events |= WORKER_HB_TMO;
        spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

        /* Tell the worker thread there is work to do */
        if (!tmo_posted)
                lpfc_worker_wake_up(phba);
        return;
}
1169 
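/*
 * The timer above uses a post-once handoff: the event bit is tested and
 * set under the work_port_lock, and the worker is woken only when the
 * bit was newly posted. A self-contained userspace model of the same
 * pattern (illustrative sketch; pthread primitives stand in for the
 * spinlock and lpfc_worker_wake_up()):
 *
 *	#include <pthread.h>
 *
 *	#define WORKER_HB_TMO 0x1
 *
 *	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
 *	static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
 *	static unsigned int work_events;
 *
 *	static void post_hb_timeout(void)
 *	{
 *		int newly_posted = 0;
 *
 *		pthread_mutex_lock(&lock);
 *		if (!(work_events & WORKER_HB_TMO)) {
 *			work_events |= WORKER_HB_TMO;
 *			newly_posted = 1;
 *		}
 *		pthread_mutex_unlock(&lock);
 *
 *		if (newly_posted)
 *			pthread_cond_signal(&wake);
 *	}
 *
 * Re-posting while the bit is still set is a no-op, so a slow worker is
 * woken at most once per pending event; the worker clears the bit after
 * it has taken the event bitmap out.
 */
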
1170 /**
1171  * lpfc_rrq_timeout - The RRQ-timer timeout handler
1172  * @t: timer context used to obtain the pointer to lpfc hba data structure.
1173  *
1174  * This is the RRQ-timer timeout handler registered to the lpfc driver. When
1175  * this timer fires, an RRQ timeout event shall be posted to the lpfc driver
1176  * work-port-events bitmap and the worker thread is notified. This timeout
1177  * event will be used by the worker thread to invoke the actual timeout
1178  * handler routine, lpfc_rrq_handler. Any periodic operations will
1179  * be performed in the timeout handler and the RRQ timeout event bit shall
1180  * be cleared by the worker thread after it has taken the event bitmap out.
1181  **/
1182 static void
1183 lpfc_rrq_timeout(struct timer_list *t)
1184 {
1185         struct lpfc_hba *phba;
1186         unsigned long iflag;
1187 
1188         phba = from_timer(phba, t, rrq_tmr);
1189         spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
1190         if (!(phba->pport->load_flag & FC_UNLOADING))
1191                 phba->hba_flag |= HBA_RRQ_ACTIVE;
1192         else
1193                 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
1194         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
1195 
1196         if (!(phba->pport->load_flag & FC_UNLOADING))
1197                 lpfc_worker_wake_up(phba);
1198 }
1199 
1200 /**
1201  * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
1202  * @phba: pointer to lpfc hba data structure.
1203  * @pmboxq: pointer to the driver internal queue element for mailbox command.
1204  *
1205  * This is the callback function to the lpfc heart-beat mailbox command.
1206  * If configured, the lpfc driver issues the heart-beat mailbox command to
1207  * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
1208  * heart-beat mailbox command is issued, the driver shall set up the heart-beat
1209  * timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds and mark the
1210  * heart-beat outstanding state. Once the mailbox command comes back and
1211  * no error condition is detected, the heart-beat mailbox command timer is
1212  * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
1213  * state is cleared for the next heart-beat. If the timer expired with the
1214  * heart-beat outstanding state set, the driver will put the HBA offline.
1215  **/
1216 static void
1217 lpfc_hb_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
1218 {
1219         unsigned long drvr_flag;
1220 
1221         spin_lock_irqsave(&phba->hbalock, drvr_flag);
1222         phba->hb_outstanding = 0;
1223         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
1224 
1225         /* Check and reset heart-beat timer if necessary */
1226         mempool_free(pmboxq, phba->mbox_mem_pool);
1227         if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
1228                 !(phba->link_state == LPFC_HBA_ERROR) &&
1229                 !(phba->pport->load_flag & FC_UNLOADING))
1230                 mod_timer(&phba->hb_tmofunc,
1231                           jiffies +
1232                           msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
1233         return;
1234 }
1235 
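/**
 * lpfc_hb_eq_delay_work - Periodically adjust EQ interrupt coalescing delay
 * @work: pointer to the eq_delay_work item embedded in the lpfc hba.
 *
 * This delayed work runs every LPFC_EQ_DELAY_MSECS while cfg_auto_imax is
 * enabled and the adapter is not unloading. It samples the per-CPU EQ
 * interrupt counts accumulated since the last run, computes a new
 * coalescing delay for busy EQs, applies it via lpfc_modify_hba_eq_delay,
 * and then re-queues itself.
 **/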
1236 static void
1237 lpfc_hb_eq_delay_work(struct work_struct *work)
1238 {
1239         struct lpfc_hba *phba = container_of(to_delayed_work(work),
1240                                              struct lpfc_hba, eq_delay_work);
1241         struct lpfc_eq_intr_info *eqi, *eqi_new;
1242         struct lpfc_queue *eq, *eq_next;
1243         unsigned char *eqcnt = NULL;
1244         uint32_t usdelay;
1245         int i;
1246         bool update = false;
1247 
1248         if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
1249                 return;
1250 
1251         if (phba->link_state == LPFC_HBA_ERROR ||
1252             phba->pport->fc_flag & FC_OFFLINE_MODE)
1253                 goto requeue;
1254 
1255         eqcnt = kcalloc(num_possible_cpus(), sizeof(unsigned char),
1256                         GFP_KERNEL);
1257         if (!eqcnt)
1258                 goto requeue;
1259 
1260         if (phba->cfg_irq_chann > 1) {
1261                 /* Loop thru all IRQ vectors */
1262                 for (i = 0; i < phba->cfg_irq_chann; i++) {
1263                         /* Get the EQ corresponding to the IRQ vector */
1264                         eq = phba->sli4_hba.hba_eq_hdl[i].eq;
1265                         if (!eq)
1266                                 continue;
1267                         if (eq->q_mode) {
1268                                 update = true;
1269                                 break;
1270                         }
1271                         if (eqcnt[eq->last_cpu] < 2)
1272                                 eqcnt[eq->last_cpu]++;
1273                 }
1274         } else
1275                 update = true;
1276 
1277         for_each_present_cpu(i) {
1278                 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
1279                 if (!update && eqcnt[i] < 2) {
1280                         eqi->icnt = 0;
1281                         continue;
1282                 }
1283 
1284                 usdelay = (eqi->icnt / LPFC_IMAX_THRESHOLD) *
1285                            LPFC_EQ_DELAY_STEP;
1286                 if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
1287                         usdelay = LPFC_MAX_AUTO_EQ_DELAY;
1288 
1289                 eqi->icnt = 0;
1290 
1291                 list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
1292                         if (eq->last_cpu != i) {
1293                                 eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
1294                                                       eq->last_cpu);
1295                                 list_move_tail(&eq->cpu_list, &eqi_new->list);
1296                                 continue;
1297                         }
1298                         if (usdelay != eq->q_mode)
1299                                 lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
1300                                                          usdelay);
1301                 }
1302         }
1303 
1304         kfree(eqcnt);
1305 
1306 requeue:
1307         queue_delayed_work(phba->wq, &phba->eq_delay_work,
1308                            msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
1309 }
1310 
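/*
 * The delay computed above grows stepwise with the observed interrupt
 * rate and saturates, i.e. (summary sketch; the LPFC_* values are driver
 * constants defined elsewhere):
 *
 *	usdelay = min((icnt / LPFC_IMAX_THRESHOLD) * LPFC_EQ_DELAY_STEP,
 *		      LPFC_MAX_AUTO_EQ_DELAY);
 *
 * so an EQ that stayed below LPFC_IMAX_THRESHOLD interrupts in the
 * sampling window runs with no added delay, and a very busy EQ is
 * clamped at LPFC_MAX_AUTO_EQ_DELAY microseconds of coalescing.
 */
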
1311 /**
1312  * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
1313  * @phba: pointer to lpfc hba data structure.
1314  *
1315  * For each heartbeat, this routine applies heuristics to adjust
1316  * XRI distribution. The goal is to fully utilize free XRIs.
1317  **/
1318 static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
1319 {
1320         u32 i;
1321         u32 hwq_count;
1322 
1323         hwq_count = phba->cfg_hdw_queue;
1324         for (i = 0; i < hwq_count; i++) {
1325                 /* Adjust XRIs in private pool */
1326                 lpfc_adjust_pvt_pool_count(phba, i);
1327 
1328                 /* Adjust high watermark */
1329                 lpfc_adjust_high_watermark(phba, i);
1330 
1331 #ifdef LPFC_MXP_STAT
1332                 /* Snapshot pbl, pvt and busy count */
1333                 lpfc_snapshot_mxp(phba, i);
1334 #endif
1335         }
1336 }
1337 
1338 /**
1339  * lpfc_hb_timeout_handler - The HBA-timer timeout handler
1340  * @phba: pointer to lpfc hba data structure.
1341  *
1342  * This is the actual HBA-timer timeout handler to be invoked by the worker
1343  * thread whenever the HBA timer fired and HBA-timeout event posted. This
1344  * handler performs any periodic operations needed for the device. If such
1345  * periodic event has already been attended to either in the interrupt handler
1346  * or by processing slow-ring or fast-ring events within the HBA-timer
1347  * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
1348  * the timer for the next timeout period. If lpfc heart-beat mailbox command
1349  * is configured and there is no heart-beat mailbox command outstanding, a
1350  * heart-beat mailbox is issued and timer set properly. Otherwise, if there
1351  * has been a heart-beat mailbox command outstanding, the HBA shall be put
1352  * to offline.
1353  **/
1354 void
1355 lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1356 {
1357         struct lpfc_vport **vports;
1358         LPFC_MBOXQ_t *pmboxq;
1359         struct lpfc_dmabuf *buf_ptr;
1360         int retval, i;
1361         struct lpfc_sli *psli = &phba->sli;
1362         LIST_HEAD(completions);
1363 
1364         if (phba->cfg_xri_rebalancing) {
1365                 /* Multi-XRI pools handler */
1366                 lpfc_hb_mxp_handler(phba);
1367         }
1368 
1369         vports = lpfc_create_vport_work_array(phba);
1370         if (vports != NULL)
1371                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1372                         lpfc_rcv_seq_check_edtov(vports[i]);
1373                         lpfc_fdmi_change_check(vports[i]);
1374                 }
1375         lpfc_destroy_vport_work_array(phba, vports);
1376 
1377         if ((phba->link_state == LPFC_HBA_ERROR) ||
1378                 (phba->pport->load_flag & FC_UNLOADING) ||
1379                 (phba->pport->fc_flag & FC_OFFLINE_MODE))
1380                 return;
1381 
1382         spin_lock_irq(&phba->pport->work_port_lock);
1383 
1384         if (time_after(phba->last_completion_time +
1385                         msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
1386                         jiffies)) {
1387                 spin_unlock_irq(&phba->pport->work_port_lock);
1388                 if (!phba->hb_outstanding)
1389                         mod_timer(&phba->hb_tmofunc,
1390                                 jiffies +
1391                                 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
1392                 else
1393                         mod_timer(&phba->hb_tmofunc,
1394                                 jiffies +
1395                                 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
1396                 return;
1397         }
1398         spin_unlock_irq(&phba->pport->work_port_lock);
1399 
1400         if (phba->elsbuf_cnt &&
1401                 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
1402                 spin_lock_irq(&phba->hbalock);
1403                 list_splice_init(&phba->elsbuf, &completions);
1404                 phba->elsbuf_cnt = 0;
1405                 phba->elsbuf_prev_cnt = 0;
1406                 spin_unlock_irq(&phba->hbalock);
1407 
1408                 while (!list_empty(&completions)) {
1409                         list_remove_head(&completions, buf_ptr,
1410                                 struct lpfc_dmabuf, list);
1411                         lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1412                         kfree(buf_ptr);
1413                 }
1414         }
1415         phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
1416 
1417         /* If there is no heart beat outstanding, issue a heartbeat command */
1418         if (phba->cfg_enable_hba_heartbeat) {
1419                 if (!phba->hb_outstanding) {
1420                         if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
1421                                 (list_empty(&psli->mboxq))) {
1422                                 pmboxq = mempool_alloc(phba->mbox_mem_pool,
1423                                                         GFP_KERNEL);
1424                                 if (!pmboxq) {
1425                                         mod_timer(&phba->hb_tmofunc,
1426                                                  jiffies +
1427                                                  msecs_to_jiffies(1000 *
1428                                                  LPFC_HB_MBOX_INTERVAL));
1429                                         return;
1430                                 }
1431 
1432                                 lpfc_heart_beat(phba, pmboxq);
1433                                 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
1434                                 pmboxq->vport = phba->pport;
1435                                 retval = lpfc_sli_issue_mbox(phba, pmboxq,
1436                                                 MBX_NOWAIT);
1437 
1438                                 if (retval != MBX_BUSY &&
1439                                         retval != MBX_SUCCESS) {
1440                                         mempool_free(pmboxq,
1441                                                         phba->mbox_mem_pool);
1442                                         mod_timer(&phba->hb_tmofunc,
1443                                                 jiffies +
1444                                                 msecs_to_jiffies(1000 *
1445                                                 LPFC_HB_MBOX_INTERVAL));
1446                                         return;
1447                                 }
1448                                 phba->skipped_hb = 0;
1449                                 phba->hb_outstanding = 1;
1450                         } else if (time_before_eq(phba->last_completion_time,
1451                                         phba->skipped_hb)) {
1452                                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1453                                         "2857 Last completion time not "
1454                                         "updated in %d ms\n",
1455                                         jiffies_to_msecs(jiffies
1456                                                  - phba->last_completion_time));
1457                         } else
1458                                 phba->skipped_hb = jiffies;
1459 
1460                         mod_timer(&phba->hb_tmofunc,
1461                                  jiffies +
1462                                  msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
1463                         return;
1464                 } else {
1465                         /*
1466                          * If the heart-beat timeout is called with hb_outstanding
1467                          * set, we need to give the hb mailbox cmd a chance to
1468                          * complete or time out.
1469                          */
1470                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1471                                         "0459 Adapter heartbeat still out"
1472                                         "standing: last compl time was %d ms.\n",
1473                                         jiffies_to_msecs(jiffies
1474                                                  - phba->last_completion_time));
1475                         mod_timer(&phba->hb_tmofunc,
1476                                 jiffies +
1477                                 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
1478                 }
1479         } else {
1480                 mod_timer(&phba->hb_tmofunc,
1481                           jiffies +
1482                           msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
1483         }
1484 }
1485 
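/*
 * Decision tree implemented above (a summary of the code, not new logic):
 *
 *	recent I/O completion within LPFC_HB_MBOX_INTERVAL?
 *		-> just re-arm the timer (INTERVAL, or TIMEOUT if a
 *		   heartbeat mailbox is already in flight) and return;
 *	heartbeat enabled, none outstanding, mailbox subsystem idle?
 *		-> issue the heartbeat mailbox and arm the timer to
 *		   LPFC_HB_MBOX_TIMEOUT;
 *	heartbeat still outstanding?
 *		-> log it and give the mailbox until LPFC_HB_MBOX_TIMEOUT
 *		   to complete;
 *	heartbeat disabled?
 *		-> simply re-arm the timer every LPFC_HB_MBOX_INTERVAL.
 *
 * lpfc_hb_mbox_cmpl() is the completion side: it clears hb_outstanding
 * and re-arms the timer for the next interval.
 */
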
1486 /**
1487  * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
1488  * @phba: pointer to lpfc hba data structure.
1489  *
1490  * This routine is called to bring the HBA offline when HBA hardware error
1491  * other than Port Error 6 has been detected.
1492  **/
1493 static void
1494 lpfc_offline_eratt(struct lpfc_hba *phba)
1495 {
1496         struct lpfc_sli   *psli = &phba->sli;
1497 
1498         spin_lock_irq(&phba->hbalock);
1499         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1500         spin_unlock_irq(&phba->hbalock);
1501         lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1502 
1503         lpfc_offline(phba);
1504         lpfc_reset_barrier(phba);
1505         spin_lock_irq(&phba->hbalock);
1506         lpfc_sli_brdreset(phba);
1507         spin_unlock_irq(&phba->hbalock);
1508         lpfc_hba_down_post(phba);
1509         lpfc_sli_brdready(phba, HS_MBRDY);
1510         lpfc_unblock_mgmt_io(phba);
1511         phba->link_state = LPFC_HBA_ERROR;
1512         return;
1513 }
1514 
1515 /**
1516  * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
1517  * @phba: pointer to lpfc hba data structure.
1518  *
1519  * This routine is called to bring a SLI4 HBA offline when HBA hardware error
1520  * other than Port Error 6 has been detected.
1521  **/
1522 void
1523 lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1524 {
1525         spin_lock_irq(&phba->hbalock);
1526         phba->link_state = LPFC_HBA_ERROR;
1527         spin_unlock_irq(&phba->hbalock);
1528 
1529         lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1530         lpfc_sli_flush_io_rings(phba);
1531         lpfc_offline(phba);
1532         lpfc_hba_down_post(phba);
1533         lpfc_unblock_mgmt_io(phba);
1534 }
1535 
1536 /**
1537  * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
1538  * @phba: pointer to lpfc hba data structure.
1539  *
1540  * This routine is invoked to handle the deferred HBA hardware error
1541  * conditions. This type of error is indicated by the HBA setting ER1
1542  * and another ER bit in the host status register. The driver will
1543  * wait until the ER1 bit clears before handling the error condition.
1544  **/
1545 static void
1546 lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1547 {
1548         uint32_t old_host_status = phba->work_hs;
1549         struct lpfc_sli *psli = &phba->sli;
1550 
1551         /* If the pci channel is offline, ignore possible errors,
1552          * since we cannot communicate with the pci card anyway.
1553          */
1554         if (pci_channel_offline(phba->pcidev)) {
1555                 spin_lock_irq(&phba->hbalock);
1556                 phba->hba_flag &= ~DEFER_ERATT;
1557                 spin_unlock_irq(&phba->hbalock);
1558                 return;
1559         }
1560 
1561         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1562                 "0479 Deferred Adapter Hardware Error "
1563                 "Data: x%x x%x x%x\n",
1564                 phba->work_hs,
1565                 phba->work_status[0], phba->work_status[1]);
1566 
1567         spin_lock_irq(&phba->hbalock);
1568         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1569         spin_unlock_irq(&phba->hbalock);
1570 
1571 
1572         /*
1573          * Firmware stops when it triggered erratt. That could cause the I/Os
1574          * to be dropped by the firmware. Error out the iocbs (I/Os) on the
1575          * txcmplq and let the SCSI layer retry them after re-establishing link.
1576          */
1577         lpfc_sli_abort_fcp_rings(phba);
1578 
1579         /*
1580          * There was a firmware error. Take the hba offline and then
1581          * attempt to restart it.
1582          */
1583         lpfc_offline_prep(phba, LPFC_MBX_WAIT);
1584         lpfc_offline(phba);
1585 
1586         /* Wait for the ER1 bit to clear.*/
1587         while (phba->work_hs & HS_FFER1) {
1588                 msleep(100);
1589                 if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
1590                         phba->work_hs = UNPLUG_ERR;
1591                         break;
1592                 }
1593                 /* If driver is unloading let the worker thread continue */
1594                 if (phba->pport->load_flag & FC_UNLOADING) {
1595                         phba->work_hs = 0;
1596                         break;
1597                 }
1598         }
1599 
1600         /*
1601          * This is to protect against a race condition in which the
1602          * first write to the host attention register clears the
1603          * host status register.
1604          */
1605         if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1606                 phba->work_hs = old_host_status & ~HS_FFER1;
1607 
1608         spin_lock_irq(&phba->hbalock);
1609         phba->hba_flag &= ~DEFER_ERATT;
1610         spin_unlock_irq(&phba->hbalock);
1611         phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1612         phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1613 }
1614 
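/**
 * lpfc_board_errevt_to_mgmt - Post a board error event to management apps
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts an FC_REG_BOARD_EVENT vendor-unique event with
 * subcategory LPFC_EVENT_PORTINTERR to the FC transport so that
 * management applications are notified of the board-level error.
 **/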
1615 static void
1616 lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1617 {
1618         struct lpfc_board_event_header board_event;
1619         struct Scsi_Host *shost;
1620 
1621         board_event.event_type = FC_REG_BOARD_EVENT;
1622         board_event.subcategory = LPFC_EVENT_PORTINTERR;
1623         shost = lpfc_shost_from_vport(phba->pport);
1624         fc_host_post_vendor_event(shost, fc_get_event_number(),
1625                                   sizeof(board_event),
1626                                   (char *) &board_event,
1627                                   LPFC_NL_VENDOR_ID);
1628 }
1629 
1630 /**
1631  * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
1632  * @phba: pointer to lpfc hba data structure.
1633  *
1634  * This routine is invoked to handle the following HBA hardware error
1635  * conditions:
1636  * 1 - HBA error attention interrupt
1637  * 2 - DMA ring index out of range
1638  * 3 - Mailbox command came back as unknown
1639  **/
1640 static void
1641 lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1642 {
1643         struct lpfc_vport *vport = phba->pport;
1644         struct lpfc_sli   *psli = &phba->sli;
1645         uint32_t event_data;
1646         unsigned long temperature;
1647         struct temp_event temp_event_data;
1648         struct Scsi_Host  *shost;
1649 
1650         /* If the pci channel is offline, ignore possible errors,
1651          * since we cannot communicate with the pci card anyway.
1652          */
1653         if (pci_channel_offline(phba->pcidev)) {
1654                 spin_lock_irq(&phba->hbalock);
1655                 phba->hba_flag &= ~DEFER_ERATT;
1656                 spin_unlock_irq(&phba->hbalock);
1657                 return;
1658         }
1659 
1660         /* If resets are disabled then leave the HBA alone and return */
1661         if (!phba->cfg_enable_hba_reset)
1662                 return;
1663 
1664         /* Send an internal error event to mgmt application */
1665         lpfc_board_errevt_to_mgmt(phba);
1666 
1667         if (phba->hba_flag & DEFER_ERATT)
1668                 lpfc_handle_deferred_eratt(phba);
1669 
1670         if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1671                 if (phba->work_hs & HS_FFER6)
1672                         /* Re-establishing Link */
1673                         lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1674                                         "1301 Re-establishing Link "
1675                                         "Data: x%x x%x x%x\n",
1676                                         phba->work_hs, phba->work_status[0],
1677                                         phba->work_status[1]);
1678                 if (phba->work_hs & HS_FFER8)
1679                         /* Device Zeroization */
1680                         lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1681                                         "2861 Host Authentication device "
1682                                         "zeroization Data:x%x x%x x%x\n",
1683                                         phba->work_hs, phba->work_status[0],
1684                                         phba->work_status[1]);
1685 
1686                 spin_lock_irq(&phba->hbalock);
1687                 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1688                 spin_unlock_irq(&phba->hbalock);
1689 
1690                 /*
1691                  * Firmware stops when it triggers erratt with HS_FFER6.
1692                  * That could cause the I/Os to be dropped by the firmware.
1693                  * Error out the iocbs (I/Os) on the txcmplq and let the
1694                  * SCSI layer retry them after re-establishing link.
1695                  */
1696                 lpfc_sli_abort_fcp_rings(phba);
1697 
1698                 /*
1699                  * There was a firmware error.  Take the hba offline and then
1700                  * attempt to restart it.
1701                  */
1702                 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1703                 lpfc_offline(phba);
1704                 lpfc_sli_brdrestart(phba);
1705                 if (lpfc_online(phba) == 0) {   /* Initialize the HBA */
1706                         lpfc_unblock_mgmt_io(phba);
1707                         return;
1708                 }
1709                 lpfc_unblock_mgmt_io(phba);
1710         } else if (phba->work_hs & HS_CRIT_TEMP) {
1711                 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1712                 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1713                 temp_event_data.event_code = LPFC_CRIT_TEMP;
1714                 temp_event_data.data = (uint32_t)temperature;
1715 
1716                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1717                                 "0406 Adapter maximum temperature exceeded "
1718                                 "(%ld), taking this port offline "
1719                                 "Data: x%x x%x x%x\n",
1720                                 temperature, phba->work_hs,
1721                                 phba->work_status[0], phba->work_status[1]);
1722 
1723                 shost = lpfc_shost_from_vport(phba->pport);
1724                 fc_host_post_vendor_event(shost, fc_get_event_number(),
1725                                           sizeof(temp_event_data),
1726                                           (char *) &temp_event_data,
1727                                           SCSI_NL_VID_TYPE_PCI
1728                                           | PCI_VENDOR_ID_EMULEX);
1729 
1730                 spin_lock_irq(&phba->hbalock);
1731                 phba->over_temp_state = HBA_OVER_TEMP;
1732                 spin_unlock_irq(&phba->hbalock);
1733                 lpfc_offline_eratt(phba);
1734 
1735         } else {
1736                 /* The if clause above forces this code path when the status
1737                  * failure is a value other than FFER6. Do not call the offline
1738                  * twice. This is the adapter hardware error path.
1739                  */
1740                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1741                                 "0457 Adapter Hardware Error "
1742                                 "Data: x%x x%x x%x\n",
1743                                 phba->work_hs,
1744                                 phba->work_status[0], phba->work_status[1]);
1745 
1746                 event_data = FC_REG_DUMP_EVENT;
1747                 shost = lpfc_shost_from_vport(vport);
1748                 fc_host_post_vendor_event(shost, fc_get_event_number(),
1749                                 sizeof(event_data), (char *) &event_data,
1750                                 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1751 
1752                 lpfc_offline_eratt(phba);
1753         }
1754         return;
1755 }
1756 
1757 /**
1758  * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
1759  * @phba: pointer to lpfc hba data structure.
1760  * @mbx_action: flag for mailbox shutdown action.
 * @en_rn_msg: set if the "Reset Needed" recovery message should be logged.
1761  *
1762  * This routine is invoked to perform an SLI4 port PCI function reset in
1763  * response to port status register polling attention. It waits for port
1764  * status register (ERR, RDY, RN) bits before proceeding with function reset.
1765  * During this process, interrupt vectors are freed and later requested
1766  * for handling possible port resource change.
1767  **/
1768 static int
1769 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
1770                             bool en_rn_msg)
1771 {
1772         int rc;
1773         uint32_t intr_mode;
1774 
1775         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
1776             LPFC_SLI_INTF_IF_TYPE_2) {
1777                 /*
1778                  * On an error status condition, the driver needs to wait for
1779                  * the port to be ready before performing the reset.
1780                  */
1781                 rc = lpfc_sli4_pdev_status_reg_wait(phba);
1782                 if (rc)
1783                         return rc;
1784         }
1785 
1786         /* need reset: attempt for port recovery */
1787         if (en_rn_msg)
1788                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1789                                 "2887 Reset Needed: Attempting Port "
1790                                 "Recovery...\n");
1791         lpfc_offline_prep(phba, mbx_action);
1792         lpfc_sli_flush_io_rings(phba);
1793         lpfc_offline(phba);
1794         /* release interrupt for possible resource change */
1795         lpfc_sli4_disable_intr(phba);
1796         rc = lpfc_sli_brdrestart(phba);
1797         if (rc) {
1798                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1799                                 "6309 Failed to restart board\n");
1800                 return rc;
1801         }
1802         /* request and enable interrupt */
1803         intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
1804         if (intr_mode == LPFC_INTR_ERROR) {
1805                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1806                                 "3175 Failed to enable interrupt\n");
1807                 return -EIO;
1808         }
1809         phba->intr_mode = intr_mode;
1810         rc = lpfc_online(phba);
1811         if (rc == 0)
1812                 lpfc_unblock_mgmt_io(phba);
1813 
1814         return rc;
1815 }
1816 
1817 /**
1818  * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1819  * @phba: pointer to lpfc hba data structure.
1820  *
1821  * This routine is invoked to handle the SLI4 HBA hardware error attention
1822  * conditions.
1823  **/
1824 static void
1825 lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1826 {
1827         struct lpfc_vport *vport = phba->pport;
1828         uint32_t event_data;
1829         struct Scsi_Host *shost;
1830         uint32_t if_type;
1831         struct lpfc_register portstat_reg = {0};
1832         uint32_t reg_err1, reg_err2;
1833         uint32_t uerrlo_reg, uemasklo_reg;
1834         uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
1835         bool en_rn_msg = true;
1836         struct temp_event temp_event_data;
1837         struct lpfc_register portsmphr_reg;
1838         int rc, i;
1839 
1840         /* If the pci channel is offline, ignore possible errors, since
1841          * we cannot communicate with the pci card anyway.
1842          */
1843         if (pci_channel_offline(phba->pcidev)) {
1844                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1845                                 "3166 pci channel is offline\n");
1846                 lpfc_sli4_offline_eratt(phba);
1847                 return;
1848         }
1849 
1850         memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
1851         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1852         switch (if_type) {
1853         case LPFC_SLI_INTF_IF_TYPE_0:
1854                 pci_rd_rc1 = lpfc_readl(
1855                                 phba->sli4_hba.u.if_type0.UERRLOregaddr,
1856                                 &uerrlo_reg);
1857                 pci_rd_rc2 = lpfc_readl(
1858                                 phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
1859                                 &uemasklo_reg);
1860                 /* consider PCI bus read error as pci_channel_offline */
1861                 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
1862                         return;
1863                 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
1864                         lpfc_sli4_offline_eratt(phba);
1865                         return;
1866                 }
1867                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1868                                 "7623 Checking UE recoverable");
1869 
1870                 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
1871                         if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
1872                                        &portsmphr_reg.word0))
1873                                 continue;
1874 
1875                         smphr_port_status = bf_get(lpfc_port_smphr_port_status,
1876                                                    &portsmphr_reg);
1877                         if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
1878                             LPFC_PORT_SEM_UE_RECOVERABLE)
1879                                 break;
1880                         /* Sleep for 1 sec before checking the port semaphore */
1881                         msleep(1000);
1882                 }
1883 
1884                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1885                                 "4827 smphr_port_status x%x : Waited %dSec",
1886                                 smphr_port_status, i);
1887 
1888                 /* Recoverable UE, reset the HBA device */
1889                 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
1890                     LPFC_PORT_SEM_UE_RECOVERABLE) {
1891                         for (i = 0; i < 20; i++) {
1892                                 msleep(1000);
1893                                 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
1894                                     &portsmphr_reg.word0) &&
1895                                     (LPFC_POST_STAGE_PORT_READY ==
1896                                      bf_get(lpfc_port_smphr_port_status,
1897                                      &portsmphr_reg))) {
1898                                         rc = lpfc_sli4_port_sta_fn_reset(phba,
1899                                                 LPFC_MBX_NO_WAIT, en_rn_msg);
1900                                         if (rc == 0)
1901                                                 return;
1902                                         lpfc_printf_log(phba,
1903                                                 KERN_ERR, LOG_INIT,
1904                                                 "4215 Failed to recover UE");
1905                                         break;
1906                                 }
1907                         }
1908                 }
1909                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1910                                 "7624 Firmware not ready: Failing UE recovery,"
1911                                 " waited %dSec", i);
1912                 phba->link_state = LPFC_HBA_ERROR;
1913                 break;
1914 
1915         case LPFC_SLI_INTF_IF_TYPE_2:
1916         case LPFC_SLI_INTF_IF_TYPE_6:
1917                 pci_rd_rc1 = lpfc_readl(
1918                                 phba->sli4_hba.u.if_type2.STATUSregaddr,
1919                                 &portstat_reg.word0);
1920                 /* consider PCI bus read error as pci_channel_offline */
1921                 if (pci_rd_rc1 == -EIO) {
1922                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1923                                 "3151 PCI bus read access failure: x%x\n",
1924                                 readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
1925                         lpfc_sli4_offline_eratt(phba);
1926                         return;
1927                 }
1928                 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
1929                 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
1930                 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
1931                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1932                                 "2889 Port Overtemperature event, "
1933                                 "taking port offline Data: x%x x%x\n",
1934                                 reg_err1, reg_err2);
1935 
1936                         phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
1937                         temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1938                         temp_event_data.event_code = LPFC_CRIT_TEMP;
1939                         temp_event_data.data = 0xFFFFFFFF;
1940 
1941                         shost = lpfc_shost_from_vport(phba->pport);
1942                         fc_host_post_vendor_event(shost, fc_get_event_number(),
1943                                                   sizeof(temp_event_data),
1944                                                   (char *)&temp_event_data,
1945                                                   SCSI_NL_VID_TYPE_PCI
1946                                                   | PCI_VENDOR_ID_EMULEX);
1947 
1948                         spin_lock_irq(&phba->hbalock);
1949                         phba->over_temp_state = HBA_OVER_TEMP;
1950                         spin_unlock_irq(&phba->hbalock);
1951                         lpfc_sli4_offline_eratt(phba);
1952                         return;
1953                 }
1954                 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1955                     reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
1956                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1957                                         "3143 Port Down: Firmware Update "
1958                                         "Detected\n");
1959                         en_rn_msg = false;
1960                 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1961                          reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1962                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1963                                         "3144 Port Down: Debug Dump\n");
1964                 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1965                          reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
1966                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1967                                         "3145 Port Down: Provisioning\n");
1968 
1969                 /* If resets are disabled then leave the HBA alone and return */
1970                 if (!phba->cfg_enable_hba_reset)
1971                         return;
1972 
1973                 /* Check port status register for function reset */
1974                 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
1975                                 en_rn_msg);
1976                 if (rc == 0) {
1977                         /* don't report event on forced debug dump */
1978                         if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1979                             reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1980                                 return;
1981                         else
1982                                 break;
1983                 }
1984                 /* fall through if unable to recover */
1985                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1986                                 "3152 Unrecoverable error\n");
1987                 phba->link_state = LPFC_HBA_ERROR;
1988                 break;
1989         case LPFC_SLI_INTF_IF_TYPE_1:
1990         default:
1991                 break;
1992         }
1993         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1994                         "3123 Report dump event to upper layer\n");
1995         /* Send an internal error event to mgmt application */
1996         lpfc_board_errevt_to_mgmt(phba);
1997 
1998         event_data = FC_REG_DUMP_EVENT;
1999         shost = lpfc_shost_from_vport(vport);
2000         fc_host_post_vendor_event(shost, fc_get_event_number(),
2001                                   sizeof(event_data), (char *) &event_data,
2002                                   SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
2003 }
2004 
2005 /**
2006  * lpfc_handle_eratt - Wrapper func for handling hba error attention
2007  * @phba: pointer to lpfc HBA data structure.
2008  *
2009  * This routine wraps the actual SLI3 or SLI4 hba error attention handling
2010  * routine via the API jump table function pointer in the lpfc_hba struct.
2015  **/
2016 void
2017 lpfc_handle_eratt(struct lpfc_hba *phba)
2018 {
2019         (*phba->lpfc_handle_eratt)(phba);
2020 }
2021 
2022 /**
2023  * lpfc_handle_latt - The HBA link event handler
2024  * @phba: pointer to lpfc hba data structure.
2025  *
2026  * This routine is invoked from the worker thread to handle an HBA host
2027  * attention link event. SLI3 only.
2028  **/
2029 void
2030 lpfc_handle_latt(struct lpfc_hba *phba)
2031 {
2032         struct lpfc_vport *vport = phba->pport;
2033         struct lpfc_sli   *psli = &phba->sli;
2034         LPFC_MBOXQ_t *pmb;
2035         volatile uint32_t control;
2036         struct lpfc_dmabuf *mp;
2037         int rc = 0;
2038 
2039         pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2040         if (!pmb) {
2041                 rc = 1;
2042                 goto lpfc_handle_latt_err_exit;
2043         }
2044 
2045         mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2046         if (!mp) {
2047                 rc = 2;
2048                 goto lpfc_handle_latt_free_pmb;
2049         }
2050 
2051         mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2052         if (!mp->virt) {
2053                 rc = 3;
2054                 goto lpfc_handle_latt_free_mp;
2055         }
2056 
2057         /* Cleanup any outstanding ELS commands */
2058         lpfc_els_flush_all_cmd(phba);
2059 
2060         psli->slistat.link_event++;
2061         lpfc_read_topology(phba, pmb, mp);
2062         pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
2063         pmb->vport = vport;
2064         /* Block ELS IOCBs until we have processed this mbox command */
2065         phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2066         rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
2067         if (rc == MBX_NOT_FINISHED) {
2068                 rc = 4;
2069                 goto lpfc_handle_latt_free_mbuf;
2070         }
2071 
2072         /* Clear Link Attention in HA REG */
2073         spin_lock_irq(&phba->hbalock);
2074         writel(HA_LATT, phba->HAregaddr);
2075         readl(phba->HAregaddr); /* flush */
2076         spin_unlock_irq(&phba->hbalock);
2077 
2078         return;
2079 
2080 lpfc_handle_latt_free_mbuf:
2081         phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
2082         lpfc_mbuf_free(phba, mp->virt, mp->phys);
2083 lpfc_handle_latt_free_mp:
2084         kfree(mp);
2085 lpfc_handle_latt_free_pmb:
2086         mempool_free(pmb, phba->mbox_mem_pool);
2087 lpfc_handle_latt_err_exit:
2088         /* Enable Link attention interrupts */
2089         spin_lock_irq(&phba->hbalock);
2090         psli->sli_flag |= LPFC_PROCESS_LA;
2091         control = readl(phba->HCregaddr);
2092         control |= HC_LAINT_ENA;
2093         writel(control, phba->HCregaddr);
2094         readl(phba->HCregaddr); /* flush */
2095 
2096         /* Clear Link Attention in HA REG */
2097         writel(HA_LATT, phba->HAregaddr);
2098         readl(phba->HAregaddr); /* flush */
2099         spin_unlock_irq(&phba->hbalock);
2100         lpfc_linkdown(phba);
2101         phba->link_state = LPFC_HBA_ERROR;
2102 
2103         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
2104                      "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
2105 
2106         return;
2107 }
2108 
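/*
 * Note: lpfc_handle_latt() above follows the kernel's goto-unwind error
 * handling style: each acquisition has a matching label, and a failure
 * jumps to the label that releases everything obtained so far, in
 * reverse order. The shape of the idiom (sketch with hypothetical
 * resources, not driver code):
 *
 *	#include <errno.h>
 *	#include <stdlib.h>
 *
 *	#define A_SIZE 64
 *	#define B_SIZE 64
 *
 *	static int setup_two(void **pa, void **pb)
 *	{
 *		void *a = malloc(A_SIZE);
 *		void *b;
 *
 *		if (!a)
 *			goto err;
 *		b = malloc(B_SIZE);
 *		if (!b)
 *			goto free_a;
 *		*pa = a;
 *		*pb = b;
 *		return 0;
 *	free_a:
 *		free(a);
 *	err:
 *		return -ENOMEM;
 *	}
 *
 * On success the function returns before reaching the labels; on
 * failure it falls through exactly the frees it needs.
 */
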
2109 /**
2110  * lpfc_parse_vpd - Parse VPD (Vital Product Data)
2111  * @phba: pointer to lpfc hba data structure.
2112  * @vpd: pointer to the vital product data.
2113  * @len: length of the vital product data in bytes.
2114  *
2115  * This routine parses the Vital Product Data (VPD). The VPD is treated as
2116  * an array of characters. In this routine, the ModelName, ProgramType, and
2117  * ModelDesc, etc. fields of the phba data structure will be populated.
2118  *
2119  * Return codes
2120  *   0 - pointer to the VPD passed in is NULL
2121  *   1 - success
2122  **/
2123 int
2124 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
2125 {
2126         uint8_t lenlo, lenhi;
2127         int Length;
2128         int i, j;
2129         int finished = 0;
2130         int index = 0;
2131 
2132         if (!vpd)
2133                 return 0;
2134 
2135         /* Vital Product */
2136         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2137                         "0455 Vital Product Data: x%x x%x x%x x%x\n",
2138                         (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
2139                         (uint32_t) vpd[3]);
2140         while (!finished && (index < (len - 4))) {
2141                 switch (vpd[index]) {
2142                 case 0x82:
2143                 case 0x91:
2144                         index += 1;
2145                         lenlo = vpd[index];
2146                         index += 1;
2147                         lenhi = vpd[index];
2148                         index += 1;
2149                         i = ((((unsigned short)lenhi) << 8) + lenlo);
2150                         index += i;
2151                         break;
2152                 case 0x90:
2153                         index += 1;
2154                         lenlo = vpd[index];
2155                         index += 1;
2156                         lenhi = vpd[index];
2157                         index += 1;
2158                         Length = ((((unsigned short)lenhi) << 8) + lenlo);
2159                         if (Length > len - index)
2160                                 Length = len - index;
2161                         while (Length > 0) {
2162                                 /* Look for Serial Number */
2163                                 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
2164                                         index += 2;
2165                                         i = vpd[index];
2166                                         index += 1;
2167                                         j = 0;
2168                                         Length -= (3+i);
2169                                         while (i--) {
2170                                                 phba->SerialNumber[j++] = vpd[index++];
2171                                                 if (j == 31)
2172                                                         break;
2173                                         }
2174                                         phba->SerialNumber[j] = 0;
2175                                         continue;
2176                                 }
2177                                 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
2178                                         phba->vpd_flag |= VPD_MODEL_DESC;
2179                                         index += 2;
2180                                         i = vpd[index];
2181                                         index += 1;
2182                                         j = 0;
2183                                         Length -= (3+i);
2184                                         while (i--) {
2185                                                 phba->ModelDesc[j++] = vpd[index++];
2186                                                 if (j == 255)
2187                                                         break;
2188                                         }
2189                                         phba->ModelDesc[j] = 0;
2190                                         continue;
2191                                 }
2192                                 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
2193                                         phba->vpd_flag |= VPD_MODEL_NAME;
2194                                         index += 2;
2195                                         i = vpd[index];
2196                                         index += 1;
2197                                         j = 0;
2198                                         Length -= (3+i);
2199                                         while (i--) {
2200                                                 phba->ModelName[j++] = vpd[index++];
2201                                                 if (j == 79)
2202                                                         break;
2203                                         }
2204                                         phba->ModelName[j] = 0;
2205                                         continue;
2206                                 }
2207                                 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
2208                                         phba->vpd_flag |= VPD_PROGRAM_TYPE;
2209                                         index += 2;
2210                                         i = vpd[index];
2211                                         index += 1;
2212                                         j = 0;
2213                                         Length -= (3+i);
2214                                         while (i--) {
2215                                                 phba->ProgramType[j++] = vpd[index++];
2216                                                 if (j == 255)
2217                                                         break;
2218                                         }
2219                                         phba->ProgramType[j] = 0;
2220                                         continue;
2221                                 }
2222                                 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
2223                                         phba->vpd_flag |= VPD_PORT;
2224                                         index += 2;
2225                                         i = vpd[index];
2226                                         index += 1;
2227                                         j = 0;
2228                                         Length -= (3+i);
2229                                         while (i--) {
2230                                                 if ((phba->sli_rev == LPFC_SLI_REV4) &&
2231                                                     (phba->sli4_hba.pport_name_sta ==
2232                                                      LPFC_SLI4_PPNAME_GET)) {
2233                                                         j++;
2234                                                         index++;
2235                                                 } else
2236                                                         phba->Port[j++] = vpd[index++];
2237                                                 if (j == 19)
2238                                                         break;
2239                                         }
2240                                         if ((phba->sli_rev != LPFC_SLI_REV4) ||
2241                                             (phba->sli4_hba.pport_name_sta ==
2242                                              LPFC_SLI4_PPNAME_NON))
2243                                                 phba->Port[j] = 0;
2244                                         continue;
2245                                 }
2246                                 else {
2247                                         index += 2;
2248                                         i = vpd[index];
2249                                         index += 1;
2250                                         index += i;
2251                                         Length -= (3 + i);
2252                                 }
2253                         }
2254                         finished = 0;
2255                         break;
2256                 case 0x78:
2257                         finished = 1;
2258                         break;
2259                 default:
2260                         index++;
2261                         break;
2262                 }
2263         }
2264 
2265         return 1;
2266 }
2267 
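/*
 * The parser above walks standard PCI VPD large-resource descriptors.
 * Byte layout it handles (sketch):
 *
 *	0x82/0x91 <len lo> <len hi> <identifier string>     -> skipped
 *	0x90      <len lo> <len hi> <keyword entries>       -> parsed
 *	          each entry: <K0><K1><len><len data bytes>
 *	          keywords consumed: "SN" serial number,
 *	          "V1" model description, "V2" model name,
 *	          "V3" program type, "V4" port designation
 *	0x78                                                -> end tag
 *
 * For example, the read-only section 90 09 00 'S' 'N' 06 "ABC123"
 * carries a six-byte serial number in a nine-byte descriptor; the
 * 16-bit descriptor length is little-endian (lenlo first).
 */
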
2268 /**
2269  * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
2270  * @phba: pointer to lpfc hba data structure.
2271  * @mdp: pointer to the data structure to hold the derived model name.
2272  * @descp: pointer to the data structure to hold the derived description.
2273  *
2274  * This routine retrieves HBA's description based on its registered PCI device
2275  * ID. The @descp passed into this function points to an array of 256 chars. It
2276  * shall be returned with the model name, maximum speed, and the host bus type.
2277  * The @mdp passed into this function points to an array of 80 chars. When the
2278  * function returns, the @mdp will be filled with the model name.
2279  **/
2280 static void
2281 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2282 {
2283         lpfc_vpd_t *vp;
2284         uint16_t dev_id = phba->pcidev->device;
2285         int max_speed;
2286         int GE = 0;
2287         int oneConnect = 0; /* default is not a oneConnect */
2288         struct {
2289                 char *name;
2290                 char *bus;
2291                 char *function;
2292         } m = {"<Unknown>", "", ""};
2293 
2294         if (mdp && mdp[0] != '\0'
2295                 && descp && descp[0] != '\0')
2296                 return;
2297 
2298         if (phba->lmt & LMT_64Gb)
2299                 max_speed = 64;
2300         else if (phba->lmt & LMT_32Gb)
2301                 max_speed = 32;
2302         else if (phba->lmt & LMT_16Gb)
2303                 max_speed = 16;
2304         else if (phba->lmt & LMT_10Gb)
2305                 max_speed = 10;
2306         else if (phba->lmt & LMT_8Gb)
2307                 max_speed = 8;
2308         else if (phba->lmt & LMT_4Gb)
2309                 max_speed = 4;
2310         else if (phba->lmt & LMT_2Gb)
2311                 max_speed = 2;
2312         else if (phba->lmt & LMT_1Gb)
2313                 max_speed = 1;
2314         else
2315                 max_speed = 0;
2316 
2317         vp = &phba->vpd;
2318 
2319         switch (dev_id) {
2320         case PCI_DEVICE_ID_FIREFLY:
2321                 m = (typeof(m)){"LP6000", "PCI",
2322                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2323                 break;
2324         case PCI_DEVICE_ID_SUPERFLY:
2325                 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
2326                         m = (typeof(m)){"LP7000", "PCI", ""};
2327                 else
2328                         m = (typeof(m)){"LP7000E", "PCI", ""};
2329                 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2330                 break;
2331         case PCI_DEVICE_ID_DRAGONFLY:
2332                 m = (typeof(m)){"LP8000", "PCI",
2333                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2334                 break;
2335         case PCI_DEVICE_ID_CENTAUR:
2336                 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
2337                         m = (typeof(m)){"LP9002", "PCI", ""};
2338                 else
2339                         m = (typeof(m)){"LP9000", "PCI", ""};
2340                 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2341                 break;
2342         case PCI_DEVICE_ID_RFLY:
2343                 m = (typeof(m)){"LP952", "PCI",
2344                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2345                 break;
2346         case PCI_DEVICE_ID_PEGASUS:
2347                 m = (typeof(m)){"LP9802", "PCI-X",
2348                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2349                 break;
2350         case PCI_DEVICE_ID_THOR:
2351                 m = (typeof(m)){"LP10000", "PCI-X",
2352                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2353                 break;
2354         case PCI_DEVICE_ID_VIPER:
2355                 m = (typeof(m)){"LPX1000",  "PCI-X",
2356                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2357                 break;
2358         case PCI_DEVICE_ID_PFLY:
2359                 m = (typeof(m)){"LP982", "PCI-X",
2360                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2361                 break;
2362         case PCI_DEVICE_ID_TFLY:
2363                 m = (typeof(m)){"LP1050", "PCI-X",
2364                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2365                 break;
2366         case PCI_DEVICE_ID_HELIOS:
2367                 m = (typeof(m)){"LP11000", "PCI-X2",
2368                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2369                 break;
2370         case PCI_DEVICE_ID_HELIOS_SCSP:
2371                 m = (typeof(m)){"LP11000-SP", "PCI-X2",
2372                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2373                 break;
2374         case PCI_DEVICE_ID_HELIOS_DCSP:
2375                 m = (typeof(m)){"LP11002-SP",  "PCI-X2",
2376                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2377                 break;
2378         case PCI_DEVICE_ID_NEPTUNE:
2379                 m = (typeof(m)){"LPe1000", "PCIe",
2380                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2381                 break;
2382         case PCI_DEVICE_ID_NEPTUNE_SCSP:
2383                 m = (typeof(m)){"LPe1000-SP", "PCIe",
2384                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2385                 break;
2386         case PCI_DEVICE_ID_NEPTUNE_DCSP:
2387                 m = (typeof(m)){"LPe1002-SP", "PCIe",
2388                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2389                 break;
2390         case PCI_DEVICE_ID_BMID:
2391                 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
2392                 break;
2393         case PCI_DEVICE_ID_BSMB:
2394                 m = (typeof(m)){"LP111", "PCI-X2",
2395                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2396                 break;
2397         case PCI_DEVICE_ID_ZEPHYR:
2398                 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2399                 break;
2400         case PCI_DEVICE_ID_ZEPHYR_SCSP:
2401                 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2402                 break;
2403         case PCI_DEVICE_ID_ZEPHYR_DCSP:
2404                 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
2405                 GE = 1;
2406                 break;
2407         case PCI_DEVICE_ID_ZMID:
2408                 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
2409                 break;
2410         case PCI_DEVICE_ID_ZSMB:
2411                 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
2412                 break;
2413         case PCI_DEVICE_ID_LP101:
2414                 m = (typeof(m)){"LP101", "PCI-X",
2415                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2416                 break;
2417         case PCI_DEVICE_ID_LP10000S:
2418                 m = (typeof(m)){"LP10000-S", "PCI",
2419                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2420                 break;
2421         case PCI_DEVICE_ID_LP11000S:
2422                 m = (typeof(m)){"LP11000-S", "PCI-X2",
2423                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2424                 break;
2425         case PCI_DEVICE_ID_LPE11000S:
2426                 m = (typeof(m)){"LPe11000-S", "PCIe",
2427                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2428                 break;
2429         case PCI_DEVICE_ID_SAT:
2430                 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
2431                 break;
2432         case PCI_DEVICE_ID_SAT_MID:
2433                 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
2434                 break;
2435         case PCI_DEVICE_ID_SAT_SMB:
2436                 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
2437                 break;
2438         case PCI_DEVICE_ID_SAT_DCSP:
2439                 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
2440                 break;
2441         case PCI_DEVICE_ID_SAT_SCSP:
2442                 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
2443                 break;
2444         case PCI_DEVICE_ID_SAT_S:
2445                 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2446                 break;
2447         case PCI_DEVICE_ID_HORNET:
2448                 m = (typeof(m)){"LP21000", "PCIe",
2449                                 "Obsolete, Unsupported FCoE Adapter"};
2450                 GE = 1;
2451                 break;
2452         case PCI_DEVICE_ID_PROTEUS_VF:
2453                 m = (typeof(m)){"LPev12000", "PCIe IOV",
2454                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2455                 break;
2456         case PCI_DEVICE_ID_PROTEUS_PF:
2457                 m = (typeof(m)){"LPev12000", "PCIe IOV",
2458                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2459                 break;
2460         case PCI_DEVICE_ID_PROTEUS_S:
2461                 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2462                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2463                 break;
2464         case PCI_DEVICE_ID_TIGERSHARK:
2465                 oneConnect = 1;
2466                 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
2467                 break;
2468         case PCI_DEVICE_ID_TOMCAT:
2469                 oneConnect = 1;
2470                 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2471                 break;
2472         case PCI_DEVICE_ID_FALCON:
2473                 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2474                                 "EmulexSecure Fibre"};
2475                 break;
2476         case PCI_DEVICE_ID_BALIUS:
2477                 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2478                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2479                 break;
2480         case PCI_DEVICE_ID_LANCER_FC:
2481                 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2482                 break;
2483         case PCI_DEVICE_ID_LANCER_FC_VF:
2484                 m = (typeof(m)){"LPe16000", "PCIe",
2485                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2486                 break;
2487         case PCI_DEVICE_ID_LANCER_FCOE:
2488                 oneConnect = 1;
2489                 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2490                 break;
2491         case PCI_DEVICE_ID_LANCER_FCOE_VF:
2492                 oneConnect = 1;
2493                 m = (typeof(m)){"OCe15100", "PCIe",
2494                                 "Obsolete, Unsupported FCoE"};
2495                 break;
2496         case PCI_DEVICE_ID_LANCER_G6_FC:
2497                 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2498                 break;
2499         case PCI_DEVICE_ID_LANCER_G7_FC:
2500                 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
2501                 break;
2502         case PCI_DEVICE_ID_SKYHAWK:
2503         case PCI_DEVICE_ID_SKYHAWK_VF:
2504                 oneConnect = 1;
2505                 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2506                 break;
2507         default:
2508                 m = (typeof(m)){"Unknown", "", ""};
2509                 break;
2510         }
2511 
2512         if (mdp && mdp[0] == '\0')
2513                 snprintf(mdp, 79, "%s", m.name);
2514         /*
2515          * oneConnect HBAs require special processing; they are all
2516          * initiators and we put the port number on the end.
2517          */
2518         if (descp && descp[0] == '\0') {
2519                 if (oneConnect)
2520                         snprintf(descp, 255,
2521                                 "Emulex OneConnect %s, %s Initiator %s",
2522                                 m.name, m.function,
2523                                 phba->Port);
2524                 else if (max_speed == 0)
2525                         snprintf(descp, 255,
2526                                 "Emulex %s %s %s",
2527                                 m.name, m.bus, m.function);
2528                 else
2529                         snprintf(descp, 255,
2530                                 "Emulex %s %d%s %s %s",
2531                                 m.name, max_speed, (GE) ? "GE" : "Gb",
2532                                 m.bus, m.function);
2533         }
2534 }
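/*
 * Illustrative caller (a sketch, not taken from this file): the model and
 * description buffers are caller-owned and must arrive NUL-terminated, since
 * the routine above only fills them when they start empty; the sizes mirror
 * the snprintf() limits used above (79 + NUL and 255 + NUL). The helper name
 * example_log_model and the log text are hypothetical.
 */
static void example_log_model(struct lpfc_hba *phba)
{
	uint8_t model[80] = "";
	uint8_t desc[256] = "";

	lpfc_get_hba_model_desc(phba, model, desc);
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"model %s: %s\n", model, desc);
}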
2535 
2536 /**
2537  * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
2538  * @phba: pointer to lpfc hba data structure.
2539  * @pring: pointer to an IOCB ring.
2540  * @cnt: the number of IOCBs to be posted to the IOCB ring.
2541  *
2542  * This routine posts a given number of IOCBs with the associated DMA buffer
2543  * descriptors specified by the cnt argument to the given IOCB ring.
2544  *
2545  * Return codes
2546  *   The number of IOCBs NOT able to be posted to the IOCB ring.
2547  **/
2548 int
2549 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2550 {
2551         IOCB_t *icmd;
2552         struct lpfc_iocbq *iocb;
2553         struct lpfc_dmabuf *mp1, *mp2;
2554 
2555         cnt += pring->missbufcnt;
2556 
2557         /* While there are buffers to post */
2558         while (cnt > 0) {
2559                 /* Allocate buffer for command iocb */
2560                 iocb = lpfc_sli_get_iocbq(phba);
2561                 if (iocb == NULL) {
2562                         pring->missbufcnt = cnt;
2563                         return cnt;
2564                 }
2565                 icmd = &iocb->iocb;
2566 
2567                 /* 2 buffers can be posted per command */
2568                 /* Allocate buffer to post */
2569                 mp1 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2570                 if (mp1)
2571                         mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2572                 if (!mp1 || !mp1->virt) {
2573                         kfree(mp1);
2574                         lpfc_sli_release_iocbq(phba, iocb);
2575                         pring->missbufcnt = cnt;
2576                         return cnt;
2577                 }
2578 
2579                 INIT_LIST_HEAD(&mp1->list);
2580                 /* Allocate buffer to post */
2581                 if (cnt > 1) {
2582                         mp2 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2583                         if (mp2)
2584                                 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2585                                                             &mp2->phys);
2586                         if (!mp2 || !mp2->virt) {
2587                                 kfree(mp2);
2588                                 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2589                                 kfree(mp1);
2590                                 lpfc_sli_release_iocbq(phba, iocb);
2591                                 pring->missbufcnt = cnt;
2592                                 return cnt;
2593                         }
2594 
2595                         INIT_LIST_HEAD(&mp2->list);
2596                 } else {
2597                         mp2 = NULL;
2598                 }
2599 
2600                 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2601                 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2602                 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2603                 icmd->ulpBdeCount = 1;
2604                 cnt--;
2605                 if (mp2) {
2606                         icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2607                         icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2608                         icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2609                         cnt--;
2610                         icmd->ulpBdeCount = 2;
2611                 }
2612 
2613                 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2614                 icmd->ulpLe = 1;
2615 
2616                 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2617                     IOCB_ERROR) {
2618                         lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2619                         kfree(mp1);
2620                         cnt++;
2621                         if (mp2) {
2622                                 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2623                                 kfree(mp2);
2624                                 cnt++;
2625                         }
2626                         lpfc_sli_release_iocbq(phba, iocb);
2627                         pring->missbufcnt = cnt;
2628                         return cnt;
2629                 }
2630                 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2631                 if (mp2)
2632                         lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2633         }
2634         pring->missbufcnt = 0;
2635         return 0;
2636 }
2637 
2638 /**
2639  * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2640  * @phba: pointer to lpfc hba data structure.
2641  *
2642  * This routine posts the initial receive IOCB buffers to the ELS ring. The
2643  * number of initial IOCB buffers, LPFC_BUF_RING0, is currently set to 64.
2644  * SLI3 only.
2645  *
2646  * Return codes
2647  *   0 - success (currently always success)
2648  **/
2649 static int
2650 lpfc_post_rcv_buf(struct lpfc_hba *phba)
2651 {
2652         struct lpfc_sli *psli = &phba->sli;
2653 
2654         /* Ring 0, ELS / CT buffers */
2655         lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2656         /* Ring 2 - FCP no buffers needed */
2657 
2658         return 0;
2659 }
2660 
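/* S(N,V): rotate the 32-bit value V left by N bits (the SHA-1 ROTL operation) */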
2661 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
2662 
2663 /**
2664  * lpfc_sha_init - Set up the initial array of hash values
2665  * @HashResultPointer: pointer to the array of hash values.
2666  *
2667  * This routine sets the array of hash values to the standard SHA-1
2668  * initialization constants (H0-H4) for the LC HBAs.
2669  **/
2670 static void
2671 lpfc_sha_init(uint32_t *HashResultPointer)
2672 {
2673         HashResultPointer[0] = 0x67452301;
2674         HashResultPointer[1] = 0xEFCDAB89;
2675         HashResultPointer[2] = 0x98BADCFE;
2676         HashResultPointer[3] = 0x10325476;
2677         HashResultPointer[4] = 0xC3D2E1F0;
2678 }
2679 
2680 /**
2681  * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2682  * @HashResultPointer: pointer to an initial/result hash table.
2683  * @HashWorkingPointer: pointer to a working hash table.
2684  *
2685  * This routine iterates an initial hash table pointed to by @HashResultPointer
2686  * with the values from the working hash table pointed to by
2687  * @HashWorkingPointer. The results are put back into the initial hash table,
2688  * returned through @HashResultPointer as the result hash table.
2689  **/
2690 static void
2691 lpfc_sha_iterate(uint32_t *HashResultPointer, uint32_t *HashWorkingPointer)
2692 {
2693         int t;
2694         uint32_t TEMP;
2695         uint32_t A, B, C, D, E;
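	/* Expand the 16 input words into the 80-word SHA-1 message schedule */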
2696         t = 16;
2697         do {
2698                 HashWorkingPointer[t] =
2699                     S(1,
2700                       HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2701                                                                      8] ^
2702                       HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2703         } while (++t <= 79);
2704         t = 0;
2705         A = HashResultPointer[0];
2706         B = HashResultPointer[1];
2707         C = HashResultPointer[2];
2708         D = HashResultPointer[3];
2709         E = HashResultPointer[4];
2710 
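	/* 80 SHA-1 rounds: Ch (t < 20), Parity, Maj, then Parity again,
	 * each with its standard round constant K.
	 */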
2711         do {
2712                 if (t < 20) {
2713                         TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2714                 } else if (t < 40) {
2715                         TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2716                 } else if (t < 60) {
2717                         TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2718                 } else {
2719                         TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2720                 }
2721                 TEMP += S(5, A) + E + HashWorkingPointer[t];
2722                 E = D;
2723                 D = C;
2724                 C = S(30, B);
2725                 B = A;
2726                 A = TEMP;
2727         } while (++t <= 79);
2728 
2729         HashResultPointer[0] += A;
2730         HashResultPointer[1] += B;
2731         HashResultPointer[2] += C;
2732         HashResultPointer[3] += D;
2733         HashResultPointer[4] += E;
2734 
2735 }
2736 
2737 /**
2738  * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2739  * @RandomChallenge: pointer to the entry of host challenge random number array.
2740  * @HashWorking: pointer to the entry of the working hash array.
2741  *
2742  * This routine calculates the working hash array referred to by @HashWorking
2743  * from the challenge random numbers associated with the host, referred to by
2744  * @RandomChallenge. The result is put into the entry of the working hash
2745  * array and returned by reference through @HashWorking.
2746  **/
2747 static void
2748 lpfc_challenge_key(uint32_t *RandomChallenge, uint32_t *HashWorking)
2749 {
2750         *HashWorking = (*RandomChallenge ^ *HashWorking);
2751 }
2752 
2753 /**
2754  * lpfc_hba_init - Perform special handling for LC HBA initialization
2755  * @phba: pointer to lpfc hba data structure.
2756  * @hbainit: pointer to an array of unsigned 32-bit integers.
2757  *
2758  * This routine performs the special handling for LC HBA initialization.
2759  **/
2760 void
2761 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2762 {
2763         int t;
2764         uint32_t *HashWorking;
2765         uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2766 
2767         HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2768         if (!HashWorking)
2769                 return;
2770 
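	/* Seed the head (words 0-1) and tail (words 78-79) of the
	 * schedule with the two 32-bit words of the WWNN.
	 */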
2771         HashWorking[0] = HashWorking[78] = *pwwnn++;
2772         HashWorking[1] = HashWorking[79] = *pwwnn;
2773 
2774         for (t = 0; t < 7; t++)
2775                 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2776 
2777         lpfc_sha_init(hbainit);
2778         lpfc_sha_iterate(hbainit, HashWorking);
2779         kfree(HashWorking);
2780 }
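/*
 * Minimal usage sketch (the consumer shown here is hypothetical; a real
 * caller passes the digest buffer it owns): lpfc_hba_init() fills five
 * 32-bit words with the WWNN-keyed, challenge-mixed SHA-1 style result
 * computed by the helpers above.
 */
static void example_hbainit_digest(struct lpfc_hba *phba)
{
	uint32_t hbainit[5];	/* receives the 160-bit digest */

	lpfc_hba_init(phba, hbainit);
	/* hbainit[0..4] now holds the challenge-response digest */
}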
2781 
2782 /**
2783  * lpfc_cleanup - Performs vport cleanups before deleting a vport
2784  * @vport: pointer to a virtual N_Port data structure.
2785  *
2786  * This routine performs the necessary cleanups before deleting the @vport.
2787  * It invokes the discovery state machine to perform necessary state
2788  * transitions and to release the ndlps associated with the @vport. Note,
2789  * the physical port is treated as @vport 0.
2790  **/
2791 void
2792 lpfc_cleanup(struct lpfc_vport *vport)
2793 {
2794         struct lpfc_hba   *phba = vport->phba;
2795         struct lpfc_nodelist *ndlp, *next_ndlp;
2796         int i = 0;
2797 
2798         if (phba->link_state > LPFC_LINK_DOWN)
2799                 lpfc_port_link_failure(vport);
2800 
2801         list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2802                 if (!NLP_CHK_NODE_ACT(ndlp)) {
2803                         ndlp = lpfc_enable_node(vport, ndlp,
2804                                                 NLP_STE_UNUSED_NODE);
2805                         if (!ndlp)
2806                                 continue;
2807                         spin_lock_irq(&phba->ndlp_lock);
2808                         NLP_SET_FREE_REQ(ndlp);
2809                         spin_unlock_irq(&phba->ndlp_lock);
2810                         /* Trigger the release of the ndlp memory */
2811                         lpfc_nlp_put(ndlp);
2812                         continue;
2813                 }
2814                 spin_lock_irq(&phba->ndlp_lock);
2815                 if (NLP_CHK_FREE_REQ(ndlp)) {
2816                         /* The ndlp should not be in memory free mode already */
2817                         spin_unlock_irq(&phba->ndlp_lock);
2818                         continue;
2819                 } else
2820                         /* Indicate request for freeing ndlp memory */
2821                         NLP_SET_FREE_REQ(ndlp);
2822                 spin_unlock_irq(&phba->ndlp_lock);
2823 
2824                 if (vport->port_type != LPFC_PHYSICAL_PORT &&
2825                     ndlp->nlp_DID == Fabric_DID) {
2826                         /* Just free up ndlp with Fabric_DID for vports */
2827                         lpfc_nlp_put(ndlp);
2828                         continue;
2829                 }
2830 
2831                 /* Take care of nodes in the unused state before the
2832                  * state machine takes action.
2833                  */
2834                 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
2835                         lpfc_nlp_put(ndlp);
2836                         continue;
2837                 }
2838 
2839                 if (ndlp->nlp_type & NLP_FABRIC)
2840                         lpfc_disc_state_machine(vport, ndlp, NULL,
2841                                         NLP_EVT_DEVICE_RECOVERY);
2842 
2843                 lpfc_disc_state_machine(vport, ndlp, NULL,
2844                                              NLP_EVT_DEVICE_RM);
2845         }
2846 
2847         /* At this point, ALL ndlp's should be gone
2848          * because of the previous NLP_EVT_DEVICE_RM.
2849          * Let's wait for this to happen, if needed.
2850          */
2851         while (!list_empty(&vport->fc_nodes)) {
2852                 if (i++ > 3000) {
2853                         lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2854                                 "0233 Nodelist not empty\n");
2855                         list_for_each_entry_safe(ndlp, next_ndlp,
2856                                                 &vport->fc_nodes, nlp_listp) {
2857                                 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2858                                                 LOG_NODE,
2859                                                 "0282 did:x%x ndlp:x%px "
2860                                                 "usgmap:x%x refcnt:%d\n",
2861                                                 ndlp->nlp_DID, (void *)ndlp,
2862                                                 ndlp->nlp_usg_map,
2863                                                 kref_read(&ndlp->kref));
2864                         }
2865                         break;
2866                 }
2867 
2868                 /* Wait for any activity on ndlps to settle */
2869                 msleep(10);
2870         }
2871         lpfc_cleanup_vports_rrqs(vport, NULL);
2872 }
2873 
2874 /**
2875  * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2876  * @vport: pointer to a virtual N_Port data structure.
2877  *
2878  * This routine stops all the timers associated with a @vport. This function
2879  * is invoked before disabling or deleting a @vport. Note that the physical
2880  * port is treated as @vport 0.
2881  **/
2882 void
2883 lpfc_stop_vport_timers(struct lpfc_vport *vport)
2884 {
2885         del_timer_sync(&vport->els_tmofunc);
2886         del_timer_sync(&vport->delayed_disc_tmo);
2887         lpfc_can_disctmo(vport);
2888         return;
2889 }
2890 
2891 /**
2892  * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2893  * @phba: pointer to lpfc hba data structure.
2894  *
2895  * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2896  * caller of this routine should already hold the host lock.
2897  **/
2898 void
2899 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2900 {
2901         /* Clear pending FCF rediscovery wait flag */
2902         phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2903 
2904         /* Now, try to stop the timer */
2905         del_timer(&phba->fcf.redisc_wait);
2906 }
2907 
2908 /**
2909  * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2910  * @phba: pointer to lpfc hba data structure.
2911  *
2912  * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
2913  * checks whether the FCF rediscovery wait timer is pending with the host
2914  * lock held before proceeding with disabling the timer and clearing the
2915  * wait timer pending flag.
2916  **/
2917 void
2918 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2919 {
2920         spin_lock_irq(&phba->hbalock);
2921         if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2922                 /* FCF rediscovery timer already fired or stopped */
2923                 spin_unlock_irq(&phba->hbalock);
2924                 return;
2925         }
2926         __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2927         /* Clear failover in progress flags */
2928         phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
2929         spin_unlock_irq(&phba->hbalock);
2930 }
2931 
2932 /**
2933  * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2934  * @phba: pointer to lpfc hba data structure.
2935  *
2936  * This routine stops all the timers associated with an HBA. This function is
2937  * invoked before either putting an HBA offline or unloading the driver.
2938  **/
2939 void
2940 lpfc_stop_hba_timers(struct lpfc_hba *phba)
2941 {
2942         if (phba->pport)
2943                 lpfc_stop_vport_timers(phba->pport);
2944         cancel_delayed_work_sync(&phba->eq_delay_work);
2945         del_timer_sync(&phba->sli.mbox_tmo);
2946         del_timer_sync(&phba->fabric_block_timer);
2947         del_timer_sync(&phba->eratt_poll);
2948         del_timer_sync(&phba->hb_tmofunc);
2949         if (phba->sli_rev == LPFC_SLI_REV4) {
2950                 del_timer_sync(&phba->rrq_tmr);
2951                 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
2952         }
2953         phba->hb_outstanding = 0;
2954 
2955         switch (phba->pci_dev_grp) {
2956         case LPFC_PCI_DEV_LP:
2957                 /* Stop any LightPulse device specific driver timers */
2958                 del_timer_sync(&phba->fcp_poll_timer);
2959                 break;
2960         case LPFC_PCI_DEV_OC:
2961                 /* Stop any OneConnect device specific driver timers */
2962                 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2963                 break;
2964         default:
2965                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2966                                 "0297 Invalid device group (x%x)\n",
2967                                 phba->pci_dev_grp);
2968                 break;
2969         }
2970         return;
2971 }
2972 
2973 /**
2974  * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
2975  * @phba: pointer to lpfc hba data structure.
2976  *
2977  * This routine marks an HBA's management interface as blocked. Once the
2978  * HBA's management interface is marked as blocked, all user space access to
2979  * the HBA, whether through the sysfs interface or the libdfc interface,
2980  * is blocked. The HBA is set to block the management interface when the
2981  * driver prepares the HBA interface for online or offline.
2982  **/
2983 static void
2984 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
2985 {
2986         unsigned long iflag;
2987         uint8_t actcmd = MBX_HEARTBEAT;
2988         unsigned long timeout;
2989 
2990         spin_lock_irqsave(&phba->hbalock, iflag);
2991         phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2992         spin_unlock_irqrestore(&phba->hbalock, iflag);
2993         if (mbx_action == LPFC_MBX_NO_WAIT)
2994                 return;
2995         timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
2996         spin_lock_irqsave(&phba->hbalock, iflag);
2997         if (phba->sli.mbox_active) {
2998                 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
2999                 /* Determine how long we might wait for the active mailbox
3000                  * command to be gracefully completed by firmware.
3001                  */
3002                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
3003                                 phba->sli.mbox_active) * 1000) + jiffies;
3004         }
3005         spin_unlock_irqrestore(&phba->hbalock, iflag);
3006 
3007         /* Wait for the outstanding mailbox command to complete */
3008         while (phba->sli.mbox_active) {
3009                 /* Check active mailbox complete status every 2ms */
3010                 msleep(2);
3011                 if (time_after(jiffies, timeout)) {
3012                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3013                                 "2813 Mgmt IO is Blocked %x "
3014                                 "- mbox cmd %x still active\n",
3015                                 phba->sli.sli_flag, actcmd);
3016                         break;
3017                 }
3018         }
3019 }
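/*
 * Pairing sketch (the helper below is hypothetical): management I/O is
 * blocked around a reconfiguration step and unblocked afterwards, mirroring
 * the lpfc_online()/lpfc_offline_prep() flows later in this file.
 */
static void example_mgmt_io_quiesce(struct lpfc_hba *phba)
{
	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
	/* ... reconfigure the HBA while user space access is locked out ... */
	lpfc_unblock_mgmt_io(phba);
}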
3020 
3021 /**
3022  * lpfc_sli4_node_prep - Assign RPIs for active nodes.
3023  * @phba: pointer to lpfc hba data structure.
3024  *
3025  * Allocate RPIs for all active remote nodes. This is needed whenever
3026  * an SLI4 adapter is reset and the driver is not unloading. Its purpose
3027  * is to fix up the temporary RPI assignments.
3028  **/
3029 void
3030 lpfc_sli4_node_prep(struct lpfc_hba *phba)
3031 {
3032         struct lpfc_nodelist  *ndlp, *next_ndlp;
3033         struct lpfc_vport **vports;
3034         int i, rpi;
3035         unsigned long flags;
3036 
3037         if (phba->sli_rev != LPFC_SLI_REV4)
3038                 return;
3039 
3040         vports = lpfc_create_vport_work_array(phba);
3041         if (vports == NULL)
3042                 return;
3043 
3044         for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3045                 if (vports[i]->load_flag & FC_UNLOADING)
3046                         continue;
3047 
3048                 list_for_each_entry_safe(ndlp, next_ndlp,
3049                                          &vports[i]->fc_nodes,
3050                                          nlp_listp) {
3051                         if (!NLP_CHK_NODE_ACT(ndlp))
3052                                 continue;
3053                         rpi = lpfc_sli4_alloc_rpi(phba);
3054                         if (rpi == LPFC_RPI_ALLOC_ERROR) {
3055                                 spin_lock_irqsave(&phba->ndlp_lock, flags);
3056                                 NLP_CLR_NODE_ACT(ndlp);
3057                                 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
3058                                 continue;
3059                         }
3060                         ndlp->nlp_rpi = rpi;
3061                         lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
3062                                          "0009 rpi:%x DID:%x "
3063                                          "flg:%x map:%x x%px\n", ndlp->nlp_rpi,
3064                                          ndlp->nlp_DID, ndlp->nlp_flag,
3065                                          ndlp->nlp_usg_map, ndlp);
3066                 }
3067         }
3068         lpfc_destroy_vport_work_array(phba, vports);
3069 }
3070 
3071 /**
3072  * lpfc_create_expedite_pool - create expedite pool
3073  * @phba: pointer to lpfc hba data structure.
3074  *
3075  * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0
3076  * to the expedite pool and marks them as expedite.
3077  **/
3078 static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
3079 {
3080         struct lpfc_sli4_hdw_queue *qp;
3081         struct lpfc_io_buf *lpfc_ncmd;
3082         struct lpfc_io_buf *lpfc_ncmd_next;
3083         struct lpfc_epd_pool *epd_pool;
3084         unsigned long iflag;
3085 
3086         epd_pool = &phba->epd_pool;
3087         qp = &phba->sli4_hba.hdwq[0];
3088 
3089         spin_lock_init(&epd_pool->lock);
3090         spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3091         spin_lock(&epd_pool->lock);
3092         INIT_LIST_HEAD(&epd_pool->list);
3093         list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3094                                  &qp->lpfc_io_buf_list_put, list) {
3095                 list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3096                 lpfc_ncmd->expedite = true;
3097                 qp->put_io_bufs--;
3098                 epd_pool->count++;
3099                 if (epd_pool->count >= XRI_BATCH)
3100                         break;
3101         }
3102         spin_unlock(&epd_pool->lock);
3103         spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3104 }
3105 
3106 /**
3107  * lpfc_destroy_expedite_pool - destroy expedite pool
3108  * @phba: pointer to lpfc hba data structure.
3109  *
3110  * This routine returns XRIs from the expedite pool to lpfc_io_buf_list_put
3111  * of HWQ 0 and clears the expedite mark.
3112  **/
3113 static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3114 {
3115         struct lpfc_sli4_hdw_queue *qp;
3116         struct lpfc_io_buf *lpfc_ncmd;
3117         struct lpfc_io_buf *lpfc_ncmd_next;
3118         struct lpfc_epd_pool *epd_pool;
3119         unsigned long iflag;
3120 
3121         epd_pool = &phba->epd_pool;
3122         qp = &phba->sli4_hba.hdwq[0];
3123 
3124         spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3125         spin_lock(&epd_pool->lock);
3126         list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3127                                  &epd_pool->list, list) {
3128                 list_move_tail(&lpfc_ncmd->list,
3129                                &qp->lpfc_io_buf_list_put);
3130                 lpfc_ncmd->expedite = false;	/* clear the expedite mark */
3131                 qp->put_io_bufs++;
3132                 epd_pool->count--;
3133         }
3134         spin_unlock(&epd_pool->lock);
3135         spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3136 }
3137 
3138 /**
3139  * lpfc_create_multixri_pools - create multi-XRI pools
3140  * @phba: pointer to lpfc hba data structure.
3141  *
3142  * This routine initializes the public and private pools per HWQ, then moves
3143  * XRIs from lpfc_io_buf_list_put to the public pool. The high and low
3144  * watermarks are also initialized.
3145  **/
3146 void lpfc_create_multixri_pools(struct lpfc_hba *phba)
3147 {
3148         u32 i, j;
3149         u32 hwq_count;
3150         u32 count_per_hwq;
3151         struct lpfc_io_buf *lpfc_ncmd;
3152         struct lpfc_io_buf *lpfc_ncmd_next;
3153         unsigned long iflag;
3154         struct lpfc_sli4_hdw_queue *qp;
3155         struct lpfc_multixri_pool *multixri_pool;
3156         struct lpfc_pbl_pool *pbl_pool;
3157         struct lpfc_pvt_pool *pvt_pool;
3158 
3159         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3160                         "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
3161                         phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
3162                         phba->sli4_hba.io_xri_cnt);
3163 
3164         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3165                 lpfc_create_expedite_pool(phba);
3166 
3167         hwq_count = phba->cfg_hdw_queue;
3168         count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
3169 
3170         for (i = 0; i < hwq_count; i++) {
3171                 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);
3172 
3173                 if (!multixri_pool) {
3174                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3175                                         "1238 Failed to allocate memory for "
3176                                         "multixri_pool\n");
3177 
3178                         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3179                                 lpfc_destroy_expedite_pool(phba);
3180 
3181                         /* Unwind the pools created so far */
3182                         for (j = 0; j < i; j++) {
3183                                 qp = &phba->sli4_hba.hdwq[j];
3184                                 kfree(qp->p_multixri_pool);
3185                         }
3187                         phba->cfg_xri_rebalancing = 0;
3188                         return;
3189                 }
3190 
3191                 qp = &phba->sli4_hba.hdwq[i];
3192                 qp->p_multixri_pool = multixri_pool;
3193 
3194                 multixri_pool->xri_limit = count_per_hwq;
3195                 multixri_pool->rrb_next_hwqid = i;
3196 
3197                 /* Deal with public free xri pool */
3198                 pbl_pool = &multixri_pool->pbl_pool;
3199                 spin_lock_init(&pbl_pool->lock);
3200                 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3201                 spin_lock(&pbl_pool->lock);
3202                 INIT_LIST_HEAD(&pbl_pool->list);
3203                 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3204                                          &qp->lpfc_io_buf_list_put, list) {
3205                         list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
3206                         qp->put_io_bufs--;
3207                         pbl_pool->count++;
3208                 }
3209                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3210                                 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
3211                                 pbl_pool->count, i);
3212                 spin_unlock(&pbl_pool->lock);
3213                 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3214 
3215                 /* Deal with private free xri pool */
3216                 pvt_pool = &multixri_pool->pvt_pool;
3217                 pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
3218                 pvt_pool->low_watermark = XRI_BATCH;
3219                 spin_lock_init(&pvt_pool->lock);
3220                 spin_lock_irqsave(&pvt_pool->lock, iflag);
3221                 INIT_LIST_HEAD(&pvt_pool->list);
3222                 pvt_pool->count = 0;
3223                 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
3224         }
3225 }
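/*
 * Worked example of the sizing above (illustrative numbers only): with
 * io_xri_cnt = 1024 and cfg_hdw_queue = 4, each HWQ gets
 * xri_limit = 1024 / 4 = 256, so pvt_pool->high_watermark = 128 and
 * pvt_pool->low_watermark = XRI_BATCH; all XRIs start out in pbl_pool.
 */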
3226 
3227 /**
3228  * lpfc_destroy_multixri_pools - destroy multi-XRI pools
3229  * @phba: pointer to lpfc hba data structure.
3230  *
3231  * This routine returns XRIs from public/private to lpfc_io_buf_list_put.
3232  **/
3233 static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
3234 {
3235         u32 i;
3236         u32 hwq_count;
3237         struct lpfc_io_buf *lpfc_ncmd;
3238         struct lpfc_io_buf *lpfc_ncmd_next;
3239         unsigned long iflag;
3240         struct lpfc_sli4_hdw_queue *qp;
3241         struct lpfc_multixri_pool *multixri_pool;
3242         struct lpfc_pbl_pool *pbl_pool;
3243         struct lpfc_pvt_pool *pvt_pool;
3244 
3245         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3246                 lpfc_destroy_expedite_pool(phba);
3247 
3248         if (!(phba->pport->load_flag & FC_UNLOADING))
3249                 lpfc_sli_flush_io_rings(phba);
3250 
3251         hwq_count = phba->cfg_hdw_queue;
3252 
3253         for (i = 0; i < hwq_count; i++) {
3254                 qp = &phba->sli4_hba.hdwq[i];
3255                 multixri_pool = qp->p_multixri_pool;
3256                 if (!multixri_pool)
3257                         continue;
3258 
3259                 qp->p_multixri_pool = NULL;
3260 
3261                 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3262 
3263                 /* Deal with public free xri pool */
3264                 pbl_pool = &multixri_pool->pbl_pool;
3265                 spin_lock(&pbl_pool->lock);
3266 
3267                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3268                                 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
3269                                 pbl_pool->count, i);
3270 
3271                 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3272                                          &pbl_pool->list, list) {
3273                         list_move_tail(&lpfc_ncmd->list,
3274                                        &qp->lpfc_io_buf_list_put);
3275                         qp->put_io_bufs++;
3276                         pbl_pool->count--;
3277                 }
3278 
3279                 INIT_LIST_HEAD(&pbl_pool->list);
3280                 pbl_pool->count = 0;
3281 
3282                 spin_unlock(&pbl_pool->lock);
3283 
3284                 /* Deal with private free xri pool */
3285                 pvt_pool = &multixri_pool->pvt_pool;
3286                 spin_lock(&pvt_pool->lock);
3287 
3288                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3289                                 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
3290                                 pvt_pool->count, i);
3291 
3292                 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3293                                          &pvt_pool->list, list) {
3294                         list_move_tail(&lpfc_ncmd->list,
3295                                        &qp->lpfc_io_buf_list_put);
3296                         qp->put_io_bufs++;
3297                         pvt_pool->count--;
3298                 }
3299 
3300                 INIT_LIST_HEAD(&pvt_pool->list);
3301                 pvt_pool->count = 0;
3302 
3303                 spin_unlock(&pvt_pool->lock);
3304                 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3305 
3306                 kfree(multixri_pool);
3307         }
3308 }
3309 
3310 /**
3311  * lpfc_online - Initialize and bring a HBA online
3312  * @phba: pointer to lpfc hba data structure.
3313  *
3314  * This routine initializes the HBA and brings it online. During this
3315  * process, the management interface is blocked to prevent user space access
3316  * to the HBA from interfering with the driver initialization.
3317  *
3318  * Return codes
3319  *   0 - successful
3320  *   1 - failed
3321  **/
3322 int
3323 lpfc_online(struct lpfc_hba *phba)
3324 {
3325         struct lpfc_vport *vport;
3326         struct lpfc_vport **vports;
3327         int i, error = 0;
3328         bool vpis_cleared = false;
3329 
3330         if (!phba)
3331                 return 0;
3332         vport = phba->pport;
3333 
3334         if (!(vport->fc_flag & FC_OFFLINE_MODE))
3335                 return 0;
3336 
3337         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3338                         "0458 Bring Adapter online\n");
3339 
3340         lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
3341 
3342         if (phba->sli_rev == LPFC_SLI_REV4) {
3343                 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
3344                         lpfc_unblock_mgmt_io(phba);
3345                         return 1;
3346                 }
3347                 spin_lock_irq(&phba->hbalock);
3348                 if (!phba->sli4_hba.max_cfg_param.vpi_used)
3349                         vpis_cleared = true;
3350                 spin_unlock_irq(&phba->hbalock);
3351 
3352                 /* Reestablish the local initiator port.
3353                  * The offline process destroyed the previous lport.
3354                  */
3355                 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
3356                                 !phba->nvmet_support) {
3357                         error = lpfc_nvme_create_localport(phba->pport);
3358                         if (error)
3359                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3360                                         "6132 NVME restore reg failed "
3361                                         "on nvmei error x%x\n", error);
3362                 }
3363         } else {
3364                 lpfc_sli_queue_init(phba);
3365                 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
3366                         lpfc_unblock_mgmt_io(phba);
3367                         return 1;
3368                 }
3369         }
3370 
3371         vports = lpfc_create_vport_work_array(phba);
3372         if (vports != NULL) {
3373                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3374                         struct Scsi_Host *shost;
3375                         shost = lpfc_shost_from_vport(vports[i]);
3376                         spin_lock_irq(shost->host_lock);
3377                         vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
3378                         if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3379                                 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3380                         if (phba->sli_rev == LPFC_SLI_REV4) {
3381                                 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
3382                                 if ((vpis_cleared) &&
3383                                     (vports[i]->port_type !=
3384                                         LPFC_PHYSICAL_PORT))
3385                                         vports[i]->vpi = 0;
3386                         }
3387                         spin_unlock_irq(shost->host_lock);
3388                 }
3389         }
3390         lpfc_destroy_vport_work_array(phba, vports);
3391 
3392         if (phba->cfg_xri_rebalancing)
3393                 lpfc_create_multixri_pools(phba);
3394 
3395         lpfc_cpuhp_add(phba);
3396 
3397         lpfc_unblock_mgmt_io(phba);
3398         return 0;
3399 }
3400 
3401 /**
3402  * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
3403  * @phba: pointer to lpfc hba data structure.
3404  *
3405  * This routine marks an HBA's management interface as not blocked. Once the
3406  * HBA's management interface is marked as not blocked, all user space
3407  * access to the HBA, whether through the sysfs interface or the libdfc
3408  * interface, is allowed. The HBA is set to block the management interface
3409  * when the driver prepares the HBA interface for online or offline, and is
3410  * then set to unblock the management interface afterwards.
3411  **/
3412 void
3413 lpfc_unblock_mgmt_io(struct lpfc_hba *phba)
3414 {
3415         unsigned long iflag;
3416 
3417         spin_lock_irqsave(&phba->hbalock, iflag);
3418         phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3419         spin_unlock_irqrestore(&phba->hbalock, iflag);
3420 }
3421 
3422 /**
3423  * lpfc_offline_prep - Prepare a HBA to be brought offline
3424  * @phba: pointer to lpfc hba data structure.
3425  *
3426  * This routine is invoked to prepare an HBA to be brought offline. It
3427  * unregisters the login for all nodes on all vports and flushes the mailbox
3428  * queue to make the HBA ready to be brought offline.
3429  **/
3430 void
3431 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
3432 {
3433         struct lpfc_vport *vport = phba->pport;
3434         struct lpfc_nodelist  *ndlp, *next_ndlp;
3435         struct lpfc_vport **vports;
3436         struct Scsi_Host *shost;
3437         int i;
3438 
3439         if (vport->fc_flag & FC_OFFLINE_MODE)
3440                 return;
3441 
3442         lpfc_block_mgmt_io(phba, mbx_action);
3443 
3444         lpfc_linkdown(phba);
3445 
3446         /* Issue an unreg_login to all nodes on all vports */
3447         vports = lpfc_create_vport_work_array(phba);
3448         if (vports != NULL) {
3449                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3450                         if (vports[i]->load_flag & FC_UNLOADING)
3451                                 continue;
3452                         shost = lpfc_shost_from_vport(vports[i]);
3453                         spin_lock_irq(shost->host_lock);
3454                         vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
3455                         vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3456                         vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
3457                         spin_unlock_irq(shost->host_lock);
3458 
3459                         shost = lpfc_shost_from_vport(vports[i]);
3460                         list_for_each_entry_safe(ndlp, next_ndlp,
3461                                                  &vports[i]->fc_nodes,
3462                                                  nlp_listp) {
3463                                 if (!NLP_CHK_NODE_ACT(ndlp))
3464                                         continue;
3465                                 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
3466                                         continue;
3467                                 if (ndlp->nlp_type & NLP_FABRIC) {
3468                                         lpfc_disc_state_machine(vports[i], ndlp,
3469                                                 NULL, NLP_EVT_DEVICE_RECOVERY);
3470                                         lpfc_disc_state_machine(vports[i], ndlp,
3471                                                 NULL, NLP_EVT_DEVICE_RM);
3472                                 }
3473                                 spin_lock_irq(shost->host_lock);
3474                                 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3475                                 spin_unlock_irq(shost->host_lock);
3476                                 /*
3477                                  * Whenever an SLI4 port goes offline, free the
3478                                  * RPI. Get a new RPI when the adapter port
3479                                  * comes back online.
3480                                  */
3481                                 if (phba->sli_rev == LPFC_SLI_REV4) {
3482                                         lpfc_printf_vlog(ndlp->vport,
3483                                                          KERN_INFO, LOG_NODE,
3484                                                          "0011 lpfc_offline: "
3485                                                          "ndlp:x%px did %x "
3486                                                          "usgmap:x%x rpi:%x\n",
3487                                                          ndlp, ndlp->nlp_DID,
3488                                                          ndlp->nlp_usg_map,
3489                                                          ndlp->nlp_rpi);
3490 
3491                                         lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
3492                                 }
3493                                 lpfc_unreg_rpi(vports[i], ndlp);
3494                         }
3495                 }
3496         }
3497         lpfc_destroy_vport_work_array(phba, vports);
3498 
3499         lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
3500 
3501         if (phba->wq)
3502                 flush_workqueue(phba->wq);
3503 }
3504 
3505 /**
3506  * lpfc_offline - Bring a HBA offline
3507  * @phba: pointer to lpfc hba data structure.
3508  *
3509  * This routine actually brings an HBA offline. It stops all the timers
3510  * associated with the HBA, brings down the SLI layer, and eventually
3511  * marks the HBA as in offline state for the upper layer protocol.
3512  **/
3513 void
3514 lpfc_offline(struct lpfc_hba *phba)
3515 {
3516         struct Scsi_Host  *shost;
3517         struct lpfc_vport **vports;
3518         int i;
3519 
3520         if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3521                 return;
3522 
3523         /* stop port and all timers associated with this hba */
3524         lpfc_stop_port(phba);
3525 
3526         /* Tear down the local and target port registrations.  The
3527          * nvme transports need to clean up.
3528          */
3529         lpfc_nvmet_destroy_targetport(phba);
3530         lpfc_nvme_destroy_localport(phba->pport);
3531 
3532         vports = lpfc_create_vport_work_array(phba);
3533         if (vports != NULL)
3534                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3535                         lpfc_stop_vport_timers(vports[i]);
3536         lpfc_destroy_vport_work_array(phba, vports);
3537         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3538                         "0460 Bring Adapter offline\n");
3539         /* Bring down the SLI Layer and cleanup.  The HBA is offline now. */
3541         lpfc_sli_hba_down(phba);
3542         spin_lock_irq(&phba->hbalock);
3543         phba->work_ha = 0;
3544         spin_unlock_irq(&phba->hbalock);
3545         vports = lpfc_create_vport_work_array(phba);
3546         if (vports != NULL)
3547                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3548                         shost = lpfc_shost_from_vport(vports[i]);
3549                         spin_lock_irq(shost->host_lock);
3550                         vports[i]->work_port_events = 0;
3551                         vports[i]->fc_flag |= FC_OFFLINE_MODE;
3552                         spin_unlock_irq(shost->host_lock);
3553                 }
3554         lpfc_destroy_vport_work_array(phba, vports);
3555         __lpfc_cpuhp_remove(phba);
3556 
3557         if (phba->cfg_xri_rebalancing)
3558                 lpfc_destroy_multixri_pools(phba);
3559 }
3560 
3561 /**
3562  * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
3563  * @phba: pointer to lpfc hba data structure.
3564  *
3565  * This routine is to free all the SCSI buffers and IOCBs from the driver
3566  * list back to the kernel. It is called from lpfc_pci_remove_one to free
3567  * the internal resources before the device is removed from the system.
3568  **/
3569 static void
3570 lpfc_scsi_free(struct lpfc_hba *phba)
3571 {
3572         struct lpfc_io_buf *sb, *sb_next;
3573 
3574         if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3575                 return;
3576 
3577         spin_lock_irq(&phba->hbalock);
3578 
3579         /* Release all the lpfc_scsi_bufs maintained by this host. */
3580 
3581         spin_lock(&phba->scsi_buf_list_put_lock);
3582         list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3583                                  list) {
3584                 list_del(&sb->list);
3585                 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3586                               sb->dma_handle);
3587                 kfree(sb);
3588                 phba->total_scsi_bufs--;
3589         }
3590         spin_unlock(&phba->scsi_buf_list_put_lock);
3591 
3592         spin_lock(&phba->scsi_buf_list_get_lock);
3593         list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3594                                  list) {
3595                 list_del(&sb->list);
3596                 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3597                               sb->dma_handle);
3598                 kfree(sb);
3599                 phba->total_scsi_bufs--;
3600         }
3601         spin_unlock(&phba->scsi_buf_list_get_lock);
3602         spin_unlock_irq(&phba->hbalock);
3603 }
3604 
3605 /**
3606  * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
3607  * @phba: pointer to lpfc hba data structure.
3608  *
3609  * This routine is to free all the IO buffers and IOCBs from the driver
3610  * list back to the kernel. It is called from lpfc_pci_remove_one to free
3611  * the internal resources before the device is removed from the system.
3612  **/
3613 void
3614 lpfc_io_free(struct lpfc_hba *phba)
3615 {
3616         struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
3617         struct lpfc_sli4_hdw_queue *qp;
3618         int idx;
3619 
3620         for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3621                 qp = &phba->sli4_hba.hdwq[idx];
3622                 /* Release all the lpfc_nvme_bufs maintained by this host. */
3623                 spin_lock(&qp->io_buf_list_put_lock);
3624                 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3625                                          &qp->lpfc_io_buf_list_put,
3626                                          list) {
3627                         list_del(&lpfc_ncmd->list);
3628                         qp->put_io_bufs--;
3629                         dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3630                                       lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3631                         if (phba->cfg_xpsgl && !phba->nvmet_support)
3632                                 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3633                         lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3634                         kfree(lpfc_ncmd);
3635                         qp->total_io_bufs--;
3636                 }
3637                 spin_unlock(&qp->io_buf_list_put_lock);
3638 
3639                 spin_lock(&qp->io_buf_list_get_lock);
3640                 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3641                                          &qp->lpfc_io_buf_list_get,
3642                                          list) {
3643                         list_del(&lpfc_ncmd->list);
3644                         qp->get_io_bufs--;
3645                         dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3646                                       lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3647                         if (phba->cfg_xpsgl && !phba->nvmet_support)
3648                                 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3649                         lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3650                         kfree(lpfc_ncmd);
3651                         qp->total_io_bufs--;
3652                 }
3653                 spin_unlock(&qp->io_buf_list_get_lock);
3654         }
3655 }
3656 
3657 /**
3658  * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
3659  * @phba: pointer to lpfc hba data structure.
3660  *
3661  * This routine first calculates the sizes of the current els and allocated
3662  * scsi sgl lists, and then goes through all sgls to update the physical
3663  * XRIs assigned due to port function reset. During port initialization, the
3664  * current els and allocated scsi sgl lists are both 0.
3665  *
3666  * Return codes
3667  *   0 - successful (for now, it always returns 0)
3668  **/
3669 int
3670 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
3671 {
3672         struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3673         uint16_t i, lxri, xri_cnt, els_xri_cnt;
3674         LIST_HEAD(els_sgl_list);
3675         int rc;
3676 
3677         /*
3678          * update on pci function's els xri-sgl list
3679          */
3680         els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3681 
3682         if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
3683                 /* els xri-sgl expanded */
3684                 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
3685                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3686                                 "3157 ELS xri-sgl count increased from "
3687                                 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3688                                 els_xri_cnt);
3689                 /* allocate the additional els sgls */
3690                 for (i = 0; i < xri_cnt; i++) {
3691                         sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3692                                              GFP_KERNEL);
3693                         if (sglq_entry == NULL) {
3694                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3695                                                 "2562 Failure to allocate an "
3696                                                 "ELS sgl entry:%d\n", i);
3697                                 rc = -ENOMEM;
3698                                 goto out_free_mem;
3699                         }
3700                         sglq_entry->buff_type = GEN_BUFF_TYPE;
3701                         sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
3702                                                            &sglq_entry->phys);
3703                         if (sglq_entry->virt == NULL) {
3704                                 kfree(sglq_entry);
3705                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3706                                                 "2563 Failure to allocate an "
3707                                                 "ELS mbuf:%d\n", i);
3708                                 rc = -ENOMEM;
3709                                 goto out_free_mem;
3710                         }
3711                         sglq_entry->sgl = sglq_entry->virt;
3712                         memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3713                         sglq_entry->state = SGL_FREED;
3714                         list_add_tail(&sglq_entry->list, &els_sgl_list);
3715                 }
3716                 spin_lock_irq(&phba->hbalock);
3717                 spin_lock(&phba->sli4_hba.sgl_list_lock);
3718                 list_splice_init(&els_sgl_list,
3719                                  &phba->sli4_hba.lpfc_els_sgl_list);
3720                 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3721                 spin_unlock_irq(&phba->hbalock);
3722         } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
3723                 /* els xri-sgl shrunk */
3724                 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
3725                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3726                                 "3158 ELS xri-sgl count decreased from "
3727                                 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3728                                 els_xri_cnt);
3729                 spin_lock_irq(&phba->hbalock);
3730                 spin_lock(&phba->sli4_hba.sgl_list_lock);
3731                 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
3732                                  &els_sgl_list);
3733                 /* release extra els sgls from list */
3734                 for (i = 0; i < xri_cnt; i++) {
3735                         list_remove_head(&els_sgl_list,
3736                                          sglq_entry, struct lpfc_sglq, list);
3737                         if (sglq_entry) {
3738                                 __lpfc_mbuf_free(phba, sglq_entry->virt,
3739                                                  sglq_entry->phys);
3740                                 kfree(sglq_entry);
3741                         }
3742                 }
3743                 list_splice_init(&els_sgl_list,
3744                                  &phba->sli4_hba.lpfc_els_sgl_list);
3745                 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3746                 spin_unlock_irq(&phba->hbalock);
3747         } else
3748                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3749                                 "3163 ELS xri-sgl count unchanged: %d\n",
3750                                 els_xri_cnt);
3751         phba->sli4_hba.els_xri_cnt = els_xri_cnt;
3752 
3753         /* update xris to els sgls on the list */
3754         sglq_entry = NULL;
3755         sglq_entry_next = NULL;
3756         list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3757                                  &phba->sli4_hba.lpfc_els_sgl_list, list) {
3758                 lxri = lpfc_sli4_next_xritag(phba);
3759                 if (lxri == NO_XRI) {
3760                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3761                                         "2400 Failed to allocate xri for "
3762                                         "ELS sgl\n");
3763                         rc = -ENOMEM;
3764                         goto out_free_mem;
3765                 }
3766                 sglq_entry->sli4_lxritag = lxri;
3767                 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3768         }
3769         return 0;
3770 
3771 out_free_mem:
3772         lpfc_free_els_sgl_list(phba);
3773         return rc;
3774 }
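/*
 * Editor's sketch (hypothetical names, not driver code): the shrink path
 * above splices the whole list onto a private head, frees the excess
 * entries, and splices the remainder back, all under the list locks, so
 * other threads never observe a half-trimmed list:
 *
 *	static void demo_trim(struct list_head *pool, spinlock_t *lock,
 *			      int excess)
 *	{
 *		LIST_HEAD(tmp);
 *		struct demo_buf *buf;
 *
 *		spin_lock(lock);
 *		list_splice_init(pool, &tmp);
 *		while (excess-- > 0 && !list_empty(&tmp)) {
 *			buf = list_first_entry(&tmp, struct demo_buf, list);
 *			list_del(&buf->list);
 *			kfree(buf);
 *		}
 *		list_splice_init(&tmp, pool);	// put survivors back
 *		spin_unlock(lock);
 *	}
 */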
3775 
3776 /**
3777  * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
3778  * @phba: pointer to lpfc hba data structure.
3779  *
3780  * This routine first calculates the current ELS and NVMET xri-sgl counts,
3781  * and then goes through all NVMET sgls to update the physical XRIs
3782  * assigned due to port function reset. During port initialization, the
3783  * current NVMET sgl list is empty.
3784  *
3785  * Return codes
3786  *   0 - successful; -ENOMEM - allocation failure
3787  **/
3788 int
3789 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
3790 {
3791         struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3792         uint16_t i, lxri, xri_cnt, els_xri_cnt;
3793         uint16_t nvmet_xri_cnt;
3794         LIST_HEAD(nvmet_sgl_list);
3795         int rc;
3796 
3797         /*
3798          * update on pci function's nvmet xri-sgl list
3799          */
3800         els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3801 
3802         /* For NVMET, ALL remaining XRIs are dedicated for IO processing */
3803         nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
3804         if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
3805                 /* nvmet xri-sgl expanded */
3806                 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
3807                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3808                                 "6302 NVMET xri-sgl cnt grew from %d to %d\n",
3809                                 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
3810                 /* allocate the additional nvmet sgls */
3811                 for (i = 0; i < xri_cnt; i++) {
3812                         sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3813                                              GFP_KERNEL);
3814                         if (sglq_entry == NULL) {
3815                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3816                                                 "6303 Failure to allocate an "
3817                                                 "NVMET sgl entry:%d\n", i);
3818                                 rc = -ENOMEM;
3819                                 goto out_free_mem;
3820                         }
3821                         sglq_entry->buff_type = NVMET_BUFF_TYPE;
3822                         sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
3823                                                            &sglq_entry->phys);
3824                         if (sglq_entry->virt == NULL) {
3825                                 kfree(sglq_entry);
3826                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3827                                                 "6304 Failure to allocate an "
3828                                                 "NVMET buf:%d\n", i);
3829                                 rc = -ENOMEM;
3830                                 goto out_free_mem;
3831                         }
3832                         sglq_entry->sgl = sglq_entry->virt;
3833                         memset(sglq_entry->sgl, 0,
3834                                phba->cfg_sg_dma_buf_size);
3835                         sglq_entry->state = SGL_FREED;
3836                         list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
3837                 }
3838                 spin_lock_irq(&phba->hbalock);
3839                 spin_lock(&phba->sli4_hba.sgl_list_lock);
3840                 list_splice_init(&nvmet_sgl_list,
3841                                  &phba->sli4_hba.lpfc_nvmet_sgl_list);
3842                 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3843                 spin_unlock_irq(&phba->hbalock);
3844         } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
3845                 /* nvmet xri-sgl shrunk */
3846                 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
3847                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3848                                 "6305 NVMET xri-sgl count decreased from "
3849                                 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
3850                                 nvmet_xri_cnt);
3851                 spin_lock_irq(&phba->hbalock);
3852                 spin_lock(&phba->sli4_hba.sgl_list_lock);
3853                 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
3854                                  &nvmet_sgl_list);
3855                 /* release extra nvmet sgls from list */
3856                 for (i = 0; i < xri_cnt; i++) {
3857                         list_remove_head(&nvmet_sgl_list,
3858                                          sglq_entry, struct lpfc_sglq, list);
3859                         if (sglq_entry) {
3860                                 lpfc_nvmet_buf_free(phba, sglq_entry->virt,
3861                                                     sglq_entry->phys);
3862                                 kfree(sglq_entry);
3863                         }
3864                 }
3865                 list_splice_init(&nvmet_sgl_list,
3866                                  &phba->sli4_hba.lpfc_nvmet_sgl_list);
3867                 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3868                 spin_unlock_irq(&phba->hbalock);
3869         } else
3870                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3871                                 "6306 NVMET xri-sgl count unchanged: %d\n",
3872                                 nvmet_xri_cnt);
3873         phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
3874 
3875         /* update xris to nvmet sgls on the list */
3876         sglq_entry = NULL;
3877         sglq_entry_next = NULL;
3878         list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3879                                  &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
3880                 lxri = lpfc_sli4_next_xritag(phba);
3881                 if (lxri == NO_XRI) {
3882                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3883                                         "6307 Failed to allocate xri for "
3884                                         "NVMET sgl\n");
3885                         rc = -ENOMEM;
3886                         goto out_free_mem;
3887                 }
3888                 sglq_entry->sli4_lxritag = lxri;
3889                 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3890         }
3891         return 0;
3892 
3893 out_free_mem:
3894         lpfc_free_nvmet_sgl_list(phba);
3895         return rc;
3896 }
3897 
3898 int
3899 lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
3900 {
3901         LIST_HEAD(blist);
3902         struct lpfc_sli4_hdw_queue *qp;
3903         struct lpfc_io_buf *lpfc_cmd;
3904         struct lpfc_io_buf *iobufp, *prev_iobufp;
3905         int idx, cnt, xri, inserted;
3906 
3907         cnt = 0;
3908         for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3909                 qp = &phba->sli4_hba.hdwq[idx];
3910                 spin_lock_irq(&qp->io_buf_list_get_lock);
3911                 spin_lock(&qp->io_buf_list_put_lock);
3912 
3913                 /* Take everything off the get and put lists */
3914                 list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
3915                 list_splice(&qp->lpfc_io_buf_list_put, &blist);
3916                 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
3917                 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
3918                 cnt += qp->get_io_bufs + qp->put_io_bufs;
3919                 qp->get_io_bufs = 0;
3920                 qp->put_io_bufs = 0;
3921                 qp->total_io_bufs = 0;
3922                 spin_unlock(&qp->io_buf_list_put_lock);
3923                 spin_unlock_irq(&qp->io_buf_list_get_lock);
3924         }
3925 
3926         /*
3927          * Take IO buffers off blist and put on cbuf sorted by XRI.
3928          * This is because POST_SGL takes a sequential range of XRIs
3929          * to post to the firmware.
3930          */
3931         for (idx = 0; idx < cnt; idx++) {
3932                 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
3933                 if (!lpfc_cmd)
3934                         return cnt;
3935                 if (idx == 0) {
3936                         list_add_tail(&lpfc_cmd->list, cbuf);
3937                         continue;
3938                 }
3939                 xri = lpfc_cmd->cur_iocbq.sli4_xritag;
3940                 inserted = 0;
3941                 prev_iobufp = NULL;
3942                 list_for_each_entry(iobufp, cbuf, list) {
3943                         if (xri < iobufp->cur_iocbq.sli4_xritag) {
3944                                 if (prev_iobufp)
3945                                         list_add(&lpfc_cmd->list,
3946                                                  &prev_iobufp->list);
3947                                 else
3948                                         list_add(&lpfc_cmd->list, cbuf);
3949                                 inserted = 1;
3950                                 break;
3951                         }
3952                         prev_iobufp = iobufp;
3953                 }
3954                 if (!inserted)
3955                         list_add_tail(&lpfc_cmd->list, cbuf);
3956         }
3957         return cnt;
3958 }
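/*
 * Editor's sketch: lpfc_io_buf_flush() keeps cbuf sorted by XRI because,
 * as the comment above notes, POST_SGL posts a sequential range of XRIs
 * to the firmware. The ordered-insert step in isolation, assuming a
 * hypothetical demo_buf that carries an integer key (not driver code):
 *
 *	struct demo_buf {
 *		struct list_head list;
 *		int key;
 *	};
 *
 *	static void demo_insert_sorted(struct list_head *head,
 *				       struct demo_buf *nbuf)
 *	{
 *		struct demo_buf *pos;
 *
 *		list_for_each_entry(pos, head, list) {
 *			if (nbuf->key < pos->key) {
 *				// lands just before the first larger entry
 *				list_add_tail(&nbuf->list, &pos->list);
 *				return;
 *			}
 *		}
 *		list_add_tail(&nbuf->list, head);	// largest so far
 *	}
 */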
3959 
3960 int
3961 lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
3962 {
3963         struct lpfc_sli4_hdw_queue *qp;
3964         struct lpfc_io_buf *lpfc_cmd;
3965         int idx, cnt;
3966 
3967         qp = phba->sli4_hba.hdwq;
3968         cnt = 0;
3969         while (!list_empty(cbuf)) {
3970                 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3971                         list_remove_head(cbuf, lpfc_cmd,
3972                                          struct lpfc_io_buf, list);
3973                         if (!lpfc_cmd)
3974                                 return cnt;
3975                         cnt++;
3976                         qp = &phba->sli4_hba.hdwq[idx];
3977                         lpfc_cmd->hdwq_no = idx;
3978                         lpfc_cmd->hdwq = qp;
3979                         lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
3980                         lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
3981                         spin_lock(&qp->io_buf_list_put_lock);
3982                         list_add_tail(&lpfc_cmd->list,
3983                                       &qp->lpfc_io_buf_list_put);
3984                         qp->put_io_bufs++;
3985                         qp->total_io_bufs++;
3986                         spin_unlock(&qp->io_buf_list_put_lock);
3987                 }
3988         }
3989         return cnt;
3990 }
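/*
 * Editor's sketch: the replenish loop above deals one buffer to each
 * hardware queue per pass, so buffers end up spread evenly across the
 * queues. The same round-robin distribution with hypothetical names
 * (not driver code), where 'queues' is an array of nr_queues list heads:
 *
 *	static int demo_distribute(struct list_head *src,
 *				   struct list_head *queues, int nr_queues)
 *	{
 *		struct demo_buf *buf;
 *		int idx = 0, cnt = 0;
 *
 *		while (!list_empty(src)) {
 *			buf = list_first_entry(src, struct demo_buf, list);
 *			list_del(&buf->list);
 *			list_add_tail(&buf->list, &queues[idx]);
 *			idx = (idx + 1) % nr_queues;
 *			cnt++;
 *		}
 *		return cnt;
 *	}
 */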
3991 
3992 /**
3993  * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
3994  * @phba: pointer to lpfc hba data structure.
3995  *
3996  * This routine first calculates the current ELS and IO xri-sgl counts,
3997  * and then goes through all IO sgls to update the physical XRIs assigned
3998  * due to port function reset. During port initialization, the current
3999  * ELS and IO sgl lists are both empty.
4000  *
4001  * Return codes
4002  *   0 - successful; -ENOMEM - allocation failure
4003  **/
4004 int
4005 lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
4006 {
4007         struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
4008         uint16_t i, lxri, els_xri_cnt;
4009         uint16_t io_xri_cnt, io_xri_max;
4010         LIST_HEAD(io_sgl_list);
4011         int rc, cnt;
4012 
4013         /*
4014          * update on pci function's allocated nvme xri-sgl list
4015          */
4016 
4017         /* maximum number of xris available for nvme buffers */
4018         els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4019         io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4020         phba->sli4_hba.io_xri_max = io_xri_max;
4021 
4022         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4023                         "6074 Current allocated XRI sgl count:%d, "
4024                         "maximum XRI count:%d\n",
4025                         phba->sli4_hba.io_xri_cnt,
4026                         phba->sli4_hba.io_xri_max);
4027 
4028         cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
4029 
4030         if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4031                 /* max nvme xri shrunk below the allocated nvme buffers */
4032                 io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4033                                         phba->sli4_hba.io_xri_max;
4034                 /* release the extra allocated nvme buffers */
4035                 for (i = 0; i < io_xri_cnt; i++) {
4036                         list_remove_head(&io_sgl_list, lpfc_ncmd,
4037                                          struct lpfc_io_buf, list);
4038                         if (lpfc_ncmd) {
4039                                 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4040                                               lpfc_ncmd->data,
4041                                               lpfc_ncmd->dma_handle);
4042                                 kfree(lpfc_ncmd);
4043                         }
4044                 }
4045                 phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
4046         }
4047 
4048         /* update xris associated to remaining allocated nvme buffers */
4049         lpfc_ncmd = NULL;
4050         lpfc_ncmd_next = NULL;
4051         phba->sli4_hba.io_xri_cnt = cnt;
4052         list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4053                                  &io_sgl_list, list) {
4054                 lxri = lpfc_sli4_next_xritag(phba);
4055                 if (lxri == NO_XRI) {
4056                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4057                                         "6075 Failed to allocate xri for "
4058                                         "nvme buffer\n");
4059                         rc = -ENOMEM;
4060                         goto out_free_mem;
4061                 }
4062                 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4063                 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4064         }
4065         cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
4066         return 0;
4067 
4068 out_free_mem:
4069         lpfc_io_free(phba);
4070         return rc;
4071 }
4072 
4073 /**
4074  * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
4075  * @vport: The virtual port for which this call being executed.
4076  * @num_to_allocate: The requested number of buffers to allocate.
4077  *
4078  * This routine allocates nvme buffers for device with SLI-4 interface spec,
4079  * the nvme buffer contains all the necessary information needed to initiate
4080  * an I/O. After allocating up to @num_to_allocate IO buffers and put
4081  * them on a list, it post them to the port by using SGL block post.
4082  *
4083  * Return codes:
4084  *   int - number of IO buffers that were allocated and posted.
4085  *   0 = failure; a count less than num_to_alloc indicates a partial failure.
4086  **/
4087 int
4088 lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4089 {
4090         struct lpfc_io_buf *lpfc_ncmd;
4091         struct lpfc_iocbq *pwqeq;
4092         uint16_t iotag, lxri = 0;
4093         int bcnt, num_posted;
4094         LIST_HEAD(prep_nblist);
4095         LIST_HEAD(post_nblist);
4096         LIST_HEAD(nvme_nblist);
4097 
4098         phba->sli4_hba.io_xri_cnt = 0;
4099         for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
4100                 lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
4101                 if (!lpfc_ncmd)
4102                         break;
4103                 /*
4104                  * Get memory from the pci pool to map the virt space to
4105                  * pci bus space for an I/O. The DMA buffer includes the
4106                  * number of SGE's necessary to support the sg_tablesize.
4107                  */
4108                 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
4109                                                   GFP_KERNEL,
4110                                                   &lpfc_ncmd->dma_handle);
4111                 if (!lpfc_ncmd->data) {
4112                         kfree(lpfc_ncmd);
4113                         break;
4114                 }
4115 
4116                 if (phba->cfg_xpsgl && !phba->nvmet_support) {
4117                         INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
4118                 } else {
4119                         /*
4120                          * 4K Page alignment is CRITICAL to BlockGuard, double
4121                          * check to be sure.
4122                          */
4123                         if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
4124                             (((unsigned long)(lpfc_ncmd->data) &
4125                             (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
4126                                 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
4127                                                 "3369 Memory alignment err: "
4128                                                 "addr=%lx\n",
4129                                                 (unsigned long)lpfc_ncmd->data);
4130                                 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4131                                               lpfc_ncmd->data,
4132                                               lpfc_ncmd->dma_handle);
4133                                 kfree(lpfc_ncmd);
4134                                 break;
4135                         }
4136                 }
4137 
4138                 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
4139 
4140                 lxri = lpfc_sli4_next_xritag(phba);
4141                 if (lxri == NO_XRI) {
4142                         dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4143                                       lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4144                         kfree(lpfc_ncmd);
4145                         break;
4146                 }
4147                 pwqeq = &lpfc_ncmd->cur_iocbq;
4148 
4149                 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */
4150                 iotag = lpfc_sli_next_iotag(phba, pwqeq);
4151                 if (iotag == 0) {
4152                         dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4153                                       lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4154                         kfree(lpfc_ncmd);
4155                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
4156                                         "6121 Failed to allocate IOTAG for"
4157                                         " XRI:0x%x\n", lxri);
4158                         lpfc_sli4_free_xri(phba, lxri);
4159                         break;
4160                 }
4161                 pwqeq->sli4_lxritag = lxri;
4162                 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4163                 pwqeq->context1 = lpfc_ncmd;
4164 
4165                 /* Initialize local short-hand pointers. */
4166                 lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
4167                 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
4168                 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
4169                 spin_lock_init(&lpfc_ncmd->buf_lock);
4170 
4171                 /* add the nvme buffer to a post list */
4172                 list_add_tail(&lpfc_ncmd->list, &post_nblist);
4173                 phba->sli4_hba.io_xri_cnt++;
4174         }
4175         lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
4176                         "6114 Allocate %d out of %d requested new NVME "
4177                         "buffers\n", bcnt, num_to_alloc);
4178 
4179         /* post the list of nvme buffer sgls to port if available */
4180         if (!list_empty(&post_nblist))
4181                 num_posted = lpfc_sli4_post_io_sgl_list(
4182                                 phba, &post_nblist, bcnt);
4183         else
4184                 num_posted = 0;
4185 
4186         return num_posted;
4187 }
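/*
 * Editor's sketch: lpfc_new_io_buf() is deliberately best-effort -- each
 * allocation failure unwinds only the buffer being built and breaks out,
 * and the caller learns how many buffers actually made it. The skeleton
 * of that pattern with hypothetical names (not driver code):
 *
 *	static int demo_alloc_batch(struct list_head *out, int want)
 *	{
 *		struct demo_buf *buf;
 *		int done;
 *
 *		for (done = 0; done < want; done++) {
 *			buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 *			if (!buf)
 *				break;	// partial result, not a hard error
 *			list_add_tail(&buf->list, out);
 *		}
 *		return done;
 *	}
 */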
4188 
4189 static uint64_t
4190 lpfc_get_wwpn(struct lpfc_hba *phba)
4191 {
4192         uint64_t wwn;
4193         int rc;
4194         LPFC_MBOXQ_t *mboxq;
4195         MAILBOX_t *mb;
4196 
4197         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4198                                                 GFP_KERNEL);
4199         if (!mboxq)
4200                 return (uint64_t)-1;
4201 
4202         /* First get WWN of HBA instance */
4203         lpfc_read_nv(phba, mboxq);
4204         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4205         if (rc != MBX_SUCCESS) {
4206                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4207                                 "6019 Mailbox failed, mbxCmd x%x "
4208                                 "READ_NV, mbxStatus x%x\n",
4209                                 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4210                                 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
4211                 mempool_free(mboxq, phba->mbox_mem_pool);
4212                 return (uint64_t) -1;
4213         }
4214         mb = &mboxq->u.mb;
4215         memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
4216         /* wwn is WWPN of HBA instance */
4217         mempool_free(mboxq, phba->mbox_mem_pool);
4218         if (phba->sli_rev == LPFC_SLI_REV4)
4219                 return be64_to_cpu(wwn);
4220         else
4221                 return rol64(wwn, 32);
4222 }
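/*
 * Editor's sketch: the SLI4 branch above byte-swaps the mailbox payload
 * with be64_to_cpu() because the WWPN arrives in big-endian wire order.
 * Normalizing such a name from a raw buffer (hypothetical helper, not
 * driver code):
 *
 *	#include <linux/string.h>
 *	#include <linux/types.h>
 *	#include <asm/byteorder.h>
 *
 *	static u64 demo_wwn_to_host(const u8 raw[8])
 *	{
 *		__be64 wire;
 *
 *		memcpy(&wire, raw, sizeof(wire));
 *		return be64_to_cpu(wire);	// same value on LE and BE hosts
 *	}
 */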
4223 
4224 /**
4225  * lpfc_create_port - Create an FC port
4226  * @phba: pointer to lpfc hba data structure.
4227  * @instance: a unique integer ID to this FC port.
4228  * @dev: pointer to the device data structure.
4229  *
4230  * This routine creates an FC port for the upper layer protocol. The FC port
4231  * can be created on top of either a physical port or a virtual port provided
4232  * by the HBA. This routine also allocates a SCSI host data structure (shost)
4233  * and associates it with the FC port created, before adding the shost to the
4234  * SCSI layer.
4235  *
4236  * Return codes
4237  *   @vport - pointer to the virtual N_Port data structure.
4238  *   NULL - port create failed.
4239  **/
4240 struct lpfc_vport *
4241 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
4242 {
4243         struct lpfc_vport *vport;
4244         struct Scsi_Host  *shost = NULL;
4245         int error = 0;
4246         int i;
4247         uint64_t wwn;
4248         bool use_no_reset_hba = false;
4249         int rc;
4250 
4251         if (lpfc_no_hba_reset_cnt) {
4252                 if (phba->sli_rev < LPFC_SLI_REV4 &&
4253                     dev == &phba->pcidev->dev) {
4254                         /* Reset the port first */
4255                         lpfc_sli_brdrestart(phba);
4256                         rc = lpfc_sli_chipset_init(phba);
4257                         if (rc)
4258                                 return NULL;
4259                 }
4260                 wwn = lpfc_get_wwpn(phba);
4261         }
4262 
4263         for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
4264                 if (wwn == lpfc_no_hba_reset[i]) {
4265                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4266                                         "6020 Setting use_no_reset port=%llx\n",
4267                                         wwn);
4268                         use_no_reset_hba = true;
4269                         break;
4270                 }
4271         }
4272 
4273         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
4274                 if (dev != &phba->pcidev->dev) {
4275                         shost = scsi_host_alloc(&lpfc_vport_template,
4276                                                 sizeof(struct lpfc_vport));
4277                 } else {
4278                         if (!use_no_reset_hba)
4279                                 shost = scsi_host_alloc(&lpfc_template,
4280                                                 sizeof(struct lpfc_vport));
4281                         else
4282                                 shost = scsi_host_alloc(&lpfc_template_no_hr,
4283                                                 sizeof(struct lpfc_vport));
4284                 }
4285         } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
4286                 shost = scsi_host_alloc(&lpfc_template_nvme,
4287                                         sizeof(struct lpfc_vport));
4288         }
4289         if (!shost)
4290                 goto out;
4291 
4292         vport = (struct lpfc_vport *) shost->hostdata;
4293         vport->phba = phba;
4294         vport->load_flag |= FC_LOADING;
4295         vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4296         vport->fc_rscn_flush = 0;
4297         lpfc_get_vport_cfgparam(vport);
4298 
4299         /* Adjust value in vport */
4300         vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4301 
4302         shost->unique_id = instance;
4303         shost->max_id = LPFC_MAX_TARGET;
4304         shost->max_lun = vport->cfg_max_luns;
4305         shost->this_id = -1;
4306         shost->max_cmd_len = 16;
4307 
4308         if (phba->sli_rev == LPFC_SLI_REV4) {
4309                 if (!phba->cfg_fcp_mq_threshold ||
4310                     phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4311                         phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
4312 
4313                 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
4314                                             phba->cfg_fcp_mq_threshold);
4315 
4316                 shost->dma_boundary =
4317                         phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
4318 
4319                 if (phba->cfg_xpsgl && !phba->nvmet_support)
4320                         shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
4321                 else
4322                         shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
4323         } else
4324                 /* SLI-3 has a limited number of hardware queues (3),
4325                  * thus there is only one for FCP processing.
4326                  */
4327                 shost->nr_hw_queues = 1;
4328 
4329         /*
4330          * Set initial can_queue value since 0 is no longer supported and
4331          * scsi_add_host will fail. This will be adjusted later based on the
4332          * max xri value determined in hba setup.
4333          */
4334         shost->can_queue = phba->cfg_hba_queue_depth - 10;
4335         if (dev != &phba->pcidev->dev) {
4336                 shost->transportt = lpfc_vport_transport_template;
4337                 vport->port_type = LPFC_NPIV_PORT;
4338         } else {
4339                 shost->transportt = lpfc_transport_template;
4340                 vport->port_type = LPFC_PHYSICAL_PORT;
4341         }
4342 
4343         /* Initialize all internally managed lists. */
4344         INIT_LIST_HEAD(&vport->fc_nodes);
4345         INIT_LIST_HEAD(&vport->rcv_buffer_list);
4346         spin_lock_init(&vport->work_port_lock);
4347 
4348         timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
4349 
4350         timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
4351 
4352         timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
4353 
4354         if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4355                 lpfc_setup_bg(phba, shost);
4356 
4357         error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
4358         if (error)
4359                 goto out_put_shost;
4360 
4361         spin_lock_irq(&phba->port_list_lock);
4362         list_add_tail(&vport->listentry, &phba->port_list);
4363         spin_unlock_irq(&phba->port_list_lock);
4364         return vport;
4365 
4366 out_put_shost:
4367         scsi_host_put(shost);
4368 out:
4369         return NULL;
4370 }
4371 
4372 /**
4373  * destroy_port -  destroy an FC port
4374  * @vport: pointer to an lpfc virtual N_Port data structure.
4375  *
4376  * This routine destroys an FC port from the upper layer protocol. All the
4377  * resources associated with the port are released.
4378  **/
4379 void
4380 destroy_port(struct lpfc_vport *vport)
4381 {
4382         struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4383         struct lpfc_hba  *phba = vport->phba;
4384 
4385         lpfc_debugfs_terminate(vport);
4386         fc_remove_host(shost);
4387         scsi_remove_host(shost);
4388 
4389         spin_lock_irq(&phba->port_list_lock);
4390         list_del_init(&vport->listentry);
4391         spin_unlock_irq(&phba->port_list_lock);
4392 
4393         lpfc_cleanup(vport);
4394         return;
4395 }
4396 
4397 /**
4398  * lpfc_get_instance - Get a unique integer ID
4399  *
4400  * This routine allocates a unique integer ID from the lpfc_hba_index pool.
4401  * It uses the kernel idr facility to perform the task.
4402  *
4403  * Return codes:
4404  *   instance - a unique integer ID allocated as the new instance.
4405  *   -1 - lpfc get instance failed.
4406  **/
4407 int
4408 lpfc_get_instance(void)
4409 {
4410         int ret;
4411 
4412         ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
4413         return ret < 0 ? -1 : ret;
4414 }
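/*
 * Editor's sketch: idr_alloc() with end == 0 hands out the lowest unused
 * ID at or above start, which is the "unique instance number" behavior
 * relied on here; the ID must eventually be returned with idr_remove().
 * A standalone usage sketch (hypothetical names, not driver code):
 *
 *	#include <linux/idr.h>
 *
 *	static DEFINE_IDR(demo_idr);
 *
 *	static int demo_get_id(void *object)
 *	{
 *		return idr_alloc(&demo_idr, object, 0, 0, GFP_KERNEL);
 *	}
 *
 *	static void demo_put_id(int id)
 *	{
 *		idr_remove(&demo_idr, id);
 *	}
 */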
4415 
4416 /**
4417  * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
4418  * @shost: pointer to SCSI host data structure.
4419  * @time: elapsed time of the scan in jiffies.
4420  *
4421  * This routine is called by the SCSI layer with a SCSI host to determine
4422  * whether the host scan is finished.
4423  *
4424  * Note: there is no scan_start function as adapter initialization will have
4425  * asynchronously kicked off the link initialization.
4426  *
4427  * Return codes
4428  *   0 - SCSI host scan is not over yet.
4429  *   1 - SCSI host scan is over.
4430  **/
4431 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
4432 {
4433         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4434         struct lpfc_hba   *phba = vport->phba;
4435         int stat = 0;
4436 
4437         spin_lock_irq(shost->host_lock);
4438 
4439         if (vport->load_flag & FC_UNLOADING) {
4440                 stat = 1;
4441                 goto finished;
4442         }
4443         if (time >= msecs_to_jiffies(30 * 1000)) {
4444                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4445                                 "0461 Scanning longer than 30 "
4446                                 "seconds.  Continuing initialization\n");
4447                 stat = 1;
4448                 goto finished;
4449         }
4450         if (time >= msecs_to_jiffies(15 * 1000) &&
4451             phba->link_state <= LPFC_LINK_DOWN) {
4452                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4453                                 "0465 Link down longer than 15 "
4454                                 "seconds.  Continuing initialization\n");
4455                 stat = 1;
4456                 goto finished;
4457         }
4458 
4459         if (vport->port_state != LPFC_VPORT_READY)
4460                 goto finished;
4461         if (vport->num_disc_nodes || vport->fc_prli_sent)
4462                 goto finished;
4463         if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
4464                 goto finished;
4465         if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
4466                 goto finished;
4467 
4468         stat = 1;
4469 
4470 finished:
4471         spin_unlock_irq(shost->host_lock);
4472         return stat;
4473 }
4474 
4475 static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
4476 {
4477         struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4478         struct lpfc_hba   *phba = vport->phba;
4479 
4480         fc_host_supported_speeds(shost) = 0;
4481         if (phba->lmt & LMT_128Gb)
4482                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
4483         if (phba->lmt & LMT_64Gb)
4484                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4485         if (phba->lmt & LMT_32Gb)
4486                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4487         if (phba->lmt & LMT_16Gb)
4488                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4489         if (phba->lmt & LMT_10Gb)
4490                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4491         if (phba->lmt & LMT_8Gb)
4492                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4493         if (phba->lmt & LMT_4Gb)
4494                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4495         if (phba->lmt & LMT_2Gb)
4496                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4497         if (phba->lmt & LMT_1Gb)
4498                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
4499 }
4500 
4501 /**
4502  * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
4503  * @shost: pointer to SCSI host data structure.
4504  *
4505  * This routine initializes the SCSI host attributes on an FC port. The
4506  * SCSI host can be either on top of a physical port or a virtual port.
4507  **/
4508 void lpfc_host_attrib_init(struct Scsi_Host *shost)
4509 {
4510         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4511         struct lpfc_hba   *phba = vport->phba;
4512         /*
4513          * Set fixed host attributes.  Must be done after lpfc_sli_hba_setup().
4514          */
4515 
4516         fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4517         fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
4518         fc_host_supported_classes(shost) = FC_COS_CLASS3;
4519 
4520         memset(fc_host_supported_fc4s(shost), 0,
4521                sizeof(fc_host_supported_fc4s(shost)));
4522         fc_host_supported_fc4s(shost)[2] = 1;
4523         fc_host_supported_fc4s(shost)[7] = 1;
4524 
4525         lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
4526                                  sizeof fc_host_symbolic_name(shost));
4527 
4528         lpfc_host_supported_speeds_set(shost);
4529 
4530         fc_host_maxframe_size(shost) =
4531                 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
4532                 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
4533 
4534         fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
4535 
4536         /* This value is also unchanging */
4537         memset(fc_host_active_fc4s(shost), 0,
4538                sizeof(fc_host_active_fc4s(shost)));
4539         fc_host_active_fc4s(shost)[2] = 1;
4540         fc_host_active_fc4s(shost)[7] = 1;
4541 
4542         fc_host_max_npiv_vports(shost) = phba->max_vpi;
4543         spin_lock_irq(shost->host_lock);
4544         vport->load_flag &= ~FC_LOADING;
4545         spin_unlock_irq(shost->host_lock);
4546 }
4547 
4548 /**
4549  * lpfc_stop_port_s3 - Stop SLI3 device port
4550  * @phba: pointer to lpfc hba data structure.
4551  *
4552  * This routine is invoked to stop an SLI3 device port. It stops the device
4553  * from generating interrupts and stops the device driver's timers for the
4554  * device.
4555  **/
4556 static void
4557 lpfc_stop_port_s3(struct lpfc_hba *phba)
4558 {
4559         /* Clear all interrupt enable conditions */
4560         writel(0, phba->HCregaddr);
4561         readl(phba->HCregaddr); /* flush */
4562         /* Clear all pending interrupts */
4563         writel(0xffffffff, phba->HAregaddr);
4564         readl(phba->HAregaddr); /* flush */
4565 
4566         /* Reset some HBA SLI setup states */
4567         lpfc_stop_hba_timers(phba);
4568         phba->pport->work_port_events = 0;
4569 }
4570 
4571 /**
4572  * lpfc_stop_port_s4 - Stop SLI4 device port
4573  * @phba: pointer to lpfc hba data structure.
4574  *
4575  * This routine is invoked to stop an SLI4 device port. It stops the device
4576  * from generating interrupts and stops the device driver's timers for the
4577  * device.
4578  **/
4579 static void
4580 lpfc_stop_port_s4(struct lpfc_hba *phba)
4581 {
4582         /* Reset some HBA SLI4 setup states */
4583         lpfc_stop_hba_timers(phba);
4584         if (phba->pport)
4585                 phba->pport->work_port_events = 0;
4586         phba->sli4_hba.intr_enable = 0;
4587 }
4588 
4589 /**
4590  * lpfc_stop_port - Wrapper function for stopping hba port
4591  * @phba: Pointer to HBA context object.
4592  *
4593  * This routine wraps the actual SLI3 or SLI4 hba stop port routine, invoked
4594  * through the API jump table function pointer in the lpfc_hba struct.
4595  **/
4596 void
4597 lpfc_stop_port(struct lpfc_hba *phba)
4598 {
4599         phba->lpfc_stop_port(phba);
4600 
4601         if (phba->wq)
4602                 flush_workqueue(phba->wq);
4603 }
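/*
 * Editor's sketch: phba->lpfc_stop_port is one of the per-revision entries
 * in the driver's API jump table, chosen once at setup so callers never
 * test the SLI revision themselves. A minimal hypothetical equivalent of
 * that dispatch (not driver code):
 *
 *	struct demo_hba {
 *		void (*stop_port)(struct demo_hba *hba);
 *	};
 *
 *	static void demo_stop_port_s3(struct demo_hba *hba) { }
 *	static void demo_stop_port_s4(struct demo_hba *hba) { }
 *
 *	static void demo_api_setup(struct demo_hba *hba, int sli_rev)
 *	{
 *		hba->stop_port = (sli_rev == 4) ? demo_stop_port_s4
 *						: demo_stop_port_s3;
 *	}
 */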
4604 
4605 /**
4606  * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
4607  * @phba: Pointer to hba for which this call is being executed.
4608  *
4609  * This routine starts the timer waiting for the FCF rediscovery to complete.
4610  **/
4611 void
4612 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
4613 {
4614         unsigned long fcf_redisc_wait_tmo =
4615                 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
4616         /* Start fcf rediscovery wait period timer */
4617         mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
4618         spin_lock_irq(&phba->hbalock);
4619         /* Allow action to new fcf asynchronous event */
4620         phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
4621         /* Mark the FCF rediscovery pending state */
4622         phba->fcf.fcf_flag |= FCF_REDISC_PEND;
4623         spin_unlock_irq(&phba->hbalock);
4624 }
4625 
4626 /**
4627  * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
4628  * @t: pointer to the expiring timer, used to recover the lpfc_hba.
4629  *
4630  * This routine is invoked when the wait for FCF table rediscovery has
4631  * timed out. If new FCF record(s) have been discovered during the wait
4632  * period, a new FCF event is added to the FCoE async event list and the
4633  * worker thread is woken up for processing from the worker thread
4634  * context.
4635  **/
4636 static void
4637 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
4638 {
4639         struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
4640 
4641         /* Don't send FCF rediscovery event if timer cancelled */
4642         spin_lock_irq(&phba->hbalock);
4643         if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
4644                 spin_unlock_irq(&phba->hbalock);
4645                 return;
4646         }
4647         /* Clear FCF rediscovery timer pending flag */
4648         phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
4649         /* FCF rediscovery event to worker thread */
4650         phba->fcf.fcf_flag |= FCF_REDISC_EVT;
4651         spin_unlock_irq(&phba->hbalock);
4652         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
4653                         "2776 FCF rediscover quiescent timer expired\n");
4654         /* wake up worker thread */
4655         lpfc_worker_wake_up(phba);
4656 }
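/*
 * Editor's sketch: the handler above uses the timer_setup()/from_timer()
 * pattern -- the callback receives only the timer_list pointer and
 * recovers its enclosing structure with from_timer(). A standalone
 * version with hypothetical names (not driver code):
 *
 *	#include <linux/jiffies.h>
 *	#include <linux/printk.h>
 *	#include <linux/timer.h>
 *
 *	struct demo_ctx {
 *		struct timer_list tmo;
 *	};
 *
 *	static void demo_tmo_fn(struct timer_list *t)
 *	{
 *		struct demo_ctx *ctx = from_timer(ctx, t, tmo);
 *
 *		pr_info("demo timer for %p expired\n", ctx);
 *	}
 *
 *	static void demo_start(struct demo_ctx *ctx, unsigned int msecs)
 *	{
 *		timer_setup(&ctx->tmo, demo_tmo_fn, 0);
 *		mod_timer(&ctx->tmo, jiffies + msecs_to_jiffies(msecs));
 *	}
 */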
4657 
4658 /**
4659  * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
4660  * @phba: pointer to lpfc hba data structure.
4661  * @acqe_link: pointer to the async link completion queue entry.
4662  *
4663  * This routine is to parse the SLI4 link-attention link fault code.
4664  **/
4665 static void
4666 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
4667                            struct lpfc_acqe_link *acqe_link)
4668 {
4669         switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
4670         case LPFC_ASYNC_LINK_FAULT_NONE:
4671         case LPFC_ASYNC_LINK_FAULT_LOCAL:
4672         case LPFC_ASYNC_LINK_FAULT_REMOTE:
4673         case LPFC_ASYNC_LINK_FAULT_LR_LRR:
4674                 break;
4675         default:
4676                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4677                                 "0398 Unknown link fault code: x%x\n",
4678                                 bf_get(lpfc_acqe_link_fault, acqe_link));
4679                 break;
4680         }
4681 }
4682 
4683 /**
4684  * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
4685  * @phba: pointer to lpfc hba data structure.
4686  * @acqe_link: pointer to the async link completion queue entry.
4687  *
4688  * This routine is to parse the SLI4 link attention type and translate it
4689  * into the base driver's link attention type coding.
4690  *
4691  * Return: Link attention type in terms of base driver's coding.
4692  **/
4693 static uint8_t
4694 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
4695                           struct lpfc_acqe_link *acqe_link)
4696 {
4697         uint8_t att_type;
4698 
4699         switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
4700         case LPFC_ASYNC_LINK_STATUS_DOWN:
4701         case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
4702                 att_type = LPFC_ATT_LINK_DOWN;
4703                 break;
4704         case LPFC_ASYNC_LINK_STATUS_UP:
4705                 /* Ignore physical link up events - wait for logical link up */
4706                 att_type = LPFC_ATT_RESERVED;
4707                 break;
4708         case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
4709                 att_type = LPFC_ATT_LINK_UP;
4710                 break;
4711         default:
4712                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4713                                 "0399 Invalid link attention type: x%x\n",
4714                                 bf_get(lpfc_acqe_link_status, acqe_link));
4715                 att_type = LPFC_ATT_RESERVED;
4716                 break;
4717         }
4718         return att_type;
4719 }
4720 
4721 /**
4722  * lpfc_sli_port_speed_get - Get the FC port's link speed
4723  * @phba: pointer to lpfc hba data structure.
4724  *
4725  * This routine gets an FC port's link speed in Mbps (SLI3 or SLI4).
4726  *
4727  * Return: link speed in terms of Mbps.
4728  **/
4729 uint32_t
4730 lpfc_sli_port_speed_get(struct lpfc_hba *phba)
4731 {
4732         uint32_t link_speed;
4733 
4734         if (!lpfc_is_link_up(phba))
4735                 return 0;
4736 
4737         if (phba->sli_rev <= LPFC_SLI_REV3) {
4738                 switch (phba->fc_linkspeed) {
4739                 case LPFC_LINK_SPEED_1GHZ:
4740                         link_speed = 1000;
4741                         break;
4742                 case LPFC_LINK_SPEED_2GHZ:
4743                         link_speed = 2000;
4744                         break;
4745                 case LPFC_LINK_SPEED_4GHZ:
4746                         link_speed = 4000;
4747                         break;
4748                 case LPFC_LINK_SPEED_8GHZ:
4749                         link_speed = 8000;
4750                         break;
4751                 case LPFC_LINK_SPEED_10GHZ:
4752                         link_speed = 10000;
4753                         break;
4754                 case LPFC_LINK_SPEED_16GHZ:
4755                         link_speed = 16000;
4756                         break;
4757                 default:
4758                         link_speed = 0;
4759                 }
4760         } else {
4761                 if (phba->sli4_hba.link_state.logical_speed)
4762                         link_speed =
4763                               phba->sli4_hba.link_state.logical_speed;
4764                 else
4765                         link_speed = phba->sli4_hba.link_state.speed;
4766         }
4767         return link_speed;
4768 }
4769 
4770 /**
4771  * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
4772  * @phba: pointer to lpfc hba data structure.
4773  * @evt_code: asynchronous event code.
4774  * @speed_code: asynchronous event link speed code.
4775  *
4776  * This routine parses the given SLI4 async event link speed code into a
4777  * link speed value in Mbps.
4778  *
4779  * Return: link speed in terms of Mbps.
4780  **/
4781 static uint32_t
4782 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
4783                            uint8_t speed_code)
4784 {
4785         uint32_t port_speed;
4786 
4787         switch (evt_code) {
4788         case LPFC_TRAILER_CODE_LINK:
4789                 switch (speed_code) {
4790                 case LPFC_ASYNC_LINK_SPEED_ZERO:
4791                         port_speed = 0;
4792                         break;
4793                 case LPFC_ASYNC_LINK_SPEED_10MBPS:
4794                         port_speed = 10;
4795                         break;
4796                 case LPFC_ASYNC_LINK_SPEED_100MBPS:
4797                         port_speed = 100;
4798                         break;
4799                 case LPFC_ASYNC_LINK_SPEED_1GBPS:
4800                         port_speed = 1000;
4801                         break;
4802                 case LPFC_ASYNC_LINK_SPEED_10GBPS:
4803                         port_speed = 10000;
4804                         break;
4805                 case LPFC_ASYNC_LINK_SPEED_20GBPS:
4806                         port_speed = 20000;
4807                         break;
4808                 case LPFC_ASYNC_LINK_SPEED_25GBPS:
4809                         port_speed = 25000;
4810                         break;
4811                 case LPFC_ASYNC_LINK_SPEED_40GBPS:
4812                         port_speed = 40000;
4813                         break;
4814                 default:
4815                         port_speed = 0;
4816                 }
4817                 break;
4818         case LPFC_TRAILER_CODE_FC:
4819                 switch (speed_code) {
4820                 case LPFC_FC_LA_SPEED_UNKNOWN:
4821                         port_speed = 0;
4822                         break;
4823                 case LPFC_FC_LA_SPEED_1G:
4824                         port_speed = 1000;
4825                         break;
4826                 case LPFC_FC_LA_SPEED_2G:
4827                         port_speed = 2000;
4828                         break;
4829                 case LPFC_FC_LA_SPEED_4G:
4830                         port_speed = 4000;
4831                         break;
4832                 case LPFC_FC_LA_SPEED_8G:
4833                         port_speed = 8000;
4834                         break;
4835                 case LPFC_FC_LA_SPEED_10G:
4836                         port_speed = 10000;
4837                         break;
4838                 case LPFC_FC_LA_SPEED_16G:
4839                         port_speed = 16000;
4840                         break;
4841                 case LPFC_FC_LA_SPEED_32G:
4842                         port_speed = 32000;
4843                         break;
4844                 case LPFC_FC_LA_SPEED_64G:
4845                         port_speed = 64000;
4846                         break;
4847                 case LPFC_FC_LA_SPEED_128G:
4848                         port_speed = 128000;
4849                         break;
4850                 default:
4851                         port_speed = 0;
4852                 }
4853                 break;
4854         default:
4855                 port_speed = 0;
4856         }
4857         return port_speed;
4858 }
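/*
 * Editor's note: the nested switches above are a pure code-to-Mbps map.
 * Where the speed codes are small integers, the same mapping can be
 * written as a lookup table; the index values below are hypothetical,
 * not the actual LPFC_* encodings (a sketch of the technique only):
 *
 *	#include <linux/kernel.h>
 *	#include <linux/types.h>
 *
 *	static const u32 demo_speed_mbps[] = {
 *		[1] = 1000, [2] = 2000, [4] = 4000, [8] = 8000,
 *	};
 *
 *	static u32 demo_speed_parse(u8 code)
 *	{
 *		if (code < ARRAY_SIZE(demo_speed_mbps))
 *			return demo_speed_mbps[code];
 *		return 0;	// unknown codes map to 0 Mbps
 *	}
 */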
4859 
4860 /**
4861  * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
4862  * @phba: pointer to lpfc hba data structure.
4863  * @acqe_link: pointer to the async link completion queue entry.
4864  *
4865  * This routine is to handle the SLI4 asynchronous FCoE link event.
4866  **/
4867 static void
4868 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
4869                          struct lpfc_acqe_link *acqe_link)
4870 {
4871         struct lpfc_dmabuf *mp;
4872         LPFC_MBOXQ_t *pmb;
4873         MAILBOX_t *mb;
4874         struct lpfc_mbx_read_top *la;
4875         uint8_t att_type;
4876         int rc;
4877 
4878         att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
4879         if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
4880                 return;
4881         phba->fcoe_eventtag = acqe_link->event_tag;
4882         pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4883         if (!pmb) {
4884                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4885                                 "0395 The mboxq allocation failed\n");
4886                 return;
4887         }
4888         mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4889         if (!mp) {
4890                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4891                                 "0396 The lpfc_dmabuf allocation failed\n");
4892                 goto out_free_pmb;
4893         }
4894         mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
4895         if (!mp->virt) {
4896                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4897                                 "0397 The mbuf allocation failed\n");
4898                 goto out_free_dmabuf;
4899         }
4900 
4901         /* Cleanup any outstanding ELS commands */
4902         lpfc_els_flush_all_cmd(phba);
4903 
4904         /* Block ELS IOCBs until we have processed the link event */
4905         phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
4906 
4907         /* Update link event statistics */
4908         phba->sli.slistat.link_event++;
4909 
4910         /* Create lpfc_handle_latt mailbox command from link ACQE */
4911         lpfc_read_topology(phba, pmb, mp);
4912         pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
4913         pmb->vport = phba->pport;
4914 
4915         /* Keep the link status for extra SLI4 state machine reference */
4916         phba->sli4_hba.link_state.speed =
4917                         lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
4918                                 bf_get(lpfc_acqe_link_speed, acqe_link));
4919         phba->sli4_hba.link_state.duplex =
4920                                 bf_get(lpfc_acqe_link_duplex, acqe_link);
4921         phba->sli4_hba.link_state.status =
4922                                 bf_get(lpfc_acqe_link_status, acqe_link);
4923         phba->sli4_hba.link_state.type =
4924                                 bf_get(lpfc_acqe_link_type, acqe_link);
4925         phba->sli4_hba.link_state.number =
4926                                 bf_get(lpfc_acqe_link_number, acqe_link);
4927         phba->sli4_hba.link_state.fault =
4928                                 bf_get(lpfc_acqe_link_fault, acqe_link);
4929         phba->sli4_hba.link_state.logical_speed =
4930                         bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
4931 
4932         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4933                         "2900 Async FC/FCoE Link event - Speed:%dGBit "
4934                         "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
4935                         "Logical speed:%dMbps Fault:%d\n",
4936                         phba->sli4_hba.link_state.speed,
4937                         phba->sli4_hba.link_state.topology,
4938                         phba->sli4_hba.link_state.status,
4939                         phba->sli4_hba.link_state.type,
4940                         phba->sli4_hba.link_state.number,
4941                         phba->sli4_hba.link_state.logical_speed,
4942                         phba->sli4_hba.link_state.fault);
4943         /*
4944          * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
4945          * topology info. Note: Optional for non FC-AL ports.
4946          */
4947         if (!(phba->hba_flag & HBA_FCOE_MODE)) {
4948                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4949                 if (rc == MBX_NOT_FINISHED)
4950                         goto out_free_mbuf;
4951                 return;
4952         }
4953         /*
4954          * For FCoE Mode: fill in all the topology information we need and call
4955          * the READ_TOPOLOGY completion routine to continue without actually
4956          * sending the READ_TOPOLOGY mailbox command to the port.
4957          */
4958         /* Initialize completion status */
4959         mb = &pmb->u.mb;
4960         mb->mbxStatus = MBX_SUCCESS;
4961 
4962         /* Parse port fault information field */
4963         lpfc_sli4_parse_latt_fault(phba, acqe_link);
4964 
4965         /* Parse and translate link attention fields */
4966         la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
4967         la->eventTag = acqe_link->event_tag;
4968         bf_set(lpfc_mbx_read_top_att_type, la, att_type);
4969         bf_set(lpfc_mbx_read_top_link_spd, la,
4970                (bf_get(lpfc_acqe_link_speed, acqe_link)));
4971 
4972         /* Fake the following irrelevant fields */
4973         bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
4974         bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
4975         bf_set(lpfc_mbx_read_top_il, la, 0);
4976         bf_set(lpfc_mbx_read_top_pb, la, 0);
4977         bf_set(lpfc_mbx_read_top_fa, la, 0);
4978         bf_set(lpfc_mbx_read_top_mm, la, 0);
4979 
4980         /* Invoke the lpfc_handle_latt mailbox command callback function */
4981         lpfc_mbx_cmpl_read_topology(phba, pmb);
4982 
4983         return;
4984 
4985 out_free_mbuf:
4985         lpfc_mbuf_free(phba, mp->virt, mp->phys);
4985 out_free_dmabuf:
4986         kfree(mp);
4987 out_free_pmb:
4988         mempool_free(pmb, phba->mbox_mem_pool);
4989 }
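Both this handler and lpfc_sli4_async_fc_evt() below read and write ACQE and mailbox fields exclusively through the bf_get()/bf_set() accessors. A minimal sketch of the token-pasting shift-and-mask idiom behind such accessors, with an illustrative field (the demo_* names are placeholders, not the lpfc_hw4.h definitions):

/* Illustrative field: bits [15:8] of word0. */
#define demo_field_SHIFT	8
#define demo_field_MASK		0x000000FF	/* mask applied after shifting */
#define demo_field_WORD		word0		/* struct member holding the field */

#define demo_bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define demo_bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

struct demo_acqe {
	unsigned int word0;
};

With these definitions, demo_bf_get(demo_field, &acqe) extracts the byte and demo_bf_set(demo_field, &acqe, v) stores it without disturbing neighboring bits.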
4990 
4991 /**
4992  * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
4993  * topology.
4994  * @phba: pointer to lpfc hba data structure.
4995  * @speed_code: asynchronous event link speed code.
4996  *
4997  * This routine parses the given SLI4 async event link speed code into the
4998  * corresponding Read topology link speed value.
5000  *
5001  * Return: link speed in terms of Read topology.
5002  **/
5003 static uint8_t
5004 lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
5005 {
5006         uint8_t port_speed;
5007 
5008         switch (speed_code) {
5009         case LPFC_FC_LA_SPEED_1G:
5010                 port_speed = LPFC_LINK_SPEED_1GHZ;
5011                 break;
5012         case LPFC_FC_LA_SPEED_2G:
5013                 port_speed = LPFC_LINK_SPEED_2GHZ;
5014                 break;
5015         case LPFC_FC_LA_SPEED_4G:
5016                 port_speed = LPFC_LINK_SPEED_4GHZ;
5017                 break;
5018         case LPFC_FC_LA_SPEED_8G:
5019                 port_speed = LPFC_LINK_SPEED_8GHZ;
5020                 break;
5021         case LPFC_FC_LA_SPEED_16G:
5022                 port_speed = LPFC_LINK_SPEED_16GHZ;
5023                 break;
5024         case LPFC_FC_LA_SPEED_32G:
5025                 port_speed = LPFC_LINK_SPEED_32GHZ;
5026                 break;
5027         case LPFC_FC_LA_SPEED_64G:
5028                 port_speed = LPFC_LINK_SPEED_64GHZ;
5029                 break;
5030         case LPFC_FC_LA_SPEED_128G:
5031                 port_speed = LPFC_LINK_SPEED_128GHZ;
5032                 break;
5033         case LPFC_FC_LA_SPEED_256G:
5034                 port_speed = LPFC_LINK_SPEED_256GHZ;
5035                 break;
5036         default:
5037                 port_speed = 0;
5038                 break;
5039         }
5040 
5041         return port_speed;
5042 }
5043 
5044 #define trunk_link_status(__idx)\
5045         bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
5046                ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
5047                 "Link up" : "Link down") : "NA"
5048 /* Did port __idx report an error? */
5049 #define trunk_port_fault(__idx)\
5050         bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
5051                (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"
5052 
5053 static void
5054 lpfc_update_trunk_link_status(struct lpfc_hba *phba,
5055                               struct lpfc_acqe_fc_la *acqe_fc)
5056 {
5057         uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
5058         uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);
5059 
5060         phba->sli4_hba.link_state.speed =
5061                 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
5062                                 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
5063 
5064         phba->sli4_hba.link_state.logical_speed =
5065                                 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
5066         /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
5067         phba->fc_linkspeed =
5068                  lpfc_async_link_speed_to_read_top(
5069                                 phba,
5070                                 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
5071 
5072         if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
5073                 phba->trunk_link.link0.state =
5074                         bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
5075                         ? LPFC_LINK_UP : LPFC_LINK_DOWN;
5076                 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
5077         }
5078         if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
5079                 phba->trunk_link.link1.state =
5080                         bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
5081                         ? LPFC_LINK_UP : LPFC_LINK_DOWN;
5082                 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
5083         }
5084         if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
5085                 phba->trunk_link.link2.state =
5086                         bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
5087                         ? LPFC_LINK_UP : LPFC_LINK_DOWN;
5088                 phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
5089         }
5090         if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
5091                 phba->trunk_link.link3.state =
5092                         bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
5093                         ? LPFC_LINK_UP : LPFC_LINK_DOWN;
5094                 phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
5095         }
5096 
5097         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5098                         "2910 Async FC Trunking Event - Speed:%d\n"
5099                         "\tLogical speed:%d "
5100                         "port0: %s port1: %s port2: %s port3: %s\n",
5101                         phba->sli4_hba.link_state.speed,
5102                         phba->sli4_hba.link_state.logical_speed,
5103                         trunk_link_status(0), trunk_link_status(1),
5104                         trunk_link_status(2), trunk_link_status(3));
5105 
5106         /*
5107          * SLI-4: only error codes up to 0xA are defined as of now; log an
5108          * explicit marker in case the driver needs to be updated.
5109          */
5110         if (port_fault)
5111                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5112                                 "3202 trunk error:0x%x (%s) seen on port0:%s "
5113                                 "port1:%s port2:%s port3:%s\n", err, err > 0xA ?
5114                                 "UNDEFINED. update driver." : trunk_errmsg[err],
5115                                 trunk_port_fault(0), trunk_port_fault(1),
5116                                 trunk_port_fault(2), trunk_port_fault(3));
5118 }
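The trunk bookkeeping above is a one-bit-per-port decode: a port is reported only if its config bit is set, and it carries the shared fault code only if its bit is set in the link mask. A self-contained userspace sketch of the same decode (illustrative names; configured_mask stands in for the trunk_config_portN bits):

#include <stdio.h>

static void demo_decode_trunk(unsigned int configured_mask,
			      unsigned int fault_mask, unsigned int err)
{
	int i;

	for (i = 0; i < 4; i++) {
		if (!(configured_mask & (1u << i))) {
			printf("port%d: NA\n", i);	/* not part of the trunk */
			continue;
		}
		/* Mirrors link->fault = port_fault & bit ? err : 0 above. */
		printf("port%d: configured, fault=%#x\n", i,
		       (fault_mask & (1u << i)) ? err : 0);
	}
}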
5119 
5120 
5121 /**
5122  * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
5123  * @phba: pointer to lpfc hba data structure.
5124  * @acqe_fc: pointer to the async fc completion queue entry.
5125  *
5126  * This routine is to handle the SLI4 asynchronous FC event. It will simply log
5127  * that the event was received and then issue a read_topology mailbox command so
5128  * that the rest of the driver will treat it the same as SLI3.
5129  **/
5130 static void
5131 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
5132 {
5133         struct lpfc_dmabuf *mp;
5134         LPFC_MBOXQ_t *pmb;
5135         MAILBOX_t *mb;
5136         struct lpfc_mbx_read_top *la;
5137         int rc;
5138 
5139         if (bf_get(lpfc_trailer_type, acqe_fc) !=
5140             LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
5141                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5142                                 "2895 Non-FC link event detected (%d)\n",
5143                                 bf_get(lpfc_trailer_type, acqe_fc));
5144                 return;
5145         }
5146 
5147         if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
5148             LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
5149                 lpfc_update_trunk_link_status(phba, acqe_fc);
5150                 return;
5151         }
5152 
5153         /* Keep the link status for extra SLI4 state machine reference */
5154         phba->sli4_hba.link_state.speed =
5155                         lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
5156                                 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
5157         phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
5158         phba->sli4_hba.link_state.topology =
5159                                 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
5160         phba->sli4_hba.link_state.status =
5161                                 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
5162         phba->sli4_hba.link_state.type =
5163                                 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
5164         phba->sli4_hba.link_state.number =
5165                                 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
5166         phba->sli4_hba.link_state.fault =
5167                                 bf_get(lpfc_acqe_link_fault, acqe_fc);
5168 
5169         if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
5170             LPFC_FC_LA_TYPE_LINK_DOWN)
5171                 phba->sli4_hba.link_state.logical_speed = 0;
5172         else if (!phba->sli4_hba.conf_trunk)
5173                 phba->sli4_hba.link_state.logical_speed =
5174                                 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
5175 
5176         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5177                         "2896 Async FC event - Speed:%dGBaud Topology:x%x "
5178                         "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
5179                         "%dMbps Fault:%d\n",
5180                         phba->sli4_hba.link_state.speed,
5181                         phba->sli4_hba.link_state.topology,
5182                         phba->sli4_hba.link_state.status,
5183                         phba->sli4_hba.link_state.type,
5184                         phba->sli4_hba.link_state.number,
5185                         phba->sli4_hba.link_state.logical_speed,
5186                         phba->sli4_hba.link_state.fault);
5187         pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5188         if (!pmb) {
5189                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5190                                 "2897 The mboxq allocation failed\n");
5191                 return;
5192         }
5193         mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5194         if (!mp) {
5195                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5196                                 "2898 The lpfc_dmabuf allocation failed\n");
5197                 goto out_free_pmb;
5198         }
5199         mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
5200         if (!mp->virt) {
5201                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5202                                 "2899 The mbuf allocation failed\n");
5203                 goto out_free_dmabuf;
5204         }
5205 
5206         /* Cleanup any outstanding ELS commands */
5207         lpfc_els_flush_all_cmd(phba);
5208 
5209         /* Block ELS IOCBs until we have done process link event */
5210         phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
5211 
5212         /* Update link event statistics */
5213         phba->sli.slistat.link_event++;
5214 
5215         /* Create lpfc_handle_latt mailbox command from link ACQE */
5216         lpfc_read_topology(phba, pmb, mp);
5217         pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
5218         pmb->vport = phba->pport;
5219 
5220         if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
5221                 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
5222 
5223                 switch (phba->sli4_hba.link_state.status) {
5224                 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
5225                         phba->link_flag |= LS_MDS_LINK_DOWN;
5226                         break;
5227                 case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
5228                         phba->link_flag |= LS_MDS_LOOPBACK;
5229                         break;
5230                 default:
5231                         break;
5232                 }
5233 
5234                 /* Initialize completion status */
5235                 mb = &pmb->u.mb;
5236                 mb->mbxStatus = MBX_SUCCESS;
5237 
5238                 /* Parse port fault information field */
5239                 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);
5240 
5241                 /* Parse and translate link attention fields */
5242                 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
5243                 la->eventTag = acqe_fc->event_tag;
5244 
5245                 if (phba->sli4_hba.link_state.status ==
5246                     LPFC_FC_LA_TYPE_UNEXP_WWPN) {
5247                         bf_set(lpfc_mbx_read_top_att_type, la,
5248                                LPFC_FC_LA_TYPE_UNEXP_WWPN);
5249                 } else {
5250                         bf_set(lpfc_mbx_read_top_att_type, la,
5251                                LPFC_FC_LA_TYPE_LINK_DOWN);
5252                 }
5253                 /* Invoke the mailbox command callback function */
5254                 lpfc_mbx_cmpl_read_topology(phba, pmb);
5255 
5256                 return;
5257         }
5258 
5259         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5260         if (rc == MBX_NOT_FINISHED)
5261                 goto out_free_mbuf;
5262         return;
5263 
5264 out_free_mbuf:
5264         lpfc_mbuf_free(phba, mp->virt, mp->phys);
5264 out_free_dmabuf:
5265         kfree(mp);
5266 out_free_pmb:
5267         mempool_free(pmb, phba->mbox_mem_pool);
5268 }
5269 
5270 /**
5271  * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
5272  * @phba: pointer to lpfc hba data structure.
5273  * @acqe_sli: pointer to the async SLI completion queue entry.
5274  *
5275  * This routine is to handle the SLI4 asynchronous SLI events.
5276  **/
5277 static void
5278 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
5279 {
5280         char port_name;
5281         char message[128];
5282         uint8_t status;
5283         uint8_t evt_type;
5284         uint8_t operational = 0;
5285         struct temp_event temp_event_data;
5286         struct lpfc_acqe_misconfigured_event *misconfigured;
5287         struct Scsi_Host  *shost;
5288         struct lpfc_vport **vports;
5289         int rc, i;
5290 
5291         evt_type = bf_get(lpfc_trailer_type, acqe_sli);
5292 
5293         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5294                         "2901 Async SLI event - Event Data1:x%08x Event Data2:"
5295                         "x%08x SLI Event Type:%d\n",
5296                         acqe_sli->event_data1, acqe_sli->event_data2,
5297                         evt_type);
5298 
5299         port_name = phba->Port[0];
5300         if (port_name == 0x00)
5301                 port_name = '?'; /* port name is empty */
5302 
5303         switch (evt_type) {
5304         case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
5305                 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
5306                 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
5307                 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
5308 
5309                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5310                                 "3190 Over Temperature:%d Celsius- Port Name %c\n",
5311                                 acqe_sli->event_data1, port_name);
5312 
5313                 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
5314                 shost = lpfc_shost_from_vport(phba->pport);
5315                 fc_host_post_vendor_event(shost, fc_get_event_number(),
5316                                           sizeof(temp_event_data),
5317                                           (char *)&temp_event_data,
5318                                           SCSI_NL_VID_TYPE_PCI
5319                                           | PCI_VENDOR_ID_EMULEX);
5320                 break;
5321         case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
5322                 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
5323                 temp_event_data.event_code = LPFC_NORMAL_TEMP;
5324                 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
5325 
5326                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5327                                 "3191 Normal Temperature:%d Celsius - Port Name %c\n",
5328                                 acqe_sli->event_data1, port_name);
5329 
5330                 shost = lpfc_shost_from_vport(phba->pport);
5331                 fc_host_post_vendor_event(shost, fc_get_event_number(),
5332                                           sizeof(temp_event_data),
5333                                           (char *)&temp_event_data,
5334                                           SCSI_NL_VID_TYPE_PCI
5335                                           | PCI_VENDOR_ID_EMULEX);
5336                 break;
5337         case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
5338                 misconfigured = (struct lpfc_acqe_misconfigured_event *)
5339                                         &acqe_sli->event_data1;
5340 
5341                 /* fetch the status for this port */
5342                 switch (phba->sli4_hba.lnk_info.lnk_no) {
5343                 case LPFC_LINK_NUMBER_0:
5344                         status = bf_get(lpfc_sli_misconfigured_port0_state,
5345                                         &misconfigured->theEvent);
5346                         operational = bf_get(lpfc_sli_misconfigured_port0_op,
5347                                         &misconfigured->theEvent);
5348                         break;
5349                 case LPFC_LINK_NUMBER_1:
5350                         status = bf_get(lpfc_sli_misconfigured_port1_state,
5351                                         &misconfigured->theEvent);
5352                         operational = bf_get(lpfc_sli_misconfigured_port1_op,
5353                                         &misconfigured->theEvent);
5354                         break;
5355                 case LPFC_LINK_NUMBER_2:
5356                         status = bf_get(lpfc_sli_misconfigured_port2_state,
5357                                         &misconfigured->theEvent);
5358                         operational = bf_get(lpfc_sli_misconfigured_port2_op,
5359                                         &misconfigured->theEvent);
5360                         break;
5361                 case LPFC_LINK_NUMBER_3:
5362                         status = bf_get(lpfc_sli_misconfigured_port3_state,
5363                                         &misconfigured->theEvent);
5364                         operational = bf_get(lpfc_sli_misconfigured_port3_op,
5365                                         &misconfigured->theEvent);
5366                         break;
5367                 default:
5368                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5369                                         "3296 "
5370                                         "LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
5371                                         "event: Invalid link %d\n",
5372                                         phba->sli4_hba.lnk_info.lnk_no);
5373                         return;
5374                 }
5375 
5376                 /* Skip if optic state unchanged */
5377                 if (phba->sli4_hba.lnk_info.optic_state == status)
5378                         return;
5379 
5380                 switch (status) {
5381                 case LPFC_SLI_EVENT_STATUS_VALID:
5382                         sprintf(message, "Physical Link is functional");
5383                         break;
5384                 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
5385                         sprintf(message, "Optics faulted/incorrectly "
5386                                 "installed/not installed - Reseat optics, "
5387                                 "if issue not resolved, replace.");
5388                         break;
5389                 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
5390                         sprintf(message,
5391                                 "Optics of two types installed - Remove one "
5392                                 "optic or install matching pair of optics.");
5393                         break;
5394                 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
5395                         sprintf(message, "Incompatible optics - Replace with "
5396                                 "compatible optics for card to function.");
5397                         break;
5398                 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
5399                         sprintf(message, "Unqualified optics - Replace with "
5400                                 "Avago optics for Warranty and Technical "
5401                                 "Support - Link is%s operational",
5402                                 (operational) ? " not" : "");
5403                         break;
5404                 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
5405                         sprintf(message, "Uncertified optics - Replace with "
5406                                 "Avago-certified optics to enable link "
5407                                 "operation - Link is%s operational",
5408                                 (operational) ? " not" : "");
5409                         break;
5410                 default:
5411                         /* firmware is reporting a status we don't know about */
5412                         sprintf(message, "Unknown event status x%02x", status);
5413                         break;
5414                 }
5415 
5416                 /* Issue READ_CONFIG mbox command to refresh supported speeds */
5417                 rc = lpfc_sli4_read_config(phba);
5418                 if (rc) {
5419                         phba->lmt = 0;
5420                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5421                                         "3194 Unable to retrieve supported "
5422                                         "speeds, rc = 0x%x\n", rc);
5423                 }
5424                 vports = lpfc_create_vport_work_array(phba);
5425                 if (vports != NULL) {
5426                         for (i = 0; i <= phba->max_vports && vports[i] != NULL;
5427                                         i++) {
5428                                 shost = lpfc_shost_from_vport(vports[i]);
5429                                 lpfc_host_supported_speeds_set(shost);
5430                         }
5431                 }
5432                 lpfc_destroy_vport_work_array(phba, vports);
5433 
5434                 phba->sli4_hba.lnk_info.optic_state = status;
5435                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5436                                 "3176 Port Name %c %s\n", port_name, message);
5437                 break;
5438         case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
5439                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5440                                 "3192 Remote DPort Test Initiated - "
5441                                 "Event Data1:x%08x Event Data2: x%08x\n",
5442                                 acqe_sli->event_data1, acqe_sli->event_data2);
5443                 break;
5444         default:
5445                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5446                                 "3193 Async SLI event - Event Data1:x%08x Event Data2:"
5447                                 "x%08x SLI Event Type:%d\n",
5448                                 acqe_sli->event_data1, acqe_sli->event_data2,
5449                                 evt_type);
5450                 break;
5451         }
5452 }
5453 
5454 /**
5455  * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
5456  * @vport: pointer to vport data structure.
5457  *
5458  * This routine is to perform Clear Virtual Link (CVL) on a vport in
5459  * response to a CVL event.
5460  *
5461  * Return the pointer to the fabric ndlp for the vport if successful,
5462  * otherwise return NULL.
5463  **/
5464 static struct lpfc_nodelist *
5465 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
5466 {
5467         struct lpfc_nodelist *ndlp;
5468         struct Scsi_Host *shost;
5469         struct lpfc_hba *phba;
5470 
5471         if (!vport)
5472                 return NULL;
5473         phba = vport->phba;
5474         if (!phba)
5475                 return NULL;
5476         ndlp = lpfc_findnode_did(vport, Fabric_DID);
5477         if (!ndlp) {
5478                 /* Cannot find existing Fabric ndlp, so allocate a new one */
5479                 ndlp = lpfc_nlp_init(vport, Fabric_DID);
5480                 if (!ndlp)
5481                         return NULL;
5482                 /* Set the node type */
5483                 ndlp->nlp_type |= NLP_FABRIC;
5484                 /* Put ndlp onto node list */
5485                 lpfc_enqueue_node(vport, ndlp);
5486         } else if (!NLP_CHK_NODE_ACT(ndlp)) {
5487                 /* re-setup ndlp without removing from node list */
5488                 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
5489                 if (!ndlp)
5490                         return NULL;
5491         }
5492         if ((phba->pport->port_state < LPFC_FLOGI) &&
5493                 (phba->pport->port_state != LPFC_VPORT_FAILED))
5494                 return NULL;
5495         /* If virtual link is not yet instantiated ignore CVL */
5496         if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
5497                 && (vport->port_state != LPFC_VPORT_FAILED))
5498                 return NULL;
5499         shost = lpfc_shost_from_vport(vport);
5500         if (!shost)
5501                 return NULL;
5502         lpfc_linkdown_port(vport);
5503         lpfc_cleanup_pending_mbox(vport);
5504         spin_lock_irq(shost->host_lock);
5505         vport->fc_flag |= FC_VPORT_CVL_RCVD;
5506         spin_unlock_irq(shost->host_lock);
5507 
5508         return ndlp;
5509 }
5510 
5511 /**
5512  * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
5513  * @phba: pointer to lpfc hba data structure.
5514  *
5515  * This routine is to perform Clear Virtual Link (CVL) on all vports in
5516  * response to a FCF dead event.
5517  **/
5518 static void
5519 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
5520 {
5521         struct lpfc_vport **vports;
5522         int i;
5523 
5524         vports = lpfc_create_vport_work_array(phba);
5525         if (vports)
5526                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
5527                         lpfc_sli4_perform_vport_cvl(vports[i]);
5528         lpfc_destroy_vport_work_array(phba, vports);
5529 }
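This function and lpfc_sli4_async_sli_evt() above share the driver's standard vport traversal idiom: take a refcounted snapshot, walk the NULL-terminated array bounded by max_vports, then release it. Schematically, as used throughout this file (lpfc_destroy_vport_work_array() tolerates a NULL array):

	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);	/* refcounted snapshot */
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			; /* ... act on vports[i] ... */
	lpfc_destroy_vport_work_array(phba, vports);	/* drop refs, free array */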
5530 
5531 /**
5532  * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
5533  * @phba: pointer to lpfc hba data structure.
5534  * @acqe_fip: pointer to the async FCoE FIP completion queue entry.
5535  *
5536  * This routine is to handle the SLI4 asynchronous FCoE FIP event.
5537  **/
5538 static void
5539 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
5540                         struct lpfc_acqe_fip *acqe_fip)
5541 {
5542         uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
5543         int rc;
5544         struct lpfc_vport *vport;
5545         struct lpfc_nodelist *ndlp;
5546         struct Scsi_Host  *shost;
5547         int active_vlink_present;
5548         struct lpfc_vport **vports;
5549         int i;
5550 
5551         phba->fc_eventTag = acqe_fip->event_tag;
5552         phba->fcoe_eventtag = acqe_fip->event_tag;
5553         switch (event_type) {
5554         case LPFC_FIP_EVENT_TYPE_NEW_FCF:
5555         case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
5556                 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
5557                         lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
5558                                         LOG_DISCOVERY,
5559                                         "2546 New FCF event, evt_tag:x%x, "
5560                                         "index:x%x\n",
5561                                         acqe_fip->event_tag,
5562                                         acqe_fip->index);
5563                 else
5564                         lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
5565                                         LOG_DISCOVERY,
5566                                         "2788 FCF param modified event, "
5567                                         "evt_tag:x%x, index:x%x\n",
5568                                         acqe_fip->event_tag,
5569                                         acqe_fip->index);
5570                 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
5571                         /*
5572                          * During period of FCF discovery, read the FCF
5573                          * table record indexed by the event to update
5574                          * FCF roundrobin failover eligible FCF bmask.
5575                          */
5576                         lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
5577                                         LOG_DISCOVERY,
5578                                         "2779 Read FCF (x%x) for updating "
5579                                         "roundrobin FCF failover bmask\n",
5580                                         acqe_fip->index);
5581                         rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
5582                 }
5583 
5584                 /* If the FCF discovery is in progress, do nothing. */
5585                 spin_lock_irq(&phba->hbalock);
5586                 if (phba->hba_flag & FCF_TS_INPROG) {
5587                         spin_unlock_irq(&phba->hbalock);
5588                         break;
5589                 }
5590                 /* If fast FCF failover rescan event is pending, do nothing */
5591                 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
5592                         spin_unlock_irq(&phba->hbalock);
5593                         break;
5594                 }
5595 
5596                 /* If the FCF has been in discovered state, do nothing. */
5597                 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
5598                         spin_unlock_irq(&phba->hbalock);
5599                         break;
5600                 }
5601                 spin_unlock_irq(&phba->hbalock);
5602 
5603                 /* Otherwise, scan the entire FCF table and re-discover SAN */
5604                 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
5605                                 "2770 Start FCF table scan per async FCF "
5606                                 "event, evt_tag:x%x, index:x%x\n",
5607                                 acqe_fip->event_tag, acqe_fip->index);
5608                 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
5609                                                      LPFC_FCOE_FCF_GET_FIRST);
5610                 if (rc)
5611                         lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
5612                                         "2547 Issue FCF scan read FCF mailbox "
5613                                         "command failed (x%x)\n", rc);
5614                 break;
5615 
5616         case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
5617                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5618                         "2548 FCF Table full count 0x%x tag 0x%x\n",
5619                         bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
5620                         acqe_fip->event_tag);
5621                 break;
5622 
5623         case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
5624                 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
5625                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
5626                         "2549 FCF (x%x) disconnected from network, "
5627                         "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
5628                 /*
5629                  * If we are in the middle of FCF failover process, clear
5630                  * the corresponding FCF bit in the roundrobin bitmap.
5631                  */
5632                 spin_lock_irq(&phba->hbalock);
5633                 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
5634                     (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
5635                         spin_unlock_irq(&phba->hbalock);
5636                         /* Update FLOGI FCF failover eligible FCF bmask */
5637                         lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
5638                         break;
5639                 }
5640                 spin_unlock_irq(&phba->hbalock);
5641 
5642                 /* If the event is not for currently used fcf do nothing */
5643                 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
5644                         break;
5645 
5646                 /*
5647                  * Otherwise, request the port to rediscover the entire FCF
5648                  * table for a fast recovery in case the current FCF is no
5649                  * longer valid, as we are not already in the middle of an
5650                  * FCF failover process.
5651                  */
5652                 spin_lock_irq(&phba->hbalock);
5653                 /* Mark the fast failover process in progress */
5654                 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
5655                 spin_unlock_irq(&phba->hbalock);
5656 
5657                 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
5658                                 "2771 Start FCF fast failover process due to "
5659                                 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
5660                                 "\n", acqe_fip->event_tag, acqe_fip->index);
5661                 rc = lpfc_sli4_redisc_fcf_table(phba);
5662                 if (rc) {
5663                         lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
5664                                         LOG_DISCOVERY,
5665                                         "2772 Issue FCF rediscover mailbox "
5666                                         "command failed, fail through to FCF "
5667                                         "dead event\n");
5668                         spin_lock_irq(&phba->hbalock);
5669                         phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
5670                         spin_unlock_irq(&phba->hbalock);
5671                         /*
5672                          * Last resort will fail over by treating this
5673                          * as a link down to FCF registration.
5674                          */
5675                         lpfc_sli4_fcf_dead_failthrough(phba);
5676                 } else {
5677                         /* Reset FCF roundrobin bmask for new discovery */
5678                         lpfc_sli4_clear_fcf_rr_bmask(phba);
5679                         /*
5680                          * Handling fast FCF failover to a DEAD FCF event is
5681                          * considered equivalent to receiving CVL to all vports.
5682                          */
5683                         lpfc_sli4_perform_all_vport_cvl(phba);
5684                 }
5685                 break;
5686         case LPFC_FIP_EVENT_TYPE_CVL:
5687                 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
5688                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
5689                         "2718 Clear Virtual Link Received for VPI 0x%x"
5690                         " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
5691 
5692                 vport = lpfc_find_vport_by_vpid(phba,
5693                                                 acqe_fip->index);
5694                 ndlp = lpfc_sli4_perform_vport_cvl(vport);
5695                 if (!ndlp)
5696                         break;
5697                 active_vlink_present = 0;
5698 
5699                 vports = lpfc_create_vport_work_array(phba);
5700                 if (vports) {
5701                         for (i = 0; i <= phba->max_vports && vports[i] != NULL;
5702                                         i++) {
5703                                 if ((!(vports[i]->fc_flag &
5704                                         FC_VPORT_CVL_RCVD)) &&
5705                                         (vports[i]->port_state > LPFC_FDISC)) {
5706                                         active_vlink_present = 1;
5707                                         break;
5708                                 }
5709                         }
5710                         lpfc_destroy_vport_work_array(phba, vports);
5711                 }
5712 
5713                 /*
5714                  * Don't re-instantiate if vport is marked for deletion.
5715                  * If we are here first then vport_delete is going to wait
5716                  * for discovery to complete.
5717                  */
5718                 if (!(vport->load_flag & FC_UNLOADING) &&
5719                                         active_vlink_present) {
5720                         /*
5721                          * If there are other active VLinks present,
5722                          * re-instantiate the Vlink using FDISC.
5723                          */
5724                         mod_timer(&ndlp->nlp_delayfunc,
5725                                   jiffies + msecs_to_jiffies(1000));
5726                         shost = lpfc_shost_from_vport(vport);
5727                         spin_lock_irq(shost->host_lock);
5728                         ndlp->nlp_flag |= NLP_DELAY_TMO;
5729                         spin_unlock_irq(shost->host_lock);
5730                         ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
5731                         vport->port_state = LPFC_FDISC;
5732                 } else {
5733                         /*
5734                          * Otherwise, request the port to rediscover
5735                          * the entire FCF table for a fast recovery
5736                          * in case the current FCF is no longer
5737                          * valid, if we are not already in the FCF
5738                          * failover process.
5739                          */
5740                         spin_lock_irq(&phba->hbalock);
5741                         if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
5742                                 spin_unlock_irq(&phba->hbalock);
5743                                 break;
5744                         }
5745                         /* Mark the fast failover process in progress */
5746                         phba->fcf.fcf_flag |= FCF_ACVL_DISC;
5747                         spin_unlock_irq(&phba->hbalock);
5748                         lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
5749                                         LOG_DISCOVERY,
5750                                         "2773 Start FCF failover per CVL, "
5751                                         "evt_tag:x%x\n", acqe_fip->event_tag);
5752                         rc = lpfc_sli4_redisc_fcf_table(phba);
5753                         if (rc) {
5754                                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
5755                                                 LOG_DISCOVERY,
5756                                                 "2774 Issue FCF rediscover "
5757                                                 "mailbox command failed, "
5758                                                 "fail through to CVL event\n");
5759                                 spin_lock_irq(&phba->hbalock);
5760                                 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
5761                                 spin_unlock_irq(&phba->hbalock);
5762                                 /*
5763                                  * Last resort will be re-try on the
5764                                  * currently registered FCF entry.
5765                                  */
5766                                 lpfc_retry_pport_discovery(phba);
5767                         } else
5768                                 /*
5769                                  * Reset FCF roundrobin bmask for new
5770                                  * discovery.
5771                                  */
5772                                 lpfc_sli4_clear_fcf_rr_bmask(phba);
5773                 }
5774                 break;
5775         default:
5776                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5777                         "0288 Unknown FCoE event type 0x%x event tag "
5778                         "0x%x\n", event_type, acqe_fip->event_tag);
5779                 break;
5780         }
5781 }
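Several branches of this handler repeat one locking shape: take hbalock, test fcf_flag, either bail out or claim an in-progress bit, and drop the lock before issuing the mailbox command. Condensed from the CVL branch above (the break is the switch-case exit in the handler):

	spin_lock_irq(&phba->hbalock);
	if (phba->fcf.fcf_flag & FCF_DISCOVERY) {	/* failover already owned */
		spin_unlock_irq(&phba->hbalock);
		break;
	}
	phba->fcf.fcf_flag |= FCF_ACVL_DISC;		/* claim the failover */
	spin_unlock_irq(&phba->hbalock);
	/* ... then issue lpfc_sli4_redisc_fcf_table() with the lock dropped;
	 * on failure, retake the lock and clear FCF_ACVL_DISC again. */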
5782 
5783 /**
5784  * lpfc_sli4_async_dcbx_evt - Process the asynchronous DCBX event
5785  * @phba: pointer to lpfc hba data structure.
5786  * @acqe_dcbx: pointer to the async DCBX completion queue entry.
5787  *
5788  * This routine is to handle the SLI4 asynchronous DCBX event.
5789  **/
5790 static void
5791 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
5792                          struct lpfc_acqe_dcbx *acqe_dcbx)
5793 {
5794         phba->fc_eventTag = acqe_dcbx->event_tag;
5795         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5796                         "0290 The SLI4 DCBX asynchronous event is not "
5797                         "handled yet\n");
5798 }
5799 
5800 /**
5801  * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
5802  * @phba: pointer to lpfc hba data structure.
5803  * @acqe_grp5: pointer to the async grp5 completion queue entry.
5804  *
5805  * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
5806  * is an asynchronous notification of a logical link speed change. The port
5807  * reports the logical link speed in units of 10Mbps.
5808  **/
5809 static void
5810 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
5811                          struct lpfc_acqe_grp5 *acqe_grp5)
5812 {
5813         uint16_t prev_ll_spd;
5814 
5815         phba->fc_eventTag = acqe_grp5->event_tag;
5816         phba->fcoe_eventtag = acqe_grp5->event_tag;
5817         prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
5818         phba->sli4_hba.link_state.logical_speed =
5819                 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
5820         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5821                         "2789 GRP5 Async Event: Updating logical link speed "
5822                         "from %dMbps to %dMbps\n", prev_ll_spd,
5823                         phba->sli4_hba.link_state.logical_speed);
5824 }
5825 
5826 /**
5827  * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
5828  * @phba: pointer to lpfc hba data structure.
5829  *
5830  * This routine is invoked by the worker thread to process all the pending
5831  * SLI4 asynchronous events.
5832  **/
5833 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
5834 {
5835         struct lpfc_cq_event *cq_event;
5836 
5837         /* First, declare the async event has been handled */
5838         spin_lock_irq(&phba->hbalock);
5839         phba->hba_flag &= ~ASYNC_EVENT;
5840         spin_unlock_irq(&phba->hbalock);
5841         /* Now, handle all the async events */
5842         while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
5843                 /* Get the first event from the head of the event queue */
5844                 spin_lock_irq(&phba->hbalock);
5845                 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
5846                                  cq_event, struct lpfc_cq_event, list);
5847                 spin_unlock_irq(&phba->hbalock);
5848                 /* Process the asynchronous event */
5849                 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
5850                 case LPFC_TRAILER_CODE_LINK:
5851                         lpfc_sli4_async_link_evt(phba,
5852                                                  &cq_event->cqe.acqe_link);
5853                         break;
5854                 case LPFC_TRAILER_CODE_FCOE:
5855                         lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
5856                         break;
5857                 case LPFC_TRAILER_CODE_DCBX:
5858                         lpfc_sli4_async_dcbx_evt(phba,
5859                                                  &cq_event->cqe.acqe_dcbx);
5860                         break;
5861                 case LPFC_TRAILER_CODE_GRP5:
5862                         lpfc_sli4_async_grp5_evt(phba,
5863                                                  &cq_event->cqe.acqe_grp5);
5864                         break;
5865                 case LPFC_TRAILER_CODE_FC:
5866                         lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
5867                         break;
5868                 case LPFC_TRAILER_CODE_SLI:
5869                         lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
5870                         break;
5871                 default:
5872                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5873                                         "1804 Invalid asynchronous event code: "
5874                                         "x%x\n", bf_get(lpfc_trailer_code,
5875                                         &cq_event->cqe.mcqe_cmpl));
5876                         break;
5877                 }
5878                 /* Free the completion event processed to the free pool */
5879                 lpfc_sli4_cq_event_release(phba, cq_event);
5880         }
5881 }
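The drain loop holds hbalock only around list manipulation and dispatches each event unlocked, re-checking emptiness every pass so events queued during processing are still consumed. A generic userspace rendering of the same pop-then-dispatch pattern (illustrative types, a pthread mutex standing in for hbalock):

#include <pthread.h>
#include <stdlib.h>

struct demo_event {
	struct demo_event *next;
	int code;
};

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_event *demo_head;

static void demo_drain(void (*dispatch)(int code))
{
	struct demo_event *ev;

	for (;;) {
		pthread_mutex_lock(&demo_lock);
		ev = demo_head;			/* pop one under the lock */
		if (ev)
			demo_head = ev->next;
		pthread_mutex_unlock(&demo_lock);
		if (!ev)
			break;
		dispatch(ev->code);		/* lock not held here */
		free(ev);	/* analogous to lpfc_sli4_cq_event_release() */
	}
}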
5882 
5883 /**
5884  * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
5885  * @phba: pointer to lpfc hba data structure.
5886  *
5887  * This routine is invoked by the worker thread to process FCF table
5888  * rediscovery pending completion event.
5889  **/
5890 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
5891 {
5892         int rc;
5893 
5894         spin_lock_irq(&phba->hbalock);
5895         /* Clear FCF rediscovery timeout event */
5896         phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
5897         /* Clear driver fast failover FCF record flag */
5898         phba->fcf.failover_rec.flag = 0;
5899         /* Set state for FCF fast failover */
5900         phba->fcf.fcf_flag |= FCF_REDISC_FOV;
5901         spin_unlock_irq(&phba->hbalock);
5902 
5903         /* Scan FCF table from the first entry to re-discover SAN */
5904         lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
5905                         "2777 Start post-quiescent FCF table scan\n");
5906         rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
5907         if (rc)
5908                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
5909                                 "2747 Issue FCF scan read FCF mailbox "
5910                                 "command failed 0x%x\n", rc);
5911 }
5912 
5913 /**
5914  * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
5915  * @phba: pointer to lpfc hba data structure.
5916  * @dev_grp: The HBA PCI-Device group number.
5917  *
5918  * This routine is invoked to set up the per HBA PCI-Device group function
5919  * API jump table entries.
5920  *
5921  * Return: 0 if success, otherwise -ENODEV
5922  **/
5923 int
5924 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5925 {
5926         int rc;
5927 
5928         /* Set up lpfc PCI-device group */
5929         phba->pci_dev_grp = dev_grp;
5930 
5931         /* The LPFC_PCI_DEV_OC uses SLI4 */
5932         if (dev_grp == LPFC_PCI_DEV_OC)
5933                 phba->sli_rev = LPFC_SLI_REV4;
5934 
5935         /* Set up device INIT API function jump table */
5936         rc = lpfc_init_api_table_setup(phba, dev_grp);
5937         if (rc)
5938                 return -ENODEV;
5939         /* Set up SCSI API function jump table */
5940         rc = lpfc_scsi_api_table_setup(phba, dev_grp);
5941         if (rc)
5942                 return -ENODEV;
5943         /* Set up SLI API function jump table */
5944         rc = lpfc_sli_api_table_setup(phba, dev_grp);
5945         if (rc)
5946                 return -ENODEV;
5947         /* Set up MBOX API function jump table */
5948         rc = lpfc_mbox_api_table_setup(phba, dev_grp);
5949         if (rc)
5950                 return -ENODEV;
5951 
5952         return 0;
5953 }
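Each *_api_table_setup() helper fills per-revision function pointers in the hba (for example, lpfc_init_api_table_setup() later in this file selects lpfc_hba_down_post_s3 or _s4), so hot paths dispatch indirectly instead of testing the SLI revision at every call site. A minimal sketch of that jump-table style with illustrative demo_* types:

struct demo_hba {
	int (*down_post)(struct demo_hba *hba);	/* set once at probe time */
};

static int demo_down_post_s3(struct demo_hba *hba) { return 0; /* SLI-3 work */ }
static int demo_down_post_s4(struct demo_hba *hba) { return 0; /* SLI-4 work */ }

static int demo_api_setup(struct demo_hba *hba, int dev_grp)
{
	switch (dev_grp) {
	case 0:					/* e.g. an SLI-3 device group */
		hba->down_post = demo_down_post_s3;
		break;
	case 1:					/* e.g. an SLI-4 device group */
		hba->down_post = demo_down_post_s4;
		break;
	default:
		return -1;			/* the driver returns -ENODEV */
	}
	return 0;
}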
5954 
5955 /**
5956  * lpfc_log_intr_mode - Log the active interrupt mode
5957  * @phba: pointer to lpfc hba data structure.
5958  * @intr_mode: active interrupt mode adopted.
5959  *
5960  * This routine is invoked to log the interrupt mode currently in use by
5961  * the device.
5962  **/
5963 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
5964 {
5965         switch (intr_mode) {
5966         case 0:
5967                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5968                                 "0470 Enabled INTx interrupt mode.\n");
5969                 break;
5970         case 1:
5971                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5972                                 "0481 Enabled MSI interrupt mode.\n");
5973                 break;
5974         case 2:
5975                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5976                                 "0480 Enabled MSI-X interrupt mode.\n");
5977                 break;
5978         default:
5979                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5980                                 "0482 Illegal interrupt mode.\n");
5981                 break;
5982         }
5983         return;
5984 }
5985 
5986 /**
5987  * lpfc_enable_pci_dev - Enable a generic PCI device.
5988  * @phba: pointer to lpfc hba data structure.
5989  *
5990  * This routine is invoked to enable the PCI device that is common to all
5991  * PCI devices.
5992  *
5993  * Return codes
5994  *      0 - successful
5995  *      other values - error
5996  **/
5997 static int
5998 lpfc_enable_pci_dev(struct lpfc_hba *phba)
5999 {
6000         struct pci_dev *pdev;
6001 
6002         /* Obtain PCI device reference */
6003         if (!phba->pcidev)
6004                 goto out_error;
6005         else
6006                 pdev = phba->pcidev;
6007         /* Enable PCI device */
6008         if (pci_enable_device_mem(pdev))
6009                 goto out_error;
6010         /* Request PCI resource for the device */
6011         if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
6012                 goto out_disable_device;
6013         /* Set up device as PCI master and save state for EEH */
6014         pci_set_master(pdev);
6015         pci_try_set_mwi(pdev);
6016         pci_save_state(pdev);
6017 
6018         /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
6019         if (pci_is_pcie(pdev))
6020                 pdev->needs_freset = 1;
6021 
6022         return 0;
6023 
6024 out_disable_device:
6025         pci_disable_device(pdev);
6026 out_error:
6027         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6028                         "1401 Failed to enable pci device\n");
6029         return -ENODEV;
6030 }
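The error handling above is the kernel's usual goto-unwind ladder: each acquired resource has a label that releases it, and a failure jumps to the deepest label covering everything acquired so far, so resources unwind in reverse order. Reduced to its skeleton (placeholder acquire/release functions):

static int demo_acquire_a(void) { return 0; }
static void demo_release_a(void) { }
static int demo_acquire_b(void) { return 0; }

static int demo_setup(void)
{
	if (demo_acquire_a())
		goto out_error;
	if (demo_acquire_b())
		goto out_release_a;	/* only A needs undoing */
	return 0;

out_release_a:
	demo_release_a();
out_error:
	return -1;			/* -ENODEV in the function above */
}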
6031 
6032 /**
6033  * lpfc_disable_pci_dev - Disable a generic PCI device.
6034  * @phba: pointer to lpfc hba data structure.
6035  *
6036  * This routine is invoked to disable the PCI device that is common to all
6037  * PCI devices.
6038  **/
6039 static void
6040 lpfc_disable_pci_dev(struct lpfc_hba *phba)
6041 {
6042         struct pci_dev *pdev;
6043 
6044         /* Obtain PCI device reference */
6045         if (!phba->pcidev)
6046                 return;
6047         else
6048                 pdev = phba->pcidev;
6049         /* Release PCI resource and disable PCI device */
6050         pci_release_mem_regions(pdev);
6051         pci_disable_device(pdev);
6052 
6053         return;
6054 }
6055 
6056 /**
6057  * lpfc_reset_hba - Reset a hba
6058  * @phba: pointer to lpfc hba data structure.
6059  *
6060  * This routine is invoked to reset a hba device. It brings the HBA
6061  * offline, performs a board restart, and then brings the board back
6062  * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
6063  * any outstanding mailbox commands.
6064  **/
6065 void
6066 lpfc_reset_hba(struct lpfc_hba *phba)
6067 {
6068         /* If resets are disabled then set error state and return. */
6069         if (!phba->cfg_enable_hba_reset) {
6070                 phba->link_state = LPFC_HBA_ERROR;
6071                 return;
6072         }
6073         if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
6074                 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
6075         else
6076                 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
6077         lpfc_offline(phba);
6078         lpfc_sli_brdrestart(phba);
6079         lpfc_online(phba);
6080         lpfc_unblock_mgmt_io(phba);
6081 }
6082 
6083 /**
6084  * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
6085  * @phba: pointer to lpfc hba data structure.
6086  *
6087  * This function reads the PCI SR-IOV extended capability to determine the
6088  * total number of virtual functions (TotalVFs) the physical function
6089  * supports.
6090  *
6091  * Return: number of supported virtual functions, or 0 if SR-IOV is absent.
6092  **/
6093 uint16_t
6094 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
6095 {
6096         struct pci_dev *pdev = phba->pcidev;
6097         uint16_t nr_virtfn;
6098         int pos;
6099 
6100         pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
6101         if (pos == 0)
6102                 return 0;
6103 
6104         pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
6105         return nr_virtfn;
6106 }
6107 
6108 /**
6109  * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
6110  * @phba: pointer to lpfc hba data structure.
6111  * @nr_vfn: number of virtual functions to be enabled.
6112  *
6113  * This function enables PCI SR-IOV virtual functions on a physical
6114  * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
6115  * enable that number of virtual functions on the physical function. As
6116  * not all devices support SR-IOV, the return code from the pci_enable_sriov()
6117  * API call is not considered an error condition for most devices.
6118  **/
6119 int
6120 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
6121 {
6122         struct pci_dev *pdev = phba->pcidev;
6123         uint16_t max_nr_vfn;
6124         int rc;
6125 
6126         max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
6127         if (nr_vfn > max_nr_vfn) {
6128                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6129                                 "3057 Requested vfs (%d) greater than "
6130                                 "supported vfs (%d)\n", nr_vfn, max_nr_vfn);
6131                 return -EINVAL;
6132         }
6133 
6134         rc = pci_enable_sriov(pdev, nr_vfn);
6135         if (rc) {
6136                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6137                                 "2806 Failed to enable sriov on this device "
6138                                 "with vfn number nr_vf:%d, rc:%d\n",
6139                                 nr_vfn, rc);
6140         } else
6141                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6142                                 "2807 Successfully enabled sriov on this "
6143                                 "device with vfn number nr_vf:%d\n", nr_vfn);
6144         return rc;
6145 }
6146 
6147 /**
6148  * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
6149  * @phba: pointer to lpfc hba data structure.
6150  *
6151  * This routine is invoked to set up the driver internal resources before the
6152  * device-specific resource setup to support the HBA device it is attached to.
6153  *
6154  * Return codes
6155  *      0 - successful
6156  *      other values - error
6157  **/
6158 static int
6159 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
6160 {
6161         struct lpfc_sli *psli = &phba->sli;
6162 
6163         /*
6164          * Driver resources common to all SLI revisions
6165          */
6166         atomic_set(&phba->fast_event_count, 0);
6167         spin_lock_init(&phba->hbalock);
6168 
6169         /* Initialize ndlp management spinlock */
6170         spin_lock_init(&phba->ndlp_lock);
6171 
6172         /* Initialize port_list spinlock */
6173         spin_lock_init(&phba->port_list_lock);
6174         INIT_LIST_HEAD(&phba->port_list);
6175 
6176         INIT_LIST_HEAD(&phba->work_list);
6177         init_waitqueue_head(&phba->wait_4_mlo_m_q);
6178 
6179         /* Initialize the wait queue head for the kernel thread */
6180         init_waitqueue_head(&phba->work_waitq);
6181 
6182         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6183                         "1403 Protocols supported %s %s %s\n",
6184                         ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
6185                                 "SCSI" : " "),
6186                         ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
6187                                 "NVME" : " "),
6188                         (phba->nvmet_support ? "NVMET" : " "));
6189 
6190         /* Initialize the IO buffer list used by driver for SLI3 SCSI */
6191         spin_lock_init(&phba->scsi_buf_list_get_lock);
6192         INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
6193         spin_lock_init(&phba->scsi_buf_list_put_lock);
6194         INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
6195 
6196         /* Initialize the fabric iocb list */
6197         INIT_LIST_HEAD(&phba->fabric_iocb_list);
6198 
6199         /* Initialize list to save ELS buffers */
6200         INIT_LIST_HEAD(&phba->elsbuf);
6201 
6202         /* Initialize FCF connection rec list */
6203         INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
6204 
6205         /* Initialize OAS configuration list */
6206         spin_lock_init(&phba->devicelock);
6207         INIT_LIST_HEAD(&phba->luns);
6208 
6209         /* MBOX heartbeat timer */
6210         timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
6211         /* Fabric block timer */
6212         timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
6213         /* EA polling mode timer */
6214         timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
6215         /* Heartbeat timer */
6216         timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
6217 
6218         INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);
6219 
6220         return 0;
6221 }
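
/*
 * Illustrative sketch, not part of lpfc: every timer armed above follows the
 * standard timer_setup()/mod_timer()/del_timer_sync() lifecycle. A minimal
 * standalone example with a hypothetical owner structure (assumes
 * <linux/timer.h> and <linux/jiffies.h>):
 */
struct example_owner {
        struct timer_list tmo;
};

static void example_timeout(struct timer_list *t)
{
        /* Recover the owning structure from the embedded timer_list */
        struct example_owner *ex = from_timer(ex, t, tmo);

        /* ... handle the timeout, typically by waking a worker thread ... */
}

static void example_timer_usage(struct example_owner *ex)
{
        /* Bind the callback once, as lpfc_setup_driver_resource_phase1 does */
        timer_setup(&ex->tmo, example_timeout, 0);

        /* Arm (or re-arm) the timer to fire two seconds from now */
        mod_timer(&ex->tmo, jiffies + msecs_to_jiffies(2000));

        /* Tear down, waiting for a concurrently running handler to finish */
        del_timer_sync(&ex->tmo);
}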
6222 
6223 /**
6224  * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
6225  * @phba: pointer to lpfc hba data structure.
6226  *
6227  * This routine is invoked to set up the driver internal resources specific to
6228  * support the SLI-3 HBA device it is attached to.
6229  *
6230  * Return codes
6231  * 0 - successful
6232  * other values - error
6233  **/
6234 static int
6235 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
6236 {
6237         int rc, entry_sz;
6238 
6239         /*
6240          * Initialize timers used by driver
6241          */
6242 
6243         /* FCP polling mode timer */
6244         timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);
6245 
6246         /* Host attention work mask setup */
6247         phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
6248         phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
6249 
6250         /* Get all the module params for configuring this host */
6251         lpfc_get_cfgparam(phba);
6252         /* Set up phase-1 common device driver resources */
6253 
6254         rc = lpfc_setup_driver_resource_phase1(phba);
6255         if (rc)
6256                 return -ENODEV;
6257 
6258         if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
6259                 phba->menlo_flag |= HBA_MENLO_SUPPORT;
6260                 /* check for menlo minimum sg count */
6261                 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
6262                         phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
6263         }
6264 
6265         if (!phba->sli.sli3_ring)
6266                 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
6267                                               sizeof(struct lpfc_sli_ring),
6268                                               GFP_KERNEL);
6269         if (!phba->sli.sli3_ring)
6270                 return -ENOMEM;
6271 
6272         /*
6273          * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
6274          * used to create the sg_dma_buf_pool must be dynamically calculated.
6275          */
6276 
6277         /* Initialize the host templates with the configured values. */
6278         lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
6279         lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
6280         lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
6281 
6282         if (phba->sli_rev == LPFC_SLI_REV4)
6283                 entry_sz = sizeof(struct sli4_sge);
6284         else
6285                 entry_sz = sizeof(struct ulp_bde64);
6286 
6287         /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
6288         if (phba->cfg_enable_bg) {
6289                 /*
6290                  * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
6291                  * the FCP rsp, and a BDE for each. Since we have no control
6292                  * over how many protection data segments the SCSI layer
6293                  * will hand us (i.e., there could be one for every block
6294                  * in the IO), we just allocate enough BDEs to accommodate
6295                  * our max amount, and we need to limit lpfc_sg_seg_cnt to
6296                  * minimize the risk of running out.
6297                  */
6298                 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6299                         sizeof(struct fcp_rsp) +
6300                         (LPFC_MAX_SG_SEG_CNT * entry_sz);
6301 
6302                 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
6303                         phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
6304 
6305                 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
6306                 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
6307         } else {
6308                 /*
6309                  * The scsi_buf for a regular I/O will hold the FCP cmnd,
6310                  * the FCP rsp, a BDE for each, and a BDE for up to
6311                  * cfg_sg_seg_cnt data segments.
6312                  */
6313                 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6314                         sizeof(struct fcp_rsp) +
6315                         ((phba->cfg_sg_seg_cnt + 2) * entry_sz);
6316 
6317                 /* Total BDEs in BPL for scsi_sg_list */
6318                 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
6319         }
6320 
6321         lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
6322                         "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
6323                         phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
6324                         phba->cfg_total_seg_cnt);
6325 
6326         phba->max_vpi = LPFC_MAX_VPI;
6327         /* This will be set to correct value after config_port mbox */
6328         phba->max_vports = 0;
6329 
6330         /*
6331          * Initialize the SLI Layer to run with lpfc HBAs.
6332          */
6333         lpfc_sli_setup(phba);
6334         lpfc_sli_queue_init(phba);
6335 
6336         /* Allocate device driver memory */
6337         if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
6338                 return -ENOMEM;
6339 
6340         phba->lpfc_sg_dma_buf_pool =
6341                 dma_pool_create("lpfc_sg_dma_buf_pool",
6342                                 &phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
6343                                 BPL_ALIGN_SZ, 0);
6344 
6345         if (!phba->lpfc_sg_dma_buf_pool)
6346                 goto fail_free_mem;
6347 
6348         phba->lpfc_cmd_rsp_buf_pool =
6349                         dma_pool_create("lpfc_cmd_rsp_buf_pool",
6350                                         &phba->pcidev->dev,
6351                                         sizeof(struct fcp_cmnd) +
6352                                         sizeof(struct fcp_rsp),
6353                                         BPL_ALIGN_SZ, 0);
6354 
6355         if (!phba->lpfc_cmd_rsp_buf_pool)
6356                 goto fail_free_dma_buf_pool;
6357 
6358         /*
6359          * Enable sr-iov virtual functions if supported and configured
6360          * through the module parameter.
6361          */
6362         if (phba->cfg_sriov_nr_virtfn > 0) {
6363                 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
6364                                                  phba->cfg_sriov_nr_virtfn);
6365                 if (rc) {
6366                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6367                                         "2808 Requested number of SR-IOV "
6368                                         "virtual functions (%d) is not "
6369                                         "supported\n",
6370                                         phba->cfg_sriov_nr_virtfn);
6371                         phba->cfg_sriov_nr_virtfn = 0;
6372                 }
6373         }
6374 
6375         return 0;
6376 
6377 fail_free_dma_buf_pool:
6378         dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
6379         phba->lpfc_sg_dma_buf_pool = NULL;
6380 fail_free_mem:
6381         lpfc_mem_free(phba);
6382         return -ENOMEM;
6383 }
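
/*
 * Illustrative sketch, not part of lpfc: buffers from the two pools created
 * above are obtained and returned with dma_pool_alloc()/dma_pool_free(), and
 * a pool is torn down with dma_pool_destroy() as in the error path above.
 * A minimal round trip with hypothetical sizes (assumes <linux/dmapool.h>):
 */
static int example_dma_pool_roundtrip(struct device *dev)
{
        struct dma_pool *pool;
        dma_addr_t phys;
        void *virt;

        /* 512-byte buffers, 64-byte aligned, no boundary restriction */
        pool = dma_pool_create("example_pool", dev, 512, 64, 0);
        if (!pool)
                return -ENOMEM;

        virt = dma_pool_alloc(pool, GFP_KERNEL, &phys);
        if (!virt) {
                dma_pool_destroy(pool);
                return -ENOMEM;
        }

        /* ... hand @phys to the hardware, touch @virt from the CPU ... */

        dma_pool_free(pool, virt, phys);
        dma_pool_destroy(pool);
        return 0;
}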
6384 
6385 /**
6386  * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
6387  * @phba: pointer to lpfc hba data structure.
6388  *
6389  * This routine is invoked to unset the driver internal resources set up
6390  * specifically for supporting the SLI-3 HBA device it is attached to.
6391  **/
6392 static void
6393 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
6394 {
6395         /* Free device driver memory allocated */
6396         lpfc_mem_free_all(phba);
6397 
6398         return;
6399 }
6400 
6401 /**
6402  * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
6403  * @phba: pointer to lpfc hba data structure.
6404  *
6405  * This routine is invoked to set up the driver internal resources specific to
6406  * support the SLI-4 HBA device it is attached to.
6407  *
6408  * Return codes
6409  *      0 - successful
6410  *      other values - error
6411  **/
6412 static int
6413 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
6414 {
6415         LPFC_MBOXQ_t *mboxq;
6416         MAILBOX_t *mb;
6417         int rc, i, max_buf_size;
6418         uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
6419         struct lpfc_mqe *mqe;
6420         int longs;
6421         int extra;
6422         uint64_t wwn;
6423         u32 if_type;
6424         u32 if_fam;
6425 
6426         phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
6427         phba->sli4_hba.num_possible_cpu = num_possible_cpus();
6428         phba->sli4_hba.curr_disp_cpu = 0;
6429 
6430         /* Get all the module params for configuring this host */
6431         lpfc_get_cfgparam(phba);
6432 
6433         /* Set up phase-1 common device driver resources */
6434         rc = lpfc_setup_driver_resource_phase1(phba);
6435         if (rc)
6436                 return -ENODEV;
6437 
6438         /* Before proceeding, wait for POST done and device ready */
6439         rc = lpfc_sli4_post_status_check(phba);
6440         if (rc)
6441                 return -ENODEV;
6442 
6443         /* Allocate all driver workqueues here */
6444 
6445         /* The lpfc_wq workqueue for deferred irq use */
6446         phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
6447 
6448         /*
6449          * Initialize timers used by driver
6450          */
6451 
6452         timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);
6453 
6454         /* FCF rediscover timer */
6455         timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
6456 
6457         /*
6458          * Control structure for handling external multi-buffer mailbox
6459          * command pass-through.
6460          */
6461         memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
6462                 sizeof(struct lpfc_mbox_ext_buf_ctx));
6463         INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
6464 
6465         phba->max_vpi = LPFC_MAX_VPI;
6466 
6467         /* This will be set to correct value after the read_config mbox */
6468         phba->max_vports = 0;
6469 
6470         /* Program the default value of vlan_id and fc_map */
6471         phba->valid_vlan = 0;
6472         phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
6473         phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
6474         phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
6475 
6476         /*
6477          * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
6478          * we will associate a new ring, for each EQ/CQ/WQ tuple.
6479          * The WQ create will allocate the ring.
6480          */
6481 
6482         /* Initialize buffer queue management fields */
6483         INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
6484         phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
6485         phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
6486 
6487         /*
6488          * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
6489          */
6490         /* Initialize the Abort buffer list used by driver */
6491         spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
6492         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);
6493 
6494         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
6495                 /* Initialize the Abort nvme buffer list used by driver */
6496                 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
6497                 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
6498                 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
6499                 spin_lock_init(&phba->sli4_hba.t_active_list_lock);
6500                 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
6501         }
6502 
6503         /* This abort list used by worker thread */
6504         spin_lock_init(&phba->sli4_hba.sgl_list_lock);
6505         spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
6506 
6507         /*
6508          * Initialize driver internal slow-path work queues
6509          */
6510 
6511         /* Driver internal slow-path CQ Event pool */
6512         INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
6513         /* Response IOCB work queue list */
6514         INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
6515         /* Asynchronous event CQ Event work queue list */
6516         INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
6517         /* Fast-path XRI aborted CQ Event work queue list */
6518         INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
6519         /* Slow-path XRI aborted CQ Event work queue list */
6520         INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
6521         /* Receive queue CQ Event work queue list */
6522         INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
6523 
6524         /* Initialize extent block lists. */
6525         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
6526         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
6527         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
6528         INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
6529 
6530         /* Initialize mboxq lists. These must be initialized up front
6531          * so that teardown is safe even if the early init routines fail.
6532          */
6533         INIT_LIST_HEAD(&phba->sli.mboxq);
6534         INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
6535 
6536         /* initialize optic_state to 0xFF */
6537         phba->sli4_hba.lnk_info.optic_state = 0xff;
6538 
6539         /* Allocate device driver memory */
6540         rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
6541         if (rc)
6542                 return -ENOMEM;
6543 
6544         /* IF Type 2 ports get initialized now. */
6545         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
6546             LPFC_SLI_INTF_IF_TYPE_2) {
6547                 rc = lpfc_pci_function_reset(phba);
6548                 if (unlikely(rc)) {
6549                         rc = -ENODEV;
6550                         goto out_free_mem;
6551                 }
6552                 phba->temp_sensor_support = 1;
6553         }
6554 
6555         /* Create the bootstrap mailbox command */
6556         rc = lpfc_create_bootstrap_mbox(phba);
6557         if (unlikely(rc))
6558                 goto out_free_mem;
6559 
6560         /* Set up the host's endian order with the device. */
6561         rc = lpfc_setup_endian_order(phba);
6562         if (unlikely(rc))
6563                 goto out_free_bsmbx;
6564 
6565         /* Set up the hba's configuration parameters. */
6566         rc = lpfc_sli4_read_config(phba);
6567         if (unlikely(rc))
6568                 goto out_free_bsmbx;
6569         rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
6570         if (unlikely(rc))
6571                 goto out_free_bsmbx;
6572 
6573         /* IF Type 0 ports get initialized now. */
6574         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
6575             LPFC_SLI_INTF_IF_TYPE_0) {
6576                 rc = lpfc_pci_function_reset(phba);
6577                 if (unlikely(rc))
6578                         goto out_free_bsmbx;
6579         }
6580 
6581         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
6582                                                        GFP_KERNEL);
6583         if (!mboxq) {
6584                 rc = -ENOMEM;
6585                 goto out_free_bsmbx;
6586         }
6587 
6588         /* Check for NVMET being configured */
6589         phba->nvmet_support = 0;
6590         if (lpfc_enable_nvmet_cnt) {
6591 
6592                 /* First get WWN of HBA instance */
6593                 lpfc_read_nv(phba, mboxq);
6594                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6595                 if (rc != MBX_SUCCESS) {
6596                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6597                                         "6016 Mailbox failed, mbxCmd x%x "
6598                                         "READ_NV, mbxStatus x%x\n",
6599                                         bf_get(lpfc_mqe_command, &mboxq->u.mqe),
6600                                         bf_get(lpfc_mqe_status, &mboxq->u.mqe));
6601                         mempool_free(mboxq, phba->mbox_mem_pool);
6602                         rc = -EIO;
6603                         goto out_free_bsmbx;
6604                 }
6605                 mb = &mboxq->u.mb;
6606                 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
6607                        sizeof(uint64_t));
6608                 wwn = cpu_to_be64(wwn);
6609                 phba->sli4_hba.wwnn.u.name = wwn;
6610                 memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
6611                        sizeof(uint64_t));
6612                 /* wwn is WWPN of HBA instance */
6613                 wwn = cpu_to_be64(wwn);
6614                 phba->sli4_hba.wwpn.u.name = wwn;
6615 
6616                 /* Check to see if it matches any module parameter */
6617                 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
6618                         if (wwn == lpfc_enable_nvmet[i]) {
6619 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
6620                                 if (lpfc_nvmet_mem_alloc(phba))
6621                                         break;
6622 
6623                                 phba->nvmet_support = 1; /* a match */
6624 
6625                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6626                                                 "6017 NVME Target %016llx\n",
6627                                                 wwn);
6628 #else
6629                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6630                                                 "6021 Can't enable NVME Target."
6631                                                 " NVME_TARGET_FC infrastructure"
6632                                                 " is not in kernel\n");
6633 #endif
6634                                 /* Not supported for NVMET */
6635                                 phba->cfg_xri_rebalancing = 0;
6636                                 break;
6637                         }
6638                 }
6639         }
6640 
6641         lpfc_nvme_mod_param_dep(phba);
6642 
6643         /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
6644         lpfc_supported_pages(mboxq);
6645         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6646         if (!rc) {
6647                 mqe = &mboxq->u.mqe;
6648                 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
6649                        LPFC_MAX_SUPPORTED_PAGES);
6650                 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
6651                         switch (pn_page[i]) {
6652                         case LPFC_SLI4_PARAMETERS:
6653                                 phba->sli4_hba.pc_sli4_params.supported = 1;
6654                                 break;
6655                         default:
6656                                 break;
6657                         }
6658                 }
6659                 /* Read the port's SLI4 Parameters capabilities if supported. */
6660                 if (phba->sli4_hba.pc_sli4_params.supported)
6661                         rc = lpfc_pc_sli4_params_get(phba, mboxq);
6662                 if (rc) {
6663                         mempool_free(mboxq, phba->mbox_mem_pool);
6664                         rc = -EIO;
6665                         goto out_free_bsmbx;
6666                 }
6667         }
6668 
6669         /*
6670          * Get sli4 parameters that override parameters from Port capabilities.
6671          * If this call fails, it isn't critical unless the SLI4 parameters come
6672          * back in conflict.
6673          */
6674         rc = lpfc_get_sli4_parameters(phba, mboxq);
6675         if (rc) {
6676                 if_type = bf_get(lpfc_sli_intf_if_type,
6677                                  &phba->sli4_hba.sli_intf);
6678                 if_fam = bf_get(lpfc_sli_intf_sli_family,
6679                                 &phba->sli4_hba.sli_intf);
6680                 if (phba->sli4_hba.extents_in_use &&
6681                     phba->sli4_hba.rpi_hdrs_in_use) {
6682                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6683                                 "2999 Unsupported SLI4 Parameters "
6684                                 "Extents and RPI headers enabled.\n");
6685                         if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
6686                             if_fam ==  LPFC_SLI_INTF_FAMILY_BE2) {
6687                                 mempool_free(mboxq, phba->mbox_mem_pool);
6688                                 rc = -EIO;
6689                                 goto out_free_bsmbx;
6690                         }
6691                 }
6692                 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
6693                       if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
6694                         mempool_free(mboxq, phba->mbox_mem_pool);
6695                         rc = -EIO;
6696                         goto out_free_bsmbx;
6697                 }
6698         }
6699 
6700         /*
6701          * 1 for cmd, 1 for rsp, NVME adds an extra one
6702          * for boundary conditions in its max_sgl_segment template.
6703          */
6704         extra = 2;
6705         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
6706                 extra++;
6707 
6708         /*
6709          * It doesn't matter what family our adapter is in, we are
6710          * limited to 2 Pages, 512 SGEs, for our SGL.
6711          * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
6712          */
6713         max_buf_size = (2 * SLI4_PAGE_SIZE);
6714 
6715         /*
6716          * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
6717          * used to create the sg_dma_buf_pool must be calculated.
6718          */
6719         if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
6720                 /* Both cfg_enable_bg and cfg_external_dif code paths */
6721 
6722                 /*
6723                  * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
6724                  * the FCP rsp, and an SGE. Since we have no control
6725                  * over how many protection segments the SCSI layer
6726                  * will hand us (i.e., there could be one for every block
6727                  * in the IO), just allocate enough SGEs to accommodate
6728                  * our max amount, and we need to limit lpfc_sg_seg_cnt
6729                  * to minimize the risk of running out.
6730                  */
6731                 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6732                                 sizeof(struct fcp_rsp) + max_buf_size;
6733 
6734                 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
6735                 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
6736 
6737                 /*
6738                  * If supporting DIF, reduce the seg count for scsi to
6739                  * allow room for the DIF sges.
6740                  */
6741                 if (phba->cfg_enable_bg &&
6742                     phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
6743                         phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
6744                 else
6745                         phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
6746 
6747         } else {
6748                 /*
6749                  * The scsi_buf for a regular I/O holds the FCP cmnd,
6750                  * the FCP rsp, a SGE for each, and a SGE for up to
6751                  * cfg_sg_seg_cnt data segments.
6752                  */
6753                 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6754                                 sizeof(struct fcp_rsp) +
6755                                 ((phba->cfg_sg_seg_cnt + extra) *
6756                                 sizeof(struct sli4_sge));
6757 
6758                 /* Total SGEs for scsi_sg_list */
6759                 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
6760                 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
6761 
6762                 /*
6763                  * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
6764                  * need to post 1 page for the SGL.
6765                  */
6766         }
6767 
6768         if (phba->cfg_xpsgl && !phba->nvmet_support)
6769                 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
6770         else if (phba->cfg_sg_dma_buf_size  <= LPFC_MIN_SG_SLI4_BUF_SZ)
6771                 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
6772         else
6773                 phba->cfg_sg_dma_buf_size =
6774                                 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
6775 
6776         phba->border_sge_num = phba->cfg_sg_dma_buf_size /
6777                                sizeof(struct sli4_sge);
6778 
6779         /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
6780         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
6781                 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
6782                         lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
6783                                         "6300 Reducing NVME sg segment "
6784                                         "cnt to %d\n",
6785                                         LPFC_MAX_NVME_SEG_CNT);
6786                         phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
6787                 } else
6788                         phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
6789         }
6790 
6791         /* Initialize the host templates with the updated values. */
6792         lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
6793         lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
6794         lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt;
6795 
6796         lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
6797                         "9087 sg_seg_cnt:%d dmabuf_size:%d "
6798                         "total:%d scsi:%d nvme:%d\n",
6799                         phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
6800                         phba->cfg_total_seg_cnt,  phba->cfg_scsi_seg_cnt,
6801                         phba->cfg_nvme_seg_cnt);
6802 
6803         if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
6804                 i = phba->cfg_sg_dma_buf_size;
6805         else
6806                 i = SLI4_PAGE_SIZE;
6807 
6808         phba->lpfc_sg_dma_buf_pool =
6809                         dma_pool_create("lpfc_sg_dma_buf_pool",
6810                                         &phba->pcidev->dev,
6811                                         phba->cfg_sg_dma_buf_size,
6812                                         i, 0);
6813         if (!phba->lpfc_sg_dma_buf_pool)
6814                 goto out_free_bsmbx;
6815 
6816         phba->lpfc_cmd_rsp_buf_pool =
6817                         dma_pool_create("lpfc_cmd_rsp_buf_pool",
6818                                         &phba->pcidev->dev,
6819                                         sizeof(struct fcp_cmnd) +
6820                                         sizeof(struct fcp_rsp),
6821                                         i, 0);
6822         if (!phba->lpfc_cmd_rsp_buf_pool)
6823                 goto out_free_sg_dma_buf;
6824 
6825         mempool_free(mboxq, phba->mbox_mem_pool);
6826 
6827         /* Verify OAS is supported */
6828         lpfc_sli4_oas_verify(phba);
6829 
6830         /* Verify RAS support on adapter */
6831         lpfc_sli4_ras_init(phba);
6832 
6833         /* Verify all the SLI4 queues */
6834         rc = lpfc_sli4_queue_verify(phba);
6835         if (rc)
6836                 goto out_free_cmd_rsp_buf;
6837 
6838         /* Create driver internal CQE event pool */
6839         rc = lpfc_sli4_cq_event_pool_create(phba);
6840         if (rc)
6841                 goto out_free_cmd_rsp_buf;
6842 
6843         /* Initialize sgl lists per host */
6844         lpfc_init_sgl_list(phba);
6845 
6846         /* Allocate and initialize active sgl array */
6847         rc = lpfc_init_active_sgl_array(phba);
6848         if (rc) {
6849                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6850                                 "1430 Failed to initialize sgl list.\n");
6851                 goto out_destroy_cq_event_pool;
6852         }
6853         rc = lpfc_sli4_init_rpi_hdrs(phba);
6854         if (rc) {
6855                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6856                                 "1432 Failed to initialize rpi headers.\n");
6857                 goto out_free_active_sgl;
6858         }
6859 
6860         /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
6861         longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
6862         phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
6863                                          GFP_KERNEL);
6864         if (!phba->fcf.fcf_rr_bmask) {
6865                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6866                                 "2759 Failed allocate memory for FCF round "
6867                                 "robin failover bmask\n");
6868                 rc = -ENOMEM;
6869                 goto out_remove_rpi_hdrs;
6870         }
6871 
6872         phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
6873                                             sizeof(struct lpfc_hba_eq_hdl),
6874                                             GFP_KERNEL);
6875         if (!phba->sli4_hba.hba_eq_hdl) {
6876                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6877                                 "2572 Failed allocate memory for "
6878                                 "fast-path per-EQ handle array\n");
6879                 rc = -ENOMEM;
6880                 goto out_free_fcf_rr_bmask;
6881         }
6882 
6883         phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
6884                                         sizeof(struct lpfc_vector_map_info),
6885                                         GFP_KERNEL);
6886         if (!phba->sli4_hba.cpu_map) {
6887                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6888                                 "3327 Failed allocate memory for msi-x "
6889                                 "interrupt vector mapping\n");
6890                 rc = -ENOMEM;
6891                 goto out_free_hba_eq_hdl;
6892         }
6893 
6894         phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
6895         if (!phba->sli4_hba.eq_info) {
6896                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6897                                 "3321 Failed allocation for per_cpu stats\n");
6898                 rc = -ENOMEM;
6899                 goto out_free_hba_cpu_map;
6900         }
6901         /*
6902          * Enable sr-iov virtual functions if supported and configured
6903          * through the module parameter.
6904          */
6905         if (phba->cfg_sriov_nr_virtfn > 0) {
6906                 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
6907                                                  phba->cfg_sriov_nr_virtfn);
6908                 if (rc) {
6909                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6910                                         "3020 Requested number of SR-IOV "
6911                                         "virtual functions (%d) is not "
6912                                         "supported\n",
6913                                         phba->cfg_sriov_nr_virtfn);
6914                         phba->cfg_sriov_nr_virtfn = 0;
6915                 }
6916         }
6917 
6918         return 0;
6919 
6920 out_free_hba_cpu_map:
6921         kfree(phba->sli4_hba.cpu_map);
6922 out_free_hba_eq_hdl:
6923         kfree(phba->sli4_hba.hba_eq_hdl);
6924 out_free_fcf_rr_bmask:
6925         kfree(phba->fcf.fcf_rr_bmask);
6926 out_remove_rpi_hdrs:
6927         lpfc_sli4_remove_rpi_hdrs(phba);
6928 out_free_active_sgl:
6929         lpfc_free_active_sgl(phba);
6930 out_destroy_cq_event_pool:
6931         lpfc_sli4_cq_event_pool_destroy(phba);
6932 out_free_cmd_rsp_buf:
6933         dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
6934         phba->lpfc_cmd_rsp_buf_pool = NULL;
6935 out_free_sg_dma_buf:
6936         dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
6937         phba->lpfc_sg_dma_buf_pool = NULL;
6938 out_free_bsmbx:
6939         lpfc_destroy_bootstrap_mbox(phba);
6940 out_free_mem:
6941         lpfc_mem_free(phba);
6942         return rc;
6943 }
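
/*
 * Illustrative sketch, not part of lpfc: the routine above is a canonical
 * instance of the kernel's goto-based unwind idiom -- each acquisition gets
 * a label that releases it, and the labels appear in reverse order of
 * acquisition so a failure at step N falls through the releases for steps
 * N-1..1. Condensed to two hypothetical resources:
 */
struct example_ctx {
        void *a;
        void *b;
};

static int example_start_hw(struct example_ctx *ctx)
{
        return 0;       /* stub standing in for a step that can fail */
}

static int example_setup(struct example_ctx *ctx)
{
        int rc;

        ctx->a = kzalloc(64, GFP_KERNEL);
        if (!ctx->a)
                return -ENOMEM;

        ctx->b = kzalloc(64, GFP_KERNEL);
        if (!ctx->b) {
                rc = -ENOMEM;
                goto out_free_a;        /* undo only what succeeded */
        }

        rc = example_start_hw(ctx);
        if (rc)
                goto out_free_b;

        return 0;

out_free_b:
        kfree(ctx->b);
        ctx->b = NULL;
out_free_a:
        kfree(ctx->a);
        ctx->a = NULL;
        return rc;
}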
6944 
6945 /**
6946  * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
6947  * @phba: pointer to lpfc hba data structure.
6948  *
6949  * This routine is invoked to unset the driver internal resources set up
6950  * specifically for supporting the SLI-4 HBA device it is attached to.
6951  **/
6952 static void
6953 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
6954 {
6955         struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
6956 
6957         free_percpu(phba->sli4_hba.eq_info);
6958 
6959         /* Free memory allocated for msi-x interrupt vector to CPU mapping */
6960         kfree(phba->sli4_hba.cpu_map);
6961         phba->sli4_hba.num_possible_cpu = 0;
6962         phba->sli4_hba.num_present_cpu = 0;
6963         phba->sli4_hba.curr_disp_cpu = 0;
6964 
6965         /* Free memory allocated for fast-path work queue handles */
6966         kfree(phba->sli4_hba.hba_eq_hdl);
6967 
6968         /* Free the allocated rpi headers. */
6969         lpfc_sli4_remove_rpi_hdrs(phba);
6970         lpfc_sli4_remove_rpis(phba);
6971 
6972         /* Free eligible FCF index bmask */
6973         kfree(phba->fcf.fcf_rr_bmask);
6974 
6975         /* Free the ELS sgl list */
6976         lpfc_free_active_sgl(phba);
6977         lpfc_free_els_sgl_list(phba);
6978         lpfc_free_nvmet_sgl_list(phba);
6979 
6980         /* Free the completion queue EQ event pool */
6981         lpfc_sli4_cq_event_release_all(phba);
6982         lpfc_sli4_cq_event_pool_destroy(phba);
6983 
6984         /* Release resource identifiers. */
6985         lpfc_sli4_dealloc_resource_identifiers(phba);
6986 
6987         /* Free the bsmbx region. */
6988         lpfc_destroy_bootstrap_mbox(phba);
6989 
6990         /* Free the SLI Layer memory with SLI4 HBAs */
6991         lpfc_mem_free_all(phba);
6992 
6993         /* Free the current connect table */
6994         list_for_each_entry_safe(conn_entry, next_conn_entry,
6995                 &phba->fcf_conn_rec_list, list) {
6996                 list_del_init(&conn_entry->list);
6997                 kfree(conn_entry);
6998         }
6999 
7000         return;
7001 }
7002 
7003 /**
7004  * lpfc_init_api_table_setup - Set up init api function jump table
7005  * @phba: The hba struct for which this call is being executed.
7006  * @dev_grp: The HBA PCI-Device group number.
7007  *
7008  * This routine sets up the device INIT interface API function jump table
7009  * in @phba struct.
7010  *
7011  * Returns: 0 - success, -ENODEV - failure.
7012  **/
7013 int
7014 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7015 {
7016         phba->lpfc_hba_init_link = lpfc_hba_init_link;
7017         phba->lpfc_hba_down_link = lpfc_hba_down_link;
7018         phba->lpfc_selective_reset = lpfc_selective_reset;
7019         switch (dev_grp) {
7020         case LPFC_PCI_DEV_LP:
7021                 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
7022                 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
7023                 phba->lpfc_stop_port = lpfc_stop_port_s3;
7024                 break;
7025         case LPFC_PCI_DEV_OC:
7026                 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
7027                 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
7028                 phba->lpfc_stop_port = lpfc_stop_port_s4;
7029                 break;
7030         default:
7031                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7032                                 "1431 Invalid HBA PCI-device group: 0x%x\n",
7033                                 dev_grp);
7034                 return -ENODEV;
7036         }
7037         return 0;
7038 }
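
/*
 * Illustrative sketch, not part of lpfc: once the jump table above is filled
 * in, common code dispatches through the function pointers without caring
 * which SLI revision is underneath. With hypothetical names, the pattern
 * reduces to:
 */
struct example_hba {
        int (*down_post)(struct example_hba *hba);
};

static int example_down_post_s3(struct example_hba *hba)
{
        return 0;       /* SLI-3 flavor */
}

static int example_down_post_s4(struct example_hba *hba)
{
        return 0;       /* SLI-4 flavor */
}

static int example_api_setup(struct example_hba *hba, int dev_grp)
{
        switch (dev_grp) {
        case 0:         /* stands in for LPFC_PCI_DEV_LP (SLI-3) */
                hba->down_post = example_down_post_s3;
                break;
        case 1:         /* stands in for LPFC_PCI_DEV_OC (SLI-4) */
                hba->down_post = example_down_post_s4;
                break;
        default:
                return -ENODEV;
        }
        /* Callers now invoke hba->down_post(hba) with no revision checks */
        return 0;
}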
7039 
7040 /**
7041  * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
7042  * @phba: pointer to lpfc hba data structure.
7043  *
7044  * This routine is invoked to set up the driver internal resources after the
7045  * device-specific resource setup to support the HBA device it is attached to.
7046  *
7047  * Return codes
7048  *      0 - successful
7049  *      other values - error
7050  **/
7051 static int
7052 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
7053 {
7054         int error;
7055 
7056         /* Startup the kernel thread for this host adapter. */
7057         phba->worker_thread = kthread_run(lpfc_do_work, phba,
7058                                           "lpfc_worker_%d", phba->brd_no);
7059         if (IS_ERR(phba->worker_thread)) {
7060                 error = PTR_ERR(phba->worker_thread);
7061                 return error;
7062         }
7063 
7064         return 0;
7065 }
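
/*
 * Illustrative sketch, not part of lpfc: the worker-thread handling above is
 * the standard kthread_run()/IS_ERR()/kthread_stop() lifecycle. A minimal
 * standalone worker (assumes <linux/kthread.h> and <linux/sched.h>):
 */
static int example_do_work(void *data)
{
        /* Run until kthread_stop() is called on this task */
        while (!kthread_should_stop()) {
                /* Sleep until woken, then process any queued work */
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();
                /* ... process work here ... */
        }
        return 0;
}

static struct task_struct *example_start_worker(void *data, int instance)
{
        struct task_struct *task;

        task = kthread_run(example_do_work, data, "example_worker_%d",
                           instance);
        if (IS_ERR(task))
                return NULL;    /* caller would map PTR_ERR(task) to an errno */
        return task;
}

/* Shutdown mirrors lpfc_unset_driver_resource_phase2: kthread_stop(task). */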
7066 
7067 /**
7068  * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
7069  * @phba: pointer to lpfc hba data structure.
7070  *
7071  * This routine is invoked to unset the driver internal resources set up after
7072  * the device-specific resource setup for supporting the HBA device it
7073  * is attached to.
7074  **/
7075 static void
7076 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
7077 {
7078         if (phba->wq) {
7079                 flush_workqueue(phba->wq);
7080                 destroy_workqueue(phba->wq);
7081                 phba->wq = NULL;
7082         }
7083 
7084         /* Stop kernel worker thread */
7085         if (phba->worker_thread)
7086                 kthread_stop(phba->worker_thread);
7087 }
7088 
7089 /**
7090  * lpfc_free_iocb_list - Free iocb list.
7091  * @phba: pointer to lpfc hba data structure.
7092  *
7093  * This routine is invoked to free the driver's IOCB list and memory.
7094  **/
7095 void
7096 lpfc_free_iocb_list(struct lpfc_hba *phba)
7097 {
7098         struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
7099 
7100         spin_lock_irq(&phba->hbalock);
7101         list_for_each_entry_safe(iocbq_entry, iocbq_next,
7102                                  &phba->lpfc_iocb_list, list) {
7103                 list_del(&iocbq_entry->list);
7104                 kfree(iocbq_entry);
7105                 phba->total_iocbq_bufs--;
7106         }
7107         spin_unlock_irq(&phba->hbalock);
7108 
7109         return;
7110 }
7111 
7112 /**
7113  * lpfc_init_iocb_list - Allocate and initialize iocb list.
7114  * @phba: pointer to lpfc hba data structure.
7115  * @iocb_count: number of IOCB entries to allocate.
 *
7116  * This routine is invoked to allocate and initialize the driver's IOCB
7117  * list and set up the IOCB tag array accordingly.
7118  *
7119  * Return codes
7120  *      0 - successful
7121  *      other values - error
7122  **/
7123 int
7124 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
7125 {
7126         struct lpfc_iocbq *iocbq_entry = NULL;
7127         uint16_t iotag;
7128         int i;
7129 
7130         /* Initialize and populate the iocb list per host.  */
7131         INIT_LIST_HEAD(&phba->lpfc_iocb_list);
7132         for (i = 0; i < iocb_count; i++) {
7133                 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
7134                 if (iocbq_entry == NULL) {
7135                         printk(KERN_ERR "%s: only allocated %d iocbs of "
7136                                 "expected %d count. Unloading driver.\n",
7137                                 __func__, i, iocb_count);
7138                         goto out_free_iocbq;
7139                 }
7140 
7141                 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
7142                 if (iotag == 0) {
7143                         kfree(iocbq_entry);
7144                         printk(KERN_ERR "%s: failed to allocate IOTAG. "
7145                                 "Unloading driver.\n", __func__);
7146                         goto out_free_iocbq;
7147                 }
7148                 iocbq_entry->sli4_lxritag = NO_XRI;
7149                 iocbq_entry->sli4_xritag = NO_XRI;
7150 
7151                 spin_lock_irq(&phba->hbalock);
7152                 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
7153                 phba->total_iocbq_bufs++;
7154                 spin_unlock_irq(&phba->hbalock);
7155         }
7156 
7157         return 0;
7158 
7159 out_free_iocbq:
7160         lpfc_free_iocb_list(phba);
7161 
7162         return -ENOMEM;
7163 }
7164 
7165 /**
7166  * lpfc_free_sgl_list - Free a given sgl list.
7167  * @phba: pointer to lpfc hba data structure.
7168  * @sglq_list: pointer to the head of sgl list.
7169  *
7170  * This routine is invoked to free a given sgl list and its memory.
7171  **/
7172 void
7173 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
7174 {
7175         struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7176 
7177         list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
7178                 list_del(&sglq_entry->list);
7179                 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
7180                 kfree(sglq_entry);
7181         }
7182 }
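
/*
 * Illustrative sketch, not part of lpfc: list_for_each_entry_safe() is
 * required above because each node is unlinked and freed inside the loop;
 * the "_safe" variant caches the next pointer before the body runs. The
 * bare idiom with a hypothetical node type (assumes <linux/list.h> and
 * <linux/slab.h>):
 */
struct example_node {
        struct list_head list;
        /* ... payload ... */
};

static void example_free_all(struct list_head *head)
{
        struct example_node *node, *next;

        list_for_each_entry_safe(node, next, head, list) {
                list_del(&node->list);  /* unlink before freeing */
                kfree(node);            /* safe: @next was saved already */
        }
}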
7183 
7184 /**
7185  * lpfc_free_els_sgl_list - Free els sgl list.
7186  * @phba: pointer to lpfc hba data structure.
7187  *
7188  * This routine is invoked to free the driver's els sgl list and memory.
7189  **/
7190 static void
7191 lpfc_free_els_sgl_list(struct lpfc_hba *phba)
7192 {
7193         LIST_HEAD(sglq_list);
7194 
7195         /* Retrieve all els sgls from driver list */
7196         spin_lock_irq(&phba->hbalock);
7197         spin_lock(&phba->sli4_hba.sgl_list_lock);
7198         list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
7199         spin_unlock(&phba->sli4_hba.sgl_list_lock);
7200         spin_unlock_irq(&phba->hbalock);
7201 
7202         /* Now free the sgl list */
7203         lpfc_free_sgl_list(phba, &sglq_list);
7204 }
7205 
7206 /**
7207  * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
7208  * @phba: pointer to lpfc hba data structure.
7209  *
7210  * This routine is invoked to free the driver's nvmet sgl list and memory.
7211  **/
7212 static void
7213 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
7214 {
7215         struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7216         LIST_HEAD(sglq_list);
7217 
7218         /* Retrieve all nvmet sgls from driver list */
7219         spin_lock_irq(&phba->hbalock);
7220         spin_lock(&phba->sli4_hba.sgl_list_lock);
7221         list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
7222         spin_unlock(&phba->sli4_hba.sgl_list_lock);
7223         spin_unlock_irq(&phba->hbalock);
7224 
7225         /* Now free the sgl list */
7226         list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
7227                 list_del(&sglq_entry->list);
7228                 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
7229                 kfree(sglq_entry);
7230         }
7231 
7232         /* Update the nvmet_xri_cnt to reflect no current sgls.
7233          * The next initialization cycle sets the count and allocates
7234          * the sgls over again.
7235          */
7236         phba->sli4_hba.nvmet_xri_cnt = 0;
7237 }
7238 
7239 /**
7240  * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
7241  * @phba: pointer to lpfc hba data structure.
7242  *
7243  * This routine is invoked to allocate the driver's active sgl memory.
7244  * This array will hold the sglq_entry's for active IOs.
7245  **/
7246 static int
7247 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
7248 {
7249         int size;
7250         size = sizeof(struct lpfc_sglq *);
7251         size *= phba->sli4_hba.max_cfg_param.max_xri;
7252 
7253         phba->sli4_hba.lpfc_sglq_active_list =
7254                 kzalloc(size, GFP_KERNEL);
7255         if (!phba->sli4_hba.lpfc_sglq_active_list)
7256                 return -ENOMEM;
7257         return 0;
7258 }
7259 
7260 /**
7261  * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
7262  * @phba: pointer to lpfc hba data structure.
7263  *
7264  * This routine is invoked to walk through the array of active sglq entries
7265  * and free all of the resources.
7266  * This is just a placeholder for now.
7267  **/
7268 static void
7269 lpfc_free_active_sgl(struct lpfc_hba *phba)
7270 {
7271         kfree(phba->sli4_hba.lpfc_sglq_active_list);
7272 }
7273 
7274 /**
7275  * lpfc_init_sgl_list - Allocate and initialize sgl list.
7276  * @phba: pointer to lpfc hba data structure.
7277  *
7278  * This routine is invoked to allocate and initialize the driver's sgl
7279  * list and set up the sgl xritag tag array accordingly.
7280  *
7281  **/
7282 static void
7283 lpfc_init_sgl_list(struct lpfc_hba *phba)
7284 {
7285         /* Initialize and populate the sglq list per host/VF. */
7286         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
7287         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
7288         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
7289         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
7290 
7291         /* els xri-sgl book keeping */
7292         phba->sli4_hba.els_xri_cnt = 0;
7293 
7294         /* nvme xri-buffer book keeping */
7295         phba->sli4_hba.io_xri_cnt = 0;
7296 }
7297 
7298 /**
7299  * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
7300  * @phba: pointer to lpfc hba data structure.
7301  *
7302  * This routine is invoked to post rpi header templates to the
7303  * port for those SLI4 ports that do not support extents.  This routine
7304  * posts a PAGE_SIZE memory region to the port to hold up to
7305  * PAGE_SIZE / 64 rpi context headers.  This is an initialization routine
7306  * and should be called only when interrupts are disabled.
7307  *
7308  * Return codes
7309  *      0 - successful
7310  *      -ERROR - otherwise.
7311  **/
7312 int
7313 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
7314 {
7315         int rc = 0;
7316         struct lpfc_rpi_hdr *rpi_hdr;
7317 
7318         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
7319         if (!phba->sli4_hba.rpi_hdrs_in_use)
7320                 return rc;
7321         if (phba->sli4_hba.extents_in_use)
7322                 return -EIO;
7323 
7324         rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
7325         if (!rpi_hdr) {
7326                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7327                                 "0391 Error during rpi post operation\n");
7328                 lpfc_sli4_remove_rpis(phba);
7329                 rc = -ENODEV;
7330         }
7331 
7332         return rc;
7333 }
7334 
7335 /**
7336  * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
7337  * @phba: pointer to lpfc hba data structure.
7338  *
7339  * This routine is invoked to allocate a single 4KB memory region to
7340  * support rpis and stores them in the phba.  This single region
7341  * provides support for up to 64 rpis.  The region is used globally
7342  * by the device.
7343  *
7344  * Returns:
7345  *   A valid rpi hdr on success.
7346  *   A NULL pointer on any failure.
7347  **/
7348 struct lpfc_rpi_hdr *
7349 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
7350 {
7351         uint16_t rpi_limit, curr_rpi_range;
7352         struct lpfc_dmabuf *dmabuf;
7353         struct lpfc_rpi_hdr *rpi_hdr;
7354 
7355         /*
7356          * If the SLI4 port supports extents, posting the rpi header isn't
7357          * required.  Set the expected maximum count and let the actual value
7358          * get set when extents are fully allocated.
7359          */
7360         if (!phba->sli4_hba.rpi_hdrs_in_use)
7361                 return NULL;
7362         if (phba->sli4_hba.extents_in_use)
7363                 return NULL;
7364 
7365         /* The limit on the logical index is just the max_rpi count. */
7366         rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
7367 
7368         spin_lock_irq(&phba->hbalock);
7369         /*
7370          * Establish the starting RPI in this header block.  The starting
7371          * rpi is normalized to a zero base because the physical rpi is
7372          * port based.
7373          */
7374         curr_rpi_range = phba->sli4_hba.next_rpi;
7375         spin_unlock_irq(&phba->hbalock);
7376 
7377         /* Reached full RPI range */
7378         if (curr_rpi_range == rpi_limit)
7379                 return NULL;
7380 
7381         /*
7382          * First allocate the protocol header region for the port.  The
7383          * port expects a 4KB DMA-mapped memory region that is 4K aligned.
7384          */
7385         dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
7386         if (!dmabuf)
7387                 return NULL;
7388 
7389         dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
7390                                           LPFC_HDR_TEMPLATE_SIZE,
7391                                           &dmabuf->phys, GFP_KERNEL);
7392         if (!dmabuf->virt) {
7393                 rpi_hdr = NULL;
7394                 goto err_free_dmabuf;
7395         }
7396 
7397         if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
7398                 rpi_hdr = NULL;
7399                 goto err_free_coherent;
7400         }
7401 
7402         /* Save the rpi header data for cleanup later. */
7403         rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
7404         if (!rpi_hdr)
7405                 goto err_free_coherent;
7406 
7407         rpi_hdr->dmabuf = dmabuf;
7408         rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
7409         rpi_hdr->page_count = 1;
7410         spin_lock_irq(&phba->hbalock);
7411 
7412         /* The rpi_hdr stores the logical index only. */
7413         rpi_hdr->start_rpi = curr_rpi_range;
7414         rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
7415         list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
7416 
7417         spin_unlock_irq(&phba->hbalock);
7418         return rpi_hdr;
7419 
7420  err_free_coherent:
7421         dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
7422                           dmabuf->virt, dmabuf->phys);
7423  err_free_dmabuf:
7424         kfree(dmabuf);
7425         return NULL;
7426 }
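
/*
 * Illustrative sketch, not part of lpfc: the allocation above pairs
 * dma_alloc_coherent() with an explicit IS_ALIGNED() check because the port
 * requires the 4KB region to also be 4KB aligned. The bare pattern with
 * hypothetical names (assumes <linux/dma-mapping.h>):
 */
static void *example_alloc_aligned_region(struct device *dev, size_t size,
                                          dma_addr_t *phys)
{
        void *virt;

        virt = dma_alloc_coherent(dev, size, phys, GFP_KERNEL);
        if (!virt)
                return NULL;

        /* Reject a region the hardware could not address correctly */
        if (!IS_ALIGNED(*phys, size)) {
                dma_free_coherent(dev, size, virt, *phys);
                return NULL;
        }
        return virt;
}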
7427 
7428 /**
7429  * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
7430  * @phba: pointer to lpfc hba data structure.
7431  *
7432  * This routine is invoked to remove all memory resources allocated
7433  * to support rpis for SLI4 ports not supporting extents. This routine
7434  * presumes the caller has released all rpis consumed by fabric or port
7435  * logins and is prepared to have the header pages removed.
7436  **/
7437 void
7438 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
7439 {
7440         struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
7441 
7442         if (!phba->sli4_hba.rpi_hdrs_in_use)
7443                 goto exit;
7444 
7445         list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
7446                                  &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
7447                 list_del(&rpi_hdr->list);
7448                 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
7449                                   rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
7450                 kfree(rpi_hdr->dmabuf);
7451                 kfree(rpi_hdr);
7452         }
7453  exit:
7454         /* There are no rpis available to the port now. */
7455         phba->sli4_hba.next_rpi = 0;
7456 }
7457 
7458 /**
7459  * lpfc_hba_alloc - Allocate driver hba data structure for a device.
7460  * @pdev: pointer to pci device data structure.
7461  *
7462  * This routine is invoked to allocate the driver hba data structure for an
7463  * HBA device. If the allocation is successful, the phba reference to the
7464  * PCI device data structure is set.
7465  *
7466  * Return codes
7467  *      pointer to @phba - successful
7468  *      NULL - error
7469  **/
7470 static struct lpfc_hba *
7471 lpfc_hba_alloc(struct pci_dev *pdev)
7472 {
7473         struct lpfc_hba *phba;
7474 
7475         /* Allocate memory for HBA structure */
7476         phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
7477         if (!phba) {
7478                 dev_err(&pdev->dev, "failed to allocate hba struct\n");
7479                 return NULL;
7480         }
7481 
7482         /* Set reference to PCI device in HBA structure */
7483         phba->pcidev = pdev;
7484 
7485         /* Assign an unused board number */
7486         phba->brd_no = lpfc_get_instance();
7487         if (phba->brd_no < 0) {
7488                 kfree(phba);
7489                 return NULL;
7490         }
7491         phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
7492 
7493         spin_lock_init(&phba->ct_ev_lock);
7494         INIT_LIST_HEAD(&phba->ct_ev_waiters);
7495 
7496         return phba;
7497 }
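
/*
 * Illustrative sketch, not part of lpfc: lpfc_get_instance() hands out board
 * numbers from an IDR (lpfc_hba_index, released via idr_remove() in
 * lpfc_hba_free() below). The generic allocate/release pairing, with a
 * hypothetical IDR (assumes <linux/idr.h>):
 */
static DEFINE_IDR(example_index);

static int example_get_instance(void *owner)
{
        /* Returns the smallest free id >= 0, or a negative errno */
        return idr_alloc(&example_index, owner, 0, 0, GFP_KERNEL);
}

static void example_put_instance(int instance)
{
        idr_remove(&example_index, instance);
}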
7498 
7499 /**
7500  * lpfc_hba_free - Free driver hba data structure with a device.
7501  * @phba: pointer to lpfc hba data structure.
7502  *
7503  * This routine is invoked to free the driver hba data structure for an
7504  * HBA device.
7505  **/
7506 static void
7507 lpfc_hba_free(struct lpfc_hba *phba)
7508 {
7509         if (phba->sli_rev == LPFC_SLI_REV4)
7510                 kfree(phba->sli4_hba.hdwq);
7511 
7512         /* Release the driver assigned board number */
7513         idr_remove(&lpfc_hba_index, phba->brd_no);
7514 
7515         /* Free memory allocated for the SLI3 rings */
7516         kfree(phba->sli.sli3_ring);
7517         phba->sli.sli3_ring = NULL;
7518 
7519         kfree(phba);
7520         return;
7521 }
7522 
7523 /**
7524  * lpfc_create_shost - Create hba physical port with associated scsi host.
7525  * @phba: pointer to lpfc hba data structure.
7526  *
7527  * This routine is invoked to create HBA physical port and associate a SCSI
7528  * host with it.
7529  *
7530  * Return codes
7531  *      0 - successful
7532  *      other values - error
7533  **/
7534 static int
7535 lpfc_create_shost(struct lpfc_hba *phba)
7536 {
7537         struct lpfc_vport *vport;
7538         struct Scsi_Host  *shost;
7539 
7540         /* Initialize HBA FC structure */
7541         phba->fc_edtov = FF_DEF_EDTOV;
7542         phba->fc_ratov = FF_DEF_RATOV;
7543         phba->fc_altov = FF_DEF_ALTOV;
7544         phba->fc_arbtov = FF_DEF_ARBTOV;
7545 
7546         atomic_set(&phba->sdev_cnt, 0);
7547         vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
7548         if (!vport)
7549                 return -ENODEV;
7550 
7551         shost = lpfc_shost_from_vport(vport);
7552         phba->pport = vport;
7553 
7554         if (phba->nvmet_support) {
7555                 /* Only 1 vport (pport) will support NVME target */
7556                 if (phba->txrdy_payload_pool == NULL) {
7557                         phba->txrdy_payload_pool = dma_pool_create(
7558                                 "txrdy_pool", &phba->pcidev->dev,
7559                                 TXRDY_PAYLOAD_LEN, 16, 0);
7560                         if (phba->txrdy_payload_pool) {
7561                                 phba->targetport = NULL;
7562                                 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
7563                                 lpfc_printf_log(phba, KERN_INFO,
7564                                                 LOG_INIT | LOG_NVME_DISC,
7565                                                 "6076 NVME Target Found\n");
7566                         }
7567                 }
7568         }
7569 
7570         lpfc_debugfs_initialize(vport);
7571         /* Store a reference to the SCSI host in the driver's device private data */
7572         pci_set_drvdata(phba->pcidev, shost);
7573 
7574         /*
7575          * At this point we are fully registered with PSA. In addition,
7576          * any initial discovery should be completed.
7577          */
7578         vport->load_flag |= FC_ALLOW_FDMI;
7579         if (phba->cfg_enable_SmartSAN ||
7580             (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
7581 
7582                 /* Setup appropriate attribute masks */
7583                 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
7584                 if (phba->cfg_enable_SmartSAN)
7585                         vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
7586                 else
7587                         vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
7588         }
7589         return 0;
7590 }
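
/*
 * Illustrative sketch (not driver code): the txrdy_payload_pool created
 * above is a dma_pool - a slab-style allocator for small, equal-sized DMA
 * buffers with a guaranteed alignment (16 bytes in the driver). The usual
 * lifecycle with hypothetical names (assumes <linux/dmapool.h>; the
 * 64-byte size is arbitrary):
 */
static __maybe_unused void demo_dma_pool_usage(struct device *dev)
{
        struct dma_pool *pool;
        dma_addr_t dma;
        void *buf;

        pool = dma_pool_create("demo_pool", dev, 64, 16, 0);
        if (!pool)
                return;

        buf = dma_pool_alloc(pool, GFP_KERNEL, &dma);   /* one buffer */
        if (buf)
                dma_pool_free(pool, buf, dma);          /* return it */

        dma_pool_destroy(pool);                         /* then the pool */
}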
7591 
7592 /**
7593  * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
7594  * @phba: pointer to lpfc hba data structure.
7595  *
7596  * This routine is invoked to destroy HBA physical port and the associated
7597  * SCSI host.
7598  **/
7599 static void
7600 lpfc_destroy_shost(struct lpfc_hba *phba)
7601 {
7602         struct lpfc_vport *vport = phba->pport;
7603 
7604         /* Destroy the physical port associated with the SCSI host */
7605         destroy_port(vport);
7606 
7607         return;
7608 }
7609 
7610 /**
7611  * lpfc_setup_bg - Setup Block guard structures and debug areas.
7612  * @phba: pointer to lpfc hba data structure.
7613  * @shost: the shost to be used to detect Block guard settings.
7614  *
7615  * This routine sets up the local Block guard protocol settings for @shost,
7616  * retaining only the protection mask and guard types the driver supports.
7617  **/
7618 static void
7619 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
7620 {
7621         uint32_t old_mask;
7622         uint32_t old_guard;
7623 
7624         if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
7625                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7626                                 "1478 Registering BlockGuard with the "
7627                                 "SCSI layer\n");
7628 
7629                 old_mask = phba->cfg_prot_mask;
7630                 old_guard = phba->cfg_prot_guard;
7631 
7632                 /* Only allow supported values */
7633                 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
7634                         SHOST_DIX_TYPE0_PROTECTION |
7635                         SHOST_DIX_TYPE1_PROTECTION);
7636                 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
7637                                          SHOST_DIX_GUARD_CRC);
7638 
7639                 /* DIF Type 1 protection for profiles AST1/C1 is end to end */
7640                 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
7641                         phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
7642 
7643                 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
7644                         if ((old_mask != phba->cfg_prot_mask) ||
7645                                 (old_guard != phba->cfg_prot_guard))
7646                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7647                                         "1475 Registering BlockGuard with the "
7648                                         "SCSI layer: mask %d  guard %d\n",
7649                                         phba->cfg_prot_mask,
7650                                         phba->cfg_prot_guard);
7651 
7652                         scsi_host_set_prot(shost, phba->cfg_prot_mask);
7653                         scsi_host_set_guard(shost, phba->cfg_prot_guard);
7654                 } else
7655                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7656                                 "1479 Not Registering BlockGuard with the SCSI "
7657                                 "layer, Bad protection parameters: %d %d\n",
7658                                 old_mask, old_guard);
7659         }
7660 }
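
/*
 * Illustrative sketch (not driver code): once sanitized, the mask and guard
 * are published through the stock SCSI host API from <scsi/scsi_host.h>,
 * exactly as above. The particular combination below is only an example:
 */
static __maybe_unused void demo_register_prot(struct Scsi_Host *shost)
{
        /* Advertise T10 DIF Type 1 plus DIX Type 0/1 ... */
        scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
                                  SHOST_DIX_TYPE0_PROTECTION |
                                  SHOST_DIX_TYPE1_PROTECTION);
        /* ... accepting both CRC and IP-checksum guard tags */
        scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC | SHOST_DIX_GUARD_IP);
}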
7661 
7662 /**
7663  * lpfc_post_init_setup - Perform necessary device post initialization setup.
7664  * @phba: pointer to lpfc hba data structure.
7665  *
7666  * This routine is invoked to perform all the necessary post initialization
7667  * setup for the device.
7668  **/
7669 static void
7670 lpfc_post_init_setup(struct lpfc_hba *phba)
7671 {
7672         struct Scsi_Host  *shost;
7673         struct lpfc_adapter_event_header adapter_event;
7674 
7675         /* Get the default values for Model Name and Description */
7676         lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
7677 
7678         /*
7679          * hba setup may have changed the hba_queue_depth so we need to
7680          * adjust the value of can_queue.
7681          */
7682         shost = pci_get_drvdata(phba->pcidev);
7683         shost->can_queue = phba->cfg_hba_queue_depth - 10;
7684 
7685         lpfc_host_attrib_init(shost);
7686 
7687         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
7688                 spin_lock_irq(shost->host_lock);
7689                 lpfc_poll_start_timer(phba);
7690                 spin_unlock_irq(shost->host_lock);
7691         }
7692 
7693         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7694                         "0428 Perform SCSI scan\n");
7695         /* Send board arrival event to upper layer */
7696         adapter_event.event_type = FC_REG_ADAPTER_EVENT;
7697         adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
7698         fc_host_post_vendor_event(shost, fc_get_event_number(),
7699                                   sizeof(adapter_event),
7700                                   (char *) &adapter_event,
7701                                   LPFC_NL_VENDOR_ID);
7702         return;
7703 }
7704 
7705 /**
7706  * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
7707  * @phba: pointer to lpfc hba data structure.
7708  *
7709  * This routine is invoked to set up the PCI device memory space for device
7710  * with SLI-3 interface spec.
7711  *
7712  * Return codes
7713  *      0 - successful
7714  *      other values - error
7715  **/
7716 static int
7717 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
7718 {
7719         struct pci_dev *pdev = phba->pcidev;
7720         unsigned long bar0map_len, bar2map_len;
7721         int i, hbq_count;
7722         void *ptr;
7723         int error;
7724 
7725         if (!pdev)
7726                 return -ENODEV;
7727 
7728         /* Set the device DMA mask size */
7729         error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
7730         if (error)
7731                 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
7732         if (error)
7733                 return error;
7734         error = -ENODEV;
7735 
7736         /* Get the bus address of Bar0 and Bar2 and the number of bytes
7737          * required by each mapping.
7738          */
7739         phba->pci_bar0_map = pci_resource_start(pdev, 0);
7740         bar0map_len = pci_resource_len(pdev, 0);
7741 
7742         phba->pci_bar2_map = pci_resource_start(pdev, 2);
7743         bar2map_len = pci_resource_len(pdev, 2);
7744 
7745         /* Map HBA SLIM to a kernel virtual address. */
7746         phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
7747         if (!phba->slim_memmap_p) {
7748                 dev_printk(KERN_ERR, &pdev->dev,
7749                            "ioremap failed for SLIM memory.\n");
7750                 goto out;
7751         }
7752 
7753         /* Map HBA Control Registers to a kernel virtual address. */
7754         phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
7755         if (!phba->ctrl_regs_memmap_p) {
7756                 dev_printk(KERN_ERR, &pdev->dev,
7757                            "ioremap failed for HBA control registers.\n");
7758                 goto out_iounmap_slim;
7759         }
7760 
7761         /* Allocate memory for SLI-2 structures */
7762         phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7763                                                &phba->slim2p.phys, GFP_KERNEL);
7764         if (!phba->slim2p.virt)
7765                 goto out_iounmap;
7766 
7767         phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
7768         phba->mbox_ext = (phba->slim2p.virt +
7769                 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
7770         phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
7771         phba->IOCBs = (phba->slim2p.virt +
7772                        offsetof(struct lpfc_sli2_slim, IOCBs));
7773 
7774         phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
7775                                                  lpfc_sli_hbq_size(),
7776                                                  &phba->hbqslimp.phys,
7777                                                  GFP_KERNEL);
7778         if (!phba->hbqslimp.virt)
7779                 goto out_free_slim;
7780 
7781         hbq_count = lpfc_sli_hbq_count();
7782         ptr = phba->hbqslimp.virt;
7783         for (i = 0; i < hbq_count; ++i) {
7784                 phba->hbqs[i].hbq_virt = ptr;
7785                 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
7786                 ptr += (lpfc_hbq_defs[i]->entry_count *
7787                         sizeof(struct lpfc_hbq_entry));
7788         }
7789         phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
7790         phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
7791 
7792         memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
7793 
7794         phba->MBslimaddr = phba->slim_memmap_p;
7795         phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
7796         phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
7797         phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
7798         phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
7799 
7800         return 0;
7801 
7802 out_free_slim:
7803         dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7804                           phba->slim2p.virt, phba->slim2p.phys);
7805 out_iounmap:
7806         iounmap(phba->ctrl_regs_memmap_p);
7807 out_iounmap_slim:
7808         iounmap(phba->slim_memmap_p);
7809 out:
7810         return error;
7811 }
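
/*
 * Illustrative sketch (not driver code): the error path above is the
 * standard kernel goto-unwind ladder - each failure jumps to the label
 * that releases everything acquired so far, in reverse order. The same
 * shape reduced to two steps, with hypothetical globals:
 */
static void __iomem *demo_regs;
static void *demo_buf;
static dma_addr_t demo_buf_dma;

static __maybe_unused int demo_pci_mem_setup(struct pci_dev *pdev)
{
        int error = -ENODEV;

        demo_regs = ioremap(pci_resource_start(pdev, 0),
                            pci_resource_len(pdev, 0));
        if (!demo_regs)
                goto out;

        demo_buf = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
                                      &demo_buf_dma, GFP_KERNEL);
        if (!demo_buf)
                goto out_iounmap;

        return 0;

out_iounmap:
        iounmap(demo_regs);     /* undo step 1; later steps unwind first */
out:
        return error;
}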
7812 
7813 /**
7814  * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
7815  * @phba: pointer to lpfc hba data structure.
7816  *
7817  * This routine is invoked to unset the PCI device memory space for device
7818  * with SLI-3 interface spec.
7819  **/
7820 static void
7821 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
7822 {
7823         struct pci_dev *pdev;
7824 
7825         /* Obtain PCI device reference */
7826         if (!phba->pcidev)
7827                 return;
7828         else
7829                 pdev = phba->pcidev;
7830 
7831         /* Free coherent DMA memory allocated */
7832         dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
7833                           phba->hbqslimp.virt, phba->hbqslimp.phys);
7834         dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7835                           phba->slim2p.virt, phba->slim2p.phys);
7836 
7837         /* I/O memory unmap */
7838         iounmap(phba->ctrl_regs_memmap_p);
7839         iounmap(phba->slim_memmap_p);
7840 
7841         return;
7842 }
7843 
7844 /**
7845  * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
7846  * @phba: pointer to lpfc hba data structure.
7847  *
7848  * This routine is invoked to wait for the SLI4 device Power On Self Test
7849  * (POST) to complete and to check the resulting status.
7850  *
7851  * Return 0 if successful, otherwise -ENODEV.
7852  **/
7853 int
7854 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
7855 {
7856         struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
7857         struct lpfc_register reg_data;
7858         int i, port_error = 0;
7859         uint32_t if_type;
7860 
7861         memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
7862         memset(&reg_data, 0, sizeof(reg_data));
7863         if (!phba->sli4_hba.PSMPHRregaddr)
7864                 return -ENODEV;
7865 
7866         /* Wait up to 30 seconds (3000 x 10 ms) for SLI port POST to complete */
7867         for (i = 0; i < 3000; i++) {
7868                 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
7869                         &portsmphr_reg.word0) ||
7870                         (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
7871                         /* Port has a fatal POST error, break out */
7872                         port_error = -ENODEV;
7873                         break;
7874                 }
7875                 if (LPFC_POST_STAGE_PORT_READY ==
7876                     bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
7877                         break;
7878                 msleep(10);
7879         }
7880 
7881         /*
7882          * If there was a port error during POST, then don't proceed with
7883          * other register reads as the data may not be valid.  Just exit.
7884          */
7885         if (port_error) {
7886                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7887                         "1408 Port Failed POST - portsmphr=0x%x, "
7888                         "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
7889                         "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
7890                         portsmphr_reg.word0,
7891                         bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
7892                         bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
7893                         bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
7894                         bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
7895                         bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
7896                         bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
7897                         bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
7898                         bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
7899         } else {
7900                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7901                                 "2534 Device Info: SLIFamily=0x%x, "
7902                                 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
7903                                 "SLIHint_2=0x%x, FT=0x%x\n",
7904                                 bf_get(lpfc_sli_intf_sli_family,
7905                                        &phba->sli4_hba.sli_intf),
7906                                 bf_get(lpfc_sli_intf_slirev,
7907                                        &phba->sli4_hba.sli_intf),
7908                                 bf_get(lpfc_sli_intf_if_type,
7909                                        &phba->sli4_hba.sli_intf),
7910                                 bf_get(lpfc_sli_intf_sli_hint1,
7911                                        &phba->sli4_hba.sli_intf),
7912                                 bf_get(lpfc_sli_intf_sli_hint2,
7913                                        &phba->sli4_hba.sli_intf),
7914                                 bf_get(lpfc_sli_intf_func_type,
7915                                        &phba->sli4_hba.sli_intf));
7916                 /*
7917                  * Check for other Port errors during the initialization
7918                  * process.  Fail the load if the port did not come up
7919                  * correctly.
7920                  */
7921                 if_type = bf_get(lpfc_sli_intf_if_type,
7922                                  &phba->sli4_hba.sli_intf);
7923                 switch (if_type) {
7924                 case LPFC_SLI_INTF_IF_TYPE_0:
7925                         phba->sli4_hba.ue_mask_lo =
7926                               readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
7927                         phba->sli4_hba.ue_mask_hi =
7928                               readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
7929                         uerrlo_reg.word0 =
7930                               readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
7931                         uerrhi_reg.word0 =
7932                                 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
7933                         if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
7934                             (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
7935                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7936                                                 "1422 Unrecoverable Error "
7937                                                 "Detected during POST "
7938                                                 "uerr_lo_reg=0x%x, "
7939                                                 "uerr_hi_reg=0x%x, "
7940                                                 "ue_mask_lo_reg=0x%x, "
7941                                                 "ue_mask_hi_reg=0x%x\n",
7942                                                 uerrlo_reg.word0,
7943                                                 uerrhi_reg.word0,
7944                                                 phba->sli4_hba.ue_mask_lo,
7945                                                 phba->sli4_hba.ue_mask_hi);
7946                                 port_error = -ENODEV;
7947                         }
7948                         break;
7949                 case LPFC_SLI_INTF_IF_TYPE_2:
7950                 case LPFC_SLI_INTF_IF_TYPE_6:
7951                         /* Final checks.  The port status should be clean. */
7952                         if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
7953                                 &reg_data.word0) ||
7954                                 (bf_get(lpfc_sliport_status_err, &reg_data) &&
7955                                  !bf_get(lpfc_sliport_status_rn, &reg_data))) {
7956                                 phba->work_status[0] =
7957                                         readl(phba->sli4_hba.u.if_type2.
7958                                               ERR1regaddr);
7959                                 phba->work_status[1] =
7960                                         readl(phba->sli4_hba.u.if_type2.
7961                                               ERR2regaddr);
7962                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7963                                         "2888 Unrecoverable port error "
7964                                         "following POST: port status reg "
7965                                         "0x%x, port_smphr reg 0x%x, "
7966                                         "error 1=0x%x, error 2=0x%x\n",
7967                                         reg_data.word0,
7968                                         portsmphr_reg.word0,
7969                                         phba->work_status[0],
7970                                         phba->work_status[1]);
7971                                 port_error = -ENODEV;
7972                         }
7973                         break;
7974                 case LPFC_SLI_INTF_IF_TYPE_1:
7975                 default:
7976                         break;
7977                 }
7978         }
7979         return port_error;
7980 }
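
/*
 * Illustrative sketch (not driver code): the POST wait above is a classic
 * bounded poll - 3000 iterations x msleep(10) gives the ~30 second ceiling
 * named in the comment. The same shape as a standalone helper, with a
 * hypothetical readiness predicate (assumes <linux/delay.h>):
 */
static bool demo_ready(void)
{
        return false;           /* stand-in for a real hardware check */
}

static __maybe_unused int demo_poll_ready(void)
{
        int i;

        for (i = 0; i < 3000; i++) {    /* 3000 * 10 ms ~= 30 s */
                if (demo_ready())
                        return 0;
                msleep(10);
        }
        return -ENODEV;                 /* timed out */
}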
7981 
7982 /**
7983  * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
7984  * @phba: pointer to lpfc hba data structure.
7985  * @if_type:  The SLI4 interface type getting configured.
7986  *
7987  * This routine is invoked to set up SLI4 BAR0 PCI config space register
7988  * memory map.
7989  **/
7990 static void
7991 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
7992 {
7993         switch (if_type) {
7994         case LPFC_SLI_INTF_IF_TYPE_0:
7995                 phba->sli4_hba.u.if_type0.UERRLOregaddr =
7996                         phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
7997                 phba->sli4_hba.u.if_type0.UERRHIregaddr =
7998                         phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
7999                 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
8000                         phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
8001                 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
8002                         phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
8003                 phba->sli4_hba.SLIINTFregaddr =
8004                         phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
8005                 break;
8006         case LPFC_SLI_INTF_IF_TYPE_2:
8007                 phba->sli4_hba.u.if_type2.EQDregaddr =
8008                         phba->sli4_hba.conf_regs_memmap_p +
8009                                                 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
8010                 phba->sli4_hba.u.if_type2.ERR1regaddr =
8011                         phba->sli4_hba.conf_regs_memmap_p +
8012                                                 LPFC_CTL_PORT_ER1_OFFSET;
8013                 phba->sli4_hba.u.if_type2.ERR2regaddr =
8014                         phba->sli4_hba.conf_regs_memmap_p +
8015                                                 LPFC_CTL_PORT_ER2_OFFSET;
8016                 phba->sli4_hba.u.if_type2.CTRLregaddr =
8017                         phba->sli4_hba.conf_regs_memmap_p +
8018                                                 LPFC_CTL_PORT_CTL_OFFSET;
8019                 phba->sli4_hba.u.if_type2.STATUSregaddr =
8020                         phba->sli4_hba.conf_regs_memmap_p +
8021                                                 LPFC_CTL_PORT_STA_OFFSET;
8022                 phba->sli4_hba.SLIINTFregaddr =
8023                         phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
8024                 phba->sli4_hba.PSMPHRregaddr =
8025                         phba->sli4_hba.conf_regs_memmap_p +
8026                                                 LPFC_CTL_PORT_SEM_OFFSET;
8027                 phba->sli4_hba.RQDBregaddr =
8028                         phba->sli4_hba.conf_regs_memmap_p +
8029                                                 LPFC_ULP0_RQ_DOORBELL;
8030                 phba->sli4_hba.WQDBregaddr =
8031                         phba->sli4_hba.conf_regs_memmap_p +
8032                                                 LPFC_ULP0_WQ_DOORBELL;
8033                 phba->sli4_hba.CQDBregaddr =
8034                         phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
8035                 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
8036                 phba->sli4_hba.MQDBregaddr =
8037                         phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
8038                 phba->sli4_hba.BMBXregaddr =
8039                         phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
8040                 break;
8041         case LPFC_SLI_INTF_IF_TYPE_6:
8042                 phba->sli4_hba.u.if_type2.EQDregaddr =
8043                         phba->sli4_hba.conf_regs_memmap_p +
8044                                                 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
8045                 phba->sli4_hba.u.if_type2.ERR1regaddr =
8046                         phba->sli4_hba.conf_regs_memmap_p +
8047                                                 LPFC_CTL_PORT_ER1_OFFSET;
8048                 phba->sli4_hba.u.if_type2.ERR2regaddr =
8049                         phba->sli4_hba.conf_regs_memmap_p +
8050                                                 LPFC_CTL_PORT_ER2_OFFSET;
8051                 phba->sli4_hba.u.if_type2.CTRLregaddr =
8052                         phba->sli4_hba.conf_regs_memmap_p +
8053                                                 LPFC_CTL_PORT_CTL_OFFSET;
8054                 phba->sli4_hba.u.if_type2.STATUSregaddr =
8055                         phba->sli4_hba.conf_regs_memmap_p +
8056                                                 LPFC_CTL_PORT_STA_OFFSET;
8057                 phba->sli4_hba.PSMPHRregaddr =
8058                         phba->sli4_hba.conf_regs_memmap_p +
8059                                                 LPFC_CTL_PORT_SEM_OFFSET;
8060                 phba->sli4_hba.BMBXregaddr =
8061                         phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
8062                 break;
8063         case LPFC_SLI_INTF_IF_TYPE_1:
8064         default:
8065                 dev_printk(KERN_ERR, &phba->pcidev->dev,
8066                            "FATAL - unsupported SLI4 interface type - %d\n",
8067                            if_type);
8068                 break;
8069         }
8070 }
8071 
8072 /**
8073  * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
8074  * @phba: pointer to lpfc hba data structure.
 * @if_type: sli4 interface type registering the BAR1 memory map.
8075  *
8076  * This routine is invoked to set up SLI4 BAR1 register memory map.
8077  **/
8078 static void
8079 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
8080 {
8081         switch (if_type) {
8082         case LPFC_SLI_INTF_IF_TYPE_0:
8083                 phba->sli4_hba.PSMPHRregaddr =
8084                         phba->sli4_hba.ctrl_regs_memmap_p +
8085                         LPFC_SLIPORT_IF0_SMPHR;
8086                 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8087                         LPFC_HST_ISR0;
8088                 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8089                         LPFC_HST_IMR0;
8090                 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8091                         LPFC_HST_ISCR0;
8092                 break;
8093         case LPFC_SLI_INTF_IF_TYPE_6:
8094                 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8095                         LPFC_IF6_RQ_DOORBELL;
8096                 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8097                         LPFC_IF6_WQ_DOORBELL;
8098                 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8099                         LPFC_IF6_CQ_DOORBELL;
8100                 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8101                         LPFC_IF6_EQ_DOORBELL;
8102                 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8103                         LPFC_IF6_MQ_DOORBELL;
8104                 break;
8105         case LPFC_SLI_INTF_IF_TYPE_2:
8106         case LPFC_SLI_INTF_IF_TYPE_1:
8107         default:
8108                 dev_err(&phba->pcidev->dev,
8109                            "FATAL - unsupported SLI4 interface type - %d\n",
8110                            if_type);
8111                 break;
8112         }
8113 }
8114 
8115 /**
8116  * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
8117  * @phba: pointer to lpfc hba data structure.
8118  * @vf: virtual function number
8119  *
8120  * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
8121  * based on the given virtual function number, @vf.
8122  *
8123  * Return 0 if successful, otherwise -ENODEV.
8124  **/
8125 static int
8126 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
8127 {
8128         if (vf > LPFC_VIR_FUNC_MAX)
8129                 return -ENODEV;
8130 
8131         phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8132                                 vf * LPFC_VFR_PAGE_SIZE +
8133                                         LPFC_ULP0_RQ_DOORBELL);
8134         phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8135                                 vf * LPFC_VFR_PAGE_SIZE +
8136                                         LPFC_ULP0_WQ_DOORBELL);
8137         phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8138                                 vf * LPFC_VFR_PAGE_SIZE +
8139                                         LPFC_EQCQ_DOORBELL);
8140         phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
8141         phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8142                                 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
8143         phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8144                                 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
8145         return 0;
8146 }
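
/*
 * Illustrative arithmetic (not driver code): BAR2 carries one page of
 * doorbell registers per virtual function, so every address above is just
 * base + vf * LPFC_VFR_PAGE_SIZE + register offset. A standalone sketch of
 * that computation; the 4 KB page size and 0xA0 RQ doorbell offset are
 * assumptions for illustration only:
 */
static __maybe_unused void __iomem *
demo_vf_doorbell(void __iomem *bar2, unsigned int vf)
{
        const unsigned long vfr_page = 0x1000; /* assumed per-VF page size */
        const unsigned long rq_db = 0xa0;      /* assumed RQ doorbell offset */

        /* e.g. vf == 2: bar2 + 0x2000 + 0xa0 */
        return bar2 + vf * vfr_page + rq_db;
}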
8147 
8148 /**
8149  * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
8150  * @phba: pointer to lpfc hba data structure.
8151  *
8152  * This routine is invoked to create the bootstrap mailbox
8153  * region consistent with the SLI-4 interface spec.  This
8154  * routine allocates all memory necessary to communicate
8155  * mailbox commands to the port and sets up all alignment
8156  * needs.  No locks are expected to be held when calling
8157  * this routine.
8158  *
8159  * Return codes
8160  *      0 - successful
8161  *      -ENOMEM - could not allocate memory.
8162  **/
8163 static int
8164 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
8165 {
8166         uint32_t bmbx_size;
8167         struct lpfc_dmabuf *dmabuf;
8168         struct dma_address *dma_address;
8169         uint32_t pa_addr;
8170         uint64_t phys_addr;
8171 
8172         dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
8173         if (!dmabuf)
8174                 return -ENOMEM;
8175 
8176         /*
8177          * The bootstrap mailbox region consists of 2 parts
8178          * plus an alignment restriction of 16 bytes.
8179          */
8180         bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
8181         dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
8182                                           &dmabuf->phys, GFP_KERNEL);
8183         if (!dmabuf->virt) {
8184                 kfree(dmabuf);
8185                 return -ENOMEM;
8186         }
8187 
8188         /*
8189          * Initialize the bootstrap mailbox pointers now so that the register
8190          * operations are simple later.  The mailbox dma address is required
8191          * to be 16-byte aligned.  Also align the virtual memory as each
8192          * mailbox is copied into the bmbx mailbox region before issuing the
8193          * command to the port.
8194          */
8195         phba->sli4_hba.bmbx.dmabuf = dmabuf;
8196         phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
8197 
8198         phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
8199                                               LPFC_ALIGN_16_BYTE);
8200         phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
8201                                               LPFC_ALIGN_16_BYTE);
8202 
8203         /*
8204          * Set the high and low physical addresses now.  The SLI4 alignment
8205          * requirement is 16 bytes and the mailbox is posted to the port
8206          * as two 30-bit addresses.  The other data is a bit marking whether
8207          * the 30-bit address is the high or low address.
8208          * Upcast bmbx aphys to 64bits so shift instruction compiles
8209          * clean on 32 bit machines.
8210          */
8211         dma_address = &phba->sli4_hba.bmbx.dma_address;
8212         phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
8213         pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
8214         dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
8215                                            LPFC_BMBX_BIT1_ADDR_HI);
8216 
8217         pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
8218         dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
8219                                            LPFC_BMBX_BIT1_ADDR_LO);
8220         return 0;
8221 }
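
/*
 * Worked example (illustration only): with a 16-byte-aligned mailbox, the
 * low 4 bits of aphys are zero, so bits 4..33 form the low 30-bit half and
 * bits 34..63 the high half; each is shifted left by 2 with a tag bit
 * saying which half it is. The same arithmetic in isolation - the 0x2/0x0
 * tag values stand in for LPFC_BMBX_BIT1_ADDR_HI/LO and are assumptions
 * here:
 */
static __maybe_unused void demo_bmbx_split(uint64_t aphys,
                                           uint32_t *addr_hi,
                                           uint32_t *addr_lo)
{
        uint32_t hi = (uint32_t)((aphys >> 34) & 0x3fffffff);
        uint32_t lo = (uint32_t)((aphys >> 4) & 0x3fffffff);

        *addr_hi = (hi << 2) | 0x2;     /* assumed LPFC_BMBX_BIT1_ADDR_HI */
        *addr_lo = (lo << 2) | 0x0;     /* assumed LPFC_BMBX_BIT1_ADDR_LO */
}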
8222 
8223 /**
8224  * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
8225  * @phba: pointer to lpfc hba data structure.
8226  *
8227  * This routine is invoked to teardown the bootstrap mailbox
8228  * region and release all host resources. This routine requires
8229  * the caller to ensure all mailbox commands have been recovered, that no
8230  * additional mailbox commands are sent, and that interrupts are disabled
8231  * before calling this routine.
8232  *
8233  **/
8234 static void
8235 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
8236 {
8237         dma_free_coherent(&phba->pcidev->dev,
8238                           phba->sli4_hba.bmbx.bmbx_size,
8239                           phba->sli4_hba.bmbx.dmabuf->virt,
8240                           phba->sli4_hba.bmbx.dmabuf->phys);
8241 
8242         kfree(phba->sli4_hba.bmbx.dmabuf);
8243         memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
8244 }
8245 
8246 /**
8247  * lpfc_sli4_read_config - Get the config parameters.
8248  * @phba: pointer to lpfc hba data structure.
8249  *
8250  * This routine is invoked to read the configuration parameters from the HBA.
8251  * The configuration parameters are used to set the base and maximum values
8252  * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
8253  * allocation for the port.
8254  *
8255  * Return codes
8256  *      0 - successful
8257  *      -ENOMEM - No available memory
8258  *      -EIO - The mailbox failed to complete successfully.
8259  **/
8260 int
8261 lpfc_sli4_read_config(struct lpfc_hba *phba)
8262 {
8263         LPFC_MBOXQ_t *pmb;
8264         struct lpfc_mbx_read_config *rd_config;
8265         union  lpfc_sli4_cfg_shdr *shdr;
8266         uint32_t shdr_status, shdr_add_status;
8267         struct lpfc_mbx_get_func_cfg *get_func_cfg;
8268         struct lpfc_rsrc_desc_fcfcoe *desc;
8269         char *pdesc_0;
8270         uint16_t forced_link_speed;
8271         uint32_t if_type, qmin;
8272         int length, i, rc = 0, rc2;
8273 
8274         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8275         if (!pmb) {
8276                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8277                                 "2011 Unable to allocate memory for issuing "
8278                                 "SLI_CONFIG_SPECIAL mailbox command\n");
8279                 return -ENOMEM;
8280         }
8281 
8282         lpfc_read_config(phba, pmb);
8283 
8284         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
8285         if (rc != MBX_SUCCESS) {
8286                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8287                         "2012 Mailbox failed, mbxCmd x%x "
8288                         "READ_CONFIG, mbxStatus x%x\n",
8289                         bf_get(lpfc_mqe_command, &pmb->u.mqe),
8290                         bf_get(lpfc_mqe_status, &pmb->u.mqe));
8291                 rc = -EIO;
8292         } else {
8293                 rd_config = &pmb->u.mqe.un.rd_config;
8294                 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
8295                         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
8296                         phba->sli4_hba.lnk_info.lnk_tp =
8297                                 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
8298                         phba->sli4_hba.lnk_info.lnk_no =
8299                                 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
8300                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8301                                         "3081 lnk_type:%d, lnk_numb:%d\n",
8302                                         phba->sli4_hba.lnk_info.lnk_tp,
8303                                         phba->sli4_hba.lnk_info.lnk_no);
8304                 } else
8305                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8306                                         "3082 Mailbox (x%x) returned ldv:x0\n",
8307                                         bf_get(lpfc_mqe_command, &pmb->u.mqe));
8308                 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
8309                         phba->bbcredit_support = 1;
8310                         phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
8311                 }
8312 
8313                 phba->sli4_hba.conf_trunk =
8314                         bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
8315                 phba->sli4_hba.extents_in_use =
8316                         bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
8317                 phba->sli4_hba.max_cfg_param.max_xri =
8318                         bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
8319                 /* Reduce resource usage in kdump environment */
8320                 if (is_kdump_kernel() &&
8321                     phba->sli4_hba.max_cfg_param.max_xri > 512)
8322                         phba->sli4_hba.max_cfg_param.max_xri = 512;
8323                 phba->sli4_hba.max_cfg_param.xri_base =
8324                         bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
8325                 phba->sli4_hba.max_cfg_param.max_vpi =
8326                         bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
8327                 /* Limit the max we support */
8328                 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
8329                         phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
8330                 phba->sli4_hba.max_cfg_param.vpi_base =
8331                         bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
8332                 phba->sli4_hba.max_cfg_param.max_rpi =
8333                         bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
8334                 phba->sli4_hba.max_cfg_param.rpi_base =
8335                         bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
8336                 phba->sli4_hba.max_cfg_param.max_vfi =
8337                         bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
8338                 phba->sli4_hba.max_cfg_param.vfi_base =
8339                         bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
8340                 phba->sli4_hba.max_cfg_param.max_fcfi =
8341                         bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
8342                 phba->sli4_hba.max_cfg_param.max_eq =
8343                         bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
8344                 phba->sli4_hba.max_cfg_param.max_rq =
8345                         bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
8346                 phba->sli4_hba.max_cfg_param.max_wq =
8347                         bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
8348                 phba->sli4_hba.max_cfg_param.max_cq =
8349                         bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
8350                 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
8351                 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
8352                 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
8353                 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
8354                 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
8355                                 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
8356                 phba->max_vports = phba->max_vpi;
8357                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8358                                 "2003 cfg params Extents? %d "
8359                                 "XRI(B:%d M:%d), "
8360                                 "VPI(B:%d M:%d) "
8361                                 "VFI(B:%d M:%d) "
8362                                 "RPI(B:%d M:%d) "
8363                                 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n",
8364                                 phba->sli4_hba.extents_in_use,
8365                                 phba->sli4_hba.max_cfg_param.xri_base,
8366                                 phba->sli4_hba.max_cfg_param.max_xri,
8367                                 phba->sli4_hba.max_cfg_param.vpi_base,
8368                                 phba->sli4_hba.max_cfg_param.max_vpi,
8369                                 phba->sli4_hba.max_cfg_param.vfi_base,
8370                                 phba->sli4_hba.max_cfg_param.max_vfi,
8371                                 phba->sli4_hba.max_cfg_param.rpi_base,
8372                                 phba->sli4_hba.max_cfg_param.max_rpi,
8373                                 phba->sli4_hba.max_cfg_param.max_fcfi,
8374                                 phba->sli4_hba.max_cfg_param.max_eq,
8375                                 phba->sli4_hba.max_cfg_param.max_cq,
8376                                 phba->sli4_hba.max_cfg_param.max_wq,
8377                                 phba->sli4_hba.max_cfg_param.max_rq);
8378 
8379                 /*
8380                  * Calculate queue resources based on how
8381                  * many WQ/CQ/EQs are available.
8382                  */
8383                 qmin = phba->sli4_hba.max_cfg_param.max_wq;
8384                 if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
8385                         qmin = phba->sli4_hba.max_cfg_param.max_cq;
8386                 if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
8387                         qmin = phba->sli4_hba.max_cfg_param.max_eq;
8388                 /*
8389                  * What's left after this can go toward NVME / FCP.
8390                  * The minus 4 accounts for ELS, NVME LS, MBOX
8391                  * plus one extra. When configured for
8392                  * NVMET, FCP io channel WQs are not created.
8393                  */
8394                 qmin -= 4;
8395 
8396                 /* Check to see if there is enough for NVME */
8397                 if ((phba->cfg_irq_chann > qmin) ||
8398                     (phba->cfg_hdw_queue > qmin)) {
8399                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8400                                         "2005 Reducing Queues: "
8401                                         "WQ %d CQ %d EQ %d: min %d: "
8402                                         "IRQ %d HDWQ %d\n",
8403                                         phba->sli4_hba.max_cfg_param.max_wq,
8404                                         phba->sli4_hba.max_cfg_param.max_cq,
8405                                         phba->sli4_hba.max_cfg_param.max_eq,
8406                                         qmin, phba->cfg_irq_chann,
8407                                         phba->cfg_hdw_queue);
8408 
8409                         if (phba->cfg_irq_chann > qmin)
8410                                 phba->cfg_irq_chann = qmin;
8411                         if (phba->cfg_hdw_queue > qmin)
8412                                 phba->cfg_hdw_queue = qmin;
8413                 }
8414         }
8415 
8416         if (rc)
8417                 goto read_cfg_out;
8418 
8419         /* Update link speed if forced link speed is supported */
8420         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8421         if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
8422                 forced_link_speed =
8423                         bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
8424                 if (forced_link_speed) {
8425                         phba->hba_flag |= HBA_FORCED_LINK_SPEED;
8426 
8427                         switch (forced_link_speed) {
8428                         case LINK_SPEED_1G:
8429                                 phba->cfg_link_speed =
8430                                         LPFC_USER_LINK_SPEED_1G;
8431                                 break;
8432                         case LINK_SPEED_2G:
8433                                 phba->cfg_link_speed =
8434                                         LPFC_USER_LINK_SPEED_2G;
8435                                 break;
8436                         case LINK_SPEED_4G:
8437                                 phba->cfg_link_speed =
8438                                         LPFC_USER_LINK_SPEED_4G;
8439                                 break;
8440                         case LINK_SPEED_8G:
8441                                 phba->cfg_link_speed =
8442                                         LPFC_USER_LINK_SPEED_8G;
8443                                 break;
8444                         case LINK_SPEED_10G:
8445                                 phba->cfg_link_speed =
8446                                         LPFC_USER_LINK_SPEED_10G;
8447                                 break;
8448                         case LINK_SPEED_16G:
8449                                 phba->cfg_link_speed =
8450                                         LPFC_USER_LINK_SPEED_16G;
8451                                 break;
8452                         case LINK_SPEED_32G:
8453                                 phba->cfg_link_speed =
8454                                         LPFC_USER_LINK_SPEED_32G;
8455                                 break;
8456                         case LINK_SPEED_64G:
8457                                 phba->cfg_link_speed =
8458                                         LPFC_USER_LINK_SPEED_64G;
8459                                 break;
8460                         case 0xffff:
8461                                 phba->cfg_link_speed =
8462                                         LPFC_USER_LINK_SPEED_AUTO;
8463                                 break;
8464                         default:
8465                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8466                                                 "0047 Unrecognized link "
8467                                                 "speed : %d\n",
8468                                                 forced_link_speed);
8469                                 phba->cfg_link_speed =
8470                                         LPFC_USER_LINK_SPEED_AUTO;
8471                         }
8472                 }
8473         }
8474 
8475         /* Reset the DFT_HBA_Q_DEPTH to the max xri  */
8476         length = phba->sli4_hba.max_cfg_param.max_xri -
8477                         lpfc_sli4_get_els_iocb_cnt(phba);
8478         if (phba->cfg_hba_queue_depth > length) {
8479                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8480                                 "3361 HBA queue depth changed from %d to %d\n",
8481                                 phba->cfg_hba_queue_depth, length);
8482                 phba->cfg_hba_queue_depth = length;
8483         }
8484 
8485         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
8486             LPFC_SLI_INTF_IF_TYPE_2)
8487                 goto read_cfg_out;
8488 
8489         /* get the pf# and vf# for SLI4 if_type 2 port */
8490         length = (sizeof(struct lpfc_mbx_get_func_cfg) -
8491                   sizeof(struct lpfc_sli4_cfg_mhdr));
8492         lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
8493                          LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
8494                          length, LPFC_SLI4_MBX_EMBED);
8495 
8496         rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
8497         shdr = (union lpfc_sli4_cfg_shdr *)
8498                                 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
8499         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
8500         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
8501         if (rc2 || shdr_status || shdr_add_status) {
8502                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8503                                 "3026 Mailbox failed, mbxCmd x%x "
8504                                 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
8505                                 bf_get(lpfc_mqe_command, &pmb->u.mqe),
8506                                 bf_get(lpfc_mqe_status, &pmb->u.mqe));
8507                 goto read_cfg_out;
8508         }
8509 
8510         /* search for the fc_fcoe resource descriptor */
8511         get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
8512 
8513         pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
8514         desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
8515         length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
8516         if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
8517                 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
8518         else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
8519                 goto read_cfg_out;
8520 
8521         for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
8522                 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
8523                 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
8524                     bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
8525                         phba->sli4_hba.iov.pf_number =
8526                                 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
8527                         phba->sli4_hba.iov.vf_number =
8528                                 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
8529                         break;
8530                 }
8531         }
8532 
8533         if (i < LPFC_RSRC_DESC_MAX_NUM)
8534                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8535                                 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
8536                                 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
8537                                 phba->sli4_hba.iov.vf_number);
8538         else
8539                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8540                                 "3028 GET_FUNCTION_CONFIG: failed to find "
8541                                 "Resource Descriptor:x%x\n",
8542                                 LPFC_RSRC_DESC_TYPE_FCFCOE);
8543 
8544 read_cfg_out:
8545         mempool_free(pmb, phba->mbox_mem_pool);
8546         return rc;
8547 }
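
/*
 * Illustrative arithmetic (not driver code): the queue budget computed
 * above is min(max_wq, max_cq, max_eq) - 4, the 4 covering ELS, NVME LS,
 * MBOX plus one spare. For example, WQ 128 / CQ 120 / EQ 64 leaves 60
 * channels for IO. As a one-liner (min3() is a stock kernel helper):
 */
static __maybe_unused u32 demo_io_queue_budget(u32 max_wq, u32 max_cq,
                                               u32 max_eq)
{
        return min3(max_wq, max_cq, max_eq) - 4;
}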
8548 
8549 /**
8550  * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
8551  * @phba: pointer to lpfc hba data structure.
8552  *
8553  * This routine is invoked to setup the port-side endian order when
8554  * the port if_type is 0.  This routine has no function for other
8555  * if_types.
8556  *
8557  * Return codes
8558  *      0 - successful
8559  *      -ENOMEM - No available memory
8560  *      -EIO - The mailbox failed to complete successfully.
8561  **/
8562 static int
8563 lpfc_setup_endian_order(struct lpfc_hba *phba)
8564 {
8565         LPFC_MBOXQ_t *mboxq;
8566         uint32_t if_type, rc = 0;
8567         uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
8568                                       HOST_ENDIAN_HIGH_WORD1};
8569 
8570         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8571         switch (if_type) {
8572         case LPFC_SLI_INTF_IF_TYPE_0:
8573                 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
8574                                                        GFP_KERNEL);
8575                 if (!mboxq) {
8576                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8577                                         "0492 Unable to allocate memory for "
8578                                         "issuing SLI_CONFIG_SPECIAL mailbox "
8579                                         "command\n");
8580                         return -ENOMEM;
8581                 }
8582 
8583                 /*
8584                  * The SLI4_CONFIG_SPECIAL mailbox command requires the first
8585                  * two words to contain special data values and no other data.
8586                  */
8587                 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
8588                 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
8589                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8590                 if (rc != MBX_SUCCESS) {
8591                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8592                                         "0493 SLI_CONFIG_SPECIAL mailbox "
8593                                         "failed with status x%x\n",
8594                                         rc);
8595                         rc = -EIO;
8596                 }
8597                 mempool_free(mboxq, phba->mbox_mem_pool);
8598                 break;
8599         case LPFC_SLI_INTF_IF_TYPE_6:
8600         case LPFC_SLI_INTF_IF_TYPE_2:
8601         case LPFC_SLI_INTF_IF_TYPE_1:
8602         default:
8603                 break;
8604         }
8605         return rc;
8606 }
8607 
8608 /**
8609  * lpfc_sli4_queue_verify - Verify and update EQ counts
8610  * @phba: pointer to lpfc hba data structure.
8611  *
8612  * This routine is invoked to check the user settable queue counts for EQs.
8613  * After this routine is called the counts will be set to valid values that
8614  * adhere to the constraints of the system's interrupt vectors and the port's
8615  * queue resources.
8616  *
8617  * Return codes
8618  *      0 - successful
8619  *      -ENOMEM - No available memory
8620  **/
8621 static int
8622 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
8623 {
8624         /*
8625          * Sanity check for configured queue parameters against the run-time
8626          * device parameters
8627          */
8628 
8629         if (phba->nvmet_support) {
8630                 if (phba->cfg_irq_chann < phba->cfg_nvmet_mrq)
8631                         phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
8632                 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
8633                         phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
8634         }
8635 
8636         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8637                         "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
8638                         phba->cfg_hdw_queue, phba->cfg_irq_chann,
8639                         phba->cfg_nvmet_mrq);
8640 
8641         /* Get EQ depth from module parameter, fake the default for now */
8642         phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
8643         phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
8644 
8645         /* Get CQ depth from module parameter, fake the default for now */
8646         phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
8647         phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
8648         return 0;
8649 }
8650 
8651 static int
8652 lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
8653 {
8654         struct lpfc_queue *qdesc;
8655         u32 wqesize;
8656         int cpu;
8657 
8658         cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
8659         /* Create Fast Path IO CQs */
8660         if (phba->enab_exp_wqcq_pages)
8661                 /* Increase the CQ size when WQEs contain an embedded cdb */
8662                 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8663                                               phba->sli4_hba.cq_esize,
8664                                               LPFC_CQE_EXP_COUNT, cpu);
8665 
8666         else
8667                 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8668                                               phba->sli4_hba.cq_esize,
8669                                               phba->sli4_hba.cq_ecount, cpu);
8670         if (!qdesc) {
8671                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8672                         "0499 Failed allocate fast-path IO CQ (%d)\n", idx);
8673                 return 1;
8674         }
8675         qdesc->qe_valid = 1;
8676         qdesc->hdwq = idx;
8677         qdesc->chann = cpu;
8678         phba->sli4_hba.hdwq[idx].io_cq = qdesc;
8679 
8680         /* Create Fast Path IO WQs */
8681         if (phba->enab_exp_wqcq_pages) {
8682                 /* Increase the WQ size when WQEs contain an embedded cdb */
8683                 wqesize = (phba->fcp_embed_io) ?
8684                         LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
8685                 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8686                                               wqesize,
8687                                               LPFC_WQE_EXP_COUNT, cpu);
8688         } else
8689                 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8690                                               phba->sli4_hba.wq_esize,
8691                                               phba->sli4_hba.wq_ecount, cpu);
8692 
8693         if (!qdesc) {
8694                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8695                                 "0503 Failed allocate fast-path IO WQ (%d)\n",
8696                                 idx);
8697                 return 1;
8698         }
8699         qdesc->hdwq = idx;
8700         qdesc->chann = cpu;
8701         phba->sli4_hba.hdwq[idx].io_wq = qdesc;
8702         list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
8703         return 0;
8704 }
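
/*
 * Editor's note: the expanded-page path above trades memory for depth.
 * With embedded CDBs each WQE grows (to LPFC_WQE128_SIZE), so larger
 * pages keep the entry count up.  The helper below is invented for
 * illustration; the 4096/64 figures are examples, not the macros'
 * actual values.
 */
#if 0
static u32 entries_per_page(u32 page_size, u32 entry_size)
{
        return page_size / entry_size;  /* e.g. 4096 / 64 = 64 entries */
}
#endif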
8705 
8706 /**
8707  * lpfc_sli4_queue_create - Create all the SLI4 queues
8708  * @phba: pointer to lpfc hba data structure.
8709  *
8710  * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
8711  * operation. For each SLI4 queue type, the parameters such as queue entry
8712  * count (queue depth) shall be taken from the module parameter. For now,
8713  * we just use some constant number as place holder.
8714  * we just use some constant number as a placeholder.
8715  * Return codes
8716  *      0 - successful
8717  *      -ENOMEM - No available memory
8718  *      -EIO - The mailbox failed to complete successfully.
8719  **/
8720 int
8721 lpfc_sli4_queue_create(struct lpfc_hba *phba)
8722 {
8723         struct lpfc_queue *qdesc;
8724         int idx, cpu, eqcpu;
8725         struct lpfc_sli4_hdw_queue *qp;
8726         struct lpfc_vector_map_info *cpup;
8727         struct lpfc_vector_map_info *eqcpup;
8728         struct lpfc_eq_intr_info *eqi;
8729 
8730         /*
8731          * Create HBA Record arrays.
8732          * Both NVME and FCP will share that same vectors / EQs
8733          */
8734         phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
8735         phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
8736         phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
8737         phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
8738         phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
8739         phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
8740         phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
8741         phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
8742         phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
8743         phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
8744 
8745         if (!phba->sli4_hba.hdwq) {
8746                 phba->sli4_hba.hdwq = kcalloc(
8747                         phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
8748                         GFP_KERNEL);
8749                 if (!phba->sli4_hba.hdwq) {
8750                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8751                                         "6427 Failed allocate memory for "
8752                                         "fast-path Hardware Queue array\n");
8753                         goto out_error;
8754                 }
8755                 /* Prepare hardware queues to take IO buffers */
8756                 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
8757                         qp = &phba->sli4_hba.hdwq[idx];
8758                         spin_lock_init(&qp->io_buf_list_get_lock);
8759                         spin_lock_init(&qp->io_buf_list_put_lock);
8760                         INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
8761                         INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
8762                         qp->get_io_bufs = 0;
8763                         qp->put_io_bufs = 0;
8764                         qp->total_io_bufs = 0;
8765                         spin_lock_init(&qp->abts_io_buf_list_lock);
8766                         INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
8767                         qp->abts_scsi_io_bufs = 0;
8768                         qp->abts_nvme_io_bufs = 0;
8769                         INIT_LIST_HEAD(&qp->sgl_list);
8770                         INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
8771                         spin_lock_init(&qp->hdwq_lock);
8772                 }
8773         }
8774 
8775         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8776                 if (phba->nvmet_support) {
8777                         phba->sli4_hba.nvmet_cqset = kcalloc(
8778                                         phba->cfg_nvmet_mrq,
8779                                         sizeof(struct lpfc_queue *),
8780                                         GFP_KERNEL);
8781                         if (!phba->sli4_hba.nvmet_cqset) {
8782                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8783                                         "3121 Failed allocate memory for "
8784                                         "fast-path CQ set array\n");
8785                                 goto out_error;
8786                         }
8787                         phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
8788                                         phba->cfg_nvmet_mrq,
8789                                         sizeof(struct lpfc_queue *),
8790                                         GFP_KERNEL);
8791                         if (!phba->sli4_hba.nvmet_mrq_hdr) {
8792                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8793                                         "3122 Failed allocate memory for "
8794                                         "fast-path RQ set hdr array\n");
8795                                 goto out_error;
8796                         }
8797                         phba->sli4_hba.nvmet_mrq_data = kcalloc(
8798                                         phba->cfg_nvmet_mrq,
8799                                         sizeof(struct lpfc_queue *),
8800                                         GFP_KERNEL);
8801                         if (!phba->sli4_hba.nvmet_mrq_data) {
8802                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8803                                         "3124 Failed allocate memory for "
8804                                         "fast-path RQ set data array\n");
8805                                 goto out_error;
8806                         }
8807                 }
8808         }
8809 
8810         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
8811 
8812         /* Create HBA Event Queues (EQs) */
8813         for_each_present_cpu(cpu) {
8814                 /* We only want to create 1 EQ per vector, even though
8815                  * multiple CPUs might be using that vector, so only
8816                  * select the CPUs that are marked LPFC_CPU_FIRST_IRQ.
8817                  */
8818                 cpup = &phba->sli4_hba.cpu_map[cpu];
8819                 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
8820                         continue;
8821 
8822                 /* Get a ptr to the Hardware Queue associated with this CPU */
8823                 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
8824 
8825                 /* Allocate an EQ */
8826                 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8827                                               phba->sli4_hba.eq_esize,
8828                                               phba->sli4_hba.eq_ecount, cpu);
8829                 if (!qdesc) {
8830                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8831                                         "0497 Failed allocate EQ (%d)\n",
8832                                         cpup->hdwq);
8833                         goto out_error;
8834                 }
8835                 qdesc->qe_valid = 1;
8836                 qdesc->hdwq = cpup->hdwq;
8837                 qdesc->chann = cpu; /* First CPU this EQ is affinitized to */
8838                 qdesc->last_cpu = qdesc->chann;
8839 
8840                 /* Save the allocated EQ in the Hardware Queue */
8841                 qp->hba_eq = qdesc;
8842 
8843                 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
8844                 list_add(&qdesc->cpu_list, &eqi->list);
8845         }
8846 
8847         /* Now we need to populate the other Hardware Queues, which share
8848          * an IRQ vector, with the associated EQ ptr.
8849          */
8850         for_each_present_cpu(cpu) {
8851                 cpup = &phba->sli4_hba.cpu_map[cpu];
8852 
8853                 /* Check for EQ already allocated in previous loop */
8854                 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
8855                         continue;
8856 
8857                 /* Check for multiple CPUs per hdwq */
8858                 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
8859                 if (qp->hba_eq)
8860                         continue;
8861 
8862                 /* We need to share an EQ for this hdwq */
8863                 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
8864                 eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
8865                 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
8866         }
8867 
8868         /* Allocate IO Path SLI4 CQ/WQs */
8869         for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
8870                 if (lpfc_alloc_io_wq_cq(phba, idx))
8871                         goto out_error;
8872         }
8873 
8874         if (phba->nvmet_support) {
8875                 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
8876                         cpu = lpfc_find_cpu_handle(phba, idx,
8877                                                    LPFC_FIND_BY_HDWQ);
8878                         qdesc = lpfc_sli4_queue_alloc(phba,
8879                                                       LPFC_DEFAULT_PAGE_SIZE,
8880                                                       phba->sli4_hba.cq_esize,
8881                                                       phba->sli4_hba.cq_ecount,
8882                                                       cpu);
8883                         if (!qdesc) {
8884                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8885                                                 "3142 Failed allocate NVME "
8886                                                 "CQ Set (%d)\n", idx);
8887                                 goto out_error;
8888                         }
8889                         qdesc->qe_valid = 1;
8890                         qdesc->hdwq = idx;
8891                         qdesc->chann = cpu;
8892                         phba->sli4_hba.nvmet_cqset[idx] = qdesc;
8893                 }
8894         }
8895 
8896         /*
8897          * Create Slow Path Completion Queues (CQs)
8898          */
8899 
8900         cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
8901         /* Create slow-path Mailbox Command Complete Queue */
8902         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8903                                       phba->sli4_hba.cq_esize,
8904                                       phba->sli4_hba.cq_ecount, cpu);
8905         if (!qdesc) {
8906                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8907                                 "0500 Failed allocate slow-path mailbox CQ\n");
8908                 goto out_error;
8909         }
8910         qdesc->qe_valid = 1;
8911         phba->sli4_hba.mbx_cq = qdesc;
8912 
8913         /* Create slow-path ELS Complete Queue */
8914         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8915                                       phba->sli4_hba.cq_esize,
8916                                       phba->sli4_hba.cq_ecount, cpu);
8917         if (!qdesc) {
8918                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8919                                 "0501 Failed allocate slow-path ELS CQ\n");
8920                 goto out_error;
8921         }
8922         qdesc->qe_valid = 1;
8923         qdesc->chann = cpu;
8924         phba->sli4_hba.els_cq = qdesc;
8925 
8927         /*
8928          * Create Slow Path Work Queues (WQs)
8929          */
8930 
8931         /* Create Mailbox Command Queue */
8932 
8933         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8934                                       phba->sli4_hba.mq_esize,
8935                                       phba->sli4_hba.mq_ecount, cpu);
8936         if (!qdesc) {
8937                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8938                                 "0505 Failed allocate slow-path MQ\n");
8939                 goto out_error;
8940         }
8941         qdesc->chann = cpu;
8942         phba->sli4_hba.mbx_wq = qdesc;
8943 
8944         /*
8945          * Create ELS Work Queues
8946          */
8947 
8948         /* Create slow-path ELS Work Queue */
8949         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8950                                       phba->sli4_hba.wq_esize,
8951                                       phba->sli4_hba.wq_ecount, cpu);
8952         if (!qdesc) {
8953                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8954                                 "0504 Failed allocate slow-path ELS WQ\n");
8955                 goto out_error;
8956         }
8957         qdesc->chann = cpu;
8958         phba->sli4_hba.els_wq = qdesc;
8959         list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
8960 
8961         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8962                 /* Create NVME LS Complete Queue */
8963                 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8964                                               phba->sli4_hba.cq_esize,
8965                                               phba->sli4_hba.cq_ecount, cpu);
8966                 if (!qdesc) {
8967                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8968                                         "6079 Failed allocate NVME LS CQ\n");
8969                         goto out_error;
8970                 }
8971                 qdesc->chann = cpu;
8972                 qdesc->qe_valid = 1;
8973                 phba->sli4_hba.nvmels_cq = qdesc;
8974 
8975                 /* Create NVME LS Work Queue */
8976                 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8977                                               phba->sli4_hba.wq_esize,
8978                                               phba->sli4_hba.wq_ecount, cpu);
8979                 if (!qdesc) {
8980                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8981                                         "6080 Failed allocate NVME LS WQ\n");
8982                         goto out_error;
8983                 }
8984                 qdesc->chann = cpu;
8985                 phba->sli4_hba.nvmels_wq = qdesc;
8986                 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
8987         }
8988 
8989         /*
8990          * Create Receive Queue (RQ)
8991          */
8992 
8993         /* Create Receive Queue for header */
8994         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8995                                       phba->sli4_hba.rq_esize,
8996                                       phba->sli4_hba.rq_ecount, cpu);
8997         if (!qdesc) {
8998                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8999                                 "0506 Failed allocate receive HRQ\n");
9000                 goto out_error;
9001         }
9002         phba->sli4_hba.hdr_rq = qdesc;
9003 
9004         /* Create Receive Queue for data */
9005         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9006                                       phba->sli4_hba.rq_esize,
9007                                       phba->sli4_hba.rq_ecount, cpu);
9008         if (!qdesc) {
9009                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9010                                 "0507 Failed allocate receive DRQ\n");
9011                 goto out_error;
9012         }
9013         phba->sli4_hba.dat_rq = qdesc;
9014 
9015         if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
9016             phba->nvmet_support) {
9017                 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
9018                         cpu = lpfc_find_cpu_handle(phba, idx,
9019                                                    LPFC_FIND_BY_HDWQ);
9020                         /* Create NVMET Receive Queue for header */
9021                         qdesc = lpfc_sli4_queue_alloc(phba,
9022                                                       LPFC_DEFAULT_PAGE_SIZE,
9023                                                       phba->sli4_hba.rq_esize,
9024                                                       LPFC_NVMET_RQE_DEF_COUNT,
9025                                                       cpu);
9026                         if (!qdesc) {
9027                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9028                                                 "3146 Failed allocate "
9029                                                 "receive HRQ\n");
9030                                 goto out_error;
9031                         }
9032                         qdesc->hdwq = idx;
9033                         phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
9034 
9035                         /* Only needed for header of RQ pair */
9036                         qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
9037                                                    GFP_KERNEL,
9038                                                    cpu_to_node(cpu));
9039                         if (qdesc->rqbp == NULL) {
9040                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9041                                                 "6131 Failed allocate "
9042                                                 "Header RQBP\n");
9043                                 goto out_error;
9044                         }
9045 
9046                         /* Put list in known state in case driver load fails. */
9047                         INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);
9048 
9049                         /* Create NVMET Receive Queue for data */
9050                         qdesc = lpfc_sli4_queue_alloc(phba,
9051                                                       LPFC_DEFAULT_PAGE_SIZE,
9052                                                       phba->sli4_hba.rq_esize,
9053                                                       LPFC_NVMET_RQE_DEF_COUNT,
9054                                                       cpu);
9055                         if (!qdesc) {
9056                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9057                                                 "3156 Failed allocate "
9058                                                 "receive DRQ\n");
9059                                 goto out_error;
9060                         }
9061                         qdesc->hdwq = idx;
9062                         phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
9063                 }
9064         }
9065 
9066         /* Clear NVME stats */
9067         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9068                 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9069                         memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
9070                                sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
9071                 }
9072         }
9073 
9074         /* Clear SCSI stats */
9075         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
9076                 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9077                         memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
9078                                sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
9079                 }
9080         }
9081 
9082         return 0;
9083 
9084 out_error:
9085         lpfc_sli4_queue_destroy(phba);
9086         return -ENOMEM;
9087 }
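
/*
 * Editor's note: an illustrative sketch, not part of the driver, of the
 * typical lifecycle of the SLI4 queues using only routines defined in
 * this file.  Error handling is elided, and lpfc_sli4_queue_verify() is
 * file-local, so this pairing is conceptual rather than callable as-is.
 */
#if 0
static void lpfc_sli4_queue_lifecycle_sketch(struct lpfc_hba *phba)
{
        lpfc_sli4_queue_verify(phba);   /* clamp user-settable counts */
        lpfc_sli4_queue_create(phba);   /* allocate host-side queue memory */
        lpfc_sli4_queue_setup(phba);    /* create the queues on the port */
        /* ... I/O runs ... */
        lpfc_sli4_queue_unset(phba);    /* destroy the queues on the port */
        lpfc_sli4_queue_destroy(phba);  /* free host-side queue memory */
}
#endif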
9088 
9089 static inline void
9090 __lpfc_sli4_release_queue(struct lpfc_queue **qp)
9091 {
9092         if (*qp != NULL) {
9093                 lpfc_sli4_queue_free(*qp);
9094                 *qp = NULL;
9095         }
9096 }
9097 
9098 static inline void
9099 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
9100 {
9101         int idx;
9102 
9103         if (*qs == NULL)
9104                 return;
9105 
9106         for (idx = 0; idx < max; idx++)
9107                 __lpfc_sli4_release_queue(&(*qs)[idx]);
9108 
9109         kfree(*qs);
9110         *qs = NULL;
9111 }
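
/*
 * Editor's note: an illustrative sketch, not driver code.  The release
 * helpers above take a pointer-to-pointer so the caller's reference is
 * cleared as a side effect, which makes a second release harmless.
 */
#if 0
static void release_twice_sketch(struct lpfc_hba *phba)
{
        __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
        __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);      /* no-op */
}
#endif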
9112 
9113 static inline void
9114 lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
9115 {
9116         struct lpfc_sli4_hdw_queue *hdwq;
9117         struct lpfc_queue *eq;
9118         uint32_t idx;
9119 
9120         hdwq = phba->sli4_hba.hdwq;
9121 
9122         /* Loop thru all Hardware Queues */
9123         for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9124                 /* Free the CQ/WQ corresponding to the Hardware Queue */
9125                 lpfc_sli4_queue_free(hdwq[idx].io_cq);
9126                 lpfc_sli4_queue_free(hdwq[idx].io_wq);
9127                 hdwq[idx].io_cq = NULL;
9128                 hdwq[idx].io_wq = NULL;
9129                 if (phba->cfg_xpsgl && !phba->nvmet_support)
9130                         lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
9131                 lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
9132         }
9133         /* Loop thru all IRQ vectors */
9134         for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
9135                 /* Free the EQ corresponding to the IRQ vector */
9136                 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
9137                 lpfc_sli4_queue_free(eq);
9138                 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
9139         }
9140 }
9141 
9142 /**
9143  * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
9144  * @phba: pointer to lpfc hba data structure.
9145  *
9146  * This routine is invoked to release all the SLI4 queues used for the
9147  * FCoE HBA operation. This routine has no return value.
9153  **/
9154 void
9155 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
9156 {
9157         /*
9158          * Set FREE_INIT before beginning to free the queues.
9159          * Wait until the current users of the queues have acknowledged
9160          * by clearing FREE_WAIT before freeing anything.
9161          */
9162         spin_lock_irq(&phba->hbalock);
9163         phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
9164         while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
9165                 spin_unlock_irq(&phba->hbalock);
9166                 msleep(20);
9167                 spin_lock_irq(&phba->hbalock);
9168         }
9169         spin_unlock_irq(&phba->hbalock);
9170 
9171         lpfc_sli4_cleanup_poll_list(phba);
9172 
9173         /* Release HBA eqs */
9174         if (phba->sli4_hba.hdwq)
9175                 lpfc_sli4_release_hdwq(phba);
9176 
9177         if (phba->nvmet_support) {
9178                 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
9179                                          phba->cfg_nvmet_mrq);
9180 
9181                 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
9182                                          phba->cfg_nvmet_mrq);
9183                 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
9184                                          phba->cfg_nvmet_mrq);
9185         }
9186 
9187         /* Release mailbox command work queue */
9188         __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
9189 
9190         /* Release ELS work queue */
9191         __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
9192 
9193         /* Release NVME LS work queue */
9194         __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
9195 
9196         /* Release unsolicited receive queue */
9197         __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
9198         __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
9199 
9200         /* Release ELS complete queue */
9201         __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
9202 
9203         /* Release NVME LS complete queue */
9204         __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
9205 
9206         /* Release mailbox command complete queue */
9207         __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
9208 
9209         /* Everything on this list has been freed */
9210         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
9211 
9212         /* Done with freeing the queues */
9213         spin_lock_irq(&phba->hbalock);
9214         phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
9215         spin_unlock_irq(&phba->hbalock);
9216 }
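
/*
 * Editor's note: an illustrative model of the FREE_INIT/FREE_WAIT
 * handshake above, seen from a queue user's side.  The function itself
 * is hypothetical; the flags and hbalock usage mirror the driver's.
 */
#if 0
static void lpfc_queue_user_sketch(struct lpfc_hba *phba)
{
        spin_lock_irq(&phba->hbalock);
        if (phba->sli.sli_flag & LPFC_QUEUE_FREE_INIT) {
                /* Teardown already started -- stay away from the queues */
                spin_unlock_irq(&phba->hbalock);
                return;
        }
        phba->sli.sli_flag |= LPFC_QUEUE_FREE_WAIT;     /* pin the queues */
        spin_unlock_irq(&phba->hbalock);

        /* ... safely reference the queues here ... */

        spin_lock_irq(&phba->hbalock);
        phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_WAIT;    /* let free proceed */
        spin_unlock_irq(&phba->hbalock);
}
#endif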
9217 
9218 int
9219 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
9220 {
9221         struct lpfc_rqb *rqbp;
9222         struct lpfc_dmabuf *h_buf;
9223         struct rqb_dmabuf *rqb_buffer;
9224 
9225         rqbp = rq->rqbp;
9226         while (!list_empty(&rqbp->rqb_buffer_list)) {
9227                 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
9228                                  struct lpfc_dmabuf, list);
9229 
9230                 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
9231                 (rqbp->rqb_free_buffer)(phba, rqb_buffer);
9232                 rqbp->buffer_count--;
9233         }
9234         return 1;
9235 }
9236 
9237 static int
9238 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
9239         struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
9240         int qidx, uint32_t qtype)
9241 {
9242         struct lpfc_sli_ring *pring;
9243         int rc;
9244 
9245         if (!eq || !cq || !wq) {
9246                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9247                         "6085 Fast-path %s (%d) not allocated\n",
9248                         ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
9249                 return -ENOMEM;
9250         }
9251 
9252         /* Create the CQ first */
9253         rc = lpfc_cq_create(phba, cq, eq,
9254                         (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
9255         if (rc) {
9256                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9257                         "6086 Failed setup of CQ (%d), rc = 0x%x\n",
9258                         qidx, (uint32_t)rc);
9259                 return rc;
9260         }
9261 
9262         if (qtype != LPFC_MBOX) {
9263                 /* Setup cq_map for fast lookup */
9264                 if (cq_map)
9265                         *cq_map = cq->queue_id;
9266 
9267                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9268                         "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
9269                         qidx, cq->queue_id, qidx, eq->queue_id);
9270 
9271                 /* create the wq */
9272                 rc = lpfc_wq_create(phba, wq, cq, qtype);
9273                 if (rc) {
9274                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9275                                 "4618 Failed setup of fastpath WQ (%d), rc = 0x%x\n",
9276                                 qidx, (uint32_t)rc);
9277                         /* no need to tear down cq - caller will do so */
9278                         return rc;
9279                 }
9280 
9281                 /* Bind this CQ/WQ to the NVME ring */
9282                 pring = wq->pring;
9283                 pring->sli.sli4.wqp = (void *)wq;
9284                 cq->pring = pring;
9285 
9286                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9287                         "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
9288                         qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
9289         } else {
9290                 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
9291                 if (rc) {
9292                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9293                                 "0539 Failed setup of slow-path MQ: "
9294                                 "rc = 0x%x\n", rc);
9295                         /* no need to tear down cq - caller will do so */
9296                         return rc;
9297                 }
9298 
9299                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9300                         "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
9301                         phba->sli4_hba.mbx_wq->queue_id,
9302                         phba->sli4_hba.mbx_cq->queue_id);
9303         }
9304 
9305         return 0;
9306 }
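
/*
 * Editor's note: lpfc_create_wq_cq() above covers both shapes -- for
 * qtype LPFC_MBOX it creates an MQ under the CQ, while for any other
 * qtype it creates a WQ and, when cq_map is provided, records the CQ id
 * for fast lookup.  The calls in lpfc_sli4_queue_setup() below show
 * both uses.
 */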
9307 
9308 /**
9309  * lpfc_setup_cq_lookup - Setup the CQ lookup table
9310  * @phba: pointer to lpfc hba data structure.
9311  *
9312  * This routine will populate the cq_lookup table by all
9313  * available CQ queue_id's.
9314  **/
9315 static void
9316 lpfc_setup_cq_lookup(struct lpfc_hba *phba)
9317 {
9318         struct lpfc_queue *eq, *childq;
9319         int qidx;
9320 
9321         memset(phba->sli4_hba.cq_lookup, 0,
9322                (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
9323         /* Loop thru all IRQ vectors */
9324         for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
9325                 /* Get the EQ corresponding to the IRQ vector */
9326                 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
9327                 if (!eq)
9328                         continue;
9329                 /* Loop through all CQs associated with that EQ */
9330                 list_for_each_entry(childq, &eq->child_list, list) {
9331                         if (childq->queue_id > phba->sli4_hba.cq_max)
9332                                 continue;
9333                         if (childq->subtype == LPFC_IO)
9334                                 phba->sli4_hba.cq_lookup[childq->queue_id] =
9335                                         childq;
9336                 }
9337         }
9338 }
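
/*
 * Editor's note: an illustrative sketch, not driver code.  With the
 * table populated, an interrupt handler can resolve the CQ for a given
 * completion-queue id in O(1) instead of walking each EQ's child list;
 * 'cqid' stands in for an id pulled from an EQE.
 */
#if 0
static struct lpfc_queue *cq_from_eqe_sketch(struct lpfc_hba *phba, u16 cqid)
{
        if (phba->sli4_hba.cq_lookup && cqid <= phba->sli4_hba.cq_max)
                return phba->sli4_hba.cq_lookup[cqid];
        return NULL;
}
#endif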
9339 
9340 /**
9341  * lpfc_sli4_queue_setup - Set up all the SLI4 queues
9342  * @phba: pointer to lpfc hba data structure.
9343  *
9344  * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
9345  * operation.
9346  *
9347  * Return codes
9348  *      0 - successful
9349  *      -ENOMEM - No available memory
9350  *      -EIO - The mailbox failed to complete successfully.
9351  **/
9352 int
9353 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
9354 {
9355         uint32_t shdr_status, shdr_add_status;
9356         union lpfc_sli4_cfg_shdr *shdr;
9357         struct lpfc_vector_map_info *cpup;
9358         struct lpfc_sli4_hdw_queue *qp;
9359         LPFC_MBOXQ_t *mboxq;
9360         int qidx, cpu;
9361         uint32_t length, usdelay;
9362         int rc = -ENOMEM;
9363 
9364         /* Check for dual-ULP support */
9365         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9366         if (!mboxq) {
9367                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9368                                 "3249 Unable to allocate memory for "
9369                                 "QUERY_FW_CFG mailbox command\n");
9370                 return -ENOMEM;
9371         }
9372         length = (sizeof(struct lpfc_mbx_query_fw_config) -
9373                   sizeof(struct lpfc_sli4_cfg_mhdr));
9374         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
9375                          LPFC_MBOX_OPCODE_QUERY_FW_CFG,
9376                          length, LPFC_SLI4_MBX_EMBED);
9377 
9378         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9379 
9380         shdr = (union lpfc_sli4_cfg_shdr *)
9381                         &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
9382         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9383         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9384         if (shdr_status || shdr_add_status || rc) {
9385                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9386                                 "3250 QUERY_FW_CFG mailbox failed with status "
9387                                 "x%x add_status x%x, mbx status x%x\n",
9388                                 shdr_status, shdr_add_status, rc);
9389                 if (rc != MBX_TIMEOUT)
9390                         mempool_free(mboxq, phba->mbox_mem_pool);
9391                 rc = -ENXIO;
9392                 goto out_error;
9393         }
9394 
9395         phba->sli4_hba.fw_func_mode =
9396                         mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
9397         phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
9398         phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
9399         phba->sli4_hba.physical_port =
9400                         mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
9401         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9402                         "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
9403                         "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
9404                         phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
9405 
9406         if (rc != MBX_TIMEOUT)
9407                 mempool_free(mboxq, phba->mbox_mem_pool);
9408 
9409         /*
9410          * Set up HBA Event Queues (EQs)
9411          */
9412         qp = phba->sli4_hba.hdwq;
9413 
9414         /* Set up HBA event queue */
9415         if (!qp) {
9416                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9417                                 "3147 Fast-path EQs not allocated\n");
9418                 rc = -ENOMEM;
9419                 goto out_error;
9420         }
9421 
9422         /* Loop thru all IRQ vectors */
9423         for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
9424                 /* Create HBA Event Queues (EQs) in order */
9425                 for_each_present_cpu(cpu) {
9426                         cpup = &phba->sli4_hba.cpu_map[cpu];
9427 
9428                         /* Look for the CPU that's using that vector with
9429                          * LPFC_CPU_FIRST_IRQ set.
9430                          */
9431                         if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
9432                                 continue;
9433                         if (qidx != cpup->eq)
9434                                 continue;
9435 
9436                         /* Create an EQ for that vector */
9437                         rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
9438                                             phba->cfg_fcp_imax);
9439                         if (rc) {
9440                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9441                                                 "0523 Failed setup of fast-path"
9442                                                 " EQ (%d), rc = 0x%x\n",
9443                                                 cpup->eq, (uint32_t)rc);
9444                                 goto out_destroy;
9445                         }
9446 
9447                         /* Save the EQ for that vector in the hba_eq_hdl */
9448                         phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
9449                                 qp[cpup->hdwq].hba_eq;
9450 
9451                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9452                                         "2584 HBA EQ setup: queue[%d]-id=%d\n",
9453                                         cpup->eq,
9454                                         qp[cpup->hdwq].hba_eq->queue_id);
9455                 }
9456         }
9457 
9458         /* Loop thru all Hardware Queues */
9459         for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
9460                 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
9461                 cpup = &phba->sli4_hba.cpu_map[cpu];
9462 
9463                 /* Create the CQ/WQ corresponding to the Hardware Queue */
9464                 rc = lpfc_create_wq_cq(phba,
9465                                        phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
9466                                        qp[qidx].io_cq,
9467                                        qp[qidx].io_wq,
9468                                        &phba->sli4_hba.hdwq[qidx].io_cq_map,
9469                                        qidx,
9470                                        LPFC_IO);
9471                 if (rc) {
9472                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9473                                         "0535 Failed to setup fastpath "
9474                                         "IO WQ/CQ (%d), rc = 0x%x\n",
9475                                         qidx, (uint32_t)rc);
9476                         goto out_destroy;
9477                 }
9478         }
9479 
9480         /*
9481          * Set up Slow Path Complete Queues (CQs)
9482          */
9483 
9484         /* Set up slow-path MBOX CQ/MQ */
9485 
9486         if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
9487                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9488                                 "0528 %s not allocated\n",
9489                                 phba->sli4_hba.mbx_cq ?
9490                                 "Mailbox WQ" : "Mailbox CQ");
9491                 rc = -ENOMEM;
9492                 goto out_destroy;
9493         }
9494 
9495         rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9496                                phba->sli4_hba.mbx_cq,
9497                                phba->sli4_hba.mbx_wq,
9498                                NULL, 0, LPFC_MBOX);
9499         if (rc) {
9500                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9501                         "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
9502                         (uint32_t)rc);
9503                 goto out_destroy;
9504         }
9505         if (phba->nvmet_support) {
9506                 if (!phba->sli4_hba.nvmet_cqset) {
9507                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9508                                         "3165 Fast-path NVME CQ Set "
9509                                         "array not allocated\n");
9510                         rc = -ENOMEM;
9511                         goto out_destroy;
9512                 }
9513                 if (phba->cfg_nvmet_mrq > 1) {
9514                         rc = lpfc_cq_create_set(phba,
9515                                         phba->sli4_hba.nvmet_cqset,
9516                                         qp,
9517                                         LPFC_WCQ, LPFC_NVMET);
9518                         if (rc) {
9519                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9520                                                 "3164 Failed setup of NVME CQ "
9521                                                 "Set, rc = 0x%x\n",
9522                                                 (uint32_t)rc);
9523                                 goto out_destroy;
9524                         }
9525                 } else {
9526                         /* Set up NVMET Receive Complete Queue */
9527                         rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
9528                                             qp[0].hba_eq,
9529                                             LPFC_WCQ, LPFC_NVMET);
9530                         if (rc) {
9531                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9532                                                 "6089 Failed setup NVMET CQ: "
9533                                                 "rc = 0x%x\n", (uint32_t)rc);
9534                                 goto out_destroy;
9535                         }
9536                         phba->sli4_hba.nvmet_cqset[0]->chann = 0;
9537 
9538                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9539                                         "6090 NVMET CQ setup: cq-id=%d, "
9540                                         "parent eq-id=%d\n",
9541                                         phba->sli4_hba.nvmet_cqset[0]->queue_id,
9542                                         qp[0].hba_eq->queue_id);
9543                 }
9544         }
9545 
9546         /* Set up slow-path ELS WQ/CQ */
9547         if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
9548                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9549                                 "0530 ELS %s not allocated\n",
9550                                 phba->sli4_hba.els_cq ? "WQ" : "CQ");
9551                 rc = -ENOMEM;
9552                 goto out_destroy;
9553         }
9554         rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9555                                phba->sli4_hba.els_cq,
9556                                phba->sli4_hba.els_wq,
9557                                NULL, 0, LPFC_ELS);
9558         if (rc) {
9559                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9560                                 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
9561                                 (uint32_t)rc);
9562                 goto out_destroy;
9563         }
9564         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9565                         "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
9566                         phba->sli4_hba.els_wq->queue_id,
9567                         phba->sli4_hba.els_cq->queue_id);
9568 
9569         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9570                 /* Set up NVME LS Complete Queue */
9571                 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
9572                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9573                                         "6091 LS %s not allocated\n",
9574                                         phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
9575                         rc = -ENOMEM;
9576                         goto out_destroy;
9577                 }
9578                 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9579                                        phba->sli4_hba.nvmels_cq,
9580                                        phba->sli4_hba.nvmels_wq,
9581                                        NULL, 0, LPFC_NVME_LS);
9582                 if (rc) {
9583                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9584                                         "0526 Failed setup of NVME LS WQ/CQ: "
9585                                         "rc = 0x%x\n", (uint32_t)rc);
9586                         goto out_destroy;
9587                 }
9588 
9589                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9590                                 "6096 NVME LS WQ setup: wq-id=%d, "
9591                                 "parent cq-id=%d\n",
9592                                 phba->sli4_hba.nvmels_wq->queue_id,
9593                                 phba->sli4_hba.nvmels_cq->queue_id);
9594         }
9595 
9596         /*
9597          * Create NVMET Receive Queue (RQ)
9598          */
9599         if (phba->nvmet_support) {
9600                 if ((!phba->sli4_hba.nvmet_cqset) ||
9601                     (!phba->sli4_hba.nvmet_mrq_hdr) ||
9602                     (!phba->sli4_hba.nvmet_mrq_data)) {
9603                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9604                                         "6130 MRQ CQ Queues not "
9605                                         "allocated\n");
9606                         rc = -ENOMEM;
9607                         goto out_destroy;
9608                 }
9609                 if (phba->cfg_nvmet_mrq > 1) {
9610                         rc = lpfc_mrq_create(phba,
9611                                              phba->sli4_hba.nvmet_mrq_hdr,
9612                                              phba->sli4_hba.nvmet_mrq_data,
9613                                              phba->sli4_hba.nvmet_cqset,
9614                                              LPFC_NVMET);
9615                         if (rc) {
9616                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9617                                                 "6098 Failed setup of NVMET "
9618                                                 "MRQ: rc = 0x%x\n",
9619                                                 (uint32_t)rc);
9620                                 goto out_destroy;
9621                         }
9622 
9623                 } else {
9624                         rc = lpfc_rq_create(phba,
9625                                             phba->sli4_hba.nvmet_mrq_hdr[0],
9626                                             phba->sli4_hba.nvmet_mrq_data[0],
9627                                             phba->sli4_hba.nvmet_cqset[0],
9628                                             LPFC_NVMET);
9629                         if (rc) {
9630                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9631                                                 "6057 Failed setup of NVMET "
9632                                                 "Receive Queue: rc = 0x%x\n",
9633                                                 (uint32_t)rc);
9634                                 goto out_destroy;
9635                         }
9636 
9637                         lpfc_printf_log(
9638                                 phba, KERN_INFO, LOG_INIT,
9639                                 "6099 NVMET RQ setup: hdr-rq-id=%d, "
9640                                 "dat-rq-id=%d parent cq-id=%d\n",
9641                                 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
9642                                 phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
9643                                 phba->sli4_hba.nvmet_cqset[0]->queue_id);
9644 
9645                 }
9646         }
9647 
9648         if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
9649                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9650                                 "0540 Receive Queue not allocated\n");
9651                 rc = -ENOMEM;
9652                 goto out_destroy;
9653         }
9654 
9655         rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
9656                             phba->sli4_hba.els_cq, LPFC_USOL);
9657         if (rc) {
9658                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9659                                 "0541 Failed setup of Receive Queue: "
9660                                 "rc = 0x%x\n", (uint32_t)rc);
9661                 goto out_destroy;
9662         }
9663 
9664         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9665                         "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
9666                         "parent cq-id=%d\n",
9667                         phba->sli4_hba.hdr_rq->queue_id,
9668                         phba->sli4_hba.dat_rq->queue_id,
9669                         phba->sli4_hba.els_cq->queue_id);
9670 
9671         if (phba->cfg_fcp_imax)
9672                 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
9673         else
9674                 usdelay = 0;
9675 
9676         for (qidx = 0; qidx < phba->cfg_irq_chann;
9677              qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
9678                 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
9679                                          usdelay);
9680 
9681         if (phba->sli4_hba.cq_max) {
9682                 kfree(phba->sli4_hba.cq_lookup);
9683                 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
9684                         sizeof(struct lpfc_queue *), GFP_KERNEL);
9685                 if (!phba->sli4_hba.cq_lookup) {
9686                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9687                                         "0549 Failed setup of CQ Lookup table: "
9688                                         "size 0x%x\n", phba->sli4_hba.cq_max);
9689                         rc = -ENOMEM;
9690                         goto out_destroy;
9691                 }
9692                 lpfc_setup_cq_lookup(phba);
9693         }
9694         return 0;
9695 
9696 out_destroy:
9697         lpfc_sli4_queue_unset(phba);
9698 out_error:
9699         return rc;
9700 }
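
/*
 * Editor's note: in lpfc_sli4_queue_setup() above, the EQ coalescing
 * delay is derived as usdelay = LPFC_SEC_TO_USEC / cfg_fcp_imax.
 * Assuming LPFC_SEC_TO_USEC is 1000000, a cfg_fcp_imax of 50000
 * interrupts/sec gives 1000000 / 50000 = 20 us of delay per EQ, while
 * cfg_fcp_imax == 0 disables coalescing (usdelay = 0).
 */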
9701 
9702 /**
9703  * lpfc_sli4_queue_unset - Unset all the SLI4 queues
9704  * @phba: pointer to lpfc hba data structure.
9705  *
9706  * This routine is invoked to unset (destroy on the port) all the SLI4
9707  * queues used for the FCoE HBA operation. This routine has no return value.
9713  **/
9714 void
9715 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
9716 {
9717         struct lpfc_sli4_hdw_queue *qp;
9718         struct lpfc_queue *eq;
9719         int qidx;
9720 
9721         /* Unset mailbox command work queue */
9722         if (phba->sli4_hba.mbx_wq)
9723                 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
9724 
9725         /* Unset NVME LS work queue */
9726         if (phba->sli4_hba.nvmels_wq)
9727                 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
9728 
9729         /* Unset ELS work queue */
9730         if (phba->sli4_hba.els_wq)
9731                 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
9732 
9733         /* Unset unsolicited receive queue */
9734         if (phba->sli4_hba.hdr_rq)
9735                 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
9736                                 phba->sli4_hba.dat_rq);
9737 
9738         /* Unset mailbox command complete queue */
9739         if (phba->sli4_hba.mbx_cq)
9740                 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
9741 
9742         /* Unset ELS complete queue */
9743         if (phba->sli4_hba.els_cq)
9744                 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
9745 
9746         /* Unset NVME LS complete queue */
9747         if (phba->sli4_hba.nvmels_cq)
9748                 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
9749 
9750         if (phba->nvmet_support) {
9751                 /* Unset NVMET MRQ queue */
9752                 if (phba->sli4_hba.nvmet_mrq_hdr) {
9753                         for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
9754                                 lpfc_rq_destroy(
9755                                         phba,
9756                                         phba->sli4_hba.nvmet_mrq_hdr[qidx],
9757                                         phba->sli4_hba.nvmet_mrq_data[qidx]);
9758                 }
9759 
9760                 /* Unset NVMET CQ Set complete queue */
9761                 if (phba->sli4_hba.nvmet_cqset) {
9762                         for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
9763                                 lpfc_cq_destroy(
9764                                         phba, phba->sli4_hba.nvmet_cqset[qidx]);
9765                 }
9766         }
9767 
9768         /* Unset fast-path SLI4 queues */
9769         if (phba->sli4_hba.hdwq) {
9770                 /* Loop thru all Hardware Queues */
9771                 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
9772                         /* Destroy the CQ/WQ corresponding to Hardware Queue */
9773                         qp = &phba->sli4_hba.hdwq[qidx];
9774                         lpfc_wq_destroy(phba, qp->io_wq);
9775                         lpfc_cq_destroy(phba, qp->io_cq);
9776                 }
9777                 /* Loop thru all IRQ vectors */
9778                 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
9779                         /* Destroy the EQ corresponding to the IRQ vector */
9780                         eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
9781                         lpfc_eq_destroy(phba, eq);
9782                 }
9783         }
9784 
9785         kfree(phba->sli4_hba.cq_lookup);
9786         phba->sli4_hba.cq_lookup = NULL;
9787         phba->sli4_hba.cq_max = 0;
9788 }
9789 
9790 /**
9791  * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
9792  * @phba: pointer to lpfc hba data structure.
9793  *
9794  * This routine is invoked to allocate and set up a pool of completion queue
9795  * events. The body of a completion queue event is a completion queue entry
9796  * (CQE). For now, this pool is used by the interrupt service routine to queue
9797  * the following HBA completion queue events for the worker thread to process:
9798  *   - Mailbox asynchronous events
9799  *   - Receive queue completion unsolicited events
9800  * Later, this can be used for all the slow-path events.
9801  *
9802  * Return codes
9803  *      0 - successful
9804  *      -ENOMEM - No available memory
9805  **/
9806 static int
9807 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
9808 {
9809         struct lpfc_cq_event *cq_event;
9810         int i;
9811 
9812         for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
9813                 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
9814                 if (!cq_event)
9815                         goto out_pool_create_fail;
9816                 list_add_tail(&cq_event->list,
9817                               &phba->sli4_hba.sp_cqe_event_pool);
9818         }
9819         return 0;
9820 
9821 out_pool_create_fail:
9822         lpfc_sli4_cq_event_pool_destroy(phba);
9823         return -ENOMEM;
9824 }
9825 
9826 /**
9827  * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
9828  * @phba: pointer to lpfc hba data structure.
9829  *
9830  * This routine is invoked to free the pool of completion queue events at
9831  * driver unload time. Note that it is the responsibility of the driver
9832  * cleanup routine to free all the outstanding completion-queue events
9833  * allocated from this pool back into the pool before invoking this routine
9834  * to destroy the pool.
9835  **/
9836 static void
9837 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
9838 {
9839         struct lpfc_cq_event *cq_event, *next_cq_event;
9840 
9841         list_for_each_entry_safe(cq_event, next_cq_event,
9842                                  &phba->sli4_hba.sp_cqe_event_pool, list) {
9843                 list_del(&cq_event->list);
9844                 kfree(cq_event);
9845         }
9846 }
9847 
9848 /**
9849  * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
9850  * @phba: pointer to lpfc hba data structure.
9851  *
9852  * This routine is the lock-free version of the API invoked to allocate a
9853  * completion-queue event from the free pool.
9854  *
9855  * Return: Pointer to the newly allocated completion-queue event if successful
9856  *         NULL otherwise.
9857  **/
9858 struct lpfc_cq_event *
9859 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
9860 {
9861         struct lpfc_cq_event *cq_event = NULL;
9862 
9863         list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
9864                          struct lpfc_cq_event, list);
9865         return cq_event;
9866 }
9867 
9868 /**
9869  * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
9870  * @phba: pointer to lpfc hba data structure.
9871  *
9872  * This routine is the locked version of the API invoked to allocate a
9873  * completion-queue event from the free pool.
9874  *
9875  * Return: Pointer to the newly allocated completion-queue event if successful
9876  *         NULL otherwise.
9877  **/
9878 struct lpfc_cq_event *
9879 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
9880 {
9881         struct lpfc_cq_event *cq_event;
9882         unsigned long iflags;
9883 
9884         spin_lock_irqsave(&phba->hbalock, iflags);
9885         cq_event = __lpfc_sli4_cq_event_alloc(phba);
9886         spin_unlock_irqrestore(&phba->hbalock, iflags);
9887         return cq_event;
9888 }
9889 
9890 /**
9891  * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
9892  * @phba: pointer to lpfc hba data structure.
9893  * @cq_event: pointer to the completion queue event to be freed.
9894  *
9895  * This routine is the lock-free version of the API invoked to release a
9896  * completion-queue event back into the free pool.
9897  **/
9898 void
9899 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
9900                              struct lpfc_cq_event *cq_event)
9901 {
9902         list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
9903 }
9904 
9905 /**
9906  * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
9907  * @phba: pointer to lpfc hba data structure.
9908  * @cq_event: pointer to the completion queue event to be freed.
9909  *
9910  * This routine is the locked version of the API invoked to release a
9911  * completion-queue event back into the free pool.
9912  **/
9913 void
9914 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
9915                            struct lpfc_cq_event *cq_event)
9916 {
9917         unsigned long iflags;
9918         spin_lock_irqsave(&phba->hbalock, iflags);
9919         __lpfc_sli4_cq_event_release(phba, cq_event);
9920         spin_unlock_irqrestore(&phba->hbalock, iflags);
9921 }
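
/*
 * Usage sketch (illustrative only; not part of the driver): a producer,
 * such as an interrupt handler, takes an event from the pool and queues it
 * for the worker thread; the worker releases it back to the pool when done.
 * The list "example_sp_work_queue" is hypothetical and stands in for the
 * sp_*_work_queue lists spliced below in lpfc_sli4_cq_event_release_all().
 */
static void
example_sp_event_produce(struct lpfc_hba *phba,
                         struct list_head *example_sp_work_queue)
{
        struct lpfc_cq_event *cq_event;
        unsigned long iflags;

        /* Locked variant; the __ variant requires hbalock already held */
        cq_event = lpfc_sli4_cq_event_alloc(phba);
        if (!cq_event)
                return; /* pool exhausted; the event is dropped */

        spin_lock_irqsave(&phba->hbalock, iflags);
        list_add_tail(&cq_event->list, example_sp_work_queue);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
}

static void
example_sp_event_consume(struct lpfc_hba *phba, struct lpfc_cq_event *cq_event)
{
        /* ... process the completion ..., then return it to the free pool */
        lpfc_sli4_cq_event_release(phba, cq_event);
}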
9922 
9923 /**
9924  * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
9925  * @phba: pointer to lpfc hba data structure.
9926  *
9927  * This routine is invoked to release all the pending completion-queue
9928  * events back into the free pool for device reset.
9929  **/
9930 static void
9931 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
9932 {
9933         LIST_HEAD(cqelist);
9934         struct lpfc_cq_event *cqe;
9935         unsigned long iflags;
9936 
9937         /* Retrieve all the pending WCQEs from pending WCQE lists */
9938         spin_lock_irqsave(&phba->hbalock, iflags);
9939         /* Pending FCP XRI abort events */
9940         list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
9941                          &cqelist);
9942         /* Pending ELS XRI abort events */
9943         list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
9944                          &cqelist);
9945         /* Pending async events */
9946         list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
9947                          &cqelist);
9948         spin_unlock_irqrestore(&phba->hbalock, iflags);
9949 
9950         while (!list_empty(&cqelist)) {
9951                 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
9952                 lpfc_sli4_cq_event_release(phba, cqe);
9953         }
9954 }
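
/*
 * Note on the pattern above (illustrative): the three pending lists are
 * spliced onto a private list while hbalock is held, then drained without
 * holding it, which keeps the lock hold time bounded; each
 * lpfc_sli4_cq_event_release() call re-takes hbalock briefly to return one
 * event to the pool.
 */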
9955 
9956 /**
9957  * lpfc_pci_function_reset - Reset pci function.
9958  * @phba: pointer to lpfc hba data structure.
9959  *
9960  * This routine is invoked to request a PCI function reset. It destroys
9961  * all resources assigned to the PCI function that originates this request.
9962  *
9963  * Return codes
9964  *      0 - successful
9965  *      -ENOMEM - No available memory
9966  *      -EIO - The mailbox failed to complete successfully.
9967  **/
9968 int
9969 lpfc_pci_function_reset(struct lpfc_hba *phba)
9970 {
9971         LPFC_MBOXQ_t *mboxq;
9972         uint32_t rc = 0, if_type;
9973         uint32_t shdr_status, shdr_add_status;
9974         uint32_t rdy_chk;
9975         uint32_t port_reset = 0;
9976         union lpfc_sli4_cfg_shdr *shdr;
9977         struct lpfc_register reg_data;
9978         uint16_t devid;
9979 
9980         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
9981         switch (if_type) {
9982         case LPFC_SLI_INTF_IF_TYPE_0:
9983                 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
9984                                                        GFP_KERNEL);
9985                 if (!mboxq) {
9986                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9987                                         "0494 Unable to allocate memory for "
9988                                         "issuing SLI_FUNCTION_RESET mailbox "
9989                                         "command\n");
9990                         return -ENOMEM;
9991                 }
9992 
9993                 /* Setup PCI function reset mailbox-ioctl command */
9994                 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
9995                                  LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
9996                                  LPFC_SLI4_MBX_EMBED);
9997                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9998                 shdr = (union lpfc_sli4_cfg_shdr *)
9999                         &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
10000                 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10001                 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
10002                                          &shdr->response);
10003                 if (rc != MBX_TIMEOUT)
10004                         mempool_free(mboxq, phba->mbox_mem_pool);
10005                 if (shdr_status || shdr_add_status || rc) {
10006                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10007                                         "0495 SLI_FUNCTION_RESET mailbox "
10008                                         "failed with status x%x add_status x%x,"
10009                                         " mbx status x%x\n",
10010                                         shdr_status, shdr_add_status, rc);
10011                         rc = -ENXIO;
10012                 }
10013                 break;
10014         case LPFC_SLI_INTF_IF_TYPE_2:
10015         case LPFC_SLI_INTF_IF_TYPE_6:
10016 wait:
10017                 /*
10018                  * Poll the Port Status Register and wait for RDY for
10019                  * up to 30 seconds. If the port doesn't respond, treat
10020                  * it as an error.
10021                  */
10022                 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
10023                         if (lpfc_readl(phba->sli4_hba.u.if_type2.
10024                                 STATUSregaddr, &reg_data.word0)) {
10025                                 rc = -ENODEV;
10026                                 goto out;
10027                         }
10028                         if (bf_get(lpfc_sliport_status_rdy, &reg_data))
10029                                 break;
10030                         msleep(20);
10031                 }
10032 
10033                 if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
10034                         phba->work_status[0] = readl(
10035                                 phba->sli4_hba.u.if_type2.ERR1regaddr);
10036                         phba->work_status[1] = readl(
10037                                 phba->sli4_hba.u.if_type2.ERR2regaddr);
10038                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10039                                         "2890 Port not ready, port status reg "
10040                                         "0x%x error 1=0x%x, error 2=0x%x\n",
10041                                         reg_data.word0,
10042                                         phba->work_status[0],
10043                                         phba->work_status[1]);
10044                         rc = -ENODEV;
10045                         goto out;
10046                 }
10047 
10048                 if (!port_reset) {
10049                         /*
10050                          * Reset the port now
10051                          */
10052                         reg_data.word0 = 0;
10053                         bf_set(lpfc_sliport_ctrl_end, &reg_data,
10054                                LPFC_SLIPORT_LITTLE_ENDIAN);
10055                         bf_set(lpfc_sliport_ctrl_ip, &reg_data,
10056                                LPFC_SLIPORT_INIT_PORT);
10057                         writel(reg_data.word0, phba->sli4_hba.u.if_type2.
10058                                CTRLregaddr);
10059                         /* flush */
10060                         pci_read_config_word(phba->pcidev,
10061                                              PCI_DEVICE_ID, &devid);
10062 
10063                         port_reset = 1;
10064                         msleep(20);
10065                         goto wait;
10066                 } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
10067                         rc = -ENODEV;
10068                         goto out;
10069                 }
10070                 break;
10071 
10072         case LPFC_SLI_INTF_IF_TYPE_1:
10073         default:
10074                 break;
10075         }
10076 
10077 out:
10078         /* Catch the not-ready port failure after a port reset. */
10079         if (rc) {
10080                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10081                                 "3317 HBA not functional: IP Reset Failed "
10082                                 "try: echo fw_reset > board_mode\n");
10083                 rc = -ENODEV;
10084         }
10085 
10086         return rc;
10087 }
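
/*
 * Illustrative sketch (not part of the driver): the RDY poll above budgets
 * 1500 iterations x 20 ms = 30 seconds.  A stand-alone helper of the same
 * shape might look like this; the name "example_wait_port_rdy" is
 * hypothetical.
 */
static int
example_wait_port_rdy(struct lpfc_hba *phba)
{
        struct lpfc_register reg_data;
        uint32_t rdy_chk;

        for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
                if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
                               &reg_data.word0))
                        return -ENODEV;         /* register read failed */
                if (bf_get(lpfc_sliport_status_rdy, &reg_data))
                        return 0;               /* port reports ready */
                msleep(20);
        }
        return -ETIMEDOUT;                      /* ~30s elapsed, no RDY */
}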
10088 
10089 /**
10090  * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
10091  * @phba: pointer to lpfc hba data structure.
10092  *
10093  * This routine is invoked to set up the PCI device memory space for device
10094  * with SLI-4 interface spec.
10095  *
10096  * Return codes
10097  *      0 - successful
10098  *      other values - error
10099  **/
10100 static int
10101 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
10102 {
10103         struct pci_dev *pdev = phba->pcidev;
10104         unsigned long bar0map_len, bar1map_len, bar2map_len;
10105         int error;
10106         uint32_t if_type;
10107 
10108         if (!pdev)
10109                 return -ENODEV;
10110 
10111         /* Set the device DMA mask size */
10112         error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10113         if (error)
10114                 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10115         if (error)
10116                 return error;
10117 
10118         /*
10119          * The BARs and register set definitions and offset locations are
10120          * dependent on the if_type.
10121          */
10122         if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
10123                                   &phba->sli4_hba.sli_intf.word0)) {
10124                 return -ENODEV;
10125         }
10126 
10127         /* There is no SLI3 failback for SLI4 devices. */
10128         if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
10129             LPFC_SLI_INTF_VALID) {
10130                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10131                                 "2894 SLI_INTF reg contents invalid "
10132                                 "sli_intf reg 0x%x\n",
10133                                 phba->sli4_hba.sli_intf.word0);
10134                 return -ENODEV;
10135         }
10136 
10137         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10138         /*
10139          * Get the bus address of SLI4 device Bar regions and the
10140          * number of bytes required by each mapping. The mapping of the
10141          * particular PCI BARs regions is dependent on the type of
10142          * SLI4 device.
10143          */
10144         if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
10145                 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
10146                 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
10147 
10148                 /*
10149                  * Map SLI4 PCI Config Space Register base to a kernel virtual
10150                  * addr
10151                  */
10152                 phba->sli4_hba.conf_regs_memmap_p =
10153                         ioremap(phba->pci_bar0_map, bar0map_len);
10154                 if (!phba->sli4_hba.conf_regs_memmap_p) {
10155                         dev_printk(KERN_ERR, &pdev->dev,
10156                                    "ioremap failed for SLI4 PCI config "
10157                                    "registers.\n");
10158                         return -ENODEV;
10159                 }
10160                 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
10161                 /* Set up BAR0 PCI config space register memory map */
10162                 lpfc_sli4_bar0_register_memmap(phba, if_type);
10163         } else {
10164                 phba->pci_bar0_map = pci_resource_start(pdev, 1);
10165                 bar0map_len = pci_resource_len(pdev, 1);
10166                 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10167                         dev_printk(KERN_ERR, &pdev->dev,
10168                            "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
10169                         return -ENODEV;
10170                 }
10171                 phba->sli4_hba.conf_regs_memmap_p =
10172                                 ioremap(phba->pci_bar0_map, bar0map_len);
10173                 if (!phba->sli4_hba.conf_regs_memmap_p) {
10174                         dev_printk(KERN_ERR, &pdev->dev,
10175                                 "ioremap failed for SLI4 PCI config "
10176                                 "registers.\n");
10177                         return -ENODEV;
10178                 }
10179                 lpfc_sli4_bar0_register_memmap(phba, if_type);
10180         }
10181 
10182         if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
10183                 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
10184                         /*
10185                          * Map SLI4 if type 0 HBA Control Register base to a
10186                          * kernel virtual address and setup the registers.
10187                          */
10188                         phba->pci_bar1_map = pci_resource_start(pdev,
10189                                                                 PCI_64BIT_BAR2);
10190                         bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
10191                         phba->sli4_hba.ctrl_regs_memmap_p =
10192                                         ioremap(phba->pci_bar1_map,
10193                                                 bar1map_len);
10194                         if (!phba->sli4_hba.ctrl_regs_memmap_p) {
10195                                 dev_err(&pdev->dev,
10196                                            "ioremap failed for SLI4 HBA "
10197                                             "control registers.\n");
10198                                 error = -ENOMEM;
10199                                 goto out_iounmap_conf;
10200                         }
10201                         phba->pci_bar2_memmap_p =
10202                                          phba->sli4_hba.ctrl_regs_memmap_p;
10203                         lpfc_sli4_bar1_register_memmap(phba, if_type);
10204                 } else {
10205                         error = -ENOMEM;
10206                         goto out_iounmap_conf;
10207                 }
10208         }
10209 
10210         if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
10211             (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
10212                 /*
10213                  * Map SLI4 if type 6 HBA Doorbell Register base to a kernel
10214                  * virtual address and setup the registers.
10215                  */
10216                 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
10217                 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
10218                 phba->sli4_hba.drbl_regs_memmap_p =
10219                                 ioremap(phba->pci_bar1_map, bar1map_len);
10220                 if (!phba->sli4_hba.drbl_regs_memmap_p) {
10221                         dev_err(&pdev->dev,
10222                            "ioremap failed for SLI4 HBA doorbell registers.\n");
10223                         error = -ENOMEM;
10224                         goto out_iounmap_conf;
10225                 }
10226                 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
10227                 lpfc_sli4_bar1_register_memmap(phba, if_type);
10228         }
10229 
10230         if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
10231                 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
10232                         /*
10233                          * Map SLI4 if type 0 HBA Doorbell Register base to
10234                          * a kernel virtual address and setup the registers.
10235                          */
10236                         phba->pci_bar2_map = pci_resource_start(pdev,
10237                                                                 PCI_64BIT_BAR4);
10238                         bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
10239                         phba->sli4_hba.drbl_regs_memmap_p =
10240                                         ioremap(phba->pci_bar2_map,
10241                                                 bar2map_len);
10242                         if (!phba->sli4_hba.drbl_regs_memmap_p) {
10243                                 dev_err(&pdev->dev,
10244                                            "ioremap failed for SLI4 HBA"
10245                                            " doorbell registers.\n");
10246                                 error = -ENOMEM;
10247                                 goto out_iounmap_ctrl;
10248                         }
10249                         phba->pci_bar4_memmap_p =
10250                                         phba->sli4_hba.drbl_regs_memmap_p;
10251                         error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
10252                         if (error)
10253                                 goto out_iounmap_all;
10254                 } else {
10255                         error = -ENOMEM;
10256                         goto out_iounmap_all;
10257                 }
10258         }
10259 
10260         if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
10261             pci_resource_start(pdev, PCI_64BIT_BAR4)) {
10262                 /*
10263                  * Map SLI4 if type 6 HBA DPP Register base to a kernel
10264                  * virtual address and setup the registers.
10265                  */
10266                 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
10267                 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
10268                 phba->sli4_hba.dpp_regs_memmap_p =
10269                                 ioremap(phba->pci_bar2_map, bar2map_len);
10270                 if (!phba->sli4_hba.dpp_regs_memmap_p) {
10271                         dev_err(&pdev->dev,
10272                            "ioremap failed for SLI4 HBA dpp registers.\n");
10273                         error = -ENOMEM;
10274                         goto out_iounmap_ctrl;
10275                 }
10276                 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
10277         }
10278 
10279         /* Set up the EQ/CQ register handling functions now */
10280         switch (if_type) {
10281         case LPFC_SLI_INTF_IF_TYPE_0:
10282         case LPFC_SLI_INTF_IF_TYPE_2:
10283                 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
10284                 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
10285                 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
10286                 break;
10287         case LPFC_SLI_INTF_IF_TYPE_6:
10288                 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
10289                 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
10290                 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
10291                 break;
10292         default:
10293                 break;
10294         }
10295 
10296         return 0;
10297 
10298 out_iounmap_all:
10299         iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10300 out_iounmap_ctrl:
10301         iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
10302 out_iounmap_conf:
10303         iounmap(phba->sli4_hba.conf_regs_memmap_p);
10304 
10305         return error;
10306 }
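
/*
 * Illustrative sketch (not part of the driver): the error handling above is
 * the standard ioremap()/goto-unwind idiom -- each successfully mapped BAR
 * gains an iounmap() label, and a failure jumps to the label that undoes
 * exactly what has succeeded so far.  In miniature:
 */
static int
example_map_two_bars(struct pci_dev *pdev,
                     void __iomem **bar0, void __iomem **bar2)
{
        *bar0 = ioremap(pci_resource_start(pdev, 0),
                        pci_resource_len(pdev, 0));
        if (!*bar0)
                return -ENOMEM;

        *bar2 = ioremap(pci_resource_start(pdev, 2),
                        pci_resource_len(pdev, 2));
        if (!*bar2)
                goto out_unmap_bar0;    /* undo only what succeeded */
        return 0;

out_unmap_bar0:
        iounmap(*bar0);
        return -ENOMEM;
}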
10307 
10308 /**
10309  * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
10310  * @phba: pointer to lpfc hba data structure.
10311  *
10312  * This routine is invoked to unset the PCI device memory space for device
10313  * with SLI-4 interface spec.
10314  **/
10315 static void
10316 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
10317 {
10318         uint32_t if_type;
10319         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10320 
10321         switch (if_type) {
10322         case LPFC_SLI_INTF_IF_TYPE_0:
10323                 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10324                 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
10325                 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10326                 break;
10327         case LPFC_SLI_INTF_IF_TYPE_2:
10328                 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10329                 break;
10330         case LPFC_SLI_INTF_IF_TYPE_6:
10331                 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10332                 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10333                 break;
10334         case LPFC_SLI_INTF_IF_TYPE_1:
10335         default:
10336                 dev_printk(KERN_ERR, &phba->pcidev->dev,
10337                            "FATAL - unsupported SLI4 interface type - %d\n",
10338                            if_type);
10339                 break;
10340         }
10341 }
10342 
10343 /**
10344  * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
10345  * @phba: pointer to lpfc hba data structure.
10346  *
10347  * This routine is invoked to enable the MSI-X interrupt vectors to device
10348  * with SLI-3 interface specs.
10349  *
10350  * Return codes
10351  *   0 - successful
10352  *   other values - error
10353  **/
10354 static int
10355 lpfc_sli_enable_msix(struct lpfc_hba *phba)
10356 {
10357         int rc;
10358         LPFC_MBOXQ_t *pmb;
10359 
10360         /* Set up MSI-X multi-message vectors */
10361         rc = pci_alloc_irq_vectors(phba->pcidev,
10362                         LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
10363         if (rc < 0) {
10364                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10365                                 "0420 PCI enable MSI-X failed (%d)\n", rc);
10366                 goto vec_fail_out;
10367         }
10368 
10369         /*
10370          * Assign MSI-X vectors to interrupt handlers
10371          */
10372 
10373         /* vector-0 is associated with the slow-path handler */
10374         rc = request_irq(pci_irq_vector(phba->pcidev, 0),
10375                          &lpfc_sli_sp_intr_handler, 0,
10376                          LPFC_SP_DRIVER_HANDLER_NAME, phba);
10377         if (rc) {
10378                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10379                                 "0421 MSI-X slow-path request_irq failed "
10380                                 "(%d)\n", rc);
10381                 goto msi_fail_out;
10382         }
10383 
10384         /* vector-1 is associated with the fast-path handler */
10385         rc = request_irq(pci_irq_vector(phba->pcidev, 1),
10386                          &lpfc_sli_fp_intr_handler, 0,
10387                          LPFC_FP_DRIVER_HANDLER_NAME, phba);
10388 
10389         if (rc) {
10390                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10391                                 "0429 MSI-X fast-path request_irq failed "
10392                                 "(%d)\n", rc);
10393                 goto irq_fail_out;
10394         }
10395 
10396         /*
10397          * Configure HBA MSI-X attention conditions to messages
10398          */
10399         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10400 
10401         if (!pmb) {
10402                 rc = -ENOMEM;
10403                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10404                                 "0474 Unable to allocate memory for issuing "
10405                                 "MBOX_CONFIG_MSI command\n");
10406                 goto mem_fail_out;
10407         }
10408         rc = lpfc_config_msi(phba, pmb);
10409         if (rc)
10410                 goto mbx_fail_out;
10411         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
10412         if (rc != MBX_SUCCESS) {
10413                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
10414                                 "0351 Config MSI mailbox command failed, "
10415                                 "mbxCmd x%x, mbxStatus x%x\n",
10416                                 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
10417                 goto mbx_fail_out;
10418         }
10419 
10420         /* Free memory allocated for mailbox command */
10421         mempool_free(pmb, phba->mbox_mem_pool);
10422         return rc;
10423 
10424 mbx_fail_out:
10425         /* Free memory allocated for mailbox command */
10426         mempool_free(pmb, phba->mbox_mem_pool);
10427 
10428 mem_fail_out:
10429         /* free the irq already requested */
10430         free_irq(pci_irq_vector(phba->pcidev, 1), phba);
10431 
10432 irq_fail_out:
10433         /* free the irq already requested */
10434         free_irq(pci_irq_vector(phba->pcidev, 0), phba);
10435 
10436 msi_fail_out:
10437         /* Unconfigure MSI-X capability structure */
10438         pci_free_irq_vectors(phba->pcidev);
10439 
10440 vec_fail_out:
10441         return rc;
10442 }
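
/*
 * Note (illustrative): pci_alloc_irq_vectors() takes a (min, max) range and
 * returns the number of vectors actually granted.  The SLI-3 path above
 * pins min == max == LPFC_MSIX_VECTORS, so it either gets exactly two
 * vectors (one slow path, one fast path) or fails outright; contrast
 * lpfc_sli4_enable_msix() later in this file, which accepts anything from
 * 1 to phba->cfg_irq_chann and shrinks its channel usage to match.
 */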
10443 
10444 /**
10445  * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
10446  * @phba: pointer to lpfc hba data structure.
10447  *
10448  * This routine is invoked to enable the MSI interrupt mode to device with
10449  * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
10450  * enable the MSI vector. The device driver is responsible for calling
10451  * request_irq() to register the MSI vector with an interrupt handler,
10452  * which is done in this function.
10453  *
10454  * Return codes
10455  *      0 - successful
10456  *      other values - error
10457  **/
10458 static int
10459 lpfc_sli_enable_msi(struct lpfc_hba *phba)
10460 {
10461         int rc;
10462 
10463         rc = pci_enable_msi(phba->pcidev);
10464         if (!rc)
10465                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10466                                 "0462 PCI enable MSI mode success.\n");
10467         else {
10468                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10469                                 "0471 PCI enable MSI mode failed (%d)\n", rc);
10470                 return rc;
10471         }
10472 
10473         rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
10474                          0, LPFC_DRIVER_NAME, phba);
10475         if (rc) {
10476                 pci_disable_msi(phba->pcidev);
10477                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10478                                 "0478 MSI request_irq failed (%d)\n", rc);
10479         }
10480         return rc;
10481 }
10482 
10483 /**
10484  * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
10485  * @phba: pointer to lpfc hba data structure.
10486  * @cfg_mode: configured interrupt mode (2 = MSI-X, 1 = MSI, 0 = INTx).
10487  *
10488  * This routine is invoked to enable device interrupts and associate the
10489  * driver's interrupt handler(s) with interrupt vector(s) for a device with
10490  * SLI-3 interface spec. Depending on the interrupt mode configured for the
10491  * driver, it will fall back from the configured interrupt mode to an
10492  * interrupt mode supported by the platform, kernel, and device, in the
10493  * order: MSI-X -> MSI -> INTx.
10494  *
10495  * Return codes
10496  *   0..2 - interrupt mode actually enabled (same encoding as @cfg_mode)
10497  *   LPFC_INTR_ERROR - no interrupt mode could be enabled
10498  **/
10499 static uint32_t
10500 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
10501 {
10502         uint32_t intr_mode = LPFC_INTR_ERROR;
10503         int retval;
10504 
10505         if (cfg_mode == 2) {
10506                 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
10507                 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
10508                 if (!retval) {
10509                         /* Now, try to enable MSI-X interrupt mode */
10510                         retval = lpfc_sli_enable_msix(phba);
10511                         if (!retval) {
10512                                 /* Indicate initialization to MSI-X mode */
10513                                 phba->intr_type = MSIX;
10514                                 intr_mode = 2;
10515                         }
10516                 }
10517         }
10518 
10519         /* Fall back to MSI if MSI-X initialization failed */
10520         if (cfg_mode >= 1 && phba->intr_type == NONE) {
10521                 retval = lpfc_sli_enable_msi(phba);
10522                 if (!retval) {
10523                         /* Indicate initialization to MSI mode */
10524                         phba->intr_type = MSI;
10525                         intr_mode = 1;
10526                 }
10527         }
10528 
10529         /* Fall back to INTx if both MSI-X and MSI initialization failed */
10530         if (phba->intr_type == NONE) {
10531                 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
10532                                      IRQF_SHARED, LPFC_DRIVER_NAME, phba);
10533                 if (!retval) {
10534                         /* Indicate initialization to INTx mode */
10535                         phba->intr_type = INTx;
10536                         intr_mode = 0;
10537                 }
10538         }
10539         return intr_mode;
10540 }
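
/*
 * Summary of the encoding above (illustrative):
 *
 *   cfg_mode 2 -> try MSI-X, then fall back to MSI, then INTx
 *   cfg_mode 1 -> try MSI, then fall back to INTx
 *   cfg_mode 0 -> INTx only
 *
 * The returned intr_mode mirrors what was actually enabled:
 *   2 = MSI-X, 1 = MSI, 0 = INTx, LPFC_INTR_ERROR = none succeeded.
 */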
10541 
10542 /**
10543  * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
10544  * @phba: pointer to lpfc hba data structure.
10545  *
10546  * This routine is invoked to disable device interrupt and disassociate the
10547  * driver's interrupt handler(s) from interrupt vector(s) to device with
10548  * SLI-3 interface spec. Depending on the interrupt mode, the driver will
10549  * release the interrupt vector(s) for the message signaled interrupt.
10550  **/
10551 static void
10552 lpfc_sli_disable_intr(struct lpfc_hba *phba)
10553 {
10554         int nr_irqs, i;
10555 
10556         if (phba->intr_type == MSIX)
10557                 nr_irqs = LPFC_MSIX_VECTORS;
10558         else
10559                 nr_irqs = 1;
10560 
10561         for (i = 0; i < nr_irqs; i++)
10562                 free_irq(pci_irq_vector(phba->pcidev, i), phba);
10563         pci_free_irq_vectors(phba->pcidev);
10564 
10565         /* Reset interrupt management states */
10566         phba->intr_type = NONE;
10567         phba->sli.slistat.sli_intr = 0;
10568 }
10569 
10570 /**
10571  * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
10572  * @phba: pointer to lpfc hba data structure.
10573  * @id: EQ vector index or Hardware Queue index
10574  * @match: LPFC_FIND_BY_EQ = match by EQ
10575  *         LPFC_FIND_BY_HDWQ = match by Hardware Queue
10576  * Return the CPU that matches the selection criteria
10577  */
10578 static uint16_t
10579 lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
10580 {
10581         struct lpfc_vector_map_info *cpup;
10582         int cpu;
10583 
10584         /* Loop through all CPUs */
10585         for_each_present_cpu(cpu) {
10586                 cpup = &phba->sli4_hba.cpu_map[cpu];
10587 
10588                 /* If we are matching by EQ, there may be multiple CPUs
10589                  * using the same vector, so select the one with
10590                  * LPFC_CPU_FIRST_IRQ set.
10591                  */
10592                 if ((match == LPFC_FIND_BY_EQ) &&
10593                     (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
10594                     (cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
10595                     (cpup->eq == id))
10596                         return cpu;
10597 
10598                 /* If matching by HDWQ, select the first CPU that matches */
10599                 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
10600                         return cpu;
10601         }
10602         return 0;
10603 }
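
/*
 * Usage sketch (illustrative; not from this file): a caller that wants to
 * schedule work on the first CPU serviced by EQ 3 might do:
 *
 *      cpu = lpfc_find_cpu_handle(phba, 3, LPFC_FIND_BY_EQ);
 *      queue_delayed_work_on(cpu, phba->wq, &work, delay);
 *
 * (phba->wq, &work and delay are assumed here for the example.)
 */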
10604 
10605 #ifdef CONFIG_X86
10606 /**
10607  * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
10608  * @phba: pointer to lpfc hba data structure.
10609  * @cpu: CPU map index
10610  * @phys_id: CPU package physical id
10611  * @core_id: CPU core id
10612  */
10613 static int
10614 lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
10615                 uint16_t phys_id, uint16_t core_id)
10616 {
10617         struct lpfc_vector_map_info *cpup;
10618         int idx;
10619 
10620         for_each_present_cpu(idx) {
10621                 cpup = &phba->sli4_hba.cpu_map[idx];
10622                 /* Does the cpup match the one we are looking for */
10623                 if ((cpup->phys_id == phys_id) &&
10624                     (cpup->core_id == core_id) &&
10625                     (cpu != idx))
10626                         return 1;
10627         }
10628         return 0;
10629 }
10630 #endif
10631 
10632 /**
10633  * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
10634  * @phba: pointer to lpfc hba data structure.
10635  * @vectors: number of msix vectors allocated.
10636  *
10637  * The routine will figure out the CPU affinity assignment for every
10638  * MSI-X vector allocated for the HBA.
10639  * In addition, the CPU to IO channel mapping will be calculated
10640  * and the phba->sli4_hba.cpu_map array will reflect this.
10641  */
10642 static void
10643 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
10644 {
10645         int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
10646         int max_phys_id, min_phys_id;
10647         int max_core_id, min_core_id;
10648         struct lpfc_vector_map_info *cpup;
10649         struct lpfc_vector_map_info *new_cpup;
10650         const struct cpumask *maskp;
10651 #ifdef CONFIG_X86
10652         struct cpuinfo_x86 *cpuinfo;
10653 #endif
10654 
10655         /* Init cpu_map array */
10656         for_each_possible_cpu(cpu) {
10657                 cpup = &phba->sli4_hba.cpu_map[cpu];
10658                 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
10659                 cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
10660                 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
10661                 cpup->eq = LPFC_VECTOR_MAP_EMPTY;
10662                 cpup->irq = LPFC_VECTOR_MAP_EMPTY;
10663                 cpup->flag = 0;
10664         }
10665 
10666         max_phys_id = 0;
10667         min_phys_id = LPFC_VECTOR_MAP_EMPTY;
10668         max_core_id = 0;
10669         min_core_id = LPFC_VECTOR_MAP_EMPTY;
10670 
10671         /* Update CPU map with physical id and core id of each CPU */
10672         for_each_present_cpu(cpu) {
10673                 cpup = &phba->sli4_hba.cpu_map[cpu];
10674 #ifdef CONFIG_X86
10675                 cpuinfo = &cpu_data(cpu);
10676                 cpup->phys_id = cpuinfo->phys_proc_id;
10677                 cpup->core_id = cpuinfo->cpu_core_id;
10678                 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
10679                         cpup->flag |= LPFC_CPU_MAP_HYPER;
10680 #else
10681                 /* No distinction between CPUs for other platforms */
10682                 cpup->phys_id = 0;
10683                 cpup->core_id = cpu;
10684 #endif
10685 
10686                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10687                                 "3328 CPU %d physid %d coreid %d flag x%x\n",
10688                                 cpu, cpup->phys_id, cpup->core_id, cpup->flag);
10689 
10690                 if (cpup->phys_id > max_phys_id)
10691                         max_phys_id = cpup->phys_id;
10692                 if (cpup->phys_id < min_phys_id)
10693                         min_phys_id = cpup->phys_id;
10694 
10695                 if (cpup->core_id > max_core_id)
10696                         max_core_id = cpup->core_id;
10697                 if (cpup->core_id < min_core_id)
10698                         min_core_id = cpup->core_id;
10699         }
10700 
10701         for_each_possible_cpu(i) {
10702                 struct lpfc_eq_intr_info *eqi =
10703                         per_cpu_ptr(phba->sli4_hba.eq_info, i);
10704 
10705                 INIT_LIST_HEAD(&eqi->list);
10706                 eqi->icnt = 0;
10707         }
10708 
10709         /* This loop sets up all CPUs that are affinitized with an
10710          * irq vector assigned to the driver. All affinitized CPUs
10711          * will get a link to that vector's IRQ and EQ.
10712          *
10713          * NULL affinity mask handling:
10714          * If irq count is greater than one, log an error message.
10715          * If the null mask is received for the first irq, find the
10716          * first present cpu, and assign the eq index to ensure at
10717          * least one EQ is assigned.
10718          */
10719         for (idx = 0; idx <  phba->cfg_irq_chann; idx++) {
10720                 /* Get a CPU mask for all CPUs affinitized to this vector */
10721                 maskp = pci_irq_get_affinity(phba->pcidev, idx);
10722                 if (!maskp) {
10723                         if (phba->cfg_irq_chann > 1)
10724                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10725                                                 "3329 No affinity mask found "
10726                                                 "for vector %d (%d)\n",
10727                                                 idx, phba->cfg_irq_chann);
10728                         if (!idx) {
10729                                 cpu = cpumask_first(cpu_present_mask);
10730                                 cpup = &phba->sli4_hba.cpu_map[cpu];
10731                                 cpup->eq = idx;
10732                                 cpup->irq = pci_irq_vector(phba->pcidev, idx);
10733                                 cpup->flag |= LPFC_CPU_FIRST_IRQ;
10734                         }
10735                         break;
10736                 }
10737 
10738                 i = 0;
10739                 /* Loop through all CPUs associated with vector idx */
10740                 for_each_cpu_and(cpu, maskp, cpu_present_mask) {
10741                         /* Set the EQ index and IRQ for that vector */
10742                         cpup = &phba->sli4_hba.cpu_map[cpu];
10743                         cpup->eq = idx;
10744                         cpup->irq = pci_irq_vector(phba->pcidev, idx);
10745 
10746                         /* If this is the first CPU that's assigned to this
10747                          * vector, set LPFC_CPU_FIRST_IRQ.
10748                          */
10749                         if (!i)
10750                                 cpup->flag |= LPFC_CPU_FIRST_IRQ;
10751                         i++;
10752 
10753                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10754                                         "3336 Set Affinity: CPU %d "
10755                                         "irq %d eq %d flag x%x\n",
10756                                         cpu, cpup->irq, cpup->eq, cpup->flag);
10757                 }
10758         }
10759 
10760         /* After looking at each irq vector assigned to this pcidev, it's
10761          * possible to see that not ALL CPUs have been accounted for.
10762          * Next we will set any unassigned (unaffinitized) cpu map
10763          * entries to an IRQ on the same phys_id.
10764          */
10765         first_cpu = cpumask_first(cpu_present_mask);
10766         start_cpu = first_cpu;
10767 
10768         for_each_present_cpu(cpu) {
10769                 cpup = &phba->sli4_hba.cpu_map[cpu];
10770 
10771                 /* Is this CPU entry unassigned */
10772                 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
10773                         /* Mark CPU as IRQ not assigned by the kernel */
10774                         cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
10775 
10776                          /* If so, find a new_cpup that's on the SAME
10777                           * phys_id as cpup. start_cpu will start where we
10778                           * left off so all unassigned entries don't get assigned
10779                          * the IRQ of the first entry.
10780                          */
10781                         new_cpu = start_cpu;
10782                         for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
10783                                 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
10784                                 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
10785                                     (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
10786                                     (new_cpup->phys_id == cpup->phys_id))
10787                                         goto found_same;
10788                                 new_cpu = cpumask_next(
10789                                         new_cpu, cpu_present_mask);
10790                                 if (new_cpu == nr_cpumask_bits)
10791                                         new_cpu = first_cpu;
10792                         }
10793                         /* At this point, we leave the CPU as unassigned */
10794                         continue;
10795 found_same:
10796                         /* We found a matching phys_id, so copy the IRQ info */
10797                         cpup->eq = new_cpup->eq;
10798                         cpup->irq = new_cpup->irq;
10799 
10800                         /* Bump start_cpu to the next slot to minimize the
10801                          * chance of having multiple unassigned CPU entries
10802                          * selecting the same IRQ.
10803                          */
10804                         start_cpu = cpumask_next(new_cpu, cpu_present_mask);
10805                         if (start_cpu == nr_cpumask_bits)
10806                                 start_cpu = first_cpu;
10807 
10808                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10809                                         "3337 Set Affinity: CPU %d "
10810                                         "irq %d from id %d same "
10811                                         "phys_id (%d)\n",
10812                                         cpu, cpup->irq, new_cpu, cpup->phys_id);
10813                 }
10814         }
10815 
10816         /* Set any unassigned cpu map entries to an IRQ on any phys_id */
10817         start_cpu = first_cpu;
10818 
10819         for_each_present_cpu(cpu) {
10820                 cpup = &phba->sli4_hba.cpu_map[cpu];
10821 
10822                 /* Is this entry unassigned */
10823                 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
10824                         /* Mark it as IRQ not assigned by the kernel */
10825                         cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
10826 
10827                         /* If so, find a new_cpup on ANY phys_id, not
10828                          * just cpup's. start_cpu will start where we
10829                          * left off so all unassigned entries don't get
10830                          * assigned the IRQ of the first entry.
10831                          */
10832                         new_cpu = start_cpu;
10833                         for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
10834                                 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
10835                                 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
10836                                     (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY))
10837                                         goto found_any;
10838                                 new_cpu = cpumask_next(
10839                                         new_cpu, cpu_present_mask);
10840                                 if (new_cpu == nr_cpumask_bits)
10841                                         new_cpu = first_cpu;
10842                         }
10843                         /* We should never leave an entry unassigned */
10844                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10845                                         "3339 Set Affinity: CPU %d "
10846                                         "irq %d UNASSIGNED\n",
10847                                         cpu, cpup->irq);
10848                         continue;
10849 found_any:
10850                         /* We found an available entry, copy the IRQ info */
10851                         cpup->eq = new_cpup->eq;
10852                         cpup->irq = new_cpup->irq;
10853 
10854                         /* Bump start_cpu to the next slot to minimize the
10855                          * chance of having multiple unassigned CPU entries
10856                          * selecting the same IRQ.
10857                          */
10858                         start_cpu = cpumask_next(new_cpu, cpu_present_mask);
10859                         if (start_cpu == nr_cpumask_bits)
10860                                 start_cpu = first_cpu;
10861 
10862                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10863                                         "3338 Set Affinity: CPU %d "
10864                                         "irq %d from id %d (%d/%d)\n",
10865                                         cpu, cpup->irq, new_cpu,
10866                                         new_cpup->phys_id, new_cpup->core_id);
10867                 }
10868         }
10869 
10870         /* Assign hdwq indices that are unique across all cpus in the map
10871          * that are also FIRST_CPUs.
10872          */
10873         idx = 0;
10874         for_each_present_cpu(cpu) {
10875                 cpup = &phba->sli4_hba.cpu_map[cpu];
10876 
10877                 /* Only FIRST IRQs get a hdwq index assignment. */
10878                 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
10879                         continue;
10880 
10881                 /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
10882                 cpup->hdwq = idx;
10883                 idx++;
10884                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10885                                 "3333 Set Affinity: CPU %d (phys %d core %d): "
10886                                 "hdwq %d eq %d irq %d flg x%x\n",
10887                                 cpu, cpup->phys_id, cpup->core_id,
10888                                 cpup->hdwq, cpup->eq, cpup->irq, cpup->flag);
10889         }
10890         /* Finally we need to associate a hdwq with each cpu_map entry.
10891          * This will be 1 to 1 - hdwq to cpu - unless there are fewer
10892          * hardware queues than CPUs. In that case we will round-robin
10893          * the available hardware queues as they get assigned to CPUs.
10894          * The next_idx is the idx from the FIRST_CPU loop above to account
10895          * for irq_chann < hdwq.  The idx is used for round-robin assignments
10896          * and needs to start at 0.
10897          */
10898         next_idx = idx;
10899         start_cpu = 0;
10900         idx = 0;
10901         for_each_present_cpu(cpu) {
10902                 cpup = &phba->sli4_hba.cpu_map[cpu];
10903 
10904                 /* FIRST cpus are already mapped. */
10905                 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
10906                         continue;
10907 
10908                 /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq
10909                  * of the unassigned cpus to the next idx so that all
10910                  * hdw queues are fully utilized.
10911                  */
10912                 if (next_idx < phba->cfg_hdw_queue) {
10913                         cpup->hdwq = next_idx;
10914                         next_idx++;
10915                         continue;
10916                 }
10917 
10918                 /* Not a First CPU and all hdw_queues are used.  Reuse a
10919                  * Hardware Queue for another CPU, so be smart about it
10920                  * and pick one that has its IRQ/EQ mapped to the same phys_id
10921                  * (CPU package) and core_id.
10922                  */
10923                 new_cpu = start_cpu;
10924                 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
10925                         new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
10926                         if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
10927                             new_cpup->phys_id == cpup->phys_id &&
10928                             new_cpup->core_id == cpup->core_id) {
10929                                 goto found_hdwq;
10930                         }
10931                         new_cpu = cpumask_next(new_cpu, cpu_present_mask);
10932                         if (new_cpu == nr_cpumask_bits)
10933                                 new_cpu = first_cpu;
10934                 }
10935 
10936                 /* If we can't match both phys_id and core_id,
10937                  * settle for just a phys_id match.
10938                  */
10939                 new_cpu = start_cpu;
10940                 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
10941                         new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
10942                         if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
10943                             new_cpup->phys_id == cpup->phys_id)
10944                                 goto found_hdwq;
10945 
10946                         new_cpu = cpumask_next(new_cpu, cpu_present_mask);
10947                         if (new_cpu == nr_cpumask_bits)
10948                                 new_cpu = first_cpu;
10949                 }
10950 
10951                 /* Otherwise just round robin on cfg_hdw_queue */
10952                 cpup->hdwq = idx % phba->cfg_hdw_queue;
10953                 idx++;
10954                 goto logit;
10955  found_hdwq:
10956                 /* We found an available entry, use its hdwq */
10957                 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
10958                 if (start_cpu == nr_cpumask_bits)
10959                         start_cpu = first_cpu;
10960                 cpup->hdwq = new_cpup->hdwq;
10961  logit:
10962                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10963                                 "3335 Set Affinity: CPU %d (phys %d core %d): "
10964                                 "hdwq %d eq %d irq %d flg x%x\n",
10965                                 cpu, cpup->phys_id, cpup->core_id,
10966                                 cpup->hdwq, cpup->eq, cpup->irq, cpup->flag);
10967         }
10968 
10969         /* The cpu_map array will be used later during initialization
10970          * when EQ / CQ / WQs are allocated and configured.
10971          */
10972         return;
10973 }
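
/*
 * Worked example (illustrative, not derived from a real topology): with 4
 * present CPUs, 4 MSI-X vectors and cfg_hdw_queue = 4, each vector ends up
 * affinitized to one CPU and the map might settle as:
 *
 *   cpu  eq  irq        hdwq  flag
 *    0    0  vector(0)   0    FIRST_IRQ
 *    1    1  vector(1)   1    FIRST_IRQ
 *    2    2  vector(2)   2    FIRST_IRQ
 *    3    3  vector(3)   3    FIRST_IRQ
 *
 * With fewer vectors than CPUs, the later passes copy EQ/IRQ info from an
 * assigned CPU on the same phys_id (or any phys_id), and hdwq indices are
 * round-robined across the remaining CPUs.
 */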
10974 
10975 /**
10976  * lpfc_cpuhp_get_eq - Get the EQs whose vectors are serviced only by @cpu
10977  *
10978  * @phba:   pointer to lpfc hba data structure.
10979  * @cpu:    cpu going offline
10980  * @eqlist: list head on which to collect the affected EQs
10981  */
10982 static void
10983 lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
10984                   struct list_head *eqlist)
10985 {
10986         struct lpfc_vector_map_info *map;
10987         const struct cpumask *maskp;
10988         struct lpfc_queue *eq;
10989         unsigned int i;
10990         cpumask_t tmp;
10991         u16 idx;
10992 
10993         for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
10994                 maskp = pci_irq_get_affinity(phba->pcidev, idx);
10995                 if (!maskp)
10996                         continue;
10997                 /*
10998                  * if the irq is not affinitized to the cpu going
10999                  * offline then we don't need to poll the eq
11000                  * attached to it.
11001                  */
11002                 if (!cpumask_and(&tmp, maskp, cpumask_of(cpu)))
11003                         continue;
11004                 /* get the cpus that are online and are
11005                  * affinitized to this irq vector.  If the count
11006                  * is more than 1 then cpuhp is not going to
11007                  * shut down this vector.  Since this cpu has not
11008                  * gone offline yet, we need >1.
11009                  */
11010                 cpumask_and(&tmp, maskp, cpu_online_mask);
11011                 if (cpumask_weight(&tmp) > 1)
11012                         continue;
11013 
11014                 /* Now that we have an irq to shut down, get the eq
11015                  * mapped to this irq.  Note: multiple hdwq's in
11016                  * the software can share an eq, but eventually
11017                  * only one eq will be mapped to this vector.
11018                  */
11019                 for_each_possible_cpu(i) {
11020                         map = &phba->sli4_hba.cpu_map[i];
11021                         if (!(map->irq == pci_irq_vector(phba->pcidev, idx)))
11022                                 continue;
11023                         eq = phba->sli4_hba.hdwq[map->hdwq].hba_eq;
11024                         list_add(&eq->_poll_list, eqlist);
11025                         /* 1 is good enough. others will be a copy of this */
11026                         break;
11027                 }
11028         }
11029 }
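
/*
 * Note (illustrative): the two cpumask_and() tests above together ask
 * "is @cpu the last online CPU in this vector's affinity mask?"  Only then
 * will cpuhp shut the vector down, and only then must its EQ be handed
 * over to timer-driven polling.
 */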
11030 
11031 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
11032 {
11033         if (phba->sli_rev != LPFC_SLI_REV4)
11034                 return;
11035 
11036         cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
11037                                             &phba->cpuhp);
11038         /*
11039          * unregistering the instance doesn't stop the polling
11040          * timer. Wait for the poll timer to retire.
11041          */
11042         synchronize_rcu();
11043         del_timer_sync(&phba->cpuhp_poll_timer);
11044 }
11045 
11046 static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
11047 {
11048         if (phba->pport->fc_flag & FC_OFFLINE_MODE)
11049                 return;
11050 
11051         __lpfc_cpuhp_remove(phba);
11052 }
11053 
11054 static void lpfc_cpuhp_add(struct lpfc_hba *phba)
11055 {
11056         if (phba->sli_rev != LPFC_SLI_REV4)
11057                 return;
11058 
11059         rcu_read_lock();
11060 
11061         if (!list_empty(&phba->poll_list)) {
11062                 timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
11063                 mod_timer(&phba->cpuhp_poll_timer,
11064                           jiffies + msecs_to_jiffies(LPFC_POLL_HB));
11065         }
11066 
11067         rcu_read_unlock();
11068 
11069         cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
11070                                          &phba->cpuhp);
11071 }
11072 
11073 static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
11074 {
11075         if (phba->pport->load_flag & FC_UNLOADING) {
11076                 *retval = -EAGAIN;
11077                 return true;
11078         }
11079 
11080         if (phba->sli_rev != LPFC_SLI_REV4) {
11081                 *retval = 0;
11082                 return true;
11083         }
11084 
11085         /* proceed with the hotplug */
11086         return false;
11087 }
11088 
11089 static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
11090 {
11091         struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
11092         struct lpfc_queue *eq, *next;
11093         LIST_HEAD(eqlist);
11094         int retval;
11095 
11096         if (!phba) {
11097                 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
11098                 return 0;
11099         }
11100 
11101         if (__lpfc_cpuhp_checks(phba, &retval))
11102                 return retval;
11103 
11104         lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
11105 
11106         /* start polling on these eq's */
11107         list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
11108                 list_del_init(&eq->_poll_list);
11109                 lpfc_sli4_start_polling(eq);
11110         }
11111 
11112         return 0;
11113 }
11114 
11115 static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
11116 {
11117         struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
11118         struct lpfc_queue *eq, *next;
11119         unsigned int n;
11120         int retval;
11121 
11122         if (!phba) {
11123                 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
11124                 return 0;
11125         }
11126 
11127         if (__lpfc_cpuhp_checks(phba, &retval))
11128                 return retval;
11129 
11130         list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
11131                 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
11132                 if (n == cpu)
11133                         lpfc_sli4_stop_polling(eq);
11134         }
11135 
11136         return 0;
11137 }
11138 
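/*
 * A minimal sketch (illustrative names, not driver code) of the
 * multi-instance CPU hotplug pattern used by lpfc_cpu_online() and
 * lpfc_cpu_offline() above: a dynamic hotplug state is registered once,
 * then one hlist_node instance is attached per HBA.
 */
#include <linux/cpuhotplug.h>
#include <linux/list.h>
#include <linux/errno.h>

struct example_hba {
	struct hlist_node cpuhp;	/* embedded hotplug instance */
};

static enum cpuhp_state example_cpuhp_state;

static int example_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	/* hlist_entry_safe() recovers the enclosing structure */
	struct example_hba *hba =
		hlist_entry_safe(node, struct example_hba, cpuhp);

	return hba ? 0 : -ENODEV;
}

static int example_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	return 0;
}

static int example_cpuhp_register(struct example_hba *hba)
{
	int ret;

	/* One-time state setup; the _multi variant allows many instances */
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "example:online",
				      example_cpu_online,
				      example_cpu_offline);
	if (ret < 0)
		return ret;
	example_cpuhp_state = ret;

	/* _nocalls: do not run the callback for already-online CPUs */
	return cpuhp_state_add_instance_nocalls(example_cpuhp_state,
						&hba->cpuhp);
}
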
11139 /**
11140  * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
11141  * @phba: pointer to lpfc hba data structure.
11142  *
11143  * This routine is invoked to enable the MSI-X interrupt vectors for a
11144  * device with SLI-4 interface spec.
11145  *
11146  * Return codes
11147  * 0 - successful
11148  * other values - error
11149  **/
11150 static int
11151 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
11152 {
11153         int vectors, rc, index;
11154         char *name;
11155 
11156         /* Set up MSI-X multi-message vectors */
11157         vectors = phba->cfg_irq_chann;
11158 
11159         rc = pci_alloc_irq_vectors(phba->pcidev,
11160                                 1,
11161                                 vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
11162         if (rc < 0) {
11163                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11164                                 "0484 PCI enable MSI-X failed (%d)\n", rc);
11165                 goto vec_fail_out;
11166         }
11167         vectors = rc;
11168 
11169         /* Assign MSI-X vectors to interrupt handlers */
11170         for (index = 0; index < vectors; index++) {
11171                 name = phba->sli4_hba.hba_eq_hdl[index].handler_name;
11172                 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
11173                 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
11174                          LPFC_DRIVER_HANDLER_NAME"%d", index);
11175 
11176                 phba->sli4_hba.hba_eq_hdl[index].idx = index;
11177                 phba->sli4_hba.hba_eq_hdl[index].phba = phba;
11178                 rc = request_irq(pci_irq_vector(phba->pcidev, index),
11179                          &lpfc_sli4_hba_intr_handler, 0,
11180                          name,
11181                          &phba->sli4_hba.hba_eq_hdl[index]);
11182                 if (rc) {
11183                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11184                                         "0486 MSI-X fast-path (%d) "
11185                                         "request_irq failed (%d)\n", index, rc);
11186                         goto cfg_fail_out;
11187                 }
11188         }
11189 
11190         if (vectors != phba->cfg_irq_chann) {
11191                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11192                                 "3238 Reducing IO channels to match number of "
11193                                 "MSI-X vectors, requested %d got %d\n",
11194                                 phba->cfg_irq_chann, vectors);
11195                 if (phba->cfg_irq_chann > vectors)
11196                         phba->cfg_irq_chann = vectors;
11197                 if (phba->nvmet_support && (phba->cfg_nvmet_mrq > vectors))
11198                         phba->cfg_nvmet_mrq = vectors;
11199         }
11200 
11201         return rc;
11202 
11203 cfg_fail_out:
11204         /* free the irq already requested */
11205         for (--index; index >= 0; index--)
11206                 free_irq(pci_irq_vector(phba->pcidev, index),
11207                                 &phba->sli4_hba.hba_eq_hdl[index]);
11208 
11209         /* Unconfigure MSI-X capability structure */
11210         pci_free_irq_vectors(phba->pcidev);
11211 
11212 vec_fail_out:
11213         return rc;
11214 }
11215 
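/*
 * A self-contained sketch (assumed names, not driver code) of the MSI-X
 * pattern implemented above: allocate a vector range, request one IRQ
 * per vector, and unwind in reverse order on failure. The caller is
 * expected to scale its queue count to the returned vector count, just
 * as lpfc_sli4_enable_msix() trims cfg_irq_chann.
 */
#include <linux/pci.h>
#include <linux/interrupt.h>

static irqreturn_t example_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_enable_msix(struct pci_dev *pdev, void **handles,
			       int want)
{
	int vectors, i, rc;

	/* May return fewer vectors than requested (min 1, max want) */
	vectors = pci_alloc_irq_vectors(pdev, 1, want,
					PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (vectors < 0)
		return vectors;

	for (i = 0; i < vectors; i++) {
		rc = request_irq(pci_irq_vector(pdev, i), example_isr, 0,
				 "example", handles[i]);
		if (rc)
			goto unwind;
	}
	return vectors;

unwind:
	/* Free only the IRQs already requested, newest first */
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), handles[i]);
	pci_free_irq_vectors(pdev);
	return rc;
}
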
11216 /**
11217  * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
11218  * @phba: pointer to lpfc hba data structure.
11219  *
11220  * This routine is invoked to enable the MSI interrupt mode for a device
11221  * with SLI-4 interface spec. The kernel function pci_alloc_irq_vectors()
11222  * is called to enable the MSI vector. The device driver is responsible
11223  * for calling request_irq() to register the MSI vector with an interrupt
11224  * handler, which is done in this function.
11225  *
11226  * Return codes
11227  *      0 - successful
11228  *      other values - error
11229  **/
11230 static int
11231 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
11232 {
11233         int rc, index;
11234 
11235         rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
11236                                    PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
11237         if (rc > 0)
11238                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11239                                 "0487 PCI enable MSI mode success.\n");
11240         else {
11241                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11242                                 "0488 PCI enable MSI mode failed (%d)\n", rc);
11243                 return rc ? rc : -1;
11244         }
11245 
11246         rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
11247                          0, LPFC_DRIVER_NAME, phba);
11248         if (rc) {
11249                 pci_free_irq_vectors(phba->pcidev);
11250                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11251                                 "0490 MSI request_irq failed (%d)\n", rc);
11252                 return rc;
11253         }
11254 
11255         for (index = 0; index < phba->cfg_irq_chann; index++) {
11256                 phba->sli4_hba.hba_eq_hdl[index].idx = index;
11257                 phba->sli4_hba.hba_eq_hdl[index].phba = phba;
11258         }
11259 
11260         return 0;
11261 }
11262 
11263 /**
11264  * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
11265  * @phba: pointer to lpfc hba data structure.
11266  *
11267  * This routine is invoked to enable the device interrupt and associate
11268  * the driver's interrupt handler(s) with interrupt vector(s) for a device
11269  * with SLI-4 interface spec. Depending on the interrupt mode configured
11270  * for the driver, it will fall back from the configured interrupt mode to
11271  * an interrupt mode supported by the platform, kernel, and device, in
11272  * the order of:
11273  * MSI-X -> MSI -> INTx.
11274  *
11275  * Return codes
11276  *      2 (MSI-X), 1 (MSI), or 0 (INTx) - the interrupt mode enabled
11277  *      LPFC_INTR_ERROR - failed to enable any interrupt mode
11278  **/
11279 static uint32_t
11280 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
11281 {
11282         uint32_t intr_mode = LPFC_INTR_ERROR;
11283         int retval, idx;
11284 
11285         if (cfg_mode == 2) {
11286                 /* Now, try to enable MSI-X interrupt mode */
11287                 retval = lpfc_sli4_enable_msix(phba);
11288                 if (!retval) {
11289                         /* Indicate initialization to MSI-X mode */
11290                         phba->intr_type = MSIX;
11291                         intr_mode = 2;
11292                 }
11293         }
11298 
11299         /* Fallback to MSI if MSI-X initialization failed */
11300         if (cfg_mode >= 1 && phba->intr_type == NONE) {
11301                 retval = lpfc_sli4_enable_msi(phba);
11302                 if (!retval) {
11303                         /* Indicate initialization to MSI mode */
11304                         phba->intr_type = MSI;
11305                         intr_mode = 1;
11306                 }
11307         }
11308 
11309         /* Fallback to INTx if both MSI-X/MSI initialization failed */
11310         if (phba->intr_type == NONE) {
11311                 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
11312                                      IRQF_SHARED, LPFC_DRIVER_NAME, phba);
11313                 if (!retval) {
11314                         struct lpfc_hba_eq_hdl *eqhdl;
11315 
11316                         /* Indicate initialization to INTx mode */
11317                         phba->intr_type = INTx;
11318                         intr_mode = 0;
11319 
11320                         for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11321                                 eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
11322                                 eqhdl->idx = idx;
11323                                 eqhdl->phba = phba;
11324                         }
11325                 }
11326         }
11327         return intr_mode;
11328 }
11329 
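/*
 * A condensed sketch (illustrative names only) of the fallback order
 * implemented above: try MSI-X first, then MSI, then legacy shared
 * INTx, reporting which mode was enabled. pci_free_irq_vectors() is
 * safe to call even when no vectors were allocated.
 */
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/errno.h>

static int example_enable_intr(struct pci_dev *pdev, void *dev_id,
			       irq_handler_t handler)
{
	/* MSI-X preferred; a single vector suffices for this sketch */
	if (pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX) > 0 &&
	    !request_irq(pci_irq_vector(pdev, 0), handler, 0,
			 "example", dev_id))
		return 2;	/* MSI-X */

	pci_free_irq_vectors(pdev);
	if (pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI) > 0 &&
	    !request_irq(pci_irq_vector(pdev, 0), handler, 0,
			 "example", dev_id))
		return 1;	/* MSI */

	pci_free_irq_vectors(pdev);
	/* Legacy INTx must be requested as a shared line */
	if (!request_irq(pdev->irq, handler, IRQF_SHARED,
			 "example", dev_id))
		return 0;	/* INTx */

	return -ENODEV;
}
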
11330 /**
11331  * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
11332  * @phba: pointer to lpfc hba data structure.
11333  *
11334  * This routine is invoked to disable device interrupt and disassociate
11335  * the driver's interrupt handler(s) from interrupt vector(s) for a device
11336  * with SLI-4 interface spec. Depending on the interrupt mode, the driver
11337  * will release the interrupt vector(s) for the message signaled interrupt.
11338  **/
11339 static void
11340 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
11341 {
11342         /* Disable the currently initialized interrupt mode */
11343         if (phba->intr_type == MSIX) {
11344                 int index;
11345 
11346                 /* Free up MSI-X multi-message vectors */
11347                 for (index = 0; index < phba->cfg_irq_chann; index++) {
11348                         irq_set_affinity_hint(
11349                                 pci_irq_vector(phba->pcidev, index),
11350                                 NULL);
11351                         free_irq(pci_irq_vector(phba->pcidev, index),
11352                                         &phba->sli4_hba.hba_eq_hdl[index]);
11353                 }
11354         } else {
11355                 free_irq(phba->pcidev->irq, phba);
11356         }
11357 
11358         pci_free_irq_vectors(phba->pcidev);
11359 
11360         /* Reset interrupt management states */
11361         phba->intr_type = NONE;
11362         phba->sli.slistat.sli_intr = 0;
11363 }
11364 
11365 /**
11366  * lpfc_unset_hba - Unset SLI3 hba device initialization
11367  * @phba: pointer to lpfc hba data structure.
11368  *
11369  * This routine is invoked to unset the HBA device initialization steps
11370  * performed on a device with SLI-3 interface spec.
11371  **/
11372 static void
11373 lpfc_unset_hba(struct lpfc_hba *phba)
11374 {
11375         struct lpfc_vport *vport = phba->pport;
11376         struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
11377 
11378         spin_lock_irq(shost->host_lock);
11379         vport->load_flag |= FC_UNLOADING;
11380         spin_unlock_irq(shost->host_lock);
11381 
11382         kfree(phba->vpi_bmask);
11383         kfree(phba->vpi_ids);
11384 
11385         lpfc_stop_hba_timers(phba);
11386 
11387         phba->pport->work_port_events = 0;
11388 
11389         lpfc_sli_hba_down(phba);
11390 
11391         lpfc_sli_brdrestart(phba);
11392 
11393         lpfc_sli_disable_intr(phba);
11394 
11395         return;
11396 }
11397 
11398 /**
11399  * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
11400  * @phba: Pointer to HBA context object.
11401  *
11402  * This function is called in the SLI4 code path to wait for the device's
11403  * busy XRI exchanges to complete. It checks the XRI exchange busy state
11404  * of outstanding FCP and ELS I/Os every 10ms for up to 10 seconds;
11405  * after that, it checks every 30 seconds, logs an error message, and
11406  * waits indefinitely. Only when all XRI exchanges have completed does
11407  * the driver unload proceed with invoking the function reset ioctl
11408  * mailbox command to the CNA and the rest of the driver unload
11409  * resource release.
11410  **/
11411 static void
11412 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
11413 {
11414         struct lpfc_sli4_hdw_queue *qp;
11415         int idx, ccnt;
11416         int wait_time = 0;
11417         int io_xri_cmpl = 1;
11418         int nvmet_xri_cmpl = 1;
11419         int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
11420 
11421         /* Driver just aborted IOs during the hba_unset process.  Pause
11422          * here to give the HBA time to complete the IO and get entries
11423          * into the abts lists.
11424          */
11425         msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
11426 
11427         /* Wait for NVME pending IO to flush back to transport. */
11428         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
11429                 lpfc_nvme_wait_for_io_drain(phba);
11430 
11431         ccnt = 0;
11432         for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
11433                 qp = &phba->sli4_hba.hdwq[idx];
11434                 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
11435                 if (!io_xri_cmpl) /* if list is NOT empty */
11436                         ccnt++;
11437         }
11438         if (ccnt)
11439                 io_xri_cmpl = 0;
11440 
11441         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11442                 nvmet_xri_cmpl =
11443                         list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
11444         }
11445 
11446         while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
11447                 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
11448                         if (!nvmet_xri_cmpl)
11449                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11450                                                 "6424 NVMET XRI exchange busy "
11451                                                 "wait time: %d seconds.\n",
11452                                                 wait_time/1000);
11453                         if (!io_xri_cmpl)
11454                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11455                                                 "6100 IO XRI exchange busy "
11456                                                 "wait time: %d seconds.\n",
11457                                                 wait_time/1000);
11458                         if (!els_xri_cmpl)
11459                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11460                                                 "2878 ELS XRI exchange busy "
11461                                                 "wait time: %d seconds.\n",
11462                                                 wait_time/1000);
11463                         msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
11464                         wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
11465                 } else {
11466                         msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
11467                         wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
11468                 }
11469 
11470                 ccnt = 0;
11471                 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
11472                         qp = &phba->sli4_hba.hdwq[idx];
11473                         io_xri_cmpl = list_empty(
11474                             &qp->lpfc_abts_io_buf_list);
11475                         if (!io_xri_cmpl) /* if list is NOT empty */
11476                                 ccnt++;
11477                 }
11478                 if (ccnt)
11479                         io_xri_cmpl = 0;
11480 
11481                 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11482                         nvmet_xri_cmpl = list_empty(
11483                                 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
11484                 }
11485                 els_xri_cmpl =
11486                         list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
11487 
11488         }
11489 }
11490 
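/*
 * A generic sketch (assumed constants and callback, not driver code) of
 * the two-tier wait above: poll at a fast interval until a soft timeout,
 * then drop to a slow interval and log while waiting indefinitely.
 */
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/printk.h>

#define EXAMPLE_FAST_MS		10	/* like LPFC_XRI_EXCH_BUSY_WAIT_T1 */
#define EXAMPLE_SLOW_MS		30000	/* like LPFC_XRI_EXCH_BUSY_WAIT_T2 */
#define EXAMPLE_SOFT_TMO_MS	10000	/* switch-over point */

static void example_wait_quiesce(bool (*all_done)(void *), void *ctx)
{
	unsigned int waited_ms = 0;

	while (!all_done(ctx)) {
		if (waited_ms > EXAMPLE_SOFT_TMO_MS) {
			pr_err("still busy after %u seconds\n",
			       waited_ms / 1000);
			msleep(EXAMPLE_SLOW_MS);
			waited_ms += EXAMPLE_SLOW_MS;
		} else {
			msleep(EXAMPLE_FAST_MS);
			waited_ms += EXAMPLE_FAST_MS;
		}
	}
}
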
11491 /**
11492  * lpfc_sli4_hba_unset - Unset the fcoe hba
11493  * @phba: Pointer to HBA context object.
11494  *
11495  * This function is called in the SLI4 code path to reset the HBA's FCoE
11496  * function. The caller is not required to hold any lock. This routine
11497  * issues the PCI function reset mailbox command to reset the FCoE function.
11498  * At the end of the function, it calls the lpfc_hba_down_post function to
11499  * free any pending commands.
11500  **/
11501 static void
11502 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
11503 {
11504         int wait_cnt = 0;
11505         LPFC_MBOXQ_t *mboxq;
11506         struct pci_dev *pdev = phba->pcidev;
11507 
11508         lpfc_stop_hba_timers(phba);
11509         if (phba->pport)
11510                 phba->sli4_hba.intr_enable = 0;
11511 
11512         /*
11513          * Gracefully wait out the potential current outstanding asynchronous
11514          * mailbox command.
11515          */
11516 
11517         /* First, block any pending async mailbox command from posted */
11518         spin_lock_irq(&phba->hbalock);
11519         phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
11520         spin_unlock_irq(&phba->hbalock);
11521         /* Now, trying to wait it out if we can */
11522         while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
11523                 msleep(10);
11524                 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
11525                         break;
11526         }
11527         /* Forcefully release the outstanding mailbox command if timed out */
11528         if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
11529                 spin_lock_irq(&phba->hbalock);
11530                 mboxq = phba->sli.mbox_active;
11531                 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
11532                 __lpfc_mbox_cmpl_put(phba, mboxq);
11533                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11534                 phba->sli.mbox_active = NULL;
11535                 spin_unlock_irq(&phba->hbalock);
11536         }
11537 
11538         /* Abort all iocbs associated with the hba */
11539         lpfc_sli_hba_iocb_abort(phba);
11540 
11541         /* Wait for completion of device XRI exchange busy */
11542         lpfc_sli4_xri_exchange_busy_wait(phba);
11543 
11544         /* per-phba callback de-registration for hotplug event */
11545         lpfc_cpuhp_remove(phba);
11546 
11547         /* Disable PCI subsystem interrupt */
11548         lpfc_sli4_disable_intr(phba);
11549 
11550         /* Disable SR-IOV if enabled */
11551         if (phba->cfg_sriov_nr_virtfn)
11552                 pci_disable_sriov(pdev);
11553 
11554         /* Stopping the kthread triggers work_done one more time */
11555         kthread_stop(phba->worker_thread);
11556 
11557         /* Disable FW logging to host memory */
11558         lpfc_ras_stop_fwlog(phba);
11559 
11560         /* Unset the queues shared with the hardware then release all
11561          * allocated resources.
11562          */
11563         lpfc_sli4_queue_unset(phba);
11564         lpfc_sli4_queue_destroy(phba);
11565 
11566         /* Reset SLI4 HBA FCoE function */
11567         lpfc_pci_function_reset(phba);
11568 
11569         /* Free RAS DMA memory */
11570         if (phba->ras_fwlog.ras_enabled)
11571                 lpfc_sli4_ras_dma_free(phba);
11572 
11573         /* Stop the SLI4 device port */
11574         if (phba->pport)
11575                 phba->pport->work_port_events = 0;
11576 }
11577 
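/*
 * A sketch (illustrative types and flags) of the teardown idiom used in
 * lpfc_sli4_hba_unset() above: block new submissions under the lock,
 * poll a bounded number of times for the in-flight operation to drain,
 * then force-complete it if it never did.
 */
#include <linux/spinlock.h>
#include <linux/delay.h>

#define EXAMPLE_BLOCKED		0x1
#define EXAMPLE_ACTIVE		0x2
#define EXAMPLE_MAX_POLLS	100

struct example_port {
	spinlock_t lock;
	unsigned long flags;
};

static void example_quiesce(struct example_port *p)
{
	int polls = 0;

	/* 1. Stop new work from being posted */
	spin_lock_irq(&p->lock);
	p->flags |= EXAMPLE_BLOCKED;
	spin_unlock_irq(&p->lock);

	/* 2. Give the in-flight operation a bounded chance to finish */
	while (p->flags & EXAMPLE_ACTIVE) {
		msleep(10);
		if (++polls > EXAMPLE_MAX_POLLS)
			break;
	}

	/* 3. Force-complete if it never drained */
	if (p->flags & EXAMPLE_ACTIVE) {
		spin_lock_irq(&p->lock);
		p->flags &= ~EXAMPLE_ACTIVE;
		spin_unlock_irq(&p->lock);
	}
}
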
11578 /**
11579  * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
11580  * @phba: Pointer to HBA context object.
11581  * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
11582  *
11583  * This function is called in the SLI4 code path to read the port's
11584  * sli4 capabilities.
11585  *
11586  * This function may be called from any context that can block-wait
11587  * for the completion.  The expectation is that this routine is typically
11588  * called from probe_one or from the online routine.
11589  **/
11590 int
11591 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
11592 {
11593         int rc;
11594         struct lpfc_mqe *mqe;
11595         struct lpfc_pc_sli4_params *sli4_params;
11596         uint32_t mbox_tmo;
11597 
11598         rc = 0;
11599         mqe = &mboxq->u.mqe;
11600 
11601         /* Read the port's SLI4 Parameters port capabilities */
11602         lpfc_pc_sli4_params(mboxq);
11603         if (!phba->sli4_hba.intr_enable)
11604                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11605         else {
11606                 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
11607                 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
11608         }
11609 
11610         if (unlikely(rc))
11611                 return 1;
11612 
11613         sli4_params = &phba->sli4_hba.pc_sli4_params;
11614         sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
11615         sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
11616         sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
11617         sli4_params->featurelevel_1 = bf_get(featurelevel_1,
11618                                              &mqe->un.sli4_params);
11619         sli4_params->featurelevel_2 = bf_get(featurelevel_2,
11620                                              &mqe->un.sli4_params);
11621         sli4_params->proto_types = mqe->un.sli4_params.word3;
11622         sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
11623         sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
11624         sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
11625         sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
11626         sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
11627         sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
11628         sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
11629         sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
11630         sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
11631         sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
11632         sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
11633         sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
11634         sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
11635         sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
11636         sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
11637         sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
11638         sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
11639         sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
11640         sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
11641         sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
11642 
11643         /* Make sure that sge_supp_len can be handled by the driver */
11644         if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
11645                 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
11646 
11647         return rc;
11648 }
11649 
11650 /**
11651  * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
11652  * @phba: Pointer to HBA context object.
11653  * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
11654  *
11655  * This function is called in the SLI4 code path to read the port's
11656  * sli4 capabilities.
11657  *
11658  * This function may be called from any context that can block-wait
11659  * for the completion.  The expectation is that this routine is typically
11660  * called from probe_one or from the online routine.
11661  **/
11662 int
11663 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
11664 {
11665         int rc;
11666         struct lpfc_mqe *mqe = &mboxq->u.mqe;
11667         struct lpfc_pc_sli4_params *sli4_params;
11668         uint32_t mbox_tmo;
11669         int length;
11670         bool exp_wqcq_pages = true;
11671         struct lpfc_sli4_parameters *mbx_sli4_parameters;
11672 
11673         /*
11674          * By default, the driver assumes the SLI4 port requires RPI
11675          * header postings.  The SLI4_PARAM response will correct this
11676          * assumption.
11677          */
11678         phba->sli4_hba.rpi_hdrs_in_use = 1;
11679 
11680         /* Read the port's SLI4 Config Parameters */
11681         length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
11682                   sizeof(struct lpfc_sli4_cfg_mhdr));
11683         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
11684                          LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
11685                          length, LPFC_SLI4_MBX_EMBED);
11686         if (!phba->sli4_hba.intr_enable)
11687                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11688         else {
11689                 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
11690                 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
11691         }
11692         if (unlikely(rc))
11693                 return rc;
11694         sli4_params = &phba->sli4_hba.pc_sli4_params;
11695         mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
11696         sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
11697         sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
11698         sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
11699         sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
11700                                              mbx_sli4_parameters);
11701         sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
11702                                              mbx_sli4_parameters);
11703         if (bf_get(cfg_phwq, mbx_sli4_parameters))
11704                 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
11705         else
11706                 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
11707         sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
11708         sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
11709         sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
11710         sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
11711         sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
11712         sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
11713         sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
11714         sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
11715         sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
11716         sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
11717         sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
11718         sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
11719                                             mbx_sli4_parameters);
11720         sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
11721         sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
11722                                            mbx_sli4_parameters);
11723         phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
11724         phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
11725 
11726         /* Check for Extended Pre-Registered SGL support */
11727         phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);
11728 
11729         /* Check for firmware nvme support */
11730         rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
11731                      bf_get(cfg_xib, mbx_sli4_parameters));
11732 
11733         if (rc) {
11734                 /* Save this to indicate the Firmware supports NVME */
11735                 sli4_params->nvme = 1;
11736 
11737                 /* Firmware NVME support, check driver FC4 NVME support */
11738                 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
11739                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
11740                                         "6133 Disabling NVME support: "
11741                                         "FC4 type not supported: x%x\n",
11742                                         phba->cfg_enable_fc4_type);
11743                         goto fcponly;
11744                 }
11745         } else {
11746                 /* No firmware NVME support, check driver FC4 NVME support */
11747                 sli4_params->nvme = 0;
11748                 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11749                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
11750                                         "6101 Disabling NVME support: Not "
11751                                         "supported by firmware (%d %d) x%x\n",
11752                                         bf_get(cfg_nvme, mbx_sli4_parameters),
11753                                         bf_get(cfg_xib, mbx_sli4_parameters),
11754                                         phba->cfg_enable_fc4_type);
11755 fcponly:
11756                         phba->nvme_support = 0;
11757                         phba->nvmet_support = 0;
11758                         phba->cfg_nvmet_mrq = 0;
11759                         phba->cfg_nvme_seg_cnt = 0;
11760 
11761                         /* If no FC4 type support, move to just SCSI support */
11762                         if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
11763                                 return -ENODEV;
11764                         phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
11765                 }
11766         }
11767 
11768         /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
11769          * accommodate 512K and 1M IOs in a single nvme buf and supply
11770          * enough NVME LS iocb buffers for larger connectivity counts.
11771          */
11772         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11773                 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
11774                 phba->cfg_iocb_cnt = 5;
11775         }
11776 
11777         /* Only embed PBDE for if_type 6; PBDE support requires xib to be set */
11778         if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
11779             LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters)))
11780                 phba->cfg_enable_pbde = 0;
11781 
11782         /*
11783          * To support Suppress Response feature we must satisfy 3 conditions.
11784          * lpfc_suppress_rsp module parameter must be set (default).
11785          * In SLI4-Parameters Descriptor:
11786          * Extended Inline Buffers (XIB) must be supported.
11787          * Suppress Response IU Not Supported (SRIUNS) must NOT be supported
11788          * (double negative).
11789          */
11790         if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
11791             !(bf_get(cfg_nosr, mbx_sli4_parameters)))
11792                 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
11793         else
11794                 phba->cfg_suppress_rsp = 0;
11795 
11796         if (bf_get(cfg_eqdr, mbx_sli4_parameters))
11797                 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
11798 
11799         /* Make sure that sge_supp_len can be handled by the driver */
11800         if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
11801                 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
11802 
11803         /*
11804          * Check whether the adapter supports an embedded copy of the
11805          * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
11806          * to use this option, 128-byte WQEs must be used.
11807          */
11808         if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
11809                 phba->fcp_embed_io = 1;
11810         else
11811                 phba->fcp_embed_io = 0;
11812 
11813         lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
11814                         "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
11815                         bf_get(cfg_xib, mbx_sli4_parameters),
11816                         phba->cfg_enable_pbde,
11817                         phba->fcp_embed_io, phba->nvme_support,
11818                         phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
11819 
11820         if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
11821             LPFC_SLI_INTF_IF_TYPE_2) &&
11822             (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
11823                  LPFC_SLI_INTF_FAMILY_LNCR_A0))
11824                 exp_wqcq_pages = false;
11825 
11826         if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
11827             (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
11828             exp_wqcq_pages &&
11829             (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
11830                 phba->enab_exp_wqcq_pages = 1;
11831         else
11832                 phba->enab_exp_wqcq_pages = 0;
11833         /*
11834          * Check if the SLI port supports MDS Diagnostics
11835          */
11836         if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
11837                 phba->mds_diags_support = 1;
11838         else
11839                 phba->mds_diags_support = 0;
11840 
11841         /*
11842          * Check if the SLI port supports NSLER
11843          */
11844         if (bf_get(cfg_nsler, mbx_sli4_parameters))
11845                 phba->nsler = 1;
11846         else
11847                 phba->nsler = 0;
11848 
11849         return 0;
11850 }
11851 
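/*
 * The bf_get()/bf_set() accessors used throughout the parameter parsing
 * above follow a SHIFT/MASK/WORD token-pasting convention. Below is a
 * self-contained re-creation of the idea with illustrative field and
 * macro names (see lpfc_hw4.h for the driver's actual definitions); it
 * is written as a userspace snippet so it can be compiled and run.
 */
#include <stdint.h>
#include <assert.h>

/* Each field defines its shift, mask, and the word it lives in */
#define cfg_example_SHIFT	4
#define cfg_example_MASK	0x0000000f
#define cfg_example_WORD	word0

struct example_params {
	uint32_t word0;
};

#define ex_bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define ex_bf_set(name, ptr, val) \
	((ptr)->name##_WORD = (((val) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))

int main(void)
{
	struct example_params p = { 0 };

	/* Store 0xA into the 4-bit field and read it back */
	ex_bf_set(cfg_example, &p, 0xA);
	assert(ex_bf_get(cfg_example, &p) == 0xA);
	return 0;
}
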
11852 /**
11853  * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
11854  * @pdev: pointer to PCI device
11855  * @pid: pointer to PCI device identifier
11856  *
11857  * This routine is to be called to attach a device with SLI-3 interface spec
11858  * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
11859  * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
11860  * information of the device and driver to see if the driver states that it can
11861  * support this kind of device. If the match is successful, the driver core
11862  * invokes this routine. If this routine determines it can claim the HBA, it
11863  * does all the initialization that it needs to do to handle the HBA properly.
11864  *
11865  * Return code
11866  *      0 - driver can claim the device
11867  *      negative value - driver can not claim the device
11868  **/
11869 static int
11870 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
11871 {
11872         struct lpfc_hba   *phba;
11873         struct lpfc_vport *vport = NULL;
11874         struct Scsi_Host  *shost = NULL;
11875         int error;
11876         uint32_t cfg_mode, intr_mode;
11877 
11878         /* Allocate memory for HBA structure */
11879         phba = lpfc_hba_alloc(pdev);
11880         if (!phba)
11881                 return -ENOMEM;
11882 
11883         /* Perform generic PCI device enabling operation */
11884         error = lpfc_enable_pci_dev(phba);
11885         if (error)
11886                 goto out_free_phba;
11887 
11888         /* Set up SLI API function jump table for PCI-device group-0 HBAs */
11889         error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
11890         if (error)
11891                 goto out_disable_pci_dev;
11892 
11893         /* Set up SLI-3 specific device PCI memory space */
11894         error = lpfc_sli_pci_mem_setup(phba);
11895         if (error) {
11896                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11897                                 "1402 Failed to set up pci memory space.\n");
11898                 goto out_disable_pci_dev;
11899         }
11900 
11901         /* Set up SLI-3 specific device driver resources */
11902         error = lpfc_sli_driver_resource_setup(phba);
11903         if (error) {
11904                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11905                                 "1404 Failed to set up driver resource.\n");
11906                 goto out_unset_pci_mem_s3;
11907         }
11908 
11909         /* Initialize and populate the iocb list per host */
11910 
11911         error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
11912         if (error) {
11913                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11914                                 "1405 Failed to initialize iocb list.\n");
11915                 goto out_unset_driver_resource_s3;
11916         }
11917 
11918         /* Set up common device driver resources */
11919         error = lpfc_setup_driver_resource_phase2(phba);
11920         if (error) {
11921                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11922                                 "1406 Failed to set up driver resource.\n");
11923                 goto out_free_iocb_list;
11924         }
11925 
11926         /* Get the default values for Model Name and Description */
11927         lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
11928 
11929         /* Create SCSI host to the physical port */
11930         error = lpfc_create_shost(phba);
11931         if (error) {
11932                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11933                                 "1407 Failed to create scsi host.\n");
11934                 goto out_unset_driver_resource;
11935         }
11936 
11937         /* Configure sysfs attributes */
11938         vport = phba->pport;
11939         error = lpfc_alloc_sysfs_attr(vport);
11940         if (error) {
11941                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11942                                 "1476 Failed to allocate sysfs attr\n");
11943                 goto out_destroy_shost;
11944         }
11945 
11946         shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
11947         /* Now, trying to enable interrupt and bring up the device */
11948         cfg_mode = phba->cfg_use_msi;
11949         while (true) {
11950                 /* Put device to a known state before enabling interrupt */
11951                 lpfc_stop_port(phba);
11952                 /* Configure and enable interrupt */
11953                 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
11954                 if (intr_mode == LPFC_INTR_ERROR) {
11955                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11956                                         "0431 Failed to enable interrupt.\n");
11957                         error = -ENODEV;
11958                         goto out_free_sysfs_attr;
11959                 }
11960                 /* SLI-3 HBA setup */
11961                 if (lpfc_sli_hba_setup(phba)) {
11962                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11963                                         "1477 Failed to set up hba\n");
11964                         error = -ENODEV;
11965                         goto out_remove_device;
11966                 }
11967 
11968                 /* Wait 50ms for the interrupts of previous mailbox commands */
11969                 msleep(50);
11970                 /* Check active interrupts on message signaled interrupts */
11971                 if (intr_mode == 0 ||
11972                     phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
11973                         /* Log the current active interrupt mode */
11974                         phba->intr_mode = intr_mode;
11975                         lpfc_log_intr_mode(phba, intr_mode);
11976                         break;
11977                 } else {
11978                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11979                                         "0447 Configure interrupt mode (%d) "
11980                                         "failed active interrupt test.\n",
11981                                         intr_mode);
11982                         /* Disable the current interrupt mode */
11983                         lpfc_sli_disable_intr(phba);
11984                         /* Try next level of interrupt mode */
11985                         cfg_mode = --intr_mode;
11986                 }
11987         }
11988 
11989         /* Perform post initialization setup */
11990         lpfc_post_init_setup(phba);
11991 
11992         /* Check if there are static vports to be created. */
11993         lpfc_create_static_vport(phba);
11994 
11995         return 0;
11996 
11997 out_remove_device:
11998         lpfc_unset_hba(phba);
11999 out_free_sysfs_attr:
12000         lpfc_free_sysfs_attr(vport);
12001 out_destroy_shost:
12002         lpfc_destroy_shost(phba);
12003 out_unset_driver_resource:
12004         lpfc_unset_driver_resource_phase2(phba);
12005 out_free_iocb_list:
12006         lpfc_free_iocb_list(phba);
12007 out_unset_driver_resource_s3:
12008         lpfc_sli_driver_resource_unset(phba);
12009 out_unset_pci_mem_s3:
12010         lpfc_sli_pci_mem_unset(phba);
12011 out_disable_pci_dev:
12012         lpfc_disable_pci_dev(phba);
12013         if (shost)
12014                 scsi_host_put(shost);
12015 out_free_phba:
12016         lpfc_hba_free(phba);
12017         return error;
12018 }
12019 
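/*
 * A compact, self-contained sketch of the probe-time unwind ladder used
 * in lpfc_pci_probe_one_s3() above: each setup step gets a matching
 * teardown label, and a failure jumps to the label that undoes exactly
 * what has been done so far. The step functions are stand-in stubs,
 * not driver APIs.
 */
#include <linux/pci.h>

static int example_step_a(struct pci_dev *pdev) { return 0; }
static int example_step_b(struct pci_dev *pdev) { return 0; }
static int example_step_c(struct pci_dev *pdev) { return 0; }
static void example_undo_a(struct pci_dev *pdev) { }
static void example_undo_b(struct pci_dev *pdev) { }

static int example_probe(struct pci_dev *pdev)
{
	int rc;

	rc = example_step_a(pdev);	/* e.g. enable the PCI device */
	if (rc)
		goto out;
	rc = example_step_b(pdev);	/* e.g. map BARs */
	if (rc)
		goto undo_a;
	rc = example_step_c(pdev);	/* e.g. create the SCSI host */
	if (rc)
		goto undo_b;
	return 0;

undo_b:
	example_undo_b(pdev);
undo_a:
	example_undo_a(pdev);
out:
	return rc;
}
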
12020 /**
12021  * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
12022  * @pdev: pointer to PCI device
12023  *
12024  * This routine is to be called to detach a device with SLI-3 interface
12025  * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
12026  * removed from PCI bus, it performs all the necessary cleanup for the HBA
12027  * device to be removed from the PCI subsystem properly.
12028  **/
12029 static void
12030 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
12031 {
12032         struct Scsi_Host  *shost = pci_get_drvdata(pdev);
12033         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
12034         struct lpfc_vport **vports;
12035         struct lpfc_hba   *phba = vport->phba;
12036         int i;
12037 
12038         spin_lock_irq(&phba->hbalock);
12039         vport->load_flag |= FC_UNLOADING;
12040         spin_unlock_irq(&phba->hbalock);
12041 
12042         lpfc_free_sysfs_attr(vport);
12043 
12044         /* Release all the vports against this physical port */
12045         vports = lpfc_create_vport_work_array(phba);
12046         if (vports != NULL)
12047                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
12048                         if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
12049                                 continue;
12050                         fc_vport_terminate(vports[i]->fc_vport);
12051                 }
12052         lpfc_destroy_vport_work_array(phba, vports);
12053 
12054         /* Remove FC host and then SCSI host with the physical port */
12055         fc_remove_host(shost);
12056         scsi_remove_host(shost);
12057 
12058         lpfc_cleanup(vport);
12059 
12060         /*
12061          * Bring down the SLI Layer. This step disable all interrupts,
12062          * clears the rings, discards all mailbox commands, and resets
12063          * the HBA.
12064          */
12065 
12066         /* HBA interrupt will be disabled after this call */
12067         lpfc_sli_hba_down(phba);
12068         /* Stopping the kthread triggers work_done one more time */
12069         kthread_stop(phba->worker_thread);
12070         /* Final cleanup of txcmplq and reset the HBA */
12071         lpfc_sli_brdrestart(phba);
12072 
12073         kfree(phba->vpi_bmask);
12074         kfree(phba->vpi_ids);
12075 
12076         lpfc_stop_hba_timers(phba);
12077         spin_lock_irq(&phba->port_list_lock);
12078         list_del_init(&vport->listentry);
12079         spin_unlock_irq(&phba->port_list_lock);
12080 
12081         lpfc_debugfs_terminate(vport);
12082 
12083         /* Disable SR-IOV if enabled */
12084         if (phba->cfg_sriov_nr_virtfn)
12085                 pci_disable_sriov(pdev);
12086 
12087         /* Disable interrupt */
12088         lpfc_sli_disable_intr(phba);
12089 
12090         scsi_host_put(shost);
12091 
12092         /*
12093          * Call scsi_free before mem_free since scsi bufs are released to their
12094          * corresponding pools here.
12095          */
12096         lpfc_scsi_free(phba);
12097         lpfc_free_iocb_list(phba);
12098 
12099         lpfc_mem_free_all(phba);
12100 
12101         dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
12102                           phba->hbqslimp.virt, phba->hbqslimp.phys);
12103 
12104         /* Free resources associated with SLI2 interface */
12105         dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
12106                           phba->slim2p.virt, phba->slim2p.phys);
12107 
12108         /* unmap adapter SLIM and Control Registers */
12109         iounmap(phba->ctrl_regs_memmap_p);
12110         iounmap(phba->slim_memmap_p);
12111 
12112         lpfc_hba_free(phba);
12113 
12114         pci_release_mem_regions(pdev);
12115         pci_disable_device(pdev);
12116 }
12117 
12118 /**
12119  * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
12120  * @pdev: pointer to PCI device
12121  * @msg: power management message
12122  *
12123  * This routine is to be called from the kernel's PCI subsystem to support
12124  * system Power Management (PM) to device with SLI-3 interface spec. When
12125  * PM invokes this method, it quiesces the device by stopping the driver's
12126  * worker thread for the device, turning off the device's interrupt and
12127  * DMA, and bringing the device offline. Note that the driver implements
12128  * only the minimum PM requirements of a power-aware driver for
12129  * suspend/resume: all possible PM messages (SUSPEND, HIBERNATE, FREEZE)
12130  * passed to the suspend() method are treated as SUSPEND, and the driver
12131  * fully reinitializes its device during the resume() method call. Thus
12132  * the driver sets the device to the PCI_D3hot state in PCI config space
12133  * instead of setting it according to the @msg provided by the PM.
12134  *
12135  * Return code
12136  *      0 - driver suspended the device
12137  *      Error otherwise
12138  **/
12139 static int
12140 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
12141 {
12142         struct Scsi_Host *shost = pci_get_drvdata(pdev);
12143         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12144 
12145         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12146                         "0473 PCI device Power Management suspend.\n");
12147 
12148         /* Bring down the device */
12149         lpfc_offline_prep(phba, LPFC_MBX_WAIT);
12150         lpfc_offline(phba);
12151         kthread_stop(phba->worker_thread);
12152 
12153         /* Disable interrupt from device */
12154         lpfc_sli_disable_intr(phba);
12155 
12156         /* Save device state to PCI config space */
12157         pci_save_state(pdev);
12158         pci_set_power_state(pdev, PCI_D3hot);
12159 
12160         return 0;
12161 }
12162 
12163 /**
12164  * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
12165  * @pdev: pointer to PCI device
12166  *
12167  * This routine is to be called from the kernel's PCI subsystem to support
12168  * system Power Management (PM) to device with SLI-3 interface spec. When PM
12169  * invokes this method, it restores the device's PCI config space state and
12170  * fully reinitializes the device and brings it online. Note that the
12171  * driver implements only the minimum PM requirements of a power-aware
12172  * driver for suspend/resume: all possible PM messages (SUSPEND,
12173  * HIBERNATE, FREEZE) passed to the suspend() method are treated as
12174  * SUSPEND, and the driver fully reinitializes its device during the
12175  * resume() method call. The device is set to PCI_D0 directly in PCI
12176  * config space before restoring the state.
12177  *
12178  * Return code
12179  *      0 - driver resumed the device
12180  *      Error otherwise
12181  **/
12182 static int
12183 lpfc_pci_resume_one_s3(struct pci_dev *pdev)
12184 {
12185         struct Scsi_Host *shost = pci_get_drvdata(pdev);
12186         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12187         uint32_t intr_mode;
12188         int error;
12189 
12190         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12191                         "0452 PCI device Power Management resume.\n");
12192 
12193         /* Restore device state from PCI config space */
12194         pci_set_power_state(pdev, PCI_D0);
12195         pci_restore_state(pdev);
12196 
12197         /*
12198          * pci_restore_state() clears the device's saved_state flag, so the
12199          * restored state must be saved again.
12200          */
12201         pci_save_state(pdev);
12202 
12203         if (pdev->is_busmaster)
12204                 pci_set_master(pdev);
12205 
12206         /* Startup the kernel thread for this host adapter. */
12207         phba->worker_thread = kthread_run(lpfc_do_work, phba,
12208                                         "lpfc_worker_%d", phba->brd_no);
12209         if (IS_ERR(phba->worker_thread)) {
12210                 error = PTR_ERR(phba->worker_thread);
12211                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12212                                 "0434 PM resume failed to start worker "
12213                                 "thread: error=x%x.\n", error);
12214                 return error;
12215         }
12216 
12217         /* Configure and enable interrupt */
12218         intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
12219         if (intr_mode == LPFC_INTR_ERROR) {
12220                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12221                                 "0430 PM resume Failed to enable interrupt\n");
12222                 return -EIO;
12223         } else
12224                 phba->intr_mode = intr_mode;
12225 
12226         /* Restart HBA and bring it online */
12227         lpfc_sli_brdrestart(phba);
12228         lpfc_online(phba);
12229 
12230         /* Log the current active interrupt mode */
12231         lpfc_log_intr_mode(phba, phba->intr_mode);
12232 
12233         return 0;
12234 }
12235 
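/*
 * A minimal sketch (illustrative callbacks, not driver code) of the
 * legacy PCI power management pairing above: suspend saves config space
 * and enters D3hot; resume returns to D0, restores config space, and
 * saves it again because pci_restore_state() clears saved_state.
 */
#include <linux/pci.h>

static int example_suspend(struct pci_dev *pdev, pm_message_t msg)
{
	/* Quiesce the device here (stop threads, disable IRQs) */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int example_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* pci_restore_state() clears saved_state, so save it again */
	pci_save_state(pdev);
	/* Re-enable IRQs and bring the device back online here */
	return 0;
}
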
12236 /**
12237  * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
12238  * @phba: pointer to lpfc hba data structure.
12239  *
12240  * This routine is called to prepare the SLI3 device for PCI slot recover. It
12241  * aborts all the outstanding SCSI I/Os to the pci device.
12242  **/
12243 static void
12244 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
12245 {
12246         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12247                         "2723 PCI channel I/O abort preparing for recovery\n");
12248 
12249         /*
12250          * There may be errored I/Os through the HBA; abort all I/Os on the
12251          * txcmplq and let the SCSI mid-layer retry them to recover.
12252          */
12253         lpfc_sli_abort_fcp_rings(phba);
12254 }
12255 
12256 /**
12257  * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
12258  * @phba: pointer to lpfc hba data structure.
12259  *
12260  * This routine is called to prepare the SLI3 device for PCI slot reset. It
12261  * disables the device interrupt and pci device, and aborts the internal FCP
12262  * pending I/Os.
12263  **/
12264 static void
12265 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
12266 {
12267         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12268                         "2710 PCI channel disable preparing for reset\n");
12269 
12270         /* Block any management I/Os to the device */
12271         lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
12272 
12273         /* Block all SCSI devices' I/Os on the host */
12274         lpfc_scsi_dev_block(phba);
12275 
12276         /* Flush all driver's outstanding SCSI I/Os as we are to reset */
12277         lpfc_sli_flush_io_rings(phba);
12278 
12279         /* stop all timers */
12280         lpfc_stop_hba_timers(phba);
12281 
12282         /* Disable interrupt and pci device */
12283         lpfc_sli_disable_intr(phba);
12284         pci_disable_device(phba->pcidev);
12285 }
12286 
12287 /**
12288  * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
12289  * @phba: pointer to lpfc hba data structure.
12290  *
12291  * This routine is called to prepare the SLI3 device for PCI slot permanently
12292  * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
12293  * pending I/Os.
12294  **/
12295 static void
12296 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
12297 {
12298         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12299                         "2711 PCI channel permanent disable for failure\n");
12300         /* Block all SCSI devices' I/Os on the host */
12301         lpfc_scsi_dev_block(phba);
12302 
12303         /* stop all timers */
12304         lpfc_stop_hba_timers(phba);
12305 
12306         /* Clean up all driver's outstanding SCSI I/Os */
12307         lpfc_sli_flush_io_rings(phba);
12308 }
12309 
12310 /**
12311  * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
12312  * @pdev: pointer to PCI device.
12313  * @state: the current PCI connection state.
12314  *
12315  * This routine is called from the PCI subsystem for I/O error handling to
12316  * device with SLI-3 interface spec. This function is called by the PCI
12317  * subsystem after a PCI bus error affecting this device has been detected.
12318  * When this function is invoked, it will need to stop all the I/Os and
12319  * interrupt(s) to the device. Once that is done, it will return
12320  * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
12321  * as desired.
12322  *
12323  * Return codes
12324  *      PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
12325  *      PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
12326  *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
12327  **/
12328 static pci_ers_result_t
12329 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
12330 {
12331         struct Scsi_Host *shost = pci_get_drvdata(pdev);
12332         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12333 
12334         switch (state) {
12335         case pci_channel_io_normal:
12336                 /* Non-fatal error, prepare for recovery */
12337                 lpfc_sli_prep_dev_for_recover(phba);
12338                 return PCI_ERS_RESULT_CAN_RECOVER;
12339         case pci_channel_io_frozen:
12340                 /* Fatal error, prepare for slot reset */
12341                 lpfc_sli_prep_dev_for_reset(phba);
12342                 return PCI_ERS_RESULT_NEED_RESET;
12343         case pci_channel_io_perm_failure:
12344                 /* Permanent failure, prepare for device down */
12345                 lpfc_sli_prep_dev_for_perm_failure(phba);
12346                 return PCI_ERS_RESULT_DISCONNECT;
12347         default:
12348                 /* Unknown state, prepare and request slot reset */
12349                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12350                                 "0472 Unknown PCI error state: x%x\n", state);
12351                 lpfc_sli_prep_dev_for_reset(phba);
12352                 return PCI_ERS_RESULT_NEED_RESET;
12353         }
12354 }
12355 
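/*
 * A bare-bones sketch of the AER callback mapping implemented above:
 * translate the channel state into the recovery action the PCI core
 * should take next. Handler names are illustrative.
 */
#include <linux/pci.h>

static pci_ers_result_t example_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;	/* recoverable */
	case pci_channel_io_frozen:
		return PCI_ERS_RESULT_NEED_RESET;	/* request slot reset */
	case pci_channel_io_perm_failure:
		return PCI_ERS_RESULT_DISCONNECT;	/* give the device up */
	default:
		return PCI_ERS_RESULT_NEED_RESET;
	}
}

static const struct pci_error_handlers example_err_handler = {
	.error_detected	= example_error_detected,
	/* .slot_reset and .resume would complete the recovery */
};
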
12356 /**
12357  * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
12358  * @pdev: pointer to PCI device.
12359  *
12360  * This routine is called from the PCI subsystem for error handling to
12361  * device with SLI-3 interface spec. This is called after PCI bus has been
12362  * reset to restart the PCI card from scratch, as if from a cold-boot.
12363  * During the PCI subsystem error recovery, after driver returns
12364  * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
12365  * recovery and then call this routine before calling the .resume method
12366  * to recover the device. This function will initialize the HBA device,
12367  * enable the interrupt, but will only put the HBA into an offline state
12368  * without passing any I/O traffic.
12369  *
12370  * Return codes
12371  *      PCI_ERS_RESULT_RECOVERED - the device has been recovered
12372  *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
12373  */
12374 static pci_ers_result_t
12375 lpfc_io_slot_reset_s3(struct pci_dev *pdev)
12376 {
12377         struct Scsi_Host *shost = pci_get_drvdata(pdev);
12378         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12379         struct lpfc_sli *psli = &phba->sli;
12380         uint32_t intr_mode;
12381 
12382         dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
12383         if (pci_enable_device_mem(pdev)) {
12384                 printk(KERN_ERR "lpfc: Cannot re-enable "
12385                         "PCI device after reset.\n");
12386                 return PCI_ERS_RESULT_DISCONNECT;
12387         }
12388 
12389         pci_restore_state(pdev);
12390 
12391         /*
12392          * pci_restore_state() clears the device's saved_state flag, so
12393          * save the restored state again.
12394          */
12395         pci_save_state(pdev);
12396 
12397         if (pdev->is_busmaster)
12398                 pci_set_master(pdev);
12399 
12400         spin_lock_irq(&phba->hbalock);
12401         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
12402         spin_unlock_irq(&phba->hbalock);
12403 
12404         /* Configure and enable interrupt */
12405         intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
12406         if (intr_mode == LPFC_INTR_ERROR) {
12407                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12408                                 "0427 Cannot re-enable interrupt after "
12409                                 "slot reset.\n");
12410                 return PCI_ERS_RESULT_DISCONNECT;
12411         } else
12412                 phba->intr_mode = intr_mode;
12413 
12414         /* Take device offline, it will perform cleanup */
12415         lpfc_offline_prep(phba, LPFC_MBX_WAIT);
12416         lpfc_offline(phba);
12417         lpfc_sli_brdrestart(phba);
12418 
12419         /* Log the current active interrupt mode */
12420         lpfc_log_intr_mode(phba, phba->intr_mode);
12421 
12422         return PCI_ERS_RESULT_RECOVERED;
12423 }
12424 
12425 /**
12426  * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
12427  * @pdev: pointer to PCI device
12428  *
12429  * This routine is called from the PCI subsystem for error handling on a
12430  * device with the SLI-3 interface spec. It is called when kernel error
12431  * recovery tells the lpfc driver that it is OK to resume normal PCI
12432  * operation after PCI bus error recovery. After this call, traffic can
12433  * start to flow from this device again.
12434  */
12435 static void
12436 lpfc_io_resume_s3(struct pci_dev *pdev)
12437 {
12438         struct Scsi_Host *shost = pci_get_drvdata(pdev);
12439         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12440 
12441         /* Bring device online, it will be no-op for non-fatal error resume */
12442         lpfc_online(phba);
12443 }
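
/*
 * An illustrative sketch, not part of the driver: the three SLI-3 routines
 * above plug into the PCI AER recovery flow in the order
 * .error_detected -> .slot_reset -> .resume. The driver's actual table,
 * lpfc_err_handler near the bottom of this file, dispatches to them through
 * the SLI-3/SLI-4 wrapper routines. A minimal standalone table would look
 * like this, guarded out of the build:
 */
#if 0
static const struct pci_error_handlers lpfc_s3_err_handler_sketch = {
	.error_detected	= lpfc_io_error_detected_s3,
	.slot_reset	= lpfc_io_slot_reset_s3,
	.resume		= lpfc_io_resume_s3,
};
#endif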
12444 
12445 /**
12446  * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
12447  * @phba: pointer to lpfc hba data structure.
12448  *
12449  * returns the number of ELS/CT IOCBs to reserve
12450  **/
12451 int
12452 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
12453 {
12454         int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
12455 
12456         if (phba->sli_rev == LPFC_SLI_REV4) {
12457                 if (max_xri <= 100)
12458                         return 10;
12459                 else if (max_xri <= 256)
12460                         return 25;
12461                 else if (max_xri <= 512)
12462                         return 50;
12463                 else if (max_xri <= 1024)
12464                         return 100;
12465                 else if (max_xri <= 1536)
12466                         return 150;
12467                 else if (max_xri <= 2048)
12468                         return 200;
12469                 else
12470                         return 250;
12471         } else
12472                 return 0;
12473 }
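
/*
 * An illustrative sketch, guarded out of the build: the tier mapping above
 * expressed as a table walk. For example, max_xri = 800 falls in the
 * 513..1024 tier and reserves 100 ELS/CT IOCBs. The helper and table names
 * here are hypothetical.
 */
#if 0
static const struct {
	int xri_ceiling;
	int els_iocb_cnt;
} lpfc_els_iocb_tier_sketch[] = {
	{  100,  10 }, {  256,  25 }, {  512,  50 },
	{ 1024, 100 }, { 1536, 150 }, { 2048, 200 },
};

static int lpfc_els_iocb_cnt_sketch(int max_xri)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(lpfc_els_iocb_tier_sketch); i++)
		if (max_xri <= lpfc_els_iocb_tier_sketch[i].xri_ceiling)
			return lpfc_els_iocb_tier_sketch[i].els_iocb_cnt;
	return 250;	/* anything above 2048 XRIs */
}
#endif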
12474 
12475 /**
12476  * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
12477  * @phba: pointer to lpfc hba data structure.
12478  *
12479  * returns the number of ELS/CT + NVMET IOCBs to reserve
12480  **/
12481 int
12482 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
12483 {
12484         int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
12485 
12486         if (phba->nvmet_support)
12487                 max_xri += LPFC_NVMET_BUF_POST;
12488         return max_xri;
12489 }
12490 
12491 
12492 static void
12493 lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
12494         uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
12495         const struct firmware *fw)
12496 {
12497         if ((offset == ADD_STATUS_FW_NOT_SUPPORTED) ||
12498             (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
12499              magic_number != MAGIC_NUMER_G6) ||
12500             (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
12501              magic_number != MAGIC_NUMER_G7))
12502                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12503                         "3030 This firmware version is not supported on "
12504                         "this HBA model. Device:%x Magic:%x Type:%x "
12505                         "ID:%x Size %d %zd\n",
12506                         phba->pcidev->device, magic_number, ftype, fid,
12507                         fsize, fw->size);
12508         else
12509                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12510                         "3022 FW Download failed. Device:%x Magic:%x Type:%x "
12511                         "ID:%x Size %d %zd\n",
12512                         phba->pcidev->device, magic_number, ftype, fid,
12513                         fsize, fw->size);
12514 }
12515 
12516 
12517 /**
12518  * lpfc_write_firmware - attempt to write a firmware image to the port
12519  * @fw: pointer to firmware image returned from request_firmware.
12520  * @context: opaque pointer to the lpfc hba data structure.
12521  *
12522  **/
12523 static void
12524 lpfc_write_firmware(const struct firmware *fw, void *context)
12525 {
12526         struct lpfc_hba *phba = (struct lpfc_hba *)context;
12527         char fwrev[FW_REV_STR_SIZE];
12528         struct lpfc_grp_hdr *image;
12529         struct list_head dma_buffer_list;
12530         int i, rc = 0;
12531         struct lpfc_dmabuf *dmabuf, *next;
12532         uint32_t offset = 0, temp_offset = 0;
12533         uint32_t magic_number, ftype, fid, fsize;
12534 
12535         /* It can be null in no-wait mode, sanity check */
12536         if (!fw) {
12537                 rc = -ENXIO;
12538                 goto out;
12539         }
12540         image = (struct lpfc_grp_hdr *)fw->data;
12541 
12542         magic_number = be32_to_cpu(image->magic_number);
12543         ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
12544         fid = bf_get_be32(lpfc_grp_hdr_id, image);
12545         fsize = be32_to_cpu(image->size);
12546 
12547         INIT_LIST_HEAD(&dma_buffer_list);
12548         lpfc_decode_firmware_rev(phba, fwrev, 1);
12549         if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
12550                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12551                                 "3023 Updating Firmware, Current Version:%s "
12552                                 "New Version:%s\n",
12553                                 fwrev, image->revision);
12554                 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
12555                         dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
12556                                          GFP_KERNEL);
12557                         if (!dmabuf) {
12558                                 rc = -ENOMEM;
12559                                 goto release_out;
12560                         }
12561                         dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
12562                                                           SLI4_PAGE_SIZE,
12563                                                           &dmabuf->phys,
12564                                                           GFP_KERNEL);
12565                         if (!dmabuf->virt) {
12566                                 kfree(dmabuf);
12567                                 rc = -ENOMEM;
12568                                 goto release_out;
12569                         }
12570                         list_add_tail(&dmabuf->list, &dma_buffer_list);
12571                 }
12572                 while (offset < fw->size) {
12573                         temp_offset = offset;
12574                         list_for_each_entry(dmabuf, &dma_buffer_list, list) {
12575                                 if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
12576                                         memcpy(dmabuf->virt,
12577                                                fw->data + temp_offset,
12578                                                fw->size - temp_offset);
12579                                         temp_offset = fw->size;
12580                                         break;
12581                                 }
12582                                 memcpy(dmabuf->virt, fw->data + temp_offset,
12583                                        SLI4_PAGE_SIZE);
12584                                 temp_offset += SLI4_PAGE_SIZE;
12585                         }
12586                         rc = lpfc_wr_object(phba, &dma_buffer_list,
12587                                     (fw->size - offset), &offset);
12588                         if (rc) {
12589                                 lpfc_log_write_firmware_error(phba, offset,
12590                                         magic_number, ftype, fid, fsize, fw);
12591                                 goto release_out;
12592                         }
12593                 }
12594                 rc = offset;
12595         } else
12596                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12597                                 "3029 Skipped Firmware update, Current "
12598                                 "Version:%s New Version:%s\n",
12599                                 fwrev, image->revision);
12600 
12601 release_out:
12602         list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
12603                 list_del(&dmabuf->list);
12604                 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
12605                                   dmabuf->virt, dmabuf->phys);
12606                 kfree(dmabuf);
12607         }
12608         release_firmware(fw);
12609 out:
12610         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12611                         "3024 Firmware update done: %d.\n", rc);
12612         return;
12613 }
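
/*
 * An illustrative sketch, guarded out of the build, of the chunking
 * performed above: each pass fills the pre-allocated list of SLI4_PAGE_SIZE
 * DMA buffers from the firmware image, with the final buffer taking whatever
 * tail remains. The helper name is hypothetical; the caller loops until the
 * returned offset reaches the image size.
 */
#if 0
static uint32_t lpfc_fill_dma_pages_sketch(struct list_head *bufs,
					   const uint8_t *data, size_t size,
					   uint32_t offset)
{
	struct lpfc_dmabuf *d;
	size_t chunk;

	list_for_each_entry(d, bufs, list) {
		/* full page, or the remaining tail of the image */
		chunk = min_t(size_t, SLI4_PAGE_SIZE, size - offset);
		memcpy(d->virt, data + offset, chunk);
		offset += chunk;
		if (offset == size)
			break;
	}
	return offset;
}
#endif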
12614 
12615 /**
12616  * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
12617  * @phba: pointer to lpfc hba data structure.
12618  * @fw_upgrade: INT_FW_UPGRADE (asynchronous) or RUN_FW_UPGRADE (synchronous)
12619  *
12620  * This routine performs a Linux generic firmware upgrade on a device that supports it.
12621  **/
12622 int
12623 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
12624 {
12625         char file_name[ELX_MODEL_NAME_SIZE];
12626         int ret;
12627         const struct firmware *fw;
12628 
12629         /* Only supported on SLI4 interface type 2 for now */
12630         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
12631             LPFC_SLI_INTF_IF_TYPE_2)
12632                 return -EPERM;
12633 
12634         snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
12635 
12636         if (fw_upgrade == INT_FW_UPGRADE) {
12637                 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
12638                                         file_name, &phba->pcidev->dev,
12639                                         GFP_KERNEL, (void *)phba,
12640                                         lpfc_write_firmware);
12641         } else if (fw_upgrade == RUN_FW_UPGRADE) {
12642                 ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
12643                 if (!ret)
12644                         lpfc_write_firmware(fw, (void *)phba);
12645         } else {
12646                 ret = -EINVAL;
12647         }
12648 
12649         return ret;
12650 }
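
/*
 * Hypothetical call sites, guarded out of the build. INT_FW_UPGRADE is
 * fire-and-forget: request_firmware_nowait() returns immediately and
 * lpfc_write_firmware() later runs as the completion callback.
 * RUN_FW_UPGRADE blocks in request_firmware() and writes the image before
 * returning.
 */
#if 0
	int rc;

	/* from probe, asynchronous: */
	lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);

	/* from a management path, synchronous: */
	rc = lpfc_sli4_request_firmware_update(phba, RUN_FW_UPGRADE);
#endif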
12651 
12652 /**
12653  * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
12654  * @pdev: pointer to PCI device
12655  * @pid: pointer to PCI device identifier
12656  *
12657  * This routine is called from the kernel's PCI subsystem for a device with
12658  * the SLI-4 interface spec. When an Emulex HBA with the SLI-4 interface spec
12659  * is presented on the PCI bus, the kernel PCI subsystem looks at the PCI
12660  * device-specific information of the device and driver to see whether the
12661  * driver can support this kind of device. If the match is successful, the
12662  * driver core invokes this routine. If this routine determines it can claim
12663  * the HBA, it does all the initialization needed to handle the HBA
12664  * properly.
12665  *
12666  * Return code
12667  *      0 - driver can claim the device
12668  *      negative value - driver can not claim the device
12669  **/
12670 static int
12671 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
12672 {
12673         struct lpfc_hba   *phba;
12674         struct lpfc_vport *vport = NULL;
12675         struct Scsi_Host  *shost = NULL;
12676         int error;
12677         uint32_t cfg_mode, intr_mode;
12678 
12679         /* Allocate memory for HBA structure */
12680         phba = lpfc_hba_alloc(pdev);
12681         if (!phba)
12682                 return -ENOMEM;
12683 
12684         /* Perform generic PCI device enabling operation */
12685         error = lpfc_enable_pci_dev(phba);
12686         if (error)
12687                 goto out_free_phba;
12688 
12689         /* Set up SLI API function jump table for PCI-device group-1 HBAs */
12690         error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
12691         if (error)
12692                 goto out_disable_pci_dev;
12693 
12694         /* Set up SLI-4 specific device PCI memory space */
12695         error = lpfc_sli4_pci_mem_setup(phba);
12696         if (error) {
12697                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12698                                 "1410 Failed to set up pci memory space.\n");
12699                 goto out_disable_pci_dev;
12700         }
12701 
12702         /* Set up SLI-4 Specific device driver resources */
12703         error = lpfc_sli4_driver_resource_setup(phba);
12704         if (error) {
12705                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12706                                 "1412 Failed to set up driver resource.\n");
12707                 goto out_unset_pci_mem_s4;
12708         }
12709 
12710         INIT_LIST_HEAD(&phba->active_rrq_list);
12711         INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
12712 
12713         /* Set up common device driver resources */
12714         error = lpfc_setup_driver_resource_phase2(phba);
12715         if (error) {
12716                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12717                                 "1414 Failed to set up driver resource.\n");
12718                 goto out_unset_driver_resource_s4;
12719         }
12720 
12721         /* Get the default values for Model Name and Description */
12722         lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
12723 
12724         /* Now, trying to enable interrupt and bring up the device */
12725         cfg_mode = phba->cfg_use_msi;
12726 
12727         /* Put device to a known state before enabling interrupt */
12728         phba->pport = NULL;
12729         lpfc_stop_port(phba);
12730 
12731         /* Configure and enable interrupt */
12732         intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
12733         if (intr_mode == LPFC_INTR_ERROR) {
12734                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12735                                 "0426 Failed to enable interrupt.\n");
12736                 error = -ENODEV;
12737                 goto out_unset_driver_resource;
12738         }
12739         /* Default to single EQ for non-MSI-X */
12740         if (phba->intr_type != MSIX) {
12741                 phba->cfg_irq_chann = 1;
12742                 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
12743                         if (phba->nvmet_support)
12744                                 phba->cfg_nvmet_mrq = 1;
12745                 }
12746         }
12747         lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
12748 
12749         /* Create SCSI host to the physical port */
12750         error = lpfc_create_shost(phba);
12751         if (error) {
12752                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12753                                 "1415 Failed to create scsi host.\n");
12754                 goto out_disable_intr;
12755         }
12756         vport = phba->pport;
12757         shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
12758 
12759         /* Configure sysfs attributes */
12760         error = lpfc_alloc_sysfs_attr(vport);
12761         if (error) {
12762                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12763                                 "1416 Failed to allocate sysfs attr\n");
12764                 goto out_destroy_shost;
12765         }
12766 
12767         /* Set up SLI-4 HBA */
12768         if (lpfc_sli4_hba_setup(phba)) {
12769                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12770                                 "1421 Failed to set up hba\n");
12771                 error = -ENODEV;
12772                 goto out_free_sysfs_attr;
12773         }
12774 
12775         /* Log the current active interrupt mode */
12776         phba->intr_mode = intr_mode;
12777         lpfc_log_intr_mode(phba, intr_mode);
12778 
12779         /* Perform post initialization setup */
12780         lpfc_post_init_setup(phba);
12781 
12782         /* FW NVME support was factored into the FC4 type earlier in the
12783          * driver load, so no separate nvme_support check is needed here.
12784          */
12785         if (phba->nvmet_support == 0) {
12786                 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
12787                         /* Create NVME binding with nvme_fc_transport. This
12788                          * ensures the vport is initialized.  If the localport
12789                          * create fails, it should not unload the driver to
12790                          * support field issues.
12791                          */
12792                         error = lpfc_nvme_create_localport(vport);
12793                         if (error) {
12794                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12795                                                 "6004 NVME registration "
12796                                                 "failed, error x%x\n",
12797                                                 error);
12798                         }
12799                 }
12800         }
12801 
12802         /* check for firmware upgrade or downgrade */
12803         if (phba->cfg_request_firmware_upgrade)
12804                 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
12805 
12806         /* Check if there are static vports to be created. */
12807         lpfc_create_static_vport(phba);
12808 
12809         /* Enable RAS FW log support */
12810         lpfc_sli4_ras_setup(phba);
12811 
12812         INIT_LIST_HEAD(&phba->poll_list);
12813         cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
12814 
12815         return 0;
12816 
12817 out_free_sysfs_attr:
12818         lpfc_free_sysfs_attr(vport);
12819 out_destroy_shost:
12820         lpfc_destroy_shost(phba);
12821 out_disable_intr:
12822         lpfc_sli4_disable_intr(phba);
12823 out_unset_driver_resource:
12824         lpfc_unset_driver_resource_phase2(phba);
12825 out_unset_driver_resource_s4:
12826         lpfc_sli4_driver_resource_unset(phba);
12827 out_unset_pci_mem_s4:
12828         lpfc_sli4_pci_mem_unset(phba);
12829 out_disable_pci_dev:
12830         lpfc_disable_pci_dev(phba);
12831         if (shost)
12832                 scsi_host_put(shost);
12833 out_free_phba:
12834         lpfc_hba_free(phba);
12835         return error;
12836 }
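
/*
 * The error labels above unwind in exact reverse order of acquisition
 * (sysfs attrs, shost, interrupts, phase-2 resources, SLI-4 resources, PCI
 * memory, PCI device, HBA structure), the usual kernel goto-unwind idiom:
 * each failure jumps to the label that releases everything acquired so far
 * and then falls through the remaining labels.
 */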
12837 
12838 /**
12839  * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
12840  * @pdev: pointer to PCI device
12841  *
12842  * This routine is called from the kernel's PCI subsystem for a device with
12843  * the SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
12844  * removed from PCI bus, it performs all the necessary cleanup for the HBA
12845  * device to be removed from the PCI subsystem properly.
12846  **/
12847 static void
12848 lpfc_pci_remove_one_s4(struct pci_dev *pdev)
12849 {
12850         struct Scsi_Host *shost = pci_get_drvdata(pdev);
12851         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
12852         struct lpfc_vport **vports;
12853         struct lpfc_hba *phba = vport->phba;
12854         int i;
12855 
12856         /* Mark the device unloading flag */
12857         spin_lock_irq(&phba->hbalock);
12858         vport->load_flag |= FC_UNLOADING;
12859         spin_unlock_irq(&phba->hbalock);
12860 
12861         /* Free the HBA sysfs attributes */
12862         lpfc_free_sysfs_attr(vport);
12863 
12864         /* Release all the vports against this physical port */
12865         vports = lpfc_create_vport_work_array(phba);
12866         if (vports != NULL)
12867                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
12868                         if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
12869                                 continue;
12870                         fc_vport_terminate(vports[i]->fc_vport);
12871                 }
12872         lpfc_destroy_vport_work_array(phba, vports);
12873 
12874         /* Remove FC host and then SCSI host with the physical port */
12875         fc_remove_host(shost);
12876         scsi_remove_host(shost);
12877 
12878         /* Perform ndlp cleanup on the physical port.  The nvme and nvmet
12879          * localports are destroyed afterwards to clean up all transport memory.
12880          */
12881         lpfc_cleanup(vport);
12882         lpfc_nvmet_destroy_targetport(phba);
12883         lpfc_nvme_destroy_localport(vport);
12884 
12885         /* De-allocate multi-XRI pools */
12886         if (phba->cfg_xri_rebalancing)
12887                 lpfc_destroy_multixri_pools(phba);
12888 
12889         /*
12890          * Bring down the SLI Layer. This step disables all interrupts,
12891          * clears the rings, discards all mailbox commands, and resets
12892          * the HBA FCoE function.
12893          */
12894         lpfc_debugfs_terminate(vport);
12895 
12896         lpfc_stop_hba_timers(phba);
12897         spin_lock_irq(&phba->port_list_lock);
12898         list_del_init(&vport->listentry);
12899         spin_unlock_irq(&phba->port_list_lock);
12900 
12901         /* Perform scsi free before driver resource_unset since scsi
12902          * buffers are released to their corresponding pools here.
12903          */
12904         lpfc_io_free(phba);
12905         lpfc_free_iocb_list(phba);
12906         lpfc_sli4_hba_unset(phba);
12907 
12908         lpfc_unset_driver_resource_phase2(phba);
12909         lpfc_sli4_driver_resource_unset(phba);
12910 
12911         /* Unmap adapter Control and Doorbell registers */
12912         lpfc_sli4_pci_mem_unset(phba);
12913 
12914         /* Release PCI resources and disable device's PCI function */
12915         scsi_host_put(shost);
12916         lpfc_disable_pci_dev(phba);
12917 
12918         /* Finally, free the driver's device data structure */
12919         lpfc_hba_free(phba);
12920 
12921         return;
12922 }
12923 
12924 /**
12925  * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
12926  * @pdev: pointer to PCI device
12927  * @msg: power management message
12928  *
12929  * This routine is called from the kernel's PCI subsystem to support system
12930  * Power Management (PM) on a device with the SLI-4 interface spec. When PM
12931  * invokes this method, it quiesces the device by stopping the driver's
12932  * worker thread for the device, turning off the device's interrupt and DMA,
12933  * and bringing the device offline. Note that the driver implements only the
12934  * minimum PM requirements of a power-aware driver for suspend/resume: all
12935  * possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to the suspend()
12936  * method are treated as SUSPEND, and the driver fully reinitializes its
12937  * device during the resume() method call. Accordingly, the driver sets the
12938  * device to the PCI_D3hot state in PCI config space instead of setting it
12939  * according to the @msg provided by the PM core.
12940  *
12941  * Return code
12942  *      0 - driver suspended the device
12943  *      Error otherwise
12944  **/
12945 static int
12946 lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
12947 {
12948         struct Scsi_Host *shost = pci_get_drvdata(pdev);
12949         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12950 
12951         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12952                         "2843 PCI device Power Management suspend.\n");
12953 
12954         /* Bring down the device */
12955         lpfc_offline_prep(phba, LPFC_MBX_WAIT);
12956         lpfc_offline(phba);
12957         kthread_stop(phba->worker_thread);
12958 
12959         /* Disable interrupt from device */
12960         lpfc_sli4_disable_intr(phba);
12961         lpfc_sli4_queue_destroy(phba);
12962 
12963         /* Save device state to PCI config space */
12964         pci_save_state(pdev);
12965         pci_set_power_state(pdev, PCI_D3hot);
12966 
12967         return 0;
12968 }
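
/*
 * Note: lpfc_pci_resume_one_s4() below broadly reverses these steps:
 * PCI_D0 plus state restore, restart the worker thread, re-enable
 * interrupts, then restart the HBA and bring it back online.
 */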
12969 
12970 /**
12971  * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
12972  * @pdev: pointer to PCI device
12973  *
12974  * This routine is called from the kernel's PCI subsystem to support system
12975  * Power Management (PM) on a device with the SLI-4 interface spec. When PM
12976  * invokes this method, it restores the device's PCI config space state and
12977  * fully reinitializes the device and brings it online. Note that the driver
12978  * implements only the minimum PM requirements of a power-aware driver for
12979  * suspend/resume: all possible PM messages (SUSPEND, HIBERNATE, FREEZE)
12980  * passed to the suspend() method are treated as SUSPEND, and the driver
12981  * fully reinitializes its device during the resume() method call, so the
12982  * device is set to PCI_D0 directly in PCI config space before restoring the
12983  * state.
12984  *
12985  * Return code
12986  *      0 - driver resumed the device
12987  *      Error otherwise
12988  **/
12989 static int
12990 lpfc_pci_resume_one_s4(struct pci_dev *pdev)
12991 {
12992         struct Scsi_Host *shost = pci_get_drvdata(pdev);
12993         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12994         uint32_t intr_mode;
12995         int error;
12996 
12997         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12998                         "0292 PCI device Power Management resume.\n");
12999 
13000         /* Restore device state from PCI config space */
13001         pci_set_power_state(pdev, PCI_D0);
13002         pci_restore_state(pdev);
13003 
13004         /*
13005          * pci_restore_state() clears the device's saved_state flag, so
13006          * save the restored state again.
13007          */
13008         pci_save_state(pdev);
13009 
13010         if (pdev->is_busmaster)
13011                 pci_set_master(pdev);
13012 
13013         /* Startup the kernel thread for this host adapter. */
13014         phba->worker_thread = kthread_run(lpfc_do_work, phba,
13015                                         "lpfc_worker_%d", phba->brd_no);
13016         if (IS_ERR(phba->worker_thread)) {
13017                 error = PTR_ERR(phba->worker_thread);
13018                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13019                                 "0293 PM resume failed to start worker "
13020                                 "thread: error=x%x.\n", error);
13021                 return error;
13022         }
13023 
13024         /* Configure and enable interrupt */
13025         intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
13026         if (intr_mode == LPFC_INTR_ERROR) {
13027                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13028                                 "0294 PM resume Failed to enable interrupt\n");
13029                 return -EIO;
13030         } else
13031                 phba->intr_mode = intr_mode;
13032 
13033         /* Restart HBA and bring it online */
13034         lpfc_sli_brdrestart(phba);
13035         lpfc_online(phba);
13036 
13037         /* Log the current active interrupt mode */
13038         lpfc_log_intr_mode(phba, phba->intr_mode);
13039 
13040         return 0;
13041 }
13042 
13043 /**
13044  * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
13045  * @phba: pointer to lpfc hba data structure.
13046  *
13047  * This routine is called to prepare the SLI4 device for PCI slot recover. It
13048  * aborts all the outstanding SCSI I/Os to the pci device.
13049  **/
13050 static void
13051 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
13052 {
13053         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13054                         "2828 PCI channel I/O abort preparing for recovery\n");
13055         /*
13056          * There may be errored I/Os through HBA, abort all I/Os on txcmplq
13057          * and let the SCSI mid-layer to retry them to recover.
13058          */
13059         lpfc_sli_abort_fcp_rings(phba);
13060 }
13061 
13062 /**
13063  * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
13064  * @phba: pointer to lpfc hba data structure.
13065  *
13066  * This routine is called to prepare the SLI4 device for PCI slot reset. It
13067  * disables the device interrupt and pci device, and aborts the internal FCP
13068  * pending I/Os.
13069  **/
13070 static void
13071 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
13072 {
13073         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13074                         "2826 PCI channel disable preparing for reset\n");
13075 
13076         /* Block any management I/Os to the device */
13077         lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
13078 
13079         /* Block all SCSI devices' I/Os on the host */
13080         lpfc_scsi_dev_block(phba);
13081 
13082         /* Flush all driver's outstanding I/Os as we are to reset */
13083         lpfc_sli_flush_io_rings(phba);
13084 
13085         /* stop all timers */
13086         lpfc_stop_hba_timers(phba);
13087 
13088         /* Disable interrupt and pci device */
13089         lpfc_sli4_disable_intr(phba);
13090         lpfc_sli4_queue_destroy(phba);
13091         pci_disable_device(phba->pcidev);
13092 }
13093 
13094 /**
13095  * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
13096  * @phba: pointer to lpfc hba data structure.
13097  *
13098  * This routine is called to prepare the SLI4 device for PCI slot permanently
13099  * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
13100  * pending I/Os.
13101  **/
13102 static void
13103 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
13104 {
13105         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13106                         "2827 PCI channel permanent disable for failure\n");
13107 
13108         /* Block all SCSI devices' I/Os on the host */
13109         lpfc_scsi_dev_block(phba);
13110 
13111         /* stop all timers */
13112         lpfc_stop_hba_timers(phba);
13113 
13114         /* Clean up all driver's outstanding I/Os */
13115         lpfc_sli_flush_io_rings(phba);
13116 }
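
/*
 * The three lpfc_sli4_prep_dev_for_*() helpers above form an escalation
 * ladder: recover only aborts outstanding FCP I/O and lets the SCSI
 * mid-layer retry; reset additionally blocks management and SCSI I/O,
 * flushes the rings, stops timers, and disables the interrupt and PCI
 * device; perm_failure quiesces and flushes everything ahead of a permanent
 * slot disable.
 */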
13117 
13118 /**
13119  * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
13120  * @pdev: pointer to PCI device.
13121  * @state: the current PCI connection state.
13122  *
13123  * This routine is called from the PCI subsystem for error handling on a
13124  * device with the SLI-4 interface spec. It is called by the PCI subsystem
13125  * after a PCI bus error affecting this device has been detected. When this
13126  * function is invoked, it needs to stop all the I/Os and interrupt(s) to
13127  * the device so that the PCI subsystem can perform proper recovery.
13128  *
13129  * Return codes
13130  *      PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
13131  *      PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
13132  *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
13133  **/
13134 static pci_ers_result_t
13135 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
13136 {
13137         struct Scsi_Host *shost = pci_get_drvdata(pdev);
13138         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13139 
13140         switch (state) {
13141         case pci_channel_io_normal:
13142                 /* Non-fatal error, prepare for recovery */
13143                 lpfc_sli4_prep_dev_for_recover(phba);
13144                 return PCI_ERS_RESULT_CAN_RECOVER;
13145         case pci_channel_io_frozen:
13146                 /* Fatal error, prepare for slot reset */
13147                 lpfc_sli4_prep_dev_for_reset(phba);
13148                 return PCI_ERS_RESULT_NEED_RESET;
13149         case pci_channel_io_perm_failure:
13150                 /* Permanent failure, prepare for device down */
13151                 lpfc_sli4_prep_dev_for_perm_failure(phba);
13152                 return PCI_ERS_RESULT_DISCONNECT;
13153         default:
13154                 /* Unknown state, prepare and request slot reset */
13155                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13156                                 "2825 Unknown PCI error state: x%x\n", state);
13157                 lpfc_sli4_prep_dev_for_reset(phba);
13158                 return PCI_ERS_RESULT_NEED_RESET;
13159         }
13160 }
13161 
13162 /**
13163  * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
13164  * @pdev: pointer to PCI device.
13165  *
13166  * This routine is called from the PCI subsystem for error handling on a
13167  * device with the SLI-4 interface spec. It is called after the PCI bus has
13168  * been reset to restart the PCI card from scratch, as if from a cold-boot.
13169  * During PCI subsystem error recovery, after the driver returns
13170  * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem performs the proper error
13171  * recovery and then calls this routine before calling the .resume method to
13172  * recover the device. This function initializes the HBA device and enables
13173  * the interrupt, but it only puts the HBA into an offline state without
13174  * passing any I/O traffic.
13175  *
13176  * Return codes
13177  *      PCI_ERS_RESULT_RECOVERED - the device has been recovered
13178  *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
13179  */
13180 static pci_ers_result_t
13181 lpfc_io_slot_reset_s4(struct pci_dev *pdev)
13182 {
13183         struct Scsi_Host *shost = pci_get_drvdata(pdev);
13184         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13185         struct lpfc_sli *psli = &phba->sli;
13186         uint32_t intr_mode;
13187 
13188         dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
13189         if (pci_enable_device_mem(pdev)) {
13190                 printk(KERN_ERR "lpfc: Cannot re-enable "
13191                         "PCI device after reset.\n");
13192                 return PCI_ERS_RESULT_DISCONNECT;
13193         }
13194 
13195         pci_restore_state(pdev);
13196 
13197         /*
13198          * As the new kernel behavior of pci_restore_state() API call clears
13199          * device saved_state flag, need to save the restored state again.
13200          */
13201         pci_save_state(pdev);
13202 
13203         if (pdev->is_busmaster)
13204                 pci_set_master(pdev);
13205 
13206         spin_lock_irq(&phba->hbalock);
13207         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
13208         spin_unlock_irq(&phba->hbalock);
13209 
13210         /* Configure and enable interrupt */
13211         intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
13212         if (intr_mode == LPFC_INTR_ERROR) {
13213                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13214                                 "2824 Cannot re-enable interrupt after "
13215                                 "slot reset.\n");
13216                 return PCI_ERS_RESULT_DISCONNECT;
13217         } else
13218                 phba->intr_mode = intr_mode;
13219 
13220         /* Log the current active interrupt mode */
13221         lpfc_log_intr_mode(phba, phba->intr_mode);
13222 
13223         return PCI_ERS_RESULT_RECOVERED;
13224 }
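
/*
 * Unlike the SLI-3 variant above, no lpfc_sli_brdrestart() is issued here:
 * the SLI-4 function reset is driven by a mailbox command that needs DMA
 * enabled, so the restart is deferred to lpfc_io_resume_s4() below.
 */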
13225 
13226 /**
13227  * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
13228  * @pdev: pointer to PCI device
13229  *
13230  * This routine is called from the PCI subsystem for error handling on a
13231  * device with the SLI-4 interface spec. It is called when kernel error
13232  * recovery tells the lpfc driver that it is OK to resume normal PCI
13233  * operation after PCI bus error recovery. After this call, traffic can
13234  * start to flow from this device again.
13235  **/
13236 static void
13237 lpfc_io_resume_s4(struct pci_dev *pdev)
13238 {
13239         struct Scsi_Host *shost = pci_get_drvdata(pdev);
13240         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13241 
13242         /*
13243          * In case of slot reset, as function reset is performed through
13244          * mailbox command which needs DMA to be enabled, this operation
13245          * has to be moved to the io resume phase. Taking device offline
13246          * will perform the necessary cleanup.
13247          */
13248         if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
13249                 /* Perform device reset */
13250                 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
13251                 lpfc_offline(phba);
13252                 lpfc_sli_brdrestart(phba);
13253                 /* Bring the device back online */
13254                 lpfc_online(phba);
13255         }
13256 }
13257 
13258 /**
13259  * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
13260  * @pdev: pointer to PCI device
13261  * @pid: pointer to PCI device identifier
13262  *
13263  * This routine is to be registered to the kernel's PCI subsystem. When an
13264  * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
13265  * at PCI device-specific information of the device and driver to see
13266  * whether the driver can support this kind of device. If the match is
13267  * successful, the driver core invokes this routine. This routine dispatches
13268  * the action to the proper SLI-3 or SLI-4 device probing routine, which will
13269  * do all the initialization that it needs to do to handle the HBA device
13270  * properly.
13271  *
13272  * Return code
13273  *      0 - driver can claim the device
13274  *      negative value - driver can not claim the device
13275  **/
13276 static int
13277 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
13278 {
13279         int rc;
13280         struct lpfc_sli_intf intf;
13281 
13282         if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
13283                 return -ENODEV;
13284 
13285         if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
13286             (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
13287                 rc = lpfc_pci_probe_one_s4(pdev, pid);
13288         else
13289                 rc = lpfc_pci_probe_one_s3(pdev, pid);
13290 
13291         return rc;
13292 }
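
/*
 * The dispatch above reads LPFC_SLI_INTF straight from PCI config space,
 * before any BAR is mapped or driver state exists, so the SLI-3 vs SLI-4
 * decision can be made this early. Only a valid register reporting
 * LPFC_SLI_INTF_REV_SLI4 routes to the SLI-4 probe; everything else takes
 * the SLI-3 path.
 */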
13293 
13294 /**
13295  * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
13296  * @pdev: pointer to PCI device
13297  *
13298  * This routine is to be registered to the kernel's PCI subsystem. When an
13299  * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
13300  * This routine dispatches the action to the proper SLI-3 or SLI-4 device
13301  * remove routine, which will perform all the necessary cleanup for the
13302  * device to be removed from the PCI subsystem properly.
13303  **/
13304 static void
13305 lpfc_pci_remove_one(struct pci_dev *pdev)
13306 {
13307         struct Scsi_Host *shost = pci_get_drvdata(pdev);
13308         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13309 
13310         switch (phba->pci_dev_grp) {
13311         case LPFC_PCI_DEV_LP:
13312                 lpfc_pci_remove_one_s3(pdev);
13313                 break;
13314         case LPFC_PCI_DEV_OC:
13315                 lpfc_pci_remove_one_s4(pdev);
13316                 break;
13317         default:
13318                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13319                                 "1424 Invalid PCI device group: 0x%x\n",
13320                                 phba->pci_dev_grp);
13321                 break;
13322         }
13323         return;
13324 }
13325 
13326 /**
13327  * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
13328  * @pdev: pointer to PCI device
13329  * @msg: power management message
13330  *
13331  * This routine is to be registered to the kernel's PCI subsystem to support
13332  * system Power Management (PM). When PM invokes this method, it dispatches
13333  * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
13334  * suspend the device.
13335  *
13336  * Return code
13337  *      0 - driver suspended the device
13338  *      Error otherwise
13339  **/
13340 static int
13341 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
13342 {
13343         struct Scsi_Host *shost = pci_get_drvdata(pdev);
13344         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13345         int rc = -ENODEV;
13346 
13347         switch (phba->pci_dev_grp) {
13348         case LPFC_PCI_DEV_LP:
13349                 rc = lpfc_pci_suspend_one_s3(pdev, msg);
13350                 break;
13351         case LPFC_PCI_DEV_OC:
13352                 rc = lpfc_pci_suspend_one_s4(pdev, msg);
13353                 break;
13354         default:
13355                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13356                                 "1425 Invalid PCI device group: 0x%x\n",
13357                                 phba->pci_dev_grp);
13358                 break;
13359         }
13360         return rc;
13361 }
13362 
13363 /**
13364  * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
13365  * @pdev: pointer to PCI device
13366  *
13367  * This routine is to be registered to the kernel's PCI subsystem to support
13368  * system Power Management (PM). When PM invokes this method, it dispatches
13369  * the action to the proper SLI-3 or SLI-4 device resume routine, which will
13370  * resume the device.
13371  *
13372  * Return code
13373  *      0 - driver resumed the device
13374  *      Error otherwise
13375  **/
13376 static int
13377 lpfc_pci_resume_one(struct pci_dev *pdev)
13378 {
13379         struct Scsi_Host *shost = pci_get_drvdata(pdev);
13380         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13381         int rc = -ENODEV;
13382 
13383         switch (phba->pci_dev_grp) {
13384         case LPFC_PCI_DEV_LP:
13385                 rc = lpfc_pci_resume_one_s3(pdev);
13386                 break;
13387         case LPFC_PCI_DEV_OC:
13388                 rc = lpfc_pci_resume_one_s4(pdev);
13389                 break;
13390         default:
13391                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13392                                 "1426 Invalid PCI device group: 0x%x\n",
13393                                 phba->pci_dev_grp);
13394                 break;
13395         }
13396         return rc;
13397 }
13398 
13399 /**
13400  * lpfc_io_error_detected - lpfc method for handling PCI I/O error
13401  * @pdev: pointer to PCI device.
13402  * @state: the current PCI connection state.
13403  *
13404  * This routine is registered to the PCI subsystem for error handling. This
13405  * function is called by the PCI subsystem after a PCI bus error affecting
13406  * this device has been detected. When this routine is invoked, it dispatches
13407  * the action to the proper SLI-3 or SLI-4 device error detected handling
13408  * routine, which will perform the proper error detected operation.
13409  *
13410  * Return codes
13411  *      PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
13412  *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
13413  **/
13414 static pci_ers_result_t
13415 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
13416 {
13417         struct Scsi_Host *shost = pci_get_drvdata(pdev);
13418         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13419         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
13420 
13421         switch (phba->pci_dev_grp) {
13422         case LPFC_PCI_DEV_LP:
13423                 rc = lpfc_io_error_detected_s3(pdev, state);
13424                 break;
13425         case LPFC_PCI_DEV_OC:
13426                 rc = lpfc_io_error_detected_s4(pdev, state);
13427                 break;
13428         default:
13429                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13430                                 "1427 Invalid PCI device group: 0x%x\n",
13431                                 phba->pci_dev_grp);
13432                 break;
13433         }
13434         return rc;
13435 }
13436 
13437 /**
13438  * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
13439  * @pdev: pointer to PCI device.
13440  *
13441  * This routine is registered to the PCI subsystem for error handling. This
13442  * function is called after PCI bus has been reset to restart the PCI card
13443  * from scratch, as if from a cold-boot. When this routine is invoked, it
13444  * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
13445  * routine, which will perform the proper device reset.
13446  *
13447  * Return codes
13448  *      PCI_ERS_RESULT_RECOVERED - the device has been recovered
13449  *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
13450  **/
13451 static pci_ers_result_t
13452 lpfc_io_slot_reset(struct pci_dev *pdev)
13453 {
13454         struct Scsi_Host *shost = pci_get_drvdata(pdev);
13455         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13456         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
13457 
13458         switch (phba->pci_dev_grp) {
13459         case LPFC_PCI_DEV_LP:
13460                 rc = lpfc_io_slot_reset_s3(pdev);
13461                 break;
13462         case LPFC_PCI_DEV_OC:
13463                 rc = lpfc_io_slot_reset_s4(pdev);
13464                 break;
13465         default:
13466                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13467                                 "1428 Invalid PCI device group: 0x%x\n",
13468                                 phba->pci_dev_grp);
13469                 break;
13470         }
13471         return rc;
13472 }
13473 
13474 /**
13475  * lpfc_io_resume - lpfc method for resuming PCI I/O operation
13476  * @pdev: pointer to PCI device
13477  *
13478  * This routine is registered to the PCI subsystem for error handling. It
13479  * is called when kernel error recovery tells the lpfc driver that it is
13480  * OK to resume normal PCI operation after PCI bus error recovery. When
13481  * this routine is invoked, it dispatches the action to the proper SLI-3
13482  * or SLI-4 device io_resume routine, which will resume the device operation.
13483  **/
13484 static void
13485 lpfc_io_resume(struct pci_dev *pdev)
13486 {
13487         struct Scsi_Host *shost = pci_get_drvdata(pdev);
13488         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13489 
13490         switch (phba->pci_dev_grp) {
13491         case LPFC_PCI_DEV_LP:
13492                 lpfc_io_resume_s3(pdev);
13493                 break;
13494         case LPFC_PCI_DEV_OC:
13495                 lpfc_io_resume_s4(pdev);
13496                 break;
13497         default:
13498                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13499                                 "1429 Invalid PCI device group: 0x%x\n",
13500                                 phba->pci_dev_grp);
13501                 break;
13502         }
13503         return;
13504 }
13505 
13506 /**
13507  * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
13508  * @phba: pointer to lpfc hba data structure.
13509  *
13510  * This routine checks whether OAS is supported by this adapter. If it is
13511  * supported, the Flash Optimized Fabric (FOF) configuration flag is set.
13512  * Otherwise, the OAS enable flag is cleared and the mempool created for
13513  * OAS device data is destroyed.
13514  *
13515  **/
13516 static void
13517 lpfc_sli4_oas_verify(struct lpfc_hba *phba)
13518 {
13519 
13520         if (!phba->cfg_EnableXLane)
13521                 return;
13522 
13523         if (phba->sli4_hba.pc_sli4_params.oas_supported) {
13524                 phba->cfg_fof = 1;
13525         } else {
13526                 phba->cfg_fof = 0;
13527                 if (phba->device_data_mem_pool)
13528                         mempool_destroy(phba->device_data_mem_pool);
13529                 phba->device_data_mem_pool = NULL;
13530         }
13531 
13532         return;
13533 }
13534 
13535 /**
13536  * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
13537  * @phba: pointer to lpfc hba data structure.
13538  *
13539  * This routine checks whether RAS is supported by the adapter, and whether
13540  * this PCI function is the one through which RAS logging is to be enabled.
13541  **/
13542 void
13543 lpfc_sli4_ras_init(struct lpfc_hba *phba)
13544 {
13545         switch (phba->pcidev->device) {
13546         case PCI_DEVICE_ID_LANCER_G6_FC:
13547         case PCI_DEVICE_ID_LANCER_G7_FC:
13548                 phba->ras_fwlog.ras_hwsupport = true;
13549                 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
13550                     phba->cfg_ras_fwlog_buffsize)
13551                         phba->ras_fwlog.ras_enabled = true;
13552                 else
13553                         phba->ras_fwlog.ras_enabled = false;
13554                 break;
13555         default:
13556                 phba->ras_fwlog.ras_hwsupport = false;
13557         }
13558 }
13559 
13560 
13561 MODULE_DEVICE_TABLE(pci, lpfc_id_table);
13562 
13563 static const struct pci_error_handlers lpfc_err_handler = {
13564         .error_detected = lpfc_io_error_detected,
13565         .slot_reset = lpfc_io_slot_reset,
13566         .resume = lpfc_io_resume,
13567 };
13568 
13569 static struct pci_driver lpfc_driver = {
13570         .name           = LPFC_DRIVER_NAME,
13571         .id_table       = lpfc_id_table,
13572         .probe          = lpfc_pci_probe_one,
13573         .remove         = lpfc_pci_remove_one,
13574         .shutdown       = lpfc_pci_remove_one,
13575         .suspend        = lpfc_pci_suspend_one,
13576         .resume         = lpfc_pci_resume_one,
13577         .err_handler    = &lpfc_err_handler,
13578 };
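
/*
 * Note that .shutdown maps to the same routine as .remove, so the reboot
 * path performs the full remove-path teardown rather than a lighter
 * quiesce.
 */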
13579 
13580 static const struct file_operations lpfc_mgmt_fop = {
13581         .owner = THIS_MODULE,
13582 };
13583 
13584 static struct miscdevice lpfc_mgmt_dev = {
13585         .minor = MISC_DYNAMIC_MINOR,
13586         .name = "lpfcmgmt",
13587         .fops = &lpfc_mgmt_fop,
13588 };
13589 
13590 /**
13591  * lpfc_init - lpfc module initialization routine
13592  *
13593  * This routine is to be invoked when the lpfc module is loaded into the
13594  * kernel. The special kernel macro module_init() is used to indicate the
13595  * role of this routine to the kernel as lpfc module entry point.
13596  *
13597  * Return codes
13598  *   0 - successful
13599  *   -ENOMEM - FC attach transport failed
13600  *   all others - failed
13601  */
13602 static int __init
13603 lpfc_init(void)
13604 {
13605         int error = 0;
13606 
13607         printk(LPFC_MODULE_DESC "\n");
13608         printk(LPFC_COPYRIGHT "\n");
13609 
13610         error = misc_register(&lpfc_mgmt_dev);
13611         if (error)
13612                 printk(KERN_ERR "Could not register lpfcmgmt device, "
13613                         "misc_register returned with status %d\n", error);
13614 
13615         lpfc_transport_functions.vport_create = lpfc_vport_create;
13616         lpfc_transport_functions.vport_delete = lpfc_vport_delete;
13617         lpfc_transport_template =
13618                                 fc_attach_transport(&lpfc_transport_functions);
13619         if (lpfc_transport_template == NULL)
13620                 return -ENOMEM;
13621         lpfc_vport_transport_template =
13622                 fc_attach_transport(&lpfc_vport_transport_functions);
13623         if (lpfc_vport_transport_template == NULL) {
13624                 fc_release_transport(lpfc_transport_template);
13625                 return -ENOMEM;
13626         }
13627         lpfc_nvme_cmd_template();
13628         lpfc_nvmet_cmd_template();
13629 
13630         /* Initialize in case vector mapping is needed */
13631         lpfc_present_cpu = num_present_cpus();
13632 
13633         error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
13634                                         "lpfc/sli4:online",
13635                                         lpfc_cpu_online, lpfc_cpu_offline);
13636         if (error < 0)
13637                 goto cpuhp_failure;
13638         lpfc_cpuhp_state = error;
13639 
13640         error = pci_register_driver(&lpfc_driver);
13641         if (error)
13642                 goto unwind;
13643 
13644         return error;
13645 
13646 unwind:
13647         cpuhp_remove_multi_state(lpfc_cpuhp_state);
13648 cpuhp_failure:
13649         fc_release_transport(lpfc_transport_template);
13650         fc_release_transport(lpfc_vport_transport_template);
13651 
13652         return error;
13653 }
13654 
13655 /**
13656  * lpfc_exit - lpfc module removal routine
13657  *
13658  * This routine is invoked when the lpfc module is removed from the kernel.
13659  * The special kernel macro module_exit() is used to indicate the role of
13660  * this routine to the kernel as lpfc module exit point.
13661  */
13662 static void __exit
13663 lpfc_exit(void)
13664 {
13665         misc_deregister(&lpfc_mgmt_dev);
13666         pci_unregister_driver(&lpfc_driver);
13667         cpuhp_remove_multi_state(lpfc_cpuhp_state);
13668         fc_release_transport(lpfc_transport_template);
13669         fc_release_transport(lpfc_vport_transport_template);
13670         idr_destroy(&lpfc_hba_index);
13671 }
13672 
13673 module_init(lpfc_init);
13674 module_exit(lpfc_exit);
13675 MODULE_LICENSE("GPL");
13676 MODULE_DESCRIPTION(LPFC_MODULE_DESC);
13677 MODULE_AUTHOR("Broadcom");
13678 MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
