This source file includes following definitions.
- lpfc_config_port_prep
- lpfc_config_async_cmpl
- lpfc_dump_wakeup_param_cmpl
- lpfc_update_vport_wwn
- lpfc_config_port_post
- lpfc_hba_init_link
- lpfc_hba_init_link_fc_topology
- lpfc_hba_down_link
- lpfc_hba_down_prep
- lpfc_sli4_free_sp_events
- lpfc_hba_free_post_buf
- lpfc_hba_clean_txcmplq
- lpfc_hba_down_post_s3
- lpfc_hba_down_post_s4
- lpfc_hba_down_post
- lpfc_hb_timeout
- lpfc_rrq_timeout
- lpfc_hb_mbox_cmpl
- lpfc_hb_eq_delay_work
- lpfc_hb_mxp_handler
- lpfc_hb_timeout_handler
- lpfc_offline_eratt
- lpfc_sli4_offline_eratt
- lpfc_handle_deferred_eratt
- lpfc_board_errevt_to_mgmt
- lpfc_handle_eratt_s3
- lpfc_sli4_port_sta_fn_reset
- lpfc_handle_eratt_s4
- lpfc_handle_eratt
- lpfc_handle_latt
- lpfc_parse_vpd
- lpfc_get_hba_model_desc
- lpfc_post_buffer
- lpfc_post_rcv_buf
- lpfc_sha_init
- lpfc_sha_iterate
- lpfc_challenge_key
- lpfc_hba_init
- lpfc_cleanup
- lpfc_stop_vport_timers
- __lpfc_sli4_stop_fcf_redisc_wait_timer
- lpfc_sli4_stop_fcf_redisc_wait_timer
- lpfc_stop_hba_timers
- lpfc_block_mgmt_io
- lpfc_sli4_node_prep
- lpfc_create_expedite_pool
- lpfc_destroy_expedite_pool
- lpfc_create_multixri_pools
- lpfc_destroy_multixri_pools
- lpfc_online
- lpfc_unblock_mgmt_io
- lpfc_offline_prep
- lpfc_offline
- lpfc_scsi_free
- lpfc_io_free
- lpfc_sli4_els_sgl_update
- lpfc_sli4_nvmet_sgl_update
- lpfc_io_buf_flush
- lpfc_io_buf_replenish
- lpfc_sli4_io_sgl_update
- lpfc_new_io_buf
- lpfc_get_wwpn
- lpfc_create_port
- destroy_port
- lpfc_get_instance
- lpfc_scan_finished
- lpfc_host_supported_speeds_set
- lpfc_host_attrib_init
- lpfc_stop_port_s3
- lpfc_stop_port_s4
- lpfc_stop_port
- lpfc_fcf_redisc_wait_start_timer
- lpfc_sli4_fcf_redisc_wait_tmo
- lpfc_sli4_parse_latt_fault
- lpfc_sli4_parse_latt_type
- lpfc_sli_port_speed_get
- lpfc_sli4_port_speed_parse
- lpfc_sli4_async_link_evt
- lpfc_async_link_speed_to_read_top
- lpfc_update_trunk_link_status
- lpfc_sli4_async_fc_evt
- lpfc_sli4_async_sli_evt
- lpfc_sli4_perform_vport_cvl
- lpfc_sli4_perform_all_vport_cvl
- lpfc_sli4_async_fip_evt
- lpfc_sli4_async_dcbx_evt
- lpfc_sli4_async_grp5_evt
- lpfc_sli4_async_event_proc
- lpfc_sli4_fcf_redisc_event_proc
- lpfc_api_table_setup
- lpfc_log_intr_mode
- lpfc_enable_pci_dev
- lpfc_disable_pci_dev
- lpfc_reset_hba
- lpfc_sli_sriov_nr_virtfn_get
- lpfc_sli_probe_sriov_nr_virtfn
- lpfc_setup_driver_resource_phase1
- lpfc_sli_driver_resource_setup
- lpfc_sli_driver_resource_unset
- lpfc_sli4_driver_resource_setup
- lpfc_sli4_driver_resource_unset
- lpfc_init_api_table_setup
- lpfc_setup_driver_resource_phase2
- lpfc_unset_driver_resource_phase2
- lpfc_free_iocb_list
- lpfc_init_iocb_list
- lpfc_free_sgl_list
- lpfc_free_els_sgl_list
- lpfc_free_nvmet_sgl_list
- lpfc_init_active_sgl_array
- lpfc_free_active_sgl
- lpfc_init_sgl_list
- lpfc_sli4_init_rpi_hdrs
- lpfc_sli4_create_rpi_hdr
- lpfc_sli4_remove_rpi_hdrs
- lpfc_hba_alloc
- lpfc_hba_free
- lpfc_create_shost
- lpfc_destroy_shost
- lpfc_setup_bg
- lpfc_post_init_setup
- lpfc_sli_pci_mem_setup
- lpfc_sli_pci_mem_unset
- lpfc_sli4_post_status_check
- lpfc_sli4_bar0_register_memmap
- lpfc_sli4_bar1_register_memmap
- lpfc_sli4_bar2_register_memmap
- lpfc_create_bootstrap_mbox
- lpfc_destroy_bootstrap_mbox
- lpfc_sli4_read_config
- lpfc_setup_endian_order
- lpfc_sli4_queue_verify
- lpfc_alloc_io_wq_cq
- lpfc_sli4_queue_create
- __lpfc_sli4_release_queue
- lpfc_sli4_release_queues
- lpfc_sli4_release_hdwq
- lpfc_sli4_queue_destroy
- lpfc_free_rq_buffer
- lpfc_create_wq_cq
- lpfc_setup_cq_lookup
- lpfc_sli4_queue_setup
- lpfc_sli4_queue_unset
- lpfc_sli4_cq_event_pool_create
- lpfc_sli4_cq_event_pool_destroy
- __lpfc_sli4_cq_event_alloc
- lpfc_sli4_cq_event_alloc
- __lpfc_sli4_cq_event_release
- lpfc_sli4_cq_event_release
- lpfc_sli4_cq_event_release_all
- lpfc_pci_function_reset
- lpfc_sli4_pci_mem_setup
- lpfc_sli4_pci_mem_unset
- lpfc_sli_enable_msix
- lpfc_sli_enable_msi
- lpfc_sli_enable_intr
- lpfc_sli_disable_intr
- lpfc_find_cpu_handle
- lpfc_find_hyper
- lpfc_cpu_affinity_check
- lpfc_cpuhp_get_eq
- __lpfc_cpuhp_remove
- lpfc_cpuhp_remove
- lpfc_cpuhp_add
- __lpfc_cpuhp_checks
- lpfc_cpu_offline
- lpfc_cpu_online
- lpfc_sli4_enable_msix
- lpfc_sli4_enable_msi
- lpfc_sli4_enable_intr
- lpfc_sli4_disable_intr
- lpfc_unset_hba
- lpfc_sli4_xri_exchange_busy_wait
- lpfc_sli4_hba_unset
- lpfc_pc_sli4_params_get
- lpfc_get_sli4_parameters
- lpfc_pci_probe_one_s3
- lpfc_pci_remove_one_s3
- lpfc_pci_suspend_one_s3
- lpfc_pci_resume_one_s3
- lpfc_sli_prep_dev_for_recover
- lpfc_sli_prep_dev_for_reset
- lpfc_sli_prep_dev_for_perm_failure
- lpfc_io_error_detected_s3
- lpfc_io_slot_reset_s3
- lpfc_io_resume_s3
- lpfc_sli4_get_els_iocb_cnt
- lpfc_sli4_get_iocb_cnt
- lpfc_log_write_firmware_error
- lpfc_write_firmware
- lpfc_sli4_request_firmware_update
- lpfc_pci_probe_one_s4
- lpfc_pci_remove_one_s4
- lpfc_pci_suspend_one_s4
- lpfc_pci_resume_one_s4
- lpfc_sli4_prep_dev_for_recover
- lpfc_sli4_prep_dev_for_reset
- lpfc_sli4_prep_dev_for_perm_failure
- lpfc_io_error_detected_s4
- lpfc_io_slot_reset_s4
- lpfc_io_resume_s4
- lpfc_pci_probe_one
- lpfc_pci_remove_one
- lpfc_pci_suspend_one
- lpfc_pci_resume_one
- lpfc_io_error_detected
- lpfc_io_slot_reset
- lpfc_io_resume
- lpfc_sli4_oas_verify
- lpfc_sli4_ras_init
- lpfc_init
- lpfc_exit
   1 
   2 
   3 
   4 
   5 
   6 
   7 
   8 
   9 
  10 
  11 
  12 
  13 
  14 
  15 
  16 
  17 
  18 
  19 
  20 
  21 
  22 
  23 
  24 #include <linux/blkdev.h>
  25 #include <linux/delay.h>
  26 #include <linux/dma-mapping.h>
  27 #include <linux/idr.h>
  28 #include <linux/interrupt.h>
  29 #include <linux/module.h>
  30 #include <linux/kthread.h>
  31 #include <linux/pci.h>
  32 #include <linux/spinlock.h>
  33 #include <linux/ctype.h>
  34 #include <linux/aer.h>
  35 #include <linux/slab.h>
  36 #include <linux/firmware.h>
  37 #include <linux/miscdevice.h>
  38 #include <linux/percpu.h>
  39 #include <linux/msi.h>
  40 #include <linux/irq.h>
  41 #include <linux/bitops.h>
  42 #include <linux/crash_dump.h>
  43 #include <linux/cpuhotplug.h>
  44 
  45 #include <scsi/scsi.h>
  46 #include <scsi/scsi_device.h>
  47 #include <scsi/scsi_host.h>
  48 #include <scsi/scsi_transport_fc.h>
  49 #include <scsi/scsi_tcq.h>
  50 #include <scsi/fc/fc_fs.h>
  51 
  52 #include <linux/nvme-fc-driver.h>
  53 
  54 #include "lpfc_hw4.h"
  55 #include "lpfc_hw.h"
  56 #include "lpfc_sli.h"
  57 #include "lpfc_sli4.h"
  58 #include "lpfc_nl.h"
  59 #include "lpfc_disc.h"
  60 #include "lpfc.h"
  61 #include "lpfc_scsi.h"
  62 #include "lpfc_nvme.h"
  63 #include "lpfc_nvmet.h"
  64 #include "lpfc_logmsg.h"
  65 #include "lpfc_crtn.h"
  66 #include "lpfc_vport.h"
  67 #include "lpfc_version.h"
  68 #include "lpfc_ids.h"
  69 
  70 static enum cpuhp_state lpfc_cpuhp_state;
  71 
  72 static uint32_t lpfc_present_cpu;
  73 
  74 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
  75 static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
  76 static void lpfc_cpuhp_add(struct lpfc_hba *phba);
  77 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
  78 static int lpfc_post_rcv_buf(struct lpfc_hba *);
  79 static int lpfc_sli4_queue_verify(struct lpfc_hba *);
  80 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
  81 static int lpfc_setup_endian_order(struct lpfc_hba *);
  82 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
  83 static void lpfc_free_els_sgl_list(struct lpfc_hba *);
  84 static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
  85 static void lpfc_init_sgl_list(struct lpfc_hba *);
  86 static int lpfc_init_active_sgl_array(struct lpfc_hba *);
  87 static void lpfc_free_active_sgl(struct lpfc_hba *);
  88 static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
  89 static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
  90 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
  91 static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
  92 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
  93 static void lpfc_sli4_disable_intr(struct lpfc_hba *);
  94 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
  95 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
  96 static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
  97 static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
  98 
  99 static struct scsi_transport_template *lpfc_transport_template = NULL;
 100 static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
 101 static DEFINE_IDR(lpfc_hba_index);
 102 #define LPFC_NVMET_BUF_POST 254
 103 
 104 
 105 
 106 
 107 
 108 
 109 
 110 
 111 
 112 
 113 
 114 
 115 
 116 
 117 
/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * Issues the mailbox commands that must complete before CONFIG_PORT:
 * READ_NVPARM (LightPulse HBAs only, to fetch the WWNN/WWPN), READ_REV
 * (to collect adapter revision data into phba->vpd) and DUMP (to read the
 * VPD region). All mailbox commands are issued in polled (MBX_POLL) mode.
 *
 * Return codes
 *   0 - success.
 *   -ERESTART - a mailbox command failed; caller should reset and retry.
 *   -EINVAL - SLI-3 was requested but firmware gave no SLI-3 response.
 *   -ENOMEM - could not allocate a mailbox command buffer.
 */
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		/* Byte-swap the license key once (first HBA only), then
		 * hand it to the adapter in READ_NVPARM's reserved field.
		 */
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		/* Save the node and port names from non-volatile params */
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	/*
	 * Clear all option bits except LPFC_SLI3_BG_ENABLED,
	 * which was already set.
	 */
	phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free( pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}


	/*
	 * READ_REV must report full revision information (rr bit set);
	 * otherwise the revision fields below would be invalid.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/* SLI-3 requested but firmware did not respond in SLI-3 mode */
	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/*
	 * If feature level is less than 9, the driver tears down all
	 * vports on link down (LPFC_SLI3_VPORT_TEARDOWN).
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
						sizeof (phba->RandomData));

	/* Get adapter VPD information: DUMP the VPD region in chunks */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		/* Clamp to the remaining space in the VPD buffer */
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}
 272 
 273 
 274 
 275 
 276 
 277 
 278 
 279 
 280 
 281 
 282 
 283 static void
 284 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
 285 {
 286         if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
 287                 phba->temp_sensor_support = 1;
 288         else
 289                 phba->temp_sensor_support = 0;
 290         mempool_free(pmboxq, phba->mbox_mem_pool);
 291         return;
 292 }
 293 
 294 
 295 
 296 
 297 
 298 
 299 
 300 
 301 
 302 
 303 
 304 static void
 305 lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 306 {
 307         struct prog_id *prg;
 308         uint32_t prog_id_word;
 309         char dist = ' ';
 310         
 311         char dist_char[] = "nabx";
 312 
 313         if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
 314                 mempool_free(pmboxq, phba->mbox_mem_pool);
 315                 return;
 316         }
 317 
 318         prg = (struct prog_id *) &prog_id_word;
 319 
 320         
 321         prog_id_word = pmboxq->u.mb.un.varWords[7];
 322 
 323         
 324         if (prg->dist < 4)
 325                 dist = dist_char[prg->dist];
 326 
 327         if ((prg->dist == 3) && (prg->num == 0))
 328                 snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
 329                         prg->ver, prg->rev, prg->lev);
 330         else
 331                 snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
 332                         prg->ver, prg->rev, prg->lev,
 333                         dist, prg->num);
 334         mempool_free(pmboxq, phba->mbox_mem_pool);
 335         return;
 336 }
 337 
 338 
 339 
 340 
 341 
 342 
 343 
 344 
 345 
 346 
/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname
 * @vport: pointer to lpfc vport data structure.
 *
 * Reconciles the vport's cached WWNN/WWPN with the names read from the
 * service parameters, honoring user-configured soft WWNs and the
 * fabric-assigned WWPN (FAWWPN) vendor key found in the vendor version
 * field of the service parameters.
 */
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
	u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];

	/* If a soft name exists, push it into the service parameters */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the node name is empty or a soft name exists, copy the
	 * service-parameter name; otherwise push the cached fc_nodename
	 * back into the service parameters.
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
			sizeof(struct lpfc_name));

	/*
	 * If the port name has changed, flag it so the stale login can be
	 * unregistered (FAWWPN_PARAM_CHG).
	 */
	if (vport->fc_portname.u.wwn[0] != 0 &&
		memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name)))
		vport->vport_flag |= FAWWPN_PARAM_CHG;

	/* Adopt the service-parameter port name when the cached name is
	 * empty, a soft WWPN is set, the vendor FAWWPN key is present, or
	 * a fabric-assigned WWPN was previously recorded; FAWWPN_SET is
	 * re-derived from the current vendor key.
	 */
	if (vport->fc_portname.u.wwn[0] == 0 ||
	    vport->phba->cfg_soft_wwpn ||
	    (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
	    vport->vport_flag & FAWWPN_SET) {
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name));
		vport->vport_flag &= ~FAWWPN_SET;
		if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
			vport->vport_flag |= FAWWPN_SET;
	}
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
			sizeof(struct lpfc_name));
}
 395 
 396 
 397 
 398 
 399 
 400 
 401 
 402 
 403 
 404 
 405 
 406 
 407 
 408 
/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * Runs after CONFIG_PORT: reads the service parameters and adapter
 * configuration, derives the serial number, posts receive buffers,
 * configures MSI-X (if in use), enables host interrupt conditions,
 * starts the ELS/heartbeat/error-attention timers, and brings up the
 * link (or issues DOWN_LINK when the link is administratively disabled).
 * Finally it enables async events and requests the Option ROM version.
 *
 * Return codes
 *   0 - success.
 *   -ENOMEM - mailbox allocation failed (HBA marked in error state).
 *   -EIO - a mailbox command or register read failed.
 */
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID.  */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		/* Free both the mailbox and its DMA buffer on failure */
		mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *)pmb->ctx_buf;

	/* Cache the service parameters and release the DMA buffer */
	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->ctx_buf = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use the low 6 bytes of WWNN,
	 * hex-encoded one nibble per character.
	 */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free( pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
	i = (mb->un.varRdConfig.max_xri + 1);
	if (phba->cfg_hba_queue_depth > i) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth, i);
		phba->cfg_hba_queue_depth = i;
	}

	/* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3)  */
	i = (mb->un.varRdConfig.max_xri >> 3);
	if (phba->pport->cfg_lun_queue_depth > i) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3360 LUN queue depth changed from %d to %d\n",
				phba->pport->cfg_lun_queue_depth, i);
		phba->pport->cfg_lun_queue_depth = i;
	}

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	/* Ring 0 interrupts stay off when FCP ring polling is enabled */
	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2599 Adapter failed to issue DOWN_LINK"
			" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}
 673 
 674 
 675 
 676 
 677 
 678 
 679 
 680 
 681 
 682 
 683 
 684 
 685 
 686 
 687 
 688 static int
 689 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
 690 {
 691         return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
 692 }
 693 
 694 
 695 
 696 
 697 
 698 
 699 
 700 
 701 
 702 
 703 
 704 
 705 
 706 
 707 
 708 
 709 int
 710 lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
 711                                uint32_t flag)
 712 {
 713         struct lpfc_vport *vport = phba->pport;
 714         LPFC_MBOXQ_t *pmb;
 715         MAILBOX_t *mb;
 716         int rc;
 717 
 718         pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 719         if (!pmb) {
 720                 phba->link_state = LPFC_HBA_ERROR;
 721                 return -ENOMEM;
 722         }
 723         mb = &pmb->u.mb;
 724         pmb->vport = vport;
 725 
 726         if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
 727             ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
 728              !(phba->lmt & LMT_1Gb)) ||
 729             ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
 730              !(phba->lmt & LMT_2Gb)) ||
 731             ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
 732              !(phba->lmt & LMT_4Gb)) ||
 733             ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
 734              !(phba->lmt & LMT_8Gb)) ||
 735             ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
 736              !(phba->lmt & LMT_10Gb)) ||
 737             ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
 738              !(phba->lmt & LMT_16Gb)) ||
 739             ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
 740              !(phba->lmt & LMT_32Gb)) ||
 741             ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
 742              !(phba->lmt & LMT_64Gb))) {
 743                 
 744                 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
 745                         "1302 Invalid speed for this board:%d "
 746                         "Reset link speed to auto.\n",
 747                         phba->cfg_link_speed);
 748                         phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
 749         }
 750         lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
 751         pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 752         if (phba->sli_rev < LPFC_SLI_REV4)
 753                 lpfc_set_loopback_flag(phba);
 754         rc = lpfc_sli_issue_mbox(phba, pmb, flag);
 755         if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
 756                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 757                         "0498 Adapter failed to init, mbxCmd x%x "
 758                         "INIT_LINK, mbxStatus x%x\n",
 759                         mb->mbxCommand, mb->mbxStatus);
 760                 if (phba->sli_rev <= LPFC_SLI_REV3) {
 761                         
 762                         writel(0, phba->HCregaddr);
 763                         readl(phba->HCregaddr); 
 764                         
 765                         writel(0xffffffff, phba->HAregaddr);
 766                         readl(phba->HAregaddr); 
 767                 }
 768                 phba->link_state = LPFC_HBA_ERROR;
 769                 if (rc != MBX_BUSY || flag == MBX_POLL)
 770                         mempool_free(pmb, phba->mbox_mem_pool);
 771                 return -EIO;
 772         }
 773         phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
 774         if (flag == MBX_POLL)
 775                 mempool_free(pmb, phba->mbox_mem_pool);
 776 
 777         return 0;
 778 }
 779 
 780 
 781 
 782 
 783 
 784 
 785 
 786 
 787 
 788 
 789 
 790 
 791 
 792 
 793 static int
 794 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
 795 {
 796         LPFC_MBOXQ_t *pmb;
 797         int rc;
 798 
 799         pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 800         if (!pmb) {
 801                 phba->link_state = LPFC_HBA_ERROR;
 802                 return -ENOMEM;
 803         }
 804 
 805         lpfc_printf_log(phba,
 806                 KERN_ERR, LOG_INIT,
 807                 "0491 Adapter Link is disabled.\n");
 808         lpfc_down_link(phba, pmb);
 809         pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 810         rc = lpfc_sli_issue_mbox(phba, pmb, flag);
 811         if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
 812                 lpfc_printf_log(phba,
 813                 KERN_ERR, LOG_INIT,
 814                 "2522 Adapter failed to issue DOWN_LINK"
 815                 " mbox command rc 0x%x\n", rc);
 816 
 817                 mempool_free(pmb, phba->mbox_mem_pool);
 818                 return -EIO;
 819         }
 820         if (flag == MBX_POLL)
 821                 mempool_free(pmb, phba->mbox_mem_pool);
 822 
 823         return 0;
 824 }
 825 
 826 
 827 
 828 
 829 
 830 
 831 
 832 
 833 
 834 
 835 
 836 
 837 int
 838 lpfc_hba_down_prep(struct lpfc_hba *phba)
 839 {
 840         struct lpfc_vport **vports;
 841         int i;
 842 
 843         if (phba->sli_rev <= LPFC_SLI_REV3) {
 844                 
 845                 writel(0, phba->HCregaddr);
 846                 readl(phba->HCregaddr); 
 847         }
 848 
 849         if (phba->pport->load_flag & FC_UNLOADING)
 850                 lpfc_cleanup_discovery_resources(phba->pport);
 851         else {
 852                 vports = lpfc_create_vport_work_array(phba);
 853                 if (vports != NULL)
 854                         for (i = 0; i <= phba->max_vports &&
 855                                 vports[i] != NULL; i++)
 856                                 lpfc_cleanup_discovery_resources(vports[i]);
 857                 lpfc_destroy_vport_work_array(phba, vports);
 858         }
 859         return 0;
 860 }
 861 
 862 
 863 
 864 
 865 
 866 
 867 
 868 
 869 
 870 
 871 
 872 
 873 
 874 
/**
 * lpfc_sli4_free_sp_events - Free deferred slow-path completion events
 * @phba: pointer to lpfc HBA data structure.
 *
 * Drains the SLI4 slow-path queue event list under hbalock, returning
 * each deferred event to its pool: WQE completions release their
 * response iocb back to the iocb pool; receive CQEs free the associated
 * receive buffer.  Also clears the HBA_SP_QUEUE_EVT flag so no further
 * slow-path event processing is attempted.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						 cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		}
	}
}
 907 
 908 
 909 
 910 
 911 
 912 
 913 
 914 
 915 
 916 
 917 
 918 
 919 static void
 920 lpfc_hba_free_post_buf(struct lpfc_hba *phba)
 921 {
 922         struct lpfc_sli *psli = &phba->sli;
 923         struct lpfc_sli_ring *pring;
 924         struct lpfc_dmabuf *mp, *next_mp;
 925         LIST_HEAD(buflist);
 926         int count;
 927 
 928         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
 929                 lpfc_sli_hbqbuf_free_all(phba);
 930         else {
 931                 
 932                 pring = &psli->sli3_ring[LPFC_ELS_RING];
 933                 spin_lock_irq(&phba->hbalock);
 934                 list_splice_init(&pring->postbufq, &buflist);
 935                 spin_unlock_irq(&phba->hbalock);
 936 
 937                 count = 0;
 938                 list_for_each_entry_safe(mp, next_mp, &buflist, list) {
 939                         list_del(&mp->list);
 940                         count++;
 941                         lpfc_mbuf_free(phba, mp->virt, mp->phys);
 942                         kfree(mp);
 943                 }
 944 
 945                 spin_lock_irq(&phba->hbalock);
 946                 pring->postbufq_cnt -= count;
 947                 spin_unlock_irq(&phba->hbalock);
 948         }
 949 }
 950 
 951 
 952 
 953 
 954 
 955 
 956 
 957 
 958 
 959 
 960 
/**
 * lpfc_hba_clean_txcmplq - Clean all the outstanding iocbs on txcmplq
 * @phba: pointer to lpfc HBA data structure.
 *
 * Removes every iocb from the txcmplq of each ring (SLI-3) or work
 * queue (SLI-4), aborts the rings, and cancels the collected iocbs
 * with IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED.  Called during HBA
 * bring-down, when none of these iocbs can ever complete normally.
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;
	struct lpfc_iocbq *piocb, *next_iocb;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			spin_lock_irq(&phba->hbalock);
			/* At this point the HBA is either reset or DOA:
			 * nothing on txcmplq will ever complete, so it is
			 * safe to splice the whole list off.
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
	/* SLI-4: each work queue carries its own ring and ring lock */
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}
1007 
1008 
1009 
1010 
1011 
1012 
1013 
1014 
1015 
1016 
1017 
1018 
1019 
/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * SLI-3 post-reset cleanup: free posted receive buffers, then clean
 * out the txcmplq of outstanding iocbs.
 *
 * Return codes
 *      0 - success (always).
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);

	return 0;
}
1027 
1028 
1029 
1030 
1031 
1032 
1033 
1034 
1035 
1036 
1037 
1038 
/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * SLI-4 post-reset cleanup: frees HBQ buffers, cleans the txcmplq,
 * moves aborted ELS sgls back onto the free ELS sgl list, returns
 * aborted IO buffers on every hardware queue to their put lists, and
 * reposts any aborted NVMET context buffers.  Finally drains deferred
 * slow-path queue events.
 *
 * Return: number of aborted IO buffers recycled to the put lists.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *psb, *psb_next;
	struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next;
	struct lpfc_sli4_hdw_queue *qp;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt, idx;


	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point the HBA is either reset or DOA.  Either way,
	 * nothing should remain on lpfc_abts_els_sgl_list; it needs to
	 * go back on lpfc_els_sgl_list so it can either be freed (if
	 * unloading) or reposted (if restarting the port).
	 */
	spin_lock_irq(&phba->hbalock);  /* protects els_sgl_list and the */
					/* io buf lists below */
	/* sgl_list_lock required because the worker thread also
	 * walks this list.
	 */
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_els_sgl_list);


	spin_unlock(&phba->sli4_hba.sgl_list_lock);

	/* Recycle aborted IO buffers on every hardware queue back to
	 * the put lists; the abts list locks are nested inside hbalock.
	 */
	cnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_splice_init(&qp->lpfc_abts_io_buf_list,
				 &aborts);

		list_for_each_entry_safe(psb, psb_next, &aborts, list) {
			psb->pCmd = NULL;
			psb->status = IOSTAT_SUCCESS;
			cnt++;
		}
		spin_lock(&qp->io_buf_list_put_lock);
		list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs += qp->abts_scsi_io_bufs;
		qp->put_io_bufs += qp->abts_nvme_io_bufs;
		qp->abts_scsi_io_bufs = 0;
		qp->abts_nvme_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irq(&phba->hbalock);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		/* Repost aborted NVMET context buffers */
		spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 &nvmet_aborts);
		spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
			ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
		}
	}

	lpfc_sli4_free_sp_events(phba);
	return cnt;
}
1118 
1119 
1120 
1121 
1122 
1123 
1124 
1125 
1126 
1127 
1128 
1129 
1130 int
1131 lpfc_hba_down_post(struct lpfc_hba *phba)
1132 {
1133         return (*phba->lpfc_hba_down_post)(phba);
1134 }
1135 
1136 
1137 
1138 
1139 
1140 
1141 
1142 
1143 
1144 
1145 
1146 
1147 
1148 static void
1149 lpfc_hb_timeout(struct timer_list *t)
1150 {
1151         struct lpfc_hba *phba;
1152         uint32_t tmo_posted;
1153         unsigned long iflag;
1154 
1155         phba = from_timer(phba, t, hb_tmofunc);
1156 
1157         
1158         spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
1159         tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
1160         if (!tmo_posted)
1161                 phba->pport->work_port_events |= WORKER_HB_TMO;
1162         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
1163 
1164         
1165         if (!tmo_posted)
1166                 lpfc_worker_wake_up(phba);
1167         return;
1168 }
1169 
1170 
1171 
1172 
1173 
1174 
1175 
1176 
1177 
1178 
1179 
1180 
1181 
1182 static void
1183 lpfc_rrq_timeout(struct timer_list *t)
1184 {
1185         struct lpfc_hba *phba;
1186         unsigned long iflag;
1187 
1188         phba = from_timer(phba, t, rrq_tmr);
1189         spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
1190         if (!(phba->pport->load_flag & FC_UNLOADING))
1191                 phba->hba_flag |= HBA_RRQ_ACTIVE;
1192         else
1193                 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
1194         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
1195 
1196         if (!(phba->pport->load_flag & FC_UNLOADING))
1197                 lpfc_worker_wake_up(phba);
1198 }
1199 
1200 
1201 
1202 
1203 
1204 
1205 
1206 
1207 
1208 
1209 
1210 
1211 
1212 
1213 
1214 
1215 
1216 static void
1217 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
1218 {
1219         unsigned long drvr_flag;
1220 
1221         spin_lock_irqsave(&phba->hbalock, drvr_flag);
1222         phba->hb_outstanding = 0;
1223         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
1224 
1225         
1226         mempool_free(pmboxq, phba->mbox_mem_pool);
1227         if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
1228                 !(phba->link_state == LPFC_HBA_ERROR) &&
1229                 !(phba->pport->load_flag & FC_UNLOADING))
1230                 mod_timer(&phba->hb_tmofunc,
1231                           jiffies +
1232                           msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
1233         return;
1234 }
1235 
/**
 * lpfc_hb_eq_delay_work - Periodic EQ interrupt-delay auto-tuning
 * @work: delayed work used to obtain the lpfc HBA.
 *
 * Runs every LPFC_EQ_DELAY_MSECS when cfg_auto_imax is enabled.  Counts
 * EQs per CPU; a CPU hosting fewer than two EQs (with no EQ already in
 * a delay mode) keeps zero delay.  Otherwise the interrupt count since
 * the last pass is converted into a delay in microseconds (capped at
 * LPFC_MAX_AUTO_EQ_DELAY) and applied to the EQs owned by that CPU.
 * EQs whose last_cpu moved are migrated to that CPU's per-cpu list.
 * Requeues itself unless the driver is unloading.
 **/
static void
lpfc_hb_eq_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba, eq_delay_work);
	struct lpfc_eq_intr_info *eqi, *eqi_new;
	struct lpfc_queue *eq, *eq_next;
	unsigned char *eqcnt = NULL;
	uint32_t usdelay;
	int i;
	bool update = false;

	if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE)
		goto requeue;

	eqcnt = kcalloc(num_possible_cpus(), sizeof(unsigned char),
			GFP_KERNEL);
	if (!eqcnt)
		goto requeue;

	if (phba->cfg_irq_chann > 1) {
		/* Count how many EQs each CPU currently serves */
		for (i = 0; i < phba->cfg_irq_chann; i++) {
			/* Skip handles that have no EQ attached */
			eq = phba->sli4_hba.hba_eq_hdl[i].eq;
			if (!eq)
				continue;
			if (eq->q_mode) {
				/* An EQ already in delay mode forces a
				 * full recalculation for all CPUs.
				 */
				update = true;
				break;
			}
			if (eqcnt[eq->last_cpu] < 2)
				eqcnt[eq->last_cpu]++;
		}
	} else
		update = true;

	for_each_present_cpu(i) {
		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
		if (!update && eqcnt[i] < 2) {
			/* Lightly loaded CPU: leave delay at zero */
			eqi->icnt = 0;
			continue;
		}

		/* Scale the interrupt count into a delay step */
		usdelay = (eqi->icnt / LPFC_IMAX_THRESHOLD) *
			   LPFC_EQ_DELAY_STEP;
		if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
			usdelay = LPFC_MAX_AUTO_EQ_DELAY;

		eqi->icnt = 0;

		list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
			if (eq->last_cpu != i) {
				/* EQ migrated: move it to its new CPU's
				 * per-cpu list instead of tuning it here.
				 */
				eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
						      eq->last_cpu);
				list_move_tail(&eq->cpu_list, &eqi_new->list);
				continue;
			}
			if (usdelay != eq->q_mode)
				lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
							 usdelay);
		}
	}

	kfree(eqcnt);

requeue:
	queue_delayed_work(phba->wq, &phba->eq_delay_work,
			   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
}
1310 
1311 
1312 
1313 
1314 
1315 
1316 
1317 
1318 static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
1319 {
1320         u32 i;
1321         u32 hwq_count;
1322 
1323         hwq_count = phba->cfg_hdw_queue;
1324         for (i = 0; i < hwq_count; i++) {
1325                 
1326                 lpfc_adjust_pvt_pool_count(phba, i);
1327 
1328                 
1329                 lpfc_adjust_high_watermark(phba, i);
1330 
1331 #ifdef LPFC_MXP_STAT
1332                 
1333                 lpfc_snapshot_mxp(phba, i);
1334 #endif
1335         }
1336 }
1337 
1338 
1339 
1340 
1341 
1342 
1343 
1344 
1345 
1346 
1347 
1348 
1349 
1350 
1351 
1352 
1353 
/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Worker-thread handler for the heartbeat timeout.  Performs periodic
 * housekeeping (multi-XRI pool rebalancing, per-vport sequence/FDMI
 * checks, freeing idle ELS buffers) and, when heartbeats are enabled
 * and no I/O completion has been seen within LPFC_HB_MBOX_INTERVAL,
 * issues a KILL_BOARD-style heartbeat mailbox command.  The heartbeat
 * timer is always re-armed with either the regular interval or the
 * shorter timeout value depending on whether a heartbeat is in flight.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	if (phba->cfg_xri_rebalancing) {
		/* Multi-XRI pools handler */
		lpfc_hb_mxp_handler(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			lpfc_rcv_seq_check_edtov(vports[i]);
			lpfc_fdmi_change_check(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	/* A recent I/O completion already proves the adapter is alive;
	 * just re-arm the timer and skip the heartbeat mailbox.
	 */
	if (time_after(phba->last_completion_time +
			msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
			jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	/* If the ELS buffer count has not changed since the last pass,
	 * the buffers are idle; free them all.
	 */
	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {
				pmboxq = mempool_alloc(phba->mbox_mem_pool,
							GFP_KERNEL);
				if (!pmboxq) {
					mod_timer(&phba->hb_tmofunc,
						 jiffies +
						 msecs_to_jiffies(1000 *
						 LPFC_HB_MBOX_INTERVAL));
					return;
				}

				lpfc_heart_beat(phba, pmboxq);
				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
				pmboxq->vport = phba->pport;
				retval = lpfc_sli_issue_mbox(phba, pmboxq,
						MBX_NOWAIT);

				if (retval != MBX_BUSY &&
					retval != MBX_SUCCESS) {
					mempool_free(pmboxq,
							phba->mbox_mem_pool);
					mod_timer(&phba->hb_tmofunc,
						jiffies +
						msecs_to_jiffies(1000 *
						LPFC_HB_MBOX_INTERVAL));
					return;
				}
				phba->skipped_hb = 0;
				phba->hb_outstanding = 1;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				/* Mailbox busy and no completion since the
				 * last skipped heartbeat: log a warning.
				 */
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					" updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			mod_timer(&phba->hb_tmofunc,
				 jiffies +
				 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
			return;
		} else {
			/*
			* If heart beat timeout called with hb_outstanding set
			* we need to give the hb mailbox cmd a chance to
			* complete or the timer will be stopped.
			*/
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still out"
					"standing:last compl time was %d ms.\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		}
	} else {
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	}
}
1485 
1486 
1487 
1488 
1489 
1490 
1491 
1492 
/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * SLI-3 error-attention path: marks SLI inactive, takes the port
 * offline, resets the board, completes the bring-down, and leaves the
 * HBA in the error state.  The exact call order (prep, offline,
 * barrier, reset under hbalock, post, brdready) is required by the
 * SLI-3 reset sequence.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli   *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}
1514 
1515 
1516 
1517 
1518 
1519 
1520 
1521 
/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * SLI-4 error-attention path: marks the HBA in error state, flushes the
 * IO rings, takes the port offline, and completes the bring-down.
 **/
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_HBA_ERROR;
	spin_unlock_irq(&phba->hbalock);

	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	lpfc_hba_down_post(phba);
	lpfc_unblock_mgmt_io(phba);
}
1535 
1536 
1537 
1538 
1539 
1540 
1541 
1542 
1543 
1544 
/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc HBA data structure.
 *
 * Handles a deferred hardware error attention (DEFER_ERATT).  Aborts
 * outstanding FCP I/O, takes the port offline, then polls the host
 * status register until the HS_FFER1 restart bit clears (or the device
 * is unplugged / the driver unloads).  On exit the saved error state
 * (minus HS_FFER1) and the current slim status words are recorded so
 * the generic error handler can restart the HBA.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);


	/* Firmware stops when it triggered erratt. That could cause the I/Os
	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
	 * SCSI layer retry it after re-establishing link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/* There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear before the restart is attempted */
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR ;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/* This is to ptrotect against a race condition in which
	 * first write to the host attention register clear the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}
1614 
1615 static void
1616 lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1617 {
1618         struct lpfc_board_event_header board_event;
1619         struct Scsi_Host *shost;
1620 
1621         board_event.event_type = FC_REG_BOARD_EVENT;
1622         board_event.subcategory = LPFC_EVENT_PORTINTERR;
1623         shost = lpfc_shost_from_vport(phba->pport);
1624         fc_host_post_vendor_event(shost, fc_get_event_number(),
1625                                   sizeof(board_event),
1626                                   (char *) &board_event,
1627                                   LPFC_NL_VENDOR_ID);
1628 }
1629 
1630 
1631 
1632 
1633 
1634 
1635 
1636 
1637 
1638 
1639 
/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error attention handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Invoked by the worker thread to handle a SLI-3 error attention.
 * Posts a board error event, then dispatches on the saved host status:
 * HS_FFER6/HS_FFER8 errors trigger an offline/restart/online cycle;
 * HS_CRIT_TEMP posts a temperature event and takes the port offline
 * permanently; any other error posts a dump event and takes the port
 * offline.  Bails out early if the PCI channel is offline or HBA reset
 * is disabled by configuration.
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host  *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggered erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error iocb (I/O) on txcmplq and let the SCSI layer
		 * retry it after re-establishing link.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error.  Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}
1756 
1757 
1758 
1759 
1760 
1761 
1762 
1763 
1764 
1765 
1766 
1767 
/**
 * lpfc_sli4_port_sta_fn_reset - Perform port status register function reset
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: mailbox shutdown action (LPFC_MBX_WAIT / LPFC_MBX_NO_WAIT),
 *              passed through to lpfc_offline_prep().
 * @en_rn_msg: when true, log the "2887 Reset Needed" recovery message.
 *
 * Takes the port offline, restarts the board with interrupts disabled,
 * re-enables interrupts in the previously used mode, and brings the port
 * back online.  Used by the SLI4 error attention handler to attempt
 * in-place port recovery.
 *
 * Return: 0 on successful recovery (management I/O unblocked), otherwise
 * a non-zero error code and the port is left offline.
 */
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
                            bool en_rn_msg)
{
        int rc;
        uint32_t intr_mode;

        if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
            LPFC_SLI_INTF_IF_TYPE_2) {
                /*
                 * On error status condition, driver need to wait for port
                 * ready before performing reset.
                 */
                rc = lpfc_sli4_pdev_status_reg_wait(phba);
                if (rc)
                        return rc;
        }

        /* need reset: attempt for port recovery */
        if (en_rn_msg)
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "2887 Reset Needed: Attempting Port "
                                "Recovery...\n");
        lpfc_offline_prep(phba, mbx_action);
        lpfc_sli_flush_io_rings(phba);
        lpfc_offline(phba);
        /* release interrupt for possible resource change */
        lpfc_sli4_disable_intr(phba);
        rc = lpfc_sli_brdrestart(phba);
        if (rc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "6309 Failed to restart board\n");
                return rc;
        }
        /* request and enable interrupt */
        intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
        if (intr_mode == LPFC_INTR_ERROR) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "3175 Failed to enable interrupt\n");
                return -EIO;
        }
        phba->intr_mode = intr_mode;
        rc = lpfc_online(phba);
        if (rc == 0)
                lpfc_unblock_mgmt_io(phba);

        return rc;
}
1816 
1817 
1818 
1819 
1820 
1821 
1822 
1823 
/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error attention handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.  Depending on the SLI interface type it either polls the
 * port semaphore register for a recoverable unrecoverable-error (UE)
 * condition (if_type 0) or reads the port status/error registers
 * (if_type 2/6) and attempts a port status function reset.  If recovery
 * is not possible, a dump event is reported to the management application
 * and upper layer via the FC transport.
 */
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport = phba->pport;
        uint32_t event_data;
        struct Scsi_Host *shost;
        uint32_t if_type;
        struct lpfc_register portstat_reg = {0};
        uint32_t reg_err1, reg_err2;
        uint32_t uerrlo_reg, uemasklo_reg;
        uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
        bool en_rn_msg = true;
        struct temp_event temp_event_data;
        struct lpfc_register portsmphr_reg;
        int rc, i;

        /* If the pci channel is offline, ignore possible errors, since
         * we cannot communicate with the pci card anyway.
         */
        if (pci_channel_offline(phba->pcidev)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "3166 pci channel is offline\n");
                lpfc_sli4_offline_eratt(phba);
                return;
        }

        memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
        switch (if_type) {
        case LPFC_SLI_INTF_IF_TYPE_0:
                pci_rd_rc1 = lpfc_readl(
                                phba->sli4_hba.u.if_type0.UERRLOregaddr,
                                &uerrlo_reg);
                pci_rd_rc2 = lpfc_readl(
                                phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
                                &uemasklo_reg);
                /* consider PCI bus read error as pci device been removed */
                if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
                        return;
                if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
                        lpfc_sli4_offline_eratt(phba);
                        return;
                }
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "7623 Checking UE recoverable");

                /* Poll the port semaphore, up to ue_to_sr milliseconds,
                 * for the firmware to signal a recoverable UE state.
                 */
                for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
                        if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
                                       &portsmphr_reg.word0))
                                continue;

                        smphr_port_status = bf_get(lpfc_port_smphr_port_status,
                                                   &portsmphr_reg);
                        if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
                            LPFC_PORT_SEM_UE_RECOVERABLE)
                                break;
                        /* Sleep for 1 second before retrying */
                        msleep(1000);
                }

                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "4827 smphr_port_status x%x : Waited %dSec",
                                smphr_port_status, i);

                /* Recoverable UE: wait (up to 20s) for the port to report
                 * ready, then reset the port to recover the device.
                 */
                if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
                    LPFC_PORT_SEM_UE_RECOVERABLE) {
                        for (i = 0; i < 20; i++) {
                                msleep(1000);
                                if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
                                    &portsmphr_reg.word0) &&
                                    (LPFC_POST_STAGE_PORT_READY ==
                                     bf_get(lpfc_port_smphr_port_status,
                                     &portsmphr_reg))) {
                                        rc = lpfc_sli4_port_sta_fn_reset(phba,
                                                LPFC_MBX_NO_WAIT, en_rn_msg);
                                        if (rc == 0)
                                                return;
                                        lpfc_printf_log(phba,
                                                KERN_ERR, LOG_INIT,
                                                "4215 Failed to recover UE");
                                        break;
                                }
                        }
                }
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "7624 Firmware not ready: Failing UE recovery,"
                                " waited %dSec", i);
                phba->link_state = LPFC_HBA_ERROR;
                break;

        case LPFC_SLI_INTF_IF_TYPE_2:
        case LPFC_SLI_INTF_IF_TYPE_6:
                pci_rd_rc1 = lpfc_readl(
                                phba->sli4_hba.u.if_type2.STATUSregaddr,
                                &portstat_reg.word0);
                /* consider PCI bus read error as pci device been removed */
                if (pci_rd_rc1 == -EIO) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "3151 PCI bus read access failure: x%x\n",
                                readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
                        lpfc_sli4_offline_eratt(phba);
                        return;
                }
                reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
                reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
                if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "2889 Port Overtemperature event, "
                                "taking port offline Data: x%x x%x\n",
                                reg_err1, reg_err2);

                        phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
                        temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
                        temp_event_data.event_code = LPFC_CRIT_TEMP;
                        temp_event_data.data = 0xFFFFFFFF;

                        shost = lpfc_shost_from_vport(phba->pport);
                        fc_host_post_vendor_event(shost, fc_get_event_number(),
                                                  sizeof(temp_event_data),
                                                  (char *)&temp_event_data,
                                                  SCSI_NL_VID_TYPE_PCI
                                                  | PCI_VENDOR_ID_EMULEX);

                        spin_lock_irq(&phba->hbalock);
                        phba->over_temp_state = HBA_OVER_TEMP;
                        spin_unlock_irq(&phba->hbalock);
                        lpfc_sli4_offline_eratt(phba);
                        return;
                }
                if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
                    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "3143 Port Down: Firmware Update "
                                        "Detected\n");
                        en_rn_msg = false;
                } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
                         reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "3144 Port Down: Debug Dump\n");
                else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
                         reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "3145 Port Down: Provisioning\n");

                /* If resets are disabled, leave the error state alone */
                if (!phba->cfg_enable_hba_reset)
                        return;

                /* Check port status register for function reset */
                rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
                                en_rn_msg);
                if (rc == 0) {
                        /* don't report event on forced debug dump */
                        if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
                            reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
                                return;
                        else
                                break;
                }
                /* fall through for not able to recover */
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "3152 Unrecoverable error\n");
                phba->link_state = LPFC_HBA_ERROR;
                break;
        case LPFC_SLI_INTF_IF_TYPE_1:
        default:
                break;
        }
        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                        "3123 Report dump event to upper layer\n");
        /* Send an internal error event to the management application */
        lpfc_board_errevt_to_mgmt(phba);

        event_data = FC_REG_DUMP_EVENT;
        shost = lpfc_shost_from_vport(vport);
        fc_host_post_vendor_event(shost, fc_get_event_number(),
                                  sizeof(event_data), (char *) &event_data,
                                  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}
2004 
2005 
2006 
2007 
2008 
2009 
2010 
2011 
2012 
2013 
2014 
2015 
2016 void
2017 lpfc_handle_eratt(struct lpfc_hba *phba)
2018 {
2019         (*phba->lpfc_handle_eratt)(phba);
2020 }
2021 
2022 
2023 
2024 
2025 
2026 
2027 
2028 
/**
 * lpfc_handle_latt - The SLI3 HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Invoked from the worker thread to handle a HBA host attention link
 * event.  Allocates a mailbox and a DMA buffer, flushes outstanding ELS
 * commands, and issues a READ_TOPOLOGY mailbox command (completed by
 * lpfc_mbx_cmpl_read_topology) to retrieve the new link state.  On any
 * allocation or mailbox failure, the resources acquired so far are
 * unwound via the goto ladder, link-attention interrupts are re-enabled,
 * the HA_LATT bit is cleared so the event is not reprocessed, and the
 * link is taken down with the HBA marked in error state.
 */
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport = phba->pport;
        struct lpfc_sli   *psli = &phba->sli;
        LPFC_MBOXQ_t *pmb;
        volatile uint32_t control;
        struct lpfc_dmabuf *mp;
        int rc = 0;

        pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                rc = 1;
                goto lpfc_handle_latt_err_exit;
        }

        mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
        if (!mp) {
                rc = 2;
                goto lpfc_handle_latt_free_pmb;
        }

        mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
        if (!mp->virt) {
                rc = 3;
                goto lpfc_handle_latt_free_mp;
        }

        /* Cleanup any outstanding ELS commands */
        lpfc_els_flush_all_cmd(phba);

        psli->slistat.link_event++;
        lpfc_read_topology(phba, pmb, mp);
        pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
        pmb->vport = vport;
        /* Block ELS IOCBs until we have processed this mbox command */
        phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
        rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
        if (rc == MBX_NOT_FINISHED) {
                rc = 4;
                goto lpfc_handle_latt_free_mbuf;
        }

        /* Clear Link Attention in HA REG */
        spin_lock_irq(&phba->hbalock);
        writel(HA_LATT, phba->HAregaddr);
        readl(phba->HAregaddr); /* flush */
        spin_unlock_irq(&phba->hbalock);

        return;

lpfc_handle_latt_free_mbuf:
        phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
        kfree(mp);
lpfc_handle_latt_free_pmb:
        mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
        /* Enable Link attention interrupts */
        spin_lock_irq(&phba->hbalock);
        psli->sli_flag |= LPFC_PROCESS_LA;
        control = readl(phba->HCregaddr);
        control |= HC_LAINT_ENA;
        writel(control, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */

        /* Clear Link Attention in HA REG */
        writel(HA_LATT, phba->HAregaddr);
        readl(phba->HAregaddr); /* flush */
        spin_unlock_irq(&phba->hbalock);
        lpfc_linkdown(phba);
        phba->link_state = LPFC_HBA_ERROR;

        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
                     "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

        return;
}
2108 
2109 
2110 
2111 
2112 
2113 
2114 
2115 
2116 
2117 
2118 
2119 
2120 
2121 
2122 
2123 int
2124 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
2125 {
2126         uint8_t lenlo, lenhi;
2127         int Length;
2128         int i, j;
2129         int finished = 0;
2130         int index = 0;
2131 
2132         if (!vpd)
2133                 return 0;
2134 
2135         
2136         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2137                         "0455 Vital Product Data: x%x x%x x%x x%x\n",
2138                         (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
2139                         (uint32_t) vpd[3]);
2140         while (!finished && (index < (len - 4))) {
2141                 switch (vpd[index]) {
2142                 case 0x82:
2143                 case 0x91:
2144                         index += 1;
2145                         lenlo = vpd[index];
2146                         index += 1;
2147                         lenhi = vpd[index];
2148                         index += 1;
2149                         i = ((((unsigned short)lenhi) << 8) + lenlo);
2150                         index += i;
2151                         break;
2152                 case 0x90:
2153                         index += 1;
2154                         lenlo = vpd[index];
2155                         index += 1;
2156                         lenhi = vpd[index];
2157                         index += 1;
2158                         Length = ((((unsigned short)lenhi) << 8) + lenlo);
2159                         if (Length > len - index)
2160                                 Length = len - index;
2161                         while (Length > 0) {
2162                         
2163                         if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
2164                                 index += 2;
2165                                 i = vpd[index];
2166                                 index += 1;
2167                                 j = 0;
2168                                 Length -= (3+i);
2169                                 while(i--) {
2170                                         phba->SerialNumber[j++] = vpd[index++];
2171                                         if (j == 31)
2172                                                 break;
2173                                 }
2174                                 phba->SerialNumber[j] = 0;
2175                                 continue;
2176                         }
2177                         else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
2178                                 phba->vpd_flag |= VPD_MODEL_DESC;
2179                                 index += 2;
2180                                 i = vpd[index];
2181                                 index += 1;
2182                                 j = 0;
2183                                 Length -= (3+i);
2184                                 while(i--) {
2185                                         phba->ModelDesc[j++] = vpd[index++];
2186                                         if (j == 255)
2187                                                 break;
2188                                 }
2189                                 phba->ModelDesc[j] = 0;
2190                                 continue;
2191                         }
2192                         else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
2193                                 phba->vpd_flag |= VPD_MODEL_NAME;
2194                                 index += 2;
2195                                 i = vpd[index];
2196                                 index += 1;
2197                                 j = 0;
2198                                 Length -= (3+i);
2199                                 while(i--) {
2200                                         phba->ModelName[j++] = vpd[index++];
2201                                         if (j == 79)
2202                                                 break;
2203                                 }
2204                                 phba->ModelName[j] = 0;
2205                                 continue;
2206                         }
2207                         else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
2208                                 phba->vpd_flag |= VPD_PROGRAM_TYPE;
2209                                 index += 2;
2210                                 i = vpd[index];
2211                                 index += 1;
2212                                 j = 0;
2213                                 Length -= (3+i);
2214                                 while(i--) {
2215                                         phba->ProgramType[j++] = vpd[index++];
2216                                         if (j == 255)
2217                                                 break;
2218                                 }
2219                                 phba->ProgramType[j] = 0;
2220                                 continue;
2221                         }
2222                         else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
2223                                 phba->vpd_flag |= VPD_PORT;
2224                                 index += 2;
2225                                 i = vpd[index];
2226                                 index += 1;
2227                                 j = 0;
2228                                 Length -= (3+i);
2229                                 while(i--) {
2230                                         if ((phba->sli_rev == LPFC_SLI_REV4) &&
2231                                             (phba->sli4_hba.pport_name_sta ==
2232                                              LPFC_SLI4_PPNAME_GET)) {
2233                                                 j++;
2234                                                 index++;
2235                                         } else
2236                                                 phba->Port[j++] = vpd[index++];
2237                                         if (j == 19)
2238                                                 break;
2239                                 }
2240                                 if ((phba->sli_rev != LPFC_SLI_REV4) ||
2241                                     (phba->sli4_hba.pport_name_sta ==
2242                                      LPFC_SLI4_PPNAME_NON))
2243                                         phba->Port[j] = 0;
2244                                 continue;
2245                         }
2246                         else {
2247                                 index += 2;
2248                                 i = vpd[index];
2249                                 index += 1;
2250                                 index += i;
2251                                 Length -= (3 + i);
2252                         }
2253                 }
2254                 finished = 0;
2255                 break;
2256                 case 0x78:
2257                         finished = 1;
2258                         break;
2259                 default:
2260                         index ++;
2261                         break;
2262                 }
2263         }
2264 
2265         return(1);
2266 }
2267 
2268 
2269 
2270 
2271 
2272 
2273 
2274 
2275 
2276 
2277 
2278 
2279 
2280 static void
2281 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2282 {
2283         lpfc_vpd_t *vp;
2284         uint16_t dev_id = phba->pcidev->device;
2285         int max_speed;
2286         int GE = 0;
2287         int oneConnect = 0; 
2288         struct {
2289                 char *name;
2290                 char *bus;
2291                 char *function;
2292         } m = {"<Unknown>", "", ""};
2293 
2294         if (mdp && mdp[0] != '\0'
2295                 && descp && descp[0] != '\0')
2296                 return;
2297 
2298         if (phba->lmt & LMT_64Gb)
2299                 max_speed = 64;
2300         else if (phba->lmt & LMT_32Gb)
2301                 max_speed = 32;
2302         else if (phba->lmt & LMT_16Gb)
2303                 max_speed = 16;
2304         else if (phba->lmt & LMT_10Gb)
2305                 max_speed = 10;
2306         else if (phba->lmt & LMT_8Gb)
2307                 max_speed = 8;
2308         else if (phba->lmt & LMT_4Gb)
2309                 max_speed = 4;
2310         else if (phba->lmt & LMT_2Gb)
2311                 max_speed = 2;
2312         else if (phba->lmt & LMT_1Gb)
2313                 max_speed = 1;
2314         else
2315                 max_speed = 0;
2316 
2317         vp = &phba->vpd;
2318 
2319         switch (dev_id) {
2320         case PCI_DEVICE_ID_FIREFLY:
2321                 m = (typeof(m)){"LP6000", "PCI",
2322                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2323                 break;
2324         case PCI_DEVICE_ID_SUPERFLY:
2325                 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
2326                         m = (typeof(m)){"LP7000", "PCI", ""};
2327                 else
2328                         m = (typeof(m)){"LP7000E", "PCI", ""};
2329                 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2330                 break;
2331         case PCI_DEVICE_ID_DRAGONFLY:
2332                 m = (typeof(m)){"LP8000", "PCI",
2333                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2334                 break;
2335         case PCI_DEVICE_ID_CENTAUR:
2336                 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
2337                         m = (typeof(m)){"LP9002", "PCI", ""};
2338                 else
2339                         m = (typeof(m)){"LP9000", "PCI", ""};
2340                 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2341                 break;
2342         case PCI_DEVICE_ID_RFLY:
2343                 m = (typeof(m)){"LP952", "PCI",
2344                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2345                 break;
2346         case PCI_DEVICE_ID_PEGASUS:
2347                 m = (typeof(m)){"LP9802", "PCI-X",
2348                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2349                 break;
2350         case PCI_DEVICE_ID_THOR:
2351                 m = (typeof(m)){"LP10000", "PCI-X",
2352                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2353                 break;
2354         case PCI_DEVICE_ID_VIPER:
2355                 m = (typeof(m)){"LPX1000",  "PCI-X",
2356                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2357                 break;
2358         case PCI_DEVICE_ID_PFLY:
2359                 m = (typeof(m)){"LP982", "PCI-X",
2360                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2361                 break;
2362         case PCI_DEVICE_ID_TFLY:
2363                 m = (typeof(m)){"LP1050", "PCI-X",
2364                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2365                 break;
2366         case PCI_DEVICE_ID_HELIOS:
2367                 m = (typeof(m)){"LP11000", "PCI-X2",
2368                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2369                 break;
2370         case PCI_DEVICE_ID_HELIOS_SCSP:
2371                 m = (typeof(m)){"LP11000-SP", "PCI-X2",
2372                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2373                 break;
2374         case PCI_DEVICE_ID_HELIOS_DCSP:
2375                 m = (typeof(m)){"LP11002-SP",  "PCI-X2",
2376                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2377                 break;
2378         case PCI_DEVICE_ID_NEPTUNE:
2379                 m = (typeof(m)){"LPe1000", "PCIe",
2380                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2381                 break;
2382         case PCI_DEVICE_ID_NEPTUNE_SCSP:
2383                 m = (typeof(m)){"LPe1000-SP", "PCIe",
2384                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2385                 break;
2386         case PCI_DEVICE_ID_NEPTUNE_DCSP:
2387                 m = (typeof(m)){"LPe1002-SP", "PCIe",
2388                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2389                 break;
2390         case PCI_DEVICE_ID_BMID:
2391                 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
2392                 break;
2393         case PCI_DEVICE_ID_BSMB:
2394                 m = (typeof(m)){"LP111", "PCI-X2",
2395                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2396                 break;
2397         case PCI_DEVICE_ID_ZEPHYR:
2398                 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2399                 break;
2400         case PCI_DEVICE_ID_ZEPHYR_SCSP:
2401                 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2402                 break;
2403         case PCI_DEVICE_ID_ZEPHYR_DCSP:
2404                 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
2405                 GE = 1;
2406                 break;
2407         case PCI_DEVICE_ID_ZMID:
2408                 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
2409                 break;
2410         case PCI_DEVICE_ID_ZSMB:
2411                 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
2412                 break;
2413         case PCI_DEVICE_ID_LP101:
2414                 m = (typeof(m)){"LP101", "PCI-X",
2415                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2416                 break;
2417         case PCI_DEVICE_ID_LP10000S:
2418                 m = (typeof(m)){"LP10000-S", "PCI",
2419                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2420                 break;
2421         case PCI_DEVICE_ID_LP11000S:
2422                 m = (typeof(m)){"LP11000-S", "PCI-X2",
2423                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2424                 break;
2425         case PCI_DEVICE_ID_LPE11000S:
2426                 m = (typeof(m)){"LPe11000-S", "PCIe",
2427                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2428                 break;
2429         case PCI_DEVICE_ID_SAT:
2430                 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
2431                 break;
2432         case PCI_DEVICE_ID_SAT_MID:
2433                 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
2434                 break;
2435         case PCI_DEVICE_ID_SAT_SMB:
2436                 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
2437                 break;
2438         case PCI_DEVICE_ID_SAT_DCSP:
2439                 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
2440                 break;
2441         case PCI_DEVICE_ID_SAT_SCSP:
2442                 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
2443                 break;
2444         case PCI_DEVICE_ID_SAT_S:
2445                 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2446                 break;
2447         case PCI_DEVICE_ID_HORNET:
2448                 m = (typeof(m)){"LP21000", "PCIe",
2449                                 "Obsolete, Unsupported FCoE Adapter"};
2450                 GE = 1;
2451                 break;
2452         case PCI_DEVICE_ID_PROTEUS_VF:
2453                 m = (typeof(m)){"LPev12000", "PCIe IOV",
2454                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2455                 break;
2456         case PCI_DEVICE_ID_PROTEUS_PF:
2457                 m = (typeof(m)){"LPev12000", "PCIe IOV",
2458                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2459                 break;
2460         case PCI_DEVICE_ID_PROTEUS_S:
2461                 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2462                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2463                 break;
2464         case PCI_DEVICE_ID_TIGERSHARK:
2465                 oneConnect = 1;
2466                 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
2467                 break;
2468         case PCI_DEVICE_ID_TOMCAT:
2469                 oneConnect = 1;
2470                 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2471                 break;
2472         case PCI_DEVICE_ID_FALCON:
2473                 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2474                                 "EmulexSecure Fibre"};
2475                 break;
2476         case PCI_DEVICE_ID_BALIUS:
2477                 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2478                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2479                 break;
2480         case PCI_DEVICE_ID_LANCER_FC:
2481                 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2482                 break;
2483         case PCI_DEVICE_ID_LANCER_FC_VF:
2484                 m = (typeof(m)){"LPe16000", "PCIe",
2485                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2486                 break;
2487         case PCI_DEVICE_ID_LANCER_FCOE:
2488                 oneConnect = 1;
2489                 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2490                 break;
2491         case PCI_DEVICE_ID_LANCER_FCOE_VF:
2492                 oneConnect = 1;
2493                 m = (typeof(m)){"OCe15100", "PCIe",
2494                                 "Obsolete, Unsupported FCoE"};
2495                 break;
2496         case PCI_DEVICE_ID_LANCER_G6_FC:
2497                 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2498                 break;
2499         case PCI_DEVICE_ID_LANCER_G7_FC:
2500                 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
2501                 break;
2502         case PCI_DEVICE_ID_SKYHAWK:
2503         case PCI_DEVICE_ID_SKYHAWK_VF:
2504                 oneConnect = 1;
2505                 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2506                 break;
2507         default:
2508                 m = (typeof(m)){"Unknown", "", ""};
2509                 break;
2510         }
2511 
2512         if (mdp && mdp[0] == '\0')
2513                 snprintf(mdp, 79,"%s", m.name);
2514         
2515 
2516 
2517 
2518         if (descp && descp[0] == '\0') {
2519                 if (oneConnect)
2520                         snprintf(descp, 255,
2521                                 "Emulex OneConnect %s, %s Initiator %s",
2522                                 m.name, m.function,
2523                                 phba->Port);
2524                 else if (max_speed == 0)
2525                         snprintf(descp, 255,
2526                                 "Emulex %s %s %s",
2527                                 m.name, m.bus, m.function);
2528                 else
2529                         snprintf(descp, 255,
2530                                 "Emulex %s %d%s %s %s",
2531                                 m.name, max_speed, (GE) ? "GE" : "Gb",
2532                                 m.bus, m.function);
2533         }
2534 }
2535 
2536 
2537 
2538 
2539 
2540 
2541 
2542 
2543 
2544 
2545 
2546 
2547 
/**
 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to an IOCB ring.
 * @cnt: the number of buffers to be posted to the IOCB ring.
 *
 * This routine posts QUE_RING_BUF64_CN IOCBs carrying receive DMA buffer
 * descriptors to the given IOCB ring. Each IOCB carries up to two buffer
 * descriptors (BDEs). Buffers that cannot be posted (allocation or issue
 * failure) are remembered in pring->missbufcnt so a later call can retry.
 *
 * Return codes
 *   The number of buffers NOT able to be posted to the IOCB ring.
 **/
int
lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb;
	struct lpfc_dmabuf *mp1, *mp2;

	/* Include buffers we failed to post on earlier attempts */
	cnt += pring->missbufcnt;

	/* While there are buffers to post */
	while (cnt > 0) {
		/* Allocate buffer for command iocb */
		iocb = lpfc_sli_get_iocbq(phba);
		if (iocb == NULL) {
			pring->missbufcnt = cnt;
			return cnt;
		}
		icmd = &iocb->iocb;

		/* 2 buffers can be posted per command */
		/* Allocate the first DMA buffer descriptor + payload */
		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
		if (mp1)
		    mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
		if (!mp1 || !mp1->virt) {
			/* kfree(NULL) is safe; mbuf alloc may have failed */
			kfree(mp1);
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}

		INIT_LIST_HEAD(&mp1->list);
		/* Allocate a second buffer only if more than one is needed */
		if (cnt > 1) {
			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
			if (mp2)
				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
							    &mp2->phys);
			if (!mp2 || !mp2->virt) {
				/* Unwind: second buffer, first buffer, iocb */
				kfree(mp2);
				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
				kfree(mp1);
				lpfc_sli_release_iocbq(phba, iocb);
				pring->missbufcnt = cnt;
				return cnt;
			}

			INIT_LIST_HEAD(&mp2->list);
		} else {
			mp2 = NULL;
		}

		/* Describe the first buffer in BDE 0 */
		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
		icmd->ulpBdeCount = 1;
		cnt--;
		if (mp2) {
			/* Describe the second buffer in BDE 1 */
			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
			cnt--;
			icmd->ulpBdeCount = 2;
		}

		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
		icmd->ulpLe = 1;

		/* On issue failure, undo all allocations and re-count the
		 * buffers as missing before returning the shortfall.
		 */
		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
		    IOCB_ERROR) {
			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
			kfree(mp1);
			cnt++;
			if (mp2) {
				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
				kfree(mp2);
				cnt++;
			}
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}
		/* Track the posted buffers so they can be reclaimed later */
		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
		if (mp2)
			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
	}
	pring->missbufcnt = 0;
	return 0;
}
2637 
2638 
2639 
2640 
2641 
2642 
2643 
2644 
2645 
2646 
2647 
2648 
2649 static int
2650 lpfc_post_rcv_buf(struct lpfc_hba *phba)
2651 {
2652         struct lpfc_sli *psli = &phba->sli;
2653 
2654         
2655         lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2656         
2657 
2658         return 0;
2659 }
2660 
/* S(N, V): rotate the 32-bit value V left by N bits (SHA-1 primitive) */
#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
2662 
2663 
2664 
2665 
2666 
2667 
2668 
2669 
/**
 * lpfc_sha_init - Set up the SHA-1 initial digest state
 * @HashResultPointer: pointer to an array of five 32-bit words.
 *
 * Loads the standard SHA-1 initialization vector (FIPS 180-1 H0..H4)
 * into the caller-supplied digest buffer.
 **/
static void
lpfc_sha_init(uint32_t * HashResultPointer)
{
	/* SHA-1 initial hash values H0..H4 (FIPS 180-1) */
	static const uint32_t sha1_iv[5] = {
		0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
	};
	int i;

	for (i = 0; i < 5; i++)
		HashResultPointer[i] = sha1_iv[i];
}
2679 
2680 
2681 
2682 
2683 
2684 
2685 
2686 
2687 
2688 
2689 
/**
 * lpfc_sha_iterate - Run one SHA-1 compression over a single message block
 * @HashResultPointer: five-word digest state, updated in place.
 * @HashWorkingPointer: 80-word message schedule; words 0..15 hold the
 *                      message block on entry, words 16..79 are filled here.
 *
 * Expands the 16-word message block into the full 80-word schedule, runs
 * the 80 SHA-1 rounds, and accumulates the result into the digest state
 * (FIPS 180-1 / RFC 3174).
 **/
static void
lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
{
	uint32_t a, b, c, d, e;
	uint32_t f, k, w, tmp;
	int round;

	/* Schedule: W[t] = rotl1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]) */
	for (round = 16; round <= 79; round++) {
		w = HashWorkingPointer[round - 3] ^
		    HashWorkingPointer[round - 8] ^
		    HashWorkingPointer[round - 14] ^
		    HashWorkingPointer[round - 16];
		HashWorkingPointer[round] = (w << 1) | (w >> 31);
	}

	a = HashResultPointer[0];
	b = HashResultPointer[1];
	c = HashResultPointer[2];
	d = HashResultPointer[3];
	e = HashResultPointer[4];

	for (round = 0; round <= 79; round++) {
		/* Round function and additive constant change every 20 rounds */
		if (round < 20) {
			f = (b & c) | ((~b) & d);
			k = 0x5A827999;
		} else if (round < 40) {
			f = b ^ c ^ d;
			k = 0x6ED9EBA1;
		} else if (round < 60) {
			f = (b & c) | (b & d) | (c & d);
			k = 0x8F1BBCDC;
		} else {
			f = b ^ c ^ d;
			k = 0xCA62C1D6;
		}
		tmp = ((a << 5) | (a >> 27)) + f + e + k +
		      HashWorkingPointer[round];
		e = d;
		d = c;
		c = (b << 30) | (b >> 2);
		b = a;
		a = tmp;
	}

	HashResultPointer[0] += a;
	HashResultPointer[1] += b;
	HashResultPointer[2] += c;
	HashResultPointer[3] += d;
	HashResultPointer[4] += e;
}
2736 
2737 
2738 
2739 
2740 
2741 
2742 
2743 
2744 
2745 
2746 
/**
 * lpfc_challenge_key - Fold a challenge word into a hash working word
 * @RandomChallenge: pointer to the 32-bit random challenge word (read only).
 * @HashWorking: pointer to the 32-bit working word, updated in place.
 *
 * XORs the challenge word into the hash working word.
 **/
static void
lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
{
	*HashWorking ^= *RandomChallenge;
}
2752 
2753 
2754 
2755 
2756 
2757 
2758 
2759 
2760 void
2761 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2762 {
2763         int t;
2764         uint32_t *HashWorking;
2765         uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2766 
2767         HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2768         if (!HashWorking)
2769                 return;
2770 
2771         HashWorking[0] = HashWorking[78] = *pwwnn++;
2772         HashWorking[1] = HashWorking[79] = *pwwnn;
2773 
2774         for (t = 0; t < 7; t++)
2775                 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2776 
2777         lpfc_sha_init(hbainit);
2778         lpfc_sha_iterate(hbainit, HashWorking);
2779         kfree(HashWorking);
2780 }
2781 
2782 
2783 
2784 
2785 
2786 
2787 
2788 
2789 
2790 
/**
 * lpfc_cleanup - Performs vport cleanups before deleting a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine performs the generic cleanups before deleting a vport: it
 * drops the link if it is up, pushes every node on the vport's node list
 * through the discovery state machine for removal, then waits (up to
 * roughly 30 seconds) for the node list to drain before cleaning up RRQs.
 **/
void
lpfc_cleanup(struct lpfc_vport *vport)
{
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int i = 0;

	if (phba->link_state > LPFC_LINK_DOWN)
		lpfc_port_link_failure(vport);

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp)) {
			/* Re-activate an inactive node just long enough to
			 * mark it free and drop its reference.
			 */
			ndlp = lpfc_enable_node(vport, ndlp,
						NLP_STE_UNUSED_NODE);
			if (!ndlp)
				continue;
			spin_lock_irq(&phba->ndlp_lock);
			NLP_SET_FREE_REQ(ndlp);
			spin_unlock_irq(&phba->ndlp_lock);
			/* Trigger the release of the ndlp memory */
			lpfc_nlp_put(ndlp);
			continue;
		}
		spin_lock_irq(&phba->ndlp_lock);
		if (NLP_CHK_FREE_REQ(ndlp)) {
			/* The ndlp is already flagged for memory release */
			spin_unlock_irq(&phba->ndlp_lock);
			continue;
		} else
			/* Indicate request for freeing ndlp memory */
			NLP_SET_FREE_REQ(ndlp);
		spin_unlock_irq(&phba->ndlp_lock);

		if (vport->port_type != LPFC_PHYSICAL_PORT &&
		    ndlp->nlp_DID == Fabric_DID) {
			/* Just free up ndlp with Fabric_DID for vports */
			lpfc_nlp_put(ndlp);
			continue;
		}

		/* Take care of nodes in unused state before the state
		 * machine takes action.
		 */
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
			lpfc_nlp_put(ndlp);
			continue;
		}

		if (ndlp->nlp_type & NLP_FABRIC)
			lpfc_disc_state_machine(vport, ndlp, NULL,
					NLP_EVT_DEVICE_RECOVERY);

		lpfc_disc_state_machine(vport, ndlp, NULL,
					     NLP_EVT_DEVICE_RM);
	}

	/* At this point, ALL ndlp's should be gone because of the previous
	 * NLP_EVT_DEVICE_RM events. Wait for that to happen, if needed.
	 */
	while (!list_empty(&vport->fc_nodes)) {
		if (i++ > 3000) {
			/* ~30 seconds elapsed: log remaining nodes and give up */
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				"0233 Nodelist not empty\n");
			list_for_each_entry_safe(ndlp, next_ndlp,
						&vport->fc_nodes, nlp_listp) {
				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
						LOG_NODE,
						"0282 did:x%x ndlp:x%px "
						"usgmap:x%x refcnt:%d\n",
						ndlp->nlp_DID, (void *)ndlp,
						ndlp->nlp_usg_map,
						kref_read(&ndlp->kref));
			}
			break;
		}

		/* Wait for any activity on ndlps to settle */
		msleep(10);
	}
	lpfc_cleanup_vports_rrqs(vport, NULL);
}
2873 
2874 
2875 
2876 
2877 
2878 
2879 
2880 
2881 
2882 void
2883 lpfc_stop_vport_timers(struct lpfc_vport *vport)
2884 {
2885         del_timer_sync(&vport->els_tmofunc);
2886         del_timer_sync(&vport->delayed_disc_tmo);
2887         lpfc_can_disctmo(vport);
2888         return;
2889 }
2890 
2891 
2892 
2893 
2894 
2895 
2896 
2897 
/**
 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * Clears the FCF rediscovery-pending flag and deletes the rediscovery wait
 * timer. NOTE(review): the visible caller (lpfc_sli4_stop_fcf_redisc_wait_timer)
 * invokes this with phba->hbalock held — confirm that every caller does.
 **/
void
__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	/* Clear pending FCF rediscovery wait flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;

	/* Now, try to stop the timer */
	del_timer(&phba->fcf.redisc_wait);
}
2907 
2908 
2909 
2910 
2911 
2912 
2913 
2914 
2915 
2916 
2917 void
2918 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2919 {
2920         spin_lock_irq(&phba->hbalock);
2921         if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2922                 
2923                 spin_unlock_irq(&phba->hbalock);
2924                 return;
2925         }
2926         __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2927         
2928         phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
2929         spin_unlock_irq(&phba->hbalock);
2930 }
2931 
2932 
2933 
2934 
2935 
2936 
2937 
2938 
/**
 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops all the timers associated with an HBA. It is invoked
 * before either putting an HBA offline or unloading the driver.
 **/
void
lpfc_stop_hba_timers(struct lpfc_hba *phba)
{
	if (phba->pport)
		lpfc_stop_vport_timers(phba->pport);
	cancel_delayed_work_sync(&phba->eq_delay_work);
	del_timer_sync(&phba->sli.mbox_tmo);
	del_timer_sync(&phba->fabric_block_timer);
	del_timer_sync(&phba->eratt_poll);
	del_timer_sync(&phba->hb_tmofunc);
	if (phba->sli_rev == LPFC_SLI_REV4) {
		del_timer_sync(&phba->rrq_tmr);
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	}
	phba->hb_outstanding = 0;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		/* Stop any LightPulse device specific driver timers */
		del_timer_sync(&phba->fcp_poll_timer);
		break;
	case LPFC_PCI_DEV_OC:
		/* Stop any OneConnect device specific driver timers */
		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0297 Invalid device group (x%x)\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}
2972 
2973 
2974 
2975 
2976 
2977 
2978 
2979 
2980 
2981 
2982 
/**
 * lpfc_block_mgmt_io - Mark an HBA's management interface as blocked
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: LPFC_MBX_WAIT to wait for any active mailbox command to
 *              finish, LPFC_MBX_NO_WAIT to return immediately after blocking.
 *
 * Sets LPFC_BLOCK_MGMT_IO so further management access is blocked. With
 * LPFC_MBX_WAIT, it then polls (2 ms intervals) until the currently active
 * mailbox command completes or its timeout expires, logging if it gives up.
 **/
static void
lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
{
	unsigned long iflag;
	uint8_t actcmd = MBX_HEARTBEAT;
	unsigned long timeout;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	if (mbx_action == LPFC_MBX_NO_WAIT)
		return;
	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (phba->sli.mbox_active) {
		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
				phba->sli.mbox_active) * 1000) + jiffies;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2813 Mgmt IO is Blocked %x "
				"- mbox cmd %x still active\n",
				phba->sli.sli_flag, actcmd);
			break;
		}
	}
}
3020 
3021 
3022 
3023 
3024 
3025 
3026 
3027 
3028 
/**
 * lpfc_sli4_node_prep - Assign RPIs to active remote nodes (SLI-4 only)
 * @phba: pointer to lpfc hba data structure.
 *
 * Walks every vport's node list and allocates a fresh RPI for each active
 * node. Nodes for which no RPI can be allocated are marked inactive. Vports
 * that are unloading are skipped. No-op on non-SLI-4 adapters.
 **/
void
lpfc_sli4_node_prep(struct lpfc_hba *phba)
{
	struct lpfc_nodelist  *ndlp, *next_ndlp;
	struct lpfc_vport **vports;
	int i, rpi;
	unsigned long flags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports == NULL)
		return;

	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		if (vports[i]->load_flag & FC_UNLOADING)
			continue;

		list_for_each_entry_safe(ndlp, next_ndlp,
					 &vports[i]->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			rpi = lpfc_sli4_alloc_rpi(phba);
			if (rpi == LPFC_RPI_ALLOC_ERROR) {
				/* No RPI available: deactivate the node */
				spin_lock_irqsave(&phba->ndlp_lock, flags);
				NLP_CLR_NODE_ACT(ndlp);
				spin_unlock_irqrestore(&phba->ndlp_lock, flags);
				continue;
			}
			ndlp->nlp_rpi = rpi;
			lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
					 "0009 rpi:%x DID:%x "
					 "flg:%x map:%x x%px\n", ndlp->nlp_rpi,
					 ndlp->nlp_DID, ndlp->nlp_flag,
					 ndlp->nlp_usg_map, ndlp);
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
3070 
3071 
3072 
3073 
3074 
3075 
3076 
3077 
/**
 * lpfc_create_expedite_pool - create expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine moves a batch of XRIs from the lpfc_io_buf_list_put of
 * hardware queue 0 into the expedite pool and marks each one as expedite.
 * At most XRI_BATCH buffers are reserved.
 **/
static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	struct lpfc_epd_pool *epd_pool;
	unsigned long iflag;

	epd_pool = &phba->epd_pool;
	qp = &phba->sli4_hba.hdwq[0];

	spin_lock_init(&epd_pool->lock);
	/* Lock order: put-list lock first, then pool lock */
	spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
	spin_lock(&epd_pool->lock);
	INIT_LIST_HEAD(&epd_pool->list);
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &qp->lpfc_io_buf_list_put, list) {
		list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
		lpfc_ncmd->expedite = true;
		qp->put_io_bufs--;
		epd_pool->count++;
		/* Stop once one XRI_BATCH worth of buffers is reserved */
		if (epd_pool->count >= XRI_BATCH)
			break;
	}
	spin_unlock(&epd_pool->lock);
	spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
}
3105 
3106 
3107 
3108 
3109 
3110 
3111 
3112 
3113 static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3114 {
3115         struct lpfc_sli4_hdw_queue *qp;
3116         struct lpfc_io_buf *lpfc_ncmd;
3117         struct lpfc_io_buf *lpfc_ncmd_next;
3118         struct lpfc_epd_pool *epd_pool;
3119         unsigned long iflag;
3120 
3121         epd_pool = &phba->epd_pool;
3122         qp = &phba->sli4_hba.hdwq[0];
3123 
3124         spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3125         spin_lock(&epd_pool->lock);
3126         list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3127                                  &epd_pool->list, list) {
3128                 list_move_tail(&lpfc_ncmd->list,
3129                                &qp->lpfc_io_buf_list_put);
3130                 lpfc_ncmd->flags = false;
3131                 qp->put_io_bufs++;
3132                 epd_pool->count--;
3133         }
3134         spin_unlock(&epd_pool->lock);
3135         spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3136 }
3137 
3138 
3139 
3140 
3141 
3142 
3143 
3144 
3145 
/**
 * lpfc_create_multixri_pools - create multi-XRI pools
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine creates a public (pbl) and a private (pvt) XRI pool per
 * hardware queue, moving the XRIs from each queue's lpfc_io_buf_list_put
 * into its public pool and computing the private pool watermarks. On an
 * allocation failure it tears down everything built so far and disables
 * XRI rebalancing (cfg_xri_rebalancing = 0).
 **/
void lpfc_create_multixri_pools(struct lpfc_hba *phba)
{
	u32 i, j;
	u32 hwq_count;
	u32 count_per_hwq;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_pvt_pool *pvt_pool;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
			phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
			phba->sli4_hba.io_xri_cnt);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		lpfc_create_expedite_pool(phba);

	hwq_count = phba->cfg_hdw_queue;
	count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;

	for (i = 0; i < hwq_count; i++) {
		multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);

		if (!multixri_pool) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"1238 Failed to allocate memory for "
					"multixri_pool\n");

			/* Unwind: expedite pool and all pools built so far */
			if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
				lpfc_destroy_expedite_pool(phba);

			j = 0;
			while (j < i) {
				qp = &phba->sli4_hba.hdwq[j];
				kfree(qp->p_multixri_pool);
				j++;
			}
			phba->cfg_xri_rebalancing = 0;
			return;
		}

		qp = &phba->sli4_hba.hdwq[i];
		qp->p_multixri_pool = multixri_pool;

		multixri_pool->xri_limit = count_per_hwq;
		multixri_pool->rrb_next_hwqid = i;

		/* Initialize public free xri pool */
		pbl_pool = &multixri_pool->pbl_pool;
		spin_lock_init(&pbl_pool->lock);
		/* Lock order: put-list lock first, then pool lock */
		spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
		spin_lock(&pbl_pool->lock);
		INIT_LIST_HEAD(&pbl_pool->list);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &qp->lpfc_io_buf_list_put, list) {
			list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
			qp->put_io_bufs--;
			pbl_pool->count++;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
				pbl_pool->count, i);
		spin_unlock(&pbl_pool->lock);
		spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);

		/* Initialize private free xri pool (starts empty) */
		pvt_pool = &multixri_pool->pvt_pool;
		pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
		pvt_pool->low_watermark = XRI_BATCH;
		spin_lock_init(&pvt_pool->lock);
		spin_lock_irqsave(&pvt_pool->lock, iflag);
		INIT_LIST_HEAD(&pvt_pool->list);
		pvt_pool->count = 0;
		spin_unlock_irqrestore(&pvt_pool->lock, iflag);
	}
}
3226 
3227 
3228 
3229 
3230 
3231 
3232 
/**
 * lpfc_destroy_multixri_pools - destroy multi-XRI pools
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine returns the XRIs held in each hardware queue's public and
 * private pools back to that queue's lpfc_io_buf_list_put and frees the
 * per-queue pool structures (and the expedite pool when NVMe is enabled).
 **/
static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
{
	u32 i;
	u32 hwq_count;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_pvt_pool *pvt_pool;

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		lpfc_destroy_expedite_pool(phba);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_sli_flush_io_rings(phba);

	hwq_count = phba->cfg_hdw_queue;

	for (i = 0; i < hwq_count; i++) {
		qp = &phba->sli4_hba.hdwq[i];
		multixri_pool = qp->p_multixri_pool;
		if (!multixri_pool)
			continue;

		qp->p_multixri_pool = NULL;

		spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);

		/* Deal with public free xri pool */
		pbl_pool = &multixri_pool->pbl_pool;
		spin_lock(&pbl_pool->lock);

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
				pbl_pool->count, i);

		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &pbl_pool->list, list) {
			list_move_tail(&lpfc_ncmd->list,
				       &qp->lpfc_io_buf_list_put);
			qp->put_io_bufs++;
			pbl_pool->count--;
		}

		INIT_LIST_HEAD(&pbl_pool->list);
		pbl_pool->count = 0;

		spin_unlock(&pbl_pool->lock);

		/* Deal with private free xri pool */
		pvt_pool = &multixri_pool->pvt_pool;
		spin_lock(&pvt_pool->lock);

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
				pvt_pool->count, i);

		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &pvt_pool->list, list) {
			list_move_tail(&lpfc_ncmd->list,
				       &qp->lpfc_io_buf_list_put);
			qp->put_io_bufs++;
			pvt_pool->count--;
		}

		INIT_LIST_HEAD(&pvt_pool->list);
		pvt_pool->count = 0;

		spin_unlock(&pvt_pool->lock);
		spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);

		kfree(multixri_pool);
	}
}
3309 
3310 
3311 
3312 
3313 
3314 
3315 
3316 
3317 
3318 
3319 
3320 
3321 
/**
 * lpfc_online - Initializes an hba and brings a hba online
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the HBA and brings it online. During this
 * process, the management interface is blocked to prevent user space access
 * to the HBA from interfering with the driver initialization.
 *
 * Return codes
 *   0 - successful (or already online / no hba)
 *   1 - failed
 **/
int
lpfc_online(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct lpfc_vport **vports;
	int i, error = 0;
	bool vpis_cleared = false;

	if (!phba)
		return 0;
	vport = phba->pport;

	/* Already online: nothing to do */
	if (!(vport->fc_flag & FC_OFFLINE_MODE))
		return 0;

	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0458 Bring Adapter online\n");

	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
		spin_lock_irq(&phba->hbalock);
		if (!phba->sli4_hba.max_cfg_param.vpi_used)
			vpis_cleared = true;
		spin_unlock_irq(&phba->hbalock);

		/* Restore the NVMe local port registration; error message
		 * 6132 indicates this re-registers what offline tore down.
		 */
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
				!phba->nvmet_support) {
			error = lpfc_nvme_create_localport(phba->pport);
			if (error)
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"6132 NVME restore reg failed "
					"on nvmei error x%x\n", error);
		}
	} else {
		lpfc_sli_queue_init(phba);
		if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
	}

	/* Clear offline mode on all vports; SLI4 vports may need fresh VPIs */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			struct Scsi_Host *shost;
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			if (phba->sli_rev == LPFC_SLI_REV4) {
				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
				if ((vpis_cleared) &&
				    (vports[i]->port_type !=
					LPFC_PHYSICAL_PORT))
					vports[i]->vpi = 0;
			}
			spin_unlock_irq(shost->host_lock);
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);

	if (phba->cfg_xri_rebalancing)
		lpfc_create_multixri_pools(phba);

	lpfc_cpuhp_add(phba);

	lpfc_unblock_mgmt_io(phba);
	return 0;
}
3400 
3401 
3402 
3403 
3404 
3405 
3406 
3407 
3408 
3409 
3410 
3411 
3412 void
3413 lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
3414 {
3415         unsigned long iflag;
3416 
3417         spin_lock_irqsave(&phba->hbalock, iflag);
3418         phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3419         spin_unlock_irqrestore(&phba->hbalock, iflag);
3420 }
3421 
3422 
3423 
3424 
3425 
3426 
3427 
3428 
3429 
3430 void
3431 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
3432 {
3433         struct lpfc_vport *vport = phba->pport;
3434         struct lpfc_nodelist  *ndlp, *next_ndlp;
3435         struct lpfc_vport **vports;
3436         struct Scsi_Host *shost;
3437         int i;
3438 
3439         if (vport->fc_flag & FC_OFFLINE_MODE)
3440                 return;
3441 
3442         lpfc_block_mgmt_io(phba, mbx_action);
3443 
3444         lpfc_linkdown(phba);
3445 
3446         
3447         vports = lpfc_create_vport_work_array(phba);
3448         if (vports != NULL) {
3449                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3450                         if (vports[i]->load_flag & FC_UNLOADING)
3451                                 continue;
3452                         shost = lpfc_shost_from_vport(vports[i]);
3453                         spin_lock_irq(shost->host_lock);
3454                         vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
3455                         vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3456                         vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
3457                         spin_unlock_irq(shost->host_lock);
3458 
3459                         shost = lpfc_shost_from_vport(vports[i]);
3460                         list_for_each_entry_safe(ndlp, next_ndlp,
3461                                                  &vports[i]->fc_nodes,
3462                                                  nlp_listp) {
3463                                 if (!NLP_CHK_NODE_ACT(ndlp))
3464                                         continue;
3465                                 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
3466                                         continue;
3467                                 if (ndlp->nlp_type & NLP_FABRIC) {
3468                                         lpfc_disc_state_machine(vports[i], ndlp,
3469                                                 NULL, NLP_EVT_DEVICE_RECOVERY);
3470                                         lpfc_disc_state_machine(vports[i], ndlp,
3471                                                 NULL, NLP_EVT_DEVICE_RM);
3472                                 }
3473                                 spin_lock_irq(shost->host_lock);
3474                                 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3475                                 spin_unlock_irq(shost->host_lock);
3476                                 
3477 
3478 
3479 
3480 
3481                                 if (phba->sli_rev == LPFC_SLI_REV4) {
3482                                         lpfc_printf_vlog(ndlp->vport,
3483                                                          KERN_INFO, LOG_NODE,
3484                                                          "0011 lpfc_offline: "
3485                                                          "ndlp:x%px did %x "
3486                                                          "usgmap:x%x rpi:%x\n",
3487                                                          ndlp, ndlp->nlp_DID,
3488                                                          ndlp->nlp_usg_map,
3489                                                          ndlp->nlp_rpi);
3490 
3491                                         lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
3492                                 }
3493                                 lpfc_unreg_rpi(vports[i], ndlp);
3494                         }
3495                 }
3496         }
3497         lpfc_destroy_vport_work_array(phba, vports);
3498 
3499         lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
3500 
3501         if (phba->wq)
3502                 flush_workqueue(phba->wq);
3503 }
3504 
3505 
3506 
3507 
3508 
3509 
3510 
3511 
3512 
/**
 * lpfc_offline - Bring a HBA offline
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine actually brings a HBA offline. It stops the port, tears down
 * the NVME target/local port registrations, stops all vport timers, brings
 * down the SLI layer, and finally marks every vport FC_OFFLINE_MODE.
 **/
void
lpfc_offline(struct lpfc_hba *phba)
{
	struct Scsi_Host  *shost;
	struct lpfc_vport **vports;
	int i;

	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
		return;

	/* stop port and all timers associated with this hba */
	lpfc_stop_port(phba);

	/* Tear down the local and target port registrations.  The
	 * nvme transports need to cleanup.
	 */
	lpfc_nvmet_destroy_targetport(phba);
	lpfc_nvme_destroy_localport(phba->pport);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_stop_vport_timers(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0460 Bring Adapter offline\n");
	/* Bring down the SLI Layer and cleanup.  The HBA is offline now. */
	lpfc_sli_hba_down(phba);
	spin_lock_irq(&phba->hbalock);
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->work_port_events = 0;
			vports[i]->fc_flag |= FC_OFFLINE_MODE;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);
	__lpfc_cpuhp_remove(phba);

	if (phba->cfg_xri_rebalancing)
		lpfc_destroy_multixri_pools(phba);
}
3560 
3561 
3562 
3563 
3564 
3565 
3566 
3567 
3568 
3569 static void
3570 lpfc_scsi_free(struct lpfc_hba *phba)
3571 {
3572         struct lpfc_io_buf *sb, *sb_next;
3573 
3574         if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3575                 return;
3576 
3577         spin_lock_irq(&phba->hbalock);
3578 
3579         
3580 
3581         spin_lock(&phba->scsi_buf_list_put_lock);
3582         list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3583                                  list) {
3584                 list_del(&sb->list);
3585                 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3586                               sb->dma_handle);
3587                 kfree(sb);
3588                 phba->total_scsi_bufs--;
3589         }
3590         spin_unlock(&phba->scsi_buf_list_put_lock);
3591 
3592         spin_lock(&phba->scsi_buf_list_get_lock);
3593         list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3594                                  list) {
3595                 list_del(&sb->list);
3596                 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3597                               sb->dma_handle);
3598                 kfree(sb);
3599                 phba->total_scsi_bufs--;
3600         }
3601         spin_unlock(&phba->scsi_buf_list_get_lock);
3602         spin_unlock_irq(&phba->hbalock);
3603 }
3604 
3605 
3606 
3607 
3608 
3609 
3610 
3611 
3612 
3613 void
3614 lpfc_io_free(struct lpfc_hba *phba)
3615 {
3616         struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
3617         struct lpfc_sli4_hdw_queue *qp;
3618         int idx;
3619 
3620         for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3621                 qp = &phba->sli4_hba.hdwq[idx];
3622                 
3623                 spin_lock(&qp->io_buf_list_put_lock);
3624                 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3625                                          &qp->lpfc_io_buf_list_put,
3626                                          list) {
3627                         list_del(&lpfc_ncmd->list);
3628                         qp->put_io_bufs--;
3629                         dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3630                                       lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3631                         if (phba->cfg_xpsgl && !phba->nvmet_support)
3632                                 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3633                         lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3634                         kfree(lpfc_ncmd);
3635                         qp->total_io_bufs--;
3636                 }
3637                 spin_unlock(&qp->io_buf_list_put_lock);
3638 
3639                 spin_lock(&qp->io_buf_list_get_lock);
3640                 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3641                                          &qp->lpfc_io_buf_list_get,
3642                                          list) {
3643                         list_del(&lpfc_ncmd->list);
3644                         qp->get_io_bufs--;
3645                         dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3646                                       lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3647                         if (phba->cfg_xpsgl && !phba->nvmet_support)
3648                                 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3649                         lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3650                         kfree(lpfc_ncmd);
3651                         qp->total_io_bufs--;
3652                 }
3653                 spin_unlock(&qp->io_buf_list_get_lock);
3654         }
3655 }
3656 
3657 
3658 
3659 
3660 
3661 
3662 
3663 
3664 
3665 
3666 
3667 
3668 
/**
 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * Recomputes the number of ELS XRIs required, grows or shrinks the ELS
 * sgl list accordingly, and then re-assigns physical XRIs to every sgl
 * on the list (needed after a port function reset invalidates the old
 * XRI mappings).
 *
 * Return codes
 *   0       - successful
 *   -ENOMEM - allocation or xri assignment failure (list is freed)
 **/
int
lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
	uint16_t i, lxri, xri_cnt, els_xri_cnt;
	LIST_HEAD(els_sgl_list);
	int rc;

	/*
	 * update on pci function's els xri-sgl list
	 */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);

	if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
		/* els xri-sgl expanded */
		xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3157 ELS xri-sgl count increased from "
				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
				els_xri_cnt);
		/* allocate the additional els sgls */
		for (i = 0; i < xri_cnt; i++) {
			sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
					     GFP_KERNEL);
			if (sglq_entry == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"2562 Failure to allocate an "
						"ELS sgl entry:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->buff_type = GEN_BUFF_TYPE;
			sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
							   &sglq_entry->phys);
			if (sglq_entry->virt == NULL) {
				kfree(sglq_entry);
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"2563 Failure to allocate an "
						"ELS mbuf:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->sgl = sglq_entry->virt;
			memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
			sglq_entry->state = SGL_FREED;
			list_add_tail(&sglq_entry->list, &els_sgl_list);
		}
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&els_sgl_list,
				 &phba->sli4_hba.lpfc_els_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
		/* els xri-sgl shrinked */
		xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3158 ELS xri-sgl count decreased from "
				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
				els_xri_cnt);
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
				 &els_sgl_list);
		/* release extra els sgls from list */
		for (i = 0; i < xri_cnt; i++) {
			list_remove_head(&els_sgl_list,
					 sglq_entry, struct lpfc_sglq, list);
			if (sglq_entry) {
				__lpfc_mbuf_free(phba, sglq_entry->virt,
						 sglq_entry->phys);
				kfree(sglq_entry);
			}
		}
		list_splice_init(&els_sgl_list,
				 &phba->sli4_hba.lpfc_els_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3163 ELS xri-sgl count unchanged: %d\n",
				els_xri_cnt);
	phba->sli4_hba.els_xri_cnt = els_xri_cnt;

	/* update xris to els sgls on the list */
	sglq_entry = NULL;
	sglq_entry_next = NULL;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &phba->sli4_hba.lpfc_els_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2400 Failed to allocate xri for "
					"ELS sgl\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		sglq_entry->sli4_lxritag = lxri;
		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	return 0;

out_free_mem:
	lpfc_free_els_sgl_list(phba);
	return rc;
}
3775 
3776 
3777 
3778 
3779 
3780 
3781 
3782 
3783 
3784 
3785 
3786 
3787 
/**
 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * Recomputes the number of NVMET XRIs (max_xri minus the ELS share),
 * grows or shrinks the NVMET sgl list accordingly, and then re-assigns
 * physical XRIs to every sgl on the list (needed after a port function
 * reset invalidates the old XRI mappings).
 *
 * Return codes
 *   0       - successful
 *   -ENOMEM - allocation or xri assignment failure (list is freed)
 **/
int
lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
	uint16_t i, lxri, xri_cnt, els_xri_cnt;
	uint16_t nvmet_xri_cnt;
	LIST_HEAD(nvmet_sgl_list);
	int rc;

	/*
	 * update on pci function's nvmet xri-sgl list
	 */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);

	/* For NVMET, ALL remaining XRIs are dedicated for IO processing */
	nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
	if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
		/* els xri-sgl expanded */
		xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"6302 NVMET xri-sgl cnt grew from %d to %d\n",
				phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
		/* allocate the additional nvmet sgls */
		for (i = 0; i < xri_cnt; i++) {
			sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
					     GFP_KERNEL);
			if (sglq_entry == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"6303 Failure to allocate an "
						"NVMET sgl entry:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->buff_type = NVMET_BUFF_TYPE;
			sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
							   &sglq_entry->phys);
			if (sglq_entry->virt == NULL) {
				kfree(sglq_entry);
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"6304 Failure to allocate an "
						"NVMET buf:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->sgl = sglq_entry->virt;
			memset(sglq_entry->sgl, 0,
			       phba->cfg_sg_dma_buf_size);
			sglq_entry->state = SGL_FREED;
			list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
		}
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&nvmet_sgl_list,
				 &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
		/* nvmet xri-sgl shrunk */
		xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"6305 NVMET xri-sgl count decreased from "
				"%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
				nvmet_xri_cnt);
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
				 &nvmet_sgl_list);
		/* release extra nvmet sgls from list */
		for (i = 0; i < xri_cnt; i++) {
			list_remove_head(&nvmet_sgl_list,
					 sglq_entry, struct lpfc_sglq, list);
			if (sglq_entry) {
				lpfc_nvmet_buf_free(phba, sglq_entry->virt,
						    sglq_entry->phys);
				kfree(sglq_entry);
			}
		}
		list_splice_init(&nvmet_sgl_list,
				 &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"6306 NVMET xri-sgl count unchanged: %d\n",
				nvmet_xri_cnt);
	phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;

	/* update xris to nvmet sgls on the list */
	sglq_entry = NULL;
	sglq_entry_next = NULL;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"6307 Failed to allocate xri for "
					"NVMET sgl\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		sglq_entry->sli4_lxritag = lxri;
		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	return 0;

out_free_mem:
	lpfc_free_nvmet_sgl_list(phba);
	return rc;
}
3897 
/**
 * lpfc_io_buf_flush - Collect all hdwq IO buffers onto one XRI-sorted list
 * @phba: pointer to lpfc hba data structure.
 * @cbuf: list head to receive the collected IO buffers.
 *
 * Empties every hardware queue's get/put free lists (zeroing the queue
 * counters), then transfers the buffers onto @cbuf in ascending XRI order
 * using a simple insertion sort.
 *
 * Return: the number of IO buffers collected.
 **/
int
lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
{
	LIST_HEAD(blist);
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_io_buf *iobufp, *prev_iobufp;
	int idx, cnt, xri, inserted;

	cnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];
		spin_lock_irq(&qp->io_buf_list_get_lock);
		spin_lock(&qp->io_buf_list_put_lock);

		/* Take everything off the get and put lists */
		list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
		list_splice(&qp->lpfc_io_buf_list_put, &blist);
		INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
		INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
		cnt += qp->get_io_bufs + qp->put_io_bufs;
		qp->get_io_bufs = 0;
		qp->put_io_bufs = 0;
		qp->total_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock_irq(&qp->io_buf_list_get_lock);
	}

	/*
	 * Take IO buffers off blist and put them on cbuf sorted by XRI.
	 * A short-circuit early exit returns whatever has been moved if
	 * blist runs dry before cnt entries are consumed.
	 */
	for (idx = 0; idx < cnt; idx++) {
		list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
		if (!lpfc_cmd)
			return cnt;
		if (idx == 0) {
			list_add_tail(&lpfc_cmd->list, cbuf);
			continue;
		}
		xri = lpfc_cmd->cur_iocbq.sli4_xritag;
		inserted = 0;
		prev_iobufp = NULL;
		list_for_each_entry(iobufp, cbuf, list) {
			if (xri < iobufp->cur_iocbq.sli4_xritag) {
				if (prev_iobufp)
					list_add(&lpfc_cmd->list,
						 &prev_iobufp->list);
				else
					list_add(&lpfc_cmd->list, cbuf);
				inserted = 1;
				break;
			}
			prev_iobufp = iobufp;
		}
		if (!inserted)
			list_add_tail(&lpfc_cmd->list, cbuf);
	}
	return cnt;
}
3959 
3960 int
3961 lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
3962 {
3963         struct lpfc_sli4_hdw_queue *qp;
3964         struct lpfc_io_buf *lpfc_cmd;
3965         int idx, cnt;
3966 
3967         qp = phba->sli4_hba.hdwq;
3968         cnt = 0;
3969         while (!list_empty(cbuf)) {
3970                 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3971                         list_remove_head(cbuf, lpfc_cmd,
3972                                          struct lpfc_io_buf, list);
3973                         if (!lpfc_cmd)
3974                                 return cnt;
3975                         cnt++;
3976                         qp = &phba->sli4_hba.hdwq[idx];
3977                         lpfc_cmd->hdwq_no = idx;
3978                         lpfc_cmd->hdwq = qp;
3979                         lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
3980                         lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
3981                         spin_lock(&qp->io_buf_list_put_lock);
3982                         list_add_tail(&lpfc_cmd->list,
3983                                       &qp->lpfc_io_buf_list_put);
3984                         qp->put_io_bufs++;
3985                         qp->total_io_bufs++;
3986                         spin_unlock(&qp->io_buf_list_put_lock);
3987                 }
3988         }
3989         return cnt;
3990 }
3991 
3992 
3993 
3994 
3995 
3996 
3997 
3998 
3999 
4000 
4001 
4002 
4003 
/**
 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping for IO buffers
 * @phba: pointer to lpfc hba data structure.
 *
 * Recomputes the maximum number of IO XRIs (max_xri minus the ELS share),
 * flushes all IO buffers onto a local list, releases any buffers over the
 * new maximum, re-assigns physical XRIs to the remainder, and replenishes
 * the hardware queues.
 *
 * Return codes
 *   0       - successful
 *   -ENOMEM - xri assignment failure (all IO buffers are freed)
 **/
int
lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
	uint16_t i, lxri, els_xri_cnt;
	uint16_t io_xri_cnt, io_xri_max;
	LIST_HEAD(io_sgl_list);
	int rc, cnt;

	/*
	 * update on pci function's allocated nvme xri-sgl list
	 */

	/* maximum number of xris available for nvme buffers */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
	io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
	phba->sli4_hba.io_xri_max = io_xri_max;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"6074 Current allocated XRI sgl count:%d, "
			"maximum XRI count:%d\n",
			phba->sli4_hba.io_xri_cnt,
			phba->sli4_hba.io_xri_max);

	cnt = lpfc_io_buf_flush(phba, &io_sgl_list);

	if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
		/* max nvme xri shrunk below the allocated nvme buffers */
		io_xri_cnt = phba->sli4_hba.io_xri_cnt -
					phba->sli4_hba.io_xri_max;
		/* release the extra allocated nvme buffers */
		for (i = 0; i < io_xri_cnt; i++) {
			list_remove_head(&io_sgl_list, lpfc_ncmd,
					 struct lpfc_io_buf, list);
			if (lpfc_ncmd) {
				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
					      lpfc_ncmd->data,
					      lpfc_ncmd->dma_handle);
				kfree(lpfc_ncmd);
			}
		}
		phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
	}

	/* update xris associated to remaining allocated nvme buffers */
	lpfc_ncmd = NULL;
	lpfc_ncmd_next = NULL;
	phba->sli4_hba.io_xri_cnt = cnt;
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &io_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"6075 Failed to allocate xri for "
					"nvme buffer\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
		lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
	return 0;

out_free_mem:
	lpfc_io_free(phba);
	return rc;
}
4072 
4073 
4074 
4075 
4076 
4077 
4078 
4079 
4080 
4081 
4082 
4083 
4084 
4085 
4086 
4087 int
4088 lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4089 {
4090         struct lpfc_io_buf *lpfc_ncmd;
4091         struct lpfc_iocbq *pwqeq;
4092         uint16_t iotag, lxri = 0;
4093         int bcnt, num_posted;
4094         LIST_HEAD(prep_nblist);
4095         LIST_HEAD(post_nblist);
4096         LIST_HEAD(nvme_nblist);
4097 
4098         phba->sli4_hba.io_xri_cnt = 0;
4099         for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
4100                 lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
4101                 if (!lpfc_ncmd)
4102                         break;
4103                 
4104 
4105 
4106 
4107 
4108                 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
4109                                                   GFP_KERNEL,
4110                                                   &lpfc_ncmd->dma_handle);
4111                 if (!lpfc_ncmd->data) {
4112                         kfree(lpfc_ncmd);
4113                         break;
4114                 }
4115 
4116                 if (phba->cfg_xpsgl && !phba->nvmet_support) {
4117                         INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
4118                 } else {
4119                         
4120 
4121 
4122 
4123                         if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
4124                             (((unsigned long)(lpfc_ncmd->data) &
4125                             (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
4126                                 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
4127                                                 "3369 Memory alignment err: "
4128                                                 "addr=%lx\n",
4129                                                 (unsigned long)lpfc_ncmd->data);
4130                                 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4131                                               lpfc_ncmd->data,
4132                                               lpfc_ncmd->dma_handle);
4133                                 kfree(lpfc_ncmd);
4134                                 break;
4135                         }
4136                 }
4137 
4138                 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
4139 
4140                 lxri = lpfc_sli4_next_xritag(phba);
4141                 if (lxri == NO_XRI) {
4142                         dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4143                                       lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4144                         kfree(lpfc_ncmd);
4145                         break;
4146                 }
4147                 pwqeq = &lpfc_ncmd->cur_iocbq;
4148 
4149                 
4150                 iotag = lpfc_sli_next_iotag(phba, pwqeq);
4151                 if (iotag == 0) {
4152                         dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4153                                       lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4154                         kfree(lpfc_ncmd);
4155                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
4156                                         "6121 Failed to allocate IOTAG for"
4157                                         " XRI:0x%x\n", lxri);
4158                         lpfc_sli4_free_xri(phba, lxri);
4159                         break;
4160                 }
4161                 pwqeq->sli4_lxritag = lxri;
4162                 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4163                 pwqeq->context1 = lpfc_ncmd;
4164 
4165                 
4166                 lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
4167                 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
4168                 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
4169                 spin_lock_init(&lpfc_ncmd->buf_lock);
4170 
4171                 
4172                 list_add_tail(&lpfc_ncmd->list, &post_nblist);
4173                 phba->sli4_hba.io_xri_cnt++;
4174         }
4175         lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
4176                         "6114 Allocate %d out of %d requested new NVME "
4177                         "buffers\n", bcnt, num_to_alloc);
4178 
4179         
4180         if (!list_empty(&post_nblist))
4181                 num_posted = lpfc_sli4_post_io_sgl_list(
4182                                 phba, &post_nblist, bcnt);
4183         else
4184                 num_posted = 0;
4185 
4186         return num_posted;
4187 }
4188 
4189 static uint64_t
4190 lpfc_get_wwpn(struct lpfc_hba *phba)
4191 {
4192         uint64_t wwn;
4193         int rc;
4194         LPFC_MBOXQ_t *mboxq;
4195         MAILBOX_t *mb;
4196 
4197         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4198                                                 GFP_KERNEL);
4199         if (!mboxq)
4200                 return (uint64_t)-1;
4201 
4202         
4203         lpfc_read_nv(phba, mboxq);
4204         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4205         if (rc != MBX_SUCCESS) {
4206                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4207                                 "6019 Mailbox failed , mbxCmd x%x "
4208                                 "READ_NV, mbxStatus x%x\n",
4209                                 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4210                                 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
4211                 mempool_free(mboxq, phba->mbox_mem_pool);
4212                 return (uint64_t) -1;
4213         }
4214         mb = &mboxq->u.mb;
4215         memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
4216         
4217         mempool_free(mboxq, phba->mbox_mem_pool);
4218         if (phba->sli_rev == LPFC_SLI_REV4)
4219                 return be64_to_cpu(wwn);
4220         else
4221                 return rol64(wwn, 32);
4222 }
4223 
4224 
4225 
4226 
4227 
4228 
4229 
4230 
4231 
4232 
4233 
4234 
4235 
4236 
4237 
4238 
4239 
4240 struct lpfc_vport *
4241 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
4242 {
4243         struct lpfc_vport *vport;
4244         struct Scsi_Host  *shost = NULL;
4245         int error = 0;
4246         int i;
4247         uint64_t wwn;
4248         bool use_no_reset_hba = false;
4249         int rc;
4250 
4251         if (lpfc_no_hba_reset_cnt) {
4252                 if (phba->sli_rev < LPFC_SLI_REV4 &&
4253                     dev == &phba->pcidev->dev) {
4254                         
4255                         lpfc_sli_brdrestart(phba);
4256                         rc = lpfc_sli_chipset_init(phba);
4257                         if (rc)
4258                                 return NULL;
4259                 }
4260                 wwn = lpfc_get_wwpn(phba);
4261         }
4262 
4263         for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
4264                 if (wwn == lpfc_no_hba_reset[i]) {
4265                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4266                                         "6020 Setting use_no_reset port=%llx\n",
4267                                         wwn);
4268                         use_no_reset_hba = true;
4269                         break;
4270                 }
4271         }
4272 
4273         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
4274                 if (dev != &phba->pcidev->dev) {
4275                         shost = scsi_host_alloc(&lpfc_vport_template,
4276                                                 sizeof(struct lpfc_vport));
4277                 } else {
4278                         if (!use_no_reset_hba)
4279                                 shost = scsi_host_alloc(&lpfc_template,
4280                                                 sizeof(struct lpfc_vport));
4281                         else
4282                                 shost = scsi_host_alloc(&lpfc_template_no_hr,
4283                                                 sizeof(struct lpfc_vport));
4284                 }
4285         } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
4286                 shost = scsi_host_alloc(&lpfc_template_nvme,
4287                                         sizeof(struct lpfc_vport));
4288         }
4289         if (!shost)
4290                 goto out;
4291 
4292         vport = (struct lpfc_vport *) shost->hostdata;
4293         vport->phba = phba;
4294         vport->load_flag |= FC_LOADING;
4295         vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4296         vport->fc_rscn_flush = 0;
4297         lpfc_get_vport_cfgparam(vport);
4298 
4299         
4300         vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4301 
4302         shost->unique_id = instance;
4303         shost->max_id = LPFC_MAX_TARGET;
4304         shost->max_lun = vport->cfg_max_luns;
4305         shost->this_id = -1;
4306         shost->max_cmd_len = 16;
4307 
4308         if (phba->sli_rev == LPFC_SLI_REV4) {
4309                 if (!phba->cfg_fcp_mq_threshold ||
4310                     phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4311                         phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
4312 
4313                 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
4314                                             phba->cfg_fcp_mq_threshold);
4315 
4316                 shost->dma_boundary =
4317                         phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
4318 
4319                 if (phba->cfg_xpsgl && !phba->nvmet_support)
4320                         shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
4321                 else
4322                         shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
4323         } else
4324                 
4325 
4326 
4327                 shost->nr_hw_queues = 1;
4328 
4329         
4330 
4331 
4332 
4333 
4334         shost->can_queue = phba->cfg_hba_queue_depth - 10;
4335         if (dev != &phba->pcidev->dev) {
4336                 shost->transportt = lpfc_vport_transport_template;
4337                 vport->port_type = LPFC_NPIV_PORT;
4338         } else {
4339                 shost->transportt = lpfc_transport_template;
4340                 vport->port_type = LPFC_PHYSICAL_PORT;
4341         }
4342 
4343         
4344         INIT_LIST_HEAD(&vport->fc_nodes);
4345         INIT_LIST_HEAD(&vport->rcv_buffer_list);
4346         spin_lock_init(&vport->work_port_lock);
4347 
4348         timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
4349 
4350         timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
4351 
4352         timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
4353 
4354         if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4355                 lpfc_setup_bg(phba, shost);
4356 
4357         error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
4358         if (error)
4359                 goto out_put_shost;
4360 
4361         spin_lock_irq(&phba->port_list_lock);
4362         list_add_tail(&vport->listentry, &phba->port_list);
4363         spin_unlock_irq(&phba->port_list_lock);
4364         return vport;
4365 
4366 out_put_shost:
4367         scsi_host_put(shost);
4368 out:
4369         return NULL;
4370 }
4371 
4372 
4373 
4374 
4375 
4376 
4377 
4378 
4379 void
4380 destroy_port(struct lpfc_vport *vport)
4381 {
4382         struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4383         struct lpfc_hba  *phba = vport->phba;
4384 
4385         lpfc_debugfs_terminate(vport);
4386         fc_remove_host(shost);
4387         scsi_remove_host(shost);
4388 
4389         spin_lock_irq(&phba->port_list_lock);
4390         list_del_init(&vport->listentry);
4391         spin_unlock_irq(&phba->port_list_lock);
4392 
4393         lpfc_cleanup(vport);
4394         return;
4395 }
4396 
4397 
4398 
4399 
4400 
4401 
4402 
4403 
4404 
4405 
4406 
4407 int
4408 lpfc_get_instance(void)
4409 {
4410         int ret;
4411 
4412         ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
4413         return ret < 0 ? -1 : ret;
4414 }
4415 
4416 
4417 
4418 
4419 
4420 
4421 
4422 
4423 
4424 
4425 
4426 
4427 
4428 
4429 
4430 
4431 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
4432 {
4433         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4434         struct lpfc_hba   *phba = vport->phba;
4435         int stat = 0;
4436 
4437         spin_lock_irq(shost->host_lock);
4438 
4439         if (vport->load_flag & FC_UNLOADING) {
4440                 stat = 1;
4441                 goto finished;
4442         }
4443         if (time >= msecs_to_jiffies(30 * 1000)) {
4444                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4445                                 "0461 Scanning longer than 30 "
4446                                 "seconds.  Continuing initialization\n");
4447                 stat = 1;
4448                 goto finished;
4449         }
4450         if (time >= msecs_to_jiffies(15 * 1000) &&
4451             phba->link_state <= LPFC_LINK_DOWN) {
4452                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4453                                 "0465 Link down longer than 15 "
4454                                 "seconds.  Continuing initialization\n");
4455                 stat = 1;
4456                 goto finished;
4457         }
4458 
4459         if (vport->port_state != LPFC_VPORT_READY)
4460                 goto finished;
4461         if (vport->num_disc_nodes || vport->fc_prli_sent)
4462                 goto finished;
4463         if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
4464                 goto finished;
4465         if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
4466                 goto finished;
4467 
4468         stat = 1;
4469 
4470 finished:
4471         spin_unlock_irq(shost->host_lock);
4472         return stat;
4473 }
4474 
4475 static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
4476 {
4477         struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4478         struct lpfc_hba   *phba = vport->phba;
4479 
4480         fc_host_supported_speeds(shost) = 0;
4481         if (phba->lmt & LMT_128Gb)
4482                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
4483         if (phba->lmt & LMT_64Gb)
4484                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4485         if (phba->lmt & LMT_32Gb)
4486                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4487         if (phba->lmt & LMT_16Gb)
4488                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4489         if (phba->lmt & LMT_10Gb)
4490                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4491         if (phba->lmt & LMT_8Gb)
4492                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4493         if (phba->lmt & LMT_4Gb)
4494                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4495         if (phba->lmt & LMT_2Gb)
4496                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4497         if (phba->lmt & LMT_1Gb)
4498                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
4499 }
4500 
4501 
4502 
4503 
4504 
4505 
4506 
4507 
/**
 * lpfc_host_attrib_init - Initialize SCSI host attributes on the FC port
 * @shost: pointer to SCSI host data structure.
 *
 * Populates the FC transport's fc_host attributes (names, classes,
 * FC-4 types, speeds, frame size, dev_loss_tmo, max NPIV vports) from
 * the vport's discovered parameters, then clears the FC_LOADING flag
 * under the host lock to mark the port as fully initialized.
 **/
void lpfc_host_attrib_init(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	/*
	 * Set fixed host attributes.  Must be done after
	 * lpfc_sli_hba_setup() so the node/port names are valid.
	 */

	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_supported_classes(shost) = FC_COS_CLASS3;

	/*
	 * Supported FC-4 types bitmap; bytes [2] and [7] are set
	 * (presumably FCP and NVMe per the GS FC-4 TYPEs layout —
	 * NOTE(review): confirm against the transport bitmap encoding).
	 */
	memset(fc_host_supported_fc4s(shost), 0,
	       sizeof(fc_host_supported_fc4s(shost)));
	fc_host_supported_fc4s(shost)[2] = 1;
	fc_host_supported_fc4s(shost)[7] = 1;

	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
				 sizeof fc_host_symbolic_name(shost));

	lpfc_host_supported_speeds_set(shost);

	/* Max receive frame size from the login service parameters. */
	fc_host_maxframe_size(shost) =
		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;

	fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;

	/* This value is also unchanging */
	memset(fc_host_active_fc4s(shost), 0,
	       sizeof(fc_host_active_fc4s(shost)));
	fc_host_active_fc4s(shost)[2] = 1;
	fc_host_active_fc4s(shost)[7] = 1;

	fc_host_max_npiv_vports(shost) = phba->max_vpi;
	/* Port attributes are set; clear the loading flag under the lock. */
	spin_lock_irq(shost->host_lock);
	vport->load_flag &= ~FC_LOADING;
	spin_unlock_irq(shost->host_lock);
}
4547 
4548 
4549 
4550 
4551 
4552 
4553 
4554 
4555 
/**
 * lpfc_stop_port_s3 - Stop an SLI-3 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * Quiesces an SLI-3 port: masks interrupts at the Host Control register,
 * acknowledges all pending Host Attention bits, then stops the driver's
 * HBA timers.  The readl() after each writel() flushes the posted PCI
 * write so the device sees it before we proceed.
 **/
static void
lpfc_stop_port_s3(struct lpfc_hba *phba)
{
	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush posted write */
	/* Clear all pending interrupts */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush posted write */

	/* Reset some HBA SLI setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
}
4570 
4571 
4572 
4573 
4574 
4575 
4576 
4577 
4578 
/**
 * lpfc_stop_port_s4 - Stop an SLI-4 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * Quiesces an SLI-4 port: stops the driver's HBA timers, clears any
 * pending port work events, and marks interrupts as disabled.  The
 * pport may not exist yet if init failed early, hence the NULL check.
 **/
static void
lpfc_stop_port_s4(struct lpfc_hba *phba)
{
	/* Reset some HBA SLI4 setup states */
	lpfc_stop_hba_timers(phba);
	if (phba->pport)
		phba->pport->work_port_events = 0;
	phba->sli4_hba.intr_enable = 0;
}
4588 
4589 
4590 
4591 
4592 
4593 
4594 
4595 
/**
 * lpfc_stop_port - Wrapper function for stopping hba port
 * @phba: Pointer to HBA context object.
 *
 * Dispatches to the SLI-revision-specific stop routine installed in
 * phba->lpfc_stop_port, then drains any work still queued on the
 * driver's workqueue.
 **/
void
lpfc_stop_port(struct lpfc_hba *phba)
{
	phba->lpfc_stop_port(phba);

	/* Drain outstanding work items before the port goes away. */
	if (phba->wq)
		flush_workqueue(phba->wq);
}
4604 
4605 
4606 
4607 
4608 
4609 
4610 
4611 void
4612 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
4613 {
4614         unsigned long fcf_redisc_wait_tmo =
4615                 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
4616         
4617         mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
4618         spin_lock_irq(&phba->hbalock);
4619         
4620         phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
4621         
4622         phba->fcf.fcf_flag |= FCF_REDISC_PEND;
4623         spin_unlock_irq(&phba->hbalock);
4624 }
4625 
4626 
4627 
4628 
4629 
4630 
4631 
4632 
4633 
4634 
4635 
4636 static void
4637 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
4638 {
4639         struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
4640 
4641         
4642         spin_lock_irq(&phba->hbalock);
4643         if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
4644                 spin_unlock_irq(&phba->hbalock);
4645                 return;
4646         }
4647         
4648         phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
4649         
4650         phba->fcf.fcf_flag |= FCF_REDISC_EVT;
4651         spin_unlock_irq(&phba->hbalock);
4652         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
4653                         "2776 FCF rediscover quiescent timer expired\n");
4654         
4655         lpfc_worker_wake_up(phba);
4656 }
4657 
4658 
4659 
4660 
4661 
4662 
4663 
4664 
4665 static void
4666 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
4667                            struct lpfc_acqe_link *acqe_link)
4668 {
4669         switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
4670         case LPFC_ASYNC_LINK_FAULT_NONE:
4671         case LPFC_ASYNC_LINK_FAULT_LOCAL:
4672         case LPFC_ASYNC_LINK_FAULT_REMOTE:
4673         case LPFC_ASYNC_LINK_FAULT_LR_LRR:
4674                 break;
4675         default:
4676                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4677                                 "0398 Unknown link fault code: x%x\n",
4678                                 bf_get(lpfc_acqe_link_fault, acqe_link));
4679                 break;
4680         }
4681 }
4682 
4683 
4684 
4685 
4686 
4687 
4688 
4689 
4690 
4691 
4692 
4693 static uint8_t
4694 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
4695                           struct lpfc_acqe_link *acqe_link)
4696 {
4697         uint8_t att_type;
4698 
4699         switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
4700         case LPFC_ASYNC_LINK_STATUS_DOWN:
4701         case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
4702                 att_type = LPFC_ATT_LINK_DOWN;
4703                 break;
4704         case LPFC_ASYNC_LINK_STATUS_UP:
4705                 
4706                 att_type = LPFC_ATT_RESERVED;
4707                 break;
4708         case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
4709                 att_type = LPFC_ATT_LINK_UP;
4710                 break;
4711         default:
4712                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4713                                 "0399 Invalid link attention type: x%x\n",
4714                                 bf_get(lpfc_acqe_link_status, acqe_link));
4715                 att_type = LPFC_ATT_RESERVED;
4716                 break;
4717         }
4718         return att_type;
4719 }
4720 
4721 
4722 
4723 
4724 
4725 
4726 
4727 
4728 
4729 uint32_t
4730 lpfc_sli_port_speed_get(struct lpfc_hba *phba)
4731 {
4732         uint32_t link_speed;
4733 
4734         if (!lpfc_is_link_up(phba))
4735                 return 0;
4736 
4737         if (phba->sli_rev <= LPFC_SLI_REV3) {
4738                 switch (phba->fc_linkspeed) {
4739                 case LPFC_LINK_SPEED_1GHZ:
4740                         link_speed = 1000;
4741                         break;
4742                 case LPFC_LINK_SPEED_2GHZ:
4743                         link_speed = 2000;
4744                         break;
4745                 case LPFC_LINK_SPEED_4GHZ:
4746                         link_speed = 4000;
4747                         break;
4748                 case LPFC_LINK_SPEED_8GHZ:
4749                         link_speed = 8000;
4750                         break;
4751                 case LPFC_LINK_SPEED_10GHZ:
4752                         link_speed = 10000;
4753                         break;
4754                 case LPFC_LINK_SPEED_16GHZ:
4755                         link_speed = 16000;
4756                         break;
4757                 default:
4758                         link_speed = 0;
4759                 }
4760         } else {
4761                 if (phba->sli4_hba.link_state.logical_speed)
4762                         link_speed =
4763                               phba->sli4_hba.link_state.logical_speed;
4764                 else
4765                         link_speed = phba->sli4_hba.link_state.speed;
4766         }
4767         return link_speed;
4768 }
4769 
4770 
4771 
4772 
4773 
4774 
4775 
4776 
4777 
4778 
4779 
4780 
4781 static uint32_t
4782 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
4783                            uint8_t speed_code)
4784 {
4785         uint32_t port_speed;
4786 
4787         switch (evt_code) {
4788         case LPFC_TRAILER_CODE_LINK:
4789                 switch (speed_code) {
4790                 case LPFC_ASYNC_LINK_SPEED_ZERO:
4791                         port_speed = 0;
4792                         break;
4793                 case LPFC_ASYNC_LINK_SPEED_10MBPS:
4794                         port_speed = 10;
4795                         break;
4796                 case LPFC_ASYNC_LINK_SPEED_100MBPS:
4797                         port_speed = 100;
4798                         break;
4799                 case LPFC_ASYNC_LINK_SPEED_1GBPS:
4800                         port_speed = 1000;
4801                         break;
4802                 case LPFC_ASYNC_LINK_SPEED_10GBPS:
4803                         port_speed = 10000;
4804                         break;
4805                 case LPFC_ASYNC_LINK_SPEED_20GBPS:
4806                         port_speed = 20000;
4807                         break;
4808                 case LPFC_ASYNC_LINK_SPEED_25GBPS:
4809                         port_speed = 25000;
4810                         break;
4811                 case LPFC_ASYNC_LINK_SPEED_40GBPS:
4812                         port_speed = 40000;
4813                         break;
4814                 default:
4815                         port_speed = 0;
4816                 }
4817                 break;
4818         case LPFC_TRAILER_CODE_FC:
4819                 switch (speed_code) {
4820                 case LPFC_FC_LA_SPEED_UNKNOWN:
4821                         port_speed = 0;
4822                         break;
4823                 case LPFC_FC_LA_SPEED_1G:
4824                         port_speed = 1000;
4825                         break;
4826                 case LPFC_FC_LA_SPEED_2G:
4827                         port_speed = 2000;
4828                         break;
4829                 case LPFC_FC_LA_SPEED_4G:
4830                         port_speed = 4000;
4831                         break;
4832                 case LPFC_FC_LA_SPEED_8G:
4833                         port_speed = 8000;
4834                         break;
4835                 case LPFC_FC_LA_SPEED_10G:
4836                         port_speed = 10000;
4837                         break;
4838                 case LPFC_FC_LA_SPEED_16G:
4839                         port_speed = 16000;
4840                         break;
4841                 case LPFC_FC_LA_SPEED_32G:
4842                         port_speed = 32000;
4843                         break;
4844                 case LPFC_FC_LA_SPEED_64G:
4845                         port_speed = 64000;
4846                         break;
4847                 case LPFC_FC_LA_SPEED_128G:
4848                         port_speed = 128000;
4849                         break;
4850                 default:
4851                         port_speed = 0;
4852                 }
4853                 break;
4854         default:
4855                 port_speed = 0;
4856         }
4857         return port_speed;
4858 }
4859 
4860 
4861 
4862 
4863 
4864 
4865 
4866 
4867 static void
4868 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
4869                          struct lpfc_acqe_link *acqe_link)
4870 {
4871         struct lpfc_dmabuf *mp;
4872         LPFC_MBOXQ_t *pmb;
4873         MAILBOX_t *mb;
4874         struct lpfc_mbx_read_top *la;
4875         uint8_t att_type;
4876         int rc;
4877 
4878         att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
4879         if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
4880                 return;
4881         phba->fcoe_eventtag = acqe_link->event_tag;
4882         pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4883         if (!pmb) {
4884                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4885                                 "0395 The mboxq allocation failed\n");
4886                 return;
4887         }
4888         mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4889         if (!mp) {
4890                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4891                                 "0396 The lpfc_dmabuf allocation failed\n");
4892                 goto out_free_pmb;
4893         }
4894         mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
4895         if (!mp->virt) {
4896                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4897                                 "0397 The mbuf allocation failed\n");
4898                 goto out_free_dmabuf;
4899         }
4900 
4901         
4902         lpfc_els_flush_all_cmd(phba);
4903 
4904         
4905         phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
4906 
4907         
4908         phba->sli.slistat.link_event++;
4909 
4910         
4911         lpfc_read_topology(phba, pmb, mp);
4912         pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
4913         pmb->vport = phba->pport;
4914 
4915         
4916         phba->sli4_hba.link_state.speed =
4917                         lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
4918                                 bf_get(lpfc_acqe_link_speed, acqe_link));
4919         phba->sli4_hba.link_state.duplex =
4920                                 bf_get(lpfc_acqe_link_duplex, acqe_link);
4921         phba->sli4_hba.link_state.status =
4922                                 bf_get(lpfc_acqe_link_status, acqe_link);
4923         phba->sli4_hba.link_state.type =
4924                                 bf_get(lpfc_acqe_link_type, acqe_link);
4925         phba->sli4_hba.link_state.number =
4926                                 bf_get(lpfc_acqe_link_number, acqe_link);
4927         phba->sli4_hba.link_state.fault =
4928                                 bf_get(lpfc_acqe_link_fault, acqe_link);
4929         phba->sli4_hba.link_state.logical_speed =
4930                         bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
4931 
4932         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4933                         "2900 Async FC/FCoE Link event - Speed:%dGBit "
4934                         "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
4935                         "Logical speed:%dMbps Fault:%d\n",
4936                         phba->sli4_hba.link_state.speed,
4937                         phba->sli4_hba.link_state.topology,
4938                         phba->sli4_hba.link_state.status,
4939                         phba->sli4_hba.link_state.type,
4940                         phba->sli4_hba.link_state.number,
4941                         phba->sli4_hba.link_state.logical_speed,
4942                         phba->sli4_hba.link_state.fault);
4943         
4944 
4945 
4946 
4947         if (!(phba->hba_flag & HBA_FCOE_MODE)) {
4948                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4949                 if (rc == MBX_NOT_FINISHED)
4950                         goto out_free_dmabuf;
4951                 return;
4952         }
4953         
4954 
4955 
4956 
4957 
4958         
4959         mb = &pmb->u.mb;
4960         mb->mbxStatus = MBX_SUCCESS;
4961 
4962         
4963         lpfc_sli4_parse_latt_fault(phba, acqe_link);
4964 
4965         
4966         la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
4967         la->eventTag = acqe_link->event_tag;
4968         bf_set(lpfc_mbx_read_top_att_type, la, att_type);
4969         bf_set(lpfc_mbx_read_top_link_spd, la,
4970                (bf_get(lpfc_acqe_link_speed, acqe_link)));
4971 
4972         
4973         bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
4974         bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
4975         bf_set(lpfc_mbx_read_top_il, la, 0);
4976         bf_set(lpfc_mbx_read_top_pb, la, 0);
4977         bf_set(lpfc_mbx_read_top_fa, la, 0);
4978         bf_set(lpfc_mbx_read_top_mm, la, 0);
4979 
4980         
4981         lpfc_mbx_cmpl_read_topology(phba, pmb);
4982 
4983         return;
4984 
4985 out_free_dmabuf:
4986         kfree(mp);
4987 out_free_pmb:
4988         mempool_free(pmb, phba->mbox_mem_pool);
4989 }
4990 
4991 
4992 
4993 
4994 
4995 
4996 
4997 
4998 
4999 
5000 
5001 
5002 
5003 static uint8_t
5004 lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
5005 {
5006         uint8_t port_speed;
5007 
5008         switch (speed_code) {
5009         case LPFC_FC_LA_SPEED_1G:
5010                 port_speed = LPFC_LINK_SPEED_1GHZ;
5011                 break;
5012         case LPFC_FC_LA_SPEED_2G:
5013                 port_speed = LPFC_LINK_SPEED_2GHZ;
5014                 break;
5015         case LPFC_FC_LA_SPEED_4G:
5016                 port_speed = LPFC_LINK_SPEED_4GHZ;
5017                 break;
5018         case LPFC_FC_LA_SPEED_8G:
5019                 port_speed = LPFC_LINK_SPEED_8GHZ;
5020                 break;
5021         case LPFC_FC_LA_SPEED_16G:
5022                 port_speed = LPFC_LINK_SPEED_16GHZ;
5023                 break;
5024         case LPFC_FC_LA_SPEED_32G:
5025                 port_speed = LPFC_LINK_SPEED_32GHZ;
5026                 break;
5027         case LPFC_FC_LA_SPEED_64G:
5028                 port_speed = LPFC_LINK_SPEED_64GHZ;
5029                 break;
5030         case LPFC_FC_LA_SPEED_128G:
5031                 port_speed = LPFC_LINK_SPEED_128GHZ;
5032                 break;
5033         case LPFC_FC_LA_SPEED_256G:
5034                 port_speed = LPFC_LINK_SPEED_256GHZ;
5035                 break;
5036         default:
5037                 port_speed = 0;
5038                 break;
5039         }
5040 
5041         return port_speed;
5042 }
5043 
/*
 * Helper macros for logging trunk state.  Both token-paste on the port
 * index and expand against locals named acqe_fc and port_fault, so they
 * may only be used inside lpfc_update_trunk_link_status().
 */
#define trunk_link_status(__idx)\
	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
	       ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
		"Link up" : "Link down") : "NA"
/* Port err: NA if port not enabled */
#define trunk_port_fault(__idx)\
	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
	       (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"

/**
 * lpfc_update_trunk_link_status - Update the per-port trunk link state
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fc: pointer to the async FC link-attention completion queue entry
 *           carrying a trunking event.
 *
 * Records the aggregate trunk speed/logical speed, translates the speed
 * code for fc_linkspeed, and for each configured trunk port (0-3)
 * updates its up/down state and latches the fault code when that port's
 * bit is set in the trunk link mask.  Logs the resulting state and any
 * faults.
 **/
static void
lpfc_update_trunk_link_status(struct lpfc_hba *phba,
			      struct lpfc_acqe_fc_la *acqe_fc)
{
	uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
	uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);

	phba->sli4_hba.link_state.speed =
		lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));

	phba->sli4_hba.link_state.logical_speed =
				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
	/* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
	phba->fc_linkspeed =
		 lpfc_async_link_speed_to_read_top(
				phba,
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));

	if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
		phba->trunk_link.link0.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
		phba->trunk_link.link1.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
		phba->trunk_link.link2.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
		phba->trunk_link.link3.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"2910 Async FC Trunking Event - Speed:%d\n"
			"\tLogical speed:%d "
			"port0: %s port1: %s port2: %s port3: %s\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.logical_speed,
			trunk_link_status(0), trunk_link_status(1),
			trunk_link_status(2), trunk_link_status(3));

	if (port_fault)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3202 trunk error:0x%x (%s) seen on port0:%s "
				/*
				 * SLI-4: We have only 0xA error codes
				 * defined as of now. print an appropriate
				 * message in case driver needs to be updated.
				 */
				"port1:%s port2:%s port3:%s\n", err, err > 0xA ?
				"UNDEFINED. update driver." : trunk_errmsg[err],
				trunk_port_fault(0), trunk_port_fault(1),
				trunk_port_fault(2), trunk_port_fault(3));
}
5119 
5120 
5121 
5122 
5123 
5124 
5125 
5126 
5127 
5128 
5129 
5130 static void
5131 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
5132 {
5133         struct lpfc_dmabuf *mp;
5134         LPFC_MBOXQ_t *pmb;
5135         MAILBOX_t *mb;
5136         struct lpfc_mbx_read_top *la;
5137         int rc;
5138 
5139         if (bf_get(lpfc_trailer_type, acqe_fc) !=
5140             LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
5141                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5142                                 "2895 Non FC link Event detected.(%d)\n",
5143                                 bf_get(lpfc_trailer_type, acqe_fc));
5144                 return;
5145         }
5146 
5147         if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
5148             LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
5149                 lpfc_update_trunk_link_status(phba, acqe_fc);
5150                 return;
5151         }
5152 
5153         
5154         phba->sli4_hba.link_state.speed =
5155                         lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
5156                                 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
5157         phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
5158         phba->sli4_hba.link_state.topology =
5159                                 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
5160         phba->sli4_hba.link_state.status =
5161                                 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
5162         phba->sli4_hba.link_state.type =
5163                                 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
5164         phba->sli4_hba.link_state.number =
5165                                 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
5166         phba->sli4_hba.link_state.fault =
5167                                 bf_get(lpfc_acqe_link_fault, acqe_fc);
5168 
5169         if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
5170             LPFC_FC_LA_TYPE_LINK_DOWN)
5171                 phba->sli4_hba.link_state.logical_speed = 0;
5172         else if (!phba->sli4_hba.conf_trunk)
5173                 phba->sli4_hba.link_state.logical_speed =
5174                                 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
5175 
5176         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5177                         "2896 Async FC event - Speed:%dGBaud Topology:x%x "
5178                         "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
5179                         "%dMbps Fault:%d\n",
5180                         phba->sli4_hba.link_state.speed,
5181                         phba->sli4_hba.link_state.topology,
5182                         phba->sli4_hba.link_state.status,
5183                         phba->sli4_hba.link_state.type,
5184                         phba->sli4_hba.link_state.number,
5185                         phba->sli4_hba.link_state.logical_speed,
5186                         phba->sli4_hba.link_state.fault);
5187         pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5188         if (!pmb) {
5189                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5190                                 "2897 The mboxq allocation failed\n");
5191                 return;
5192         }
5193         mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5194         if (!mp) {
5195                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5196                                 "2898 The lpfc_dmabuf allocation failed\n");
5197                 goto out_free_pmb;
5198         }
5199         mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
5200         if (!mp->virt) {
5201                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5202                                 "2899 The mbuf allocation failed\n");
5203                 goto out_free_dmabuf;
5204         }
5205 
5206         
5207         lpfc_els_flush_all_cmd(phba);
5208 
5209         
5210         phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
5211 
5212         
5213         phba->sli.slistat.link_event++;
5214 
5215         
5216         lpfc_read_topology(phba, pmb, mp);
5217         pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
5218         pmb->vport = phba->pport;
5219 
5220         if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
5221                 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
5222 
5223                 switch (phba->sli4_hba.link_state.status) {
5224                 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
5225                         phba->link_flag |= LS_MDS_LINK_DOWN;
5226                         break;
5227                 case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
5228                         phba->link_flag |= LS_MDS_LOOPBACK;
5229                         break;
5230                 default:
5231                         break;
5232                 }
5233 
5234                 
5235                 mb = &pmb->u.mb;
5236                 mb->mbxStatus = MBX_SUCCESS;
5237 
5238                 
5239                 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);
5240 
5241                 
5242                 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
5243                 la->eventTag = acqe_fc->event_tag;
5244 
5245                 if (phba->sli4_hba.link_state.status ==
5246                     LPFC_FC_LA_TYPE_UNEXP_WWPN) {
5247                         bf_set(lpfc_mbx_read_top_att_type, la,
5248                                LPFC_FC_LA_TYPE_UNEXP_WWPN);
5249                 } else {
5250                         bf_set(lpfc_mbx_read_top_att_type, la,
5251                                LPFC_FC_LA_TYPE_LINK_DOWN);
5252                 }
5253                 
5254                 lpfc_mbx_cmpl_read_topology(phba, pmb);
5255 
5256                 return;
5257         }
5258 
5259         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5260         if (rc == MBX_NOT_FINISHED)
5261                 goto out_free_dmabuf;
5262         return;
5263 
5264 out_free_dmabuf:
5265         kfree(mp);
5266 out_free_pmb:
5267         mempool_free(pmb, phba->mbox_mem_pool);
5268 }
5269 
5270 
5271 
5272 
5273 
5274 
5275 
5276 
/**
 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_sli: pointer to the async SLI completion queue entry.
 *
 * Handles the asynchronous SLI events visible in this handler:
 * over/normal temperature (posted to the FC transport as vendor events),
 * misconfigured optics (logged with an operator-readable message and the
 * supported speeds refreshed), and remote D_Port test initiation.
 **/
static void
lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
{
	char port_name;
	char message[128];
	uint8_t status;
	uint8_t evt_type;
	uint8_t operational = 0;
	struct temp_event temp_event_data;
	struct lpfc_acqe_misconfigured_event *misconfigured;
	struct Scsi_Host  *shost;
	struct lpfc_vport **vports;
	int rc, i;

	evt_type = bf_get(lpfc_trailer_type, acqe_sli);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2901 Async SLI event - Event Data1:x%08x Event Data2:"
			"x%08x SLI Event Type:%d\n",
			acqe_sli->event_data1, acqe_sli->event_data2,
			evt_type);

	port_name = phba->Port[0];
	if (port_name == 0x00)
		port_name = '?'; /* get port name is empty */

	switch (evt_type) {
	case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
		temp_event_data.data = (uint32_t)acqe_sli->event_data1;

		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3190 Over Temperature:%d Celsius- Port Name %c\n",
				acqe_sli->event_data1, port_name);

		phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *)&temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);
		break;
	case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_NORMAL_TEMP;
		temp_event_data.data = (uint32_t)acqe_sli->event_data1;

		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3191 Normal Temperature:%d Celsius - Port Name %c\n",
				acqe_sli->event_data1, port_name);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *)&temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);
		break;
	case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
		misconfigured = (struct lpfc_acqe_misconfigured_event *)
					&acqe_sli->event_data1;

		/* fetch the status for this port */
		switch (phba->sli4_hba.lnk_info.lnk_no) {
		case LPFC_LINK_NUMBER_0:
			status = bf_get(lpfc_sli_misconfigured_port0_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port0_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_1:
			status = bf_get(lpfc_sli_misconfigured_port1_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port1_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_2:
			status = bf_get(lpfc_sli_misconfigured_port2_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port2_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_3:
			status = bf_get(lpfc_sli_misconfigured_port3_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port3_op,
					&misconfigured->theEvent);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3296 "
					"LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
					"event: Invalid link %d",
					phba->sli4_hba.lnk_info.lnk_no);
			return;
		}

		/* Skip if optic state unchanged */
		if (phba->sli4_hba.lnk_info.optic_state == status)
			return;

		switch (status) {
		case LPFC_SLI_EVENT_STATUS_VALID:
			sprintf(message, "Physical Link is functional");
			break;
		case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
			sprintf(message, "Optics faulted/incorrectly "
				"installed/not installed - Reseat optics, "
				"if issue not resolved, replace.");
			break;
		case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
			sprintf(message,
				"Optics of two types installed - Remove one "
				"optic or install matching pair of optics.");
			break;
		case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
			sprintf(message, "Incompatible optics - Replace with "
				"compatible optics for card to function.");
			break;
		case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
			sprintf(message, "Unqualified optics - Replace with "
				"Avago optics for Warranty and Technical "
				"Support - Link is%s operational",
				(operational) ? " not" : "");
			break;
		case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
			sprintf(message, "Uncertified optics - Replace with "
				"Avago-certified optics to enable link "
				"operation - Link is%s operational",
				(operational) ? " not" : "");
			break;
		default:
			/* firmware is reporting a status we don't know about */
			sprintf(message, "Unknown event status x%02x", status);
			break;
		}

		/* Issue READ_CONFIG mbox command to refresh supported speeds */
		rc = lpfc_sli4_read_config(phba);
		if (rc) {
			phba->lmt = 0;
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3194 Unable to retrieve supported "
					"speeds, rc = 0x%x\n", rc);
		}
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				shost = lpfc_shost_from_vport(vports[i]);
				lpfc_host_supported_speeds_set(shost);
			}
		}
		lpfc_destroy_vport_work_array(phba, vports);

		phba->sli4_hba.lnk_info.optic_state = status;
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3176 Port Name %c %s\n", port_name, message);
		break;
	case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3192 Remote DPort Test Initiated - "
				"Event Data1:x%08x Event Data2: x%08x\n",
				acqe_sli->event_data1, acqe_sli->event_data2);
		break;
	default:
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3193 Async SLI event - Event Data1:x%08x Event Data2:"
				"x%08x SLI Event Type:%d\n",
				acqe_sli->event_data1, acqe_sli->event_data2,
				evt_type);
		break;
	}
}
5453 
5454 
5455 
5456 
5457 
5458 
5459 
5460 
5461 
5462 
5463 
5464 static struct lpfc_nodelist *
5465 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
5466 {
5467         struct lpfc_nodelist *ndlp;
5468         struct Scsi_Host *shost;
5469         struct lpfc_hba *phba;
5470 
5471         if (!vport)
5472                 return NULL;
5473         phba = vport->phba;
5474         if (!phba)
5475                 return NULL;
5476         ndlp = lpfc_findnode_did(vport, Fabric_DID);
5477         if (!ndlp) {
5478                 
5479                 ndlp = lpfc_nlp_init(vport, Fabric_DID);
5480                 if (!ndlp)
5481                         return 0;
5482                 
5483                 ndlp->nlp_type |= NLP_FABRIC;
5484                 
5485                 lpfc_enqueue_node(vport, ndlp);
5486         } else if (!NLP_CHK_NODE_ACT(ndlp)) {
5487                 
5488                 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
5489                 if (!ndlp)
5490                         return 0;
5491         }
5492         if ((phba->pport->port_state < LPFC_FLOGI) &&
5493                 (phba->pport->port_state != LPFC_VPORT_FAILED))
5494                 return NULL;
5495         
5496         if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
5497                 && (vport->port_state != LPFC_VPORT_FAILED))
5498                 return NULL;
5499         shost = lpfc_shost_from_vport(vport);
5500         if (!shost)
5501                 return NULL;
5502         lpfc_linkdown_port(vport);
5503         lpfc_cleanup_pending_mbox(vport);
5504         spin_lock_irq(shost->host_lock);
5505         vport->fc_flag |= FC_VPORT_CVL_RCVD;
5506         spin_unlock_irq(shost->host_lock);
5507 
5508         return ndlp;
5509 }
5510 
5511 
5512 
5513 
5514 
5515 
5516 
5517 
5518 static void
5519 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
5520 {
5521         struct lpfc_vport **vports;
5522         int i;
5523 
5524         vports = lpfc_create_vport_work_array(phba);
5525         if (vports)
5526                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
5527                         lpfc_sli4_perform_vport_cvl(vports[i]);
5528         lpfc_destroy_vport_work_array(phba, vports);
5529 }
5530 
5531 
5532 
5533 
5534 
5535 
5536 
5537 
/**
 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fip: pointer to the async FCoE FIP completion queue entry.
 *
 * Handles the asynchronous FCoE FIP events: new FCF discovered / FCF
 * parameters modified, FCF table full, FCF dead, and Clear Virtual Link
 * (CVL).  Depending on the event and the current FCF discovery state this
 * may start an FCF table scan or a fast FCF failover/rediscovery.
 **/
static void
lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
			struct lpfc_acqe_fip *acqe_fip)
{
	uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
	int rc;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host  *shost;
	int active_vlink_present;
	struct lpfc_vport **vports;
	int i;

	phba->fc_eventTag = acqe_fip->event_tag;
	phba->fcoe_eventtag = acqe_fip->event_tag;
	switch (event_type) {
	case LPFC_FIP_EVENT_TYPE_NEW_FCF:
	case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
		if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2546 New FCF event, evt_tag:x%x, "
					"index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		else
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
					LOG_DISCOVERY,
					"2788 FCF param modified event, "
					"evt_tag:x%x, index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			/*
			 * During period of FCF discovery, read the FCF
			 * table record indexed by the event to update
			 * FCF roundrobin failover eligible FCF bmask.
			 */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2779 Read FCF (x%x) for updating "
					"roundrobin FCF failover bmask\n",
					acqe_fip->index);
			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
		}

		/* If the FCF discovery is in progress, do nothing. */
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		/* If fast FCF failover rescan event is pending, do nothing */
		if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}

		/* If FCF has already been discovered, do nothing. */
		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* Otherwise, scan the entire FCF table and re-discover SAN */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2770 Start FCF table scan per async FCF "
				"event, evt_tag:x%x, index:x%x\n",
				acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
					"2547 Issue FCF scan read FCF mailbox "
					"command failed (x%x)\n", rc);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"2548 FCF Table full count 0x%x tag 0x%x\n",
			bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
			acqe_fip->event_tag);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
			"2549 FCF (x%x) disconnected from network, "
			"tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
		/*
		 * If we are in the middle of FCF failover process, clear
		 * the corresponding FCF bit in the roundrobin bitmap.
		 */
		spin_lock_irq(&phba->hbalock);
		if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
		    (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
			spin_unlock_irq(&phba->hbalock);
			/* Update FLOGI FCF failover eligible FCF bmask */
			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* If the event is not for currently used fcf do nothing */
		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
			break;

		/*
		 * Otherwise, request the port to rediscover the entire FCF
		 * table for a fast recovery from case that the current FCF
		 * is no longer valid as we are not in the middle of FCF
		 * failover process already.
		 */
		spin_lock_irq(&phba->hbalock);
		/* Mark the fast failover process in progress */
		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
		spin_unlock_irq(&phba->hbalock);

		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2771 Start FCF fast failover process due to "
				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
				"\n", acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_redisc_fcf_table(phba);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2772 Issue FCF rediscover mailbox "
					"command failed, fail through to FCF "
					"dead event\n");
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * Last resort will fail over by treating this
			 * as a link down to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		} else {
			/* Reset FCF roundrobin bmask for new discovery */
			lpfc_sli4_clear_fcf_rr_bmask(phba);
			/*
			 * Handling fast FCF failover to a DEAD FCF event is
			 * considered equivalent to receiving CVL to all vports.
			 */
			lpfc_sli4_perform_all_vport_cvl(phba);
		}
		break;
	case LPFC_FIP_EVENT_TYPE_CVL:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
			"2718 Clear Virtual Link Received for VPI 0x%x"
			" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);

		vport = lpfc_find_vport_by_vpid(phba,
						acqe_fip->index);
		ndlp = lpfc_sli4_perform_vport_cvl(vport);
		if (!ndlp)
			break;
		active_vlink_present = 0;

		vports = lpfc_create_vport_work_array(phba);
		if (vports) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				if ((!(vports[i]->fc_flag &
					FC_VPORT_CVL_RCVD)) &&
					(vports[i]->port_state > LPFC_FDISC)) {
					active_vlink_present = 1;
					break;
				}
			}
			lpfc_destroy_vport_work_array(phba, vports);
		}

		/*
		 * Don't re-instantiate if vport is marked for deletion.
		 * If we are here first then vport_delete is going to wait
		 * for discovery to complete.
		 */
		if (!(vport->load_flag & FC_UNLOADING) &&
					active_vlink_present) {
			/*
			 * If there are other active VLinks present,
			 * re-instantiate the Vlink using FDISC.
			 */
			mod_timer(&ndlp->nlp_delayfunc,
				  jiffies + msecs_to_jiffies(1000));
			shost = lpfc_shost_from_vport(vport);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_DELAY_TMO;
			spin_unlock_irq(shost->host_lock);
			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
			vport->port_state = LPFC_FDISC;
		} else {
			/*
			 * Otherwise, we request port to rediscover
			 * the entire FCF table for a fast recovery
			 * from possible case that the current FCF
			 * is no longer valid if we are not already
			 * in the FCF failover process.
			 */
			spin_lock_irq(&phba->hbalock);
			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
				spin_unlock_irq(&phba->hbalock);
				break;
			}
			/* Mark the fast failover process in progress */
			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2773 Start FCF failover per CVL, "
					"evt_tag:x%x\n", acqe_fip->event_tag);
			rc = lpfc_sli4_redisc_fcf_table(phba);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
						LOG_DISCOVERY,
						"2774 Issue FCF rediscover "
						"mailbox command failed, "
						"through to CVL event\n");
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
				spin_unlock_irq(&phba->hbalock);
				/*
				 * Last resort will be re-try on the
				 * current registered FCF entry.
				 */
				lpfc_retry_pport_discovery(phba);
			} else
				/*
				 * Reset FCF roundrobin bmask for new
				 * discovery.
				 */
				lpfc_sli4_clear_fcf_rr_bmask(phba);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0288 Unknown FCoE event type 0x%x event tag "
			"0x%x\n", event_type, acqe_fip->event_tag);
		break;
	}
}
5782 
5783 
5784 
5785 
5786 
5787 
5788 
5789 
5790 static void
5791 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
5792                          struct lpfc_acqe_dcbx *acqe_dcbx)
5793 {
5794         phba->fc_eventTag = acqe_dcbx->event_tag;
5795         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5796                         "0290 The SLI4 DCBX asynchronous event is not "
5797                         "handled yet\n");
5798 }
5799 
5800 
5801 
5802 
5803 
5804 
5805 
5806 
5807 
5808 
5809 static void
5810 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
5811                          struct lpfc_acqe_grp5 *acqe_grp5)
5812 {
5813         uint16_t prev_ll_spd;
5814 
5815         phba->fc_eventTag = acqe_grp5->event_tag;
5816         phba->fcoe_eventtag = acqe_grp5->event_tag;
5817         prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
5818         phba->sli4_hba.link_state.logical_speed =
5819                 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
5820         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5821                         "2789 GRP5 Async Event: Updating logical link speed "
5822                         "from %dMbps to %dMbps\n", prev_ll_spd,
5823                         phba->sli4_hba.link_state.logical_speed);
5824 }
5825 
5826 
5827 
5828 
5829 
5830 
5831 
5832 
5833 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
5834 {
5835         struct lpfc_cq_event *cq_event;
5836 
5837         
5838         spin_lock_irq(&phba->hbalock);
5839         phba->hba_flag &= ~ASYNC_EVENT;
5840         spin_unlock_irq(&phba->hbalock);
5841         
5842         while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
5843                 
5844                 spin_lock_irq(&phba->hbalock);
5845                 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
5846                                  cq_event, struct lpfc_cq_event, list);
5847                 spin_unlock_irq(&phba->hbalock);
5848                 
5849                 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
5850                 case LPFC_TRAILER_CODE_LINK:
5851                         lpfc_sli4_async_link_evt(phba,
5852                                                  &cq_event->cqe.acqe_link);
5853                         break;
5854                 case LPFC_TRAILER_CODE_FCOE:
5855                         lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
5856                         break;
5857                 case LPFC_TRAILER_CODE_DCBX:
5858                         lpfc_sli4_async_dcbx_evt(phba,
5859                                                  &cq_event->cqe.acqe_dcbx);
5860                         break;
5861                 case LPFC_TRAILER_CODE_GRP5:
5862                         lpfc_sli4_async_grp5_evt(phba,
5863                                                  &cq_event->cqe.acqe_grp5);
5864                         break;
5865                 case LPFC_TRAILER_CODE_FC:
5866                         lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
5867                         break;
5868                 case LPFC_TRAILER_CODE_SLI:
5869                         lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
5870                         break;
5871                 default:
5872                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5873                                         "1804 Invalid asynchrous event code: "
5874                                         "x%x\n", bf_get(lpfc_trailer_code,
5875                                         &cq_event->cqe.mcqe_cmpl));
5876                         break;
5877                 }
5878                 
5879                 lpfc_sli4_cq_event_release(phba, cq_event);
5880         }
5881 }
5882 
5883 
5884 
5885 
5886 
5887 
5888 
5889 
5890 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
5891 {
5892         int rc;
5893 
5894         spin_lock_irq(&phba->hbalock);
5895         
5896         phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
5897         
5898         phba->fcf.failover_rec.flag = 0;
5899         
5900         phba->fcf.fcf_flag |= FCF_REDISC_FOV;
5901         spin_unlock_irq(&phba->hbalock);
5902 
5903         
5904         lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
5905                         "2777 Start post-quiescent FCF table scan\n");
5906         rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
5907         if (rc)
5908                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
5909                                 "2747 Issue FCF scan read FCF mailbox "
5910                                 "command failed 0x%x\n", rc);
5911 }
5912 
5913 
5914 
5915 
5916 
5917 
5918 
5919 
5920 
5921 
5922 
5923 int
5924 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5925 {
5926         int rc;
5927 
5928         
5929         phba->pci_dev_grp = dev_grp;
5930 
5931         
5932         if (dev_grp == LPFC_PCI_DEV_OC)
5933                 phba->sli_rev = LPFC_SLI_REV4;
5934 
5935         
5936         rc = lpfc_init_api_table_setup(phba, dev_grp);
5937         if (rc)
5938                 return -ENODEV;
5939         
5940         rc = lpfc_scsi_api_table_setup(phba, dev_grp);
5941         if (rc)
5942                 return -ENODEV;
5943         
5944         rc = lpfc_sli_api_table_setup(phba, dev_grp);
5945         if (rc)
5946                 return -ENODEV;
5947         
5948         rc = lpfc_mbox_api_table_setup(phba, dev_grp);
5949         if (rc)
5950                 return -ENODEV;
5951 
5952         return 0;
5953 }
5954 
5955 
5956 
5957 
5958 
5959 
5960 
5961 
5962 
5963 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
5964 {
5965         switch (intr_mode) {
5966         case 0:
5967                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5968                                 "0470 Enable INTx interrupt mode.\n");
5969                 break;
5970         case 1:
5971                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5972                                 "0481 Enabled MSI interrupt mode.\n");
5973                 break;
5974         case 2:
5975                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5976                                 "0480 Enabled MSI-X interrupt mode.\n");
5977                 break;
5978         default:
5979                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5980                                 "0482 Illegal interrupt mode.\n");
5981                 break;
5982         }
5983         return;
5984 }
5985 
5986 
5987 
5988 
5989 
5990 
5991 
5992 
5993 
5994 
5995 
5996 
5997 static int
5998 lpfc_enable_pci_dev(struct lpfc_hba *phba)
5999 {
6000         struct pci_dev *pdev;
6001 
6002         
6003         if (!phba->pcidev)
6004                 goto out_error;
6005         else
6006                 pdev = phba->pcidev;
6007         
6008         if (pci_enable_device_mem(pdev))
6009                 goto out_error;
6010         
6011         if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
6012                 goto out_disable_device;
6013         
6014         pci_set_master(pdev);
6015         pci_try_set_mwi(pdev);
6016         pci_save_state(pdev);
6017 
6018         
6019         if (pci_is_pcie(pdev))
6020                 pdev->needs_freset = 1;
6021 
6022         return 0;
6023 
6024 out_disable_device:
6025         pci_disable_device(pdev);
6026 out_error:
6027         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6028                         "1401 Failed to enable pci device\n");
6029         return -ENODEV;
6030 }
6031 
6032 
6033 
6034 
6035 
6036 
6037 
6038 
6039 static void
6040 lpfc_disable_pci_dev(struct lpfc_hba *phba)
6041 {
6042         struct pci_dev *pdev;
6043 
6044         
6045         if (!phba->pcidev)
6046                 return;
6047         else
6048                 pdev = phba->pcidev;
6049         
6050         pci_release_mem_regions(pdev);
6051         pci_disable_device(pdev);
6052 
6053         return;
6054 }
6055 
6056 
6057 
6058 
6059 
6060 
6061 
6062 
6063 
6064 
6065 void
6066 lpfc_reset_hba(struct lpfc_hba *phba)
6067 {
6068         
6069         if (!phba->cfg_enable_hba_reset) {
6070                 phba->link_state = LPFC_HBA_ERROR;
6071                 return;
6072         }
6073         if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
6074                 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
6075         else
6076                 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
6077         lpfc_offline(phba);
6078         lpfc_sli_brdrestart(phba);
6079         lpfc_online(phba);
6080         lpfc_unblock_mgmt_io(phba);
6081 }
6082 
6083 
6084 
6085 
6086 
6087 
6088 
6089 
6090 
6091 
6092 
6093 uint16_t
6094 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
6095 {
6096         struct pci_dev *pdev = phba->pcidev;
6097         uint16_t nr_virtfn;
6098         int pos;
6099 
6100         pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
6101         if (pos == 0)
6102                 return 0;
6103 
6104         pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
6105         return nr_virtfn;
6106 }
6107 
6108 
6109 
6110 
6111 
6112 
6113 
6114 
6115 
6116 
6117 
6118 
6119 int
6120 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
6121 {
6122         struct pci_dev *pdev = phba->pcidev;
6123         uint16_t max_nr_vfn;
6124         int rc;
6125 
6126         max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
6127         if (nr_vfn > max_nr_vfn) {
6128                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6129                                 "3057 Requested vfs (%d) greater than "
6130                                 "supported vfs (%d)", nr_vfn, max_nr_vfn);
6131                 return -EINVAL;
6132         }
6133 
6134         rc = pci_enable_sriov(pdev, nr_vfn);
6135         if (rc) {
6136                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6137                                 "2806 Failed to enable sriov on this device "
6138                                 "with vfn number nr_vf:%d, rc:%d\n",
6139                                 nr_vfn, rc);
6140         } else
6141                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6142                                 "2807 Successful enable sriov on this device "
6143                                 "with vfn number nr_vf:%d\n", nr_vfn);
6144         return rc;
6145 }
6146 
6147 
6148 
6149 
6150 
6151 
6152 
6153 
6154 
6155 
6156 
6157 
/**
 * lpfc_setup_driver_resource_phase1 - Phase1 driver resource setup
 * @phba: pointer to lpfc hba data structure.
 *
 * Sets up the driver internal resources common to both SLI-3 and SLI-4
 * HBAs: spinlocks, list heads, wait queues, the common timers and the
 * EQ-delay delayed work.  No hardware access is performed here.
 *
 * Return: always 0 (kept as int for symmetry with the other phases).
 */
static int
lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/*
	 * Driver resources common to all SLI revisions
	 */
	atomic_set(&phba->fast_event_count, 0);
	spin_lock_init(&phba->hbalock);

	/* Initialize ndlp management spinlock */
	spin_lock_init(&phba->ndlp_lock);

	/* Initialize port_list spinlock */
	spin_lock_init(&phba->port_list_lock);
	INIT_LIST_HEAD(&phba->port_list);

	INIT_LIST_HEAD(&phba->work_list);
	init_waitqueue_head(&phba->wait_4_mlo_m_q);

	/* Initialize the wait queue head for the kernel thread */
	init_waitqueue_head(&phba->work_waitq);

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"1403 Protocols supported %s %s %s\n",
			((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
				"SCSI" : " "),
			((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
				"NVME" : " "),
			(phba->nvmet_support ? "NVMET" : " "));

	/* Initialize the SCSI buffer get/put lists and their locks */
	spin_lock_init(&phba->scsi_buf_list_get_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
	spin_lock_init(&phba->scsi_buf_list_put_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);

	/* Initialize the fabric iocb list */
	INIT_LIST_HEAD(&phba->fabric_iocb_list);

	/* Initialize list to save ELS buffers */
	INIT_LIST_HEAD(&phba->elsbuf);

	/* Initialize FCF connection record list */
	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);

	/* Initialize the per-device (lun) list and its lock */
	spin_lock_init(&phba->devicelock);
	INIT_LIST_HEAD(&phba->luns);

	/* MBOX timeout timer */
	timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
	/* Fabric block timer */
	timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
	/* Error-attention polling timer */
	timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
	/* Heartbeat timer */
	timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);

	INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);

	return 0;
}
6222 
6223 
6224 
6225 
6226 
6227 
6228 
6229 
6230 
6231 
6232 
6233 
/**
 * lpfc_sli_driver_resource_setup - Setup driver resources for an SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 *
 * Sets up the driver internal resources specific to SLI-3 HBAs: timers,
 * host-attention mask, SLI rings, scatter-gather sizing (with or without
 * BlockGuard/T10-DIF), DMA pools, and optional SR-IOV virtual functions.
 *
 * Return codes
 *	0 - successful
 *	-ENODEV - phase-1 setup failed
 *	-ENOMEM - memory/pool allocation failed
 */
static int
lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
{
	int rc, entry_sz;

	/*
	 * Initialize timers used by driver
	 */

	/* FCP polling mode timer */
	timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);

	/* Host attention work mask setup */
	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);

	/* Set up driver resources common to all SLI revisions */
	rc = lpfc_setup_driver_resource_phase1(phba);
	if (rc)
		return -ENODEV;

	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
		phba->menlo_flag |= HBA_MENLO_SUPPORT;
		/* Enforce the Menlo minimum sg segment count */
		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
	}

	if (!phba->sli.sli3_ring)
		phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
					      sizeof(struct lpfc_sli_ring),
					      GFP_KERNEL);
	if (!phba->sli.sli3_ring)
		return -ENOMEM;

	/*
	 * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be computed dynamically.
	 */

	/* Initialize the host templates with the configured values */
	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
	lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;

	/* SGL entry size differs between SLI-4 SGEs and SLI-3 BDEs */
	if (phba->sli_rev == LPFC_SLI_REV4)
		entry_sz = sizeof(struct sli4_sge);
	else
		entry_sz = sizeof(struct ulp_bde64);

	/* BlockGuard (T10-DIF) changes how much SGL space is reserved */
	if (phba->cfg_enable_bg) {
		/*
		 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd, the
		 * FCP rsp, and an entry for each data/protection segment.
		 * Since the number of protection segments handed down by
		 * the SCSI layer is not under driver control (could be one
		 * per block), allocate for the maximum and cap
		 * cfg_sg_seg_cnt to reduce the risk of running out.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			(LPFC_MAX_SG_SEG_CNT * entry_sz);

		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;

		/* Total entries for scsi_sg_list and scsi_sg_prot_list */
		phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
	} else {
		/*
		 * The scsi_buf for a regular I/O holds the FCP cmnd, the
		 * FCP rsp, one entry per sg segment, plus 2 reserved
		 * entries (hence the "+ 2").
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			((phba->cfg_sg_seg_cnt + 2) * entry_sz);

		/* Total entries in the BPL for scsi_sg_list */
		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
			phba->cfg_total_seg_cnt);

	phba->max_vpi = LPFC_MAX_VPI;
	/* This will be set to the correct value after config_port mbox */
	phba->max_vports = 0;

	/*
	 * Initialize the SLI Layer to run with lpfc HBAs.
	 */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_init(phba);

	/* Allocate device driver memory */
	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
		return -ENOMEM;

	phba->lpfc_sg_dma_buf_pool =
		dma_pool_create("lpfc_sg_dma_buf_pool",
				&phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
				BPL_ALIGN_SZ, 0);

	if (!phba->lpfc_sg_dma_buf_pool)
		goto fail_free_mem;

	phba->lpfc_cmd_rsp_buf_pool =
			dma_pool_create("lpfc_cmd_rsp_buf_pool",
					&phba->pcidev->dev,
					sizeof(struct fcp_cmnd) +
					sizeof(struct fcp_rsp),
					BPL_ALIGN_SZ, 0);

	if (!phba->lpfc_cmd_rsp_buf_pool)
		goto fail_free_dma_buf_pool;

	/*
	 * Enable SR-IOV virtual functions if supported and configured
	 * through the module parameter.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						 phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"2808 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;

fail_free_dma_buf_pool:
	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
	phba->lpfc_sg_dma_buf_pool = NULL;
fail_free_mem:
	lpfc_mem_free(phba);
	return -ENOMEM;
}
6384 
6385 
6386 
6387 
6388 
6389 
6390 
6391 
/**
 * lpfc_sli_driver_resource_unset - Unset driver resources for an SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 *
 * Releases all device driver memory allocated for the SLI-3 HBA.
 */
static void
lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
{
	/* Free all the device driver memory owned by this module */
	lpfc_mem_free_all(phba);
}
6400 
6401 
6402 
6403 
6404 
6405 
6406 
6407 
6408 
6409 
6410 
6411 
6412 static int
6413 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
6414 {
6415         LPFC_MBOXQ_t *mboxq;
6416         MAILBOX_t *mb;
6417         int rc, i, max_buf_size;
6418         uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
6419         struct lpfc_mqe *mqe;
6420         int longs;
6421         int extra;
6422         uint64_t wwn;
6423         u32 if_type;
6424         u32 if_fam;
6425 
6426         phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
6427         phba->sli4_hba.num_possible_cpu = num_possible_cpus();
6428         phba->sli4_hba.curr_disp_cpu = 0;
6429 
6430         
6431         lpfc_get_cfgparam(phba);
6432 
6433         
6434         rc = lpfc_setup_driver_resource_phase1(phba);
6435         if (rc)
6436                 return -ENODEV;
6437 
6438         
6439         rc = lpfc_sli4_post_status_check(phba);
6440         if (rc)
6441                 return -ENODEV;
6442 
6443         
6444 
6445         
6446         phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
6447 
6448         
6449 
6450 
6451 
6452         timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);
6453 
6454         
6455         timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
6456 
6457         
6458 
6459 
6460 
6461         memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
6462                 sizeof(struct lpfc_mbox_ext_buf_ctx));
6463         INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
6464 
6465         phba->max_vpi = LPFC_MAX_VPI;
6466 
6467         
6468         phba->max_vports = 0;
6469 
6470         
6471         phba->valid_vlan = 0;
6472         phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
6473         phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
6474         phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
6475 
6476         
6477 
6478 
6479 
6480 
6481 
6482         
6483         INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
6484         phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
6485         phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
6486 
6487         
6488 
6489 
6490         
6491         spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
6492         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);
6493 
6494         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
6495                 
6496                 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
6497                 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
6498                 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
6499                 spin_lock_init(&phba->sli4_hba.t_active_list_lock);
6500                 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
6501         }
6502 
6503         
6504         spin_lock_init(&phba->sli4_hba.sgl_list_lock);
6505         spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
6506 
6507         
6508 
6509 
6510 
6511         
6512         INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
6513         
6514         INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
6515         
6516         INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
6517         
6518         INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
6519         
6520         INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
6521         
6522         INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
6523 
6524         
6525         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
6526         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
6527         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
6528         INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
6529 
6530         
6531 
6532 
6533         INIT_LIST_HEAD(&phba->sli.mboxq);
6534         INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
6535 
6536         
6537         phba->sli4_hba.lnk_info.optic_state = 0xff;
6538 
6539         
6540         rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
6541         if (rc)
6542                 return -ENOMEM;
6543 
6544         
6545         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
6546             LPFC_SLI_INTF_IF_TYPE_2) {
6547                 rc = lpfc_pci_function_reset(phba);
6548                 if (unlikely(rc)) {
6549                         rc = -ENODEV;
6550                         goto out_free_mem;
6551                 }
6552                 phba->temp_sensor_support = 1;
6553         }
6554 
6555         
6556         rc = lpfc_create_bootstrap_mbox(phba);
6557         if (unlikely(rc))
6558                 goto out_free_mem;
6559 
6560         
6561         rc = lpfc_setup_endian_order(phba);
6562         if (unlikely(rc))
6563                 goto out_free_bsmbx;
6564 
6565         
6566         rc = lpfc_sli4_read_config(phba);
6567         if (unlikely(rc))
6568                 goto out_free_bsmbx;
6569         rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
6570         if (unlikely(rc))
6571                 goto out_free_bsmbx;
6572 
6573         
6574         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
6575             LPFC_SLI_INTF_IF_TYPE_0) {
6576                 rc = lpfc_pci_function_reset(phba);
6577                 if (unlikely(rc))
6578                         goto out_free_bsmbx;
6579         }
6580 
6581         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
6582                                                        GFP_KERNEL);
6583         if (!mboxq) {
6584                 rc = -ENOMEM;
6585                 goto out_free_bsmbx;
6586         }
6587 
6588         
6589         phba->nvmet_support = 0;
6590         if (lpfc_enable_nvmet_cnt) {
6591 
6592                 
6593                 lpfc_read_nv(phba, mboxq);
6594                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6595                 if (rc != MBX_SUCCESS) {
6596                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6597                                         "6016 Mailbox failed , mbxCmd x%x "
6598                                         "READ_NV, mbxStatus x%x\n",
6599                                         bf_get(lpfc_mqe_command, &mboxq->u.mqe),
6600                                         bf_get(lpfc_mqe_status, &mboxq->u.mqe));
6601                         mempool_free(mboxq, phba->mbox_mem_pool);
6602                         rc = -EIO;
6603                         goto out_free_bsmbx;
6604                 }
6605                 mb = &mboxq->u.mb;
6606                 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
6607                        sizeof(uint64_t));
6608                 wwn = cpu_to_be64(wwn);
6609                 phba->sli4_hba.wwnn.u.name = wwn;
6610                 memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
6611                        sizeof(uint64_t));
6612                 
6613                 wwn = cpu_to_be64(wwn);
6614                 phba->sli4_hba.wwpn.u.name = wwn;
6615 
6616                 
6617                 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
6618                         if (wwn == lpfc_enable_nvmet[i]) {
6619 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
6620                                 if (lpfc_nvmet_mem_alloc(phba))
6621                                         break;
6622 
6623                                 phba->nvmet_support = 1; 
6624 
6625                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6626                                                 "6017 NVME Target %016llx\n",
6627                                                 wwn);
6628 #else
6629                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6630                                                 "6021 Can't enable NVME Target."
6631                                                 " NVME_TARGET_FC infrastructure"
6632                                                 " is not in kernel\n");
6633 #endif
6634                                 
6635                                 phba->cfg_xri_rebalancing = 0;
6636                                 break;
6637                         }
6638                 }
6639         }
6640 
6641         lpfc_nvme_mod_param_dep(phba);
6642 
6643         
6644         lpfc_supported_pages(mboxq);
6645         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6646         if (!rc) {
6647                 mqe = &mboxq->u.mqe;
6648                 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
6649                        LPFC_MAX_SUPPORTED_PAGES);
6650                 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
6651                         switch (pn_page[i]) {
6652                         case LPFC_SLI4_PARAMETERS:
6653                                 phba->sli4_hba.pc_sli4_params.supported = 1;
6654                                 break;
6655                         default:
6656                                 break;
6657                         }
6658                 }
6659                 
6660                 if (phba->sli4_hba.pc_sli4_params.supported)
6661                         rc = lpfc_pc_sli4_params_get(phba, mboxq);
6662                 if (rc) {
6663                         mempool_free(mboxq, phba->mbox_mem_pool);
6664                         rc = -EIO;
6665                         goto out_free_bsmbx;
6666                 }
6667         }
6668 
6669         
6670 
6671 
6672 
6673 
6674         rc = lpfc_get_sli4_parameters(phba, mboxq);
6675         if (rc) {
6676                 if_type = bf_get(lpfc_sli_intf_if_type,
6677                                  &phba->sli4_hba.sli_intf);
6678                 if_fam = bf_get(lpfc_sli_intf_sli_family,
6679                                 &phba->sli4_hba.sli_intf);
6680                 if (phba->sli4_hba.extents_in_use &&
6681                     phba->sli4_hba.rpi_hdrs_in_use) {
6682                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6683                                 "2999 Unsupported SLI4 Parameters "
6684                                 "Extents and RPI headers enabled.\n");
6685                         if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
6686                             if_fam ==  LPFC_SLI_INTF_FAMILY_BE2) {
6687                                 mempool_free(mboxq, phba->mbox_mem_pool);
6688                                 rc = -EIO;
6689                                 goto out_free_bsmbx;
6690                         }
6691                 }
6692                 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
6693                       if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
6694                         mempool_free(mboxq, phba->mbox_mem_pool);
6695                         rc = -EIO;
6696                         goto out_free_bsmbx;
6697                 }
6698         }
6699 
6700         
6701 
6702 
6703 
6704         extra = 2;
6705         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
6706                 extra++;
6707 
6708         
6709 
6710 
6711 
6712 
6713         max_buf_size = (2 * SLI4_PAGE_SIZE);
6714 
6715         
6716 
6717 
6718 
6719         if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
6720                 
6721 
6722                 
6723 
6724 
6725 
6726 
6727 
6728 
6729 
6730 
6731                 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6732                                 sizeof(struct fcp_rsp) + max_buf_size;
6733 
6734                 
6735                 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
6736 
6737                 
6738 
6739 
6740 
6741                 if (phba->cfg_enable_bg &&
6742                     phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
6743                         phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
6744                 else
6745                         phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
6746 
6747         } else {
6748                 
6749 
6750 
6751 
6752 
6753                 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6754                                 sizeof(struct fcp_rsp) +
6755                                 ((phba->cfg_sg_seg_cnt + extra) *
6756                                 sizeof(struct sli4_sge));
6757 
6758                 
6759                 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
6760                 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
6761 
6762                 
6763 
6764 
6765 
6766         }
6767 
6768         if (phba->cfg_xpsgl && !phba->nvmet_support)
6769                 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
6770         else if (phba->cfg_sg_dma_buf_size  <= LPFC_MIN_SG_SLI4_BUF_SZ)
6771                 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
6772         else
6773                 phba->cfg_sg_dma_buf_size =
6774                                 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
6775 
6776         phba->border_sge_num = phba->cfg_sg_dma_buf_size /
6777                                sizeof(struct sli4_sge);
6778 
6779         
6780         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
6781                 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
6782                         lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
6783                                         "6300 Reducing NVME sg segment "
6784                                         "cnt to %d\n",
6785                                         LPFC_MAX_NVME_SEG_CNT);
6786                         phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
6787                 } else
6788                         phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
6789         }
6790 
6791         
6792         lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
6793         lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
6794         lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt;
6795 
6796         lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
6797                         "9087 sg_seg_cnt:%d dmabuf_size:%d "
6798                         "total:%d scsi:%d nvme:%d\n",
6799                         phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
6800                         phba->cfg_total_seg_cnt,  phba->cfg_scsi_seg_cnt,
6801                         phba->cfg_nvme_seg_cnt);
6802 
6803         if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
6804                 i = phba->cfg_sg_dma_buf_size;
6805         else
6806                 i = SLI4_PAGE_SIZE;
6807 
6808         phba->lpfc_sg_dma_buf_pool =
6809                         dma_pool_create("lpfc_sg_dma_buf_pool",
6810                                         &phba->pcidev->dev,
6811                                         phba->cfg_sg_dma_buf_size,
6812                                         i, 0);
6813         if (!phba->lpfc_sg_dma_buf_pool)
6814                 goto out_free_bsmbx;
6815 
6816         phba->lpfc_cmd_rsp_buf_pool =
6817                         dma_pool_create("lpfc_cmd_rsp_buf_pool",
6818                                         &phba->pcidev->dev,
6819                                         sizeof(struct fcp_cmnd) +
6820                                         sizeof(struct fcp_rsp),
6821                                         i, 0);
6822         if (!phba->lpfc_cmd_rsp_buf_pool)
6823                 goto out_free_sg_dma_buf;
6824 
6825         mempool_free(mboxq, phba->mbox_mem_pool);
6826 
6827         
6828         lpfc_sli4_oas_verify(phba);
6829 
6830         
6831         lpfc_sli4_ras_init(phba);
6832 
6833         
6834         rc = lpfc_sli4_queue_verify(phba);
6835         if (rc)
6836                 goto out_free_cmd_rsp_buf;
6837 
6838         
6839         rc = lpfc_sli4_cq_event_pool_create(phba);
6840         if (rc)
6841                 goto out_free_cmd_rsp_buf;
6842 
6843         
6844         lpfc_init_sgl_list(phba);
6845 
6846         
6847         rc = lpfc_init_active_sgl_array(phba);
6848         if (rc) {
6849                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6850                                 "1430 Failed to initialize sgl list.\n");
6851                 goto out_destroy_cq_event_pool;
6852         }
6853         rc = lpfc_sli4_init_rpi_hdrs(phba);
6854         if (rc) {
6855                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6856                                 "1432 Failed to initialize rpi headers.\n");
6857                 goto out_free_active_sgl;
6858         }
6859 
6860         
6861         longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
6862         phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
6863                                          GFP_KERNEL);
6864         if (!phba->fcf.fcf_rr_bmask) {
6865                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6866                                 "2759 Failed allocate memory for FCF round "
6867                                 "robin failover bmask\n");
6868                 rc = -ENOMEM;
6869                 goto out_remove_rpi_hdrs;
6870         }
6871 
6872         phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
6873                                             sizeof(struct lpfc_hba_eq_hdl),
6874                                             GFP_KERNEL);
6875         if (!phba->sli4_hba.hba_eq_hdl) {
6876                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6877                                 "2572 Failed allocate memory for "
6878                                 "fast-path per-EQ handle array\n");
6879                 rc = -ENOMEM;
6880                 goto out_free_fcf_rr_bmask;
6881         }
6882 
6883         phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
6884                                         sizeof(struct lpfc_vector_map_info),
6885                                         GFP_KERNEL);
6886         if (!phba->sli4_hba.cpu_map) {
6887                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6888                                 "3327 Failed allocate memory for msi-x "
6889                                 "interrupt vector mapping\n");
6890                 rc = -ENOMEM;
6891                 goto out_free_hba_eq_hdl;
6892         }
6893 
6894         phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
6895         if (!phba->sli4_hba.eq_info) {
6896                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6897                                 "3321 Failed allocation for per_cpu stats\n");
6898                 rc = -ENOMEM;
6899                 goto out_free_hba_cpu_map;
6900         }
6901         
6902 
6903 
6904 
6905         if (phba->cfg_sriov_nr_virtfn > 0) {
6906                 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
6907                                                  phba->cfg_sriov_nr_virtfn);
6908                 if (rc) {
6909                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6910                                         "3020 Requested number of SR-IOV "
6911                                         "virtual functions (%d) is not "
6912                                         "supported\n",
6913                                         phba->cfg_sriov_nr_virtfn);
6914                         phba->cfg_sriov_nr_virtfn = 0;
6915                 }
6916         }
6917 
6918         return 0;
6919 
6920 out_free_hba_cpu_map:
6921         kfree(phba->sli4_hba.cpu_map);
6922 out_free_hba_eq_hdl:
6923         kfree(phba->sli4_hba.hba_eq_hdl);
6924 out_free_fcf_rr_bmask:
6925         kfree(phba->fcf.fcf_rr_bmask);
6926 out_remove_rpi_hdrs:
6927         lpfc_sli4_remove_rpi_hdrs(phba);
6928 out_free_active_sgl:
6929         lpfc_free_active_sgl(phba);
6930 out_destroy_cq_event_pool:
6931         lpfc_sli4_cq_event_pool_destroy(phba);
6932 out_free_cmd_rsp_buf:
6933         dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
6934         phba->lpfc_cmd_rsp_buf_pool = NULL;
6935 out_free_sg_dma_buf:
6936         dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
6937         phba->lpfc_sg_dma_buf_pool = NULL;
6938 out_free_bsmbx:
6939         lpfc_destroy_bootstrap_mbox(phba);
6940 out_free_mem:
6941         lpfc_mem_free(phba);
6942         return rc;
6943 }
6944 
6945 
6946 
6947 
6948 
6949 
6950 
6951 
/**
 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * Releases, roughly in reverse order of allocation, the driver-internal
 * resources set up for an SLI-4 device: per-cpu EQ stats, the cpu map,
 * the per-EQ handle array, RPI headers and RPIs, the FCF round-robin
 * bitmask, active/ELS/NVMET sgl lists, the CQ event pool, resource
 * identifiers, the bootstrap mailbox, driver memory pools and finally
 * the FCF connection record list.  The teardown order matters; do not
 * reorder these calls.
 **/
static void
lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;

	free_percpu(phba->sli4_hba.eq_info);

	/* Free memory allocated for msi-x interrupt vector to CPU mapping */
	kfree(phba->sli4_hba.cpu_map);
	phba->sli4_hba.num_possible_cpu = 0;
	phba->sli4_hba.num_present_cpu = 0;
	phba->sli4_hba.curr_disp_cpu = 0;

	/* Free memory allocated for fast-path per-EQ handle array */
	kfree(phba->sli4_hba.hba_eq_hdl);

	/* Free the allocated rpi headers and rpi resources */
	lpfc_sli4_remove_rpi_hdrs(phba);
	lpfc_sli4_remove_rpis(phba);

	/* Free eligible FCF index bmask */
	kfree(phba->fcf.fcf_rr_bmask);

	/* Free the active sgl array and the ELS/NVMET sgl lists */
	lpfc_free_active_sgl(phba);
	lpfc_free_els_sgl_list(phba);
	lpfc_free_nvmet_sgl_list(phba);

	/* Release outstanding CQ events, then destroy the event pool */
	lpfc_sli4_cq_event_release_all(phba);
	lpfc_sli4_cq_event_pool_destroy(phba);

	/* Release resource identifiers (RPI/VPI/VFI/XRI ranges) */
	lpfc_sli4_dealloc_resource_identifiers(phba);

	/* Free the bootstrap mailbox region */
	lpfc_destroy_bootstrap_mbox(phba);

	/* Free the SLI layer memory pools */
	lpfc_mem_free_all(phba);

	/* Free the current FCF connection record table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
		&phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	return;
}
7002 
7003 
7004 
7005 
7006 
7007 
7008 
7009 
7010 
7011 
7012 
7013 int
7014 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7015 {
7016         phba->lpfc_hba_init_link = lpfc_hba_init_link;
7017         phba->lpfc_hba_down_link = lpfc_hba_down_link;
7018         phba->lpfc_selective_reset = lpfc_selective_reset;
7019         switch (dev_grp) {
7020         case LPFC_PCI_DEV_LP:
7021                 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
7022                 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
7023                 phba->lpfc_stop_port = lpfc_stop_port_s3;
7024                 break;
7025         case LPFC_PCI_DEV_OC:
7026                 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
7027                 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
7028                 phba->lpfc_stop_port = lpfc_stop_port_s4;
7029                 break;
7030         default:
7031                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7032                                 "1431 Invalid HBA PCI-device group: 0x%x\n",
7033                                 dev_grp);
7034                 return -ENODEV;
7035                 break;
7036         }
7037         return 0;
7038 }
7039 
7040 
7041 
7042 
7043 
7044 
7045 
7046 
7047 
7048 
7049 
7050 
7051 static int
7052 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
7053 {
7054         int error;
7055 
7056         
7057         phba->worker_thread = kthread_run(lpfc_do_work, phba,
7058                                           "lpfc_worker_%d", phba->brd_no);
7059         if (IS_ERR(phba->worker_thread)) {
7060                 error = PTR_ERR(phba->worker_thread);
7061                 return error;
7062         }
7063 
7064         return 0;
7065 }
7066 
7067 
7068 
7069 
7070 
7071 
7072 
7073 
7074 
7075 static void
7076 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
7077 {
7078         if (phba->wq) {
7079                 flush_workqueue(phba->wq);
7080                 destroy_workqueue(phba->wq);
7081                 phba->wq = NULL;
7082         }
7083 
7084         
7085         if (phba->worker_thread)
7086                 kthread_stop(phba->worker_thread);
7087 }
7088 
7089 
7090 
7091 
7092 
7093 
7094 
7095 void
7096 lpfc_free_iocb_list(struct lpfc_hba *phba)
7097 {
7098         struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
7099 
7100         spin_lock_irq(&phba->hbalock);
7101         list_for_each_entry_safe(iocbq_entry, iocbq_next,
7102                                  &phba->lpfc_iocb_list, list) {
7103                 list_del(&iocbq_entry->list);
7104                 kfree(iocbq_entry);
7105                 phba->total_iocbq_bufs--;
7106         }
7107         spin_unlock_irq(&phba->hbalock);
7108 
7109         return;
7110 }
7111 
7112 
7113 
7114 
7115 
7116 
7117 
7118 
7119 
7120 
7121 
7122 
7123 int
7124 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
7125 {
7126         struct lpfc_iocbq *iocbq_entry = NULL;
7127         uint16_t iotag;
7128         int i;
7129 
7130         
7131         INIT_LIST_HEAD(&phba->lpfc_iocb_list);
7132         for (i = 0; i < iocb_count; i++) {
7133                 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
7134                 if (iocbq_entry == NULL) {
7135                         printk(KERN_ERR "%s: only allocated %d iocbs of "
7136                                 "expected %d count. Unloading driver.\n",
7137                                 __func__, i, LPFC_IOCB_LIST_CNT);
7138                         goto out_free_iocbq;
7139                 }
7140 
7141                 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
7142                 if (iotag == 0) {
7143                         kfree(iocbq_entry);
7144                         printk(KERN_ERR "%s: failed to allocate IOTAG. "
7145                                 "Unloading driver.\n", __func__);
7146                         goto out_free_iocbq;
7147                 }
7148                 iocbq_entry->sli4_lxritag = NO_XRI;
7149                 iocbq_entry->sli4_xritag = NO_XRI;
7150 
7151                 spin_lock_irq(&phba->hbalock);
7152                 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
7153                 phba->total_iocbq_bufs++;
7154                 spin_unlock_irq(&phba->hbalock);
7155         }
7156 
7157         return 0;
7158 
7159 out_free_iocbq:
7160         lpfc_free_iocb_list(phba);
7161 
7162         return -ENOMEM;
7163 }
7164 
7165 
7166 
7167 
7168 
7169 
7170 
7171 
7172 void
7173 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
7174 {
7175         struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7176 
7177         list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
7178                 list_del(&sglq_entry->list);
7179                 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
7180                 kfree(sglq_entry);
7181         }
7182 }
7183 
7184 
7185 
7186 
7187 
7188 
7189 
7190 static void
7191 lpfc_free_els_sgl_list(struct lpfc_hba *phba)
7192 {
7193         LIST_HEAD(sglq_list);
7194 
7195         
7196         spin_lock_irq(&phba->hbalock);
7197         spin_lock(&phba->sli4_hba.sgl_list_lock);
7198         list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
7199         spin_unlock(&phba->sli4_hba.sgl_list_lock);
7200         spin_unlock_irq(&phba->hbalock);
7201 
7202         
7203         lpfc_free_sgl_list(phba, &sglq_list);
7204 }
7205 
7206 
7207 
7208 
7209 
7210 
7211 
7212 static void
7213 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
7214 {
7215         struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7216         LIST_HEAD(sglq_list);
7217 
7218         
7219         spin_lock_irq(&phba->hbalock);
7220         spin_lock(&phba->sli4_hba.sgl_list_lock);
7221         list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
7222         spin_unlock(&phba->sli4_hba.sgl_list_lock);
7223         spin_unlock_irq(&phba->hbalock);
7224 
7225         
7226         list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
7227                 list_del(&sglq_entry->list);
7228                 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
7229                 kfree(sglq_entry);
7230         }
7231 
7232         
7233 
7234 
7235 
7236         phba->sli4_hba.nvmet_xri_cnt = 0;
7237 }
7238 
7239 
7240 
7241 
7242 
7243 
7244 
7245 
7246 static int
7247 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
7248 {
7249         int size;
7250         size = sizeof(struct lpfc_sglq *);
7251         size *= phba->sli4_hba.max_cfg_param.max_xri;
7252 
7253         phba->sli4_hba.lpfc_sglq_active_list =
7254                 kzalloc(size, GFP_KERNEL);
7255         if (!phba->sli4_hba.lpfc_sglq_active_list)
7256                 return -ENOMEM;
7257         return 0;
7258 }
7259 
7260 
7261 
7262 
7263 
7264 
7265 
7266 
7267 
7268 static void
7269 lpfc_free_active_sgl(struct lpfc_hba *phba)
7270 {
7271         kfree(phba->sli4_hba.lpfc_sglq_active_list);
7272 }
7273 
7274 
7275 
7276 
7277 
7278 
7279 
7280 
7281 
7282 static void
7283 lpfc_init_sgl_list(struct lpfc_hba *phba)
7284 {
7285         
7286         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
7287         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
7288         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
7289         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
7290 
7291         
7292         phba->sli4_hba.els_xri_cnt = 0;
7293 
7294         
7295         phba->sli4_hba.io_xri_cnt = 0;
7296 }
7297 
7298 
7299 
7300 
7301 
7302 
7303 
7304 
7305 
7306 
7307 
7308 
7309 
7310 
7311 
7312 int
7313 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
7314 {
7315         int rc = 0;
7316         struct lpfc_rpi_hdr *rpi_hdr;
7317 
7318         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
7319         if (!phba->sli4_hba.rpi_hdrs_in_use)
7320                 return rc;
7321         if (phba->sli4_hba.extents_in_use)
7322                 return -EIO;
7323 
7324         rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
7325         if (!rpi_hdr) {
7326                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7327                                 "0391 Error during rpi post operation\n");
7328                 lpfc_sli4_remove_rpis(phba);
7329                 rc = -ENODEV;
7330         }
7331 
7332         return rc;
7333 }
7334 
7335 
7336 
7337 
7338 
7339 
7340 
7341 
7342 
7343 
7344 
7345 
7346 
7347 
/**
 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region.
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocates one rpi header template region (a DMA-coherent buffer that
 * must be naturally aligned to its own size) plus its tracking struct,
 * and adds it to the driver's rpi header list under the hba lock.
 *
 * Returns: pointer to the new rpi header on success; NULL if rpi headers
 * are not in use, extents are in use, the rpi range is exhausted, or any
 * allocation/alignment requirement fails.
 **/
struct lpfc_rpi_hdr *
lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
{
	uint16_t rpi_limit, curr_rpi_range;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_rpi_hdr *rpi_hdr;

	/*
	 * If the SLI4 port supports extents, posting the rpi header isn't
	 * required.  Also rpi headers may not be supported at all.
	 */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return NULL;
	if (phba->sli4_hba.extents_in_use)
		return NULL;

	/* The limit on the logical index is just the max_rpi count. */
	rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;

	spin_lock_irq(&phba->hbalock);
	/*
	 * Snapshot the next available rpi under the lock; the value is
	 * used below both for the exhaustion check and as start_rpi.
	 */
	curr_rpi_range = phba->sli4_hba.next_rpi;
	spin_unlock_irq(&phba->hbalock);

	/* All rpis in the configured range are already covered. */
	if (curr_rpi_range == rpi_limit)
		return NULL;

	/*
	 * First allocate the rpi header template region.  The hardware
	 * requires the region's physical address to be aligned to its
	 * size, which is verified after the coherent allocation below.
	 */
	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return NULL;

	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
					  LPFC_HDR_TEMPLATE_SIZE,
					  &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		rpi_hdr = NULL;
		goto err_free_dmabuf;
	}

	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
		rpi_hdr = NULL;
		goto err_free_coherent;
	}

	/* Save the rpi header data for cleanup later. */
	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
	if (!rpi_hdr)
		goto err_free_coherent;

	rpi_hdr->dmabuf = dmabuf;
	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
	rpi_hdr->page_count = 1;
	spin_lock_irq(&phba->hbalock);

	/*
	 * The header covers LPFC_RPI_HDR_COUNT rpis starting at the range
	 * snapshotted above.  NOTE(review): phba->sli4_hba.next_rpi is not
	 * advanced here — presumably the rpi allocator elsewhere maintains
	 * it; confirm before relying on that.
	 */
	rpi_hdr->start_rpi = curr_rpi_range;
	rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);

	spin_unlock_irq(&phba->hbalock);
	return rpi_hdr;

 err_free_coherent:
	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
			  dmabuf->virt, dmabuf->phys);
 err_free_dmabuf:
	kfree(dmabuf);
	return NULL;
}
7427 
7428 
7429 
7430 
7431 
7432 
7433 
7434 
7435 
7436 
7437 void
7438 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
7439 {
7440         struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
7441 
7442         if (!phba->sli4_hba.rpi_hdrs_in_use)
7443                 goto exit;
7444 
7445         list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
7446                                  &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
7447                 list_del(&rpi_hdr->list);
7448                 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
7449                                   rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
7450                 kfree(rpi_hdr->dmabuf);
7451                 kfree(rpi_hdr);
7452         }
7453  exit:
7454         
7455         phba->sli4_hba.next_rpi = 0;
7456 }
7457 
7458 
7459 
7460 
7461 
7462 
7463 
7464 
7465 
7466 
7467 
7468 
7469 
7470 static struct lpfc_hba *
7471 lpfc_hba_alloc(struct pci_dev *pdev)
7472 {
7473         struct lpfc_hba *phba;
7474 
7475         
7476         phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
7477         if (!phba) {
7478                 dev_err(&pdev->dev, "failed to allocate hba struct\n");
7479                 return NULL;
7480         }
7481 
7482         
7483         phba->pcidev = pdev;
7484 
7485         
7486         phba->brd_no = lpfc_get_instance();
7487         if (phba->brd_no < 0) {
7488                 kfree(phba);
7489                 return NULL;
7490         }
7491         phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
7492 
7493         spin_lock_init(&phba->ct_ev_lock);
7494         INIT_LIST_HEAD(&phba->ct_ev_waiters);
7495 
7496         return phba;
7497 }
7498 
7499 
7500 
7501 
7502 
7503 
7504 
7505 
7506 static void
7507 lpfc_hba_free(struct lpfc_hba *phba)
7508 {
7509         if (phba->sli_rev == LPFC_SLI_REV4)
7510                 kfree(phba->sli4_hba.hdwq);
7511 
7512         
7513         idr_remove(&lpfc_hba_index, phba->brd_no);
7514 
7515         
7516         kfree(phba->sli.sli3_ring);
7517         phba->sli.sli3_ring = NULL;
7518 
7519         kfree(phba);
7520         return;
7521 }
7522 
7523 
7524 
7525 
7526 
7527 
7528 
7529 
7530 
7531 
7532 
7533 
/**
 * lpfc_create_shost - Create hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * Sets default FC timeout values, creates the physical port (which
 * carries the SCSI host), records the host in the pci drvdata, probes
 * for NVME target support, initializes debugfs for the port and sets up
 * the FDMI attribute masks.
 *
 * Return codes:
 *	0 - successful
 *	-ENODEV - port creation failed
 **/
static int
lpfc_create_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct Scsi_Host  *shost;

	/* Initialize HBA FC timeout defaults */
	phba->fc_edtov = FF_DEF_EDTOV;
	phba->fc_ratov = FF_DEF_RATOV;
	phba->fc_altov = FF_DEF_ALTOV;
	phba->fc_arbtov = FF_DEF_ARBTOV;

	atomic_set(&phba->sdev_cnt, 0);
	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
	if (!vport)
		return -ENODEV;

	shost = lpfc_shost_from_vport(vport);
	phba->pport = vport;

	if (phba->nvmet_support) {
		/*
		 * NVME target mode: create the TXRDY payload pool once
		 * and force the enabled FC4 type to NVME only.
		 */
		if (phba->txrdy_payload_pool == NULL) {
			phba->txrdy_payload_pool = dma_pool_create(
				"txrdy_pool", &phba->pcidev->dev,
				TXRDY_PAYLOAD_LEN, 16, 0);
			if (phba->txrdy_payload_pool) {
				phba->targetport = NULL;
				phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
				lpfc_printf_log(phba, KERN_INFO,
						LOG_INIT | LOG_NVME_DISC,
						"6076 NVME Target Found\n");
			}
		}
	}

	lpfc_debugfs_initialize(vport);
	/* Put reference to SCSI host to driver's device private data */
	pci_set_drvdata(phba->pcidev, shost);

	/*
	 * At this point we are fully registered with PSA. In addition to
	 * the physical port, FDMI registration is allowed on this vport
	 * and the attribute masks are chosen by the configured mode.
	 */
	vport->load_flag |= FC_ALLOW_FDMI;
	if (phba->cfg_enable_SmartSAN ||
	    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {

		/* Setup appropriate attribute masks */
		vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
		if (phba->cfg_enable_SmartSAN)
			vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
		else
			vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
	}
	return 0;
}
7591 
7592 
7593 
7594 
7595 
7596 
7597 
7598 
7599 static void
7600 lpfc_destroy_shost(struct lpfc_hba *phba)
7601 {
7602         struct lpfc_vport *vport = phba->pport;
7603 
7604         
7605         destroy_port(vport);
7606 
7607         return;
7608 }
7609 
7610 
7611 
7612 
7613 
7614 
7615 
7616 
7617 
7618 static void
7619 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
7620 {
7621         uint32_t old_mask;
7622         uint32_t old_guard;
7623 
7624         if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
7625                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7626                                 "1478 Registering BlockGuard with the "
7627                                 "SCSI layer\n");
7628 
7629                 old_mask = phba->cfg_prot_mask;
7630                 old_guard = phba->cfg_prot_guard;
7631 
7632                 
7633                 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
7634                         SHOST_DIX_TYPE0_PROTECTION |
7635                         SHOST_DIX_TYPE1_PROTECTION);
7636                 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
7637                                          SHOST_DIX_GUARD_CRC);
7638 
7639                 
7640                 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
7641                         phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
7642 
7643                 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
7644                         if ((old_mask != phba->cfg_prot_mask) ||
7645                                 (old_guard != phba->cfg_prot_guard))
7646                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7647                                         "1475 Registering BlockGuard with the "
7648                                         "SCSI layer: mask %d  guard %d\n",
7649                                         phba->cfg_prot_mask,
7650                                         phba->cfg_prot_guard);
7651 
7652                         scsi_host_set_prot(shost, phba->cfg_prot_mask);
7653                         scsi_host_set_guard(shost, phba->cfg_prot_guard);
7654                 } else
7655                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7656                                 "1479 Not Registering BlockGuard with the SCSI "
7657                                 "layer, Bad protection parameters: %d %d\n",
7658                                 old_mask, old_guard);
7659         }
7660 }
7661 
7662 
7663 
7664 
7665 
7666 
7667 
7668 
7669 static void
7670 lpfc_post_init_setup(struct lpfc_hba *phba)
7671 {
7672         struct Scsi_Host  *shost;
7673         struct lpfc_adapter_event_header adapter_event;
7674 
7675         
7676         lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
7677 
7678         
7679 
7680 
7681 
7682         shost = pci_get_drvdata(phba->pcidev);
7683         shost->can_queue = phba->cfg_hba_queue_depth - 10;
7684 
7685         lpfc_host_attrib_init(shost);
7686 
7687         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
7688                 spin_lock_irq(shost->host_lock);
7689                 lpfc_poll_start_timer(phba);
7690                 spin_unlock_irq(shost->host_lock);
7691         }
7692 
7693         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7694                         "0428 Perform SCSI scan\n");
7695         
7696         adapter_event.event_type = FC_REG_ADAPTER_EVENT;
7697         adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
7698         fc_host_post_vendor_event(shost, fc_get_event_number(),
7699                                   sizeof(adapter_event),
7700                                   (char *) &adapter_event,
7701                                   LPFC_NL_VENDOR_ID);
7702         return;
7703 }
7704 
7705 
7706 
7707 
7708 
7709 
7710 
7711 
7712 
7713 
7714 
7715 
/**
 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * Sets up the PCI device memory space for an SLI-3 HBA: configures the
 * DMA mask (64-bit with 32-bit fallback), ioremaps BAR0 (SLIM) and BAR2
 * (control registers), allocates the SLIM2 coherent region and the HBQ
 * slim, carves the HBQ slim into per-HBQ buffers, and derives the host
 * register shortcut addresses.  On failure everything acquired so far is
 * released via the goto cleanup chain.
 *
 * Return codes:
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev = phba->pcidev;
	unsigned long bar0map_len, bar2map_len;
	int i, hbq_count;
	void *ptr;
	int error;

	if (!pdev)
		return -ENODEV;

	/* Set the device DMA mask size; prefer 64-bit, fall back to 32. */
	error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (error)
		error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (error)
		return error;
	error = -ENODEV;

	/* Get the bus address of Bar0 and Bar2 and the number of bytes
	 * required by each mapping.
	 */
	phba->pci_bar0_map = pci_resource_start(pdev, 0);
	bar0map_len = pci_resource_len(pdev, 0);

	phba->pci_bar2_map = pci_resource_start(pdev, 2);
	bar2map_len = pci_resource_len(pdev, 2);

	/* Map HBA SLIM to a kernel virtual address. */
	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
	if (!phba->slim_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLIM memory.\n");
		goto out;
	}

	/* Map HBA Control Registers to a kernel virtual address. */
	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
	if (!phba->ctrl_regs_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for HBA control registers.\n");
		goto out_iounmap_slim;
	}

	/* Allocate memory for SLI-2 structures */
	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
					       &phba->slim2p.phys, GFP_KERNEL);
	if (!phba->slim2p.virt)
		goto out_iounmap;

	/* Carve the SLIM2 region into mailbox, extension, PCB and IOCBs. */
	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
	phba->mbox_ext = (phba->slim2p.virt +
		offsetof(struct lpfc_sli2_slim, mbx_ext_words));
	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
	phba->IOCBs = (phba->slim2p.virt +
		       offsetof(struct lpfc_sli2_slim, IOCBs));

	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
						 lpfc_sli_hbq_size(),
						 &phba->hbqslimp.phys,
						 GFP_KERNEL);
	if (!phba->hbqslimp.virt)
		goto out_free_slim;

	/* Point each HBQ at its slice of the HBQ slim. */
	hbq_count = lpfc_sli_hbq_count();
	ptr = phba->hbqslimp.virt;
	for (i = 0; i < hbq_count; ++i) {
		phba->hbqs[i].hbq_virt = ptr;
		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
		ptr += (lpfc_hbq_defs[i]->entry_count *
			sizeof(struct lpfc_hbq_entry));
	}
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;

	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());

	/* Derive the host register shortcut addresses. */
	phba->MBslimaddr = phba->slim_memmap_p;
	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;

	return 0;

out_free_slim:
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);
out_iounmap:
	iounmap(phba->ctrl_regs_memmap_p);
out_iounmap_slim:
	iounmap(phba->slim_memmap_p);
out:
	return error;
}
7812 
7813 
7814 
7815 
7816 
7817 
7818 
7819 
7820 static void
7821 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
7822 {
7823         struct pci_dev *pdev;
7824 
7825         
7826         if (!phba->pcidev)
7827                 return;
7828         else
7829                 pdev = phba->pcidev;
7830 
7831         
7832         dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
7833                           phba->hbqslimp.virt, phba->hbqslimp.phys);
7834         dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7835                           phba->slim2p.virt, phba->slim2p.phys);
7836 
7837         
7838         iounmap(phba->ctrl_regs_memmap_p);
7839         iounmap(phba->slim_memmap_p);
7840 
7841         return;
7842 }
7843 
7844 
7845 
7846 
7847 
7848 
7849 
7850 
7851 
7852 
7853 int
7854 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
7855 {
7856         struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
7857         struct lpfc_register reg_data;
7858         int i, port_error = 0;
7859         uint32_t if_type;
7860 
7861         memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
7862         memset(®_data, 0, sizeof(reg_data));
7863         if (!phba->sli4_hba.PSMPHRregaddr)
7864                 return -ENODEV;
7865 
7866         
7867         for (i = 0; i < 3000; i++) {
7868                 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
7869                         &portsmphr_reg.word0) ||
7870                         (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
7871                         
7872                         port_error = -ENODEV;
7873                         break;
7874                 }
7875                 if (LPFC_POST_STAGE_PORT_READY ==
7876                     bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
7877                         break;
7878                 msleep(10);
7879         }
7880 
7881         
7882 
7883 
7884 
7885         if (port_error) {
7886                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7887                         "1408 Port Failed POST - portsmphr=0x%x, "
7888                         "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
7889                         "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
7890                         portsmphr_reg.word0,
7891                         bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
7892                         bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
7893                         bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
7894                         bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
7895                         bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
7896                         bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
7897                         bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
7898                         bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
7899         } else {
7900                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7901                                 "2534 Device Info: SLIFamily=0x%x, "
7902                                 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
7903                                 "SLIHint_2=0x%x, FT=0x%x\n",
7904                                 bf_get(lpfc_sli_intf_sli_family,
7905                                        &phba->sli4_hba.sli_intf),
7906                                 bf_get(lpfc_sli_intf_slirev,
7907                                        &phba->sli4_hba.sli_intf),
7908                                 bf_get(lpfc_sli_intf_if_type,
7909                                        &phba->sli4_hba.sli_intf),
7910                                 bf_get(lpfc_sli_intf_sli_hint1,
7911                                        &phba->sli4_hba.sli_intf),
7912                                 bf_get(lpfc_sli_intf_sli_hint2,
7913                                        &phba->sli4_hba.sli_intf),
7914                                 bf_get(lpfc_sli_intf_func_type,
7915                                        &phba->sli4_hba.sli_intf));
7916                 
7917 
7918 
7919 
7920 
7921                 if_type = bf_get(lpfc_sli_intf_if_type,
7922                                  &phba->sli4_hba.sli_intf);
7923                 switch (if_type) {
7924                 case LPFC_SLI_INTF_IF_TYPE_0:
7925                         phba->sli4_hba.ue_mask_lo =
7926                               readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
7927                         phba->sli4_hba.ue_mask_hi =
7928                               readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
7929                         uerrlo_reg.word0 =
7930                               readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
7931                         uerrhi_reg.word0 =
7932                                 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
7933                         if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
7934                             (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
7935                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7936                                                 "1422 Unrecoverable Error "
7937                                                 "Detected during POST "
7938                                                 "uerr_lo_reg=0x%x, "
7939                                                 "uerr_hi_reg=0x%x, "
7940                                                 "ue_mask_lo_reg=0x%x, "
7941                                                 "ue_mask_hi_reg=0x%x\n",
7942                                                 uerrlo_reg.word0,
7943                                                 uerrhi_reg.word0,
7944                                                 phba->sli4_hba.ue_mask_lo,
7945                                                 phba->sli4_hba.ue_mask_hi);
7946                                 port_error = -ENODEV;
7947                         }
7948                         break;
7949                 case LPFC_SLI_INTF_IF_TYPE_2:
7950                 case LPFC_SLI_INTF_IF_TYPE_6:
7951                         
7952                         if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
7953                                 ®_data.word0) ||
7954                                 (bf_get(lpfc_sliport_status_err, ®_data) &&
7955                                  !bf_get(lpfc_sliport_status_rn, ®_data))) {
7956                                 phba->work_status[0] =
7957                                         readl(phba->sli4_hba.u.if_type2.
7958                                               ERR1regaddr);
7959                                 phba->work_status[1] =
7960                                         readl(phba->sli4_hba.u.if_type2.
7961                                               ERR2regaddr);
7962                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7963                                         "2888 Unrecoverable port error "
7964                                         "following POST: port status reg "
7965                                         "0x%x, port_smphr reg 0x%x, "
7966                                         "error 1=0x%x, error 2=0x%x\n",
7967                                         reg_data.word0,
7968                                         portsmphr_reg.word0,
7969                                         phba->work_status[0],
7970                                         phba->work_status[1]);
7971                                 port_error = -ENODEV;
7972                         }
7973                         break;
7974                 case LPFC_SLI_INTF_IF_TYPE_1:
7975                 default:
7976                         break;
7977                 }
7978         }
7979         return port_error;
7980 }
7981 
7982 
7983 
7984 
7985 
7986 
7987 
7988 
7989 
7990 static void
7991 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
7992 {
7993         switch (if_type) {
7994         case LPFC_SLI_INTF_IF_TYPE_0:
7995                 phba->sli4_hba.u.if_type0.UERRLOregaddr =
7996                         phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
7997                 phba->sli4_hba.u.if_type0.UERRHIregaddr =
7998                         phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
7999                 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
8000                         phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
8001                 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
8002                         phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
8003                 phba->sli4_hba.SLIINTFregaddr =
8004                         phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
8005                 break;
8006         case LPFC_SLI_INTF_IF_TYPE_2:
8007                 phba->sli4_hba.u.if_type2.EQDregaddr =
8008                         phba->sli4_hba.conf_regs_memmap_p +
8009                                                 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
8010                 phba->sli4_hba.u.if_type2.ERR1regaddr =
8011                         phba->sli4_hba.conf_regs_memmap_p +
8012                                                 LPFC_CTL_PORT_ER1_OFFSET;
8013                 phba->sli4_hba.u.if_type2.ERR2regaddr =
8014                         phba->sli4_hba.conf_regs_memmap_p +
8015                                                 LPFC_CTL_PORT_ER2_OFFSET;
8016                 phba->sli4_hba.u.if_type2.CTRLregaddr =
8017                         phba->sli4_hba.conf_regs_memmap_p +
8018                                                 LPFC_CTL_PORT_CTL_OFFSET;
8019                 phba->sli4_hba.u.if_type2.STATUSregaddr =
8020                         phba->sli4_hba.conf_regs_memmap_p +
8021                                                 LPFC_CTL_PORT_STA_OFFSET;
8022                 phba->sli4_hba.SLIINTFregaddr =
8023                         phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
8024                 phba->sli4_hba.PSMPHRregaddr =
8025                         phba->sli4_hba.conf_regs_memmap_p +
8026                                                 LPFC_CTL_PORT_SEM_OFFSET;
8027                 phba->sli4_hba.RQDBregaddr =
8028                         phba->sli4_hba.conf_regs_memmap_p +
8029                                                 LPFC_ULP0_RQ_DOORBELL;
8030                 phba->sli4_hba.WQDBregaddr =
8031                         phba->sli4_hba.conf_regs_memmap_p +
8032                                                 LPFC_ULP0_WQ_DOORBELL;
8033                 phba->sli4_hba.CQDBregaddr =
8034                         phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
8035                 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
8036                 phba->sli4_hba.MQDBregaddr =
8037                         phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
8038                 phba->sli4_hba.BMBXregaddr =
8039                         phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
8040                 break;
8041         case LPFC_SLI_INTF_IF_TYPE_6:
8042                 phba->sli4_hba.u.if_type2.EQDregaddr =
8043                         phba->sli4_hba.conf_regs_memmap_p +
8044                                                 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
8045                 phba->sli4_hba.u.if_type2.ERR1regaddr =
8046                         phba->sli4_hba.conf_regs_memmap_p +
8047                                                 LPFC_CTL_PORT_ER1_OFFSET;
8048                 phba->sli4_hba.u.if_type2.ERR2regaddr =
8049                         phba->sli4_hba.conf_regs_memmap_p +
8050                                                 LPFC_CTL_PORT_ER2_OFFSET;
8051                 phba->sli4_hba.u.if_type2.CTRLregaddr =
8052                         phba->sli4_hba.conf_regs_memmap_p +
8053                                                 LPFC_CTL_PORT_CTL_OFFSET;
8054                 phba->sli4_hba.u.if_type2.STATUSregaddr =
8055                         phba->sli4_hba.conf_regs_memmap_p +
8056                                                 LPFC_CTL_PORT_STA_OFFSET;
8057                 phba->sli4_hba.PSMPHRregaddr =
8058                         phba->sli4_hba.conf_regs_memmap_p +
8059                                                 LPFC_CTL_PORT_SEM_OFFSET;
8060                 phba->sli4_hba.BMBXregaddr =
8061                         phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
8062                 break;
8063         case LPFC_SLI_INTF_IF_TYPE_1:
8064         default:
8065                 dev_printk(KERN_ERR, &phba->pcidev->dev,
8066                            "FATAL - unsupported SLI4 interface type - %d\n",
8067                            if_type);
8068                 break;
8069         }
8070 }
8071 
8072 
8073 
8074 
8075 
8076 
8077 
8078 static void
8079 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
8080 {
8081         switch (if_type) {
8082         case LPFC_SLI_INTF_IF_TYPE_0:
8083                 phba->sli4_hba.PSMPHRregaddr =
8084                         phba->sli4_hba.ctrl_regs_memmap_p +
8085                         LPFC_SLIPORT_IF0_SMPHR;
8086                 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8087                         LPFC_HST_ISR0;
8088                 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8089                         LPFC_HST_IMR0;
8090                 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8091                         LPFC_HST_ISCR0;
8092                 break;
8093         case LPFC_SLI_INTF_IF_TYPE_6:
8094                 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8095                         LPFC_IF6_RQ_DOORBELL;
8096                 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8097                         LPFC_IF6_WQ_DOORBELL;
8098                 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8099                         LPFC_IF6_CQ_DOORBELL;
8100                 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8101                         LPFC_IF6_EQ_DOORBELL;
8102                 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8103                         LPFC_IF6_MQ_DOORBELL;
8104                 break;
8105         case LPFC_SLI_INTF_IF_TYPE_2:
8106         case LPFC_SLI_INTF_IF_TYPE_1:
8107         default:
8108                 dev_err(&phba->pcidev->dev,
8109                            "FATAL - unsupported SLI4 interface type - %d\n",
8110                            if_type);
8111                 break;
8112         }
8113 }
8114 
8115 
8116 
8117 
8118 
8119 
8120 
8121 
8122 
8123 
8124 
8125 static int
8126 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
8127 {
8128         if (vf > LPFC_VIR_FUNC_MAX)
8129                 return -ENODEV;
8130 
8131         phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8132                                 vf * LPFC_VFR_PAGE_SIZE +
8133                                         LPFC_ULP0_RQ_DOORBELL);
8134         phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8135                                 vf * LPFC_VFR_PAGE_SIZE +
8136                                         LPFC_ULP0_WQ_DOORBELL);
8137         phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8138                                 vf * LPFC_VFR_PAGE_SIZE +
8139                                         LPFC_EQCQ_DOORBELL);
8140         phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
8141         phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8142                                 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
8143         phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8144                                 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
8145         return 0;
8146 }
8147 
8148 
8149 
8150 
8151 
8152 
8153 
8154 
8155 
8156 
8157 
8158 
8159 
8160 
8161 
8162 
/**
 * lpfc_create_bootstrap_mbox - Create the SLI4 bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocates a DMA-coherent buffer for the bootstrap mailbox, derives
 * 16-byte-aligned virtual/physical addresses from it, and pre-encodes
 * the aligned physical address into the two 32-bit dma_address words
 * in the format the port's bootstrap mailbox register expects.
 *
 * Return: 0 on success, -ENOMEM if either allocation fails.
 **/
static int
lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
{
	uint32_t bmbx_size;
	struct lpfc_dmabuf *dmabuf;
	struct dma_address *dma_address;
	uint32_t pa_addr;
	uint64_t phys_addr;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * Over-allocate by (LPFC_ALIGN_16_BYTE - 1) so a 16-byte-aligned
	 * region can always be carved out of the returned buffer.
	 */
	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
					  &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		/* Release the descriptor allocated above */
		kfree(dmabuf);
		return -ENOMEM;
	}

	/*
	 * Keep the raw dmabuf (needed later to free the region) and
	 * record the aligned virtual (avirt) and physical (aphys)
	 * addresses that are actually presented to the hardware.
	 */
	phba->sli4_hba.bmbx.dmabuf = dmabuf;
	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;

	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
					      LPFC_ALIGN_16_BYTE);
	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
					      LPFC_ALIGN_16_BYTE);

	/*
	 * Split the aligned physical address across the hi/lo register
	 * words: addr_hi carries bits 63:34 and addr_lo carries bits
	 * 33:4, each shifted left by 2 with the respective BIT1 marker
	 * OR'ed in (16-byte alignment guarantees bits 3:0 are zero).
	 */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_HI);

	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_LO);
	return 0;
}
8222 
8223 
8224 
8225 
8226 
8227 
8228 
8229 
8230 
8231 
8232 
8233 
8234 static void
8235 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
8236 {
8237         dma_free_coherent(&phba->pcidev->dev,
8238                           phba->sli4_hba.bmbx.bmbx_size,
8239                           phba->sli4_hba.bmbx.dmabuf->virt,
8240                           phba->sli4_hba.bmbx.dmabuf->phys);
8241 
8242         kfree(phba->sli4_hba.bmbx.dmabuf);
8243         memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
8244 }
8245 
8246 
8247 
8248 
8249 
8250 
8251 
8252 
8253 
8254 
8255 
8256 
8257 
8258 
8259 
/**
 * lpfc_sli4_read_config - Read SLI4 port configuration via mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * Issues a READ_CONFIG mailbox command (polled) and caches the port's
 * resource limits (XRI/VPI/RPI/VFI/FCFI counts and bases, EQ/CQ/WQ/RQ
 * maxima, link metadata, BB-credit parameters) in phba.  Driver queue
 * and queue-depth settings are clamped to what the port reports.  For
 * if_type >= 2 ports it additionally honors a firmware-forced link
 * speed and issues GET_FUNCTION_CONFIG to discover the PF/VF numbers.
 *
 * Return: 0 on success, -ENOMEM if the mailbox cannot be allocated,
 * -EIO if READ_CONFIG fails.  GET_FUNCTION_CONFIG failures are logged
 * but do not affect the return value.
 **/
int
lpfc_sli4_read_config(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	struct lpfc_mbx_read_config *rd_config;
	union  lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;
	struct lpfc_mbx_get_func_cfg *get_func_cfg;
	struct lpfc_rsrc_desc_fcfcoe *desc;
	char *pdesc_0;
	uint16_t forced_link_speed;
	uint32_t if_type, qmin;
	int length, i, rc = 0, rc2;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2011 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	lpfc_read_config(phba, pmb);

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"2012 Mailbox failed , mbxCmd x%x "
			"READ_CONFIG, mbxStatus x%x\n",
			bf_get(lpfc_mqe_command, &pmb->u.mqe),
			bf_get(lpfc_mqe_status, &pmb->u.mqe));
		rc = -EIO;
	} else {
		/* rd_config is only valid on mailbox success */
		rd_config = &pmb->u.mqe.un.rd_config;
		if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
			/* Link type/number data is valid (ldv bit set) */
			phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
			phba->sli4_hba.lnk_info.lnk_tp =
				bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
			phba->sli4_hba.lnk_info.lnk_no =
				bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"3081 lnk_type:%d, lnk_numb:%d\n",
					phba->sli4_hba.lnk_info.lnk_tp,
					phba->sli4_hba.lnk_info.lnk_no);
		} else
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"3082 Mailbox (x%x) returned ldv:x0\n",
					bf_get(lpfc_mqe_command, &pmb->u.mqe));
		if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
			/* Port defines BB-credit recovery parameters */
			phba->bbcredit_support = 1;
			phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
		}

		phba->sli4_hba.conf_trunk =
			bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
		phba->sli4_hba.extents_in_use =
			bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
		phba->sli4_hba.max_cfg_param.max_xri =
			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
		/* Reduce resource usage in a kdump (crash capture) kernel */
		if (is_kdump_kernel() &&
		    phba->sli4_hba.max_cfg_param.max_xri > 512)
			phba->sli4_hba.max_cfg_param.max_xri = 512;
		phba->sli4_hba.max_cfg_param.xri_base =
			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vpi =
			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
		/* Cap VPIs at the driver's supported vport maximum */
		if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
			phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
		phba->sli4_hba.max_cfg_param.vpi_base =
			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_rpi =
			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.rpi_base =
			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vfi =
			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vfi_base =
			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_fcfi =
			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_eq =
			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_rq =
			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_wq =
			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_cq =
			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
		/* max_vpi counts the physical port's VPI; vports get one less */
		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
		phba->max_vports = phba->max_vpi;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2003 cfg params Extents? %d "
				"XRI(B:%d M:%d), "
				"VPI(B:%d M:%d) "
				"VFI(B:%d M:%d) "
				"RPI(B:%d M:%d) "
				"FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n",
				phba->sli4_hba.extents_in_use,
				phba->sli4_hba.max_cfg_param.xri_base,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.vpi_base,
				phba->sli4_hba.max_cfg_param.max_vpi,
				phba->sli4_hba.max_cfg_param.vfi_base,
				phba->sli4_hba.max_cfg_param.max_vfi,
				phba->sli4_hba.max_cfg_param.rpi_base,
				phba->sli4_hba.max_cfg_param.max_rpi,
				phba->sli4_hba.max_cfg_param.max_fcfi,
				phba->sli4_hba.max_cfg_param.max_eq,
				phba->sli4_hba.max_cfg_param.max_cq,
				phba->sli4_hba.max_cfg_param.max_wq,
				phba->sli4_hba.max_cfg_param.max_rq);

		/*
		 * qmin = smallest of the WQ/CQ/EQ maxima; the driver's
		 * per-CPU channel counts cannot exceed it.
		 */
		qmin = phba->sli4_hba.max_cfg_param.max_wq;
		if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
			qmin = phba->sli4_hba.max_cfg_param.max_cq;
		if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
			qmin = phba->sli4_hba.max_cfg_param.max_eq;
		/*
		 * Reserve 4 queues from the budget for non-IO uses
		 * (e.g. mailbox/ELS/NVME LS paths) before clamping.
		 */
		qmin -= 4;

		/* Clamp IRQ channels and hardware queues to the budget */
		if ((phba->cfg_irq_chann > qmin) ||
		    (phba->cfg_hdw_queue > qmin)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2005 Reducing Queues: "
					"WQ %d CQ %d EQ %d: min %d: "
					"IRQ %d HDWQ %d\n",
					phba->sli4_hba.max_cfg_param.max_wq,
					phba->sli4_hba.max_cfg_param.max_cq,
					phba->sli4_hba.max_cfg_param.max_eq,
					qmin, phba->cfg_irq_chann,
					phba->cfg_hdw_queue);

			if (phba->cfg_irq_chann > qmin)
				phba->cfg_irq_chann = qmin;
			if (phba->cfg_hdw_queue > qmin)
				phba->cfg_hdw_queue = qmin;
		}
	}

	if (rc)
		goto read_cfg_out;

	/* Honor a firmware-forced link speed on if_type >= 2 ports */
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
		forced_link_speed =
			bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
		if (forced_link_speed) {
			phba->hba_flag |= HBA_FORCED_LINK_SPEED;

			switch (forced_link_speed) {
			case LINK_SPEED_1G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_1G;
				break;
			case LINK_SPEED_2G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_2G;
				break;
			case LINK_SPEED_4G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_4G;
				break;
			case LINK_SPEED_8G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_8G;
				break;
			case LINK_SPEED_10G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_10G;
				break;
			case LINK_SPEED_16G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_16G;
				break;
			case LINK_SPEED_32G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_32G;
				break;
			case LINK_SPEED_64G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_64G;
				break;
			case 0xffff:
				/* 0xffff means no forced speed: autoneg */
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_AUTO;
				break;
			default:
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"0047 Unrecognized link "
						"speed : %d\n",
						forced_link_speed);
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_AUTO;
			}
		}
	}

	/* Clamp HBA queue depth to XRIs remaining after ELS reservations */
	length = phba->sli4_hba.max_cfg_param.max_xri -
			lpfc_sli4_get_els_iocb_cnt(phba);
	if (phba->cfg_hba_queue_depth > length) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3361 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth, length);
		phba->cfg_hba_queue_depth = length;
	}

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
	    LPFC_SLI_INTF_IF_TYPE_2)
		goto read_cfg_out;

	/* Issue GET_FUNCTION_CONFIG to learn the PF/VF numbers */
	length = (sizeof(struct lpfc_mbx_get_func_cfg) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
			 length, LPFC_SLI4_MBX_EMBED);

	rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
				&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	/* Failure here is logged but does not change rc (still 0) */
	if (rc2 || shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3026 Mailbox failed , mbxCmd x%x "
				"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &pmb->u.mqe),
				bf_get(lpfc_mqe_status, &pmb->u.mqe));
		goto read_cfg_out;
	}

	/* Walk the resource descriptors looking for the FC/FCoE entry */
	get_func_cfg = &pmb->u.mqe.un.get_func_cfg;

	pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
	desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
	length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
	if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
		length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
	else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
		goto read_cfg_out;

	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
		desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
		if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
		    bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
			phba->sli4_hba.iov.pf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
			phba->sli4_hba.iov.vf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
			break;
		}
	}

	if (i < LPFC_RSRC_DESC_MAX_NUM)
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3027 GET_FUNCTION_CONFIG: pf_number:%d, "
				"vf_number:%d\n", phba->sli4_hba.iov.pf_number,
				phba->sli4_hba.iov.vf_number);
	else
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3028 GET_FUNCTION_CONFIG: failed to find "
				"Resource Descriptor:x%x\n",
				LPFC_RSRC_DESC_TYPE_FCFCOE);

read_cfg_out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}
8548 
8549 
8550 
8551 
8552 
8553 
8554 
8555 
8556 
8557 
8558 
8559 
8560 
8561 
8562 static int
8563 lpfc_setup_endian_order(struct lpfc_hba *phba)
8564 {
8565         LPFC_MBOXQ_t *mboxq;
8566         uint32_t if_type, rc = 0;
8567         uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
8568                                       HOST_ENDIAN_HIGH_WORD1};
8569 
8570         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8571         switch (if_type) {
8572         case LPFC_SLI_INTF_IF_TYPE_0:
8573                 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
8574                                                        GFP_KERNEL);
8575                 if (!mboxq) {
8576                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8577                                         "0492 Unable to allocate memory for "
8578                                         "issuing SLI_CONFIG_SPECIAL mailbox "
8579                                         "command\n");
8580                         return -ENOMEM;
8581                 }
8582 
8583                 
8584 
8585 
8586 
8587                 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
8588                 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
8589                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8590                 if (rc != MBX_SUCCESS) {
8591                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8592                                         "0493 SLI_CONFIG_SPECIAL mailbox "
8593                                         "failed with status x%x\n",
8594                                         rc);
8595                         rc = -EIO;
8596                 }
8597                 mempool_free(mboxq, phba->mbox_mem_pool);
8598                 break;
8599         case LPFC_SLI_INTF_IF_TYPE_6:
8600         case LPFC_SLI_INTF_IF_TYPE_2:
8601         case LPFC_SLI_INTF_IF_TYPE_1:
8602         default:
8603                 break;
8604         }
8605         return rc;
8606 }
8607 
8608 
8609 
8610 
8611 
8612 
8613 
8614 
8615 
8616 
8617 
8618 
8619 
8620 
8621 static int
8622 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
8623 {
8624         
8625 
8626 
8627 
8628 
8629         if (phba->nvmet_support) {
8630                 if (phba->cfg_irq_chann < phba->cfg_nvmet_mrq)
8631                         phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
8632                 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
8633                         phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
8634         }
8635 
8636         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8637                         "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
8638                         phba->cfg_hdw_queue, phba->cfg_irq_chann,
8639                         phba->cfg_nvmet_mrq);
8640 
8641         
8642         phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
8643         phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
8644 
8645         
8646         phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
8647         phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
8648         return 0;
8649 }
8650 
8651 static int
8652 lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
8653 {
8654         struct lpfc_queue *qdesc;
8655         u32 wqesize;
8656         int cpu;
8657 
8658         cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
8659         
8660         if (phba->enab_exp_wqcq_pages)
8661                 
8662                 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8663                                               phba->sli4_hba.cq_esize,
8664                                               LPFC_CQE_EXP_COUNT, cpu);
8665 
8666         else
8667                 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8668                                               phba->sli4_hba.cq_esize,
8669                                               phba->sli4_hba.cq_ecount, cpu);
8670         if (!qdesc) {
8671                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8672                         "0499 Failed allocate fast-path IO CQ (%d)\n", idx);
8673                 return 1;
8674         }
8675         qdesc->qe_valid = 1;
8676         qdesc->hdwq = idx;
8677         qdesc->chann = cpu;
8678         phba->sli4_hba.hdwq[idx].io_cq = qdesc;
8679 
8680         
8681         if (phba->enab_exp_wqcq_pages) {
8682                 
8683                 wqesize = (phba->fcp_embed_io) ?
8684                         LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
8685                 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8686                                               wqesize,
8687                                               LPFC_WQE_EXP_COUNT, cpu);
8688         } else
8689                 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8690                                               phba->sli4_hba.wq_esize,
8691                                               phba->sli4_hba.wq_ecount, cpu);
8692 
8693         if (!qdesc) {
8694                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8695                                 "0503 Failed allocate fast-path IO WQ (%d)\n",
8696                                 idx);
8697                 return 1;
8698         }
8699         qdesc->hdwq = idx;
8700         qdesc->chann = cpu;
8701         phba->sli4_hba.hdwq[idx].io_wq = qdesc;
8702         list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
8703         return 0;
8704 }
8705 
8706 
8707 
8708 
8709 
8710 
8711 
8712 
8713 
8714 
8715 
8716 
8717 
8718 
8719 
/**
 * lpfc_sli4_queue_create - Allocate all SLI4 queues used by the driver
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocates (but does not register with the port) every queue the SLI4
 * interface needs: the hardware-queue array itself, per-EQ event queues,
 * per-hdwq IO CQ/WQ pairs, the slow-path mailbox and ELS CQ/WQ pairs,
 * optional NVME LS and NVMET CQ-set/MRQ queues, and the header/data
 * receive queue pair.  On any failure everything allocated so far is
 * torn down via lpfc_sli4_queue_destroy().
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - an allocation failed
 **/
int
lpfc_sli4_queue_create(struct lpfc_hba *phba)
{
	struct lpfc_queue *qdesc;
	int idx, cpu, eqcpu;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_vector_map_info *cpup;
	struct lpfc_vector_map_info *eqcpup;
	struct lpfc_eq_intr_info *eqi;

	/*
	 * Record default entry sizes and counts for every queue type
	 * before any allocation takes place.
	 */
	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;

	/* Allocate the hardware-queue array once; on re-entry it is reused */
	if (!phba->sli4_hba.hdwq) {
		phba->sli4_hba.hdwq = kcalloc(
			phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
			GFP_KERNEL);
		if (!phba->sli4_hba.hdwq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"6427 Failed allocate memory for "
					"fast-path Hardware Queue array\n");
			goto out_error;
		}
		/* Initialize the locks, lists and counters of each hdwq */
		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
			qp = &phba->sli4_hba.hdwq[idx];
			spin_lock_init(&qp->io_buf_list_get_lock);
			spin_lock_init(&qp->io_buf_list_put_lock);
			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
			qp->get_io_bufs = 0;
			qp->put_io_bufs = 0;
			qp->total_io_bufs = 0;
			spin_lock_init(&qp->abts_io_buf_list_lock);
			INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
			qp->abts_scsi_io_bufs = 0;
			qp->abts_nvme_io_bufs = 0;
			INIT_LIST_HEAD(&qp->sgl_list);
			INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
			spin_lock_init(&qp->hdwq_lock);
		}
	}

	/* NVMET mode needs pointer arrays for its CQ set and MRQ pairs */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		if (phba->nvmet_support) {
			phba->sli4_hba.nvmet_cqset = kcalloc(
					phba->cfg_nvmet_mrq,
					sizeof(struct lpfc_queue *),
					GFP_KERNEL);
			if (!phba->sli4_hba.nvmet_cqset) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3121 Fail allocate memory for "
					"fast-path CQ set array\n");
				goto out_error;
			}
			phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
					phba->cfg_nvmet_mrq,
					sizeof(struct lpfc_queue *),
					GFP_KERNEL);
			if (!phba->sli4_hba.nvmet_mrq_hdr) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3122 Fail allocate memory for "
					"fast-path RQ set hdr array\n");
				goto out_error;
			}
			phba->sli4_hba.nvmet_mrq_data = kcalloc(
					phba->cfg_nvmet_mrq,
					sizeof(struct lpfc_queue *),
					GFP_KERNEL);
			if (!phba->sli4_hba.nvmet_mrq_data) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3124 Fail allocate memory for "
					"fast-path RQ set data array\n");
				goto out_error;
			}
		}
	}

	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);

	/* Create an EQ for every CPU flagged as the first for its IRQ */
	for_each_present_cpu(cpu) {
		/* Only CPUs marked LPFC_CPU_FIRST_IRQ own an EQ; others
		 * will share one in the pass below.
		 */
		cpup = &phba->sli4_hba.cpu_map[cpu];
		if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
			continue;

		/* Hardware queue this CPU is mapped to */
		qp = &phba->sli4_hba.hdwq[cpup->hdwq];

		/* Allocate an EQ local to this CPU */
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
					      phba->sli4_hba.eq_esize,
					      phba->sli4_hba.eq_ecount, cpu);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0497 Failed allocate EQ (%d)\n",
					cpup->hdwq);
			goto out_error;
		}
		qdesc->qe_valid = 1;
		qdesc->hdwq = cpup->hdwq;
		qdesc->chann = cpu; /* first CPU this EQ is affinitized to */
		qdesc->last_cpu = qdesc->chann;

		/* Attach the EQ to its hardware queue */
		qp->hba_eq = qdesc;

		/* Track the EQ on the per-CPU interrupt-info list */
		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
		list_add(&qdesc->cpu_list, &eqi->list);
	}

	/* Second pass: hardware queues whose CPUs were not FIRST_IRQ
	 * borrow the EQ of the hdwq that owns their assigned EQ.
	 */
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* EQ owners were handled in the pass above */
		if (cpup->flag & LPFC_CPU_FIRST_IRQ)
			continue;

		/* Skip hdwqs that already have an EQ attached */
		qp = &phba->sli4_hba.hdwq[cpup->hdwq];
		if (qp->hba_eq)
			continue;

		/* Share the EQ created for this CPU's assigned EQ number */
		eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
		eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
		qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
	}

	/* Allocate the IO CQ/WQ pair for each hardware queue */
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		if (lpfc_alloc_io_wq_cq(phba, idx))
			goto out_error;
	}

	/* NVMET mode: one CQ per configured MRQ */
	if (phba->nvmet_support) {
		for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
			cpu = lpfc_find_cpu_handle(phba, idx,
						   LPFC_FIND_BY_HDWQ);
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_DEFAULT_PAGE_SIZE,
						      phba->sli4_hba.cq_esize,
						      phba->sli4_hba.cq_ecount,
						      cpu);
			if (!qdesc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3142 Failed allocate NVME "
						"CQ Set (%d)\n", idx);
				goto out_error;
			}
			qdesc->qe_valid = 1;
			qdesc->hdwq = idx;
			qdesc->chann = cpu;
			phba->sli4_hba.nvmet_cqset[idx] = qdesc;
		}
	}

	/*
	 * Slow-path queues follow; they are all placed on the CPU that
	 * handles EQ 0.
	 */
	cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
	/* Mailbox completion queue */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0500 Failed allocate slow-path mailbox CQ\n");
		goto out_error;
	}
	qdesc->qe_valid = 1;
	phba->sli4_hba.mbx_cq = qdesc;

	/* ELS completion queue */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0501 Failed allocate slow-path ELS CQ\n");
		goto out_error;
	}
	qdesc->qe_valid = 1;
	qdesc->chann = cpu;
	phba->sli4_hba.els_cq = qdesc;


	/*
	 * Mailbox work queue (MQ) for posting mailbox commands to the
	 * port.
	 */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.mq_esize,
				      phba->sli4_hba.mq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0505 Failed allocate slow-path MQ\n");
		goto out_error;
	}
	qdesc->chann = cpu;
	phba->sli4_hba.mbx_wq = qdesc;

	/*
	 * ELS work queue; also tracked on the driver's global WQ list.
	 */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.wq_esize,
				      phba->sli4_hba.wq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0504 Failed allocate slow-path ELS WQ\n");
		goto out_error;
	}
	qdesc->chann = cpu;
	phba->sli4_hba.els_wq = qdesc;
	list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		/* NVME LS completion queue */
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
					      phba->sli4_hba.cq_esize,
					      phba->sli4_hba.cq_ecount, cpu);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"6079 Failed allocate NVME LS CQ\n");
			goto out_error;
		}
		qdesc->chann = cpu;
		qdesc->qe_valid = 1;
		phba->sli4_hba.nvmels_cq = qdesc;

		/* NVME LS work queue */
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
					      phba->sli4_hba.wq_esize,
					      phba->sli4_hba.wq_ecount, cpu);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"6080 Failed allocate NVME LS WQ\n");
			goto out_error;
		}
		qdesc->chann = cpu;
		phba->sli4_hba.nvmels_wq = qdesc;
		list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
	}

	/*
	 * Unsolicited receive queue pair: header RQ and data RQ.
	 */
	/* Receive header queue */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0506 Failed allocate receive HRQ\n");
		goto out_error;
	}
	phba->sli4_hba.hdr_rq = qdesc;

	/* Receive data queue */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0507 Failed allocate receive DRQ\n");
		goto out_error;
	}
	phba->sli4_hba.dat_rq = qdesc;

	/* NVMET mode: one header/data MRQ pair per configured MRQ */
	if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
	    phba->nvmet_support) {
		for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
			cpu = lpfc_find_cpu_handle(phba, idx,
						   LPFC_FIND_BY_HDWQ);
			/* NVMET receive header queue */
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_DEFAULT_PAGE_SIZE,
						      phba->sli4_hba.rq_esize,
						      LPFC_NVMET_RQE_DEF_COUNT,
						      cpu);
			if (!qdesc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3146 Failed allocate "
						"receive HRQ\n");
				goto out_error;
			}
			qdesc->hdwq = idx;
			phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;

			/* Buffer-post tracking structure for the header RQ */
			qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
						   GFP_KERNEL,
						   cpu_to_node(cpu));
			if (qdesc->rqbp == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"6131 Failed allocate "
						"Header RQBP\n");
				goto out_error;
			}

			/* Posted-buffer list starts empty */
			INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);

			/* NVMET receive data queue */
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_DEFAULT_PAGE_SIZE,
						      phba->sli4_hba.rq_esize,
						      LPFC_NVMET_RQE_DEF_COUNT,
						      cpu);
			if (!qdesc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3156 Failed allocate "
						"receive DRQ\n");
				goto out_error;
			}
			qdesc->hdwq = idx;
			phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
		}
	}

	/* Reset the NVME IO statistics of every hardware queue */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
			memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
			       sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
		}
	}

	/* Reset the SCSI IO statistics of every hardware queue */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
			memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
			       sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
		}
	}

	return 0;

out_error:
	/* Undo every allocation made so far */
	lpfc_sli4_queue_destroy(phba);
	return -ENOMEM;
}
9088 
9089 static inline void
9090 __lpfc_sli4_release_queue(struct lpfc_queue **qp)
9091 {
9092         if (*qp != NULL) {
9093                 lpfc_sli4_queue_free(*qp);
9094                 *qp = NULL;
9095         }
9096 }
9097 
9098 static inline void
9099 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
9100 {
9101         int idx;
9102 
9103         if (*qs == NULL)
9104                 return;
9105 
9106         for (idx = 0; idx < max; idx++)
9107                 __lpfc_sli4_release_queue(&(*qs)[idx]);
9108 
9109         kfree(*qs);
9110         *qs = NULL;
9111 }
9112 
9113 static inline void
9114 lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
9115 {
9116         struct lpfc_sli4_hdw_queue *hdwq;
9117         struct lpfc_queue *eq;
9118         uint32_t idx;
9119 
9120         hdwq = phba->sli4_hba.hdwq;
9121 
9122         
9123         for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9124                 
9125                 lpfc_sli4_queue_free(hdwq[idx].io_cq);
9126                 lpfc_sli4_queue_free(hdwq[idx].io_wq);
9127                 hdwq[idx].io_cq = NULL;
9128                 hdwq[idx].io_wq = NULL;
9129                 if (phba->cfg_xpsgl && !phba->nvmet_support)
9130                         lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
9131                 lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
9132         }
9133         
9134         for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
9135                 
9136                 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
9137                 lpfc_sli4_queue_free(eq);
9138                 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
9139         }
9140 }
9141 
9142 
9143 
9144 
9145 
9146 
9147 
9148 
9149 
9150 
9151 
9152 
9153 
/**
 * lpfc_sli4_queue_destroy - Free all SLI4 queues allocated by the driver
 * @phba: pointer to lpfc hba data structure.
 *
 * Releases everything lpfc_sli4_queue_create() allocated: per-hdwq queues,
 * NVMET queue arrays, the slow-path mailbox/ELS/NVME-LS queues and the
 * receive queue pair.  Before freeing anything, it synchronizes with
 * concurrent queue users via the LPFC_QUEUE_FREE_INIT/FREE_WAIT flags.
 **/
void
lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
{
	/*
	 * Announce that queue freeing has begun and spin-wait (under
	 * hbalock, sleeping 20ms per retry) until no other context holds
	 * LPFC_QUEUE_FREE_WAIT, i.e. no one is still walking the queues.
	 */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
	while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
		spin_unlock_irq(&phba->hbalock);
		msleep(20);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_sli4_cleanup_poll_list(phba);

	/* Per-hardware-queue CQ/WQ pairs and the shared EQs */
	if (phba->sli4_hba.hdwq)
		lpfc_sli4_release_hdwq(phba);

	if (phba->nvmet_support) {
		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
					 phba->cfg_nvmet_mrq);

		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
					 phba->cfg_nvmet_mrq);
		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
					 phba->cfg_nvmet_mrq);
	}

	/* Mailbox work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);

	/* ELS work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);

	/* NVME LS work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);

	/* Unsolicited receive header/data queue pair */
	__lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
	__lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);

	/* ELS completion queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);

	/* NVME LS completion queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);

	/* Mailbox completion queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);

	/* All WQs are gone; reset the tracking list */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);

	/* Freeing done; clear the in-progress flag */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
	spin_unlock_irq(&phba->hbalock);
}
9217 
9218 int
9219 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
9220 {
9221         struct lpfc_rqb *rqbp;
9222         struct lpfc_dmabuf *h_buf;
9223         struct rqb_dmabuf *rqb_buffer;
9224 
9225         rqbp = rq->rqbp;
9226         while (!list_empty(&rqbp->rqb_buffer_list)) {
9227                 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
9228                                  struct lpfc_dmabuf, list);
9229 
9230                 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
9231                 (rqbp->rqb_free_buffer)(phba, rqb_buffer);
9232                 rqbp->buffer_count--;
9233         }
9234         return 1;
9235 }
9236 
/**
 * lpfc_create_wq_cq - Register one CQ/WQ (or CQ/MQ) pair with the port
 * @phba: pointer to lpfc hba data structure.
 * @eq: parent event queue (must already be set up).
 * @cq: completion queue to create on @eq.
 * @wq: work queue (or mailbox queue when @qtype is LPFC_MBOX) to create
 *      on @cq.
 * @cq_map: optional out-parameter receiving the created CQ's queue id.
 * @qidx: queue index, used only for log messages.
 * @qtype: queue subtype; LPFC_MBOX selects the MQ path, anything else the
 *         WQ path.
 *
 * Return: 0 on success, -ENOMEM if any of the three queues was never
 * allocated, otherwise the non-zero status from the failing create call.
 **/
static int
lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
	struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
	int qidx, uint32_t qtype)
{
	struct lpfc_sli_ring *pring;
	int rc;

	/* All three queues must have been allocated beforehand */
	if (!eq || !cq || !wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"6085 Fast-path %s (%d) not allocated\n",
			((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
		return -ENOMEM;
	}

	/* Create the CQ first: MCQ type for the mailbox path, WCQ else */
	rc = lpfc_cq_create(phba, cq, eq,
			(qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"6086 Failed setup of CQ (%d), rc = 0x%x\n",
			qidx, (uint32_t)rc);
		return rc;
	}

	if (qtype != LPFC_MBOX) {
		/* Report the CQ id back to the caller if requested */
		if (cq_map)
			*cq_map = cq->queue_id;

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
			qidx, cq->queue_id, qidx, eq->queue_id);

		/* Create the work queue on top of the new CQ */
		rc = lpfc_wq_create(phba, wq, cq, qtype);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
				qidx, (uint32_t)rc);
			/* no cleanup here; caller tears down on failure */
			return rc;
		}

		/* Cross-link the SLI ring, the WQ and the CQ */
		pring = wq->pring;
		pring->sli.sli4.wqp = (void *)wq;
		cq->pring = pring;

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
			qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
	} else {
		/* Mailbox path: @wq is really the MQ */
		rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0539 Failed setup of slow-path MQ: "
				"rc = 0x%x\n", rc);
			/* no cleanup here; caller tears down on failure */
			return rc;
		}

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
			phba->sli4_hba.mbx_wq->queue_id,
			phba->sli4_hba.mbx_cq->queue_id);
	}

	return 0;
}
9307 
9308 
9309 
9310 
9311 
9312 
9313 
9314 
9315 static void
9316 lpfc_setup_cq_lookup(struct lpfc_hba *phba)
9317 {
9318         struct lpfc_queue *eq, *childq;
9319         int qidx;
9320 
9321         memset(phba->sli4_hba.cq_lookup, 0,
9322                (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
9323         
9324         for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
9325                 
9326                 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
9327                 if (!eq)
9328                         continue;
9329                 
9330                 list_for_each_entry(childq, &eq->child_list, list) {
9331                         if (childq->queue_id > phba->sli4_hba.cq_max)
9332                                 continue;
9333                         if (childq->subtype == LPFC_IO)
9334                                 phba->sli4_hba.cq_lookup[childq->queue_id] =
9335                                         childq;
9336                 }
9337         }
9338 }
9339 
9340 
9341 
9342 
9343 
9344 
9345 
9346 
9347 
9348 
9349 
9350 
9351 
9352 int
9353 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
9354 {
9355         uint32_t shdr_status, shdr_add_status;
9356         union lpfc_sli4_cfg_shdr *shdr;
9357         struct lpfc_vector_map_info *cpup;
9358         struct lpfc_sli4_hdw_queue *qp;
9359         LPFC_MBOXQ_t *mboxq;
9360         int qidx, cpu;
9361         uint32_t length, usdelay;
9362         int rc = -ENOMEM;
9363 
9364         
9365         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9366         if (!mboxq) {
9367                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9368                                 "3249 Unable to allocate memory for "
9369                                 "QUERY_FW_CFG mailbox command\n");
9370                 return -ENOMEM;
9371         }
9372         length = (sizeof(struct lpfc_mbx_query_fw_config) -
9373                   sizeof(struct lpfc_sli4_cfg_mhdr));
9374         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
9375                          LPFC_MBOX_OPCODE_QUERY_FW_CFG,
9376                          length, LPFC_SLI4_MBX_EMBED);
9377 
9378         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9379 
9380         shdr = (union lpfc_sli4_cfg_shdr *)
9381                         &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
9382         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9383         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9384         if (shdr_status || shdr_add_status || rc) {
9385                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9386                                 "3250 QUERY_FW_CFG mailbox failed with status "
9387                                 "x%x add_status x%x, mbx status x%x\n",
9388                                 shdr_status, shdr_add_status, rc);
9389                 if (rc != MBX_TIMEOUT)
9390                         mempool_free(mboxq, phba->mbox_mem_pool);
9391                 rc = -ENXIO;
9392                 goto out_error;
9393         }
9394 
9395         phba->sli4_hba.fw_func_mode =
9396                         mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
9397         phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
9398         phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
9399         phba->sli4_hba.physical_port =
9400                         mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
9401         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9402                         "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
9403                         "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
9404                         phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
9405 
9406         if (rc != MBX_TIMEOUT)
9407                 mempool_free(mboxq, phba->mbox_mem_pool);
9408 
9409         
9410 
9411 
9412         qp = phba->sli4_hba.hdwq;
9413 
9414         
9415         if (!qp) {
9416                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9417                                 "3147 Fast-path EQs not allocated\n");
9418                 rc = -ENOMEM;
9419                 goto out_error;
9420         }
9421 
9422         
9423         for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
9424                 
9425                 for_each_present_cpu(cpu) {
9426                         cpup = &phba->sli4_hba.cpu_map[cpu];
9427 
9428                         
9429 
9430 
9431                         if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
9432                                 continue;
9433                         if (qidx != cpup->eq)
9434                                 continue;
9435 
9436                         
9437                         rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
9438                                             phba->cfg_fcp_imax);
9439                         if (rc) {
9440                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9441                                                 "0523 Failed setup of fast-path"
9442                                                 " EQ (%d), rc = 0x%x\n",
9443                                                 cpup->eq, (uint32_t)rc);
9444                                 goto out_destroy;
9445                         }
9446 
9447                         
9448                         phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
9449                                 qp[cpup->hdwq].hba_eq;
9450 
9451                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9452                                         "2584 HBA EQ setup: queue[%d]-id=%d\n",
9453                                         cpup->eq,
9454                                         qp[cpup->hdwq].hba_eq->queue_id);
9455                 }
9456         }
9457 
9458         
9459         for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
9460                 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
9461                 cpup = &phba->sli4_hba.cpu_map[cpu];
9462 
9463                 
9464                 rc = lpfc_create_wq_cq(phba,
9465                                        phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
9466                                        qp[qidx].io_cq,
9467                                        qp[qidx].io_wq,
9468                                        &phba->sli4_hba.hdwq[qidx].io_cq_map,
9469                                        qidx,
9470                                        LPFC_IO);
9471                 if (rc) {
9472                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9473                                         "0535 Failed to setup fastpath "
9474                                         "IO WQ/CQ (%d), rc = 0x%x\n",
9475                                         qidx, (uint32_t)rc);
9476                         goto out_destroy;
9477                 }
9478         }
9479 
9480         
9481 
9482 
9483 
9484         
9485 
9486         if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
9487                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9488                                 "0528 %s not allocated\n",
9489                                 phba->sli4_hba.mbx_cq ?
9490                                 "Mailbox WQ" : "Mailbox CQ");
9491                 rc = -ENOMEM;
9492                 goto out_destroy;
9493         }
9494 
9495         rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9496                                phba->sli4_hba.mbx_cq,
9497                                phba->sli4_hba.mbx_wq,
9498                                NULL, 0, LPFC_MBOX);
9499         if (rc) {
9500                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9501                         "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
9502                         (uint32_t)rc);
9503                 goto out_destroy;
9504         }
9505         if (phba->nvmet_support) {
9506                 if (!phba->sli4_hba.nvmet_cqset) {
9507                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9508                                         "3165 Fast-path NVME CQ Set "
9509                                         "array not allocated\n");
9510                         rc = -ENOMEM;
9511                         goto out_destroy;
9512                 }
9513                 if (phba->cfg_nvmet_mrq > 1) {
9514                         rc = lpfc_cq_create_set(phba,
9515                                         phba->sli4_hba.nvmet_cqset,
9516                                         qp,
9517                                         LPFC_WCQ, LPFC_NVMET);
9518                         if (rc) {
9519                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9520                                                 "3164 Failed setup of NVME CQ "
9521                                                 "Set, rc = 0x%x\n",
9522                                                 (uint32_t)rc);
9523                                 goto out_destroy;
9524                         }
9525                 } else {
9526                         
9527                         rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
9528                                             qp[0].hba_eq,
9529                                             LPFC_WCQ, LPFC_NVMET);
9530                         if (rc) {
9531                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9532                                                 "6089 Failed setup NVMET CQ: "
9533                                                 "rc = 0x%x\n", (uint32_t)rc);
9534                                 goto out_destroy;
9535                         }
9536                         phba->sli4_hba.nvmet_cqset[0]->chann = 0;
9537 
9538                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9539                                         "6090 NVMET CQ setup: cq-id=%d, "
9540                                         "parent eq-id=%d\n",
9541                                         phba->sli4_hba.nvmet_cqset[0]->queue_id,
9542                                         qp[0].hba_eq->queue_id);
9543                 }
9544         }
9545 
9546         
9547         if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
9548                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9549                                 "0530 ELS %s not allocated\n",
9550                                 phba->sli4_hba.els_cq ? "WQ" : "CQ");
9551                 rc = -ENOMEM;
9552                 goto out_destroy;
9553         }
9554         rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9555                                phba->sli4_hba.els_cq,
9556                                phba->sli4_hba.els_wq,
9557                                NULL, 0, LPFC_ELS);
9558         if (rc) {
9559                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9560                                 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
9561                                 (uint32_t)rc);
9562                 goto out_destroy;
9563         }
9564         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9565                         "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
9566                         phba->sli4_hba.els_wq->queue_id,
9567                         phba->sli4_hba.els_cq->queue_id);
9568 
9569         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9570                 
9571                 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
9572                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9573                                         "6091 LS %s not allocated\n",
9574                                         phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
9575                         rc = -ENOMEM;
9576                         goto out_destroy;
9577                 }
9578                 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9579                                        phba->sli4_hba.nvmels_cq,
9580                                        phba->sli4_hba.nvmels_wq,
9581                                        NULL, 0, LPFC_NVME_LS);
9582                 if (rc) {
9583                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9584                                         "0526 Failed setup of NVVME LS WQ/CQ: "
9585                                         "rc = 0x%x\n", (uint32_t)rc);
9586                         goto out_destroy;
9587                 }
9588 
9589                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9590                                 "6096 ELS WQ setup: wq-id=%d, "
9591                                 "parent cq-id=%d\n",
9592                                 phba->sli4_hba.nvmels_wq->queue_id,
9593                                 phba->sli4_hba.nvmels_cq->queue_id);
9594         }
9595 
9596         
9597 
9598 
9599         if (phba->nvmet_support) {
9600                 if ((!phba->sli4_hba.nvmet_cqset) ||
9601                     (!phba->sli4_hba.nvmet_mrq_hdr) ||
9602                     (!phba->sli4_hba.nvmet_mrq_data)) {
9603                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9604                                         "6130 MRQ CQ Queues not "
9605                                         "allocated\n");
9606                         rc = -ENOMEM;
9607                         goto out_destroy;
9608                 }
9609                 if (phba->cfg_nvmet_mrq > 1) {
9610                         rc = lpfc_mrq_create(phba,
9611                                              phba->sli4_hba.nvmet_mrq_hdr,
9612                                              phba->sli4_hba.nvmet_mrq_data,
9613                                              phba->sli4_hba.nvmet_cqset,
9614                                              LPFC_NVMET);
9615                         if (rc) {
9616                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9617                                                 "6098 Failed setup of NVMET "
9618                                                 "MRQ: rc = 0x%x\n",
9619                                                 (uint32_t)rc);
9620                                 goto out_destroy;
9621                         }
9622 
9623                 } else {
9624                         rc = lpfc_rq_create(phba,
9625                                             phba->sli4_hba.nvmet_mrq_hdr[0],
9626                                             phba->sli4_hba.nvmet_mrq_data[0],
9627                                             phba->sli4_hba.nvmet_cqset[0],
9628                                             LPFC_NVMET);
9629                         if (rc) {
9630                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9631                                                 "6057 Failed setup of NVMET "
9632                                                 "Receive Queue: rc = 0x%x\n",
9633                                                 (uint32_t)rc);
9634                                 goto out_destroy;
9635                         }
9636 
9637                         lpfc_printf_log(
9638                                 phba, KERN_INFO, LOG_INIT,
9639                                 "6099 NVMET RQ setup: hdr-rq-id=%d, "
9640                                 "dat-rq-id=%d parent cq-id=%d\n",
9641                                 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
9642                                 phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
9643                                 phba->sli4_hba.nvmet_cqset[0]->queue_id);
9644 
9645                 }
9646         }
9647 
9648         if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
9649                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9650                                 "0540 Receive Queue not allocated\n");
9651                 rc = -ENOMEM;
9652                 goto out_destroy;
9653         }
9654 
9655         rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
9656                             phba->sli4_hba.els_cq, LPFC_USOL);
9657         if (rc) {
9658                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9659                                 "0541 Failed setup of Receive Queue: "
9660                                 "rc = 0x%x\n", (uint32_t)rc);
9661                 goto out_destroy;
9662         }
9663 
9664         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9665                         "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
9666                         "parent cq-id=%d\n",
9667                         phba->sli4_hba.hdr_rq->queue_id,
9668                         phba->sli4_hba.dat_rq->queue_id,
9669                         phba->sli4_hba.els_cq->queue_id);
9670 
9671         if (phba->cfg_fcp_imax)
9672                 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
9673         else
9674                 usdelay = 0;
9675 
9676         for (qidx = 0; qidx < phba->cfg_irq_chann;
9677              qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
9678                 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
9679                                          usdelay);
9680 
9681         if (phba->sli4_hba.cq_max) {
9682                 kfree(phba->sli4_hba.cq_lookup);
9683                 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
9684                         sizeof(struct lpfc_queue *), GFP_KERNEL);
9685                 if (!phba->sli4_hba.cq_lookup) {
9686                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9687                                         "0549 Failed setup of CQ Lookup table: "
9688                                         "size 0x%x\n", phba->sli4_hba.cq_max);
9689                         rc = -ENOMEM;
9690                         goto out_destroy;
9691                 }
9692                 lpfc_setup_cq_lookup(phba);
9693         }
9694         return 0;
9695 
9696 out_destroy:
9697         lpfc_sli4_queue_unset(phba);
9698 out_error:
9699         return rc;
9700 }
9701 
9702 
9703 
9704 
9705 
9706 
9707 
9708 
9709 
9710 
9711 
9712 
9713 
/**
 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * Tears down every SLI4 queue previously set up on the port. Order is
 * deliberate: work/receive queues are destroyed before their parent
 * completion queues, and the fast-path WQ/CQ pairs before their EQs.
 * Finally the CQ lookup table is released and its bookkeeping reset.
 **/
void
lpfc_sli4_queue_unset(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_queue *eq;
	int qidx;

	/* Unset mailbox command work queue */
	if (phba->sli4_hba.mbx_wq)
		lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);

	/* Unset NVME LS work queue */
	if (phba->sli4_hba.nvmels_wq)
		lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);

	/* Unset ELS work queue */
	if (phba->sli4_hba.els_wq)
		lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);

	/* Unset unsolicited receive queue (header and data RQs together) */
	if (phba->sli4_hba.hdr_rq)
		lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
				phba->sli4_hba.dat_rq);

	/* Unset mailbox command complete queue */
	if (phba->sli4_hba.mbx_cq)
		lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);

	/* Unset ELS complete queue */
	if (phba->sli4_hba.els_cq)
		lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);

	/* Unset NVME LS complete queue */
	if (phba->sli4_hba.nvmels_cq)
		lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);

	if (phba->nvmet_support) {
		/* Unset NVMET MRQ queues (hdr/data pairs) */
		if (phba->sli4_hba.nvmet_mrq_hdr) {
			for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
				lpfc_rq_destroy(
					phba,
					phba->sli4_hba.nvmet_mrq_hdr[qidx],
					phba->sli4_hba.nvmet_mrq_data[qidx]);
		}

		/* Unset NVMET CQ Set complete queues */
		if (phba->sli4_hba.nvmet_cqset) {
			for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
				lpfc_cq_destroy(
					phba, phba->sli4_hba.nvmet_cqset[qidx]);
		}
	}

	/* Unset fast-path SLI4 queues */
	if (phba->sli4_hba.hdwq) {
		/* Loop thru all Hardware Queues */
		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
			/* Destroy the CQ/WQ corresponding to this HW queue */
			qp = &phba->sli4_hba.hdwq[qidx];
			lpfc_wq_destroy(phba, qp->io_wq);
			lpfc_cq_destroy(phba, qp->io_cq);
		}
		/* Loop thru all IRQ vectors */
		for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
			/* Destroy the EQ corresponding to the IRQ vector */
			eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
			lpfc_eq_destroy(phba, eq);
		}
	}

	/* Release and reset the CQ lookup table */
	kfree(phba->sli4_hba.cq_lookup);
	phba->sli4_hba.cq_lookup = NULL;
	phba->sli4_hba.cq_max = 0;
}
9789 
9790 
9791 
9792 
9793 
9794 
9795 
9796 
9797 
9798 
9799 
9800 
9801 
9802 
9803 
9804 
9805 
9806 static int
9807 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
9808 {
9809         struct lpfc_cq_event *cq_event;
9810         int i;
9811 
9812         for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
9813                 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
9814                 if (!cq_event)
9815                         goto out_pool_create_fail;
9816                 list_add_tail(&cq_event->list,
9817                               &phba->sli4_hba.sp_cqe_event_pool);
9818         }
9819         return 0;
9820 
9821 out_pool_create_fail:
9822         lpfc_sli4_cq_event_pool_destroy(phba);
9823         return -ENOMEM;
9824 }
9825 
9826 
9827 
9828 
9829 
9830 
9831 
9832 
9833 
9834 
9835 
9836 static void
9837 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
9838 {
9839         struct lpfc_cq_event *cq_event, *next_cq_event;
9840 
9841         list_for_each_entry_safe(cq_event, next_cq_event,
9842                                  &phba->sli4_hba.sp_cqe_event_pool, list) {
9843                 list_del(&cq_event->list);
9844                 kfree(cq_event);
9845         }
9846 }
9847 
9848 
9849 
9850 
9851 
9852 
9853 
9854 
9855 
9856 
9857 
9858 struct lpfc_cq_event *
9859 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
9860 {
9861         struct lpfc_cq_event *cq_event = NULL;
9862 
9863         list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
9864                          struct lpfc_cq_event, list);
9865         return cq_event;
9866 }
9867 
9868 
9869 
9870 
9871 
9872 
9873 
9874 
9875 
9876 
9877 
9878 struct lpfc_cq_event *
9879 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
9880 {
9881         struct lpfc_cq_event *cq_event;
9882         unsigned long iflags;
9883 
9884         spin_lock_irqsave(&phba->hbalock, iflags);
9885         cq_event = __lpfc_sli4_cq_event_alloc(phba);
9886         spin_unlock_irqrestore(&phba->hbalock, iflags);
9887         return cq_event;
9888 }
9889 
9890 
9891 
9892 
9893 
9894 
9895 
9896 
9897 
/**
 * __lpfc_sli4_cq_event_release - Return a cq_event to the free pool (lock-free)
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: completion-queue event to return to sp_cqe_event_pool.
 *
 * Caller must hold hbalock; use lpfc_sli4_cq_event_release() for the
 * locked variant.
 **/
void
__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
                             struct lpfc_cq_event *cq_event)
{
        list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
}
9904 
9905 
9906 
9907 
9908 
9909 
9910 
9911 
9912 
9913 void
9914 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
9915                            struct lpfc_cq_event *cq_event)
9916 {
9917         unsigned long iflags;
9918         spin_lock_irqsave(&phba->hbalock, iflags);
9919         __lpfc_sli4_cq_event_release(phba, cq_event);
9920         spin_unlock_irqrestore(&phba->hbalock, iflags);
9921 }
9922 
9923 
9924 
9925 
9926 
9927 
9928 
9929 
/**
 * lpfc_sli4_cq_event_release_all - Release all pending cq_events to the pool
 * @phba: pointer to lpfc hba data structure.
 *
 * Drains the three slow-path pending-event work queues (FCP XRI aborted,
 * ELS XRI aborted, and async events) onto a local list under hbalock, then
 * returns each entry to the sp_cqe_event_pool outside the critical section.
 **/
static void
lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
{
        LIST_HEAD(cqelist);
        struct lpfc_cq_event *cqe;
        unsigned long iflags;

        /* Splice all pending work queues onto a private list under the lock */
        spin_lock_irqsave(&phba->hbalock, iflags);
        /* Pending FCP XRI abort events */
        list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
                         &cqelist);
        /* Pending ELS XRI abort events */
        list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
                         &cqelist);
        /* Pending asynchronous events */
        list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
                         &cqelist);
        spin_unlock_irqrestore(&phba->hbalock, iflags);

        /* Return every drained event to the free pool */
        while (!list_empty(&cqelist)) {
                list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
                lpfc_sli4_cq_event_release(phba, cqe);
        }
}
9955 
9956 
9957 
9958 
9959 
9960 
9961 
9962 
9963 
9964 
9965 
9966 
9967 
9968 int
9969 lpfc_pci_function_reset(struct lpfc_hba *phba)
9970 {
9971         LPFC_MBOXQ_t *mboxq;
9972         uint32_t rc = 0, if_type;
9973         uint32_t shdr_status, shdr_add_status;
9974         uint32_t rdy_chk;
9975         uint32_t port_reset = 0;
9976         union lpfc_sli4_cfg_shdr *shdr;
9977         struct lpfc_register reg_data;
9978         uint16_t devid;
9979 
9980         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
9981         switch (if_type) {
9982         case LPFC_SLI_INTF_IF_TYPE_0:
9983                 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
9984                                                        GFP_KERNEL);
9985                 if (!mboxq) {
9986                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9987                                         "0494 Unable to allocate memory for "
9988                                         "issuing SLI_FUNCTION_RESET mailbox "
9989                                         "command\n");
9990                         return -ENOMEM;
9991                 }
9992 
9993                 
9994                 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
9995                                  LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
9996                                  LPFC_SLI4_MBX_EMBED);
9997                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9998                 shdr = (union lpfc_sli4_cfg_shdr *)
9999                         &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
10000                 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10001                 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
10002                                          &shdr->response);
10003                 if (rc != MBX_TIMEOUT)
10004                         mempool_free(mboxq, phba->mbox_mem_pool);
10005                 if (shdr_status || shdr_add_status || rc) {
10006                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10007                                         "0495 SLI_FUNCTION_RESET mailbox "
10008                                         "failed with status x%x add_status x%x,"
10009                                         " mbx status x%x\n",
10010                                         shdr_status, shdr_add_status, rc);
10011                         rc = -ENXIO;
10012                 }
10013                 break;
10014         case LPFC_SLI_INTF_IF_TYPE_2:
10015         case LPFC_SLI_INTF_IF_TYPE_6:
10016 wait:
10017                 
10018 
10019 
10020 
10021 
10022                 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
10023                         if (lpfc_readl(phba->sli4_hba.u.if_type2.
10024                                 STATUSregaddr, ®_data.word0)) {
10025                                 rc = -ENODEV;
10026                                 goto out;
10027                         }
10028                         if (bf_get(lpfc_sliport_status_rdy, ®_data))
10029                                 break;
10030                         msleep(20);
10031                 }
10032 
10033                 if (!bf_get(lpfc_sliport_status_rdy, ®_data)) {
10034                         phba->work_status[0] = readl(
10035                                 phba->sli4_hba.u.if_type2.ERR1regaddr);
10036                         phba->work_status[1] = readl(
10037                                 phba->sli4_hba.u.if_type2.ERR2regaddr);
10038                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10039                                         "2890 Port not ready, port status reg "
10040                                         "0x%x error 1=0x%x, error 2=0x%x\n",
10041                                         reg_data.word0,
10042                                         phba->work_status[0],
10043                                         phba->work_status[1]);
10044                         rc = -ENODEV;
10045                         goto out;
10046                 }
10047 
10048                 if (!port_reset) {
10049                         
10050 
10051 
10052                         reg_data.word0 = 0;
10053                         bf_set(lpfc_sliport_ctrl_end, ®_data,
10054                                LPFC_SLIPORT_LITTLE_ENDIAN);
10055                         bf_set(lpfc_sliport_ctrl_ip, ®_data,
10056                                LPFC_SLIPORT_INIT_PORT);
10057                         writel(reg_data.word0, phba->sli4_hba.u.if_type2.
10058                                CTRLregaddr);
10059                         
10060                         pci_read_config_word(phba->pcidev,
10061                                              PCI_DEVICE_ID, &devid);
10062 
10063                         port_reset = 1;
10064                         msleep(20);
10065                         goto wait;
10066                 } else if (bf_get(lpfc_sliport_status_rn, ®_data)) {
10067                         rc = -ENODEV;
10068                         goto out;
10069                 }
10070                 break;
10071 
10072         case LPFC_SLI_INTF_IF_TYPE_1:
10073         default:
10074                 break;
10075         }
10076 
10077 out:
10078         
10079         if (rc) {
10080                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10081                                 "3317 HBA not functional: IP Reset Failed "
10082                                 "try: echo fw_reset > board_mode\n");
10083                 rc = -ENODEV;
10084         }
10085 
10086         return rc;
10087 }
10088 
10089 
10090 
10091 
10092 
10093 
10094 
10095 
10096 
10097 
10098 
10099 
10100 static int
10101 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
10102 {
10103         struct pci_dev *pdev = phba->pcidev;
10104         unsigned long bar0map_len, bar1map_len, bar2map_len;
10105         int error;
10106         uint32_t if_type;
10107 
10108         if (!pdev)
10109                 return -ENODEV;
10110 
10111         
10112         error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10113         if (error)
10114                 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10115         if (error)
10116                 return error;
10117 
10118         
10119 
10120 
10121 
10122         if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
10123                                   &phba->sli4_hba.sli_intf.word0)) {
10124                 return -ENODEV;
10125         }
10126 
10127         
10128         if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
10129             LPFC_SLI_INTF_VALID) {
10130                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10131                                 "2894 SLI_INTF reg contents invalid "
10132                                 "sli_intf reg 0x%x\n",
10133                                 phba->sli4_hba.sli_intf.word0);
10134                 return -ENODEV;
10135         }
10136 
10137         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10138         
10139 
10140 
10141 
10142 
10143 
10144         if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
10145                 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
10146                 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
10147 
10148                 
10149 
10150 
10151 
10152                 phba->sli4_hba.conf_regs_memmap_p =
10153                         ioremap(phba->pci_bar0_map, bar0map_len);
10154                 if (!phba->sli4_hba.conf_regs_memmap_p) {
10155                         dev_printk(KERN_ERR, &pdev->dev,
10156                                    "ioremap failed for SLI4 PCI config "
10157                                    "registers.\n");
10158                         return -ENODEV;
10159                 }
10160                 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
10161                 
10162                 lpfc_sli4_bar0_register_memmap(phba, if_type);
10163         } else {
10164                 phba->pci_bar0_map = pci_resource_start(pdev, 1);
10165                 bar0map_len = pci_resource_len(pdev, 1);
10166                 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10167                         dev_printk(KERN_ERR, &pdev->dev,
10168                            "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
10169                         return -ENODEV;
10170                 }
10171                 phba->sli4_hba.conf_regs_memmap_p =
10172                                 ioremap(phba->pci_bar0_map, bar0map_len);
10173                 if (!phba->sli4_hba.conf_regs_memmap_p) {
10174                         dev_printk(KERN_ERR, &pdev->dev,
10175                                 "ioremap failed for SLI4 PCI config "
10176                                 "registers.\n");
10177                         return -ENODEV;
10178                 }
10179                 lpfc_sli4_bar0_register_memmap(phba, if_type);
10180         }
10181 
10182         if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
10183                 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
10184                         
10185 
10186 
10187 
10188                         phba->pci_bar1_map = pci_resource_start(pdev,
10189                                                                 PCI_64BIT_BAR2);
10190                         bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
10191                         phba->sli4_hba.ctrl_regs_memmap_p =
10192                                         ioremap(phba->pci_bar1_map,
10193                                                 bar1map_len);
10194                         if (!phba->sli4_hba.ctrl_regs_memmap_p) {
10195                                 dev_err(&pdev->dev,
10196                                            "ioremap failed for SLI4 HBA "
10197                                             "control registers.\n");
10198                                 error = -ENOMEM;
10199                                 goto out_iounmap_conf;
10200                         }
10201                         phba->pci_bar2_memmap_p =
10202                                          phba->sli4_hba.ctrl_regs_memmap_p;
10203                         lpfc_sli4_bar1_register_memmap(phba, if_type);
10204                 } else {
10205                         error = -ENOMEM;
10206                         goto out_iounmap_conf;
10207                 }
10208         }
10209 
10210         if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
10211             (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
10212                 
10213 
10214 
10215 
10216                 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
10217                 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
10218                 phba->sli4_hba.drbl_regs_memmap_p =
10219                                 ioremap(phba->pci_bar1_map, bar1map_len);
10220                 if (!phba->sli4_hba.drbl_regs_memmap_p) {
10221                         dev_err(&pdev->dev,
10222                            "ioremap failed for SLI4 HBA doorbell registers.\n");
10223                         error = -ENOMEM;
10224                         goto out_iounmap_conf;
10225                 }
10226                 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
10227                 lpfc_sli4_bar1_register_memmap(phba, if_type);
10228         }
10229 
10230         if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
10231                 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
10232                         
10233 
10234 
10235 
10236                         phba->pci_bar2_map = pci_resource_start(pdev,
10237                                                                 PCI_64BIT_BAR4);
10238                         bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
10239                         phba->sli4_hba.drbl_regs_memmap_p =
10240                                         ioremap(phba->pci_bar2_map,
10241                                                 bar2map_len);
10242                         if (!phba->sli4_hba.drbl_regs_memmap_p) {
10243                                 dev_err(&pdev->dev,
10244                                            "ioremap failed for SLI4 HBA"
10245                                            " doorbell registers.\n");
10246                                 error = -ENOMEM;
10247                                 goto out_iounmap_ctrl;
10248                         }
10249                         phba->pci_bar4_memmap_p =
10250                                         phba->sli4_hba.drbl_regs_memmap_p;
10251                         error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
10252                         if (error)
10253                                 goto out_iounmap_all;
10254                 } else {
10255                         error = -ENOMEM;
10256                         goto out_iounmap_all;
10257                 }
10258         }
10259 
10260         if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
10261             pci_resource_start(pdev, PCI_64BIT_BAR4)) {
10262                 
10263 
10264 
10265 
10266                 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
10267                 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
10268                 phba->sli4_hba.dpp_regs_memmap_p =
10269                                 ioremap(phba->pci_bar2_map, bar2map_len);
10270                 if (!phba->sli4_hba.dpp_regs_memmap_p) {
10271                         dev_err(&pdev->dev,
10272                            "ioremap failed for SLI4 HBA dpp registers.\n");
10273                         error = -ENOMEM;
10274                         goto out_iounmap_ctrl;
10275                 }
10276                 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
10277         }
10278 
10279         
10280         switch (if_type) {
10281         case LPFC_SLI_INTF_IF_TYPE_0:
10282         case LPFC_SLI_INTF_IF_TYPE_2:
10283                 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
10284                 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
10285                 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
10286                 break;
10287         case LPFC_SLI_INTF_IF_TYPE_6:
10288                 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
10289                 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
10290                 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
10291                 break;
10292         default:
10293                 break;
10294         }
10295 
10296         return 0;
10297 
10298 out_iounmap_all:
10299         iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10300 out_iounmap_ctrl:
10301         iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
10302 out_iounmap_conf:
10303         iounmap(phba->sli4_hba.conf_regs_memmap_p);
10304 
10305         return error;
10306 }
10307 
10308 
10309 
10310 
10311 
10312 
10313 
10314 
10315 static void
10316 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
10317 {
10318         uint32_t if_type;
10319         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10320 
10321         switch (if_type) {
10322         case LPFC_SLI_INTF_IF_TYPE_0:
10323                 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10324                 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
10325                 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10326                 break;
10327         case LPFC_SLI_INTF_IF_TYPE_2:
10328                 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10329                 break;
10330         case LPFC_SLI_INTF_IF_TYPE_6:
10331                 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10332                 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10333                 break;
10334         case LPFC_SLI_INTF_IF_TYPE_1:
10335         default:
10336                 dev_printk(KERN_ERR, &phba->pcidev->dev,
10337                            "FATAL - unsupported SLI4 interface type - %d\n",
10338                            if_type);
10339                 break;
10340         }
10341 }
10342 
10343 
10344 
10345 
10346 
10347 
10348 
10349 
10350 
10351 
10352 
10353 
/**
 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocates exactly LPFC_MSIX_VECTORS MSI-X vectors, installs the
 * slow-path handler on vector 0 and the fast-path handler on vector 1,
 * then issues a CONFIG_MSI mailbox command to tell the HBA how events
 * map onto the vectors.
 *
 * Error unwinding is strictly last-in-first-out via the goto chain:
 * mailbox buffer, fast-path IRQ, slow-path IRQ, then the vectors.
 *
 * Return: 0 (MBX_SUCCESS) on success, negative errno or mailbox status
 * otherwise.
 */
static int
lpfc_sli_enable_msix(struct lpfc_hba *phba)
{
	int rc;
	LPFC_MBOXQ_t *pmb;

	/* Set up MSI-X multi-message vectors; exactly 2 or fail */
	rc = pci_alloc_irq_vectors(phba->pcidev,
			LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
	if (rc < 0) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0420 PCI enable MSI-X failed (%d)\n", rc);
		goto vec_fail_out;
	}

	/* vector 0: slow-path (mailbox/link/ELS) interrupt handler */
	rc = request_irq(pci_irq_vector(phba->pcidev, 0),
			 &lpfc_sli_sp_intr_handler, 0,
			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0421 MSI-X slow-path request_irq failed "
				"(%d)\n", rc);
		goto msi_fail_out;
	}

	/* vector 1: fast-path (I/O completion) interrupt handler */
	rc = request_irq(pci_irq_vector(phba->pcidev, 1),
			 &lpfc_sli_fp_intr_handler, 0,
			 LPFC_FP_DRIVER_HANDLER_NAME, phba);

	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0429 MSI-X fast-path request_irq failed "
				"(%d)\n", rc);
		goto irq_fail_out;
	}

	/*
	 * Configure HBA MSI-X attention conditions to messages via the
	 * CONFIG_MSI mailbox command (issued in polled mode).
	 */
	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	if (!pmb) {
		rc = -ENOMEM;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0474 Unable to allocate memory for issuing "
				"MBOX_CONFIG_MSI command\n");
		goto mem_fail_out;
	}
	rc = lpfc_config_msi(phba, pmb);
	if (rc)
		goto mbx_fail_out;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"0351 Config MSI mailbox command failed, "
				"mbxCmd x%x, mbxStatus x%x\n",
				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
		goto mbx_fail_out;
	}

	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;

mbx_fail_out:
	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);

mem_fail_out:
	/* free the irq already requested */
	free_irq(pci_irq_vector(phba->pcidev, 1), phba);

irq_fail_out:
	/* free the irq already requested */
	free_irq(pci_irq_vector(phba->pcidev, 0), phba);

msi_fail_out:
	/* Unconfigure MSI-X capability structure */
	pci_free_irq_vectors(phba->pcidev);

vec_fail_out:
	return rc;
}
10443 
10444 
10445 
10446 
10447 
10448 
10449 
10450 
10451 
10452 
10453 
10454 
10455 
10456 
10457 
10458 static int
10459 lpfc_sli_enable_msi(struct lpfc_hba *phba)
10460 {
10461         int rc;
10462 
10463         rc = pci_enable_msi(phba->pcidev);
10464         if (!rc)
10465                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10466                                 "0462 PCI enable MSI mode success.\n");
10467         else {
10468                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10469                                 "0471 PCI enable MSI mode failed (%d)\n", rc);
10470                 return rc;
10471         }
10472 
10473         rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
10474                          0, LPFC_DRIVER_NAME, phba);
10475         if (rc) {
10476                 pci_disable_msi(phba->pcidev);
10477                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10478                                 "0478 MSI request_irq failed (%d)\n", rc);
10479         }
10480         return rc;
10481 }
10482 
10483 
10484 
10485 
10486 
10487 
10488 
10489 
10490 
10491 
10492 
10493 
10494 
10495 
10496 
10497 
10498 
10499 static uint32_t
10500 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
10501 {
10502         uint32_t intr_mode = LPFC_INTR_ERROR;
10503         int retval;
10504 
10505         if (cfg_mode == 2) {
10506                 
10507                 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
10508                 if (!retval) {
10509                         
10510                         retval = lpfc_sli_enable_msix(phba);
10511                         if (!retval) {
10512                                 
10513                                 phba->intr_type = MSIX;
10514                                 intr_mode = 2;
10515                         }
10516                 }
10517         }
10518 
10519         
10520         if (cfg_mode >= 1 && phba->intr_type == NONE) {
10521                 retval = lpfc_sli_enable_msi(phba);
10522                 if (!retval) {
10523                         
10524                         phba->intr_type = MSI;
10525                         intr_mode = 1;
10526                 }
10527         }
10528 
10529         
10530         if (phba->intr_type == NONE) {
10531                 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
10532                                      IRQF_SHARED, LPFC_DRIVER_NAME, phba);
10533                 if (!retval) {
10534                         
10535                         phba->intr_type = INTx;
10536                         intr_mode = 0;
10537                 }
10538         }
10539         return intr_mode;
10540 }
10541 
10542 
10543 
10544 
10545 
10546 
10547 
10548 
10549 
10550 
10551 static void
10552 lpfc_sli_disable_intr(struct lpfc_hba *phba)
10553 {
10554         int nr_irqs, i;
10555 
10556         if (phba->intr_type == MSIX)
10557                 nr_irqs = LPFC_MSIX_VECTORS;
10558         else
10559                 nr_irqs = 1;
10560 
10561         for (i = 0; i < nr_irqs; i++)
10562                 free_irq(pci_irq_vector(phba->pcidev, i), phba);
10563         pci_free_irq_vectors(phba->pcidev);
10564 
10565         
10566         phba->intr_type = NONE;
10567         phba->sli.slistat.sli_intr = 0;
10568 }
10569 
10570 
10571 
10572 
10573 
10574 
10575 
10576 
10577 
10578 static uint16_t
10579 lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
10580 {
10581         struct lpfc_vector_map_info *cpup;
10582         int cpu;
10583 
10584         
10585         for_each_present_cpu(cpu) {
10586                 cpup = &phba->sli4_hba.cpu_map[cpu];
10587 
10588                 
10589 
10590 
10591 
10592                 if ((match == LPFC_FIND_BY_EQ) &&
10593                     (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
10594                     (cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
10595                     (cpup->eq == id))
10596                         return cpu;
10597 
10598                 
10599                 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
10600                         return cpu;
10601         }
10602         return 0;
10603 }
10604 
10605 #ifdef CONFIG_X86
10606 
10607 
10608 
10609 
10610 
10611 
10612 
10613 static int
10614 lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
10615                 uint16_t phys_id, uint16_t core_id)
10616 {
10617         struct lpfc_vector_map_info *cpup;
10618         int idx;
10619 
10620         for_each_present_cpu(idx) {
10621                 cpup = &phba->sli4_hba.cpu_map[idx];
10622                 
10623                 if ((cpup->phys_id == phys_id) &&
10624                     (cpup->core_id == core_id) &&
10625                     (cpu != idx))
10626                         return 1;
10627         }
10628         return 0;
10629 }
10630 #endif
10631 
10632 
10633 
10634 
10635 
10636 
10637 
10638 
10639 
10640 
10641 
10642 static void
10643 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
10644 {
10645         int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
10646         int max_phys_id, min_phys_id;
10647         int max_core_id, min_core_id;
10648         struct lpfc_vector_map_info *cpup;
10649         struct lpfc_vector_map_info *new_cpup;
10650         const struct cpumask *maskp;
10651 #ifdef CONFIG_X86
10652         struct cpuinfo_x86 *cpuinfo;
10653 #endif
10654 
10655         
10656         for_each_possible_cpu(cpu) {
10657                 cpup = &phba->sli4_hba.cpu_map[cpu];
10658                 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
10659                 cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
10660                 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
10661                 cpup->eq = LPFC_VECTOR_MAP_EMPTY;
10662                 cpup->irq = LPFC_VECTOR_MAP_EMPTY;
10663                 cpup->flag = 0;
10664         }
10665 
10666         max_phys_id = 0;
10667         min_phys_id = LPFC_VECTOR_MAP_EMPTY;
10668         max_core_id = 0;
10669         min_core_id = LPFC_VECTOR_MAP_EMPTY;
10670 
10671         
10672         for_each_present_cpu(cpu) {
10673                 cpup = &phba->sli4_hba.cpu_map[cpu];
10674 #ifdef CONFIG_X86
10675                 cpuinfo = &cpu_data(cpu);
10676                 cpup->phys_id = cpuinfo->phys_proc_id;
10677                 cpup->core_id = cpuinfo->cpu_core_id;
10678                 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
10679                         cpup->flag |= LPFC_CPU_MAP_HYPER;
10680 #else
10681                 
10682                 cpup->phys_id = 0;
10683                 cpup->core_id = cpu;
10684 #endif
10685 
10686                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10687                                 "3328 CPU %d physid %d coreid %d flag x%x\n",
10688                                 cpu, cpup->phys_id, cpup->core_id, cpup->flag);
10689 
10690                 if (cpup->phys_id > max_phys_id)
10691                         max_phys_id = cpup->phys_id;
10692                 if (cpup->phys_id < min_phys_id)
10693                         min_phys_id = cpup->phys_id;
10694 
10695                 if (cpup->core_id > max_core_id)
10696                         max_core_id = cpup->core_id;
10697                 if (cpup->core_id < min_core_id)
10698                         min_core_id = cpup->core_id;
10699         }
10700 
10701         for_each_possible_cpu(i) {
10702                 struct lpfc_eq_intr_info *eqi =
10703                         per_cpu_ptr(phba->sli4_hba.eq_info, i);
10704 
10705                 INIT_LIST_HEAD(&eqi->list);
10706                 eqi->icnt = 0;
10707         }
10708 
10709         
10710 
10711 
10712 
10713 
10714 
10715 
10716 
10717 
10718 
10719         for (idx = 0; idx <  phba->cfg_irq_chann; idx++) {
10720                 
10721                 maskp = pci_irq_get_affinity(phba->pcidev, idx);
10722                 if (!maskp) {
10723                         if (phba->cfg_irq_chann > 1)
10724                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10725                                                 "3329 No affinity mask found "
10726                                                 "for vector %d (%d)\n",
10727                                                 idx, phba->cfg_irq_chann);
10728                         if (!idx) {
10729                                 cpu = cpumask_first(cpu_present_mask);
10730                                 cpup = &phba->sli4_hba.cpu_map[cpu];
10731                                 cpup->eq = idx;
10732                                 cpup->irq = pci_irq_vector(phba->pcidev, idx);
10733                                 cpup->flag |= LPFC_CPU_FIRST_IRQ;
10734                         }
10735                         break;
10736                 }
10737 
10738                 i = 0;
10739                 
10740                 for_each_cpu_and(cpu, maskp, cpu_present_mask) {
10741                         
10742                         cpup = &phba->sli4_hba.cpu_map[cpu];
10743                         cpup->eq = idx;
10744                         cpup->irq = pci_irq_vector(phba->pcidev, idx);
10745 
10746                         
10747 
10748 
10749                         if (!i)
10750                                 cpup->flag |= LPFC_CPU_FIRST_IRQ;
10751                         i++;
10752 
10753                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10754                                         "3336 Set Affinity: CPU %d "
10755                                         "irq %d eq %d flag x%x\n",
10756                                         cpu, cpup->irq, cpup->eq, cpup->flag);
10757                 }
10758         }
10759 
10760         
10761 
10762 
10763 
10764 
10765         first_cpu = cpumask_first(cpu_present_mask);
10766         start_cpu = first_cpu;
10767 
10768         for_each_present_cpu(cpu) {
10769                 cpup = &phba->sli4_hba.cpu_map[cpu];
10770 
10771                 
10772                 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
10773                         
10774                         cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
10775 
10776                         
10777 
10778 
10779 
10780 
10781                         new_cpu = start_cpu;
10782                         for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
10783                                 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
10784                                 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
10785                                     (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
10786                                     (new_cpup->phys_id == cpup->phys_id))
10787                                         goto found_same;
10788                                 new_cpu = cpumask_next(
10789                                         new_cpu, cpu_present_mask);
10790                                 if (new_cpu == nr_cpumask_bits)
10791                                         new_cpu = first_cpu;
10792                         }
10793                         
10794                         continue;
10795 found_same:
10796                         
10797                         cpup->eq = new_cpup->eq;
10798                         cpup->irq = new_cpup->irq;
10799 
10800                         
10801 
10802 
10803 
10804                         start_cpu = cpumask_next(new_cpu, cpu_present_mask);
10805                         if (start_cpu == nr_cpumask_bits)
10806                                 start_cpu = first_cpu;
10807 
10808                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10809                                         "3337 Set Affinity: CPU %d "
10810                                         "irq %d from id %d same "
10811                                         "phys_id (%d)\n",
10812                                         cpu, cpup->irq, new_cpu, cpup->phys_id);
10813                 }
10814         }
10815 
10816         
10817         start_cpu = first_cpu;
10818 
10819         for_each_present_cpu(cpu) {
10820                 cpup = &phba->sli4_hba.cpu_map[cpu];
10821 
10822                 
10823                 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
10824                         
10825                         cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
10826 
10827                         
10828 
10829 
10830 
10831 
10832                         new_cpu = start_cpu;
10833                         for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
10834                                 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
10835                                 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
10836                                     (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY))
10837                                         goto found_any;
10838                                 new_cpu = cpumask_next(
10839                                         new_cpu, cpu_present_mask);
10840                                 if (new_cpu == nr_cpumask_bits)
10841                                         new_cpu = first_cpu;
10842                         }
10843                         
10844                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10845                                         "3339 Set Affinity: CPU %d "
10846                                         "irq %d UNASSIGNED\n",
10847                                         cpup->hdwq, cpup->irq);
10848                         continue;
10849 found_any:
10850                         
10851                         cpup->eq = new_cpup->eq;
10852                         cpup->irq = new_cpup->irq;
10853 
10854                         
10855 
10856 
10857 
10858                         start_cpu = cpumask_next(new_cpu, cpu_present_mask);
10859                         if (start_cpu == nr_cpumask_bits)
10860                                 start_cpu = first_cpu;
10861 
10862                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10863                                         "3338 Set Affinity: CPU %d "
10864                                         "irq %d from id %d (%d/%d)\n",
10865                                         cpu, cpup->irq, new_cpu,
10866                                         new_cpup->phys_id, new_cpup->core_id);
10867                 }
10868         }
10869 
10870         
10871 
10872 
10873         idx = 0;
10874         for_each_present_cpu(cpu) {
10875                 cpup = &phba->sli4_hba.cpu_map[cpu];
10876 
10877                 
10878                 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
10879                         continue;
10880 
10881                 
10882                 cpup->hdwq = idx;
10883                 idx++;
10884                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10885                                 "3333 Set Affinity: CPU %d (phys %d core %d): "
10886                                 "hdwq %d eq %d irq %d flg x%x\n",
10887                                 cpu, cpup->phys_id, cpup->core_id,
10888                                 cpup->hdwq, cpup->eq, cpup->irq, cpup->flag);
10889         }
10890         
10891 
10892 
10893 
10894 
10895 
10896 
10897 
10898         next_idx = idx;
10899         start_cpu = 0;
10900         idx = 0;
10901         for_each_present_cpu(cpu) {
10902                 cpup = &phba->sli4_hba.cpu_map[cpu];
10903 
10904                 
10905                 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
10906                         continue;
10907 
10908                 
10909 
10910 
10911 
10912                 if (next_idx < phba->cfg_hdw_queue) {
10913                         cpup->hdwq = next_idx;
10914                         next_idx++;
10915                         continue;
10916                 }
10917 
10918                 
10919 
10920 
10921 
10922 
10923                 new_cpu = start_cpu;
10924                 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
10925                         new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
10926                         if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
10927                             new_cpup->phys_id == cpup->phys_id &&
10928                             new_cpup->core_id == cpup->core_id) {
10929                                 goto found_hdwq;
10930                         }
10931                         new_cpu = cpumask_next(new_cpu, cpu_present_mask);
10932                         if (new_cpu == nr_cpumask_bits)
10933                                 new_cpu = first_cpu;
10934                 }
10935 
10936                 
10937 
10938 
10939                 new_cpu = start_cpu;
10940                 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
10941                         new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
10942                         if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
10943                             new_cpup->phys_id == cpup->phys_id)
10944                                 goto found_hdwq;
10945 
10946                         new_cpu = cpumask_next(new_cpu, cpu_present_mask);
10947                         if (new_cpu == nr_cpumask_bits)
10948                                 new_cpu = first_cpu;
10949                 }
10950 
10951                 
10952                 cpup->hdwq = idx % phba->cfg_hdw_queue;
10953                 idx++;
10954                 goto logit;
10955  found_hdwq:
10956                 
10957                 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
10958                 if (start_cpu == nr_cpumask_bits)
10959                         start_cpu = first_cpu;
10960                 cpup->hdwq = new_cpup->hdwq;
10961  logit:
10962                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10963                                 "3335 Set Affinity: CPU %d (phys %d core %d): "
10964                                 "hdwq %d eq %d irq %d flg x%x\n",
10965                                 cpu, cpup->phys_id, cpup->core_id,
10966                                 cpup->hdwq, cpup->eq, cpup->irq, cpup->flag);
10967         }
10968 
10969         
10970 
10971 
10972         return;
10973 }
10974 
10975 
10976 
10977 
10978 
10979 
10980 
10981 
/**
 * lpfc_cpuhp_get_eq - Collect EQs that lose their last online CPU.
 * @phba: pointer to lpfc hba data structure.
 * @cpu: the CPU going offline.
 * @eqlist: output list; EQs needing software polling are appended here
 *          via their _poll_list node.
 *
 * For each IRQ vector whose affinity mask contains @cpu and no other
 * online CPU, find one EQ served by that vector (via the cpu_map) and
 * add it to @eqlist so the caller can switch it to polled mode.
 */
static void
lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
		  struct list_head *eqlist)
{
	struct lpfc_vector_map_info *map;
	const struct cpumask *maskp;
	struct lpfc_queue *eq;
	unsigned int i;
	cpumask_t tmp;
	u16 idx;

	for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
		maskp = pci_irq_get_affinity(phba->pcidev, idx);
		if (!maskp)
			continue;
		/*
		 * If this vector's affinity mask does not include the
		 * departing CPU, the vector is unaffected - skip it.
		 */
		if (!cpumask_and(&tmp, maskp, cpumask_of(cpu)))
			continue;
		/*
		 * If the vector still has more than one online CPU in
		 * its mask, the interrupt keeps firing elsewhere after
		 * @cpu goes away, so no polling is needed.
		 * NOTE(review): cpumask_t tmp lives on the stack; large
		 * NR_CPUS configs may prefer a cpumask_var_t - confirm.
		 */
		cpumask_and(&tmp, maskp, cpu_online_mask);
		if (cpumask_weight(&tmp) > 1)
			continue;

		/*
		 * This vector is about to lose its only online CPU.
		 * Locate an EQ tied to it through the cpu_map and queue
		 * that EQ for software polling.
		 */
		for_each_possible_cpu(i) {
			map = &phba->sli4_hba.cpu_map[i];
			if (!(map->irq == pci_irq_vector(phba->pcidev, idx)))
				continue;
			eq = phba->sli4_hba.hdwq[map->hdwq].hba_eq;
			list_add(&eq->_poll_list, eqlist);
			/* One EQ per vector is enough */
			break;
		}
	}
}
11030 
11031 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
11032 {
11033         if (phba->sli_rev != LPFC_SLI_REV4)
11034                 return;
11035 
11036         cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
11037                                             &phba->cpuhp);
11038         
11039 
11040 
11041 
11042         synchronize_rcu();
11043         del_timer_sync(&phba->cpuhp_poll_timer);
11044 }
11045 
11046 static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
11047 {
11048         if (phba->pport->fc_flag & FC_OFFLINE_MODE)
11049                 return;
11050 
11051         __lpfc_cpuhp_remove(phba);
11052 }
11053 
/* Register this HBA with the CPU-hotplug state machine and, if any EQs
 * are already on the poll list, (re)arm the poll heartbeat timer.
 * SLI-4 only; earlier SLI revisions do not use EQ polling.
 */
static void lpfc_cpuhp_add(struct lpfc_hba *phba)
{
	if (phba->sli_rev != LPFC_SLI_REV4)
		return;

	/* RCU read section pairs with the synchronize_rcu() in
	 * __lpfc_cpuhp_remove() while the poll list is inspected.
	 */
	rcu_read_lock();

	if (!list_empty(&phba->poll_list)) {
		timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
		mod_timer(&phba->cpuhp_poll_timer,
			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));
	}

	rcu_read_unlock();

	cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
					 &phba->cpuhp);
}
11072 
11073 static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
11074 {
11075         if (phba->pport->load_flag & FC_UNLOADING) {
11076                 *retval = -EAGAIN;
11077                 return true;
11078         }
11079 
11080         if (phba->sli_rev != LPFC_SLI_REV4) {
11081                 *retval = 0;
11082                 return true;
11083         }
11084 
11085         
11086         return false;
11087 }
11088 
11089 static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
11090 {
11091         struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
11092         struct lpfc_queue *eq, *next;
11093         LIST_HEAD(eqlist);
11094         int retval;
11095 
11096         if (!phba) {
11097                 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
11098                 return 0;
11099         }
11100 
11101         if (__lpfc_cpuhp_checks(phba, &retval))
11102                 return retval;
11103 
11104         lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
11105 
11106         
11107         list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
11108                 list_del_init(&eq->_poll_list);
11109                 lpfc_sli4_start_polling(eq);
11110         }
11111 
11112         return 0;
11113 }
11114 
11115 static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
11116 {
11117         struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
11118         struct lpfc_queue *eq, *next;
11119         unsigned int n;
11120         int retval;
11121 
11122         if (!phba) {
11123                 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
11124                 return 0;
11125         }
11126 
11127         if (__lpfc_cpuhp_checks(phba, &retval))
11128                 return retval;
11129 
11130         list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
11131                 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
11132                 if (n == cpu)
11133                         lpfc_sli4_stop_polling(eq);
11134         }
11135 
11136         return 0;
11137 }
11138 
11139 
11140 
11141 
11142 
11143 
11144 
11145 
11146 
11147 
11148 
11149 
/**
 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocates up to cfg_irq_chann MSI-X vectors (with affinity spreading)
 * and registers the SLI-4 fast-path handler on each.  If the PCI core
 * grants fewer vectors than requested, cfg_irq_chann (and cfg_nvmet_mrq
 * when NVMET is enabled) are trimmed to match.
 *
 * Return: 0 on success; a negative errno on failure, with any vectors
 * and IRQs acquired so far released.
 */
static int
lpfc_sli4_enable_msix(struct lpfc_hba *phba)
{
        int vectors, rc, index;
        char *name;

        /* Set up MSI-X multi-message vectors */
        vectors = phba->cfg_irq_chann;

        rc = pci_alloc_irq_vectors(phba->pcidev,
                                1,
                                vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
        if (rc < 0) {
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                "0484 PCI enable MSI-X failed (%d)\n", rc);
                goto vec_fail_out;
        }
        vectors = rc;   /* PCI core may grant fewer than requested */

        /* Assign MSI-X vectors to interrupt handlers */
        for (index = 0; index < vectors; index++) {
                name = phba->sli4_hba.hba_eq_hdl[index].handler_name;
                memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
                snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
                         LPFC_DRIVER_HANDLER_NAME"%d", index);

                phba->sli4_hba.hba_eq_hdl[index].idx = index;
                phba->sli4_hba.hba_eq_hdl[index].phba = phba;
                rc = request_irq(pci_irq_vector(phba->pcidev, index),
                         &lpfc_sli4_hba_intr_handler, 0,
                         name,
                         &phba->sli4_hba.hba_eq_hdl[index]);
                if (rc) {
                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                                        "0486 MSI-X fast-path (%d) "
                                        "request_irq failed (%d)\n", index, rc);
                        goto cfg_fail_out;
                }
        }

        /* Fewer vectors than IO channels: shrink channel counts to match */
        if (vectors != phba->cfg_irq_chann) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "3238 Reducing IO channels to match number of "
                                "MSI-X vectors, requested %d got %d\n",
                                phba->cfg_irq_chann, vectors);
                if (phba->cfg_irq_chann > vectors)
                        phba->cfg_irq_chann = vectors;
                if (phba->nvmet_support && (phba->cfg_nvmet_mrq > vectors))
                        phba->cfg_nvmet_mrq = vectors;
        }

        return rc;

cfg_fail_out:
        /* free the irq already requested */
        for (--index; index >= 0; index--)
                free_irq(pci_irq_vector(phba->pcidev, index),
                                &phba->sli4_hba.hba_eq_hdl[index]);

        /* Unconfigure MSI-X capability structure */
        pci_free_irq_vectors(phba->pcidev);

vec_fail_out:
        return rc;
}
11215 
11216 
11217 
11218 
11219 
11220 
11221 
11222 
11223 
11224 
11225 
11226 
11227 
11228 
11229 
11230 static int
11231 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
11232 {
11233         int rc, index;
11234 
11235         rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
11236                                    PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
11237         if (rc > 0)
11238                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11239                                 "0487 PCI enable MSI mode success.\n");
11240         else {
11241                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11242                                 "0488 PCI enable MSI mode failed (%d)\n", rc);
11243                 return rc ? rc : -1;
11244         }
11245 
11246         rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
11247                          0, LPFC_DRIVER_NAME, phba);
11248         if (rc) {
11249                 pci_free_irq_vectors(phba->pcidev);
11250                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11251                                 "0490 MSI request_irq failed (%d)\n", rc);
11252                 return rc;
11253         }
11254 
11255         for (index = 0; index < phba->cfg_irq_chann; index++) {
11256                 phba->sli4_hba.hba_eq_hdl[index].idx = index;
11257                 phba->sli4_hba.hba_eq_hdl[index].phba = phba;
11258         }
11259 
11260         return 0;
11261 }
11262 
11263 
11264 
11265 
11266 
11267 
11268 
11269 
11270 
11271 
11272 
11273 
11274 
11275 
11276 
11277 
11278 
11279 static uint32_t
11280 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
11281 {
11282         uint32_t intr_mode = LPFC_INTR_ERROR;
11283         int retval, idx;
11284 
11285         if (cfg_mode == 2) {
11286                 
11287                 retval = 0;
11288                 if (!retval) {
11289                         
11290                         retval = lpfc_sli4_enable_msix(phba);
11291                         if (!retval) {
11292                                 
11293                                 phba->intr_type = MSIX;
11294                                 intr_mode = 2;
11295                         }
11296                 }
11297         }
11298 
11299         
11300         if (cfg_mode >= 1 && phba->intr_type == NONE) {
11301                 retval = lpfc_sli4_enable_msi(phba);
11302                 if (!retval) {
11303                         
11304                         phba->intr_type = MSI;
11305                         intr_mode = 1;
11306                 }
11307         }
11308 
11309         
11310         if (phba->intr_type == NONE) {
11311                 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
11312                                      IRQF_SHARED, LPFC_DRIVER_NAME, phba);
11313                 if (!retval) {
11314                         struct lpfc_hba_eq_hdl *eqhdl;
11315 
11316                         
11317                         phba->intr_type = INTx;
11318                         intr_mode = 0;
11319 
11320                         for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11321                                 eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
11322                                 eqhdl->idx = idx;
11323                                 eqhdl->phba = phba;
11324                         }
11325                 }
11326         }
11327         return intr_mode;
11328 }
11329 
11330 
11331 
11332 
11333 
11334 
11335 
11336 
11337 
11338 
/**
 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * Releases the IRQ(s) acquired by lpfc_sli4_enable_intr() for whichever
 * mode is active (per-vector handles for MSI-X; the single device IRQ
 * for MSI/INTx), frees the PCI vectors and resets the driver's
 * interrupt bookkeeping back to NONE.
 */
static void
lpfc_sli4_disable_intr(struct lpfc_hba *phba)
{
        /* Disable the currently initialized interrupt mode */
        if (phba->intr_type == MSIX) {
                int index;

                /* Free up MSI-X multi-message vectors */
                for (index = 0; index < phba->cfg_irq_chann; index++) {
                        /* Clear the affinity hint before releasing the IRQ */
                        irq_set_affinity_hint(
                                pci_irq_vector(phba->pcidev, index),
                                NULL);
                        free_irq(pci_irq_vector(phba->pcidev, index),
                                        &phba->sli4_hba.hba_eq_hdl[index]);
                }
        } else {
                /* MSI or INTx: a single IRQ registered against phba */
                free_irq(phba->pcidev->irq, phba);
        }

        pci_free_irq_vectors(phba->pcidev);

        /* Reset interrupt management states */
        phba->intr_type = NONE;
        phba->sli.slistat.sli_intr = 0;
}
11364 
11365 
11366 
11367 
11368 
11369 
11370 
11371 
/**
 * lpfc_unset_hba - Unset SLI-3 hba device initialization
 * @phba: pointer to lpfc hba data structure.
 *
 * Reverses basic SLI-3 device initialization (probe error path /
 * unload helper): marks the physical port as unloading, frees the VPI
 * resources, stops timers, brings the SLI layer down, restarts the
 * board and releases the device interrupt.
 */
static void
lpfc_unset_hba(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport = phba->pport;
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);

        /* Flag the port so no new work is accepted while tearing down */
        spin_lock_irq(shost->host_lock);
        vport->load_flag |= FC_UNLOADING;
        spin_unlock_irq(shost->host_lock);

        kfree(phba->vpi_bmask);
        kfree(phba->vpi_ids);

        lpfc_stop_hba_timers(phba);

        phba->pport->work_port_events = 0;

        lpfc_sli_hba_down(phba);

        lpfc_sli_brdrestart(phba);

        lpfc_sli_disable_intr(phba);

        return;
}
11397 
11398 
11399 
11400 
11401 
11402 
11403 
11404 
11405 
11406 
11407 
11408 
11409 
11410 
11411 static void
11412 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
11413 {
11414         struct lpfc_sli4_hdw_queue *qp;
11415         int idx, ccnt;
11416         int wait_time = 0;
11417         int io_xri_cmpl = 1;
11418         int nvmet_xri_cmpl = 1;
11419         int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
11420 
11421         
11422 
11423 
11424 
11425         msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
11426 
11427         
11428         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
11429                 lpfc_nvme_wait_for_io_drain(phba);
11430 
11431         ccnt = 0;
11432         for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
11433                 qp = &phba->sli4_hba.hdwq[idx];
11434                 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
11435                 if (!io_xri_cmpl) 
11436                         ccnt++;
11437         }
11438         if (ccnt)
11439                 io_xri_cmpl = 0;
11440 
11441         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11442                 nvmet_xri_cmpl =
11443                         list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
11444         }
11445 
11446         while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
11447                 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
11448                         if (!nvmet_xri_cmpl)
11449                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11450                                                 "6424 NVMET XRI exchange busy "
11451                                                 "wait time: %d seconds.\n",
11452                                                 wait_time/1000);
11453                         if (!io_xri_cmpl)
11454                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11455                                                 "6100 IO XRI exchange busy "
11456                                                 "wait time: %d seconds.\n",
11457                                                 wait_time/1000);
11458                         if (!els_xri_cmpl)
11459                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11460                                                 "2878 ELS XRI exchange busy "
11461                                                 "wait time: %d seconds.\n",
11462                                                 wait_time/1000);
11463                         msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
11464                         wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
11465                 } else {
11466                         msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
11467                         wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
11468                 }
11469 
11470                 ccnt = 0;
11471                 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
11472                         qp = &phba->sli4_hba.hdwq[idx];
11473                         io_xri_cmpl = list_empty(
11474                             &qp->lpfc_abts_io_buf_list);
11475                         if (!io_xri_cmpl) 
11476                                 ccnt++;
11477                 }
11478                 if (ccnt)
11479                         io_xri_cmpl = 0;
11480 
11481                 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11482                         nvmet_xri_cmpl = list_empty(
11483                                 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
11484                 }
11485                 els_xri_cmpl =
11486                         list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
11487 
11488         }
11489 }
11490 
11491 
11492 
11493 
11494 
11495 
11496 
11497 
11498 
11499 
11500 
/**
 * lpfc_sli4_hba_unset - Unset the SLI-4 HBA device initialization
 * @phba: pointer to lpfc hba data structure.
 *
 * Tears the SLI-4 side of the device down in bring-down order: stop
 * timers, quiesce the mailbox subsystem, abort outstanding IOCBs, wait
 * for busy XRI exchanges to drain, then release IRQs, queues and DMA
 * resources and reset the PCI function.
 */
static void
lpfc_sli4_hba_unset(struct lpfc_hba *phba)
{
        int wait_cnt = 0;
        LPFC_MBOXQ_t *mboxq;
        struct pci_dev *pdev = phba->pcidev;

        lpfc_stop_hba_timers(phba);
        if (phba->pport)
                phba->sli4_hba.intr_enable = 0;

        /*
         * Gracefully wait out the potential current outstanding asynchronous
         * mailbox command.
         */

        /* First, block any pending async mailbox command from posting */
        spin_lock_irq(&phba->hbalock);
        phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
        spin_unlock_irq(&phba->hbalock);
        /* Now, try to wait out any active mailbox command */
        while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
                msleep(10);
                if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
                        break;
        }
        /* Timed out: forcefully complete the outstanding mailbox command */
        if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
                spin_lock_irq(&phba->hbalock);
                mboxq = phba->sli.mbox_active;
                mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
                __lpfc_mbox_cmpl_put(phba, mboxq);
                phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
                phba->sli.mbox_active = NULL;
                spin_unlock_irq(&phba->hbalock);
        }

        /* Abort all iocbs associated with the hba */
        lpfc_sli_hba_iocb_abort(phba);

        /* Wait for completion of device XRI exchange busy */
        lpfc_sli4_xri_exchange_busy_wait(phba);

        /* Remove the per-phba CPU hotplug callback registration */
        lpfc_cpuhp_remove(phba);

        /* Disable PCI subsystem interrupt */
        lpfc_sli4_disable_intr(phba);

        /* Disable SR-IOV if enabled */
        if (phba->cfg_sriov_nr_virtfn)
                pci_disable_sriov(pdev);

        /* Stop the worker kthread */
        kthread_stop(phba->worker_thread);

        /* Stop firmware logging to host memory */
        lpfc_ras_stop_fwlog(phba);

        /* Unset the queues shared with the hardware, then release all
         * allocated queue resources.
         */
        lpfc_sli4_queue_unset(phba);
        lpfc_sli4_queue_destroy(phba);

        /* Reset SLI4 HBA FCoE function */
        lpfc_pci_function_reset(phba);

        /* Free RAS DMA memory if it was allocated */
        if (phba->ras_fwlog.ras_enabled)
                lpfc_sli4_ras_dma_free(phba);

        /* Clear any remaining port work events */
        if (phba->pport)
                phba->pport->work_port_events = 0;
}
11577 
11578  
11579 
11580 
11581 
11582 
11583 
11584 
11585 
11586 
11587 
11588 
11589 
/**
 * lpfc_pc_sli4_params_get - Get the SLI-4 port parameters
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailbox memory used for the command/response.
 *
 * Issues the SLI4_PARAMS mailbox command -- polled when interrupts are
 * not yet enabled, otherwise with the standard mailbox timeout -- and
 * caches the returned capabilities in phba->sli4_hba.pc_sli4_params.
 *
 * Return: 0 on success, 1 if the mailbox command failed.
 */
int
lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
        int rc;
        struct lpfc_mqe *mqe;
        struct lpfc_pc_sli4_params *sli4_params;
        uint32_t mbox_tmo;

        rc = 0;
        mqe = &mboxq->u.mqe;

        /* Read the port's SLI4 Parameters port capabilities */
        lpfc_pc_sli4_params(mboxq);
        if (!phba->sli4_hba.intr_enable)
                rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
        else {
                mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
                rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
        }

        if (unlikely(rc))
                return 1;

        /* Extract each capability field from the mailbox response */
        sli4_params = &phba->sli4_hba.pc_sli4_params;
        sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
        sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
        sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
        sli4_params->featurelevel_1 = bf_get(featurelevel_1,
                                             &mqe->un.sli4_params);
        sli4_params->featurelevel_2 = bf_get(featurelevel_2,
                                             &mqe->un.sli4_params);
        sli4_params->proto_types = mqe->un.sli4_params.word3;
        sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
        sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
        sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
        sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
        sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
        sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
        sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
        sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
        sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
        sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
        sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
        sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
        sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
        sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
        sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
        sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
        sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
        sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
        sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
        sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);

        /* Make sure that sge_supp_len can be handled by the driver */
        if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
                sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

        return rc;
}
11649 
11650 
11651 
11652 
11653 
11654 
11655 
11656 
11657 
11658 
11659 
11660 
11661 
/**
 * lpfc_get_sli4_parameters - Get the SLI-4 Config parameters.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailbox memory used for the command/response.
 *
 * Issues the GET_SLI4_PARAMETERS mailbox command and translates the
 * response into driver configuration: SLI capabilities, NVME support
 * (falling back to FCP-only when firmware lacks NVME), PBDE,
 * suppress-response, EQ delay register, embedded FCP IO, expanded
 * WQ/CQ page sizes, MDS diagnostics and NVME sequence-level error
 * recovery.
 *
 * Return: 0 on success, a mailbox error code on command failure, or
 * -ENODEV when firmware supports neither configured FC4 type.
 */
int
lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
        int rc;
        struct lpfc_mqe *mqe = &mboxq->u.mqe;
        struct lpfc_pc_sli4_params *sli4_params;
        uint32_t mbox_tmo;
        int length;
        bool exp_wqcq_pages = true;
        struct lpfc_sli4_parameters *mbx_sli4_parameters;

        /*
         * By default, the driver assumes the SLI4 port requires RPI
         * header postings.  The SLI4_PARAM response will correct this
         * assumption below.
         */
        phba->sli4_hba.rpi_hdrs_in_use = 1;

        /* Read the port's SLI4 Config Parameters */
        length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
                  sizeof(struct lpfc_sli4_cfg_mhdr));
        lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
                         LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
                         length, LPFC_SLI4_MBX_EMBED);
        if (!phba->sli4_hba.intr_enable)
                rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
        else {
                mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
                rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
        }
        if (unlikely(rc))
                return rc;
        sli4_params = &phba->sli4_hba.pc_sli4_params;
        mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
        sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
        sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
        sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
        sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
                                             mbx_sli4_parameters);
        sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
                                             mbx_sli4_parameters);
        if (bf_get(cfg_phwq, mbx_sli4_parameters))
                phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
        else
                phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
        sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
        sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
        sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
        sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
        sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
        sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
        sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
        sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
        sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
        sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
        sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
        sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
                                            mbx_sli4_parameters);
        sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
        sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
                                           mbx_sli4_parameters);
        phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
        phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);

        /* Check for Extended Pre-Registered SGL support */
        phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);

        /* Check for firmware nvme support: needs both cfg_nvme and cfg_xib */
        rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
                     bf_get(cfg_xib, mbx_sli4_parameters));

        if (rc) {
                /* Save this to indicate the Firmware supports NVME */
                sli4_params->nvme = 1;

                /* Firmware NVME support, but check the driver FC4 config */
                if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
                                        "6133 Disabling NVME support: "
                                        "FC4 type not supported: x%x\n",
                                        phba->cfg_enable_fc4_type);
                        /* NOTE: jumps INTO the else branch below to share
                         * the common FCP-only fallback code.
                         */
                        goto fcponly;
                }
        } else {
                /* No firmware NVME support, fall back to FCP support only */
                sli4_params->nvme = 0;
                if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
                                        "6101 Disabling NVME support: Not "
                                        "supported by firmware (%d %d) x%x\n",
                                        bf_get(cfg_nvme, mbx_sli4_parameters),
                                        bf_get(cfg_xib, mbx_sli4_parameters),
                                        phba->cfg_enable_fc4_type);
fcponly:
                        phba->nvme_support = 0;
                        phba->nvmet_support = 0;
                        phba->cfg_nvmet_mrq = 0;
                        phba->cfg_nvme_seg_cnt = 0;

                        /* If no FCP support either, there is nothing left */
                        if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
                                return -ENODEV;
                        phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
                }
        }

        /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
         * accommodate 512K and 1M IOs in a single nvme buf.
         */
        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
                phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
                phba->cfg_iocb_cnt = 5;
        }

        /* Only embed PBDE for if_type 6 with XIB capability */
        if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
            LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters)))
                phba->cfg_enable_pbde = 0;

        /*
         * To support Suppress Response feature we must satisfy 3 conditions:
         * the suppress_rsp module parameter must be set, the XIB capability
         * must be present, and the firmware must not set "no suppress rsp".
         */
        if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
            !(bf_get(cfg_nosr, mbx_sli4_parameters)))
                phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
        else
                phba->cfg_suppress_rsp = 0;

        if (bf_get(cfg_eqdr, mbx_sli4_parameters))
                phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;

        /* Make sure that sge_supp_len can be handled by the driver */
        if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
                sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

        /*
         * Check whether the adapter supports an embedded copy of the
         * FCP CMD IU within the WQE for FCP_Ixxx commands; if not,
         * don't allow embedded IO.
         */
        if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
                phba->fcp_embed_io = 1;
        else
                phba->fcp_embed_io = 0;

        lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
                        "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
                        bf_get(cfg_xib, mbx_sli4_parameters),
                        phba->cfg_enable_pbde,
                        phba->fcp_embed_io, phba->nvme_support,
                        phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);

        /* LNCR A0 family does not support expanded WQ/CQ pages */
        if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
            LPFC_SLI_INTF_IF_TYPE_2) &&
            (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
                 LPFC_SLI_INTF_FAMILY_LNCR_A0))
                exp_wqcq_pages = false;

        if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
            (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
            exp_wqcq_pages &&
            (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
                phba->enab_exp_wqcq_pages = 1;
        else
                phba->enab_exp_wqcq_pages = 0;
        /*
         * Check if the SLI port supports MDS Diagnostics
         */
        if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
                phba->mds_diags_support = 1;
        else
                phba->mds_diags_support = 0;

        /*
         * Check if the SLI port supports NSLER (sequence-level error
         * recovery for NVME).
         */
        if (bf_get(cfg_nsler, mbx_sli4_parameters))
                phba->nsler = 1;
        else
                phba->nsler = 0;

        return 0;
}
11851 
11852 
11853 
11854 
11855 
11856 
11857 
11858 
11859 
11860 
11861 
11862 
11863 
11864 
11865 
11866 
11867 
11868 
/**
 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
 * @pdev: pointer to PCI device.
 * @pid: pointer to PCI device identifier.
 *
 * Invoked by the PCI subsystem when an Emulex SLI-3 HBA is detected.
 * Builds the driver's per-adapter state in order -- HBA struct, PCI
 * enable, API table, PCI memory map, driver resources, IOCB list,
 * SCSI host -- then brings up interrupts, retrying in decreasing
 * interrupt-mode order (MSI-X -> MSI -> INTx) until a mode passes the
 * active-interrupt test.  On any failure, the goto ladder unwinds
 * exactly the steps already completed.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int
lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
{
        struct lpfc_hba   *phba;
        struct lpfc_vport *vport = NULL;
        struct Scsi_Host  *shost = NULL;
        int error;
        uint32_t cfg_mode, intr_mode;

        /* Allocate memory for HBA structure */
        phba = lpfc_hba_alloc(pdev);
        if (!phba)
                return -ENOMEM;

        /* Perform generic PCI device enabling operation */
        error = lpfc_enable_pci_dev(phba);
        if (error)
                goto out_free_phba;

        /* Set up SLI API function jump table for PCI-device group-0 HBAs */
        error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
        if (error)
                goto out_disable_pci_dev;

        /* Set up SLI-3 specific device PCI memory space */
        error = lpfc_sli_pci_mem_setup(phba);
        if (error) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "1402 Failed to set up pci memory space.\n");
                goto out_disable_pci_dev;
        }

        /* Set up SLI-3 specific device driver resources */
        error = lpfc_sli_driver_resource_setup(phba);
        if (error) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "1404 Failed to set up driver resource.\n");
                goto out_unset_pci_mem_s3;
        }

        /* Initialize and populate the iocb list per host */

        error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
        if (error) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "1405 Failed to initialize iocb list.\n");
                goto out_unset_driver_resource_s3;
        }

        /* Set up common device driver resources */
        error = lpfc_setup_driver_resource_phase2(phba);
        if (error) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "1406 Failed to set up driver resource.\n");
                goto out_free_iocb_list;
        }

        /* Get the default values for Model Name and Description */
        lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

        /* Create SCSI host to the physical port */
        error = lpfc_create_shost(phba);
        if (error) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "1407 Failed to create scsi host.\n");
                goto out_unset_driver_resource;
        }

        /* Configure sysfs attributes */
        vport = phba->pport;
        error = lpfc_alloc_sysfs_attr(vport);
        if (error) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "1476 Failed to allocate sysfs attr\n");
                goto out_destroy_shost;
        }

        shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
        /* Now, trying to enable interrupt and bring up the device */
        cfg_mode = phba->cfg_use_msi;
        while (true) {
                /* Put device to a known state before enabling interrupt */
                lpfc_stop_port(phba);
                /* Configure and enable interrupt */
                intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
                if (intr_mode == LPFC_INTR_ERROR) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "0431 Failed to enable interrupt.\n");
                        error = -ENODEV;
                        goto out_free_sysfs_attr;
                }
                /* SLI-3 HBA setup */
                if (lpfc_sli_hba_setup(phba)) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "1477 Failed to set up hba\n");
                        error = -ENODEV;
                        goto out_remove_device;
                }

                /* Wait 50ms for the interrupts of previous mailbox commands */
                msleep(50);
                /* Check active interrupts on message signaled interrupts */
                if (intr_mode == 0 ||
                    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
                        /* Log the current active interrupt mode */
                        phba->intr_mode = intr_mode;
                        lpfc_log_intr_mode(phba, intr_mode);
                        break;
                } else {
                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                        "0447 Configure interrupt mode (%d) "
                                        "failed active interrupt test.\n",
                                        intr_mode);
                        /* Disable the current interrupt mode */
                        lpfc_sli_disable_intr(phba);
                        /* Try next level of interrupt mode */
                        cfg_mode = --intr_mode;
                }
        }

        /* Perform post initialization setup */
        lpfc_post_init_setup(phba);

        /* Check if there are static vports to be created. */
        lpfc_create_static_vport(phba);

        return 0;

out_remove_device:
        lpfc_unset_hba(phba);
out_free_sysfs_attr:
        lpfc_free_sysfs_attr(vport);
out_destroy_shost:
        lpfc_destroy_shost(phba);
out_unset_driver_resource:
        lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
        lpfc_free_iocb_list(phba);
out_unset_driver_resource_s3:
        lpfc_sli_driver_resource_unset(phba);
out_unset_pci_mem_s3:
        lpfc_sli_pci_mem_unset(phba);
out_disable_pci_dev:
        lpfc_disable_pci_dev(phba);
        if (shost)
                scsi_host_put(shost);
out_free_phba:
        lpfc_hba_free(phba);
        return error;
}
12019 
12020 
12021 
12022 
12023 
12024 
12025 
12026 
12027 
12028 
/**
 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * Tear-down routine invoked by the PCI subsystem when an SLI-3 HBA is
 * removed from the system (hot-plug remove or driver unload).  It roughly
 * mirrors the probe path in reverse: terminate vports, detach from the
 * SCSI/FC midlayers, bring down the SLI layer, release interrupts, DMA
 * memory, mapped registers, and finally the PCI device itself.
 */
static void
lpfc_pci_remove_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba   *phba = vport->phba;
	int i;

	/* Mark the port as unloading so new requests are rejected */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer.  This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA.
	 */
	lpfc_sli_hba_down(phba);
	/* Stop kthread after the SLI layer is down so no new work is queued */
	kthread_stop(phba->worker_thread);
	/* Final cleanup: restart/reset the board */
	lpfc_sli_brdrestart(phba);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->port_list_lock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->port_list_lock);

	lpfc_debugfs_terminate(vport);

	/* Disable SR-IOV virtual functions if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Disable interrupt */
	lpfc_sli_disable_intr(phba);

	scsi_host_put(shost);

	/*
	 * Call scsi_free before mem_free since scsi bufs are released to
	 * their corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_free_iocb_list(phba);

	lpfc_mem_free_all(phba);

	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);

	/* Free resources associated with the SLI2 interface */
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* unmap adapter SLIM and Control Registers */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	lpfc_hba_free(phba);

	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
12117 
12118 
12119 
12120 
12121 
12122 
12123 
12124 
12125 
12126 
12127 
12128 
12129 
12130 
12131 
12132 
12133 
12134 
12135 
12136 
12137 
12138 
/**
 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * Quiesces the SLI-3 HBA for a PM suspend: brings the port offline, stops
 * the worker thread, disables the device interrupt, and puts the PCI
 * device into the D3hot power state.
 *
 * Return code
 *	0 - driver suspended the device
 */
static int
lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0473 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli_disable_intr(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
12162 
12163 
12164 
12165 
12166 
12167 
12168 
12169 
12170 
12171 
12172 
12173 
12174 
12175 
12176 
12177 
12178 
12179 
12180 
12181 
12182 static int
12183 lpfc_pci_resume_one_s3(struct pci_dev *pdev)
12184 {
12185         struct Scsi_Host *shost = pci_get_drvdata(pdev);
12186         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12187         uint32_t intr_mode;
12188         int error;
12189 
12190         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12191                         "0452 PCI device Power Management resume.\n");
12192 
12193         
12194         pci_set_power_state(pdev, PCI_D0);
12195         pci_restore_state(pdev);
12196 
12197         
12198 
12199 
12200 
12201         pci_save_state(pdev);
12202 
12203         if (pdev->is_busmaster)
12204                 pci_set_master(pdev);
12205 
12206         
12207         phba->worker_thread = kthread_run(lpfc_do_work, phba,
12208                                         "lpfc_worker_%d", phba->brd_no);
12209         if (IS_ERR(phba->worker_thread)) {
12210                 error = PTR_ERR(phba->worker_thread);
12211                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12212                                 "0434 PM resume failed to start worker "
12213                                 "thread: error=x%x.\n", error);
12214                 return error;
12215         }
12216 
12217         
12218         intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
12219         if (intr_mode == LPFC_INTR_ERROR) {
12220                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12221                                 "0430 PM resume Failed to enable interrupt\n");
12222                 return -EIO;
12223         } else
12224                 phba->intr_mode = intr_mode;
12225 
12226         
12227         lpfc_sli_brdrestart(phba);
12228         lpfc_online(phba);
12229 
12230         
12231         lpfc_log_intr_mode(phba, phba->intr_mode);
12232 
12233         return 0;
12234 }
12235 
12236 
12237 
12238 
12239 
12240 
12241 
12242 
/**
 * lpfc_sli_prep_dev_for_recover - Prepare SLI-3 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * Called to prepare the SLI-3 device for a PCI channel recovery (non-fatal
 * I/O error).
 */
static void
lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2723 PCI channel I/O abort preparing for recovery\n");

	/*
	 * There may be errored I/Os through the HBA; abort all I/Os on the
	 * txcmplq and let the SCSI mid-layer retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}
12255 
12256 
12257 
12258 
12259 
12260 
12261 
12262 
12263 
/**
 * lpfc_sli_prep_dev_for_reset - Prepare SLI-3 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * Called to prepare the SLI-3 device for a PCI slot reset: quiesce all
 * I/O paths, flush outstanding I/Os, stop timers, and disable the
 * interrupt and PCI device.  Order matters here — I/O must be blocked
 * before it is flushed, and timers stopped before interrupts go away.
 */
static void
lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2710 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding I/Os as we are to reset */
	lpfc_sli_flush_io_rings(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli_disable_intr(phba);
	pci_disable_device(phba->pcidev);
}
12286 
12287 
12288 
12289 
12290 
12291 
12292 
12293 
12294 
/**
 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI-3 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * Called to prepare the SLI-3 device for a PCI slot permanently disabled
 * state: block further SCSI I/O, stop timers, and clean up outstanding
 * I/Os.  No reset/re-enable is attempted — the device is going away.
 */
static void
lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2711 PCI channel permanent disable for failure\n");
	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding I/Os */
	lpfc_sli_flush_io_rings(phba);
}
12309 
12310 
12311 
12312 
12313 
12314 
12315 
12316 
12317 
12318 
12319 
12320 
12321 
12322 
12323 
12324 
12325 
12326 
12327 
12328 static pci_ers_result_t
12329 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
12330 {
12331         struct Scsi_Host *shost = pci_get_drvdata(pdev);
12332         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12333 
12334         switch (state) {
12335         case pci_channel_io_normal:
12336                 
12337                 lpfc_sli_prep_dev_for_recover(phba);
12338                 return PCI_ERS_RESULT_CAN_RECOVER;
12339         case pci_channel_io_frozen:
12340                 
12341                 lpfc_sli_prep_dev_for_reset(phba);
12342                 return PCI_ERS_RESULT_NEED_RESET;
12343         case pci_channel_io_perm_failure:
12344                 
12345                 lpfc_sli_prep_dev_for_perm_failure(phba);
12346                 return PCI_ERS_RESULT_DISCONNECT;
12347         default:
12348                 
12349                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12350                                 "0472 Unknown PCI error state: x%x\n", state);
12351                 lpfc_sli_prep_dev_for_reset(phba);
12352                 return PCI_ERS_RESULT_NEED_RESET;
12353         }
12354 }
12355 
12356 
12357 
12358 
12359 
12360 
12361 
12362 
12363 
12364 
12365 
12366 
12367 
12368 
12369 
12370 
12371 
12372 
12373 
/**
 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch
 * @pdev: pointer to PCI device.
 *
 * Called after the PCI bus has been reset to restart the PCI card from
 * scratch: re-enable the device, restore its state, re-enable the
 * interrupt handler, and restart the HBA.  Recovery is not complete when
 * this returns; the io_resume handler finishes bringing the port online.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 */
static pci_ers_result_t
lpfc_io_slot_reset_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
			"PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * pci_restore_state() clears the device's saved_state flag, so
	 * the restored state must be saved again for a later suspend.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Mark the SLI layer inactive until the HBA is brought back up */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0427 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Take device offline, so the HBA can be restarted cleanly */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}
12424 
12425 
12426 
12427 
12428 
12429 
12430 
12431 
12432 
12433 
12434 
/**
 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
 * @pdev: pointer to PCI device
 *
 * Called when the PCI error-recovery subsystem reports that it is OK to
 * resume normal PCI operation after bus error recovery.
 */
static void
lpfc_io_resume_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/* Bring device online; it is a no-op for non-fatal error resume */
	lpfc_online(phba);
}
12444 
12445 
12446 
12447 
12448 
12449 
12450 
12451 int
12452 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
12453 {
12454         int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
12455 
12456         if (phba->sli_rev == LPFC_SLI_REV4) {
12457                 if (max_xri <= 100)
12458                         return 10;
12459                 else if (max_xri <= 256)
12460                         return 25;
12461                 else if (max_xri <= 512)
12462                         return 50;
12463                 else if (max_xri <= 1024)
12464                         return 100;
12465                 else if (max_xri <= 1536)
12466                         return 150;
12467                 else if (max_xri <= 2048)
12468                         return 200;
12469                 else
12470                         return 250;
12471         } else
12472                 return 0;
12473 }
12474 
12475 
12476 
12477 
12478 
12479 
12480 
12481 int
12482 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
12483 {
12484         int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
12485 
12486         if (phba->nvmet_support)
12487                 max_xri += LPFC_NVMET_BUF_POST;
12488         return max_xri;
12489 }
12490 
12491 
12492 static void
12493 lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
12494         uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
12495         const struct firmware *fw)
12496 {
12497         if ((offset == ADD_STATUS_FW_NOT_SUPPORTED) ||
12498             (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
12499              magic_number != MAGIC_NUMER_G6) ||
12500             (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
12501              magic_number != MAGIC_NUMER_G7))
12502                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12503                         "3030 This firmware version is not supported on "
12504                         "this HBA model. Device:%x Magic:%x Type:%x "
12505                         "ID:%x Size %d %zd\n",
12506                         phba->pcidev->device, magic_number, ftype, fid,
12507                         fsize, fw->size);
12508         else
12509                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12510                         "3022 FW Download failed. Device:%x Magic:%x Type:%x "
12511                         "ID:%x Size %d %zd\n",
12512                         phba->pcidev->device, magic_number, ftype, fid,
12513                         fsize, fw->size);
12514 }
12515 
12516 
12517 
12518 
12519 
12520 
12521 
12522 
/**
 * lpfc_write_firmware - attempt to write a firmware image to the port
 * @fw: pointer to firmware image returned from request_firmware.
 * @context: pointer to firmware image returned from request_firmware.
 *
 * Compares the running firmware revision against the image's revision and,
 * if they differ, streams the image to the adapter via lpfc_wr_object()
 * in SLI4_PAGE_SIZE DMA chunks.  Releases the firmware blob in all cases.
 */
static void
lpfc_write_firmware(const struct firmware *fw, void *context)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)context;
	char fwrev[FW_REV_STR_SIZE];
	struct lpfc_grp_hdr *image;
	struct list_head dma_buffer_list;
	int i, rc = 0;
	struct lpfc_dmabuf *dmabuf, *next;
	uint32_t offset = 0, temp_offset = 0;
	uint32_t magic_number, ftype, fid, fsize;

	/* It can be null in no-wait mode, sanity check */
	if (!fw) {
		rc = -ENXIO;
		goto out;
	}
	image = (struct lpfc_grp_hdr *)fw->data;

	magic_number = be32_to_cpu(image->magic_number);
	ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
	fid = bf_get_be32(lpfc_grp_hdr_id, image);
	fsize = be32_to_cpu(image->size);

	INIT_LIST_HEAD(&dma_buffer_list);
	lpfc_decode_firmware_rev(phba, fwrev, 1);
	if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3023 Updating Firmware, Current Version:%s "
				"New Version:%s\n",
				fwrev, image->revision);
		/* Pre-allocate a list of DMA buffers for the transfer */
		for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
			dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
					 GFP_KERNEL);
			if (!dmabuf) {
				rc = -ENOMEM;
				goto release_out;
			}
			dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
							  SLI4_PAGE_SIZE,
							  &dmabuf->phys,
							  GFP_KERNEL);
			if (!dmabuf->virt) {
				kfree(dmabuf);
				rc = -ENOMEM;
				goto release_out;
			}
			list_add_tail(&dmabuf->list, &dma_buffer_list);
		}
		/*
		 * Copy the image into the DMA buffers one window at a time
		 * and write each window to the adapter; lpfc_wr_object()
		 * advances 'offset' by the amount actually consumed.
		 */
		while (offset < fw->size) {
			temp_offset = offset;
			list_for_each_entry(dmabuf, &dma_buffer_list, list) {
				if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
					/* Final partial page of the image */
					memcpy(dmabuf->virt,
					       fw->data + temp_offset,
					       fw->size - temp_offset);
					temp_offset = fw->size;
					break;
				}
				memcpy(dmabuf->virt, fw->data + temp_offset,
				       SLI4_PAGE_SIZE);
				temp_offset += SLI4_PAGE_SIZE;
			}
			rc = lpfc_wr_object(phba, &dma_buffer_list,
				    (fw->size - offset), &offset);
			if (rc) {
				lpfc_log_write_firmware_error(phba, offset,
					magic_number, ftype, fid, fsize, fw);
				goto release_out;
			}
		}
		rc = offset;
	} else
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3029 Skipped Firmware update, Current "
				"Version:%s New Version:%s\n",
				fwrev, image->revision);

release_out:
	/* Free the per-transfer DMA buffers */
	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
		list_del(&dmabuf->list);
		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	release_firmware(fw);
out:
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"3024 Firmware update done: %d.\n", rc);
	return;
}
12614 
12615 
12616 
12617 
12618 
12619 
12620 
12621 
12622 int
12623 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
12624 {
12625         uint8_t file_name[ELX_MODEL_NAME_SIZE];
12626         int ret;
12627         const struct firmware *fw;
12628 
12629         
12630         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
12631             LPFC_SLI_INTF_IF_TYPE_2)
12632                 return -EPERM;
12633 
12634         snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
12635 
12636         if (fw_upgrade == INT_FW_UPGRADE) {
12637                 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
12638                                         file_name, &phba->pcidev->dev,
12639                                         GFP_KERNEL, (void *)phba,
12640                                         lpfc_write_firmware);
12641         } else if (fw_upgrade == RUN_FW_UPGRADE) {
12642                 ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
12643                 if (!ret)
12644                         lpfc_write_firmware(fw, (void *)phba);
12645         } else {
12646                 ret = -EINVAL;
12647         }
12648 
12649         return ret;
12650 }
12651 
12652 
12653 
12654 
12655 
12656 
12657 
12658 
12659 
12660 
12661 
12662 
12663 
12664 
12665 
12666 
12667 
12668 
12669 
/**
 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * Invoked from the PCI subsystem when an SLI-4 HBA is presented: allocates
 * the HBA structure, sets up PCI memory and driver resources, enables the
 * interrupt, creates the SCSI host, initializes the port, and optionally
 * registers an NVMe localport and requests a firmware upgrade.  Errors
 * unwind in strict reverse order through the goto labels at the bottom.
 *
 * Return codes
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 */
static int
lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba   *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host  *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-4 specific device PCI memory space */
	error = lpfc_sli4_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1410 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up SLI-4 Specific device driver resources */
	error = lpfc_sli4_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1412 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	INIT_LIST_HEAD(&phba->active_rrq_list);
	INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1414 Failed to set up driver resource.\n");
		goto out_unset_driver_resource_s4;
	}

	/* Get the default values of Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;

	/* Put device to a known state before enabling interrupt */
	phba->pport = NULL;
	lpfc_stop_port(phba);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0426 Failed to enable interrupt.\n");
		error = -ENODEV;
		goto out_unset_driver_resource;
	}
	/* Default to single EQ/channel for non-MSI-X interrupt modes */
	if (phba->intr_type != MSIX) {
		phba->cfg_irq_chann = 1;
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			if (phba->nvmet_support)
				phba->cfg_nvmet_mrq = 1;
		}
	}
	lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1415 Failed to create scsi host.\n");
		goto out_disable_intr;
	}
	vport = phba->pport;
	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */

	/* Configure sysfs attributes */
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1416 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	/* Set up SLI-4 HBA */
	if (lpfc_sli4_hba_setup(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1421 Failed to set up hba\n");
		error = -ENODEV;
		goto out_free_sysfs_attr;
	}

	/* Log the current active interrupt mode */
	phba->intr_mode = intr_mode;
	lpfc_log_intr_mode(phba, intr_mode);

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* NVME support in FC4 type LPFC_ENABLE_NVME, and when the port is
	 * not an NVMET target, register with the NVMe transport layer.
	 */
	if (phba->nvmet_support == 0) {
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			/* Create NVME binding with nvme_fc_transport.  This
			 * ensures the vport is initialized.  If the localport
			 * create fails, it should not unload the driver to
			 * support field issues.
			 */
			error = lpfc_nvme_create_localport(vport);
			if (error) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"6004 NVME registration "
						"failed, error x%x\n",
						error);
			}
		}
	}

	/* check for firmware upgrade or downgrade */
	if (phba->cfg_request_firmware_upgrade)
		lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	/* Enable RAS FW log support */
	lpfc_sli4_ras_setup(phba);

	INIT_LIST_HEAD(&phba->poll_list);
	cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);

	return 0;

out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_disable_intr:
	lpfc_sli4_disable_intr(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_unset_driver_resource_s4:
	lpfc_sli4_driver_resource_unset(phba);
out_unset_pci_mem_s4:
	lpfc_sli4_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}
12837 
12838 
12839 
12840 
12841 
12842 
12843 
12844 
12845 
12846 
12847 static void
12848 lpfc_pci_remove_one_s4(struct pci_dev *pdev)
12849 {
12850         struct Scsi_Host *shost = pci_get_drvdata(pdev);
12851         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
12852         struct lpfc_vport **vports;
12853         struct lpfc_hba *phba = vport->phba;
12854         int i;
12855 
12856         
12857         spin_lock_irq(&phba->hbalock);
12858         vport->load_flag |= FC_UNLOADING;
12859         spin_unlock_irq(&phba->hbalock);
12860 
12861         
12862         lpfc_free_sysfs_attr(vport);
12863 
12864         
12865         vports = lpfc_create_vport_work_array(phba);
12866         if (vports != NULL)
12867                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
12868                         if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
12869                                 continue;
12870                         fc_vport_terminate(vports[i]->fc_vport);
12871                 }
12872         lpfc_destroy_vport_work_array(phba, vports);
12873 
12874         
12875         fc_remove_host(shost);
12876         scsi_remove_host(shost);
12877 
12878         
12879 
12880 
12881         lpfc_cleanup(vport);
12882         lpfc_nvmet_destroy_targetport(phba);
12883         lpfc_nvme_destroy_localport(vport);
12884 
12885         
12886         if (phba->cfg_xri_rebalancing)
12887                 lpfc_destroy_multixri_pools(phba);
12888 
12889         
12890 
12891 
12892 
12893 
12894         lpfc_debugfs_terminate(vport);
12895 
12896         lpfc_stop_hba_timers(phba);
12897         spin_lock_irq(&phba->port_list_lock);
12898         list_del_init(&vport->listentry);
12899         spin_unlock_irq(&phba->port_list_lock);
12900 
12901         
12902 
12903 
12904         lpfc_io_free(phba);
12905         lpfc_free_iocb_list(phba);
12906         lpfc_sli4_hba_unset(phba);
12907 
12908         lpfc_unset_driver_resource_phase2(phba);
12909         lpfc_sli4_driver_resource_unset(phba);
12910 
12911         
12912         lpfc_sli4_pci_mem_unset(phba);
12913 
12914         
12915         scsi_host_put(shost);
12916         lpfc_disable_pci_dev(phba);
12917 
12918         
12919         lpfc_hba_free(phba);
12920 
12921         return;
12922 }
12923 
12924 
12925 
12926 
12927 
12928 
12929 
12930 
12931 
12932 
12933 
12934 
12935 
12936 
12937 
12938 
12939 
12940 
12941 
12942 
12943 
12944 
/**
 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * Quiesces the SLI-4 HBA for a PM suspend: brings the port offline, stops
 * the worker thread, disables the device interrupt, destroys the SLI-4
 * queues, and puts the PCI device into the D3hot power state.
 *
 * Return code
 *	0 - driver suspended the device
 */
static int
lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2843 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
12969 
12970 
12971 
12972 
12973 
12974 
12975 
12976 
12977 
12978 
12979 
12980 
12981 
12982 
12983 
12984 
12985 
12986 
12987 
12988 
12989 static int
12990 lpfc_pci_resume_one_s4(struct pci_dev *pdev)
12991 {
12992         struct Scsi_Host *shost = pci_get_drvdata(pdev);
12993         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12994         uint32_t intr_mode;
12995         int error;
12996 
12997         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12998                         "0292 PCI device Power Management resume.\n");
12999 
13000         
13001         pci_set_power_state(pdev, PCI_D0);
13002         pci_restore_state(pdev);
13003 
13004         
13005 
13006 
13007 
13008         pci_save_state(pdev);
13009 
13010         if (pdev->is_busmaster)
13011                 pci_set_master(pdev);
13012 
13013          
13014         phba->worker_thread = kthread_run(lpfc_do_work, phba,
13015                                         "lpfc_worker_%d", phba->brd_no);
13016         if (IS_ERR(phba->worker_thread)) {
13017                 error = PTR_ERR(phba->worker_thread);
13018                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13019                                 "0293 PM resume failed to start worker "
13020                                 "thread: error=x%x.\n", error);
13021                 return error;
13022         }
13023 
13024         
13025         intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
13026         if (intr_mode == LPFC_INTR_ERROR) {
13027                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13028                                 "0294 PM resume Failed to enable interrupt\n");
13029                 return -EIO;
13030         } else
13031                 phba->intr_mode = intr_mode;
13032 
13033         
13034         lpfc_sli_brdrestart(phba);
13035         lpfc_online(phba);
13036 
13037         
13038         lpfc_log_intr_mode(phba, phba->intr_mode);
13039 
13040         return 0;
13041 }
13042 
13043 
13044 
13045 
13046 
13047 
13048 
13049 
/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI-4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * Called to prepare the SLI-4 device for a PCI channel recovery (non-fatal
 * I/O error).
 */
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2828 PCI channel I/O abort preparing for recovery\n");
	/*
	 * There may be errored I/Os through the HBA; abort all I/Os on the
	 * txcmplq and let the SCSI mid-layer retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}
13061 
13062 
13063 
13064 
13065 
13066 
13067 
13068 
13069 
/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI-4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * Called to prepare the SLI-4 device for a PCI slot reset: quiesce all
 * I/O paths, flush outstanding I/Os, stop timers, then tear down the
 * interrupt, the SLI-4 queues, and the PCI device.  Note that mgmt I/O
 * is blocked without waiting (LPFC_MBX_NO_WAIT), unlike the SLI-3 path.
 */
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2826 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding I/Os as we are to reset */
	lpfc_sli_flush_io_rings(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);
	pci_disable_device(phba->pcidev);
}
13093 
13094 
13095 
13096 
13097 
13098 
13099 
13100 
13101 
/**
 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * Called to prepare the SLI-4 device for a PCI slot permanently disabled
 * state: block further SCSI I/O, stop timers, and clean up outstanding
 * I/Os.  No reset/re-enable is attempted — the device is going away.
 */
static void
lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2827 PCI channel permanent disable for failure\n");

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding I/Os */
	lpfc_sli_flush_io_rings(phba);
}
13117 
13118 
13119 
13120 
13121 
13122 
13123 
13124 
13125 
13126 
13127 
13128 
13129 
13130 
13131 
13132 
13133 
13134 static pci_ers_result_t
13135 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
13136 {
13137         struct Scsi_Host *shost = pci_get_drvdata(pdev);
13138         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13139 
13140         switch (state) {
13141         case pci_channel_io_normal:
13142                 
13143                 lpfc_sli4_prep_dev_for_recover(phba);
13144                 return PCI_ERS_RESULT_CAN_RECOVER;
13145         case pci_channel_io_frozen:
13146                 
13147                 lpfc_sli4_prep_dev_for_reset(phba);
13148                 return PCI_ERS_RESULT_NEED_RESET;
13149         case pci_channel_io_perm_failure:
13150                 
13151                 lpfc_sli4_prep_dev_for_perm_failure(phba);
13152                 return PCI_ERS_RESULT_DISCONNECT;
13153         default:
13154                 
13155                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13156                                 "2825 Unknown PCI error state: x%x\n", state);
13157                 lpfc_sli4_prep_dev_for_reset(phba);
13158                 return PCI_ERS_RESULT_NEED_RESET;
13159         }
13160 }
13161 
13162 
13163 
13164 
13165 
13166 
13167 
13168 
13169 
13170 
13171 
13172 
13173 
13174 
13175 
13176 
13177 
13178 
13179 
/**
 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * Called by the PCI error-recovery core after the PCI slot has been reset.
 * Re-enables the device memory resources, restores the saved PCI state,
 * clears LPFC_SLI_ACTIVE so the later resume path knows a full restart is
 * needed, and re-enables the device interrupt.
 *
 * Return codes
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
        struct lpfc_sli *psli = &phba->sli;
        uint32_t intr_mode;

        dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
        if (pci_enable_device_mem(pdev)) {
                printk(KERN_ERR "lpfc: Cannot re-enable "
                        "PCI device after reset.\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }

        pci_restore_state(pdev);

        /*
         * pci_restore_state() clears the device's saved_state flag, so the
         * state must be saved again for a future restore to work.
         */
        pci_save_state(pdev);

        if (pdev->is_busmaster)
                pci_set_master(pdev);

        /* Mark SLI inactive: the resume handler will do a full restart */
        spin_lock_irq(&phba->hbalock);
        psli->sli_flag &= ~LPFC_SLI_ACTIVE;
        spin_unlock_irq(&phba->hbalock);

        /* Configure and enable interrupt, reusing the pre-reset mode */
        intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
        if (intr_mode == LPFC_INTR_ERROR) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "2824 Cannot re-enable interrupt after "
                                "slot reset.\n");
                return PCI_ERS_RESULT_DISCONNECT;
        } else
                phba->intr_mode = intr_mode;

        /* Log the current active interrupt mode */
        lpfc_log_intr_mode(phba, phba->intr_mode);

        return PCI_ERS_RESULT_RECOVERED;
}
13225 
13226 
13227 
13228 
13229 
13230 
13231 
13232 
13233 
13234 
13235 
/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * Called by the PCI error-recovery core when it is OK to resume normal PCI
 * operation after a slot reset. If LPFC_SLI_ACTIVE was cleared by the slot
 * reset handler, the HBA is brought fully down and back online (offline,
 * board restart, online); otherwise nothing needs to be done here.
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

        /*
         * LPFC_SLI_ACTIVE cleared means the slot-reset path ran and the
         * port needs a complete recovery: offline, restart, online.
         */
        if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
                /* Perform device reset */
                lpfc_offline_prep(phba, LPFC_MBX_WAIT);
                lpfc_offline(phba);
                lpfc_sli_brdrestart(phba);
                /* Bring the device back online */
                lpfc_online(phba);
        }
}
13257 
13258 
13259 
13260 
13261 
13262 
13263 
13264 
13265 
13266 
13267 
13268 
13269 
13270 
13271 
13272 
13273 
13274 
13275 
13276 static int
13277 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
13278 {
13279         int rc;
13280         struct lpfc_sli_intf intf;
13281 
13282         if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
13283                 return -ENODEV;
13284 
13285         if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
13286             (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
13287                 rc = lpfc_pci_probe_one_s4(pdev, pid);
13288         else
13289                 rc = lpfc_pci_probe_one_s3(pdev, pid);
13290 
13291         return rc;
13292 }
13293 
13294 
13295 
13296 
13297 
13298 
13299 
13300 
13301 
13302 
13303 
13304 static void
13305 lpfc_pci_remove_one(struct pci_dev *pdev)
13306 {
13307         struct Scsi_Host *shost = pci_get_drvdata(pdev);
13308         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13309 
13310         switch (phba->pci_dev_grp) {
13311         case LPFC_PCI_DEV_LP:
13312                 lpfc_pci_remove_one_s3(pdev);
13313                 break;
13314         case LPFC_PCI_DEV_OC:
13315                 lpfc_pci_remove_one_s4(pdev);
13316                 break;
13317         default:
13318                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13319                                 "1424 Invalid PCI device group: 0x%x\n",
13320                                 phba->pci_dev_grp);
13321                 break;
13322         }
13323         return;
13324 }
13325 
13326 
13327 
13328 
13329 
13330 
13331 
13332 
13333 
13334 
13335 
13336 
13337 
13338 
13339 
13340 static int
13341 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
13342 {
13343         struct Scsi_Host *shost = pci_get_drvdata(pdev);
13344         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13345         int rc = -ENODEV;
13346 
13347         switch (phba->pci_dev_grp) {
13348         case LPFC_PCI_DEV_LP:
13349                 rc = lpfc_pci_suspend_one_s3(pdev, msg);
13350                 break;
13351         case LPFC_PCI_DEV_OC:
13352                 rc = lpfc_pci_suspend_one_s4(pdev, msg);
13353                 break;
13354         default:
13355                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13356                                 "1425 Invalid PCI device group: 0x%x\n",
13357                                 phba->pci_dev_grp);
13358                 break;
13359         }
13360         return rc;
13361 }
13362 
13363 
13364 
13365 
13366 
13367 
13368 
13369 
13370 
13371 
13372 
13373 
13374 
13375 
13376 static int
13377 lpfc_pci_resume_one(struct pci_dev *pdev)
13378 {
13379         struct Scsi_Host *shost = pci_get_drvdata(pdev);
13380         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13381         int rc = -ENODEV;
13382 
13383         switch (phba->pci_dev_grp) {
13384         case LPFC_PCI_DEV_LP:
13385                 rc = lpfc_pci_resume_one_s3(pdev);
13386                 break;
13387         case LPFC_PCI_DEV_OC:
13388                 rc = lpfc_pci_resume_one_s4(pdev);
13389                 break;
13390         default:
13391                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13392                                 "1426 Invalid PCI device group: 0x%x\n",
13393                                 phba->pci_dev_grp);
13394                 break;
13395         }
13396         return rc;
13397 }
13398 
13399 
13400 
13401 
13402 
13403 
13404 
13405 
13406 
13407 
13408 
13409 
13410 
13411 
13412 
13413 
13414 static pci_ers_result_t
13415 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
13416 {
13417         struct Scsi_Host *shost = pci_get_drvdata(pdev);
13418         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13419         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
13420 
13421         switch (phba->pci_dev_grp) {
13422         case LPFC_PCI_DEV_LP:
13423                 rc = lpfc_io_error_detected_s3(pdev, state);
13424                 break;
13425         case LPFC_PCI_DEV_OC:
13426                 rc = lpfc_io_error_detected_s4(pdev, state);
13427                 break;
13428         default:
13429                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13430                                 "1427 Invalid PCI device group: 0x%x\n",
13431                                 phba->pci_dev_grp);
13432                 break;
13433         }
13434         return rc;
13435 }
13436 
13437 
13438 
13439 
13440 
13441 
13442 
13443 
13444 
13445 
13446 
13447 
13448 
13449 
13450 
13451 static pci_ers_result_t
13452 lpfc_io_slot_reset(struct pci_dev *pdev)
13453 {
13454         struct Scsi_Host *shost = pci_get_drvdata(pdev);
13455         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13456         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
13457 
13458         switch (phba->pci_dev_grp) {
13459         case LPFC_PCI_DEV_LP:
13460                 rc = lpfc_io_slot_reset_s3(pdev);
13461                 break;
13462         case LPFC_PCI_DEV_OC:
13463                 rc = lpfc_io_slot_reset_s4(pdev);
13464                 break;
13465         default:
13466                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13467                                 "1428 Invalid PCI device group: 0x%x\n",
13468                                 phba->pci_dev_grp);
13469                 break;
13470         }
13471         return rc;
13472 }
13473 
13474 
13475 
13476 
13477 
13478 
13479 
13480 
13481 
13482 
13483 
13484 static void
13485 lpfc_io_resume(struct pci_dev *pdev)
13486 {
13487         struct Scsi_Host *shost = pci_get_drvdata(pdev);
13488         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13489 
13490         switch (phba->pci_dev_grp) {
13491         case LPFC_PCI_DEV_LP:
13492                 lpfc_io_resume_s3(pdev);
13493                 break;
13494         case LPFC_PCI_DEV_OC:
13495                 lpfc_io_resume_s4(pdev);
13496                 break;
13497         default:
13498                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13499                                 "1429 Invalid PCI device group: 0x%x\n",
13500                                 phba->pci_dev_grp);
13501                 break;
13502         }
13503         return;
13504 }
13505 
13506 
13507 
13508 
13509 
13510 
13511 
13512 
13513 
13514 
13515 
13516 static void
13517 lpfc_sli4_oas_verify(struct lpfc_hba *phba)
13518 {
13519 
13520         if (!phba->cfg_EnableXLane)
13521                 return;
13522 
13523         if (phba->sli4_hba.pc_sli4_params.oas_supported) {
13524                 phba->cfg_fof = 1;
13525         } else {
13526                 phba->cfg_fof = 0;
13527                 if (phba->device_data_mem_pool)
13528                         mempool_destroy(phba->device_data_mem_pool);
13529                 phba->device_data_mem_pool = NULL;
13530         }
13531 
13532         return;
13533 }
13534 
13535 
13536 
13537 
13538 
13539 
13540 
13541 
13542 void
13543 lpfc_sli4_ras_init(struct lpfc_hba *phba)
13544 {
13545         switch (phba->pcidev->device) {
13546         case PCI_DEVICE_ID_LANCER_G6_FC:
13547         case PCI_DEVICE_ID_LANCER_G7_FC:
13548                 phba->ras_fwlog.ras_hwsupport = true;
13549                 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
13550                     phba->cfg_ras_fwlog_buffsize)
13551                         phba->ras_fwlog.ras_enabled = true;
13552                 else
13553                         phba->ras_fwlog.ras_enabled = false;
13554                 break;
13555         default:
13556                 phba->ras_fwlog.ras_hwsupport = false;
13557         }
13558 }
13559 
13560 
MODULE_DEVICE_TABLE(pci, lpfc_id_table);

/* PCI error-recovery callbacks (Documentation/PCI/pci-error-recovery.rst) */
static const struct pci_error_handlers lpfc_err_handler = {
        .error_detected = lpfc_io_error_detected,
        .slot_reset = lpfc_io_slot_reset,
        .resume = lpfc_io_resume,
};

/* PCI driver entry points; shutdown reuses the remove path */
static struct pci_driver lpfc_driver = {
        .name           = LPFC_DRIVER_NAME,
        .id_table       = lpfc_id_table,
        .probe          = lpfc_pci_probe_one,
        .remove         = lpfc_pci_remove_one,
        .shutdown       = lpfc_pci_remove_one,
        .suspend        = lpfc_pci_suspend_one,
        .resume         = lpfc_pci_resume_one,
        .err_handler    = &lpfc_err_handler,
};

/* /dev/lpfcmgmt: no file operations beyond open/close semantics */
static const struct file_operations lpfc_mgmt_fop = {
        .owner = THIS_MODULE,
};

static struct miscdevice lpfc_mgmt_dev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "lpfcmgmt",
        .fops = &lpfc_mgmt_fop,
};
13589 
13590 
13591 
13592 
13593 
13594 
13595 
13596 
13597 
13598 
13599 
13600 
13601 
13602 static int __init
13603 lpfc_init(void)
13604 {
13605         int error = 0;
13606 
13607         printk(LPFC_MODULE_DESC "\n");
13608         printk(LPFC_COPYRIGHT "\n");
13609 
13610         error = misc_register(&lpfc_mgmt_dev);
13611         if (error)
13612                 printk(KERN_ERR "Could not register lpfcmgmt device, "
13613                         "misc_register returned with status %d", error);
13614 
13615         lpfc_transport_functions.vport_create = lpfc_vport_create;
13616         lpfc_transport_functions.vport_delete = lpfc_vport_delete;
13617         lpfc_transport_template =
13618                                 fc_attach_transport(&lpfc_transport_functions);
13619         if (lpfc_transport_template == NULL)
13620                 return -ENOMEM;
13621         lpfc_vport_transport_template =
13622                 fc_attach_transport(&lpfc_vport_transport_functions);
13623         if (lpfc_vport_transport_template == NULL) {
13624                 fc_release_transport(lpfc_transport_template);
13625                 return -ENOMEM;
13626         }
13627         lpfc_nvme_cmd_template();
13628         lpfc_nvmet_cmd_template();
13629 
13630         
13631         lpfc_present_cpu = num_present_cpus();
13632 
13633         error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
13634                                         "lpfc/sli4:online",
13635                                         lpfc_cpu_online, lpfc_cpu_offline);
13636         if (error < 0)
13637                 goto cpuhp_failure;
13638         lpfc_cpuhp_state = error;
13639 
13640         error = pci_register_driver(&lpfc_driver);
13641         if (error)
13642                 goto unwind;
13643 
13644         return error;
13645 
13646 unwind:
13647         cpuhp_remove_multi_state(lpfc_cpuhp_state);
13648 cpuhp_failure:
13649         fc_release_transport(lpfc_transport_template);
13650         fc_release_transport(lpfc_vport_transport_template);
13651 
13652         return error;
13653 }
13654 
13655 
13656 
13657 
13658 
13659 
13660 
13661 
/**
 * lpfc_exit - lpfc module removal routine
 *
 * Invoked when the lpfc module is removed from the kernel. Deregisters the
 * lpfcmgmt misc device, unregisters the PCI driver, removes the CPU hotplug
 * state, releases both FC transport templates and destroys the HBA index
 * idr (mirror of the lpfc_init() setup, in reverse order).
 **/
static void __exit
lpfc_exit(void)
{
        misc_deregister(&lpfc_mgmt_dev);
        pci_unregister_driver(&lpfc_driver);
        cpuhp_remove_multi_state(lpfc_cpuhp_state);
        fc_release_transport(lpfc_transport_template);
        fc_release_transport(lpfc_vport_transport_template);
        idr_destroy(&lpfc_hba_index);
}
13672 
/* Module entry/exit points and metadata */
module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Broadcom");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);