root/drivers/scsi/megaraid/megaraid_sas_fusion.c


DEFINITIONS

This source file includes the following definitions.
  1. megasas_adp_reset_wait_for_ready
  2. megasas_enable_intr_fusion
  3. megasas_disable_intr_fusion
  4. megasas_clear_intr_fusion
  5. megasas_get_cmd_fusion
  6. megasas_return_cmd_fusion
  7. megasas_write_64bit_req_desc
  8. megasas_fire_cmd_fusion
  9. megasas_fusion_update_can_queue
  10. megasas_free_cmds_fusion
  11. megasas_create_sg_sense_fusion
  12. megasas_alloc_cmdlist_fusion
  13. megasas_alloc_request_fusion
  14. megasas_alloc_reply_fusion
  15. megasas_alloc_rdpq_fusion
  16. megasas_free_rdpq_fusion
  17. megasas_free_reply_fusion
  18. megasas_alloc_cmds_fusion
  19. wait_and_poll
  20. megasas_ioc_init_fusion
  21. megasas_sync_pd_seq_num
  22. megasas_get_ld_map_info
  23. megasas_get_map_info
  24. megasas_sync_map_info
  25. megasas_display_intel_branding
  26. megasas_allocate_raid_maps
  27. megasas_configure_queue_sizes
  28. megasas_alloc_ioc_init_frame
  29. megasas_free_ioc_init_cmd
  30. megasas_init_adapter_fusion
  31. megasas_fault_detect_work
  32. megasas_fusion_start_watchdog
  33. megasas_fusion_stop_watchdog
  34. map_cmd_status
  35. megasas_is_prp_possible
  36. megasas_make_prp_nvme
  37. megasas_make_sgl_fusion
  38. megasas_make_sgl
  39. megasas_set_pd_lba
  40. megasas_stream_detect
  41. megasas_set_raidflag_cpu_affinity
  42. megasas_build_ldio_fusion
  43. megasas_build_ld_nonrw_fusion
  44. megasas_build_syspd_fusion
  45. megasas_build_io_fusion
  46. megasas_get_request_descriptor
  47. megasas_prepare_secondRaid1_IO
  48. megasas_build_and_issue_cmd_fusion
  49. megasas_complete_r1_command
  50. complete_cmd_fusion
  51. megasas_enable_irq_poll
  52. megasas_sync_irqs
  53. megasas_irqpoll
  54. megasas_complete_cmd_dpc_fusion
  55. megasas_isr_fusion
  56. build_mpt_mfi_pass_thru
  57. build_mpt_cmd
  58. megasas_issue_dcmd_fusion
  59. megasas_release_fusion
  60. megasas_read_fw_status_reg_fusion
  61. megasas_alloc_host_crash_buffer
  62. megasas_free_host_crash_buffer
  63. megasas_adp_reset_fusion
  64. megasas_check_reset_fusion
  65. megasas_trigger_snap_dump
  66. megasas_wait_for_outstanding_fusion
  67. megasas_reset_reply_desc
  68. megasas_refire_mgmt_cmd
  69. megasas_track_scsiio
  70. megasas_tm_response_code
  71. megasas_issue_tm
  72. megasas_fusion_smid_lookup
  73. megasas_get_tm_devhandle
  74. megasas_task_abort_fusion
  75. megasas_reset_target_fusion
  76. megasas_get_peer_instance
  77. megasas_check_mpio_paths
  78. megasas_reset_fusion
  79. megasas_fusion_crash_dump
  80. megasas_fusion_ocr_wq
  81. megasas_alloc_fusion_context
  82. megasas_free_fusion_context

   1 // SPDX-License-Identifier: GPL-2.0-or-later
   2 /*
   3  *  Linux MegaRAID driver for SAS based RAID controllers
   4  *
   5  *  Copyright (c) 2009-2013  LSI Corporation
   6  *  Copyright (c) 2013-2016  Avago Technologies
   7  *  Copyright (c) 2016-2018  Broadcom Inc.
   8  *
   9  *  FILE: megaraid_sas_fusion.c
  10  *
  11  *  Authors: Broadcom Inc.
  12  *           Sumant Patro
  13  *           Adam Radford
  14  *           Kashyap Desai <kashyap.desai@broadcom.com>
  15  *           Sumit Saxena <sumit.saxena@broadcom.com>
  16  *
  17  *  Send feedback to: megaraidlinux.pdl@broadcom.com
  18  */
  19 
  20 #include <linux/kernel.h>
  21 #include <linux/types.h>
  22 #include <linux/pci.h>
  23 #include <linux/list.h>
  24 #include <linux/moduleparam.h>
  25 #include <linux/module.h>
  26 #include <linux/spinlock.h>
  27 #include <linux/interrupt.h>
  28 #include <linux/delay.h>
  29 #include <linux/uio.h>
  30 #include <linux/uaccess.h>
  31 #include <linux/fs.h>
  32 #include <linux/compat.h>
  33 #include <linux/blkdev.h>
  34 #include <linux/mutex.h>
  35 #include <linux/poll.h>
  36 #include <linux/vmalloc.h>
  37 #include <linux/workqueue.h>
  38 #include <linux/irq_poll.h>
  39 
  40 #include <scsi/scsi.h>
  41 #include <scsi/scsi_cmnd.h>
  42 #include <scsi/scsi_device.h>
  43 #include <scsi/scsi_host.h>
  44 #include <scsi/scsi_dbg.h>
  45 #include <linux/dmi.h>
  46 
  47 #include "megaraid_sas_fusion.h"
  48 #include "megaraid_sas.h"
  49 
  50 
  51 extern void megasas_free_cmds(struct megasas_instance *instance);
  52 extern struct megasas_cmd *megasas_get_cmd(struct megasas_instance
  53                                            *instance);
  54 extern void
  55 megasas_complete_cmd(struct megasas_instance *instance,
  56                      struct megasas_cmd *cmd, u8 alt_status);
  57 int
  58 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
  59               int seconds);
  60 
  61 void
  62 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd);
  63 int megasas_alloc_cmds(struct megasas_instance *instance);
  64 int
  65 megasas_clear_intr_fusion(struct megasas_instance *instance);
  66 int
  67 megasas_issue_polled(struct megasas_instance *instance,
  68                      struct megasas_cmd *cmd);
  69 void
  70 megasas_check_and_restore_queue_depth(struct megasas_instance *instance);
  71 
  72 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
  73 void megaraid_sas_kill_hba(struct megasas_instance *instance);
  74 
  75 extern u32 megasas_dbg_lvl;
  76 int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
  77                                   int initial);
  78 void megasas_start_timer(struct megasas_instance *instance);
  79 extern struct megasas_mgmt_info megasas_mgmt_info;
  80 extern unsigned int resetwaittime;
  81 extern unsigned int dual_qdepth_disable;
  82 static void megasas_free_rdpq_fusion(struct megasas_instance *instance);
  83 static void megasas_free_reply_fusion(struct megasas_instance *instance);
  84 static inline
  85 void megasas_configure_queue_sizes(struct megasas_instance *instance);
  86 static void megasas_fusion_crash_dump(struct megasas_instance *instance);
  87 extern u32 megasas_readl(struct megasas_instance *instance,
  88                          const volatile void __iomem *addr);
  89 
  90 /**
  91  * megasas_adp_reset_wait_for_ready -   initiate chip reset and wait for
  92  *                                      controller to come to ready state
   93  * @instance:                           adapter's soft state
   94  * @do_adp_reset:                       if true, perform a chip reset
   95  * @ocr_context:                        set to 1 if called from OCR
   96  *                                      context, else 0
   97  *
   98  * This function initiates a chip reset followed by a wait for the
   99  * controller to transition to the ready state.
  100  * During this, the driver blocks all userspace access to PCI config space.
 101  */
 102 int
 103 megasas_adp_reset_wait_for_ready(struct megasas_instance *instance,
 104                                  bool do_adp_reset,
 105                                  int ocr_context)
 106 {
 107         int ret = FAILED;
 108 
 109         /*
 110          * Block access to PCI config space from userspace
 111          * when diag reset is initiated from driver
 112          */
 113         if (megasas_dbg_lvl & OCR_DEBUG)
 114                 dev_info(&instance->pdev->dev,
 115                          "Block access to PCI config space %s %d\n",
 116                          __func__, __LINE__);
 117 
 118         pci_cfg_access_lock(instance->pdev);
 119 
 120         if (do_adp_reset) {
 121                 if (instance->instancet->adp_reset
 122                         (instance, instance->reg_set))
 123                         goto out;
 124         }
 125 
 126         /* Wait for FW to become ready */
 127         if (megasas_transition_to_ready(instance, ocr_context)) {
 128                 dev_warn(&instance->pdev->dev,
 129                          "Failed to transition controller to ready for scsi%d.\n",
 130                          instance->host->host_no);
 131                 goto out;
 132         }
 133 
 134         ret = SUCCESS;
 135 out:
 136         if (megasas_dbg_lvl & OCR_DEBUG)
 137                 dev_info(&instance->pdev->dev,
 138                          "Unlock access to PCI config space %s %d\n",
 139                          __func__, __LINE__);
 140 
 141         pci_cfg_access_unlock(instance->pdev);
 142 
 143         return ret;
 144 }
 145 
 146 /**
  147  * megasas_check_same_4gb_region -      check whether an allocation
  148  *                                      crosses a 4GB boundary
  149  * @instance:                           adapter's soft state
  150  * @start_addr:                         start address of the DMA allocation
  151  * @size:                               size of the allocation in bytes
  152  * return:                              true  - allocation does not cross a
  153  *                                      4GB boundary
  154  *                                      false - allocation crosses a
  155  *                                      4GB boundary
 156  */
 157 static inline bool megasas_check_same_4gb_region
 158         (struct megasas_instance *instance, dma_addr_t start_addr, size_t size)
 159 {
 160         dma_addr_t end_addr;
 161 
 162         end_addr = start_addr + size;
 163 
 164         if (upper_32_bits(start_addr) != upper_32_bits(end_addr)) {
 165                 dev_err(&instance->pdev->dev,
 166                         "Failed to get same 4GB boundary: start_addr: 0x%llx end_addr: 0x%llx\n",
 167                         (unsigned long long)start_addr,
 168                         (unsigned long long)end_addr);
 169                 return false;
 170         }
 171 
 172         return true;
 173 }
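/*
 * Illustrative example: a buffer starting at 0x1_ffff_f000 with size 0x2000
 * ends at 0x2_0000_1000; upper_32_bits() yields 0x1 vs 0x2, so the allocation
 * crosses a 4GB boundary and the check returns false. The same-sized buffer
 * starting at 0x1_0000_0000 keeps both ends in the 0x1 region and returns true.
 */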
 174 
 175 /**
 176  * megasas_enable_intr_fusion - Enables interrupts
  177  * @instance:                   Adapter soft state
 178  */
 179 void
 180 megasas_enable_intr_fusion(struct megasas_instance *instance)
 181 {
 182         struct megasas_register_set __iomem *regs;
 183         regs = instance->reg_set;
 184 
 185         instance->mask_interrupts = 0;
 186         /* For Thunderbolt/Invader also clear intr on enable */
 187         writel(~0, &regs->outbound_intr_status);
 188         readl(&regs->outbound_intr_status);
 189 
 190         writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
 191 
 192         /* Dummy readl to force pci flush */
 193         dev_info(&instance->pdev->dev, "%s is called outbound_intr_mask:0x%08x\n",
 194                  __func__, readl(&regs->outbound_intr_mask));
 195 }
 196 
 197 /**
  198  * megasas_disable_intr_fusion - Disables interrupts
  199  * @instance:                    Adapter soft state
 200  */
 201 void
 202 megasas_disable_intr_fusion(struct megasas_instance *instance)
 203 {
 204         u32 mask = 0xFFFFFFFF;
 205         struct megasas_register_set __iomem *regs;
 206         regs = instance->reg_set;
 207         instance->mask_interrupts = 1;
 208 
 209         writel(mask, &regs->outbound_intr_mask);
 210         /* Dummy readl to force pci flush */
 211         dev_info(&instance->pdev->dev, "%s is called outbound_intr_mask:0x%08x\n",
 212                  __func__, readl(&regs->outbound_intr_mask));
 213 }
 214 
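/**
 * megasas_clear_intr_fusion - Check and clear the pending interrupt
 * @instance:                  Adapter soft state
 *
 * Returns 1 if the interrupt belongs to this adapter (clearing the
 * write-to-clear status bit when it is set), 0 otherwise.
 */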
 215 int
 216 megasas_clear_intr_fusion(struct megasas_instance *instance)
 217 {
 218         u32 status;
 219         struct megasas_register_set __iomem *regs;
 220         regs = instance->reg_set;
 221         /*
 222          * Check if it is our interrupt
 223          */
 224         status = megasas_readl(instance,
 225                                &regs->outbound_intr_status);
 226 
 227         if (status & 1) {
 228                 writel(status, &regs->outbound_intr_status);
 229                 readl(&regs->outbound_intr_status);
 230                 return 1;
 231         }
 232         if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
 233                 return 0;
 234 
 235         return 1;
 236 }
 237 
 238 /**
 239  * megasas_get_cmd_fusion -     Get a command from the free pool
 240  * @instance:           Adapter soft state
 241  *
 242  * Returns a blk_tag indexed mpt frame
 243  */
 244 inline struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
 245                                                   *instance, u32 blk_tag)
 246 {
 247         struct fusion_context *fusion;
 248 
 249         fusion = instance->ctrl_context;
 250         return fusion->cmd_list[blk_tag];
 251 }
 252 
 253 /**
 254  * megasas_return_cmd_fusion -  Return a cmd to free command pool
 255  * @instance:           Adapter soft state
 256  * @cmd:                Command packet to be returned to free command pool
 257  */
 258 inline void megasas_return_cmd_fusion(struct megasas_instance *instance,
 259         struct megasas_cmd_fusion *cmd)
 260 {
 261         cmd->scmd = NULL;
 262         memset(cmd->io_request, 0, MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
 263         cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
 264         cmd->cmd_completed = false;
 265 }
 266 
 267 /**
 268  * megasas_write_64bit_req_desc -       PCI writes 64bit request descriptor
 269  * @instance:                           Adapter soft state
 270  * @req_desc:                           64bit Request descriptor
 271  */
 272 static void
 273 megasas_write_64bit_req_desc(struct megasas_instance *instance,
 274                 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
 275 {
 276 #if defined(writeq) && defined(CONFIG_64BIT)
 277         u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) |
 278                 le32_to_cpu(req_desc->u.low));
 279         writeq(req_data, &instance->reg_set->inbound_low_queue_port);
 280 #else
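        /* Without a usable writeq(), the 64-bit descriptor is issued as two
         * 32-bit writes; hba_lock keeps the low/high halves of concurrent
         * submissions from interleaving at the queue ports.
         */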
 281         unsigned long flags;
 282         spin_lock_irqsave(&instance->hba_lock, flags);
 283         writel(le32_to_cpu(req_desc->u.low),
 284                 &instance->reg_set->inbound_low_queue_port);
 285         writel(le32_to_cpu(req_desc->u.high),
 286                 &instance->reg_set->inbound_high_queue_port);
 287         spin_unlock_irqrestore(&instance->hba_lock, flags);
 288 #endif
 289 }
 290 
 291 /**
 292  * megasas_fire_cmd_fusion -    Sends command to the FW
 293  * @instance:                   Adapter soft state
 294  * @req_desc:                   32bit or 64bit Request descriptor
 295  *
  296  * Perform a PCI write. AERO-series controllers support 32-bit (atomic)
  297  * descriptors; controllers prior to the AERO series use 64-bit descriptors.
 298  */
 299 static void
 300 megasas_fire_cmd_fusion(struct megasas_instance *instance,
 301                 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
 302 {
 303         if (instance->atomic_desc_support)
 304                 writel(le32_to_cpu(req_desc->u.low),
 305                         &instance->reg_set->inbound_single_queue_port);
 306         else
 307                 megasas_write_64bit_req_desc(instance, req_desc);
 308 }
 309 
 310 /**
 311  * megasas_fusion_update_can_queue -    Do all Adapter Queue depth related calculations here
 312  * @instance:                                                   Adapter soft state
  313  * @fw_boot_context:                                            whether this function is called during probe or after OCR
  314  *
  315  * This function is only for fusion controllers.
  316  * Update the host can_queue if a firmware downgrade reduced the maximum
  317  * supported firmware commands. The firmware upgrade case is skipped because
  318  * the underlying firmware has more resources than are exposed to the OS.
 319  *
 320  */
 321 static void
 322 megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_context)
 323 {
 324         u16 cur_max_fw_cmds = 0;
 325         u16 ldio_threshold = 0;
 326 
 327         /* ventura FW does not fill outbound_scratch_pad_2 with queue depth */
 328         if (instance->adapter_type < VENTURA_SERIES)
 329                 cur_max_fw_cmds =
 330                 megasas_readl(instance,
 331                               &instance->reg_set->outbound_scratch_pad_2) & 0x00FFFF;
 332 
 333         if (dual_qdepth_disable || !cur_max_fw_cmds)
 334                 cur_max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF;
 335         else
 336                 ldio_threshold =
 337                         (instance->instancet->read_fw_status_reg(instance) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS;
 338 
 339         dev_info(&instance->pdev->dev,
 340                  "Current firmware supports maximum commands: %d\t LDIO threshold: %d\n",
 341                  cur_max_fw_cmds, ldio_threshold);
 342 
 343         if (fw_boot_context == OCR_CONTEXT) {
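                /* In OCR context the exposed depth is only ever lowered: when
                 * the re-read firmware command count drops below the previous
                 * maximum, reserve one slot plus the driver-internal and IOCTL
                 * commands and shrink host->can_queue to what remains.
                 */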
 344                 cur_max_fw_cmds = cur_max_fw_cmds - 1;
 345                 if (cur_max_fw_cmds < instance->max_fw_cmds) {
 346                         instance->cur_can_queue =
 347                                 cur_max_fw_cmds - (MEGASAS_FUSION_INTERNAL_CMDS +
 348                                                 MEGASAS_FUSION_IOCTL_CMDS);
 349                         instance->host->can_queue = instance->cur_can_queue;
 350                         instance->ldio_threshold = ldio_threshold;
 351                 }
 352         } else {
 353                 instance->max_fw_cmds = cur_max_fw_cmds;
 354                 instance->ldio_threshold = ldio_threshold;
 355 
 356                 if (reset_devices)
 357                         instance->max_fw_cmds = min(instance->max_fw_cmds,
 358                                                 (u16)MEGASAS_KDUMP_QUEUE_DEPTH);
 359                 /*
 360                 * Reduce the max supported cmds by 1. This is to ensure that the
 361                 * reply_q_sz (1 more than the max cmd that driver may send)
 362                 * does not exceed max cmds that the FW can support
 363                 */
 364                 instance->max_fw_cmds = instance->max_fw_cmds-1;
 365         }
 366 }
 367 /**
 368  * megasas_free_cmds_fusion -   Free all the cmds in the free cmd pool
 369  * @instance:           Adapter soft state
 370  */
 371 void
 372 megasas_free_cmds_fusion(struct megasas_instance *instance)
 373 {
 374         int i;
 375         struct fusion_context *fusion = instance->ctrl_context;
 376         struct megasas_cmd_fusion *cmd;
 377 
 378         if (fusion->sense)
 379                 dma_pool_free(fusion->sense_dma_pool, fusion->sense,
 380                               fusion->sense_phys_addr);
 381 
 382         /* SG */
 383         if (fusion->cmd_list) {
 384                 for (i = 0; i < instance->max_mpt_cmds; i++) {
 385                         cmd = fusion->cmd_list[i];
 386                         if (cmd) {
 387                                 if (cmd->sg_frame)
 388                                         dma_pool_free(fusion->sg_dma_pool,
 389                                                       cmd->sg_frame,
 390                                                       cmd->sg_frame_phys_addr);
 391                         }
 392                         kfree(cmd);
 393                 }
 394                 kfree(fusion->cmd_list);
 395         }
 396 
 397         if (fusion->sg_dma_pool) {
 398                 dma_pool_destroy(fusion->sg_dma_pool);
 399                 fusion->sg_dma_pool = NULL;
 400         }
 401         if (fusion->sense_dma_pool) {
 402                 dma_pool_destroy(fusion->sense_dma_pool);
 403                 fusion->sense_dma_pool = NULL;
 404         }
 405 
 406 
 407         /* Reply Frame, Desc*/
 408         if (instance->is_rdpq)
 409                 megasas_free_rdpq_fusion(instance);
 410         else
 411                 megasas_free_reply_fusion(instance);
 412 
 413         /* Request Frame, Desc*/
 414         if (fusion->req_frames_desc)
 415                 dma_free_coherent(&instance->pdev->dev,
 416                         fusion->request_alloc_sz, fusion->req_frames_desc,
 417                         fusion->req_frames_desc_phys);
 418         if (fusion->io_request_frames)
 419                 dma_pool_free(fusion->io_request_frames_pool,
 420                         fusion->io_request_frames,
 421                         fusion->io_request_frames_phys);
 422         if (fusion->io_request_frames_pool) {
 423                 dma_pool_destroy(fusion->io_request_frames_pool);
 424                 fusion->io_request_frames_pool = NULL;
 425         }
 426 }
 427 
 428 /**
 429  * megasas_create_sg_sense_fusion -     Creates DMA pool for cmd frames
 430  * @instance:                   Adapter soft state
 431  *
 432  */
 433 static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
 434 {
 435         int i;
 436         u16 max_cmd;
 437         struct fusion_context *fusion;
 438         struct megasas_cmd_fusion *cmd;
 439         int sense_sz;
 440         u32 offset;
 441 
 442         fusion = instance->ctrl_context;
 443         max_cmd = instance->max_fw_cmds;
 444         sense_sz = instance->max_mpt_cmds * SCSI_SENSE_BUFFERSIZE;
 445 
 446         fusion->sg_dma_pool =
 447                         dma_pool_create("mr_sg", &instance->pdev->dev,
 448                                 instance->max_chain_frame_sz,
 449                                 MR_DEFAULT_NVME_PAGE_SIZE, 0);
 450         /* SCSI_SENSE_BUFFERSIZE  = 96 bytes */
 451         fusion->sense_dma_pool =
 452                         dma_pool_create("mr_sense", &instance->pdev->dev,
 453                                 sense_sz, 64, 0);
 454 
 455         if (!fusion->sense_dma_pool || !fusion->sg_dma_pool) {
 456                 dev_err(&instance->pdev->dev,
 457                         "Failed from %s %d\n",  __func__, __LINE__);
 458                 return -ENOMEM;
 459         }
 460 
 461         fusion->sense = dma_pool_alloc(fusion->sense_dma_pool,
 462                                        GFP_KERNEL, &fusion->sense_phys_addr);
 463         if (!fusion->sense) {
 464                 dev_err(&instance->pdev->dev,
 465                         "failed from %s %d\n",  __func__, __LINE__);
 466                 return -ENOMEM;
 467         }
 468 
  469         /* The sense buffer, request frame and reply descriptor pools must
  470          * each fit within a single 4 GB region; the function below checks this.
  471          * In case of failure, the older allocation and pool are destroyed and
  472          * a new pci pool is created with an updated alignment.
  473          * The alignment is chosen so that the next allocation, if it
  474          * succeeds, always meets the same-4 GB-region requirement.
  475          * The actual requirement is not alignment as such; rather, the start
  476          * and end of the DMA address range must share the same upper
  477          * 32 bits.
  478          */
 479 
 480         if (!megasas_check_same_4gb_region(instance, fusion->sense_phys_addr,
 481                                            sense_sz)) {
 482                 dma_pool_free(fusion->sense_dma_pool, fusion->sense,
 483                               fusion->sense_phys_addr);
 484                 fusion->sense = NULL;
 485                 dma_pool_destroy(fusion->sense_dma_pool);
 486 
 487                 fusion->sense_dma_pool =
 488                         dma_pool_create("mr_sense_align", &instance->pdev->dev,
 489                                         sense_sz, roundup_pow_of_two(sense_sz),
 490                                         0);
 491                 if (!fusion->sense_dma_pool) {
 492                         dev_err(&instance->pdev->dev,
 493                                 "Failed from %s %d\n",  __func__, __LINE__);
 494                         return -ENOMEM;
 495                 }
 496                 fusion->sense = dma_pool_alloc(fusion->sense_dma_pool,
 497                                                GFP_KERNEL,
 498                                                &fusion->sense_phys_addr);
 499                 if (!fusion->sense) {
 500                         dev_err(&instance->pdev->dev,
 501                                 "failed from %s %d\n",  __func__, __LINE__);
 502                         return -ENOMEM;
 503                 }
 504         }
 505 
 506         /*
 507          * Allocate and attach a frame to each of the commands in cmd_list
 508          */
 509         for (i = 0; i < max_cmd; i++) {
 510                 cmd = fusion->cmd_list[i];
 511                 cmd->sg_frame = dma_pool_alloc(fusion->sg_dma_pool,
 512                                         GFP_KERNEL, &cmd->sg_frame_phys_addr);
 513 
 514                 offset = SCSI_SENSE_BUFFERSIZE * i;
 515                 cmd->sense = (u8 *)fusion->sense + offset;
 516                 cmd->sense_phys_addr = fusion->sense_phys_addr + offset;
 517 
 518                 if (!cmd->sg_frame) {
 519                         dev_err(&instance->pdev->dev,
 520                                 "Failed from %s %d\n",  __func__, __LINE__);
 521                         return -ENOMEM;
 522                 }
 523         }
 524 
 525         /* create sense buffer for the raid 1/10 fp */
 526         for (i = max_cmd; i < instance->max_mpt_cmds; i++) {
 527                 cmd = fusion->cmd_list[i];
 528                 offset = SCSI_SENSE_BUFFERSIZE * i;
 529                 cmd->sense = (u8 *)fusion->sense + offset;
 530                 cmd->sense_phys_addr = fusion->sense_phys_addr + offset;
 531 
 532         }
 533 
 534         return 0;
 535 }
 536 
 537 static int
 538 megasas_alloc_cmdlist_fusion(struct megasas_instance *instance)
 539 {
 540         u32 max_mpt_cmd, i, j;
 541         struct fusion_context *fusion;
 542 
 543         fusion = instance->ctrl_context;
 544 
 545         max_mpt_cmd = instance->max_mpt_cmds;
 546 
 547         /*
 548          * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers.
 549          * Allocate the dynamic array first and then allocate individual
 550          * commands.
 551          */
 552         fusion->cmd_list =
 553                 kcalloc(max_mpt_cmd, sizeof(struct megasas_cmd_fusion *),
 554                         GFP_KERNEL);
 555         if (!fusion->cmd_list) {
 556                 dev_err(&instance->pdev->dev,
 557                         "Failed from %s %d\n",  __func__, __LINE__);
 558                 return -ENOMEM;
 559         }
 560 
 561         for (i = 0; i < max_mpt_cmd; i++) {
 562                 fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion),
 563                                               GFP_KERNEL);
 564                 if (!fusion->cmd_list[i]) {
 565                         for (j = 0; j < i; j++)
 566                                 kfree(fusion->cmd_list[j]);
 567                         kfree(fusion->cmd_list);
 568                         dev_err(&instance->pdev->dev,
 569                                 "Failed from %s %d\n",  __func__, __LINE__);
 570                         return -ENOMEM;
 571                 }
 572         }
 573 
 574         return 0;
 575 }
 576 
 577 static int
 578 megasas_alloc_request_fusion(struct megasas_instance *instance)
 579 {
 580         struct fusion_context *fusion;
 581 
 582         fusion = instance->ctrl_context;
 583 
 584 retry_alloc:
 585         fusion->io_request_frames_pool =
 586                         dma_pool_create("mr_ioreq", &instance->pdev->dev,
 587                                 fusion->io_frames_alloc_sz, 16, 0);
 588 
 589         if (!fusion->io_request_frames_pool) {
 590                 dev_err(&instance->pdev->dev,
 591                         "Failed from %s %d\n",  __func__, __LINE__);
 592                 return -ENOMEM;
 593         }
 594 
 595         fusion->io_request_frames =
 596                         dma_pool_alloc(fusion->io_request_frames_pool,
 597                                 GFP_KERNEL | __GFP_NOWARN,
 598                                 &fusion->io_request_frames_phys);
 599         if (!fusion->io_request_frames) {
 600                 if (instance->max_fw_cmds >= (MEGASAS_REDUCE_QD_COUNT * 2)) {
 601                         instance->max_fw_cmds -= MEGASAS_REDUCE_QD_COUNT;
 602                         dma_pool_destroy(fusion->io_request_frames_pool);
 603                         megasas_configure_queue_sizes(instance);
 604                         goto retry_alloc;
 605                 } else {
 606                         dev_err(&instance->pdev->dev,
 607                                 "Failed from %s %d\n",  __func__, __LINE__);
 608                         return -ENOMEM;
 609                 }
 610         }
 611 
 612         if (!megasas_check_same_4gb_region(instance,
 613                                            fusion->io_request_frames_phys,
 614                                            fusion->io_frames_alloc_sz)) {
 615                 dma_pool_free(fusion->io_request_frames_pool,
 616                               fusion->io_request_frames,
 617                               fusion->io_request_frames_phys);
 618                 fusion->io_request_frames = NULL;
 619                 dma_pool_destroy(fusion->io_request_frames_pool);
 620 
 621                 fusion->io_request_frames_pool =
 622                         dma_pool_create("mr_ioreq_align",
 623                                         &instance->pdev->dev,
 624                                         fusion->io_frames_alloc_sz,
 625                                         roundup_pow_of_two(fusion->io_frames_alloc_sz),
 626                                         0);
 627 
 628                 if (!fusion->io_request_frames_pool) {
 629                         dev_err(&instance->pdev->dev,
 630                                 "Failed from %s %d\n",  __func__, __LINE__);
 631                         return -ENOMEM;
 632                 }
 633 
 634                 fusion->io_request_frames =
 635                         dma_pool_alloc(fusion->io_request_frames_pool,
 636                                        GFP_KERNEL | __GFP_NOWARN,
 637                                        &fusion->io_request_frames_phys);
 638 
 639                 if (!fusion->io_request_frames) {
 640                         dev_err(&instance->pdev->dev,
 641                                 "Failed from %s %d\n",  __func__, __LINE__);
 642                         return -ENOMEM;
 643                 }
 644         }
 645 
 646         fusion->req_frames_desc =
 647                 dma_alloc_coherent(&instance->pdev->dev,
 648                                    fusion->request_alloc_sz,
 649                                    &fusion->req_frames_desc_phys, GFP_KERNEL);
 650         if (!fusion->req_frames_desc) {
 651                 dev_err(&instance->pdev->dev,
 652                         "Failed from %s %d\n",  __func__, __LINE__);
 653                 return -ENOMEM;
 654         }
 655 
 656         return 0;
 657 }
 658 
 659 static int
 660 megasas_alloc_reply_fusion(struct megasas_instance *instance)
 661 {
 662         int i, count;
 663         struct fusion_context *fusion;
 664         union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
 665         fusion = instance->ctrl_context;
 666 
 667         count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
 668         fusion->reply_frames_desc_pool =
 669                         dma_pool_create("mr_reply", &instance->pdev->dev,
 670                                 fusion->reply_alloc_sz * count, 16, 0);
 671 
 672         if (!fusion->reply_frames_desc_pool) {
 673                 dev_err(&instance->pdev->dev,
 674                         "Failed from %s %d\n",  __func__, __LINE__);
 675                 return -ENOMEM;
 676         }
 677 
 678         fusion->reply_frames_desc[0] =
 679                 dma_pool_alloc(fusion->reply_frames_desc_pool,
 680                         GFP_KERNEL, &fusion->reply_frames_desc_phys[0]);
 681         if (!fusion->reply_frames_desc[0]) {
 682                 dev_err(&instance->pdev->dev,
 683                         "Failed from %s %d\n",  __func__, __LINE__);
 684                 return -ENOMEM;
 685         }
 686 
 687         if (!megasas_check_same_4gb_region(instance,
 688                                            fusion->reply_frames_desc_phys[0],
 689                                            (fusion->reply_alloc_sz * count))) {
 690                 dma_pool_free(fusion->reply_frames_desc_pool,
 691                               fusion->reply_frames_desc[0],
 692                               fusion->reply_frames_desc_phys[0]);
 693                 fusion->reply_frames_desc[0] = NULL;
 694                 dma_pool_destroy(fusion->reply_frames_desc_pool);
 695 
 696                 fusion->reply_frames_desc_pool =
 697                         dma_pool_create("mr_reply_align",
 698                                         &instance->pdev->dev,
 699                                         fusion->reply_alloc_sz * count,
 700                                         roundup_pow_of_two(fusion->reply_alloc_sz * count),
 701                                         0);
 702 
 703                 if (!fusion->reply_frames_desc_pool) {
 704                         dev_err(&instance->pdev->dev,
 705                                 "Failed from %s %d\n",  __func__, __LINE__);
 706                         return -ENOMEM;
 707                 }
 708 
 709                 fusion->reply_frames_desc[0] =
 710                         dma_pool_alloc(fusion->reply_frames_desc_pool,
 711                                        GFP_KERNEL,
 712                                        &fusion->reply_frames_desc_phys[0]);
 713 
 714                 if (!fusion->reply_frames_desc[0]) {
 715                         dev_err(&instance->pdev->dev,
 716                                 "Failed from %s %d\n",  __func__, __LINE__);
 717                         return -ENOMEM;
 718                 }
 719         }
 720 
 721         reply_desc = fusion->reply_frames_desc[0];
 722         for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
 723                 reply_desc->Words = cpu_to_le64(ULLONG_MAX);
 724 
  725         /* This is not RDPQ mode, but the driver still populates the
  726          * reply_frames_desc array to use the same msix index in the ISR path.
  727          */
 728         for (i = 0; i < (count - 1); i++)
 729                 fusion->reply_frames_desc[i + 1] =
 730                         fusion->reply_frames_desc[i] +
 731                         (fusion->reply_alloc_sz)/sizeof(union MPI2_REPLY_DESCRIPTORS_UNION);
 732 
 733         return 0;
 734 }
 735 
 736 static int
 737 megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
 738 {
 739         int i, j, k, msix_count;
 740         struct fusion_context *fusion;
 741         union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
 742         union MPI2_REPLY_DESCRIPTORS_UNION *rdpq_chunk_virt[RDPQ_MAX_CHUNK_COUNT];
 743         dma_addr_t rdpq_chunk_phys[RDPQ_MAX_CHUNK_COUNT];
 744         u8 dma_alloc_count, abs_index;
 745         u32 chunk_size, array_size, offset;
 746 
 747         fusion = instance->ctrl_context;
 748         chunk_size = fusion->reply_alloc_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK;
 749         array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) *
 750                      MAX_MSIX_QUEUES_FUSION;
 751 
 752         fusion->rdpq_virt = dma_alloc_coherent(&instance->pdev->dev,
 753                                                array_size, &fusion->rdpq_phys,
 754                                                GFP_KERNEL);
 755         if (!fusion->rdpq_virt) {
 756                 dev_err(&instance->pdev->dev,
 757                         "Failed from %s %d\n",  __func__, __LINE__);
 758                 return -ENOMEM;
 759         }
 760 
 761         msix_count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
 762 
 763         fusion->reply_frames_desc_pool = dma_pool_create("mr_rdpq",
 764                                                          &instance->pdev->dev,
 765                                                          chunk_size, 16, 0);
 766         fusion->reply_frames_desc_pool_align =
 767                                 dma_pool_create("mr_rdpq_align",
 768                                                 &instance->pdev->dev,
 769                                                 chunk_size,
 770                                                 roundup_pow_of_two(chunk_size),
 771                                                 0);
 772 
 773         if (!fusion->reply_frames_desc_pool ||
 774             !fusion->reply_frames_desc_pool_align) {
 775                 dev_err(&instance->pdev->dev,
 776                         "Failed from %s %d\n",  __func__, __LINE__);
 777                 return -ENOMEM;
 778         }
 779 
 780 /*
  781  * For INVADER_SERIES each set of 8 reply queues (0-7, 8-15, ...) and for
  782  * VENTURA_SERIES each set of 16 reply queues (0-15, 16-31, ...) must lie
  783  * within a 4GB boundary, and the reply queues in a set must share the same
  784  * upper 32 bits of their memory addresses. The driver therefore allocates
  785  * the DMA-able memory for the reply queues accordingly, and applies the
  786  * VENTURA_SERIES limitation to INVADER_SERIES as well.
 787  */
 788         dma_alloc_count = DIV_ROUND_UP(msix_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK);
 789 
 790         for (i = 0; i < dma_alloc_count; i++) {
 791                 rdpq_chunk_virt[i] =
 792                         dma_pool_alloc(fusion->reply_frames_desc_pool,
 793                                        GFP_KERNEL, &rdpq_chunk_phys[i]);
 794                 if (!rdpq_chunk_virt[i]) {
 795                         dev_err(&instance->pdev->dev,
 796                                 "Failed from %s %d\n",  __func__, __LINE__);
 797                         return -ENOMEM;
 798                 }
  799                 /* The reply descriptor pool must fit within a single
  800                  * 4 GB region; the function below checks this.
  801                  * In case of failure, the allocation is retried from a
  802                  * second pci pool created with an updated alignment.
  803                  * For RDPQ buffers, the driver always creates two separate
  804                  * pci pools. The alignment is chosen so that the next
  805                  * allocation, if it succeeds, always meets the same-4 GB-
  806                  * region requirement.
  807                  * rdpq_tracker keeps track of each buffer's physical and
  808                  * virtual address and its pci pool descriptor, which helps
  809                  * the driver when freeing the resources.
  810                  */
 811                 if (!megasas_check_same_4gb_region(instance, rdpq_chunk_phys[i],
 812                                                    chunk_size)) {
 813                         dma_pool_free(fusion->reply_frames_desc_pool,
 814                                       rdpq_chunk_virt[i],
 815                                       rdpq_chunk_phys[i]);
 816 
 817                         rdpq_chunk_virt[i] =
 818                                 dma_pool_alloc(fusion->reply_frames_desc_pool_align,
 819                                                GFP_KERNEL, &rdpq_chunk_phys[i]);
 820                         if (!rdpq_chunk_virt[i]) {
 821                                 dev_err(&instance->pdev->dev,
 822                                         "Failed from %s %d\n",
 823                                         __func__, __LINE__);
 824                                 return -ENOMEM;
 825                         }
 826                         fusion->rdpq_tracker[i].dma_pool_ptr =
 827                                         fusion->reply_frames_desc_pool_align;
 828                 } else {
 829                         fusion->rdpq_tracker[i].dma_pool_ptr =
 830                                         fusion->reply_frames_desc_pool;
 831                 }
 832 
 833                 fusion->rdpq_tracker[i].pool_entry_phys = rdpq_chunk_phys[i];
 834                 fusion->rdpq_tracker[i].pool_entry_virt = rdpq_chunk_virt[i];
 835         }
 836 
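        /* Distribute the msix_count reply queues across the allocated chunks:
         * queue abs_index lives in chunk (abs_index / RDPQ_MAX_INDEX_IN_ONE_CHUNK)
         * at offset (abs_index % RDPQ_MAX_INDEX_IN_ONE_CHUNK) * reply_alloc_sz.
         */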
 837         for (k = 0; k < dma_alloc_count; k++) {
 838                 for (i = 0; i < RDPQ_MAX_INDEX_IN_ONE_CHUNK; i++) {
 839                         abs_index = (k * RDPQ_MAX_INDEX_IN_ONE_CHUNK) + i;
 840 
 841                         if (abs_index == msix_count)
 842                                 break;
 843                         offset = fusion->reply_alloc_sz * i;
 844                         fusion->rdpq_virt[abs_index].RDPQBaseAddress =
 845                                         cpu_to_le64(rdpq_chunk_phys[k] + offset);
 846                         fusion->reply_frames_desc_phys[abs_index] =
 847                                         rdpq_chunk_phys[k] + offset;
 848                         fusion->reply_frames_desc[abs_index] =
 849                                         (union MPI2_REPLY_DESCRIPTORS_UNION *)((u8 *)rdpq_chunk_virt[k] + offset);
 850 
 851                         reply_desc = fusion->reply_frames_desc[abs_index];
 852                         for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++)
  853                                 reply_desc->Words = cpu_to_le64(ULLONG_MAX);
 854                 }
 855         }
 856 
 857         return 0;
 858 }
 859 
 860 static void
 861 megasas_free_rdpq_fusion(struct megasas_instance *instance) {
 862 
 863         int i;
 864         struct fusion_context *fusion;
 865 
 866         fusion = instance->ctrl_context;
 867 
 868         for (i = 0; i < RDPQ_MAX_CHUNK_COUNT; i++) {
 869                 if (fusion->rdpq_tracker[i].pool_entry_virt)
 870                         dma_pool_free(fusion->rdpq_tracker[i].dma_pool_ptr,
 871                                       fusion->rdpq_tracker[i].pool_entry_virt,
 872                                       fusion->rdpq_tracker[i].pool_entry_phys);
 873 
 874         }
 875 
 876         dma_pool_destroy(fusion->reply_frames_desc_pool);
 877         dma_pool_destroy(fusion->reply_frames_desc_pool_align);
 878 
 879         if (fusion->rdpq_virt)
 880                 dma_free_coherent(&instance->pdev->dev,
 881                         sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
 882                         fusion->rdpq_virt, fusion->rdpq_phys);
 883 }
 884 
 885 static void
 886 megasas_free_reply_fusion(struct megasas_instance *instance) {
 887 
 888         struct fusion_context *fusion;
 889 
 890         fusion = instance->ctrl_context;
 891 
 892         if (fusion->reply_frames_desc[0])
 893                 dma_pool_free(fusion->reply_frames_desc_pool,
 894                         fusion->reply_frames_desc[0],
 895                         fusion->reply_frames_desc_phys[0]);
 896 
 897         dma_pool_destroy(fusion->reply_frames_desc_pool);
 898 
 899 }
 900 
 901 
 902 /**
 903  * megasas_alloc_cmds_fusion -  Allocates the command packets
 904  * @instance:           Adapter soft state
 905  *
 906  *
 907  * Each frame has a 32-bit field called context. This context is used to get
 908  * back the megasas_cmd_fusion from the frame when a frame gets completed
 909  * In this driver, the 32 bit values are the indices into an array cmd_list.
 910  * This array is used only to look up the megasas_cmd_fusion given the context.
 911  * The free commands themselves are maintained in a linked list called cmd_pool.
 912  *
 913  * cmds are formed in the io_request and sg_frame members of the
 914  * megasas_cmd_fusion. The context field is used to get a request descriptor
 915  * and is used as SMID of the cmd.
 916  * SMID value range is from 1 to max_fw_cmds.
 917  */
 918 static int
 919 megasas_alloc_cmds_fusion(struct megasas_instance *instance)
 920 {
 921         int i;
 922         struct fusion_context *fusion;
 923         struct megasas_cmd_fusion *cmd;
 924         u32 offset;
 925         dma_addr_t io_req_base_phys;
 926         u8 *io_req_base;
 927 
 928 
 929         fusion = instance->ctrl_context;
 930 
 931         if (megasas_alloc_request_fusion(instance))
 932                 goto fail_exit;
 933 
 934         if (instance->is_rdpq) {
 935                 if (megasas_alloc_rdpq_fusion(instance))
 936                         goto fail_exit;
 937         } else
 938                 if (megasas_alloc_reply_fusion(instance))
 939                         goto fail_exit;
 940 
 941         if (megasas_alloc_cmdlist_fusion(instance))
 942                 goto fail_exit;
 943 
 944         dev_info(&instance->pdev->dev, "Configured max firmware commands: %d\n",
 945                  instance->max_fw_cmds);
 946 
  947         /* The first 256 bytes (SMID 0) are not used. Don't add them to the cmd list */
 948         io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
 949         io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
 950 
 951         /*
 952          * Add all the commands to command pool (fusion->cmd_pool)
 953          */
 954 
 955         /* SMID 0 is reserved. Set SMID/index from 1 */
 956         for (i = 0; i < instance->max_mpt_cmds; i++) {
 957                 cmd = fusion->cmd_list[i];
 958                 offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
 959                 memset(cmd, 0, sizeof(struct megasas_cmd_fusion));
 960                 cmd->index = i + 1;
 961                 cmd->scmd = NULL;
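                /* Indices beyond the SCSI command range but within max_fw_cmds
                 * are the driver's internal (MFI pass-through) slots; record the
                 * index into that internal pool, otherwise mark it as invalid.
                 */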
 962                 cmd->sync_cmd_idx =
 963                 (i >= instance->max_scsi_cmds && i < instance->max_fw_cmds) ?
 964                                 (i - instance->max_scsi_cmds) :
 965                                 (u32)ULONG_MAX; /* Set to Invalid */
 966                 cmd->instance = instance;
 967                 cmd->io_request =
 968                         (struct MPI2_RAID_SCSI_IO_REQUEST *)
 969                   (io_req_base + offset);
 970                 memset(cmd->io_request, 0,
 971                        sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
 972                 cmd->io_request_phys_addr = io_req_base_phys + offset;
 973                 cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
 974         }
 975 
 976         if (megasas_create_sg_sense_fusion(instance))
 977                 goto fail_exit;
 978 
 979         return 0;
 980 
 981 fail_exit:
 982         megasas_free_cmds_fusion(instance);
 983         return -ENOMEM;
 984 }
 985 
 986 /**
 987  * wait_and_poll -      Issues a polling command
 988  * @instance:                   Adapter soft state
 989  * @cmd:                        Command packet to be issued
 990  *
 991  * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
 992  */
 993 int
 994 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
 995         int seconds)
 996 {
 997         int i;
 998         struct megasas_header *frame_hdr = &cmd->frame->hdr;
 999         u32 status_reg;
1000 
1001         u32 msecs = seconds * 1000;
1002 
1003         /*
1004          * Wait for cmd_status to change
1005          */
1006         for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i += 20) {
1007                 rmb();
1008                 msleep(20);
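                /* About every five seconds of polling (and on the first pass),
                 * also check whether the firmware has faulted so the wait can
                 * be abandoned early.
                 */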
1009                 if (!(i % 5000)) {
1010                         status_reg = instance->instancet->read_fw_status_reg(instance)
1011                                         & MFI_STATE_MASK;
1012                         if (status_reg == MFI_STATE_FAULT)
1013                                 break;
1014                 }
1015         }
1016 
1017         if (frame_hdr->cmd_status == MFI_STAT_INVALID_STATUS)
1018                 return DCMD_TIMEOUT;
1019         else if (frame_hdr->cmd_status == MFI_STAT_OK)
1020                 return DCMD_SUCCESS;
1021         else
1022                 return DCMD_FAILED;
1023 }
1024 
1025 /**
1026  * megasas_ioc_init_fusion -    Initializes the FW
1027  * @instance:           Adapter soft state
1028  *
1029  * Issues the IOC Init cmd
1030  */
1031 int
1032 megasas_ioc_init_fusion(struct megasas_instance *instance)
1033 {
1034         struct megasas_init_frame *init_frame;
1035         struct MPI2_IOC_INIT_REQUEST *IOCInitMessage = NULL;
1036         dma_addr_t      ioc_init_handle;
1037         struct megasas_cmd *cmd;
1038         u8 ret, cur_rdpq_mode;
1039         struct fusion_context *fusion;
1040         union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc;
1041         int i;
1042         struct megasas_header *frame_hdr;
1043         const char *sys_info;
1044         MFI_CAPABILITIES *drv_ops;
1045         u32 scratch_pad_1;
1046         ktime_t time;
1047         bool cur_fw_64bit_dma_capable;
1048         bool cur_intr_coalescing;
1049 
1050         fusion = instance->ctrl_context;
1051 
1052         ioc_init_handle = fusion->ioc_init_request_phys;
1053         IOCInitMessage = fusion->ioc_init_request;
1054 
1055         cmd = fusion->ioc_init_cmd;
1056 
1057         scratch_pad_1 = megasas_readl
1058                 (instance, &instance->reg_set->outbound_scratch_pad_1);
1059 
1060         cur_rdpq_mode = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ? 1 : 0;
1061 
1062         if (instance->adapter_type == INVADER_SERIES) {
1063                 cur_fw_64bit_dma_capable =
1064                         (scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET) ? true : false;
1065 
1066                 if (instance->consistent_mask_64bit && !cur_fw_64bit_dma_capable) {
1067                         dev_err(&instance->pdev->dev, "Driver was operating on 64bit "
1068                                 "DMA mask, but upcoming FW does not support 64bit DMA mask\n");
1069                         megaraid_sas_kill_hba(instance);
1070                         ret = 1;
1071                         goto fail_fw_init;
1072                 }
1073         }
1074 
1075         if (instance->is_rdpq && !cur_rdpq_mode) {
1076                 dev_err(&instance->pdev->dev, "Firmware downgrade *NOT SUPPORTED*"
1077                         " from RDPQ mode to non RDPQ mode\n");
1078                 ret = 1;
1079                 goto fail_fw_init;
1080         }
1081 
1082         cur_intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ?
1083                                                         true : false;
1084 
1085         if ((instance->low_latency_index_start ==
1086                 MR_HIGH_IOPS_QUEUE_COUNT) && cur_intr_coalescing)
1087                 instance->perf_mode = MR_BALANCED_PERF_MODE;
1088 
1089         dev_info(&instance->pdev->dev, "Performance mode :%s\n",
1090                 MEGASAS_PERF_MODE_2STR(instance->perf_mode));
1091 
1092         instance->fw_sync_cache_support = (scratch_pad_1 &
1093                 MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
1094         dev_info(&instance->pdev->dev, "FW supports sync cache\t: %s\n",
1095                  instance->fw_sync_cache_support ? "Yes" : "No");
1096 
1097         memset(IOCInitMessage, 0, sizeof(struct MPI2_IOC_INIT_REQUEST));
1098 
1099         IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT;
1100         IOCInitMessage->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
1101         IOCInitMessage->MsgVersion = cpu_to_le16(MPI2_VERSION);
1102         IOCInitMessage->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
1103         IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);
1104 
1105         IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth);
1106         IOCInitMessage->ReplyDescriptorPostQueueAddress = instance->is_rdpq ?
1107                         cpu_to_le64(fusion->rdpq_phys) :
1108                         cpu_to_le64(fusion->reply_frames_desc_phys[0]);
1109         IOCInitMessage->MsgFlags = instance->is_rdpq ?
1110                         MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0;
1111         IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
1112         IOCInitMessage->SenseBufferAddressHigh = cpu_to_le32(upper_32_bits(fusion->sense_phys_addr));
1113         IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
1114         IOCInitMessage->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;
1115 
1116         time = ktime_get_real();
1117         /* Convert to milliseconds as per FW requirement */
1118         IOCInitMessage->TimeStamp = cpu_to_le64(ktime_to_ms(time));
1119 
1120         init_frame = (struct megasas_init_frame *)cmd->frame;
1121         memset(init_frame, 0, IOC_INIT_FRAME_SIZE);
1122 
1123         frame_hdr = &cmd->frame->hdr;
1124         frame_hdr->cmd_status = 0xFF;
1125         frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
1126 
1127         init_frame->cmd = MFI_CMD_INIT;
1128         init_frame->cmd_status = 0xFF;
1129 
1130         drv_ops = (MFI_CAPABILITIES *) &(init_frame->driver_operations);
1131 
 1132         /* driver supports extended MSI-x */
1133         if (instance->adapter_type >= INVADER_SERIES)
1134                 drv_ops->mfi_capabilities.support_additional_msix = 1;
1135         /* driver supports HA / Remote LUN over Fast Path interface */
1136         drv_ops->mfi_capabilities.support_fp_remote_lun = 1;
1137 
1138         drv_ops->mfi_capabilities.support_max_255lds = 1;
1139         drv_ops->mfi_capabilities.support_ndrive_r1_lb = 1;
1140         drv_ops->mfi_capabilities.security_protocol_cmds_fw = 1;
1141 
1142         if (instance->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
1143                 drv_ops->mfi_capabilities.support_ext_io_size = 1;
1144 
1145         drv_ops->mfi_capabilities.support_fp_rlbypass = 1;
1146         if (!dual_qdepth_disable)
1147                 drv_ops->mfi_capabilities.support_ext_queue_depth = 1;
1148 
1149         drv_ops->mfi_capabilities.support_qd_throttling = 1;
1150         drv_ops->mfi_capabilities.support_pd_map_target_id = 1;
1151         drv_ops->mfi_capabilities.support_nvme_passthru = 1;
1152         drv_ops->mfi_capabilities.support_fw_exposed_dev_list = 1;
1153 
1154         if (instance->consistent_mask_64bit)
1155                 drv_ops->mfi_capabilities.support_64bit_mode = 1;
1156 
1157         /* Convert capability to LE32 */
1158         cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
1159 
1160         sys_info = dmi_get_system_info(DMI_PRODUCT_UUID);
1161         if (instance->system_info_buf && sys_info) {
1162                 memcpy(instance->system_info_buf->systemId, sys_info,
1163                         strlen(sys_info) > 64 ? 64 : strlen(sys_info));
1164                 instance->system_info_buf->systemIdLength =
1165                         strlen(sys_info) > 64 ? 64 : strlen(sys_info);
1166                 init_frame->system_info_lo = cpu_to_le32(lower_32_bits(instance->system_info_h));
1167                 init_frame->system_info_hi = cpu_to_le32(upper_32_bits(instance->system_info_h));
1168         }
1169 
1170         init_frame->queue_info_new_phys_addr_hi =
1171                 cpu_to_le32(upper_32_bits(ioc_init_handle));
1172         init_frame->queue_info_new_phys_addr_lo =
1173                 cpu_to_le32(lower_32_bits(ioc_init_handle));
1174         init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));
1175 
1176         /*
1177          * Each bit in replyqueue_mask represents one group of MSI-x vectors
1178          * (each group has 8 vectors)
1179          */
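        /* Illustrative example: with 16 MSI-x vectors in IOPS mode the mask is
         * ~(~0 << 16/8) = 0x3, i.e. the first two groups of 8 vectors; in
         * balanced mode with low_latency_index_start = 8 it would be
         * ~(~0 << 8/8) = 0x1, i.e. only the first group.
         */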
1180         switch (instance->perf_mode) {
1181         case MR_BALANCED_PERF_MODE:
1182                 init_frame->replyqueue_mask =
1183                        cpu_to_le16(~(~0 << instance->low_latency_index_start/8));
1184                 break;
1185         case MR_IOPS_PERF_MODE:
1186                 init_frame->replyqueue_mask =
1187                        cpu_to_le16(~(~0 << instance->msix_vectors/8));
1188                 break;
1189         }
1190 
1191 
1192         req_desc.u.low = cpu_to_le32(lower_32_bits(cmd->frame_phys_addr));
1193         req_desc.u.high = cpu_to_le32(upper_32_bits(cmd->frame_phys_addr));
1194         req_desc.MFAIo.RequestFlags =
1195                 (MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
1196                 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1197 
1198         /*
1199          * disable the intr before firing the init frame
1200          */
1201         instance->instancet->disable_intr(instance);
1202 
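        /* Wait up to ~10 seconds for the doorbell busy bit to clear before
         * posting the IOC INIT descriptor.
         */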
1203         for (i = 0; i < (10 * 1000); i += 20) {
1204                 if (megasas_readl(instance, &instance->reg_set->doorbell) & 1)
1205                         msleep(20);
1206                 else
1207                         break;
1208         }
1209 
1210         /* For AERO also, IOC_INIT requires 64 bit descriptor write */
1211         megasas_write_64bit_req_desc(instance, &req_desc);
1212 
1213         wait_and_poll(instance, cmd, MFI_IO_TIMEOUT_SECS);
1214 
1215         frame_hdr = &cmd->frame->hdr;
1216         if (frame_hdr->cmd_status != 0) {
1217                 ret = 1;
1218                 goto fail_fw_init;
1219         }
1220 
1221         if (instance->adapter_type >= AERO_SERIES) {
1222                 scratch_pad_1 = megasas_readl
1223                         (instance, &instance->reg_set->outbound_scratch_pad_1);
1224 
1225                 instance->atomic_desc_support =
1226                         (scratch_pad_1 & MR_ATOMIC_DESCRIPTOR_SUPPORT_OFFSET) ? 1 : 0;
1227 
1228                 dev_info(&instance->pdev->dev, "FW supports atomic descriptor\t: %s\n",
1229                         instance->atomic_desc_support ? "Yes" : "No");
1230         }
1231 
1232         return 0;
1233 
1234 fail_fw_init:
1235         dev_err(&instance->pdev->dev,
1236                 "Init cmd return status FAILED for SCSI host %d\n",
1237                 instance->host->host_no);
1238 
1239         return ret;
1240 }
1241 
1242 /**
1243  * megasas_sync_pd_seq_num -    JBOD SEQ MAP
1244  * @instance:           Adapter soft state
1245  * @pend:               set to 1 for a pended JBOD map request.
1246  *
1247  * Issue the JBOD sequence map DCMD to the firmware. For a pended request,
1248  * the command is fired and the function returns immediately; otherwise the
1249  * command is issued and its completion is waited for.
1250  */
1251 int
1252 megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
1253         int ret = 0;
1254         size_t pd_seq_map_sz;
1255         struct megasas_cmd *cmd;
1256         struct megasas_dcmd_frame *dcmd;
1257         struct fusion_context *fusion = instance->ctrl_context;
1258         struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1259         dma_addr_t pd_seq_h;
1260 
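        /*
         * Two DMA buffers are kept for the JBOD sequence map; (pd_seq_map_id & 1)
         * selects the buffer for this sync, and pd_seq_map_id is incremented on
         * success below, so successive syncs alternate between the buffers.
         */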
1261         pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id & 1)];
1262         pd_seq_h = fusion->pd_seq_phys[(instance->pd_seq_map_id & 1)];
1263         pd_seq_map_sz = struct_size(pd_sync, seq, MAX_PHYSICAL_DEVICES - 1);
1264 
1265         cmd = megasas_get_cmd(instance);
1266         if (!cmd) {
1267                 dev_err(&instance->pdev->dev,
1268                         "Could not get mfi cmd. Fail from %s %d\n",
1269                         __func__, __LINE__);
1270                 return -ENOMEM;
1271         }
1272 
1273         dcmd = &cmd->frame->dcmd;
1274 
1275         memset(pd_sync, 0, pd_seq_map_sz);
1276         memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
1277 
1278         if (pend) {
1279                 dcmd->mbox.b[0] = MEGASAS_DCMD_MBOX_PEND_FLAG;
1280                 dcmd->flags = MFI_FRAME_DIR_WRITE;
1281                 instance->jbod_seq_cmd = cmd;
1282         } else {
1283                 dcmd->flags = MFI_FRAME_DIR_READ;
1284         }
1285 
1286         dcmd->cmd = MFI_CMD_DCMD;
1287         dcmd->cmd_status = 0xFF;
1288         dcmd->sge_count = 1;
1289         dcmd->timeout = 0;
1290         dcmd->pad_0 = 0;
1291         dcmd->data_xfer_len = cpu_to_le32(pd_seq_map_sz);
1292         dcmd->opcode = cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
1293 
1294         megasas_set_dma_settings(instance, dcmd, pd_seq_h, pd_seq_map_sz);
1295 
1296         if (pend) {
1297                 instance->instancet->issue_dcmd(instance, cmd);
1298                 return 0;
1299         }
1300 
1301         /* The code below is only for a non-pended DCMD */
1302         if (!instance->mask_interrupts)
1303                 ret = megasas_issue_blocked_cmd(instance, cmd,
1304                         MFI_IO_TIMEOUT_SECS);
1305         else
1306                 ret = megasas_issue_polled(instance, cmd);
1307 
1308         if (le32_to_cpu(pd_sync->count) > MAX_PHYSICAL_DEVICES) {
1309                 dev_warn(&instance->pdev->dev,
1310                         "driver supports max %d JBOD, but FW reports %d\n",
1311                         MAX_PHYSICAL_DEVICES, le32_to_cpu(pd_sync->count));
1312                 ret = -EINVAL;
1313         }
1314 
1315         if (ret == DCMD_TIMEOUT)
1316                 megaraid_sas_kill_hba(instance);
1317 
1318         if (ret == DCMD_SUCCESS)
1319                 instance->pd_seq_map_id++;
1320 
1321         megasas_return_cmd(instance, cmd);
1322         return ret;
1323 }
1324 
1325 /*
1326  * megasas_get_ld_map_info -    Returns FW's ld_map structure
1327  * @instance:                           Adapter soft state
1328  *
1329  * Issues an internal command (DCMD) to fetch the FW's LD RAID map.
1330  * The map is then validated by the driver and used to decide whether
1331  * fast path IO can be used.
1332  * dcmd.mbox value setting for MR_DCMD_LD_MAP_GET_INFO
1333  * dcmd.mbox.b[0]       - number of LDs being sync'd
1334  * dcmd.mbox.b[1]       - 0 - complete command immediately.
1335  *                      - 1 - pend till config change
1336  * dcmd.mbox.b[2]       - 0 - supports max 64 lds and uses legacy MR_FW_RAID_MAP
1337  *                      - 1 - supports max MAX_LOGICAL_DRIVES_EXT lds and
1338  *                              uses extended struct MR_FW_RAID_MAP_EXT
1339  */
1340 static int
1341 megasas_get_ld_map_info(struct megasas_instance *instance)
1342 {
1343         int ret = 0;
1344         struct megasas_cmd *cmd;
1345         struct megasas_dcmd_frame *dcmd;
1346         void *ci;
1347         dma_addr_t ci_h = 0;
1348         u32 size_map_info;
1349         struct fusion_context *fusion;
1350 
1351         cmd = megasas_get_cmd(instance);
1352 
1353         if (!cmd) {
1354                 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for map info\n");
1355                 return -ENOMEM;
1356         }
1357 
1358         fusion = instance->ctrl_context;
1359 
1360         if (!fusion) {
1361                 megasas_return_cmd(instance, cmd);
1362                 return -ENXIO;
1363         }
1364 
1365         dcmd = &cmd->frame->dcmd;
1366 
1367         size_map_info = fusion->current_map_sz;
1368 
1369         ci = (void *) fusion->ld_map[(instance->map_id & 1)];
1370         ci_h = fusion->ld_map_phys[(instance->map_id & 1)];
1371 
1372         if (!ci) {
1373                 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ld_map_info\n");
1374                 megasas_return_cmd(instance, cmd);
1375                 return -ENOMEM;
1376         }
1377 
1378         memset(ci, 0, fusion->max_map_sz);
1379         memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
1380         dcmd->cmd = MFI_CMD_DCMD;
1381         dcmd->cmd_status = 0xFF;
1382         dcmd->sge_count = 1;
1383         dcmd->flags = MFI_FRAME_DIR_READ;
1384         dcmd->timeout = 0;
1385         dcmd->pad_0 = 0;
1386         dcmd->data_xfer_len = cpu_to_le32(size_map_info);
1387         dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
1388 
1389         megasas_set_dma_settings(instance, dcmd, ci_h, size_map_info);
1390 
1391         if (!instance->mask_interrupts)
1392                 ret = megasas_issue_blocked_cmd(instance, cmd,
1393                         MFI_IO_TIMEOUT_SECS);
1394         else
1395                 ret = megasas_issue_polled(instance, cmd);
1396 
1397         if (ret == DCMD_TIMEOUT)
1398                 megaraid_sas_kill_hba(instance);
1399 
1400         megasas_return_cmd(instance, cmd);
1401 
1402         return ret;
1403 }
1404 
1405 u8
1406 megasas_get_map_info(struct megasas_instance *instance)
1407 {
1408         struct fusion_context *fusion = instance->ctrl_context;
1409 
1410         fusion->fast_path_io = 0;
1411         if (!megasas_get_ld_map_info(instance)) {
1412                 if (MR_ValidateMapInfo(instance, instance->map_id)) {
1413                         fusion->fast_path_io = 1;
1414                         return 0;
1415                 }
1416         }
1417         return 1;
1418 }
1419 
1420 /*
1421  * megasas_sync_map_info -      Sync the LD map info with FW
1422  * @instance:                           Adapter soft state
1423  *
1424  * Issues a pended internal command (DCMD) that sends each LD's target id
1425  * and sequence number to the FW. The command remains outstanding and
1426  * completes when the FW's LD map changes.
1427  */
1428 int
1429 megasas_sync_map_info(struct megasas_instance *instance)
1430 {
1431         int i;
1432         struct megasas_cmd *cmd;
1433         struct megasas_dcmd_frame *dcmd;
1434         u16 num_lds;
1435         struct fusion_context *fusion;
1436         struct MR_LD_TARGET_SYNC *ci = NULL;
1437         struct MR_DRV_RAID_MAP_ALL *map;
1438         struct MR_LD_RAID  *raid;
1439         struct MR_LD_TARGET_SYNC *ld_sync;
1440         dma_addr_t ci_h = 0;
1441         u32 size_map_info;
1442 
1443         cmd = megasas_get_cmd(instance);
1444 
1445         if (!cmd) {
1446                 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for sync info\n");
1447                 return -ENOMEM;
1448         }
1449 
1450         fusion = instance->ctrl_context;
1451 
1452         if (!fusion) {
1453                 megasas_return_cmd(instance, cmd);
1454                 return 1;
1455         }
1456 
1457         map = fusion->ld_drv_map[instance->map_id & 1];
1458 
1459         num_lds = le16_to_cpu(map->raidMap.ldCount);
1460 
1461         dcmd = &cmd->frame->dcmd;
1462 
1463         memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
1464 
1465         ci = (struct MR_LD_TARGET_SYNC *)
1466           fusion->ld_map[(instance->map_id - 1) & 1];
1467         memset(ci, 0, fusion->max_map_sz);
1468 
1469         ci_h = fusion->ld_map_phys[(instance->map_id - 1) & 1];
1470 
1471         ld_sync = (struct MR_LD_TARGET_SYNC *)ci;
1472 
1473         for (i = 0; i < num_lds; i++, ld_sync++) {
1474                 raid = MR_LdRaidGet(i, map);
1475                 ld_sync->targetId = MR_GetLDTgtId(i, map);
1476                 ld_sync->seqNum = raid->seqNum;
1477         }
1478 
1479         size_map_info = fusion->current_map_sz;
1480 
1481         dcmd->cmd = MFI_CMD_DCMD;
1482         dcmd->cmd_status = 0xFF;
1483         dcmd->sge_count = 1;
1484         dcmd->flags = MFI_FRAME_DIR_WRITE;
1485         dcmd->timeout = 0;
1486         dcmd->pad_0 = 0;
1487         dcmd->data_xfer_len = cpu_to_le32(size_map_info);
1488         dcmd->mbox.b[0] = num_lds;
1489         dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG;
1490         dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
1491 
1492         megasas_set_dma_settings(instance, dcmd, ci_h, size_map_info);
1493 
1494         instance->map_update_cmd = cmd;
1495 
1496         instance->instancet->issue_dcmd(instance, cmd);
1497 
1498         return 0;
1499 }
1500 
1501 /*
1502  * megasas_display_intel_branding - Display branding string
1503  * @instance: per adapter object
1504  *
1505  * Return nothing.
1506  */
1507 static void
1508 megasas_display_intel_branding(struct megasas_instance *instance)
1509 {
1510         if (instance->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
1511                 return;
1512 
1513         switch (instance->pdev->device) {
1514         case PCI_DEVICE_ID_LSI_INVADER:
1515                 switch (instance->pdev->subsystem_device) {
1516                 case MEGARAID_INTEL_RS3DC080_SSDID:
1517                         dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1518                                 instance->host->host_no,
1519                                 MEGARAID_INTEL_RS3DC080_BRANDING);
1520                         break;
1521                 case MEGARAID_INTEL_RS3DC040_SSDID:
1522                         dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1523                                 instance->host->host_no,
1524                                 MEGARAID_INTEL_RS3DC040_BRANDING);
1525                         break;
1526                 case MEGARAID_INTEL_RS3SC008_SSDID:
1527                         dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1528                                 instance->host->host_no,
1529                                 MEGARAID_INTEL_RS3SC008_BRANDING);
1530                         break;
1531                 case MEGARAID_INTEL_RS3MC044_SSDID:
1532                         dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1533                                 instance->host->host_no,
1534                                 MEGARAID_INTEL_RS3MC044_BRANDING);
1535                         break;
1536                 default:
1537                         break;
1538                 }
1539                 break;
1540         case PCI_DEVICE_ID_LSI_FURY:
1541                 switch (instance->pdev->subsystem_device) {
1542                 case MEGARAID_INTEL_RS3WC080_SSDID:
1543                         dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1544                                 instance->host->host_no,
1545                                 MEGARAID_INTEL_RS3WC080_BRANDING);
1546                         break;
1547                 case MEGARAID_INTEL_RS3WC040_SSDID:
1548                         dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1549                                 instance->host->host_no,
1550                                 MEGARAID_INTEL_RS3WC040_BRANDING);
1551                         break;
1552                 default:
1553                         break;
1554                 }
1555                 break;
1556         case PCI_DEVICE_ID_LSI_CUTLASS_52:
1557         case PCI_DEVICE_ID_LSI_CUTLASS_53:
1558                 switch (instance->pdev->subsystem_device) {
1559                 case MEGARAID_INTEL_RMS3BC160_SSDID:
1560                         dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1561                                 instance->host->host_no,
1562                                 MEGARAID_INTEL_RMS3BC160_BRANDING);
1563                         break;
1564                 default:
1565                         break;
1566                 }
1567                 break;
1568         default:
1569                 break;
1570         }
1571 }
1572 
1573 /**
1574  * megasas_allocate_raid_maps - Allocate memory for RAID maps
1575  * @instance:                           Adapter soft state
1576  *
1577  * return:                              0 on success
1578  *                                      -ENOMEM on failure
1579  */
1580 static inline int megasas_allocate_raid_maps(struct megasas_instance *instance)
1581 {
1582         struct fusion_context *fusion;
1583         int i = 0;
1584 
1585         fusion = instance->ctrl_context;
1586 
1587         fusion->drv_map_pages = get_order(fusion->drv_map_sz);
1588 
1589         for (i = 0; i < 2; i++) {
1590                 fusion->ld_map[i] = NULL;
1591 
1592                 fusion->ld_drv_map[i] = (void *)
1593                         __get_free_pages(__GFP_ZERO | GFP_KERNEL,
1594                                          fusion->drv_map_pages);
1595 
1596                 if (!fusion->ld_drv_map[i]) {
1597                         fusion->ld_drv_map[i] = vzalloc(fusion->drv_map_sz);
1598 
1599                         if (!fusion->ld_drv_map[i]) {
1600                                 dev_err(&instance->pdev->dev,
1601                                         "Could not allocate memory for local map"
1602                                         " size requested: %d\n",
1603                                         fusion->drv_map_sz);
1604                                 goto ld_drv_map_alloc_fail;
1605                         }
1606                 }
1607         }
1608 
1609         for (i = 0; i < 2; i++) {
1610                 fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev,
1611                                                        fusion->max_map_sz,
1612                                                        &fusion->ld_map_phys[i],
1613                                                        GFP_KERNEL);
1614                 if (!fusion->ld_map[i]) {
1615                         dev_err(&instance->pdev->dev,
1616                                 "Could not allocate memory for map info %s:%d\n",
1617                                 __func__, __LINE__);
1618                         goto ld_map_alloc_fail;
1619                 }
1620         }
1621 
1622         return 0;
1623 
1624 ld_map_alloc_fail:
1625         for (i = 0; i < 2; i++) {
1626                 if (fusion->ld_map[i])
1627                         dma_free_coherent(&instance->pdev->dev,
1628                                           fusion->max_map_sz,
1629                                           fusion->ld_map[i],
1630                                           fusion->ld_map_phys[i]);
1631         }
1632 
1633 ld_drv_map_alloc_fail:
1634         for (i = 0; i < 2; i++) {
1635                 if (fusion->ld_drv_map[i]) {
1636                         if (is_vmalloc_addr(fusion->ld_drv_map[i]))
1637                                 vfree(fusion->ld_drv_map[i]);
1638                         else
1639                                 free_pages((ulong)fusion->ld_drv_map[i],
1640                                            fusion->drv_map_pages);
1641                 }
1642         }
1643 
1644         return -ENOMEM;
1645 }
1646 
1647 /**
1648  * megasas_configure_queue_sizes -      Calculate size of request desc queue,
1649  *                                      reply desc queue,
1650  *                                      IO request frame queue, set can_queue.
1651  * @instance:                           Adapter soft state
1652  * @return:                             void
1653  */
1654 static inline
1655 void megasas_configure_queue_sizes(struct megasas_instance *instance)
1656 {
1657         struct fusion_context *fusion;
1658         u16 max_cmd;
1659 
1660         fusion = instance->ctrl_context;
1661         max_cmd = instance->max_fw_cmds;
1662 
1663         if (instance->adapter_type >= VENTURA_SERIES)
1664                 instance->max_mpt_cmds = instance->max_fw_cmds * RAID_1_PEER_CMDS;
1665         else
1666                 instance->max_mpt_cmds = instance->max_fw_cmds;
1667 
1668         instance->max_scsi_cmds = instance->max_fw_cmds - instance->max_mfi_cmds;
1669         instance->cur_can_queue = instance->max_scsi_cmds;
1670         instance->host->can_queue = instance->cur_can_queue;
1671 
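        /*
         * For example (value assumed): max_fw_cmds = 1008 gives
         * 2 * ((1008 + 1 + 15) / 16) * 16 = 2 * 64 * 16 = 2048 reply descriptors.
         */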
1672         fusion->reply_q_depth = 2 * ((max_cmd + 1 + 15) / 16) * 16;
1673 
1674         fusion->request_alloc_sz = sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *
1675                                           instance->max_mpt_cmds;
1676         fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION) *
1677                                         (fusion->reply_q_depth);
1678         fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
1679                 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
1680                  * (instance->max_mpt_cmds + 1)); /* Extra 1 for SMID 0 */
1681 }
1682 
1683 static int megasas_alloc_ioc_init_frame(struct megasas_instance *instance)
1684 {
1685         struct fusion_context *fusion;
1686         struct megasas_cmd *cmd;
1687 
1688         fusion = instance->ctrl_context;
1689 
1690         cmd = kzalloc(sizeof(struct megasas_cmd), GFP_KERNEL);
1691 
1692         if (!cmd) {
1693                 dev_err(&instance->pdev->dev, "Failed from func: %s line: %d\n",
1694                         __func__, __LINE__);
1695                 return -ENOMEM;
1696         }
1697 
1698         cmd->frame = dma_alloc_coherent(&instance->pdev->dev,
1699                                         IOC_INIT_FRAME_SIZE,
1700                                         &cmd->frame_phys_addr, GFP_KERNEL);
1701 
1702         if (!cmd->frame) {
1703                 dev_err(&instance->pdev->dev, "Failed from func: %s line: %d\n",
1704                         __func__, __LINE__);
1705                 kfree(cmd);
1706                 return -ENOMEM;
1707         }
1708 
1709         fusion->ioc_init_cmd = cmd;
1710         return 0;
1711 }
1712 
1713 /**
1714  * megasas_free_ioc_init_cmd -  Free IOC INIT command frame
1715  * @instance:           Adapter soft state
1716  */
1717 static inline void megasas_free_ioc_init_cmd(struct megasas_instance *instance)
1718 {
1719         struct fusion_context *fusion;
1720 
1721         fusion = instance->ctrl_context;
1722 
1723         if (fusion->ioc_init_cmd && fusion->ioc_init_cmd->frame)
1724                 dma_free_coherent(&instance->pdev->dev,
1725                                   IOC_INIT_FRAME_SIZE,
1726                                   fusion->ioc_init_cmd->frame,
1727                                   fusion->ioc_init_cmd->frame_phys_addr);
1728 
1729         kfree(fusion->ioc_init_cmd);
1730 }
1731 
1732 /**
1733  * megasas_init_adapter_fusion -        Initializes the FW
1734  * @instance:           Adapter soft state
1735  *
1736  * This is the main function for initializing firmware.
1737  */
1738 static u32
1739 megasas_init_adapter_fusion(struct megasas_instance *instance)
1740 {
1741         struct fusion_context *fusion;
1742         u32 scratch_pad_1;
1743         int i = 0, count;
1744         u32 status_reg;
1745 
1746         fusion = instance->ctrl_context;
1747 
1748         megasas_fusion_update_can_queue(instance, PROBE_CONTEXT);
1749 
1750         /*
1751          * Only the driver's internal DCMDs and IOCTL DCMDs need MFI frames
1752          */
1753         instance->max_mfi_cmds =
1754                 MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS;
1755 
1756         megasas_configure_queue_sizes(instance);
1757 
1758         scratch_pad_1 = megasas_readl(instance,
1759                                       &instance->reg_set->outbound_scratch_pad_1);
1760         /* If scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
1761          * the firmware supports an extended IO chain frame that is 4 times
1762          * larger than on legacy firmware.
1763          * Legacy Firmware - Frame size is (8 * 128) = 1K
1764          * 1M IO Firmware  - Frame size is (8 * 128 * 4)  = 4K
1765          */
1766         if (scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
1767                 instance->max_chain_frame_sz =
1768                         ((scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
1769                         MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_1MB_IO;
1770         else
1771                 instance->max_chain_frame_sz =
1772                         ((scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
1773                         MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_256K_IO;
1774 
1775         if (instance->max_chain_frame_sz < MEGASAS_CHAIN_FRAME_SZ_MIN) {
1776                 dev_warn(&instance->pdev->dev, "frame size %d invalid, fall back to legacy max frame size %d\n",
1777                         instance->max_chain_frame_sz,
1778                         MEGASAS_CHAIN_FRAME_SZ_MIN);
1779                 instance->max_chain_frame_sz = MEGASAS_CHAIN_FRAME_SZ_MIN;
1780         }
1781 
1782         fusion->max_sge_in_main_msg =
1783                 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
1784                         - offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL))/16;
1785 
1786         fusion->max_sge_in_chain =
1787                 instance->max_chain_frame_sz
1788                         / sizeof(union MPI2_SGE_IO_UNION);
1789 
1790         instance->max_num_sge =
1791                 rounddown_pow_of_two(fusion->max_sge_in_main_msg
1792                         + fusion->max_sge_in_chain - 2);
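        /*
         * Rough illustration (header values assumed): with a 256-byte default
         * IO frame, the SGL at offset 128 and 16-byte IEEE SGEs, the main
         * message holds (256 - 128) / 16 = 8 SGEs; a minimum 1K chain frame
         * then adds 1024 / 16 = 64 more, and max_num_sge is rounded down to a
         * power of two: rounddown_pow_of_two(8 + 64 - 2) = 64.
         */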
1793 
1794         /* Used for pass thru MFI frame (DCMD) */
1795         fusion->chain_offset_mfi_pthru =
1796                 offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL)/16;
1797 
1798         fusion->chain_offset_io_request =
1799                 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
1800                  sizeof(union MPI2_SGE_IO_UNION))/16;
1801 
1802         count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
1803         for (i = 0 ; i < count; i++)
1804                 fusion->last_reply_idx[i] = 0;
1805 
1806         /*
1807          * For fusion adapters, 3 commands for IOCTL and 8 commands
1808          * for driver's internal DCMDs.
1809          */
1810         instance->max_scsi_cmds = instance->max_fw_cmds -
1811                                 (MEGASAS_FUSION_INTERNAL_CMDS +
1812                                 MEGASAS_FUSION_IOCTL_CMDS);
1813         sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS);
1814 
1815         if (megasas_alloc_ioc_init_frame(instance))
1816                 return 1;
1817 
1818         /*
1819          * Allocate memory for descriptors
1820          * Create a pool of commands
1821          */
1822         if (megasas_alloc_cmds(instance))
1823                 goto fail_alloc_mfi_cmds;
1824         if (megasas_alloc_cmds_fusion(instance))
1825                 goto fail_alloc_cmds;
1826 
1827         if (megasas_ioc_init_fusion(instance)) {
1828                 status_reg = instance->instancet->read_fw_status_reg(instance);
1829                 if (((status_reg & MFI_STATE_MASK) == MFI_STATE_FAULT) &&
1830                     (status_reg & MFI_RESET_ADAPTER)) {
1831                         /* Do a chip reset and then retry IOC INIT once */
1832                         if (megasas_adp_reset_wait_for_ready
1833                                 (instance, true, 0) == FAILED)
1834                                 goto fail_ioc_init;
1835 
1836                         if (megasas_ioc_init_fusion(instance))
1837                                 goto fail_ioc_init;
1838                 } else {
1839                         goto fail_ioc_init;
1840                 }
1841         }
1842 
1843         megasas_display_intel_branding(instance);
1844         if (megasas_get_ctrl_info(instance)) {
1845                 dev_err(&instance->pdev->dev,
1846                         "Could not get controller info. Fail from %s %d\n",
1847                         __func__, __LINE__);
1848                 goto fail_ioc_init;
1849         }
1850 
1851         instance->flag_ieee = 1;
1852         instance->r1_ldio_hint_default =  MR_R1_LDIO_PIGGYBACK_DEFAULT;
1853         instance->threshold_reply_count = instance->max_fw_cmds / 4;
1854         fusion->fast_path_io = 0;
1855 
1856         if (megasas_allocate_raid_maps(instance))
1857                 goto fail_ioc_init;
1858 
1859         if (!megasas_get_map_info(instance))
1860                 megasas_sync_map_info(instance);
1861 
1862         return 0;
1863 
1864 fail_ioc_init:
1865         megasas_free_cmds_fusion(instance);
1866 fail_alloc_cmds:
1867         megasas_free_cmds(instance);
1868 fail_alloc_mfi_cmds:
1869         megasas_free_ioc_init_cmd(instance);
1870         return 1;
1871 }
1872 
1873 /**
1874  * megasas_fault_detect_work    -       Worker function of
1875  *                                      FW fault handling workqueue.
1876  */
1877 static void
1878 megasas_fault_detect_work(struct work_struct *work)
1879 {
1880         struct megasas_instance *instance =
1881                 container_of(work, struct megasas_instance,
1882                              fw_fault_work.work);
1883         u32 fw_state, dma_state, status;
1884 
1885         /* Check the fw state */
1886         fw_state = instance->instancet->read_fw_status_reg(instance) &
1887                         MFI_STATE_MASK;
1888 
1889         if (fw_state == MFI_STATE_FAULT) {
1890                 dma_state = instance->instancet->read_fw_status_reg(instance) &
1891                                 MFI_STATE_DMADONE;
1892                 /* Start collecting crash, if DMA bit is done */
1893                 if (instance->crash_dump_drv_support &&
1894                     instance->crash_dump_app_support && dma_state) {
1895                         megasas_fusion_crash_dump(instance);
1896                 } else {
1897                         if (instance->unload == 0) {
1898                                 status = megasas_reset_fusion(instance->host, 0);
1899                                 if (status != SUCCESS) {
1900                                         dev_err(&instance->pdev->dev,
1901                                                 "Failed from %s %d, do not re-arm timer\n",
1902                                                 __func__, __LINE__);
1903                                         return;
1904                                 }
1905                         }
1906                 }
1907         }
1908 
1909         if (instance->fw_fault_work_q)
1910                 queue_delayed_work(instance->fw_fault_work_q,
1911                         &instance->fw_fault_work,
1912                         msecs_to_jiffies(MEGASAS_WATCHDOG_THREAD_INTERVAL));
1913 }
1914 
1915 int
1916 megasas_fusion_start_watchdog(struct megasas_instance *instance)
1917 {
1918         /* Check if the Fault WQ is already started */
1919         if (instance->fw_fault_work_q)
1920                 return SUCCESS;
1921 
1922         INIT_DELAYED_WORK(&instance->fw_fault_work, megasas_fault_detect_work);
1923 
1924         snprintf(instance->fault_handler_work_q_name,
1925                  sizeof(instance->fault_handler_work_q_name),
1926                  "poll_megasas%d_status", instance->host->host_no);
1927 
1928         instance->fw_fault_work_q =
1929                 create_singlethread_workqueue(instance->fault_handler_work_q_name);
1930         if (!instance->fw_fault_work_q) {
1931                 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1932                         __func__, __LINE__);
1933                 return FAILED;
1934         }
1935 
1936         queue_delayed_work(instance->fw_fault_work_q,
1937                            &instance->fw_fault_work,
1938                            msecs_to_jiffies(MEGASAS_WATCHDOG_THREAD_INTERVAL));
1939 
1940         return SUCCESS;
1941 }
1942 
1943 void
1944 megasas_fusion_stop_watchdog(struct megasas_instance *instance)
1945 {
1946         struct workqueue_struct *wq;
1947 
1948         if (instance->fw_fault_work_q) {
1949                 wq = instance->fw_fault_work_q;
1950                 instance->fw_fault_work_q = NULL;
1951                 if (!cancel_delayed_work_sync(&instance->fw_fault_work))
1952                         flush_workqueue(wq);
1953                 destroy_workqueue(wq);
1954         }
1955 }
1956 
1957 /**
1958  * map_cmd_status -     Maps FW cmd status to OS cmd status
1959  * @fusion :            Fusion context
1960  * @scmd :              SCSI command from the mid-layer
1961  * @status / @ext_status :      status and extended status returned by FW
1962  */
1963 
1964 static void
1965 map_cmd_status(struct fusion_context *fusion,
1966                 struct scsi_cmnd *scmd, u8 status, u8 ext_status,
1967                 u32 data_length, u8 *sense)
1968 {
1969         u8 cmd_type;
1970         int resid;
1971 
1972         cmd_type = megasas_cmd_type(scmd);
1973         switch (status) {
1974 
1975         case MFI_STAT_OK:
1976                 scmd->result = DID_OK << 16;
1977                 break;
1978 
1979         case MFI_STAT_SCSI_IO_FAILED:
1980         case MFI_STAT_LD_INIT_IN_PROGRESS:
1981                 scmd->result = (DID_ERROR << 16) | ext_status;
1982                 break;
1983 
1984         case MFI_STAT_SCSI_DONE_WITH_ERROR:
1985 
1986                 scmd->result = (DID_OK << 16) | ext_status;
1987                 if (ext_status == SAM_STAT_CHECK_CONDITION) {
1988                         memset(scmd->sense_buffer, 0,
1989                                SCSI_SENSE_BUFFERSIZE);
1990                         memcpy(scmd->sense_buffer, sense,
1991                                SCSI_SENSE_BUFFERSIZE);
1992                         scmd->result |= DRIVER_SENSE << 24;
1993                 }
1994 
1995                 /*
1996                  * If the IO request is partially completed, the MR FW updates
1997                  * the "io_request->DataLength" field with the actual number of
1998                  * bytes transferred. The driver then sets the residual byte
1999                  * count in the SCSI command structure.
2000                  */
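                /*
                 * For example, a 64 KB read that the FW completes with
                 * DataLength = 48 KB leaves a 16 KB residual, reported to the
                 * SCSI layer via scsi_set_resid() below.
                 */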
2001                 resid = (scsi_bufflen(scmd) - data_length);
2002                 scsi_set_resid(scmd, resid);
2003 
2004                 if (resid &&
2005                         ((cmd_type == READ_WRITE_LDIO) ||
2006                         (cmd_type == READ_WRITE_SYSPDIO)))
2007                         scmd_printk(KERN_INFO, scmd, "BRCM Debug mfi stat 0x%x, data len"
2008                                 " requested/completed 0x%x/0x%x\n",
2009                                 status, scsi_bufflen(scmd), data_length);
2010                 break;
2011 
2012         case MFI_STAT_LD_OFFLINE:
2013         case MFI_STAT_DEVICE_NOT_FOUND:
2014                 scmd->result = DID_BAD_TARGET << 16;
2015                 break;
2016         case MFI_STAT_CONFIG_SEQ_MISMATCH:
2017                 scmd->result = DID_IMM_RETRY << 16;
2018                 break;
2019         default:
2020                 scmd->result = DID_ERROR << 16;
2021                 break;
2022         }
2023 }
2024 
2025 /**
2026  * megasas_is_prp_possible -
2027  * Checks if native NVMe PRPs can be built for the IO
2028  *
2029  * @instance:           Adapter soft state
2030  * @scmd:               SCSI command from the mid-layer
2031  * @sge_count:          scatter gather element count.
2032  *
2033  * Returns:             true: PRPs can be built
2034  *                      false: IEEE SGLs need to be built
2035  */
2036 static bool
2037 megasas_is_prp_possible(struct megasas_instance *instance,
2038                         struct scsi_cmnd *scmd, int sge_count)
2039 {
2040         int i;
2041         u32 data_length = 0;
2042         struct scatterlist *sg_scmd;
2043         bool build_prp = false;
2044         u32 mr_nvme_pg_size;
2045 
2046         mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
2047                                 MR_DEFAULT_NVME_PAGE_SIZE);
2048         data_length = scsi_bufflen(scmd);
2049         sg_scmd = scsi_sglist(scmd);
2050 
2051         /*
2052          * NVMe uses one PRP for each page (or part of a page).
2053          * Look at the data length: if 4 pages or less, IEEE SGLs are OK;
2054          * if > 5 pages, we need to build native (PRP) SGLs;
2055          * if > 4 and <= 5 pages, check the first SG entry - if its size is
2056          * >= the residual beyond 4 pages use IEEE, otherwise use native
2057          * (PRP) SGLs.
2058          */
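        /*
         * Worked example (assuming a 4 KB NVMe page size): a 24 KB transfer
         * (more than 5 pages) always selects PRPs; an 18 KB transfer (between
         * 4 and 5 pages) selects PRPs only if the first SGE is shorter than
         * the 2 KB residual beyond the first four pages.
         */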
2059 
2060         if (data_length > (mr_nvme_pg_size * 5)) {
2061                 build_prp = true;
2062         } else if ((data_length > (mr_nvme_pg_size * 4)) &&
2063                         (data_length <= (mr_nvme_pg_size * 5)))  {
2064                 /* check if 1st SG entry size is < residual beyond 4 pages */
2065                 if (sg_dma_len(sg_scmd) < (data_length - (mr_nvme_pg_size * 4)))
2066                         build_prp = true;
2067         }
2068 
2069 /*
2070  * Below code detects gaps/holes in IO data buffers.
2071  * What do holes/gaps mean?
2072  * Any SGE except the first one in an SGL starts at a non NVME page size
2073  * aligned address, OR any SGE except the last one in an SGL ends at a
2074  * non NVME page size boundary.
2075  *
2076  * The driver has already told the block layer to merge bios only at NVME
2077  * page size boundaries by calling the kernel API blk_queue_virt_boundary()
2078  * inside slave_config.
2079  * Still, an IO with holes may reach the driver because of IO merging done
2080  * by the IO scheduler.
2081  *
2082  * With SCSI BLK MQ enabled, there will be no IO with holes as there is no
2083  * IO scheduling and hence no IO merging.
2084  *
2085  * With SCSI BLK MQ disabled, the IO scheduler may attempt to merge IOs and
2086  * then send IOs with holes to the driver.
2087  *
2088  * Though the driver can ask the block layer to disable IO merging by
2089  * calling blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue),
2090  * the user may tune the sysfs parameter 'nomerges' back to 0 or 1.
2091  *
2092  * If IO scheduling is enabled with SCSI BLK MQ in the future, this hole
2093  * detection algorithm will be required for the SCSI BLK MQ enabled case
2094  * as well.
2095  */
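        /*
         * Illustration (4 KB page size assumed): the two-entry list
         * { 0x1000 / len 0x2000, 0x4000 / len 0x1000 } has no hole - the first
         * SGE ends on a page boundary and the last starts on one - while
         * { 0x1000 / len 0x1800, 0x3000 / len 0x1000 } has a hole because the
         * first SGE ends mid-page, so the IO falls back to IEEE SGLs.
         */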
2098         scsi_for_each_sg(scmd, sg_scmd, sge_count, i) {
2099                 if ((i != 0) && (i != (sge_count - 1))) {
2100                         if (mega_mod64(sg_dma_len(sg_scmd), mr_nvme_pg_size) ||
2101                             mega_mod64(sg_dma_address(sg_scmd),
2102                                        mr_nvme_pg_size)) {
2103                                 build_prp = false;
2104                                 break;
2105                         }
2106                 }
2107 
2108                 if ((sge_count > 1) && (i == 0)) {
2109                         if ((mega_mod64((sg_dma_address(sg_scmd) +
2110                                         sg_dma_len(sg_scmd)),
2111                                         mr_nvme_pg_size))) {
2112                                 build_prp = false;
2113                                 break;
2114                         }
2115                 }
2116 
2117                 if ((sge_count > 1) && (i == (sge_count - 1))) {
2118                         if (mega_mod64(sg_dma_address(sg_scmd),
2119                                        mr_nvme_pg_size)) {
2120                                 build_prp = false;
2121                                 break;
2122                         }
2123                 }
2124         }
2125 
2126         return build_prp;
2127 }
2128 
2129 /**
2130  * megasas_make_prp_nvme -
2131  * Prepare PRPs(Physical Region Page)- SGLs specific to NVMe drives only
2132  *
2133  * @instance:           Adapter soft state
2134  * @scmd:               SCSI command from the mid-layer
2135  * @sgl_ptr:            SGL to be filled in
2136  * @cmd:                Fusion command frame
2137  * @sge_count:          scatter gather element count.
2138  *
2139  * Returns:             true: PRPs are built
2140  *                      false: IEEE SGLs need to be built
2141  */
2142 static bool
2143 megasas_make_prp_nvme(struct megasas_instance *instance, struct scsi_cmnd *scmd,
2144                       struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr,
2145                       struct megasas_cmd_fusion *cmd, int sge_count)
2146 {
2147         int sge_len, offset, num_prp_in_chain = 0;
2148         struct MPI25_IEEE_SGE_CHAIN64 *main_chain_element, *ptr_first_sgl;
2149         u64 *ptr_sgl;
2150         dma_addr_t ptr_sgl_phys;
2151         u64 sge_addr;
2152         u32 page_mask, page_mask_result;
2153         struct scatterlist *sg_scmd;
2154         u32 first_prp_len;
2155         bool build_prp = false;
2156         int data_len = scsi_bufflen(scmd);
2157         u32 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
2158                                         MR_DEFAULT_NVME_PAGE_SIZE);
2159 
2160         build_prp = megasas_is_prp_possible(instance, scmd, sge_count);
2161 
2162         if (!build_prp)
2163                 return false;
2164 
2165         /*
2166          * Nvme has a very convoluted prp format.  One prp is required
2167          * for each page or partial page. The driver needs to split up OS
2168          * sg_list entries if they are longer than one page or cross a page
2169          * boundary.  The driver also has to insert a PRP list pointer entry as
2170          * the last entry in each physical page of the PRP list.
2171          *
2172          * NOTE: The first PRP "entry" is actually placed in the first
2173          * SGL entry in the main message as IEEE 64 format.  The 2nd
2174          * entry in the main message is the chain element, and the rest
2175          * of the PRP entries are built in the contiguous pcie buffer.
2176          */
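        /*
         * For example (4 KB NVMe page size assumed): a page-aligned 12 KB
         * transfer uses the inline first PRP for the first page and two more
         * PRPs in the chain buffer for the remaining 8 KB.
         */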
2177         page_mask = mr_nvme_pg_size - 1;
2178         ptr_sgl = (u64 *)cmd->sg_frame;
2179         ptr_sgl_phys = cmd->sg_frame_phys_addr;
2180         memset(ptr_sgl, 0, instance->max_chain_frame_sz);
2181 
2182         /* Build chain frame element which holds all PRPs except the first */
2183         main_chain_element = (struct MPI25_IEEE_SGE_CHAIN64 *)
2184             ((u8 *)sgl_ptr + sizeof(struct MPI25_IEEE_SGE_CHAIN64));
2185 
2186         main_chain_element->Address = cpu_to_le64(ptr_sgl_phys);
2187         main_chain_element->NextChainOffset = 0;
2188         main_chain_element->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2189                                         IEEE_SGE_FLAGS_SYSTEM_ADDR |
2190                                         MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
2191 
2192         /* Build the first PRP; the SGE need not be page aligned */
2193         ptr_first_sgl = sgl_ptr;
2194         sg_scmd = scsi_sglist(scmd);
2195         sge_addr = sg_dma_address(sg_scmd);
2196         sge_len = sg_dma_len(sg_scmd);
2197 
2198         offset = (u32)(sge_addr & page_mask);
2199         first_prp_len = mr_nvme_pg_size - offset;
2200 
2201         ptr_first_sgl->Address = cpu_to_le64(sge_addr);
2202         ptr_first_sgl->Length = cpu_to_le32(first_prp_len);
2203 
2204         data_len -= first_prp_len;
2205 
2206         if (sge_len > first_prp_len) {
2207                 sge_addr += first_prp_len;
2208                 sge_len -= first_prp_len;
2209         } else if (sge_len == first_prp_len) {
2210                 sg_scmd = sg_next(sg_scmd);
2211                 sge_addr = sg_dma_address(sg_scmd);
2212                 sge_len = sg_dma_len(sg_scmd);
2213         }
2214 
2215         for (;;) {
2216                 offset = (u32)(sge_addr & page_mask);
2217 
2218                 /* Put PRP pointer due to page boundary*/
2219                 page_mask_result = (uintptr_t)(ptr_sgl + 1) & page_mask;
2220                 if (unlikely(!page_mask_result)) {
2221                         scmd_printk(KERN_NOTICE,
2222                                     scmd, "page boundary ptr_sgl: 0x%p\n",
2223                                     ptr_sgl);
2224                         ptr_sgl_phys += 8;
2225                         *ptr_sgl = cpu_to_le64(ptr_sgl_phys);
2226                         ptr_sgl++;
2227                         num_prp_in_chain++;
2228                 }
2229 
2230                 *ptr_sgl = cpu_to_le64(sge_addr);
2231                 ptr_sgl++;
2232                 ptr_sgl_phys += 8;
2233                 num_prp_in_chain++;
2234 
2235                 sge_addr += mr_nvme_pg_size;
2236                 sge_len -= mr_nvme_pg_size;
2237                 data_len -= mr_nvme_pg_size;
2238 
2239                 if (data_len <= 0)
2240                         break;
2241 
2242                 if (sge_len > 0)
2243                         continue;
2244 
2245                 sg_scmd = sg_next(sg_scmd);
2246                 sge_addr = sg_dma_address(sg_scmd);
2247                 sge_len = sg_dma_len(sg_scmd);
2248         }
2249 
2250         main_chain_element->Length =
2251                         cpu_to_le32(num_prp_in_chain * sizeof(u64));
2252 
2253         return build_prp;
2254 }
2255 
2256 /**
2257  * megasas_make_sgl_fusion -    Prepares IEEE SGL (scatter gather list)
2258  * @instance:           Adapter soft state
2259  * @scp:                SCSI command from the mid-layer
2260  * @sgl_ptr:            SGL to be filled in
2261  * @cmd:                cmd we are working on
2262  * @sge_count:          sge count
2263  *
2264  */
2265 static void
2266 megasas_make_sgl_fusion(struct megasas_instance *instance,
2267                         struct scsi_cmnd *scp,
2268                         struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr,
2269                         struct megasas_cmd_fusion *cmd, int sge_count)
2270 {
2271         int i, sg_processed;
2272         struct scatterlist *os_sgl;
2273         struct fusion_context *fusion;
2274 
2275         fusion = instance->ctrl_context;
2276 
2277         if (instance->adapter_type >= INVADER_SERIES) {
2278                 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr;
2279                 sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
2280                 sgl_ptr_end->Flags = 0;
2281         }
2282 
2283         scsi_for_each_sg(scp, os_sgl, sge_count, i) {
2284                 sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl));
2285                 sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl));
2286                 sgl_ptr->Flags = 0;
2287                 if (instance->adapter_type >= INVADER_SERIES)
2288                         if (i == sge_count - 1)
2289                                 sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
2290                 sgl_ptr++;
2291                 sg_processed = i + 1;
2292 
2293                 if ((sg_processed ==  (fusion->max_sge_in_main_msg - 1)) &&
2294                     (sge_count > fusion->max_sge_in_main_msg)) {
2295 
2296                         struct MPI25_IEEE_SGE_CHAIN64 *sg_chain;
2297                         if (instance->adapter_type >= INVADER_SERIES) {
2298                                 if ((le16_to_cpu(cmd->io_request->IoFlags) &
2299                                         MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
2300                                         MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
2301                                         cmd->io_request->ChainOffset =
2302                                                 fusion->
2303                                                 chain_offset_io_request;
2304                                 else
2305                                         cmd->io_request->ChainOffset = 0;
2306                         } else
2307                                 cmd->io_request->ChainOffset =
2308                                         fusion->chain_offset_io_request;
2309 
2310                         sg_chain = sgl_ptr;
2311                         /* Prepare chain element */
2312                         sg_chain->NextChainOffset = 0;
2313                         if (instance->adapter_type >= INVADER_SERIES)
2314                                 sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
2315                         else
2316                                 sg_chain->Flags =
2317                                         (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2318                                          MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
2319                         sg_chain->Length =  cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION) * (sge_count - sg_processed)));
2320                         sg_chain->Address = cpu_to_le64(cmd->sg_frame_phys_addr);
2321 
2322                         sgl_ptr =
2323                           (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame;
2324                         memset(sgl_ptr, 0, instance->max_chain_frame_sz);
2325                 }
2326         }
2327 }
2328 
2329 /**
2330  * megasas_make_sgl -   Build Scatter Gather List(SGLs)
2331  * @scp:                SCSI command pointer
2332  * @instance:           Soft instance of controller
2333  * @cmd:                Fusion command pointer
2334  *
2335  * This function will build sgls based on device type.
2336  * For NVMe drives, SGLs may instead be built in the NVMe native
2337  * format - PRPs (Physical Region Pages).
2338  *
2339  * Returns the number of SG entries actually mapped, zero if the SG list
2340  * is empty, or -ENOMEM if the mapping failed
2341  */
2342 static
2343 int megasas_make_sgl(struct megasas_instance *instance, struct scsi_cmnd *scp,
2344                      struct megasas_cmd_fusion *cmd)
2345 {
2346         int sge_count;
2347         bool build_prp = false;
2348         struct MPI25_IEEE_SGE_CHAIN64 *sgl_chain64;
2349 
2350         sge_count = scsi_dma_map(scp);
2351 
2352         if ((sge_count > instance->max_num_sge) || (sge_count <= 0))
2353                 return sge_count;
2354 
2355         sgl_chain64 = (struct MPI25_IEEE_SGE_CHAIN64 *)&cmd->io_request->SGL;
2356         if ((le16_to_cpu(cmd->io_request->IoFlags) &
2357             MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) &&
2358             (cmd->pd_interface == NVME_PD))
2359                 build_prp = megasas_make_prp_nvme(instance, scp, sgl_chain64,
2360                                                   cmd, sge_count);
2361 
2362         if (!build_prp)
2363                 megasas_make_sgl_fusion(instance, scp, sgl_chain64,
2364                                         cmd, sge_count);
2365 
2366         return sge_count;
2367 }
2368 
2369 /**
2370  * megasas_set_pd_lba - Sets PD LBA
2371  * @io_request:         IO request frame whose CDB is filled in
2372  * @cdb_len:            cdb length
2373  * @io_info:            IO info (pdBlock start block, number of blocks)
2374  * @scp:                SCSI command; @local_map_ptr / @ref_tag: RAID map, DIF ref tag
2375  * Used to set the PD LBA in the CDB for FP IOs
2376  */
2377 static void
2378 megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
2379                    struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp,
2380                    struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
2381 {
2382         struct MR_LD_RAID *raid;
2383         u16 ld;
2384         u64 start_blk = io_info->pdBlock;
2385         u8 *cdb = io_request->CDB.CDB32;
2386         u32 num_blocks = io_info->numBlocks;
2387         u8 opcode = 0, flagvals = 0, groupnum = 0, control = 0;
2388 
2389         /* Check if T10 PI (DIF) is enabled for this LD */
2390         ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
2391         raid = MR_LdRaidGet(ld, local_map_ptr);
2392         if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) {
2393                 memset(cdb, 0, sizeof(io_request->CDB.CDB32));
2394                 cdb[0] =  MEGASAS_SCSI_VARIABLE_LENGTH_CMD;
2395                 cdb[7] =  MEGASAS_SCSI_ADDL_CDB_LEN;
2396 
2397                 if (scp->sc_data_direction == DMA_FROM_DEVICE)
2398                         cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32;
2399                 else
2400                         cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32;
2401                 cdb[10] = MEGASAS_RD_WR_PROTECT_CHECK_ALL;
2402 
2403                 /* LBA */
2404                 cdb[12] = (u8)((start_blk >> 56) & 0xff);
2405                 cdb[13] = (u8)((start_blk >> 48) & 0xff);
2406                 cdb[14] = (u8)((start_blk >> 40) & 0xff);
2407                 cdb[15] = (u8)((start_blk >> 32) & 0xff);
2408                 cdb[16] = (u8)((start_blk >> 24) & 0xff);
2409                 cdb[17] = (u8)((start_blk >> 16) & 0xff);
2410                 cdb[18] = (u8)((start_blk >> 8) & 0xff);
2411                 cdb[19] = (u8)(start_blk & 0xff);
2412 
2413                 /* Logical block reference tag */
2414                 io_request->CDB.EEDP32.PrimaryReferenceTag =
2415                         cpu_to_be32(ref_tag);
2416                 io_request->CDB.EEDP32.PrimaryApplicationTagMask = cpu_to_be16(0xffff);
2417                 io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */
2418 
2419                 /* Transfer length */
2420                 cdb[28] = (u8)((num_blocks >> 24) & 0xff);
2421                 cdb[29] = (u8)((num_blocks >> 16) & 0xff);
2422                 cdb[30] = (u8)((num_blocks >> 8) & 0xff);
2423                 cdb[31] = (u8)(num_blocks & 0xff);
2424 
2425                 /* set SCSI IO EEDPFlags */
2426                 if (scp->sc_data_direction == DMA_FROM_DEVICE) {
2427                         io_request->EEDPFlags = cpu_to_le16(
2428                                 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG  |
2429                                 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
2430                                 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
2431                                 MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
2432                                 MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE |
2433                                 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
2434                 } else {
2435                         io_request->EEDPFlags = cpu_to_le16(
2436                                 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2437                                 MPI2_SCSIIO_EEDPFLAGS_INSERT_OP);
2438                 }
2439                 io_request->Control |= cpu_to_le32((0x4 << 26));
2440                 io_request->EEDPBlockSize = cpu_to_le32(scp->device->sector_size);
2441         } else {
2442                 /* Some drives don't support 16/12 byte CDBs, convert to 10 */
2443                 if (((cdb_len == 12) || (cdb_len == 16)) &&
2444                     (start_blk <= 0xffffffff)) {
2445                         if (cdb_len == 16) {
2446                                 opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
2447                                 flagvals = cdb[1];
2448                                 groupnum = cdb[14];
2449                                 control = cdb[15];
2450                         } else {
2451                                 opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
2452                                 flagvals = cdb[1];
2453                                 groupnum = cdb[10];
2454                                 control = cdb[11];
2455                         }
2456 
2457                         memset(cdb, 0, sizeof(io_request->CDB.CDB32));
2458 
2459                         cdb[0] = opcode;
2460                         cdb[1] = flagvals;
2461                         cdb[6] = groupnum;
2462                         cdb[9] = control;
2463 
2464                         /* Transfer length */
2465                         cdb[8] = (u8)(num_blocks & 0xff);
2466                         cdb[7] = (u8)((num_blocks >> 8) & 0xff);
2467 
2468                         io_request->IoFlags = cpu_to_le16(10); /* Specify 10-byte cdb */
2469                         cdb_len = 10;
2470                 } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
2471                         /* Convert to 16 byte CDB for large LBAs */
2472                         switch (cdb_len) {
2473                         case 6:
2474                                 opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
2475                                 control = cdb[5];
2476                                 break;
2477                         case 10:
2478                                 opcode =
2479                                         cdb[0] == READ_10 ? READ_16 : WRITE_16;
2480                                 flagvals = cdb[1];
2481                                 groupnum = cdb[6];
2482                                 control = cdb[9];
2483                                 break;
2484                         case 12:
2485                                 opcode =
2486                                         cdb[0] == READ_12 ? READ_16 : WRITE_16;
2487                                 flagvals = cdb[1];
2488                                 groupnum = cdb[10];
2489                                 control = cdb[11];
2490                                 break;
2491                         }
2492 
2493                         memset(cdb, 0, sizeof(io_request->CDB.CDB32));
2494 
2495                         cdb[0] = opcode;
2496                         cdb[1] = flagvals;
2497                         cdb[14] = groupnum;
2498                         cdb[15] = control;
2499 
2500                         /* Transfer length */
2501                         cdb[13] = (u8)(num_blocks & 0xff);
2502                         cdb[12] = (u8)((num_blocks >> 8) & 0xff);
2503                         cdb[11] = (u8)((num_blocks >> 16) & 0xff);
2504                         cdb[10] = (u8)((num_blocks >> 24) & 0xff);
2505 
2506                         io_request->IoFlags = cpu_to_le16(16); /* Specify 16-byte cdb */
2507                         cdb_len = 16;
2508                 }
2509 
2510                 /* Normal case, just load LBA here */
2511                 switch (cdb_len) {
2512                 case 6:
2513                 {
2514                         u8 val = cdb[1] & 0xE0;
2515                         cdb[3] = (u8)(start_blk & 0xff);
2516                         cdb[2] = (u8)((start_blk >> 8) & 0xff);
2517                         cdb[1] = val | ((u8)(start_blk >> 16) & 0x1f);
2518                         break;
2519                 }
2520                 case 10:
2521                         cdb[5] = (u8)(start_blk & 0xff);
2522                         cdb[4] = (u8)((start_blk >> 8) & 0xff);
2523                         cdb[3] = (u8)((start_blk >> 16) & 0xff);
2524                         cdb[2] = (u8)((start_blk >> 24) & 0xff);
2525                         break;
2526                 case 12:
2527                         cdb[5]    = (u8)(start_blk & 0xff);
2528                         cdb[4]    = (u8)((start_blk >> 8) & 0xff);
2529                         cdb[3]    = (u8)((start_blk >> 16) & 0xff);
2530                         cdb[2]    = (u8)((start_blk >> 24) & 0xff);
2531                         break;
2532                 case 16:
2533                         cdb[9]    = (u8)(start_blk & 0xff);
2534                         cdb[8]    = (u8)((start_blk >> 8) & 0xff);
2535                         cdb[7]    = (u8)((start_blk >> 16) & 0xff);
2536                         cdb[6]    = (u8)((start_blk >> 24) & 0xff);
2537                         cdb[5]    = (u8)((start_blk >> 32) & 0xff);
2538                         cdb[4]    = (u8)((start_blk >> 40) & 0xff);
2539                         cdb[3]    = (u8)((start_blk >> 48) & 0xff);
2540                         cdb[2]    = (u8)((start_blk >> 56) & 0xff);
2541                         break;
2542                 }
2543         }
2544 }
2545 
2546 /**
2547  * megasas_stream_detect -      stream detection on read and write IOs
2548  * @instance:           Adapter soft state
2549  * @cmd:                    Command to be prepared
2550  * @io_info:            IO Request info
2551  *
2552  */
2553 
2555 static void megasas_stream_detect(struct megasas_instance *instance,
2556                                   struct megasas_cmd_fusion *cmd,
2557                                   struct IO_REQUEST_INFO *io_info)
2558 {
2559         struct fusion_context *fusion = instance->ctrl_context;
2560         u32 device_id = io_info->ldTgtId;
2561         struct LD_STREAM_DETECT *current_ld_sd
2562                 = fusion->stream_detect_by_ld[device_id];
2563         u32 *track_stream = &current_ld_sd->mru_bit_map, stream_num;
2564         u32 shifted_values, unshifted_values;
2565         u32 index_value_mask, shifted_values_mask;
2566         int i;
2567         bool is_read_ahead = false;
2568         struct STREAM_DETECT *current_sd;
2569         /* find possible stream */
2570         for (i = 0; i < MAX_STREAMS_TRACKED; ++i) {
2571                 stream_num = (*track_stream >>
2572                         (i * BITS_PER_INDEX_STREAM)) &
2573                         STREAM_MASK;
2574                 current_sd = &current_ld_sd->stream_track[stream_num];
2575                 /* if we found a stream, update the raid
2576                  *  context and also update the mruBitMap
2577                  */
2578                 /*      boundary condition */
2579                 if ((current_sd->next_seq_lba) &&
2580                     (io_info->ldStartBlock >= current_sd->next_seq_lba) &&
2581                     (io_info->ldStartBlock <= (current_sd->next_seq_lba + 32)) &&
2582                     (current_sd->is_read == io_info->isRead)) {
2583 
2584                         if ((io_info->ldStartBlock != current_sd->next_seq_lba) &&
2585                             ((!io_info->isRead) || (!is_read_ahead)))
2586                                 /*
2587                                  * Once the API is available we need to change this.
2588                                  * At this point we are not allowing any gap.
2589                                  */
2590                                 continue;
2591 
2592                         SET_STREAM_DETECTED(cmd->io_request->RaidContext.raid_context_g35);
2593                         current_sd->next_seq_lba =
2594                         io_info->ldStartBlock + io_info->numBlocks;
2595                         /*
2596                          *      update the mruBitMap LRU
2597                          */
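                             /* Promote this stream to most-recently-used:
                              * slots below index i shift up one position,
                              * slot i is dropped, and stream_num is written
                              * back into slot 0 of the MRU bitmap.
                              */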
2598                         shifted_values_mask =
2599                                 (1 <<  i * BITS_PER_INDEX_STREAM) - 1;
2600                         shifted_values = ((*track_stream & shifted_values_mask)
2601                                                 << BITS_PER_INDEX_STREAM);
2602                         index_value_mask =
2603                                 STREAM_MASK << i * BITS_PER_INDEX_STREAM;
2604                         unshifted_values =
2605                                 *track_stream & ~(shifted_values_mask |
2606                                 index_value_mask);
2607                         *track_stream =
2608                                 unshifted_values | shifted_values | stream_num;
2609                         return;
2610                 }
2611         }
2612         /*
2613          * if we did not find any stream, create a new one
2614          * from the least recently used
2615          */
2616         stream_num = (*track_stream >>
2617                 ((MAX_STREAMS_TRACKED - 1) * BITS_PER_INDEX_STREAM)) &
2618                 STREAM_MASK;
2619         current_sd = &current_ld_sd->stream_track[stream_num];
2620         current_sd->is_read = io_info->isRead;
2621         current_sd->next_seq_lba = io_info->ldStartBlock + io_info->numBlocks;
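             /* Reuse the least-recently-used stream slot for this IO:
              * age every remaining entry by one slot and record the
              * reused index as the new most-recently-used entry.
              */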
2622         *track_stream = (((*track_stream & ZERO_LAST_STREAM) << 4) | stream_num);
2623         return;
2624 }
2625 
2626 /**
2627  * megasas_set_raidflag_cpu_affinity - This function sets the cpu
2628  * affinity (cpu of the controller) and raid_flags in the raid context
2629  * based on IO type.
2630  *
      * @fusion:             Fusion context
2631  * @praid_context:      IO RAID context
2632  * @raid:               LD raid map
2633  * @fp_possible:        Is fast path possible?
2634  * @is_read:            Is read IO?
      * @scsi_buff_len:      SCSI command buffer length
2635  *
2636  */
2637 static void
2638 megasas_set_raidflag_cpu_affinity(struct fusion_context *fusion,
2639                                 union RAID_CONTEXT_UNION *praid_context,
2640                                 struct MR_LD_RAID *raid, bool fp_possible,
2641                                 u8 is_read, u32 scsi_buff_len)
2642 {
2643         u8 cpu_sel = MR_RAID_CTX_CPUSEL_0;
2644         struct RAID_CONTEXT_G35 *rctx_g35;
2645 
2646         rctx_g35 = &praid_context->raid_context_g35;
2647         if (fp_possible) {
2648                 if (is_read) {
2649                         if ((raid->cpuAffinity.pdRead.cpu0) &&
2650                             (raid->cpuAffinity.pdRead.cpu1))
2651                                 cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
2652                         else if (raid->cpuAffinity.pdRead.cpu1)
2653                                 cpu_sel = MR_RAID_CTX_CPUSEL_1;
2654                 } else {
2655                         if ((raid->cpuAffinity.pdWrite.cpu0) &&
2656                             (raid->cpuAffinity.pdWrite.cpu1))
2657                                 cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
2658                         else if (raid->cpuAffinity.pdWrite.cpu1)
2659                                 cpu_sel = MR_RAID_CTX_CPUSEL_1;
2660                         /* Fast path cache by pass capable R0/R1 VD */
2661                         if ((raid->level <= 1) &&
2662                             (raid->capability.fp_cache_bypass_capable)) {
2663                                 rctx_g35->routing_flags |=
2664                                         (1 << MR_RAID_CTX_ROUTINGFLAGS_SLD_SHIFT);
2665                                 rctx_g35->raid_flags =
2666                                         (MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS
2667                                         << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
2668                         }
2669                 }
2670         } else {
2671                 if (is_read) {
2672                         if ((raid->cpuAffinity.ldRead.cpu0) &&
2673                             (raid->cpuAffinity.ldRead.cpu1))
2674                                 cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
2675                         else if (raid->cpuAffinity.ldRead.cpu1)
2676                                 cpu_sel = MR_RAID_CTX_CPUSEL_1;
2677                 } else {
2678                         if ((raid->cpuAffinity.ldWrite.cpu0) &&
2679                             (raid->cpuAffinity.ldWrite.cpu1))
2680                                 cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
2681                         else if (raid->cpuAffinity.ldWrite.cpu1)
2682                                 cpu_sel = MR_RAID_CTX_CPUSEL_1;
2683 
2684                         if (is_stream_detected(rctx_g35) &&
2685                             ((raid->level == 5) || (raid->level == 6)) &&
2686                             (raid->writeMode == MR_RL_WRITE_THROUGH_MODE) &&
2687                             (cpu_sel == MR_RAID_CTX_CPUSEL_FCFS))
2688                                 cpu_sel = MR_RAID_CTX_CPUSEL_0;
2689                 }
2690         }
2691 
2692         rctx_g35->routing_flags |=
2693                 (cpu_sel << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT);
2694 
2695         /* Always give priority to MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT
2696          * vs MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS.
2697          * IO Subtype is not a bitmap.
2698          */
2699         if ((fusion->pcie_bw_limitation) && (raid->level == 1) && (!is_read) &&
2700                         (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)) {
2701                 praid_context->raid_context_g35.raid_flags =
2702                         (MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT
2703                         << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
2704         }
2705 }
2706 
2707 /**
2708  * megasas_build_ldio_fusion -  Prepares IOs to devices
2709  * @instance:           Adapter soft state
2710  * @scp:                SCSI command
2711  * @cmd:                Command to be prepared
2712  *
2713  * Prepares the io_request and chain elements (sg_frame) for IO.
2714  * The IO can be for PD (Fast Path) or LD.
2715  */
2716 static void
2717 megasas_build_ldio_fusion(struct megasas_instance *instance,
2718                           struct scsi_cmnd *scp,
2719                           struct megasas_cmd_fusion *cmd)
2720 {
2721         bool fp_possible;
2722         u16 ld;
2723         u32 start_lba_lo, start_lba_hi, device_id, datalength = 0;
2724         u32 scsi_buff_len;
2725         struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
2726         struct IO_REQUEST_INFO io_info;
2727         struct fusion_context *fusion;
2728         struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
2729         u8 *raidLUN;
2730         unsigned long spinlock_flags;
2731         struct MR_LD_RAID *raid = NULL;
2732         struct MR_PRIV_DEVICE *mrdev_priv;
2733         struct RAID_CONTEXT *rctx;
2734         struct RAID_CONTEXT_G35 *rctx_g35;
2735 
2736         device_id = MEGASAS_DEV_INDEX(scp);
2737 
2738         fusion = instance->ctrl_context;
2739 
2740         io_request = cmd->io_request;
2741         rctx = &io_request->RaidContext.raid_context;
2742         rctx_g35 = &io_request->RaidContext.raid_context_g35;
2743 
2744         rctx->virtual_disk_tgt_id = cpu_to_le16(device_id);
2745         rctx->status = 0;
2746         rctx->ex_status = 0;
2747 
2748         start_lba_lo = 0;
2749         start_lba_hi = 0;
2750         fp_possible = false;
2751 
2752         /*
2753          * 6-byte READ(0x08) or WRITE(0x0A) cdb
2754          */
2755         if (scp->cmd_len == 6) {
2756                 datalength = (u32) scp->cmnd[4];
2757                 start_lba_lo = ((u32) scp->cmnd[1] << 16) |
2758                         ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];
2759 
2760                 start_lba_lo &= 0x1FFFFF;
2761         }
2762 
2763         /*
2764          * 10-byte READ(0x28) or WRITE(0x2A) cdb
2765          */
2766         else if (scp->cmd_len == 10) {
2767                 datalength = (u32) scp->cmnd[8] |
2768                         ((u32) scp->cmnd[7] << 8);
2769                 start_lba_lo = ((u32) scp->cmnd[2] << 24) |
2770                         ((u32) scp->cmnd[3] << 16) |
2771                         ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
2772         }
2773 
2774         /*
2775          * 12-byte READ(0xA8) or WRITE(0xAA) cdb
2776          */
2777         else if (scp->cmd_len == 12) {
2778                 datalength = ((u32) scp->cmnd[6] << 24) |
2779                         ((u32) scp->cmnd[7] << 16) |
2780                         ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
2781                 start_lba_lo = ((u32) scp->cmnd[2] << 24) |
2782                         ((u32) scp->cmnd[3] << 16) |
2783                         ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
2784         }
2785 
2786         /*
2787          * 16-byte READ(0x88) or WRITE(0x8A) cdb
2788          */
2789         else if (scp->cmd_len == 16) {
2790                 datalength = ((u32) scp->cmnd[10] << 24) |
2791                         ((u32) scp->cmnd[11] << 16) |
2792                         ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];
2793                 start_lba_lo = ((u32) scp->cmnd[6] << 24) |
2794                         ((u32) scp->cmnd[7] << 16) |
2795                         ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
2796 
2797                 start_lba_hi = ((u32) scp->cmnd[2] << 24) |
2798                         ((u32) scp->cmnd[3] << 16) |
2799                         ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
2800         }
2801 
2802         memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
2803         io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo;
2804         io_info.numBlocks = datalength;
2805         io_info.ldTgtId = device_id;
2806         io_info.r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
2807         scsi_buff_len = scsi_bufflen(scp);
2808         io_request->DataLength = cpu_to_le32(scsi_buff_len);
2809         io_info.data_arms = 1;
2810 
2811         if (scp->sc_data_direction == DMA_FROM_DEVICE)
2812                 io_info.isRead = 1;
2813 
2814         local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
2815         ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
2816 
2817         if (ld < instance->fw_supported_vd_count)
2818                 raid = MR_LdRaidGet(ld, local_map_ptr);
2819 
2820         if (!raid || (!fusion->fast_path_io)) {
2821                 rctx->reg_lock_flags  = 0;
2822                 fp_possible = false;
2823         } else {
2824                 if (MR_BuildRaidContext(instance, &io_info, rctx,
2825                                         local_map_ptr, &raidLUN))
2826                         fp_possible = (io_info.fpOkForIo > 0) ? true : false;
2827         }
2828 
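             /*
              * Select the reply queue (MSI-x index) for this IO: in balanced
              * performance mode with a sufficiently busy device, batch IOs
              * onto the first low_latency_index_start reply vectors; otherwise
              * round-robin across all vectors (msix_load_balance) or use the
              * reply queue mapped to the submitting CPU.
              */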
2829         if ((instance->perf_mode == MR_BALANCED_PERF_MODE) &&
2830                 atomic_read(&scp->device->device_busy) >
2831                 (io_info.data_arms * MR_DEVICE_HIGH_IOPS_DEPTH))
2832                 cmd->request_desc->SCSIIO.MSIxIndex =
2833                         mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
2834                                 MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
2835         else if (instance->msix_load_balance)
2836                 cmd->request_desc->SCSIIO.MSIxIndex =
2837                         (mega_mod64(atomic64_add_return(1, &instance->total_io_count),
2838                                     instance->msix_vectors));
2839         else
2840                 cmd->request_desc->SCSIIO.MSIxIndex =
2841                         instance->reply_map[raw_smp_processor_id()];
2842 
2843         if (instance->adapter_type >= VENTURA_SERIES) {
2844                 /* FP for Optimal raid level 1.
2845                  * All large RAID-1 writes (> 32 KiB, both WT and WB modes)
2846                  * are built by the driver as LD I/Os.
2847                  * All small RAID-1 WT writes (<= 32 KiB) are built as FP I/Os
2848                  * (there is never a reason to process these as buffered writes).
2849                  * All small RAID-1 WB writes (<= 32 KiB) are built as FP I/Os
2850                  * with the SLD bit asserted.
2851                  */
2852                 if (io_info.r1_alt_dev_handle != MR_DEVHANDLE_INVALID) {
2853                         mrdev_priv = scp->device->hostdata;
2854 
2855                         if (atomic_inc_return(&instance->fw_outstanding) >
2856                                 (instance->host->can_queue)) {
2857                                 fp_possible = false;
2858                                 atomic_dec(&instance->fw_outstanding);
2859                         } else if (fusion->pcie_bw_limitation &&
2860                                 ((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) ||
2861                                    (atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint) > 0))) {
2862                                 fp_possible = false;
2863                                 atomic_dec(&instance->fw_outstanding);
2864                                 if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)
2865                                         atomic_set(&mrdev_priv->r1_ldio_hint,
2866                                                    instance->r1_ldio_hint_default);
2867                         }
2868                 }
2869 
2870                 if (!fp_possible ||
2871                     (io_info.isRead && io_info.ra_capable)) {
2872                         spin_lock_irqsave(&instance->stream_lock,
2873                                           spinlock_flags);
2874                         megasas_stream_detect(instance, cmd, &io_info);
2875                         spin_unlock_irqrestore(&instance->stream_lock,
2876                                                spinlock_flags);
2877                         /* In Ventura, if a stream is detected for a read and it is
2878                          * read-ahead capable, make this IO an LDIO
2879                          */
2880                         if (is_stream_detected(rctx_g35))
2881                                 fp_possible = false;
2882                 }
2883 
2884                 /* If raid is NULL, set CPU affinity to default CPU0 */
2885                 if (raid)
2886                         megasas_set_raidflag_cpu_affinity(fusion, &io_request->RaidContext,
2887                                 raid, fp_possible, io_info.isRead,
2888                                 scsi_buff_len);
2889                 else
2890                         rctx_g35->routing_flags |=
2891                                 (MR_RAID_CTX_CPUSEL_0 << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT);
2892         }
2893 
2894         if (fp_possible) {
2895                 megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
2896                                    local_map_ptr, start_lba_lo);
2897                 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
2898                 cmd->request_desc->SCSIIO.RequestFlags =
2899                         (MPI2_REQ_DESCRIPT_FLAGS_FP_IO
2900                          << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2901                 if (instance->adapter_type == INVADER_SERIES) {
2902                         rctx->type = MPI2_TYPE_CUDA;
2903                         rctx->nseg = 0x1;
2904                         io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
2905                         rctx->reg_lock_flags |=
2906                           (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
2907                            MR_RL_FLAGS_SEQ_NUM_ENABLE);
2908                 } else if (instance->adapter_type >= VENTURA_SERIES) {
2909                         rctx_g35->nseg_type |= (1 << RAID_CONTEXT_NSEG_SHIFT);
2910                         rctx_g35->nseg_type |= (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
2911                         rctx_g35->routing_flags |= (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
2912                         io_request->IoFlags |=
2913                                 cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
2914                 }
2915                 if (fusion->load_balance_info &&
2916                         (fusion->load_balance_info[device_id].loadBalanceFlag) &&
2917                         (io_info.isRead)) {
2918                         io_info.devHandle =
2919                                 get_updated_dev_handle(instance,
2920                                         &fusion->load_balance_info[device_id],
2921                                         &io_info, local_map_ptr);
2922                         scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
2923                         cmd->pd_r1_lb = io_info.pd_after_lb;
2924                         if (instance->adapter_type >= VENTURA_SERIES)
2925                                 rctx_g35->span_arm = io_info.span_arm;
2926                         else
2927                                 rctx->span_arm = io_info.span_arm;
2928 
2929                 } else
2930                         scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
2931 
2932                 if (instance->adapter_type >= VENTURA_SERIES)
2933                         cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle;
2934                 else
2935                         cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
2936 
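                     /* For devices exposing two valid handles in the RAID
                      * map, alternate between the handles on successive
                      * fast-path IOs.
                      */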
2937                 if ((raidLUN[0] == 1) &&
2938                         (local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].validHandles > 1)) {
2939                         instance->dev_handle = !(instance->dev_handle);
2940                         io_info.devHandle =
2941                                 local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].devHandle[instance->dev_handle];
2942                 }
2943 
2944                 cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
2945                 io_request->DevHandle = io_info.devHandle;
2946                 cmd->pd_interface = io_info.pd_interface;
2947                 /* populate the LUN field */
2948                 memcpy(io_request->LUN, raidLUN, 8);
2949         } else {
2950                 rctx->timeout_value =
2951                         cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
2952                 cmd->request_desc->SCSIIO.RequestFlags =
2953                         (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
2954                          << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2955                 if (instance->adapter_type == INVADER_SERIES) {
2956                         if (io_info.do_fp_rlbypass ||
2957                         (rctx->reg_lock_flags == REGION_TYPE_UNUSED))
2958                                 cmd->request_desc->SCSIIO.RequestFlags =
2959                                         (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
2960                                         MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2961                         rctx->type = MPI2_TYPE_CUDA;
2962                         rctx->reg_lock_flags |=
2963                                 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
2964                                         MR_RL_FLAGS_SEQ_NUM_ENABLE);
2965                         rctx->nseg = 0x1;
2966                 } else if (instance->adapter_type >= VENTURA_SERIES) {
2967                         rctx_g35->routing_flags |= (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
2968                         rctx_g35->nseg_type |= (1 << RAID_CONTEXT_NSEG_SHIFT);
2969                         rctx_g35->nseg_type |= (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
2970                 }
2971                 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
2972                 io_request->DevHandle = cpu_to_le16(device_id);
2973 
2974         } /* Not FP */
2975 }
2976 
2977 /**
2978  * megasas_build_ld_nonrw_fusion - prepares non-RW IOs for virtual disk
2979  * @instance:           Adapter soft state
2980  * @scmd:               SCSI command
2981  * @cmd:                Command to be prepared
2982  *
2983  * Prepares the io_request frame for non-RW IO cmds for VD.
2984  */
2985 static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
2986                           struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd)
2987 {
2988         u32 device_id;
2989         struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
2990         u16 ld;
2991         struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
2992         struct fusion_context *fusion = instance->ctrl_context;
2993         u8                          span, physArm;
2994         __le16                      devHandle;
2995         u32                         arRef, pd;
2996         struct MR_LD_RAID                  *raid;
2997         struct RAID_CONTEXT                *pRAID_Context;
2998         u8 fp_possible = 1;
2999 
3000         io_request = cmd->io_request;
3001         device_id = MEGASAS_DEV_INDEX(scmd);
3002         local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
3003         io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
3004         /* get RAID_Context pointer */
3005         pRAID_Context = &io_request->RaidContext.raid_context;
3006         /* Check with FW team */
3007         pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
3008         pRAID_Context->reg_lock_row_lba    = 0;
3009         pRAID_Context->reg_lock_length    = 0;
3010 
3011         if (fusion->fast_path_io && (
3012                 device_id < instance->fw_supported_vd_count)) {
3013 
3014                 ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
3015                 if (ld >= instance->fw_supported_vd_count - 1)
3016                         fp_possible = 0;
3017                 else {
3018                         raid = MR_LdRaidGet(ld, local_map_ptr);
3019                         if (!(raid->capability.fpNonRWCapable))
3020                                 fp_possible = 0;
3021                 }
3022         } else
3023                 fp_possible = 0;
3024 
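             /* Fast path requires fast_path_io, a valid LD entry in the
              * RAID map and the fpNonRWCapable bit for that LD; otherwise
              * the command is routed through firmware.
              */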
3025         if (!fp_possible) {
3026                 io_request->Function  = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
3027                 io_request->DevHandle = cpu_to_le16(device_id);
3028                 io_request->LUN[1] = scmd->device->lun;
3029                 pRAID_Context->timeout_value =
3030                         cpu_to_le16 (scmd->request->timeout / HZ);
3031                 cmd->request_desc->SCSIIO.RequestFlags =
3032                         (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
3033                         MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3034         } else {
3035 
3036                 /* set RAID context values */
3037                 pRAID_Context->config_seq_num = raid->seqNum;
3038                 if (instance->adapter_type < VENTURA_SERIES)
3039                         pRAID_Context->reg_lock_flags = REGION_TYPE_SHARED_READ;
3040                 pRAID_Context->timeout_value =
3041                         cpu_to_le16(raid->fpIoTimeoutForLd);
3042 
3043                 /* get the DevHandle for the PD (since this is
3044                    fpNonRWCapable, this is a single disk RAID0) */
3045                 span = physArm = 0;
3046                 arRef = MR_LdSpanArrayGet(ld, span, local_map_ptr);
3047                 pd = MR_ArPdGet(arRef, physArm, local_map_ptr);
3048                 devHandle = MR_PdDevHandleGet(pd, local_map_ptr);
3049 
3050                 /* build request descriptor */
3051                 cmd->request_desc->SCSIIO.RequestFlags =
3052                         (MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
3053                         MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3054                 cmd->request_desc->SCSIIO.DevHandle = devHandle;
3055 
3056                 /* populate the LUN field */
3057                 memcpy(io_request->LUN, raid->LUN, 8);
3058 
3059                 /* build the raidScsiIO structure */
3060                 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
3061                 io_request->DevHandle = devHandle;
3062         }
3063 }
3064 
3065 /**
3066  * megasas_build_syspd_fusion - prepares RW/non-RW IOs for syspd
3067  * @instance:           Adapter soft state
3068  * @scmd:               SCSI command
3069  * @cmd:                Command to be prepared
3070  * @fp_possible:        whether to build a fast path or firmware path IO
3071  *
3072  * Prepares the io_request frame for RW/non-RW IO cmds for syspds.
3073  */
3074 static void
3075 megasas_build_syspd_fusion(struct megasas_instance *instance,
3076         struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd,
3077         bool fp_possible)
3078 {
3079         u32 device_id;
3080         struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
3081         u16 pd_index = 0;
3082         u16 os_timeout_value;
3083         u16 timeout_limit;
3084         struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
3085         struct RAID_CONTEXT     *pRAID_Context;
3086         struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
3087         struct MR_PRIV_DEVICE *mr_device_priv_data;
3088         struct fusion_context *fusion = instance->ctrl_context;
3089         pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id - 1) & 1];
3090 
3091         device_id = MEGASAS_DEV_INDEX(scmd);
3092         pd_index = MEGASAS_PD_INDEX(scmd);
3093         os_timeout_value = scmd->request->timeout / HZ;
3094         mr_device_priv_data = scmd->device->hostdata;
3095         cmd->pd_interface = mr_device_priv_data->interface_type;
3096 
3097         io_request = cmd->io_request;
3098         /* get RAID_Context pointer */
3099         pRAID_Context = &io_request->RaidContext.raid_context;
3100         pRAID_Context->reg_lock_flags = 0;
3101         pRAID_Context->reg_lock_row_lba = 0;
3102         pRAID_Context->reg_lock_length = 0;
3103         io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
3104         io_request->LUN[1] = scmd->device->lun;
3105         pRAID_Context->raid_flags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
3106                 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
3107 
3108         /* If FW supports PD sequence number */
3109         if (instance->support_seqnum_jbod_fp) {
3110                 if (instance->use_seqnum_jbod_fp &&
3111                         instance->pd_list[pd_index].driveType == TYPE_DISK) {
3112 
3113                         /* More than 256 PD/JBOD support for Ventura */
3114                         if (instance->support_morethan256jbod)
3115                                 pRAID_Context->virtual_disk_tgt_id =
3116                                         pd_sync->seq[pd_index].pd_target_id;
3117                         else
3118                                 pRAID_Context->virtual_disk_tgt_id =
3119                                         cpu_to_le16(device_id +
3120                                         (MAX_PHYSICAL_DEVICES - 1));
3121                         pRAID_Context->config_seq_num =
3122                                 pd_sync->seq[pd_index].seqNum;
3123                         io_request->DevHandle =
3124                                 pd_sync->seq[pd_index].devHandle;
3125                         if (instance->adapter_type >= VENTURA_SERIES) {
3126                                 io_request->RaidContext.raid_context_g35.routing_flags |=
3127                                         (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
3128                                 io_request->RaidContext.raid_context_g35.nseg_type |=
3129                                         (1 << RAID_CONTEXT_NSEG_SHIFT);
3130                                 io_request->RaidContext.raid_context_g35.nseg_type |=
3131                                         (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
3132                         } else {
3133                                 pRAID_Context->type = MPI2_TYPE_CUDA;
3134                                 pRAID_Context->nseg = 0x1;
3135                                 pRAID_Context->reg_lock_flags |=
3136                                         (MR_RL_FLAGS_SEQ_NUM_ENABLE |
3137                                          MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
3138                         }
3139                 } else {
3140                         pRAID_Context->virtual_disk_tgt_id =
3141                                 cpu_to_le16(device_id +
3142                                 (MAX_PHYSICAL_DEVICES - 1));
3143                         pRAID_Context->config_seq_num = 0;
3144                         io_request->DevHandle = cpu_to_le16(0xFFFF);
3145                 }
3146         } else {
3147                 pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
3148                 pRAID_Context->config_seq_num = 0;
3149 
3150                 if (fusion->fast_path_io) {
3151                         local_map_ptr =
3152                                 fusion->ld_drv_map[(instance->map_id & 1)];
3153                         io_request->DevHandle =
3154                                 local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
3155                 } else {
3156                         io_request->DevHandle = cpu_to_le16(0xFFFF);
3157                 }
3158         }
3159 
3160         cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
3161 
3162         if ((instance->perf_mode == MR_BALANCED_PERF_MODE) &&
3163                 atomic_read(&scmd->device->device_busy) > MR_DEVICE_HIGH_IOPS_DEPTH)
3164                 cmd->request_desc->SCSIIO.MSIxIndex =
3165                         mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
3166                                 MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
3167         else if (instance->msix_load_balance)
3168                 cmd->request_desc->SCSIIO.MSIxIndex =
3169                         (mega_mod64(atomic64_add_return(1, &instance->total_io_count),
3170                                     instance->msix_vectors));
3171         else
3172                 cmd->request_desc->SCSIIO.MSIxIndex =
3173                         instance->reply_map[raw_smp_processor_id()];
3174 
3175         if (!fp_possible) {
3176                 /* system pd firmware path */
3177                 io_request->Function  = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
3178                 cmd->request_desc->SCSIIO.RequestFlags =
3179                         (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
3180                                 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3181                 pRAID_Context->timeout_value = cpu_to_le16(os_timeout_value);
3182                 pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
3183         } else {
3184                 if (os_timeout_value)
3185                         os_timeout_value++;
3186 
3187                 /* system pd Fast Path */
3188                 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
3189                 timeout_limit = (scmd->device->type == TYPE_DISK) ?
3190                                 255 : 0xFFFF;
3191                 pRAID_Context->timeout_value =
3192                         cpu_to_le16((os_timeout_value > timeout_limit) ?
3193                         timeout_limit : os_timeout_value);
3194                 if (instance->adapter_type >= INVADER_SERIES)
3195                         io_request->IoFlags |=
3196                                 cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
3197 
3198                 cmd->request_desc->SCSIIO.RequestFlags =
3199                         (MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
3200                                 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3201         }
3202 }
3203 
3204 /**
3205  * megasas_build_io_fusion -    Prepares IOs to devices
3206  * @instance:           Adapter soft state
3207  * @scp:                SCSI command
3208  * @cmd:                Command to be prepared
3209  *
3210  * Invokes helper functions to prepare request frames
3211  * and sets flags appropriate for IO/Non-IO cmd
3212  */
3213 static int
3214 megasas_build_io_fusion(struct megasas_instance *instance,
3215                         struct scsi_cmnd *scp,
3216                         struct megasas_cmd_fusion *cmd)
3217 {
3218         int sge_count;
3219         u8  cmd_type;
3220         struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
3221         struct MR_PRIV_DEVICE *mr_device_priv_data;
3222         mr_device_priv_data = scp->device->hostdata;
3223 
3224         /* Zero out some fields so they don't get reused */
3225         memset(io_request->LUN, 0x0, 8);
3226         io_request->CDB.EEDP32.PrimaryReferenceTag = 0;
3227         io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0;
3228         io_request->EEDPFlags = 0;
3229         io_request->Control = 0;
3230         io_request->EEDPBlockSize = 0;
3231         io_request->ChainOffset = 0;
3232         io_request->RaidContext.raid_context.raid_flags = 0;
3233         io_request->RaidContext.raid_context.type = 0;
3234         io_request->RaidContext.raid_context.nseg = 0;
3235 
3236         memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len);
3237         /*
3238          * Just the CDB length, rest of the Flags are zero
3239          * This will be modified for FP in build_ldio_fusion
3240          */
3241         io_request->IoFlags = cpu_to_le16(scp->cmd_len);
3242 
3243         switch (cmd_type = megasas_cmd_type(scp)) {
3244         case READ_WRITE_LDIO:
3245                 megasas_build_ldio_fusion(instance, scp, cmd);
3246                 break;
3247         case NON_READ_WRITE_LDIO:
3248                 megasas_build_ld_nonrw_fusion(instance, scp, cmd);
3249                 break;
3250         case READ_WRITE_SYSPDIO:
3251                 megasas_build_syspd_fusion(instance, scp, cmd, true);
3252                 break;
3253         case NON_READ_WRITE_SYSPDIO:
3254                 if (instance->secure_jbod_support ||
3255                     mr_device_priv_data->is_tm_capable)
3256                         megasas_build_syspd_fusion(instance, scp, cmd, false);
3257                 else
3258                         megasas_build_syspd_fusion(instance, scp, cmd, true);
3259                 break;
3260         default:
3261                 break;
3262         }
3263 
3264         /*
3265          * Construct SGL
3266          */
3267 
3268         sge_count = megasas_make_sgl(instance, scp, cmd);
3269 
3270         if (sge_count > instance->max_num_sge || (sge_count < 0)) {
3271                 dev_err(&instance->pdev->dev,
3272                         "%s %d sge_count (%d) is out of range. Range is:  0-%d\n",
3273                         __func__, __LINE__, sge_count, instance->max_num_sge);
3274                 return 1;
3275         }
3276 
3277         if (instance->adapter_type >= VENTURA_SERIES) {
3278                 set_num_sge(&io_request->RaidContext.raid_context_g35, sge_count);
3279                 cpu_to_le16s(&io_request->RaidContext.raid_context_g35.routing_flags);
3280                 cpu_to_le16s(&io_request->RaidContext.raid_context_g35.nseg_type);
3281         } else {
3282                 /* numSGE stores the lower 8 bits of sge_count.
3283                  * numSGEExt stores the upper 8 bits of sge_count
3284                  */
3285                 io_request->RaidContext.raid_context.num_sge = sge_count;
3286                 io_request->RaidContext.raid_context.num_sge_ext =
3287                         (u8)(sge_count >> 8);
3288         }
3289 
3290         io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
3291 
3292         if (scp->sc_data_direction == DMA_TO_DEVICE)
3293                 io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE);
3294         else if (scp->sc_data_direction == DMA_FROM_DEVICE)
3295                 io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ);
3296 
3297         io_request->SGLOffset0 =
3298                 offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4;
3299 
3300         io_request->SenseBufferLowAddress =
3301                 cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
3302         io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
3303 
3304         cmd->scmd = scp;
3305         scp->SCp.ptr = (char *)cmd;
3306 
3307         return 0;
3308 }
3309 
3310 static union MEGASAS_REQUEST_DESCRIPTOR_UNION *
3311 megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)
3312 {
3313         u8 *p;
3314         struct fusion_context *fusion;
3315 
3316         fusion = instance->ctrl_context;
3317         p = fusion->req_frames_desc +
3318                 sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * index;
3319 
3320         return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p;
3321 }
3322 
3323 
3324 /* megasas_prepare_secondRaid1_IO
3325  *  It prepares the second (peer) IO for a RAID 1 write
3326  */
3327 static void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
3328                                            struct megasas_cmd_fusion *cmd,
3329                                            struct megasas_cmd_fusion *r1_cmd)
3330 {
3331         union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL;
3332         struct fusion_context *fusion;
3333         fusion = instance->ctrl_context;
3334         req_desc = cmd->request_desc;
3335         /* copy the io request frame as well as 8 SGEs data for r1 command*/
3336         memcpy(r1_cmd->io_request, cmd->io_request,
3337                (sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)));
3338         memcpy(&r1_cmd->io_request->SGL, &cmd->io_request->SGL,
3339                (fusion->max_sge_in_main_msg * sizeof(union MPI2_SGE_IO_UNION)));
3340         /*sense buffer is different for r1 command*/
3341         r1_cmd->io_request->SenseBufferLowAddress =
3342                         cpu_to_le32(lower_32_bits(r1_cmd->sense_phys_addr));
3343         r1_cmd->scmd = cmd->scmd;
3344         req_desc2 = megasas_get_request_descriptor(instance,
3345                                                    (r1_cmd->index - 1));
3346         req_desc2->Words = 0;
3347         r1_cmd->request_desc = req_desc2;
3348         req_desc2->SCSIIO.SMID = cpu_to_le16(r1_cmd->index);
3349         req_desc2->SCSIIO.RequestFlags = req_desc->SCSIIO.RequestFlags;
3350         r1_cmd->request_desc->SCSIIO.DevHandle = cmd->r1_alt_dev_handle;
3351         r1_cmd->io_request->DevHandle = cmd->r1_alt_dev_handle;
3352         r1_cmd->r1_alt_dev_handle = cmd->io_request->DevHandle;
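             /* Cross-link the two commands through peer_smid so the
              * completion of either IO can locate its partner.
              */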
3353         cmd->io_request->RaidContext.raid_context_g35.flow_specific.peer_smid =
3354                         cpu_to_le16(r1_cmd->index);
3355         r1_cmd->io_request->RaidContext.raid_context_g35.flow_specific.peer_smid =
3356                         cpu_to_le16(cmd->index);
3357         /* MSIxIndex of both commands' request descriptors should be the same */
3358         r1_cmd->request_desc->SCSIIO.MSIxIndex =
3359                         cmd->request_desc->SCSIIO.MSIxIndex;
3360         /*span arm is different for r1 cmd*/
3361         r1_cmd->io_request->RaidContext.raid_context_g35.span_arm =
3362                         cmd->io_request->RaidContext.raid_context_g35.span_arm + 1;
3363 }
3364 
3365 /**
3366  * megasas_build_and_issue_cmd_fusion - Main routine for building and
3367  *                                      issuing a non-IOCTL cmd
3368  * @instance:                   Adapter soft state
3369  * @scmd:                       pointer to scsi cmd from OS
3370  */
3371 static u32
3372 megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
3373                                    struct scsi_cmnd *scmd)
3374 {
3375         struct megasas_cmd_fusion *cmd, *r1_cmd = NULL;
3376         union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3377         u32 index;
3378 
3379         if ((megasas_cmd_type(scmd) == READ_WRITE_LDIO) &&
3380                 instance->ldio_threshold &&
3381                 (atomic_inc_return(&instance->ldio_outstanding) >
3382                 instance->ldio_threshold)) {
3383                 atomic_dec(&instance->ldio_outstanding);
3384                 return SCSI_MLQUEUE_DEVICE_BUSY;
3385         }
3386 
3387         if (atomic_inc_return(&instance->fw_outstanding) >
3388                         instance->host->can_queue) {
3389                 atomic_dec(&instance->fw_outstanding);
3390                 return SCSI_MLQUEUE_HOST_BUSY;
3391         }
3392 
3393         cmd = megasas_get_cmd_fusion(instance, scmd->request->tag);
3394 
3395         if (!cmd) {
3396                 atomic_dec(&instance->fw_outstanding);
3397                 return SCSI_MLQUEUE_HOST_BUSY;
3398         }
3399 
3400         index = cmd->index;
3401 
3402         req_desc = megasas_get_request_descriptor(instance, index-1);
3403 
3404         req_desc->Words = 0;
3405         cmd->request_desc = req_desc;
3406 
3407         if (megasas_build_io_fusion(instance, scmd, cmd)) {
3408                 megasas_return_cmd_fusion(instance, cmd);
3409                 dev_err(&instance->pdev->dev, "Error building command\n");
3410                 cmd->request_desc = NULL;
3411                 atomic_dec(&instance->fw_outstanding);
3412                 return SCSI_MLQUEUE_HOST_BUSY;
3413         }
3414 
3415         req_desc = cmd->request_desc;
3416         req_desc->SCSIIO.SMID = cpu_to_le16(index);
3417 
3418         if (cmd->io_request->ChainOffset != 0 &&
3419             cmd->io_request->ChainOffset != 0xF)
3420                 dev_err(&instance->pdev->dev, "The chain offset value is not "
3421                        "correct : %x\n", cmd->io_request->ChainOffset);
3422         /*
3423          *      If it is RAID 1/10 FP write capable,
3424          *      try to get a second command from the pool and construct it.
3425          *      FW has confirmed that the LBA values of the two PDs
3426          *      corresponding to a single R1/10 LD are always the same.
3427          *
3428          */
3429         /*      The driver-side count should always be less than max_fw_cmds
3430          *      to get a new command.
3431          */
3432         if (cmd->r1_alt_dev_handle != MR_DEVHANDLE_INVALID) {
3433                 r1_cmd = megasas_get_cmd_fusion(instance,
3434                                 (scmd->request->tag + instance->max_fw_cmds));
3435                 megasas_prepare_secondRaid1_IO(instance, cmd, r1_cmd);
3436         }
3437 
3438 
3439         /*
3440          * Issue the command to the FW
3441          */
3442 
3443         megasas_fire_cmd_fusion(instance, req_desc);
3444 
3445         if (r1_cmd)
3446                 megasas_fire_cmd_fusion(instance, r1_cmd->request_desc);
3447 
3448 
3449         return 0;
3450 }
3451 
3452 /**
3453  * megasas_complete_r1_command -
3454  * completes R1 FP write commands which have a valid peer smid
3455  * @instance:                   Adapter soft state
3456  * @cmd:                        MPT command frame
3457  *
3458  */
3459 static inline void
3460 megasas_complete_r1_command(struct megasas_instance *instance,
3461                             struct megasas_cmd_fusion *cmd)
3462 {
3463         u8 *sense, status, ex_status;
3464         u32 data_length;
3465         u16 peer_smid;
3466         struct fusion_context *fusion;
3467         struct megasas_cmd_fusion *r1_cmd = NULL;
3468         struct scsi_cmnd *scmd_local = NULL;
3469         struct RAID_CONTEXT_G35 *rctx_g35;
3470 
3471         rctx_g35 = &cmd->io_request->RaidContext.raid_context_g35;
3472         fusion = instance->ctrl_context;
3473         peer_smid = le16_to_cpu(rctx_g35->flow_specific.peer_smid);
3474 
3475         r1_cmd = fusion->cmd_list[peer_smid - 1];
3476         scmd_local = cmd->scmd;
3477         status = rctx_g35->status;
3478         ex_status = rctx_g35->ex_status;
3479         data_length = cmd->io_request->DataLength;
3480         sense = cmd->sense;
3481 
3482         cmd->cmd_completed = true;
3483 
3484         /* Check if peer command is completed or not*/
3485         if (r1_cmd->cmd_completed) {
3486                 rctx_g35 = &r1_cmd->io_request->RaidContext.raid_context_g35;
3487                 if (rctx_g35->status != MFI_STAT_OK) {
3488                         status = rctx_g35->status;
3489                         ex_status = rctx_g35->ex_status;
3490                         data_length = r1_cmd->io_request->DataLength;
3491                         sense = r1_cmd->sense;
3492                 }
3493 
3494                 megasas_return_cmd_fusion(instance, r1_cmd);
3495                 map_cmd_status(fusion, scmd_local, status, ex_status,
3496                                le32_to_cpu(data_length), sense);
3497                 if (instance->ldio_threshold &&
3498                     megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
3499                         atomic_dec(&instance->ldio_outstanding);
3500                 scmd_local->SCp.ptr = NULL;
3501                 megasas_return_cmd_fusion(instance, cmd);
3502                 scsi_dma_unmap(scmd_local);
3503                 scmd_local->scsi_done(scmd_local);
3504         }
3505 }
3506 
3507 /**
3508  * complete_cmd_fusion -        Completes command
3509  * @instance:                   Adapter soft state
      * @MSIxIndex:                  MSI-x reply queue index
      * @irq_context:                IRQ context for this reply queue
      *
3510  * Completes all commands that are in the reply descriptor queue
3511  */
3512 static int
3513 complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex,
3514                     struct megasas_irq_context *irq_context)
3515 {
3516         union MPI2_REPLY_DESCRIPTORS_UNION *desc;
3517         struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
3518         struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req;
3519         struct fusion_context *fusion;
3520         struct megasas_cmd *cmd_mfi;
3521         struct megasas_cmd_fusion *cmd_fusion;
3522         u16 smid, num_completed;
3523         u8 reply_descript_type, *sense, status, extStatus;
3524         u32 device_id, data_length;
3525         union desc_value d_val;
3526         struct LD_LOAD_BALANCE_INFO *lbinfo;
3527         int threshold_reply_count = 0;
3528         struct scsi_cmnd *scmd_local = NULL;
3529         struct MR_TASK_MANAGE_REQUEST *mr_tm_req;
3530         struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req;
3531 
3532         fusion = instance->ctrl_context;
3533 
3534         if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
3535                 return IRQ_HANDLED;
3536 
3537         desc = fusion->reply_frames_desc[MSIxIndex] +
3538                                 fusion->last_reply_idx[MSIxIndex];
3539 
3540         reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
3541 
3542         d_val.word = desc->Words;
3543 
3544         reply_descript_type = reply_desc->ReplyFlags &
3545                 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
3546 
3547         if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
3548                 return IRQ_NONE;
3549 
3550         num_completed = 0;
3551 
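             /*
              * Walk the reply descriptor ring, completing each command by
              * SMID, until an unused descriptor (words set to all ones)
              * is reached.
              */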
3552         while (d_val.u.low != cpu_to_le32(UINT_MAX) &&
3553                d_val.u.high != cpu_to_le32(UINT_MAX)) {
3554 
3555                 smid = le16_to_cpu(reply_desc->SMID);
3556                 cmd_fusion = fusion->cmd_list[smid - 1];
3557                 scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *)
3558                                                 cmd_fusion->io_request;
3559 
3560                 scmd_local = cmd_fusion->scmd;
3561                 status = scsi_io_req->RaidContext.raid_context.status;
3562                 extStatus = scsi_io_req->RaidContext.raid_context.ex_status;
3563                 sense = cmd_fusion->sense;
3564                 data_length = scsi_io_req->DataLength;
3565 
3566                 switch (scsi_io_req->Function) {
3567                 case MPI2_FUNCTION_SCSI_TASK_MGMT:
3568                         mr_tm_req = (struct MR_TASK_MANAGE_REQUEST *)
3569                                                 cmd_fusion->io_request;
3570                         mpi_tm_req = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *)
3571                                                 &mr_tm_req->TmRequest;
3572                         dev_dbg(&instance->pdev->dev, "TM completion:"
3573                                 "type: 0x%x TaskMID: 0x%x\n",
3574                                 mpi_tm_req->TaskType, mpi_tm_req->TaskMID);
3575                         complete(&cmd_fusion->done);
3576                         break;
3577                 case MPI2_FUNCTION_SCSI_IO_REQUEST:  /*Fast Path IO.*/
3578                         /* Update load balancing info */
3579                         if (fusion->load_balance_info &&
3580                             (cmd_fusion->scmd->SCp.Status &
3581                             MEGASAS_LOAD_BALANCE_FLAG)) {
3582                                 device_id = MEGASAS_DEV_INDEX(scmd_local);
3583                                 lbinfo = &fusion->load_balance_info[device_id];
3584                                 atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]);
3585                                 cmd_fusion->scmd->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
3586                         }
3587                         /* Fall through - and complete IO */
3588                 case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
3589                         atomic_dec(&instance->fw_outstanding);
3590                         if (cmd_fusion->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) {
3591                                 map_cmd_status(fusion, scmd_local, status,
3592                                                extStatus, le32_to_cpu(data_length),
3593                                                sense);
3594                                 if (instance->ldio_threshold &&
3595                                     (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO))
3596                                         atomic_dec(&instance->ldio_outstanding);
3597                                 scmd_local->SCp.ptr = NULL;
3598                                 megasas_return_cmd_fusion(instance, cmd_fusion);
3599                                 scsi_dma_unmap(scmd_local);
3600                                 scmd_local->scsi_done(scmd_local);
3601                         } else  /* Optimal VD - R1 FP command completion. */
3602                                 megasas_complete_r1_command(instance, cmd_fusion);
3603                         break;
3604                 case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
3605                         cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
3606                         /* Poll mode. Dummy free.
3607                          * In case of Interrupt mode, caller has reverse check.
3608                          */
3609                         if (cmd_mfi->flags & DRV_DCMD_POLLED_MODE) {
3610                                 cmd_mfi->flags &= ~DRV_DCMD_POLLED_MODE;
3611                                 megasas_return_cmd(instance, cmd_mfi);
3612                         } else
3613                                 megasas_complete_cmd(instance, cmd_mfi, DID_OK);
3614                         break;
3615                 }
3616 
3617                 fusion->last_reply_idx[MSIxIndex]++;
3618                 if (fusion->last_reply_idx[MSIxIndex] >=
3619                     fusion->reply_q_depth)
3620                         fusion->last_reply_idx[MSIxIndex] = 0;
3621 
3622                 desc->Words = cpu_to_le64(ULLONG_MAX);
3623                 num_completed++;
3624                 threshold_reply_count++;
3625 
3626                 /* Get the next reply descriptor */
3627                 if (!fusion->last_reply_idx[MSIxIndex])
3628                         desc = fusion->reply_frames_desc[MSIxIndex];
3629                 else
3630                         desc++;
3631 
3632                 reply_desc =
3633                   (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
3634 
3635                 d_val.word = desc->Words;
3636 
3637                 reply_descript_type = reply_desc->ReplyFlags &
3638                         MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
3639 
3640                 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
3641                         break;
3642                 /*
3643                  * Write to the reply post host index register after completing a
3644                  * threshold number of replies while more replies are still pending
3645                  * in the reply queue
3646                  */
3647                 if (threshold_reply_count >= instance->threshold_reply_count) {
3648                         if (instance->msix_combined)
3649                                 writel(((MSIxIndex & 0x7) << 24) |
3650                                         fusion->last_reply_idx[MSIxIndex],
3651                                         instance->reply_post_host_index_addr[MSIxIndex/8]);
3652                         else
3653                                 writel((MSIxIndex << 24) |
3654                                         fusion->last_reply_idx[MSIxIndex],
3655                                         instance->reply_post_host_index_addr[0]);
3656                         threshold_reply_count = 0;
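                             /* In interrupt context, defer further reply
                              * processing to irq_poll so a busy reply queue
                              * is drained outside hard-irq context.
                              */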
3657                         if (irq_context) {
3658                                 if (!irq_context->irq_poll_scheduled) {
3659                                         irq_context->irq_poll_scheduled = true;
3660                                         irq_context->irq_line_enable = true;
3661                                         irq_poll_sched(&irq_context->irqpoll);
3662                                 }
3663                                 return num_completed;
3664                         }
3665                 }
3666         }
3667 
3668         if (num_completed) {
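                     /* Order the reply-descriptor updates before ringing the
                      * reply post host index doorbell below.
                      */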
3669                 wmb();
3670                 if (instance->msix_combined)
3671                         writel(((MSIxIndex & 0x7) << 24) |
3672                                 fusion->last_reply_idx[MSIxIndex],
3673                                 instance->reply_post_host_index_addr[MSIxIndex/8]);
3674                 else
3675                         writel((MSIxIndex << 24) |
3676                                 fusion->last_reply_idx[MSIxIndex],
3677                                 instance->reply_post_host_index_addr[0]);
3678                 megasas_check_and_restore_queue_depth(instance);
3679         }
3680         return num_completed;
3681 }
3682 
3683 /**
3684  * megasas_enable_irq_poll() - enable irqpoll
      * @instance:                   Adapter soft state
3685  */
3686 static void megasas_enable_irq_poll(struct megasas_instance *instance)
3687 {
3688         u32 count, i;
3689         struct megasas_irq_context *irq_ctx;
3690 
3691         count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
3692 
3693         for (i = 0; i < count; i++) {
3694                 irq_ctx = &instance->irq_context[i];
3695                 irq_poll_enable(&irq_ctx->irqpoll);
3696         }
3697 }
3698 
3699 /**
3700  * megasas_sync_irqs -  Synchronizes all IRQs owned by adapter
3701  * @instance_addr:              Address of adapter soft state
3702  */
3703 static void megasas_sync_irqs(unsigned long instance_addr)
3704 {
3705         u32 count, i;
3706         struct megasas_instance *instance =
3707                 (struct megasas_instance *)instance_addr;
3708         struct megasas_irq_context *irq_ctx;
3709 
3710         count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
3711 
3712         for (i = 0; i < count; i++) {
3713                 synchronize_irq(pci_irq_vector(instance->pdev, i));
3714                 irq_ctx = &instance->irq_context[i];
3715                 irq_poll_disable(&irq_ctx->irqpoll);
3716                 if (irq_ctx->irq_poll_scheduled) {
3717                         irq_ctx->irq_poll_scheduled = false;
3718                         enable_irq(irq_ctx->os_irq);
3719                 }
3720         }
3721 }
3722 
3723 /**
3724  * megasas_irqpoll() - process a queue for completed reply descriptors
3725  * @irqpoll:    IRQ poll structure associated with queue to poll.
3726  * @budget:     Threshold of reply descriptors to process per poll.
3727  *
3728  * Return: The number of entries processed.
3729  */
3730 
3731 int megasas_irqpoll(struct irq_poll *irqpoll, int budget)
3732 {
3733         struct megasas_irq_context *irq_ctx;
3734         struct megasas_instance *instance;
3735         int num_entries;
3736 
3737         irq_ctx = container_of(irqpoll, struct megasas_irq_context, irqpoll);
3738         instance = irq_ctx->instance;
3739 
3740         if (irq_ctx->irq_line_enable) {
3741                 disable_irq(irq_ctx->os_irq);
3742                 irq_ctx->irq_line_enable = false;
3743         }
3744 
3745         num_entries = complete_cmd_fusion(instance, irq_ctx->MSIxIndex, irq_ctx);
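             /* Fewer entries than the budget means the reply queue is drained:
              * complete the poll cycle and re-enable the interrupt line.
              */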
3746         if (num_entries < budget) {
3747                 irq_poll_complete(irqpoll);
3748                 irq_ctx->irq_poll_scheduled = false;
3749                 enable_irq(irq_ctx->os_irq);
3750         }
3751 
3752         return num_entries;
3753 }
3754 
3755 /**
3756  * megasas_complete_cmd_dpc_fusion -    Completes command
3757  * @instance_addr:              Address of adapter soft state
3758  *
3759  * Tasklet to complete cmds
3760  */
3761 static void
3762 megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
3763 {
3764         struct megasas_instance *instance =
3765                 (struct megasas_instance *)instance_addr;
3766         u32 count, MSIxIndex;
3767 
3768         count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
3769 
3770         /* If we have already declared adapter dead, do not complete cmds */
3771         if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
3772                 return;
3773 
3774         for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++)
3775                 complete_cmd_fusion(instance, MSIxIndex, NULL);
3776 }
3777 
3778 /**
3779  * megasas_isr_fusion - isr entry point
      * @irq:                        irq line number
      * @devp:                       IRQ context (struct megasas_irq_context *)
3780  */
3781 static irqreturn_t megasas_isr_fusion(int irq, void *devp)
3782 {
3783         struct megasas_irq_context *irq_context = devp;
3784         struct megasas_instance *instance = irq_context->instance;
3785         u32 mfiStatus;
3786 
3787         if (instance->mask_interrupts)
3788                 return IRQ_NONE;
3789 
3790 #if defined(ENABLE_IRQ_POLL)
3791         if (irq_context->irq_poll_scheduled)
3792                 return IRQ_HANDLED;
3793 #endif
3794 
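             /* For legacy (non-MSI-X) interrupts, clear and check the interrupt
              * status; zero means this adapter did not raise the interrupt.
              */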
3795         if (!instance->msix_vectors) {
3796                 mfiStatus = instance->instancet->clear_intr(instance);
3797                 if (!mfiStatus)
3798                         return IRQ_NONE;
3799         }
3800 
3801         /* If we are resetting, bail */
3802         if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) {
3803                 instance->instancet->clear_intr(instance);
3804                 return IRQ_HANDLED;
3805         }
3806 
3807         return complete_cmd_fusion(instance, irq_context->MSIxIndex, irq_context)
3808                         ? IRQ_HANDLED : IRQ_NONE;
3809 }
3810 
3811 /**
3812  * build_mpt_mfi_pass_thru - builds a cmd for MFI Pass thru
3813  * @instance:                   Adapter soft state
3814  * @mfi_cmd:                    megasas_cmd pointer
3815  *
3816  */
3817 static void
3818 build_mpt_mfi_pass_thru(struct megasas_instance *instance,
3819                         struct megasas_cmd *mfi_cmd)
3820 {
3821         struct MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
3822         struct MPI2_RAID_SCSI_IO_REQUEST *io_req;
3823         struct megasas_cmd_fusion *cmd;
3824         struct fusion_context *fusion;
3825         struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr;
3826 
3827         fusion = instance->ctrl_context;
3828 
3829         cmd = megasas_get_cmd_fusion(instance,
3830                         instance->max_scsi_cmds + mfi_cmd->index);
3831 
3832         /*  Save the smid. To be used for returning the cmd */
3833         mfi_cmd->context.smid = cmd->index;
3834 
3835         /*
3836          * For cmds where the flag is set, store the flag and check
3837          * on completion. For cmds with this flag, don't call
3838          * megasas_complete_cmd
3839          */
3840 
3841         if (frame_hdr->flags & cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
3842                 mfi_cmd->flags |= DRV_DCMD_POLLED_MODE;
3843 
3844         io_req = cmd->io_request;
3845 
3846         if (instance->adapter_type >= INVADER_SERIES) {
3847                 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end =
3848                         (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL;
3849                 sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
3850                 sgl_ptr_end->Flags = 0;
3851         }
3852 
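             /* Point a single IEEE chain element at the MFI frame so firmware
              * fetches the MFI command through this MPT pass-through request.
              */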
3853         mpi25_ieee_chain =
3854           (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
3855 
3856         io_req->Function    = MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
3857         io_req->SGLOffset0  = offsetof(struct MPI2_RAID_SCSI_IO_REQUEST,
3858                                        SGL) / 4;
3859         io_req->ChainOffset = fusion->chain_offset_mfi_pthru;
3860 
3861         mpi25_ieee_chain->Address = cpu_to_le64(mfi_cmd->frame_phys_addr);
3862 
3863         mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
3864                 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
3865 
3866         mpi25_ieee_chain->Length = cpu_to_le32(instance->mfi_frame_size);
3867 }
3868 
3869 /**
3870  * build_mpt_cmd - Calls helper function to build an MFI Pass thru cmd
3871  * @instance:                   Adapter soft state
3872  * @cmd:                        mfi cmd to build
3873  *
3874  */
3875 static union MEGASAS_REQUEST_DESCRIPTOR_UNION *
3876 build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
3877 {
3878         union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc = NULL;
3879         u16 index;
3880 
3881         build_mpt_mfi_pass_thru(instance, cmd);
3882         index = cmd->context.smid;
3883 
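             /* SMIDs are 1-based while the request descriptor pool is 0-based,
              * hence the index - 1 lookup below.
              */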
3884         req_desc = megasas_get_request_descriptor(instance, index - 1);
3885 
3886         req_desc->Words = 0;
3887         req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
3888                                          MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3889 
3890         req_desc->SCSIIO.SMID = cpu_to_le16(index);
3891 
3892         return req_desc;
3893 }
3894 
3895 /**
3896  * megasas_issue_dcmd_fusion - Issues an MFI Pass thru cmd
3897  * @instance:                   Adapter soft state
3898  * @cmd:                        mfi cmd pointer
3899  *
3900  */
3901 static void
3902 megasas_issue_dcmd_fusion(struct megasas_instance *instance,
3903                           struct megasas_cmd *cmd)
3904 {
3905         union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3906 
3907         req_desc = build_mpt_cmd(instance, cmd);
3908 
3909         megasas_fire_cmd_fusion(instance, req_desc);
3910         return;
3911 }
3912 
3913 /**
3914  * megasas_release_fusion -     Reverses the FW initialization
3915  * @instance:                   Adapter soft state
3916  */
3917 void
3918 megasas_release_fusion(struct megasas_instance *instance)
3919 {
3920         megasas_free_ioc_init_cmd(instance);
3921         megasas_free_cmds(instance);
3922         megasas_free_cmds_fusion(instance);
3923 
3924         iounmap(instance->reg_set);
3925 
3926         pci_release_selected_regions(instance->pdev, 1<<instance->bar);
3927 }
3928 
3929 /**
3930  * megasas_read_fw_status_reg_fusion - returns the current FW status value
3931  * @instance:                   Adapter soft state
3932  */
3933 static u32
3934 megasas_read_fw_status_reg_fusion(struct megasas_instance *instance)
3935 {
3936         return megasas_readl(instance, &instance->reg_set->outbound_scratch_pad_0);
3937 }
3938 
3939 /**
3940  * megasas_alloc_host_crash_buffer -    Host buffers for Crash dump collection from Firmware
3941  * @instance:                           Controller's soft instance
3942  * The number of successfully allocated chunks is recorded in instance->drv_buf_alloc.
3943  */
3944 static void
3945 megasas_alloc_host_crash_buffer(struct megasas_instance *instance)
3946 {
3947         unsigned int i;
3948 
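             /* Allocate the crash dump staging area in CRASH_DMA_BUF_SIZE chunks,
              * stopping at the first failure; drv_buf_alloc records the count.
              */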
3949         for (i = 0; i < MAX_CRASH_DUMP_SIZE; i++) {
3950                 instance->crash_buf[i] = vzalloc(CRASH_DMA_BUF_SIZE);
3951                 if (!instance->crash_buf[i]) {
3952                         dev_info(&instance->pdev->dev, "Firmware crash dump "
3953                                 "memory allocation failed at index %d\n", i);
3954                         break;
3955                 }
3956         }
3957         instance->drv_buf_alloc = i;
3958 }
3959 
3960 /**
3961  * megasas_free_host_crash_buffer -     Free host buffers used for FW crash dump collection
3962  * @instance:                           Controller's soft instance
3963  */
3964 void
3965 megasas_free_host_crash_buffer(struct megasas_instance *instance)
3966 {
3967         unsigned int i;
3968         for (i = 0; i < instance->drv_buf_alloc; i++) {
3969                 if (instance->crash_buf[i])
3970                         vfree(instance->crash_buf[i]);
3971         }
3972         instance->drv_buf_index = 0;
3973         instance->drv_buf_alloc = 0;
3974         instance->fw_crash_state = UNAVAILABLE;
3975         instance->fw_crash_buffer_size = 0;
3976 }
3977 
3978 /**
3979  * megasas_adp_reset_fusion -   For controller reset
      * @instance:                           Adapter soft state
3980  * @regs:                               MFI register set
3981  */
3982 static int
3983 megasas_adp_reset_fusion(struct megasas_instance *instance,
3984                          struct megasas_register_set __iomem *regs)
3985 {
3986         u32 host_diag, abs_state, retry;
3987 
3988         /* Now try to reset the chip */
3989         writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
3990         writel(MPI2_WRSEQ_1ST_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
3991         writel(MPI2_WRSEQ_2ND_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
3992         writel(MPI2_WRSEQ_3RD_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
3993         writel(MPI2_WRSEQ_4TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
3994         writel(MPI2_WRSEQ_5TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
3995         writel(MPI2_WRSEQ_6TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
3996 
3997         /* Check that the diag write enable (DRWE) bit is on */
3998         host_diag = megasas_readl(instance, &instance->reg_set->fusion_host_diag);
3999         retry = 0;
4000         while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
4001                 msleep(100);
4002                 host_diag = megasas_readl(instance,
4003                                           &instance->reg_set->fusion_host_diag);
4004                 if (retry++ == 100) {
4005                         dev_warn(&instance->pdev->dev,
4006                                 "Host diag unlock failed from %s %d\n",
4007                                 __func__, __LINE__);
4008                         break;
4009                 }
4010         }
4011         if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
4012                 return -1;
4013 
4014         /* Send chip reset command */
4015         writel(host_diag | HOST_DIAG_RESET_ADAPTER,
4016                 &instance->reg_set->fusion_host_diag);
4017         msleep(3000);
4018 
4019         /* Make sure reset adapter bit is cleared */
4020         host_diag = megasas_readl(instance, &instance->reg_set->fusion_host_diag);
4021         retry = 0;
4022         while (host_diag & HOST_DIAG_RESET_ADAPTER) {
4023                 msleep(100);
4024                 host_diag = megasas_readl(instance,
4025                                           &instance->reg_set->fusion_host_diag);
4026                 if (retry++ == 1000) {
4027                         dev_warn(&instance->pdev->dev,
4028                                 "Diag reset adapter never cleared %s %d\n",
4029                                 __func__, __LINE__);
4030                         break;
4031                 }
4032         }
4033         if (host_diag & HOST_DIAG_RESET_ADAPTER)
4034                 return -1;
4035 
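             /* Wait for firmware to progress beyond the FW_INIT state after the
              * diag reset (up to ~100 seconds).
              */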
4036         abs_state = instance->instancet->read_fw_status_reg(instance)
4037                         & MFI_STATE_MASK;
4038         retry = 0;
4039 
4040         while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
4041                 msleep(100);
4042                 abs_state = instance->instancet->
4043                         read_fw_status_reg(instance) & MFI_STATE_MASK;
4044         }
4045         if (abs_state <= MFI_STATE_FW_INIT) {
4046                 dev_warn(&instance->pdev->dev,
4047                         "fw state < MFI_STATE_FW_INIT, state = 0x%x %s %d\n",
4048                         abs_state, __func__, __LINE__);
4049                 return -1;
4050         }
4051 
4052         return 0;
4053 }
4054 
4055 /**
4056  * megasas_check_reset_fusion - For controller reset check
      * @instance:                           Adapter soft state
4057  * @regs:                               MFI register set
4058  */
4059 static int
4060 megasas_check_reset_fusion(struct megasas_instance *instance,
4061                            struct megasas_register_set __iomem *regs)
4062 {
4063         return 0;
4064 }
4065 
4066 /**
4067  * megasas_trigger_snap_dump -  Trigger snap dump in FW
4068  * @instance:                   Soft instance of adapter
4069  */
4070 static inline void megasas_trigger_snap_dump(struct megasas_instance *instance)
4071 {
4072         int j;
4073         u32 fw_state, abs_state;
4074 
4075         if (!instance->disableOnlineCtrlReset) {
4076                 dev_info(&instance->pdev->dev, "Trigger snap dump\n");
4077                 writel(MFI_ADP_TRIGGER_SNAP_DUMP,
4078                        &instance->reg_set->doorbell);
4079                 readl(&instance->reg_set->doorbell);
4080         }
4081 
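             /* Poll the firmware state for up to snapdump_wait_time seconds; a
              * transition to FAULT indicates the snap dump has been taken.
              */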
4082         for (j = 0; j < instance->snapdump_wait_time; j++) {
4083                 abs_state = instance->instancet->read_fw_status_reg(instance);
4084                 fw_state = abs_state & MFI_STATE_MASK;
4085                 if (fw_state == MFI_STATE_FAULT) {
4086                         dev_printk(KERN_ERR, &instance->pdev->dev,
4087                                    "FW in FAULT state Fault code:0x%x subcode:0x%x func:%s\n",
4088                                    abs_state & MFI_STATE_FAULT_CODE,
4089                                    abs_state & MFI_STATE_FAULT_SUBCODE, __func__);
4090                         return;
4091                 }
4092                 msleep(1000);
4093         }
4094 }
4095 
4096 /* This function waits for outstanding commands on fusion to complete */
4097 static int
4098 megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
4099                                     int reason, int *convert)
4100 {
4101         int i, outstanding, retval = 0, hb_seconds_missed = 0;
4102         u32 fw_state, abs_state;
4103         u32 waittime_for_io_completion;
4104 
4105         waittime_for_io_completion =
4106                 min_t(u32, resetwaittime,
4107                         (resetwaittime - instance->snapdump_wait_time));
4108 
4109         if (reason == MFI_IO_TIMEOUT_OCR) {
4110                 dev_info(&instance->pdev->dev,
4111                         "MFI command is timed out\n");
4112                 megasas_complete_cmd_dpc_fusion((unsigned long)instance);
4113                 if (instance->snapdump_wait_time)
4114                         megasas_trigger_snap_dump(instance);
4115                 retval = 1;
4116                 goto out;
4117         }
4118 
4119         for (i = 0; i < waittime_for_io_completion; i++) {
4120                 /* Check if firmware is in fault state */
4121                 abs_state = instance->instancet->read_fw_status_reg(instance);
4122                 fw_state = abs_state & MFI_STATE_MASK;
4123                 if (fw_state == MFI_STATE_FAULT) {
4124                         dev_printk(KERN_ERR, &instance->pdev->dev,
4125                                    "FW in FAULT state Fault code:0x%x subcode:0x%x func:%s\n",
4126                                    abs_state & MFI_STATE_FAULT_CODE,
4127                                    abs_state & MFI_STATE_FAULT_SUBCODE, __func__);
4128                         megasas_complete_cmd_dpc_fusion((unsigned long)instance);
4129                         if (instance->requestorId && reason) {
4130                                 dev_warn(&instance->pdev->dev, "SR-IOV Found FW in FAULT"
4131                                 " state while polling during"
4132                                 " I/O timeout handling for %d\n",
4133                                 instance->host->host_no);
4134                                 *convert = 1;
4135                         }
4136 
4137                         retval = 1;
4138                         goto out;
4139                 }
4140 
4141 
4142                 /* If SR-IOV VF mode & heartbeat timeout, don't wait */
4143                 if (instance->requestorId && !reason) {
4144                         retval = 1;
4145                         goto out;
4146                 }
4147 
4148                 /* If SR-IOV VF mode & I/O timeout, check for HB timeout */
4149                 if (instance->requestorId && (reason == SCSIIO_TIMEOUT_OCR)) {
4150                         if (instance->hb_host_mem->HB.fwCounter !=
4151                             instance->hb_host_mem->HB.driverCounter) {
4152                                 instance->hb_host_mem->HB.driverCounter =
4153                                         instance->hb_host_mem->HB.fwCounter;
4154                                 hb_seconds_missed = 0;
4155                         } else {
4156                                 hb_seconds_missed++;
4157                                 if (hb_seconds_missed ==
4158                                     (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) {
4159                                         dev_warn(&instance->pdev->dev, "SR-IOV:"
4160                                                " Heartbeat never completed"
4161                                                " while polling during I/O"
4162                                                " timeout handling for"
4163                                                " scsi%d.\n",
4164                                                instance->host->host_no);
4165                                         *convert = 1;
4166                                         retval = 1;
4167                                         goto out;
4168                                 }
4169                         }
4170                 }
4171 
4172                 megasas_complete_cmd_dpc_fusion((unsigned long)instance);
4173                 outstanding = atomic_read(&instance->fw_outstanding);
4174                 if (!outstanding)
4175                         goto out;
4176 
4177                 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
4178                         dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
4179                                "commands to complete for scsi%d\n", i,
4180                                outstanding, instance->host->host_no);
4181                 }
4182                 msleep(1000);
4183         }
4184 
4185         if (instance->snapdump_wait_time) {
4186                 megasas_trigger_snap_dump(instance);
4187                 retval = 1;
4188                 goto out;
4189         }
4190 
4191         if (atomic_read(&instance->fw_outstanding)) {
4192                 dev_err(&instance->pdev->dev, "pending commands remain after waiting, "
4193                        "will reset adapter scsi%d.\n",
4194                        instance->host->host_no);
4195                 *convert = 1;
4196                 retval = 1;
4197         }
4198 
4199 out:
4200         return retval;
4201 }
4202 
4203 void  megasas_reset_reply_desc(struct megasas_instance *instance)
4204 {
4205         int i, j, count;
4206         struct fusion_context *fusion;
4207         union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
4208 
4209         fusion = instance->ctrl_context;
4210         count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
4211         for (i = 0 ; i < count ; i++) {
4212                 fusion->last_reply_idx[i] = 0;
4213                 reply_desc = fusion->reply_frames_desc[i];
4214                 for (j = 0 ; j < fusion->reply_q_depth; j++, reply_desc++)
4215                         reply_desc->Words = cpu_to_le64(ULLONG_MAX);
4216         }
4217 }
4218 
4219 /*
4220  * megasas_refire_mgmt_cmd :    Re-fire management commands
4221  * @instance:                           Controller's soft instance
4222 */
4223 static void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
4224 {
4225         int j;
4226         struct megasas_cmd_fusion *cmd_fusion;
4227         struct fusion_context *fusion;
4228         struct megasas_cmd *cmd_mfi;
4229         union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
4230         struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req;
4231         u16 smid;
4232         bool refire_cmd = false;
4233         u8 result;
4234         u32 opcode = 0;
4235 
4236         fusion = instance->ctrl_context;
4237 
4238         /* Re-fire management commands.
4239          * Do not traverse the complete MPT frame pool. Start from max_scsi_cmds.
4240          */
4241         for (j = instance->max_scsi_cmds ; j < instance->max_fw_cmds; j++) {
4242                 cmd_fusion = fusion->cmd_list[j];
4243                 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
4244                 smid = le16_to_cpu(cmd_mfi->context.smid);
4245                 result = REFIRE_CMD;
4246 
4247                 if (!smid)
4248                         continue;
4249 
4250                 req_desc = megasas_get_request_descriptor(instance, smid - 1);
4251 
4252                 switch (cmd_mfi->frame->hdr.cmd) {
4253                 case MFI_CMD_DCMD:
4254                         opcode = le32_to_cpu(cmd_mfi->frame->dcmd.opcode);
4255                          /* Do not refire shutdown command */
4256                         if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
4257                                 cmd_mfi->frame->dcmd.cmd_status = MFI_STAT_OK;
4258                                 result = COMPLETE_CMD;
4259                                 break;
4260                         }
4261 
4262                         refire_cmd = ((opcode != MR_DCMD_LD_MAP_GET_INFO)) &&
4263                                       (opcode != MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
4264                                       !(cmd_mfi->flags & DRV_DCMD_SKIP_REFIRE);
4265 
4266                         if (!refire_cmd)
4267                                 result = RETURN_CMD;
4268 
4269                         break;
4270                 case MFI_CMD_NVME:
4271                         if (!instance->support_nvme_passthru) {
4272                                 cmd_mfi->frame->hdr.cmd_status = MFI_STAT_INVALID_CMD;
4273                                 result = COMPLETE_CMD;
4274                         }
4275 
4276                         break;
4277                 case MFI_CMD_TOOLBOX:
4278                         if (!instance->support_pci_lane_margining) {
4279                                 cmd_mfi->frame->hdr.cmd_status = MFI_STAT_INVALID_CMD;
4280                                 result = COMPLETE_CMD;
4281                         }
4282 
4283                         break;
4284                 default:
4285                         break;
4286                 }
4287 
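                     /* Task management frames are never re-fired; return them
                      * to the pool instead.
                      */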
4288                 scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *)
4289                                 cmd_fusion->io_request;
4290                 if (scsi_io_req->Function == MPI2_FUNCTION_SCSI_TASK_MGMT)
4291                         result = RETURN_CMD;
4292 
4293                 switch (result) {
4294                 case REFIRE_CMD:
4295                         megasas_fire_cmd_fusion(instance, req_desc);
4296                         break;
4297                 case RETURN_CMD:
4298                         megasas_return_cmd(instance, cmd_mfi);
4299                         break;
4300                 case COMPLETE_CMD:
4301                         megasas_complete_cmd(instance, cmd_mfi, DID_OK);
4302                         break;
4303                 }
4304         }
4305 }
4306 
4307 /*
4308  * megasas_track_scsiio : Track SCSI IOs outstanding to a SCSI device
4309  * @instance: per adapter struct
4310  * @channel: the channel assigned by the OS
4311  * @id: the id assigned by the OS
4312  *
4313  * Returns SUCCESS if no IOs pending to SCSI device, else return FAILED
4314  */
4315 
4316 static int megasas_track_scsiio(struct megasas_instance *instance,
4317                 int id, int channel)
4318 {
4319         int i, found = 0;
4320         struct megasas_cmd_fusion *cmd_fusion;
4321         struct fusion_context *fusion;
4322         fusion = instance->ctrl_context;
4323 
4324         for (i = 0 ; i < instance->max_scsi_cmds; i++) {
4325                 cmd_fusion = fusion->cmd_list[i];
4326                 if (cmd_fusion->scmd &&
4327                         (cmd_fusion->scmd->device->id == id &&
4328                         cmd_fusion->scmd->device->channel == channel)) {
4329                         dev_info(&instance->pdev->dev,
4330                                 "SCSI commands pending to target"
4331                                 "channel %d id %d \tSMID: 0x%x\n",
4332                                 channel, id, cmd_fusion->index);
4333                         scsi_print_command(cmd_fusion->scmd);
4334                         found = 1;
4335                         break;
4336                 }
4337         }
4338 
4339         return found ? FAILED : SUCCESS;
4340 }
4341 
4342 /**
4343  * megasas_tm_response_code - translation of device response code
4344  * @instance: per adapter object
4345  * @mpi_reply: MPI reply returned by firmware
4346  *
4347  * Return nothing.
4348  */
4349 static void
4350 megasas_tm_response_code(struct megasas_instance *instance,
4351                 struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply)
4352 {
4353         char *desc;
4354 
4355         switch (mpi_reply->ResponseCode) {
4356         case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
4357                 desc = "task management request completed";
4358                 break;
4359         case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
4360                 desc = "invalid frame";
4361                 break;
4362         case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
4363                 desc = "task management request not supported";
4364                 break;
4365         case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
4366                 desc = "task management request failed";
4367                 break;
4368         case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
4369                 desc = "task management request succeeded";
4370                 break;
4371         case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
4372                 desc = "invalid lun";
4373                 break;
4374         case 0xA:
4375                 desc = "overlapped tag attempted";
4376                 break;
4377         case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
4378                 desc = "task queued, however not sent to target";
4379                 break;
4380         default:
4381                 desc = "unknown";
4382                 break;
4383         }
4384         dev_dbg(&instance->pdev->dev, "response_code(%01x): %s\n",
4385                 mpi_reply->ResponseCode, desc);
4386         dev_dbg(&instance->pdev->dev,
4387                 "TerminationCount/DevHandle/Function/TaskType/IOCStat/IOCLoginfo"
4388                 " 0x%x/0x%x/0x%x/0x%x/0x%x/0x%x\n",
4389                 mpi_reply->TerminationCount, mpi_reply->DevHandle,
4390                 mpi_reply->Function, mpi_reply->TaskType,
4391                 mpi_reply->IOCStatus, mpi_reply->IOCLogInfo);
4392 }
4393 
4394 /**
4395  * megasas_issue_tm - main routine for sending tm requests
4396  * @instance: per adapter struct
4397  * @device_handle: device handle
4398  * @channel: the channel assigned by the OS
4399  * @id: the id assigned by the OS
4400  * @smid_task: smid assigned to the task
4401  * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX
4402  * @mr_device_priv_data: per device private data
4403  * Context: user
4404  *
4405  * MegaRAID uses the MPT interface for Task Management requests.
4406  * A generic API for sending task management requests to firmware.
4407  *
4408  * Return SUCCESS or FAILED.
4409  */
4410 static int
4411 megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
4412         uint channel, uint id, u16 smid_task, u8 type,
4413         struct MR_PRIV_DEVICE *mr_device_priv_data)
4414 {
4415         struct MR_TASK_MANAGE_REQUEST *mr_request;
4416         struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_request;
4417         unsigned long timeleft;
4418         struct megasas_cmd_fusion *cmd_fusion;
4419         struct megasas_cmd *cmd_mfi;
4420         union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
4421         struct fusion_context *fusion = NULL;
4422         struct megasas_cmd_fusion *scsi_lookup;
4423         int rc;
4424         int timeout = MEGASAS_DEFAULT_TM_TIMEOUT;
4425         struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply;
4426 
4427         fusion = instance->ctrl_context;
4428 
4429         cmd_mfi = megasas_get_cmd(instance);
4430 
4431         if (!cmd_mfi) {
4432                 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
4433                         __func__, __LINE__);
4434                 return -ENOMEM;
4435         }
4436 
4437         cmd_fusion = megasas_get_cmd_fusion(instance,
4438                         instance->max_scsi_cmds + cmd_mfi->index);
4439 
4440         /*  Save the smid. To be used for returning the cmd */
4441         cmd_mfi->context.smid = cmd_fusion->index;
4442 
4443         req_desc = megasas_get_request_descriptor(instance,
4444                         (cmd_fusion->index - 1));
4445 
4446         cmd_fusion->request_desc = req_desc;
4447         req_desc->Words = 0;
4448 
4449         mr_request = (struct MR_TASK_MANAGE_REQUEST *) cmd_fusion->io_request;
4450         memset(mr_request, 0, sizeof(struct MR_TASK_MANAGE_REQUEST));
4451         mpi_request = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *) &mr_request->TmRequest;
4452         mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4453         mpi_request->DevHandle = cpu_to_le16(device_handle);
4454         mpi_request->TaskType = type;
4455         mpi_request->TaskMID = cpu_to_le16(smid_task);
4456         mpi_request->LUN[1] = 0;
4457 
4459         req_desc = cmd_fusion->request_desc;
4460         req_desc->HighPriority.SMID = cpu_to_le16(cmd_fusion->index);
4461         req_desc->HighPriority.RequestFlags =
4462                 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
4463                 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
4464         req_desc->HighPriority.MSIxIndex =  0;
4465         req_desc->HighPriority.LMID = 0;
4466         req_desc->HighPriority.Reserved1 = 0;
4467 
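             /* Channels below MEGASAS_MAX_PD_CHANNELS address physical devices;
              * higher channels address logical drives.
              */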
4468         if (channel < MEGASAS_MAX_PD_CHANNELS)
4469                 mr_request->tmReqFlags.isTMForPD = 1;
4470         else
4471                 mr_request->tmReqFlags.isTMForLD = 1;
4472 
4473         init_completion(&cmd_fusion->done);
4474         megasas_fire_cmd_fusion(instance, req_desc);
4475 
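             /* Use the per-device timeout configured for this TM type; other TM
              * types keep the default MEGASAS_DEFAULT_TM_TIMEOUT.
              */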
4476         switch (type) {
4477         case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
4478                 timeout = mr_device_priv_data->task_abort_tmo;
4479                 break;
4480         case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
4481                 timeout = mr_device_priv_data->target_reset_tmo;
4482                 break;
4483         }
4484 
4485         timeleft = wait_for_completion_timeout(&cmd_fusion->done, timeout * HZ);
4486 
4487         if (!timeleft) {
4488                 dev_err(&instance->pdev->dev,
4489                         "task mgmt type 0x%x timed out\n", type);
4490                 mutex_unlock(&instance->reset_mutex);
4491                 rc = megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
4492                 mutex_lock(&instance->reset_mutex);
4493                 return rc;
4494         }
4495 
4496         mpi_reply = (struct MPI2_SCSI_TASK_MANAGE_REPLY *) &mr_request->TMReply;
4497         megasas_tm_response_code(instance, mpi_reply);
4498 
4499         megasas_return_cmd(instance, cmd_mfi);
4500         rc = SUCCESS;
4501         switch (type) {
4502         case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
4503                 scsi_lookup = fusion->cmd_list[smid_task - 1];
4504 
4505                 if (scsi_lookup->scmd == NULL)
4506                         break;
4507                 else {
4508                         instance->instancet->disable_intr(instance);
4509                         megasas_sync_irqs((unsigned long)instance);
4510                         instance->instancet->enable_intr(instance);
4511                         megasas_enable_irq_poll(instance);
4512                         if (scsi_lookup->scmd == NULL)
4513                                 break;
4514                 }
4515                 rc = FAILED;
4516                 break;
4517 
4518         case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
4519                 if ((channel == 0xFFFFFFFF) && (id == 0xFFFFFFFF))
4520                         break;
4521                 instance->instancet->disable_intr(instance);
4522                 megasas_sync_irqs((unsigned long)instance);
4523                 rc = megasas_track_scsiio(instance, id, channel);
4524                 instance->instancet->enable_intr(instance);
4525                 megasas_enable_irq_poll(instance);
4526 
4527                 break;
4528         case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
4529         case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
4530                 break;
4531         default:
4532                 rc = FAILED;
4533                 break;
4534         }
4535 
4536         return rc;
4537 
4538 }
4539 
4540 /*
4541  * megasas_fusion_smid_lookup : Look for fusion command corresponding to SCSI cmd
4542  * @scmd: SCSI command pointer
4543  *
4544  * Return Non Zero index, if SMID found in outstanding commands
4545  */
4546 static u16 megasas_fusion_smid_lookup(struct scsi_cmnd *scmd)
4547 {
4548         int i, ret = 0;
4549         struct megasas_instance *instance;
4550         struct megasas_cmd_fusion *cmd_fusion;
4551         struct fusion_context *fusion;
4552 
4553         instance = (struct megasas_instance *)scmd->device->host->hostdata;
4554 
4555         fusion = instance->ctrl_context;
4556 
4557         for (i = 0; i < instance->max_scsi_cmds; i++) {
4558                 cmd_fusion = fusion->cmd_list[i];
4559                 if (cmd_fusion->scmd && (cmd_fusion->scmd == scmd)) {
4560                         scmd_printk(KERN_NOTICE, scmd, "Abort request is for"
4561                                 " SMID: %d\n", cmd_fusion->index);
4562                         ret = cmd_fusion->index;
4563                         break;
4564                 }
4565         }
4566 
4567         return ret;
4568 }
4569 
4570 /*
4571 * megasas_get_tm_devhandle - Get devhandle for TM request
4572 * @sdev:                     OS provided scsi device
4573 *
4574 * Returns:                   devhandle/targetID of SCSI device
4575 */
4576 static u16 megasas_get_tm_devhandle(struct scsi_device *sdev)
4577 {
4578         u16 pd_index = 0;
4579         u32 device_id;
4580         struct megasas_instance *instance;
4581         struct fusion_context *fusion;
4582         struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
4583         u16 devhandle = (u16)ULONG_MAX;
4584 
4585         instance = (struct megasas_instance *)sdev->host->hostdata;
4586         fusion = instance->ctrl_context;
4587 
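             /* For system PDs (JBOD) the device handle comes from the firmware's
              * PD sequence map; for logical drives the target ID itself is used.
              */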
4588         if (!MEGASAS_IS_LOGICAL(sdev)) {
4589                 if (instance->use_seqnum_jbod_fp) {
4590                         pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL)
4591                                     + sdev->id;
4592                         pd_sync = (void *)fusion->pd_seq_sync
4593                                         [(instance->pd_seq_map_id - 1) & 1];
4594                         devhandle = pd_sync->seq[pd_index].devHandle;
4595                 } else
4596                         sdev_printk(KERN_ERR, sdev, "Firmware expose tmCapable"
4597                                 " without JBOD MAP support from %s %d\n", __func__, __LINE__);
4598         } else {
4599                 device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
4600                                 + sdev->id;
4601                 devhandle = device_id;
4602         }
4603 
4604         return devhandle;
4605 }
4606 
4607 /*
4608  * megasas_task_abort_fusion : SCSI task abort function for fusion adapters
4609  * @scmd : pointer to scsi command object
4610  *
4611  * Return SUCCESS, if command aborted else FAILED
4612  */
4613 
4614 int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
4615 {
4616         struct megasas_instance *instance;
4617         u16 smid, devhandle;
4618         int ret;
4619         struct MR_PRIV_DEVICE *mr_device_priv_data;
4620         mr_device_priv_data = scmd->device->hostdata;
4621 
4622         instance = (struct megasas_instance *)scmd->device->host->hostdata;
4623 
4624         if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
4625                 dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL,"
4626                 "SCSI host:%d\n", instance->host->host_no);
4627                 ret = FAILED;
4628                 return ret;
4629         }
4630 
4631         if (!mr_device_priv_data) {
4632                 sdev_printk(KERN_INFO, scmd->device, "device been deleted! "
4633                         "scmd(%p)\n", scmd);
4634                 scmd->result = DID_NO_CONNECT << 16;
4635                 ret = SUCCESS;
4636                 goto out;
4637         }
4638 
4639         if (!mr_device_priv_data->is_tm_capable) {
4640                 ret = FAILED;
4641                 goto out;
4642         }
4643 
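             /* reset_mutex serializes this abort against an ongoing OCR/reset. */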
4644         mutex_lock(&instance->reset_mutex);
4645 
4646         smid = megasas_fusion_smid_lookup(scmd);
4647 
4648         if (!smid) {
4649                 ret = SUCCESS;
4650                 scmd_printk(KERN_NOTICE, scmd, "Command for which abort is"
4651                         " issued is not found in outstanding commands\n");
4652                 mutex_unlock(&instance->reset_mutex);
4653                 goto out;
4654         }
4655 
4656         devhandle = megasas_get_tm_devhandle(scmd->device);
4657 
4658         if (devhandle == (u16)ULONG_MAX) {
4659                 ret = SUCCESS;
4660                 sdev_printk(KERN_INFO, scmd->device,
4661                         "task abort issued for invalid devhandle\n");
4662                 mutex_unlock(&instance->reset_mutex);
4663                 goto out;
4664         }
4665         sdev_printk(KERN_INFO, scmd->device,
4666                 "attempting task abort! scmd(0x%p) tm_dev_handle 0x%x\n",
4667                 scmd, devhandle);
4668 
4669         mr_device_priv_data->tm_busy = 1;
4670         ret = megasas_issue_tm(instance, devhandle,
4671                         scmd->device->channel, scmd->device->id, smid,
4672                         MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
4673                         mr_device_priv_data);
4674         mr_device_priv_data->tm_busy = 0;
4675 
4676         mutex_unlock(&instance->reset_mutex);
4677         scmd_printk(KERN_INFO, scmd, "task abort %s!! scmd(0x%p)\n",
4678                         ((ret == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4679 out:
4680         scsi_print_command(scmd);
4681         if (megasas_dbg_lvl & TM_DEBUG)
4682                 megasas_dump_fusion_io(scmd);
4683 
4684         return ret;
4685 }
4686 
4687 /*
4688  * megasas_reset_target_fusion : target reset function for fusion adapters
4689  * scmd: SCSI command pointer
4690  *
4691  * Returns SUCCESS if all commands associated with target aborted else FAILED
4692  */
4693 
4694 int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
4695 {
4696 
4697         struct megasas_instance *instance;
4698         int ret = FAILED;
4699         u16 devhandle;
4700         struct MR_PRIV_DEVICE *mr_device_priv_data;
4701         mr_device_priv_data = scmd->device->hostdata;
4702 
4703         instance = (struct megasas_instance *)scmd->device->host->hostdata;
4704 
4705         if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
4706                 dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL,"
4707                 "SCSI host:%d\n", instance->host->host_no);
4708                 ret = FAILED;
4709                 return ret;
4710         }
4711 
4712         if (!mr_device_priv_data) {
4713                 sdev_printk(KERN_INFO, scmd->device,
4714                             "device been deleted! scmd: (0x%p)\n", scmd);
4715                 scmd->result = DID_NO_CONNECT << 16;
4716                 ret = SUCCESS;
4717                 goto out;
4718         }
4719 
4720         if (!mr_device_priv_data->is_tm_capable) {
4721                 ret = FAILED;
4722                 goto out;
4723         }
4724 
4725         mutex_lock(&instance->reset_mutex);
4726         devhandle = megasas_get_tm_devhandle(scmd->device);
4727 
4728         if (devhandle == (u16)ULONG_MAX) {
4729                 ret = SUCCESS;
4730                 sdev_printk(KERN_INFO, scmd->device,
4731                         "target reset issued for invalid devhandle\n");
4732                 mutex_unlock(&instance->reset_mutex);
4733                 goto out;
4734         }
4735 
4736         sdev_printk(KERN_INFO, scmd->device,
4737                 "attempting target reset! scmd(0x%p) tm_dev_handle: 0x%x\n",
4738                 scmd, devhandle);
4739         mr_device_priv_data->tm_busy = 1;
4740         ret = megasas_issue_tm(instance, devhandle,
4741                         scmd->device->channel, scmd->device->id, 0,
4742                         MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
4743                         mr_device_priv_data);
4744         mr_device_priv_data->tm_busy = 0;
4745         mutex_unlock(&instance->reset_mutex);
4746         scmd_printk(KERN_NOTICE, scmd, "target reset %s!!\n",
4747                 (ret == SUCCESS) ? "SUCCESS" : "FAILED");
4748 
4749 out:
4750         return ret;
4751 }
4752 
4753 /*SRIOV get other instance in cluster if any*/
4754 static struct
4755 megasas_instance *megasas_get_peer_instance(struct megasas_instance *instance)
4756 {
4757         int i;
4758 
4759         for (i = 0; i < MAX_MGMT_ADAPTERS; i++) {
4760                 if (megasas_mgmt_info.instance[i] &&
4761                         (megasas_mgmt_info.instance[i] != instance) &&
4762                          megasas_mgmt_info.instance[i]->requestorId &&
4763                          megasas_mgmt_info.instance[i]->peerIsPresent &&
4764                         (memcmp((megasas_mgmt_info.instance[i]->clusterId),
4765                         instance->clusterId, MEGASAS_CLUSTER_ID_SIZE) == 0))
4766                         return megasas_mgmt_info.instance[i];
4767         }
4768         return NULL;
4769 }
4770 
4771 /* Check for a second path that is currently UP */
4772 int megasas_check_mpio_paths(struct megasas_instance *instance,
4773         struct scsi_cmnd *scmd)
4774 {
4775         struct megasas_instance *peer_instance = NULL;
4776         int retval = (DID_REQUEUE << 16);
4777 
4778         if (instance->peerIsPresent) {
4779                 peer_instance = megasas_get_peer_instance(instance);
4780                 if ((peer_instance) &&
4781                         (atomic_read(&peer_instance->adprecovery) ==
4782                         MEGASAS_HBA_OPERATIONAL))
4783                         retval = (DID_NO_CONNECT << 16);
4784         }
4785         return retval;
4786 }
4787 
4788 /* Core fusion reset function */
4789 int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
4790 {
4791         int retval = SUCCESS, i, j, convert = 0;
4792         struct megasas_instance *instance;
4793         struct megasas_cmd_fusion *cmd_fusion, *r1_cmd;
4794         struct fusion_context *fusion;
4795         u32 abs_state, status_reg, reset_adapter, fpio_count = 0;
4796         u32 io_timeout_in_crash_mode = 0;
4797         struct scsi_cmnd *scmd_local = NULL;
4798         struct scsi_device *sdev;
4799         int ret_target_prop = DCMD_FAILED;
4800         bool is_target_prop = false;
4801         bool do_adp_reset = true;
4802         int max_reset_tries = MEGASAS_FUSION_MAX_RESET_TRIES;
4803 
4804         instance = (struct megasas_instance *)shost->hostdata;
4805         fusion = instance->ctrl_context;
4806 
4807         mutex_lock(&instance->reset_mutex);
4808 
4809         if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
4810                 dev_warn(&instance->pdev->dev, "Hardware critical error, "
4811                        "returning FAILED for scsi%d.\n",
4812                         instance->host->host_no);
4813                 mutex_unlock(&instance->reset_mutex);
4814                 return FAILED;
4815         }
4816         status_reg = instance->instancet->read_fw_status_reg(instance);
4817         abs_state = status_reg & MFI_STATE_MASK;
4818 
4819         /* IO timeout detected, forcibly put FW in FAULT state */
4820         if (abs_state != MFI_STATE_FAULT && instance->crash_dump_buf &&
4821                 instance->crash_dump_app_support && reason) {
4822                 dev_info(&instance->pdev->dev, "IO/DCMD timeout is detected, "
4823                         "forcibly FAULT Firmware\n");
4824                 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
4825                 status_reg = megasas_readl(instance, &instance->reg_set->doorbell);
4826                 writel(status_reg | MFI_STATE_FORCE_OCR,
4827                         &instance->reg_set->doorbell);
4828                 readl(&instance->reg_set->doorbell);
4829                 mutex_unlock(&instance->reset_mutex);
4830                 do {
4831                         ssleep(3);
4832                         io_timeout_in_crash_mode++;
4833                         dev_dbg(&instance->pdev->dev, "waiting for [%d] "
4834                                 "seconds for crash dump collection and OCR "
4835                                 "to be done\n", (io_timeout_in_crash_mode * 3));
4836                 } while ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
4837                         (io_timeout_in_crash_mode < 80));
4838 
4839                 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
4840                         dev_info(&instance->pdev->dev, "OCR done for IO "
4841                                 "timeout case\n");
4842                         retval = SUCCESS;
4843                 } else {
4844                         dev_info(&instance->pdev->dev, "Controller is not "
4845                                 "operational after 240 seconds wait for IO "
4846                                 "timeout case in FW crash dump mode\n do "
4847                                 "OCR/kill adapter\n");
4848                         retval = megasas_reset_fusion(shost, 0);
4849                 }
4850                 return retval;
4851         }
4852 
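             /* Stop the SR-IOV heartbeat timer while the reset is in progress. */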
4853         if (instance->requestorId && !instance->skip_heartbeat_timer_del)
4854                 del_timer_sync(&instance->sriov_heartbeat_timer);
4855         set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
4856         set_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags);
4857         atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING);
4858         instance->instancet->disable_intr(instance);
4859         megasas_sync_irqs((unsigned long)instance);
4860 
4861         /* First try waiting for commands to complete */
4862         if (megasas_wait_for_outstanding_fusion(instance, reason,
4863                                                 &convert)) {
4864                 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
4865                 dev_warn(&instance->pdev->dev, "resetting fusion "
4866                        "adapter scsi%d.\n", instance->host->host_no);
4867                 if (convert)
4868                         reason = 0;
4869 
4870                 if (megasas_dbg_lvl & OCR_DEBUG)
4871                         dev_info(&instance->pdev->dev, "\nPending SCSI commands:\n");
4872 
4873                 /* Now return commands back to the OS */
4874                 for (i = 0 ; i < instance->max_scsi_cmds; i++) {
4875                         cmd_fusion = fusion->cmd_list[i];
4876                         /*check for extra commands issued by driver*/
4877                         if (instance->adapter_type >= VENTURA_SERIES) {
4878                                 r1_cmd = fusion->cmd_list[i + instance->max_fw_cmds];
4879                                 megasas_return_cmd_fusion(instance, r1_cmd);
4880                         }
4881                         scmd_local = cmd_fusion->scmd;
4882                         if (cmd_fusion->scmd) {
4883                                 if (megasas_dbg_lvl & OCR_DEBUG) {
4884                                         sdev_printk(KERN_INFO,
4885                                                 cmd_fusion->scmd->device, "SMID: 0x%x\n",
4886                                                 cmd_fusion->index);
4887                                         megasas_dump_fusion_io(cmd_fusion->scmd);
4888                                 }
4889 
4890                                 if (cmd_fusion->io_request->Function ==
4891                                         MPI2_FUNCTION_SCSI_IO_REQUEST)
4892                                         fpio_count++;
4893 
4894                                 scmd_local->result =
4895                                         megasas_check_mpio_paths(instance,
4896                                                         scmd_local);
4897                                 if (instance->ldio_threshold &&
4898                                         megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
4899                                         atomic_dec(&instance->ldio_outstanding);
4900                                 megasas_return_cmd_fusion(instance, cmd_fusion);
4901                                 scsi_dma_unmap(scmd_local);
4902                                 scmd_local->scsi_done(scmd_local);
4903                         }
4904                 }
4905 
4906                 dev_info(&instance->pdev->dev, "Outstanding fastpath IOs: %d\n",
4907                         fpio_count);
4908 
4909                 atomic_set(&instance->fw_outstanding, 0);
4910 
4911                 status_reg = instance->instancet->read_fw_status_reg(instance);
4912                 abs_state = status_reg & MFI_STATE_MASK;
4913                 reset_adapter = status_reg & MFI_RESET_ADAPTER;
4914                 if (instance->disableOnlineCtrlReset ||
4915                     (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
4916                         /* Reset not supported, kill adapter */
4917                         dev_warn(&instance->pdev->dev, "Reset not supported"
4918                                ", killing adapter scsi%d.\n",
4919                                 instance->host->host_no);
4920                         goto kill_hba;
4921                 }
4922 
4923                 /* Let SR-IOV VF & PF sync up if there was a HB failure */
4924                 if (instance->requestorId && !reason) {
4925                         msleep(MEGASAS_OCR_SETTLE_TIME_VF);
4926                         do_adp_reset = false;
4927                         max_reset_tries = MEGASAS_SRIOV_MAX_RESET_TRIES_VF;
4928                 }
4929 
4930                 /* Now try to reset the chip */
4931                 for (i = 0; i < max_reset_tries; i++) {
4932                         /*
4933                          * Do adp reset and wait for
4934                          * controller to transition to ready
4935                          */
4936                         if (megasas_adp_reset_wait_for_ready(instance,
4937                                 do_adp_reset, 1) == FAILED)
4938                                 continue;
4939 
4940                         /* Wait for FW to become ready */
4941                         if (megasas_transition_to_ready(instance, 1)) {
4942                                 dev_warn(&instance->pdev->dev,
4943                                         "Failed to transition controller to ready for "
4944                                         "scsi%d.\n", instance->host->host_no);
4945                                 continue;
4946                         }
4947                         megasas_reset_reply_desc(instance);
4948                         megasas_fusion_update_can_queue(instance, OCR_CONTEXT);
4949 
4950                         if (megasas_ioc_init_fusion(instance))
4951                                 continue;
4953 
4954                         if (megasas_get_ctrl_info(instance)) {
4955                                 dev_info(&instance->pdev->dev,
4956                                         "Failed from %s %d\n",
4957                                         __func__, __LINE__);
4958                                 goto kill_hba;
4959                         }
4960 
4961                         megasas_refire_mgmt_cmd(instance);
4962 
4963                         /* Reset load balance info */
4964                         if (fusion->load_balance_info)
4965                                 memset(fusion->load_balance_info, 0,
4966                                        (sizeof(struct LD_LOAD_BALANCE_INFO) *
4967                                        MAX_LOGICAL_DRIVES_EXT));
4968 
4969                         if (!megasas_get_map_info(instance))
4970                                 megasas_sync_map_info(instance);
4971 
4972                         megasas_setup_jbod_map(instance);
4973 
4974                         /* reset stream detection array */
4975                         if (instance->adapter_type >= VENTURA_SERIES) {
4976                                 for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
4977                                         memset(fusion->stream_detect_by_ld[j],
4978                                                0, sizeof(struct LD_STREAM_DETECT));
4979                                         fusion->stream_detect_by_ld[j]->mru_bit_map =
4980                                                 MR_STREAM_BITMAP;
4981                                 }
4982                         }
4983 
4984                         clear_bit(MEGASAS_FUSION_IN_RESET,
4985                                   &instance->reset_flags);
4986                         instance->instancet->enable_intr(instance);
4987                         megasas_enable_irq_poll(instance);
4988                         shost_for_each_device(sdev, shost) {
4989                                 if ((instance->tgt_prop) &&
4990                                     (instance->nvme_page_size))
4991                                         ret_target_prop = megasas_get_target_prop(instance, sdev);
4992 
4993                                 is_target_prop = (ret_target_prop == DCMD_SUCCESS);
4994                                 megasas_set_dynamic_target_properties(sdev, is_target_prop);
4995                         }
4996 
4997                         atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
4998 
4999                         dev_info(&instance->pdev->dev,
5000                                  "Adapter is OPERATIONAL for scsi:%d\n",
5001                                  instance->host->host_no);
5002 
5003                         /* Restart SR-IOV heartbeat */
5004                         if (instance->requestorId) {
5005                                 if (!megasas_sriov_start_heartbeat(instance, 0))
5006                                         megasas_start_timer(instance);
5007                                 else
5008                                         instance->skip_heartbeat_timer_del = 1;
5009                         }
5010 
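                                  /* Re-enable FW crash dump collection only if both driver and application support it */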
5011                         if (instance->crash_dump_drv_support &&
5012                                 instance->crash_dump_app_support)
5013                                 megasas_set_crash_dump_params(instance,
5014                                         MR_CRASH_BUF_TURN_ON);
5015                         else
5016                                 megasas_set_crash_dump_params(instance,
5017                                         MR_CRASH_BUF_TURN_OFF);
5018 
5019                         if (instance->snapdump_wait_time) {
5020                                 megasas_get_snapdump_properties(instance);
5021                                 dev_info(&instance->pdev->dev,
5022                                          "Snap dump wait time\t: %d\n",
5023                                          instance->snapdump_wait_time);
5024                         }
5025 
5026                         retval = SUCCESS;
5027 
5028                         /* Adapter reset completed successfully */
5029                         dev_warn(&instance->pdev->dev,
5030                                  "Reset successful for scsi%d.\n",
5031                                  instance->host->host_no);
5032 
5033                         goto out;
5034                 }
5035                 /* Reset failed, kill the adapter */
5036                 dev_warn(&instance->pdev->dev, "Reset failed, killing adapter scsi%d.\n",
5037                          instance->host->host_no);
5038                 goto kill_hba;
5039         } else {
5040                 /* For VF: Restart HB timer if we didn't OCR */
5041                 if (instance->requestorId) {
5042                         megasas_start_timer(instance);
5043                 }
5044                 clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
5045                 instance->instancet->enable_intr(instance);
5046                 megasas_enable_irq_poll(instance);
5047                 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
5048                 goto out;
5049         }
5050 kill_hba:
5051         megaraid_sas_kill_hba(instance);
5052         megasas_enable_irq_poll(instance);
5053         instance->skip_heartbeat_timer_del = 1;
5054         retval = FAILED;
5055 out:
5056         clear_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags);
5057         mutex_unlock(&instance->reset_mutex);
5058         return retval;
5059 }
5060 
5061 /* Fusion Crash dump collection */
5062 static void megasas_fusion_crash_dump(struct megasas_instance *instance)
5063 {
5064         u32 status_reg;
5065         u8 partial_copy = 0;
5066         int wait = 0;
5067 
5068 
5069         status_reg = instance->instancet->read_fw_status_reg(instance);
5070 
5071         /*
5072          * Allocate host crash buffers to copy data from 1 MB DMA crash buffer
5073          * to host crash buffers
5074          */
5075         if (instance->drv_buf_index == 0) {
5076                 /* Buffer is already allocated for old Crash dump.
5077                  * Do OCR and do not wait for crash dump collection
5078                  */
5079                 if (instance->drv_buf_alloc) {
5080                         dev_info(&instance->pdev->dev, "earlier crash dump is "
5081                                 "not yet copied by application, ignoring this "
5082                                 "crash dump and initiating OCR\n");
5083                         status_reg |= MFI_STATE_CRASH_DUMP_DONE;
5084                         writel(status_reg,
5085                                 &instance->reg_set->outbound_scratch_pad_0);
5086                         readl(&instance->reg_set->outbound_scratch_pad_0);
5087                         return;
5088                 }
5089                 megasas_alloc_host_crash_buffer(instance);
5090                 dev_info(&instance->pdev->dev, "Number of host crash buffers allocated: %d\n",
5091                          instance->drv_buf_alloc);
5092         }
5093 
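              /*
               * FW posts the crash dump one CRASH_DMA_BUF_SIZE chunk at a time
               * and sets MFI_STATE_DMADONE in the scratch pad. Copy each chunk
               * to a host buffer, clear DMADONE and write the status back to
               * request the next chunk, until FW signals CRASH_DUMP_DONE or
               * the wait count is exhausted.
               */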
5094         while (!(status_reg & MFI_STATE_CRASH_DUMP_DONE) &&
5095                (wait < MEGASAS_WATCHDOG_WAIT_COUNT)) {
5096                 if (!(status_reg & MFI_STATE_DMADONE)) {
5097                         /*
5098                          * Next crash dump buffer is not yet DMA'd by FW.
5099                          * Check after 10ms; wait up to 1 second for FW to
5100                          * post the next buffer. If it doesn't, bail out.
5101                          */
5102                         wait++;
5103                         msleep(MEGASAS_WAIT_FOR_NEXT_DMA_MSECS);
5104                         status_reg = instance->instancet->read_fw_status_reg(
5105                                         instance);
5106                         continue;
5107                 }
5108 
5109                 wait = 0;
5110                 if (instance->drv_buf_index >= instance->drv_buf_alloc) {
5111                         dev_info(&instance->pdev->dev,
5112                                  "Driver is done copying the buffer: %d\n",
5113                                  instance->drv_buf_alloc);
5114                         status_reg |= MFI_STATE_CRASH_DUMP_DONE;
5115                         partial_copy = 1;
5116                         break;
5117                 } else {
5118                         memcpy(instance->crash_buf[instance->drv_buf_index],
5119                                instance->crash_dump_buf, CRASH_DMA_BUF_SIZE);
5120                         instance->drv_buf_index++;
5121                         status_reg &= ~MFI_STATE_DMADONE;
5122                 }
5123 
5124                 writel(status_reg, &instance->reg_set->outbound_scratch_pad_0);
5125                 readl(&instance->reg_set->outbound_scratch_pad_0);
5126 
5127                 msleep(MEGASAS_WAIT_FOR_NEXT_DMA_MSECS);
5128                 status_reg = instance->instancet->read_fw_status_reg(instance);
5129         }
5130 
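              /*
               * Collection finished: publish the copied buffers to the
               * application and, if the complete dump was captured, trigger an
               * OCR to bring the controller back online.
               */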
5131         if (status_reg & MFI_STATE_CRASH_DUMP_DONE) {
5132                 dev_info(&instance->pdev->dev, "Crash Dump is available, number "
5133                         "of copied buffers: %d\n", instance->drv_buf_index);
5134                 instance->fw_crash_buffer_size = instance->drv_buf_index;
5135                 instance->fw_crash_state = AVAILABLE;
5136                 instance->drv_buf_index = 0;
5137                 writel(status_reg, &instance->reg_set->outbound_scratch_pad_0);
5138                 readl(&instance->reg_set->outbound_scratch_pad_0);
5139                 if (!partial_copy)
5140                         megasas_reset_fusion(instance->host, 0);
5141         }
5142 }
5143 
5144 
5145 /* Fusion OCR work queue */
5146 void megasas_fusion_ocr_wq(struct work_struct *work)
5147 {
5148         struct megasas_instance *instance =
5149                 container_of(work, struct megasas_instance, work_init);
5150 
5151         megasas_reset_fusion(instance->host, 0);
5152 }
5153 
5154 /* Allocate fusion context */
5155 int
5156 megasas_alloc_fusion_context(struct megasas_instance *instance)
5157 {
5158         struct fusion_context *fusion;
5159 
5160         instance->ctrl_context = kzalloc(sizeof(struct fusion_context),
5161                                          GFP_KERNEL);
5162         if (!instance->ctrl_context) {
5163                 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
5164                         __func__, __LINE__);
5165                 return -ENOMEM;
5166         }
5167 
5168         fusion = instance->ctrl_context;
5169 
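              /*
               * log_to_span and load_balance_info are large per-controller
               * tables: try physically contiguous pages first and fall back to
               * vzalloc() on failure. megasas_free_fusion_context() checks
               * is_vmalloc_addr() to pick the matching free path.
               */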
5170         fusion->log_to_span_pages = get_order(MAX_LOGICAL_DRIVES_EXT *
5171                                               sizeof(LD_SPAN_INFO));
5172         fusion->log_to_span =
5173                 (PLD_SPAN_INFO)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
5174                                                 fusion->log_to_span_pages);
5175         if (!fusion->log_to_span) {
5176                 fusion->log_to_span =
5177                         vzalloc(array_size(MAX_LOGICAL_DRIVES_EXT,
5178                                            sizeof(LD_SPAN_INFO)));
5179                 if (!fusion->log_to_span) {
5180                         dev_err(&instance->pdev->dev, "Failed from %s %d\n",
5181                                 __func__, __LINE__);
5182                         return -ENOMEM;
5183                 }
5184         }
5185 
5186         fusion->load_balance_info_pages = get_order(MAX_LOGICAL_DRIVES_EXT *
5187                 sizeof(struct LD_LOAD_BALANCE_INFO));
5188         fusion->load_balance_info =
5189                 (struct LD_LOAD_BALANCE_INFO *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
5190                 fusion->load_balance_info_pages);
5191         if (!fusion->load_balance_info) {
5192                 fusion->load_balance_info =
5193                         vzalloc(array_size(MAX_LOGICAL_DRIVES_EXT,
5194                                            sizeof(struct LD_LOAD_BALANCE_INFO)));
5195                 if (!fusion->load_balance_info)
5196                         dev_err(&instance->pdev->dev, "Failed to allocate load_balance_info, "
5197                                 "continuing without Load Balance support\n");
5198         }
5199 
5200         return 0;
5201 }
5202 
5203 void
5204 megasas_free_fusion_context(struct megasas_instance *instance)
5205 {
5206         struct fusion_context *fusion = instance->ctrl_context;
5207 
5208         if (fusion) {
5209                 if (fusion->load_balance_info) {
5210                         if (is_vmalloc_addr(fusion->load_balance_info))
5211                                 vfree(fusion->load_balance_info);
5212                         else
5213                                 free_pages((ulong)fusion->load_balance_info,
5214                                         fusion->load_balance_info_pages);
5215                 }
5216 
5217                 if (fusion->log_to_span) {
5218                         if (is_vmalloc_addr(fusion->log_to_span))
5219                                 vfree(fusion->log_to_span);
5220                         else
5221                                 free_pages((ulong)fusion->log_to_span,
5222                                            fusion->log_to_span_pages);
5223                 }
5224 
5225                 kfree(fusion);
5226         }
5227 }
5228 
5229 struct megasas_instance_template megasas_instance_template_fusion = {
5230         .enable_intr = megasas_enable_intr_fusion,
5231         .disable_intr = megasas_disable_intr_fusion,
5232         .clear_intr = megasas_clear_intr_fusion,
5233         .read_fw_status_reg = megasas_read_fw_status_reg_fusion,
5234         .adp_reset = megasas_adp_reset_fusion,
5235         .check_reset = megasas_check_reset_fusion,
5236         .service_isr = megasas_isr_fusion,
5237         .tasklet = megasas_complete_cmd_dpc_fusion,
5238         .init_adapter = megasas_init_adapter_fusion,
5239         .build_and_issue_cmd = megasas_build_and_issue_cmd_fusion,
5240         .issue_dcmd = megasas_issue_dcmd_fusion,
5241 };
