root/drivers/scsi/mvumi.c

DEFINITIONS

This source file includes the following definitions:
  1. tag_init
  2. tag_get_one
  3. tag_release_one
  4. tag_is_empty
  5. mvumi_unmap_pci_addr
  6. mvumi_map_pci_addr
  7. mvumi_alloc_mem_resource
  8. mvumi_release_mem_resource
  9. mvumi_make_sgl
  10. mvumi_internal_cmd_sgl
  11. mvumi_create_internal_cmd
  12. mvumi_delete_internal_cmd
  13. mvumi_get_cmd
  14. mvumi_return_cmd
  15. mvumi_free_cmds
  16. mvumi_alloc_cmds
  17. mvumi_check_ib_list_9143
  18. mvumi_check_ib_list_9580
  19. mvumi_get_ib_list_entry
  20. mvumi_send_ib_list_entry
  21. mvumi_check_ob_frame
  22. mvumi_check_ob_list_9143
  23. mvumi_check_ob_list_9580
  24. mvumi_receive_ob_list_entry
  25. mvumi_reset
  26. mvumi_wait_for_outstanding
  27. mvumi_wait_for_fw
  28. mvumi_backup_bar_addr
  29. mvumi_restore_bar_addr
  30. mvumi_pci_set_master
  31. mvumi_reset_host_9580
  32. mvumi_reset_host_9143
  33. mvumi_host_reset
  34. mvumi_issue_blocked_cmd
  35. mvumi_release_fw
  36. mvumi_flush_cache
  37. mvumi_calculate_checksum
  38. mvumi_hs_build_page
  39. mvumi_init_data
  40. mvumi_hs_process_page
  41. mvumi_handshake
  42. mvumi_handshake_event
  43. mvumi_check_handshake
  44. mvumi_start
  45. mvumi_complete_cmd
  46. mvumi_complete_internal_cmd
  47. mvumi_show_event
  48. mvumi_handle_hotplug
  49. mvumi_inquiry
  50. mvumi_detach_devices
  51. mvumi_rescan_devices
  52. mvumi_match_devices
  53. mvumi_remove_devices
  54. mvumi_probe_devices
  55. mvumi_rescan_bus
  56. mvumi_proc_msg
  57. mvumi_notification
  58. mvumi_get_event
  59. mvumi_scan_events
  60. mvumi_launch_events
  61. mvumi_handle_clob
  62. mvumi_isr_handler
  63. mvumi_send_command
  64. mvumi_fire_cmd
  65. mvumi_enable_intr
  66. mvumi_disable_intr
  67. mvumi_clear_intr
  68. mvumi_read_fw_status_reg
  69. mvumi_slave_configure
  70. mvumi_build_frame
  71. mvumi_queue_command
  72. mvumi_timed_out
  73. mvumi_bios_param
  74. mvumi_cfg_hw_reg
  75. mvumi_init_fw
  76. mvumi_io_attach
  77. mvumi_probe_one
  78. mvumi_detach_one
  79. mvumi_shutdown
  80. mvumi_suspend
  81. mvumi_resume

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Marvell UMI driver
   4  *
   5  * Copyright 2011 Marvell. <jyli@marvell.com>
    6  */
   7 
   8 #include <linux/kernel.h>
   9 #include <linux/module.h>
  10 #include <linux/moduleparam.h>
  11 #include <linux/init.h>
  12 #include <linux/device.h>
  13 #include <linux/pci.h>
  14 #include <linux/list.h>
  15 #include <linux/spinlock.h>
  16 #include <linux/interrupt.h>
  17 #include <linux/delay.h>
  18 #include <linux/ktime.h>
  19 #include <linux/blkdev.h>
  20 #include <linux/io.h>
  21 #include <scsi/scsi.h>
  22 #include <scsi/scsi_cmnd.h>
  23 #include <scsi/scsi_device.h>
  24 #include <scsi/scsi_host.h>
  25 #include <scsi/scsi_transport.h>
  26 #include <scsi/scsi_eh.h>
  27 #include <linux/uaccess.h>
  28 #include <linux/kthread.h>
  29 
  30 #include "mvumi.h"
  31 
  32 MODULE_LICENSE("GPL");
  33 MODULE_AUTHOR("jyli@marvell.com");
  34 MODULE_DESCRIPTION("Marvell UMI Driver");
  35 
  36 static const struct pci_device_id mvumi_pci_table[] = {
  37         { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9143) },
  38         { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9580) },
  39         { 0 }
  40 };
  41 
  42 MODULE_DEVICE_TABLE(pci, mvumi_pci_table);
  43 
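      /*
       * Tag pool: free command tags are kept in a simple LIFO stack.
       * tag_init() pushes tags size-1..0, so tag_get_one() hands out 0
       * first.  A minimal usage sketch (hypothetical caller, but the
       * names match this driver):
       *
       *      tag = tag_get_one(mhba, &mhba->tag_pool);
       *      mhba->tag_cmd[tag] = cmd;
       *      ...
       *      tag_release_one(mhba, &mhba->tag_pool, tag);
       *
       * Valid tags are therefore 0..size-1.
       */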
  44 static void tag_init(struct mvumi_tag *st, unsigned short size)
  45 {
  46         unsigned short i;
  47         BUG_ON(size != st->size);
  48         st->top = size;
  49         for (i = 0; i < size; i++)
  50                 st->stack[i] = size - 1 - i;
  51 }
  52 
  53 static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st)
  54 {
  55         BUG_ON(st->top <= 0);
  56         return st->stack[--st->top];
  57 }
  58 
  59 static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
  60                                                         unsigned short tag)
  61 {
  62         BUG_ON(st->top >= st->size);
  63         st->stack[st->top++] = tag;
  64 }
  65 
  66 static bool tag_is_empty(struct mvumi_tag *st)
  67 {
   68         if (st->top == 0)
   69                 return true;
   70         else
   71                 return false;
  72 }
  73 
  74 static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array)
  75 {
  76         int i;
  77 
  78         for (i = 0; i < MAX_BASE_ADDRESS; i++)
  79                 if ((pci_resource_flags(dev, i) & IORESOURCE_MEM) &&
  80                                                                 addr_array[i])
  81                         pci_iounmap(dev, addr_array[i]);
  82 }
  83 
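      /*
       * Map every memory BAR with pci_iomap(); on failure, unwind the
       * BARs mapped so far via mvumi_unmap_pci_addr().  Non-memory BARs
       * are recorded as NULL.
       */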
  84 static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array)
  85 {
  86         int i;
  87 
  88         for (i = 0; i < MAX_BASE_ADDRESS; i++) {
  89                 if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
  90                         addr_array[i] = pci_iomap(dev, i, 0);
  91                         if (!addr_array[i]) {
  92                                 dev_err(&dev->dev, "failed to map Bar[%d]\n",
  93                                                                         i);
  94                                 mvumi_unmap_pci_addr(dev, addr_array);
  95                                 return -ENOMEM;
  96                         }
  97                 } else
  98                         addr_array[i] = NULL;
  99 
 100                 dev_dbg(&dev->dev, "Bar %d : %p.\n", i, addr_array[i]);
 101         }
 102 
 103         return 0;
 104 }
 105 
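      /*
       * Minimal resource manager: each allocation is tagged with its type
       * and linked onto mhba->res_list so mvumi_release_mem_resource()
       * can free everything in one pass.  RESOURCE_CACHED_MEMORY is plain
       * kmalloc memory; RESOURCE_UNCACHED_MEMORY is DMA-coherent and its
       * size is rounded up to an 8-byte multiple.
       */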
 106 static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
 107                                 enum resource_type type, unsigned int size)
 108 {
 109         struct mvumi_res *res = kzalloc(sizeof(*res), GFP_ATOMIC);
 110 
 111         if (!res) {
 112                 dev_err(&mhba->pdev->dev,
 113                         "Failed to allocate memory for resource manager.\n");
 114                 return NULL;
 115         }
 116 
 117         switch (type) {
 118         case RESOURCE_CACHED_MEMORY:
 119                 res->virt_addr = kzalloc(size, GFP_ATOMIC);
 120                 if (!res->virt_addr) {
 121                         dev_err(&mhba->pdev->dev,
  122                                 "unable to allocate memory, size = %d.\n", size);
 123                         kfree(res);
 124                         return NULL;
 125                 }
 126                 break;
 127 
 128         case RESOURCE_UNCACHED_MEMORY:
 129                 size = round_up(size, 8);
 130                 res->virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size,
 131                                                     &res->bus_addr,
 132                                                     GFP_KERNEL);
 133                 if (!res->virt_addr) {
 134                         dev_err(&mhba->pdev->dev,
  135                                         "unable to allocate consistent mem,"
  136                                                         " size = %d.\n", size);
 137                         kfree(res);
 138                         return NULL;
 139                 }
 140                 break;
 141 
 142         default:
 143                 dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type);
 144                 kfree(res);
 145                 return NULL;
 146         }
 147 
 148         res->type = type;
 149         res->size = size;
 150         INIT_LIST_HEAD(&res->entry);
 151         list_add_tail(&res->entry, &mhba->res_list);
 152 
 153         return res;
 154 }
 155 
 156 static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
 157 {
 158         struct mvumi_res *res, *tmp;
 159 
 160         list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
 161                 switch (res->type) {
 162                 case RESOURCE_UNCACHED_MEMORY:
 163                         dma_free_coherent(&mhba->pdev->dev, res->size,
 164                                                 res->virt_addr, res->bus_addr);
 165                         break;
 166                 case RESOURCE_CACHED_MEMORY:
 167                         kfree(res->virt_addr);
 168                         break;
 169                 default:
 170                         dev_err(&mhba->pdev->dev,
 171                                 "unknown resource type %d\n", res->type);
 172                         break;
 173                 }
 174                 list_del(&res->entry);
 175                 kfree(res);
 176         }
 177         mhba->fw_flag &= ~MVUMI_FW_ALLOC;
 178 }
 179 
 180 /**
  181  * mvumi_make_sgl -     Prepares the SGL
  182  * @mhba:               Adapter soft state
  183  * @scmd:               SCSI command from the mid-layer
  184  * @sgl_p:              SGL to be filled in
  185  * @sg_count:           return the number of SG elements
  186  *
  187  * If successful, this function returns 0; otherwise, it returns -1.
 188  */
 189 static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
 190                                         void *sgl_p, unsigned char *sg_count)
 191 {
 192         struct scatterlist *sg;
 193         struct mvumi_sgl *m_sg = (struct mvumi_sgl *) sgl_p;
 194         unsigned int i;
 195         unsigned int sgnum = scsi_sg_count(scmd);
 196         dma_addr_t busaddr;
 197 
 198         *sg_count = dma_map_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum,
 199                                scmd->sc_data_direction);
 200         if (*sg_count > mhba->max_sge) {
 201                 dev_err(&mhba->pdev->dev,
 202                         "sg count[0x%x] is bigger than max sg[0x%x].\n",
 203                         *sg_count, mhba->max_sge);
 204                 dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum,
 205                              scmd->sc_data_direction);
 206                 return -1;
 207         }
 208         scsi_for_each_sg(scmd, sg, *sg_count, i) {
 209                 busaddr = sg_dma_address(sg);
 210                 m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
 211                 m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
 212                 m_sg->flags = 0;
 213                 sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(sg)));
 214                 if ((i + 1) == *sg_count)
 215                         m_sg->flags |= 1U << mhba->eot_flag;
 216 
 217                 sgd_inc(mhba, m_sg);
 218         }
 219 
 220         return 0;
 221 }
 222 
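      /*
       * Internal (driver-generated) commands use at most one SG element:
       * a single DMA-coherent buffer whose descriptor is written straight
       * into the frame payload with the end-of-table bit set.
       */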
 223 static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
 224                                                         unsigned int size)
 225 {
 226         struct mvumi_sgl *m_sg;
 227         void *virt_addr;
 228         dma_addr_t phy_addr;
 229 
 230         if (size == 0)
 231                 return 0;
 232 
 233         virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, &phy_addr,
 234                                        GFP_KERNEL);
 235         if (!virt_addr)
 236                 return -1;
 237 
 238         m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
 239         cmd->frame->sg_counts = 1;
 240         cmd->data_buf = virt_addr;
 241 
 242         m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr));
 243         m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr));
 244         m_sg->flags = 1U << mhba->eot_flag;
 245         sgd_setsz(mhba, m_sg, cpu_to_le32(size));
 246 
 247         return 0;
 248 }
 249 
 250 static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
 251                                 unsigned int buf_size)
 252 {
 253         struct mvumi_cmd *cmd;
 254 
 255         cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
 256         if (!cmd) {
  257                 dev_err(&mhba->pdev->dev, "failed to create an internal cmd\n");
 258                 return NULL;
 259         }
 260         INIT_LIST_HEAD(&cmd->queue_pointer);
 261 
 262         cmd->frame = dma_alloc_coherent(&mhba->pdev->dev, mhba->ib_max_size,
 263                         &cmd->frame_phys, GFP_KERNEL);
 264         if (!cmd->frame) {
  265                 dev_err(&mhba->pdev->dev, "failed to allocate memory for"
  266                         " FW frame, size = %d.\n", mhba->ib_max_size);
 267                 kfree(cmd);
 268                 return NULL;
 269         }
 270 
 271         if (buf_size) {
 272                 if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
 273                         dev_err(&mhba->pdev->dev, "failed to allocate memory"
 274                                                 " for internal frame\n");
 275                         dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
 276                                         cmd->frame, cmd->frame_phys);
 277                         kfree(cmd);
 278                         return NULL;
 279                 }
 280         } else
 281                 cmd->frame->sg_counts = 0;
 282 
 283         return cmd;
 284 }
 285 
 286 static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
 287                                                 struct mvumi_cmd *cmd)
 288 {
 289         struct mvumi_sgl *m_sg;
 290         unsigned int size;
 291         dma_addr_t phy_addr;
 292 
 293         if (cmd && cmd->frame) {
 294                 if (cmd->frame->sg_counts) {
 295                         m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
 296                         sgd_getsz(mhba, m_sg, size);
 297 
 298                         phy_addr = (dma_addr_t) m_sg->baseaddr_l |
 299                                 (dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);
 300 
 301                         dma_free_coherent(&mhba->pdev->dev, size, cmd->data_buf,
 302                                                                 phy_addr);
 303                 }
 304                 dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
 305                                 cmd->frame, cmd->frame_phys);
 306                 kfree(cmd);
 307         }
 308 }
 309 
 310 /**
 311  * mvumi_get_cmd -      Get a command from the free pool
 312  * @mhba:               Adapter soft state
 313  *
 314  * Returns a free command from the pool
 315  */
 316 static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba)
 317 {
 318         struct mvumi_cmd *cmd = NULL;
 319 
 320         if (likely(!list_empty(&mhba->cmd_pool))) {
 321                 cmd = list_entry((&mhba->cmd_pool)->next,
 322                                 struct mvumi_cmd, queue_pointer);
 323                 list_del_init(&cmd->queue_pointer);
 324         } else
 325                 dev_warn(&mhba->pdev->dev, "command pool is empty!\n");
 326 
 327         return cmd;
 328 }
 329 
 330 /**
 331  * mvumi_return_cmd -   Return a cmd to free command pool
 332  * @mhba:               Adapter soft state
 333  * @cmd:                Command packet to be returned to free command pool
 334  */
 335 static inline void mvumi_return_cmd(struct mvumi_hba *mhba,
 336                                                 struct mvumi_cmd *cmd)
 337 {
 338         cmd->scmd = NULL;
 339         list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
 340 }
 341 
 342 /**
 343  * mvumi_free_cmds -    Free all the cmds in the free cmd pool
 344  * @mhba:               Adapter soft state
 345  */
 346 static void mvumi_free_cmds(struct mvumi_hba *mhba)
 347 {
 348         struct mvumi_cmd *cmd;
 349 
 350         while (!list_empty(&mhba->cmd_pool)) {
 351                 cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
 352                                                         queue_pointer);
 353                 list_del(&cmd->queue_pointer);
 354                 if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
 355                         kfree(cmd->frame);
 356                 kfree(cmd);
 357         }
 358 }
 359 
 360 /**
 361  * mvumi_alloc_cmds -   Allocates the command packets
 362  * @mhba:               Adapter soft state
 363  *
 364  */
 365 static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
 366 {
 367         int i;
 368         struct mvumi_cmd *cmd;
 369 
 370         for (i = 0; i < mhba->max_io; i++) {
 371                 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
 372                 if (!cmd)
 373                         goto err_exit;
 374 
 375                 INIT_LIST_HEAD(&cmd->queue_pointer);
 376                 list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
 377                 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
 378                         cmd->frame = mhba->ib_frame + i * mhba->ib_max_size;
 379                         cmd->frame_phys = mhba->ib_frame_phys
 380                                                 + i * mhba->ib_max_size;
 381                 } else
 382                         cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
 383                 if (!cmd->frame)
 384                         goto err_exit;
 385         }
 386         return 0;
 387 
 388 err_exit:
 389         dev_err(&mhba->pdev->dev,
 390                         "failed to allocate memory for cmd[0x%x].\n", i);
 391         while (!list_empty(&mhba->cmd_pool)) {
 392                 cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
 393                                                 queue_pointer);
 394                 list_del(&cmd->queue_pointer);
 395                 if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
 396                         kfree(cmd->frame);
 397                 kfree(cmd);
 398         }
 399         return -ENOMEM;
 400 }
 401 
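      /*
       * The inbound/outbound lists are circular.  The slot registers pack
       * a slot index (cl_slot_num_mask) with a toggle bit
       * (cl_pointer_toggle) that flips on every wrap-around, so equal
       * indices with different toggle bits mean "full", while equal
       * indices with equal toggle bits mean "empty".
       */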
 402 static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba)
 403 {
 404         unsigned int ib_rp_reg;
 405         struct mvumi_hw_regs *regs = mhba->regs;
 406 
 407         ib_rp_reg = ioread32(mhba->regs->inb_read_pointer);
 408 
 409         if (unlikely(((ib_rp_reg & regs->cl_slot_num_mask) ==
 410                         (mhba->ib_cur_slot & regs->cl_slot_num_mask)) &&
 411                         ((ib_rp_reg & regs->cl_pointer_toggle)
 412                          != (mhba->ib_cur_slot & regs->cl_pointer_toggle)))) {
 413                 dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
 414                 return 0;
 415         }
 416         if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
 417                 dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
 418                 return 0;
 419         } else {
 420                 return mhba->max_io - atomic_read(&mhba->fw_outstanding);
 421         }
 422 }
 423 
 424 static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba)
 425 {
 426         unsigned int count;
 427         if (atomic_read(&mhba->fw_outstanding) >= (mhba->max_io - 1))
 428                 return 0;
 429         count = ioread32(mhba->ib_shadow);
 430         if (count == 0xffff)
 431                 return 0;
 432         return count;
 433 }
 434 
 435 static void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
 436 {
 437         unsigned int cur_ib_entry;
 438 
 439         cur_ib_entry = mhba->ib_cur_slot & mhba->regs->cl_slot_num_mask;
 440         cur_ib_entry++;
 441         if (cur_ib_entry >= mhba->list_num_io) {
 442                 cur_ib_entry -= mhba->list_num_io;
 443                 mhba->ib_cur_slot ^= mhba->regs->cl_pointer_toggle;
 444         }
 445         mhba->ib_cur_slot &= ~mhba->regs->cl_slot_num_mask;
 446         mhba->ib_cur_slot |= (cur_ib_entry & mhba->regs->cl_slot_num_mask);
 447         if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
 448                 *ib_entry = mhba->ib_list + cur_ib_entry *
 449                                 sizeof(struct mvumi_dyn_list_entry);
 450         } else {
 451                 *ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
 452         }
 453         atomic_inc(&mhba->fw_outstanding);
 454 }
 455 
 456 static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
 457 {
 458         iowrite32(0xffff, mhba->ib_shadow);
 459         iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer);
 460 }
 461 
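      /*
       * Sanity-check an outbound (response) frame before trusting its
       * tag: the tag must map to an outstanding command and, when request
       * IDs are enabled, echo the request id the driver issued.  The
       * udelay(1) below presumably lets the DMA'd frame settle before it
       * is re-read; that intent is an assumption.
       */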
 462 static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
 463                 unsigned int cur_obf, struct mvumi_rsp_frame *p_outb_frame)
 464 {
 465         unsigned short tag, request_id;
 466 
 467         udelay(1);
 468         p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
 469         request_id = p_outb_frame->request_id;
 470         tag = p_outb_frame->tag;
  471         if (tag >= mhba->tag_pool.size) {       /* valid tags: 0..size-1 */
 472                 dev_err(&mhba->pdev->dev, "ob frame data error\n");
 473                 return -1;
 474         }
 475         if (mhba->tag_cmd[tag] == NULL) {
 476                 dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag);
 477                 return -1;
 478         } else if (mhba->tag_cmd[tag]->request_id != request_id &&
 479                                                 mhba->request_id_enabled) {
 480                         dev_err(&mhba->pdev->dev, "request ID from FW:0x%x,"
 481                                         "cmd request ID:0x%x\n", request_id,
 482                                         mhba->tag_cmd[tag]->request_id);
 483                         return -1;
 484         }
 485 
 486         return 0;
 487 }
 488 
 489 static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba,
 490                         unsigned int *cur_obf, unsigned int *assign_obf_end)
 491 {
 492         unsigned int ob_write, ob_write_shadow;
 493         struct mvumi_hw_regs *regs = mhba->regs;
 494 
 495         do {
 496                 ob_write = ioread32(regs->outb_copy_pointer);
 497                 ob_write_shadow = ioread32(mhba->ob_shadow);
 498         } while ((ob_write & regs->cl_slot_num_mask) != ob_write_shadow);
 499 
 500         *cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
 501         *assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
 502 
 503         if ((ob_write & regs->cl_pointer_toggle) !=
 504                         (mhba->ob_cur_slot & regs->cl_pointer_toggle)) {
 505                 *assign_obf_end += mhba->list_num_io;
 506         }
 507         return 0;
 508 }
 509 
 510 static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba,
 511                         unsigned int *cur_obf, unsigned int *assign_obf_end)
 512 {
 513         unsigned int ob_write;
 514         struct mvumi_hw_regs *regs = mhba->regs;
 515 
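              /*
               * Note: outb_read_pointer is read first and the value is
               * immediately overwritten; presumably the read itself
               * (ordering/flush) is what matters, not the data.  This is
               * an assumption.
               */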
 516         ob_write = ioread32(regs->outb_read_pointer);
 517         ob_write = ioread32(regs->outb_copy_pointer);
 518         *cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
 519         *assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
 520         if (*assign_obf_end < *cur_obf)
 521                 *assign_obf_end += mhba->list_num_io;
 522         else if (*assign_obf_end == *cur_obf)
 523                 return -1;
 524         return 0;
 525 }
 526 
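      /*
       * Drain completed entries from the outbound list: for each slot
       * between the driver's cursor and the firmware's copy pointer, copy
       * the response frame into a preallocated ob_data buffer and queue
       * it on free_ob_list for later consumption (see mvumi_handle_clob).
       * If the ob_data pool runs dry, back the cursor up one slot so the
       * entry is revisited on the next interrupt.
       */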
 527 static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
 528 {
 529         unsigned int cur_obf, assign_obf_end, i;
 530         struct mvumi_ob_data *ob_data;
 531         struct mvumi_rsp_frame *p_outb_frame;
 532         struct mvumi_hw_regs *regs = mhba->regs;
 533 
 534         if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end))
 535                 return;
 536 
 537         for (i = (assign_obf_end - cur_obf); i != 0; i--) {
 538                 cur_obf++;
 539                 if (cur_obf >= mhba->list_num_io) {
 540                         cur_obf -= mhba->list_num_io;
 541                         mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
 542                 }
 543 
 544                 p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
 545 
  546                 /* Copy pointer may point to entry in outbound list
  547                  * before entry has valid data
  548                  */
  549                 if (unlikely(p_outb_frame->tag >= mhba->tag_pool.size ||
 550                         mhba->tag_cmd[p_outb_frame->tag] == NULL ||
 551                         p_outb_frame->request_id !=
 552                                 mhba->tag_cmd[p_outb_frame->tag]->request_id))
 553                         if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame))
 554                                 continue;
 555 
 556                 if (!list_empty(&mhba->ob_data_list)) {
 557                         ob_data = (struct mvumi_ob_data *)
 558                                 list_first_entry(&mhba->ob_data_list,
 559                                         struct mvumi_ob_data, list);
 560                         list_del_init(&ob_data->list);
 561                 } else {
 562                         ob_data = NULL;
 563                         if (cur_obf == 0) {
 564                                 cur_obf = mhba->list_num_io - 1;
 565                                 mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
 566                         } else
 567                                 cur_obf -= 1;
 568                         break;
 569                 }
 570 
 571                 memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size);
 572                 p_outb_frame->tag = 0xff;
 573 
 574                 list_add_tail(&ob_data->list, &mhba->free_ob_list);
 575         }
 576         mhba->ob_cur_slot &= ~regs->cl_slot_num_mask;
 577         mhba->ob_cur_slot |= (cur_obf & regs->cl_slot_num_mask);
 578         iowrite32(mhba->ob_cur_slot, regs->outb_read_pointer);
 579 }
 580 
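      /*
       * Request a controller soft reset via the doorbell; the reset is
       * only issued once the firmware reports the handshake-done state.
       */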
 581 static void mvumi_reset(struct mvumi_hba *mhba)
 582 {
 583         struct mvumi_hw_regs *regs = mhba->regs;
 584 
 585         iowrite32(0, regs->enpointa_mask_reg);
 586         if (ioread32(regs->arm_to_pciea_msg1) != HANDSHAKE_DONESTATE)
 587                 return;
 588 
 589         iowrite32(DRBL_SOFT_RESET, regs->pciea_to_arm_drbl_reg);
 590 }
 591 
 592 static unsigned char mvumi_start(struct mvumi_hba *mhba);
 593 
 594 static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
 595 {
 596         mhba->fw_state = FW_STATE_ABORT;
 597         mvumi_reset(mhba);
 598 
 599         if (mvumi_start(mhba))
 600                 return FAILED;
 601         else
 602                 return SUCCESS;
 603 }
 604 
 605 static int mvumi_wait_for_fw(struct mvumi_hba *mhba)
 606 {
 607         struct mvumi_hw_regs *regs = mhba->regs;
 608         u32 tmp;
 609         unsigned long before;
 610         before = jiffies;
 611 
 612         iowrite32(0, regs->enpointa_mask_reg);
 613         tmp = ioread32(regs->arm_to_pciea_msg1);
 614         while (tmp != HANDSHAKE_READYSTATE) {
 615                 iowrite32(DRBL_MU_RESET, regs->pciea_to_arm_drbl_reg);
 616                 if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
 617                         dev_err(&mhba->pdev->dev,
 618                                 "FW reset failed [0x%x].\n", tmp);
 619                         return FAILED;
 620                 }
 621 
 622                 msleep(500);
 623                 rmb();
 624                 tmp = ioread32(regs->arm_to_pciea_msg1);
 625         }
 626 
 627         return SUCCESS;
 628 }
 629 
 630 static void mvumi_backup_bar_addr(struct mvumi_hba *mhba)
 631 {
 632         unsigned char i;
 633 
 634         for (i = 0; i < MAX_BASE_ADDRESS; i++) {
 635                 pci_read_config_dword(mhba->pdev, 0x10 + i * 4,
 636                                                 &mhba->pci_base[i]);
 637         }
 638 }
 639 
 640 static void mvumi_restore_bar_addr(struct mvumi_hba *mhba)
 641 {
 642         unsigned char i;
 643 
 644         for (i = 0; i < MAX_BASE_ADDRESS; i++) {
 645                 if (mhba->pci_base[i])
 646                         pci_write_config_dword(mhba->pdev, 0x10 + i * 4,
 647                                                 mhba->pci_base[i]);
 648         }
 649 }
 650 
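      /*
       * Enable bus mastering and select a DMA mask: prefer 64-bit when
       * the build supports it (IS_DMA64), falling back to a 32-bit mask
       * if the 64-bit one is rejected.
       */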
 651 static int mvumi_pci_set_master(struct pci_dev *pdev)
 652 {
 653         int ret = 0;
 654 
 655         pci_set_master(pdev);
 656 
 657         if (IS_DMA64) {
 658                 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
 659                         ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 660         } else
 661                 ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 662 
 663         return ret;
 664 }
 665 
 666 static int mvumi_reset_host_9580(struct mvumi_hba *mhba)
 667 {
 668         mhba->fw_state = FW_STATE_ABORT;
 669 
 670         iowrite32(0, mhba->regs->reset_enable);
 671         iowrite32(0xf, mhba->regs->reset_request);
 672 
 673         iowrite32(0x10, mhba->regs->reset_enable);
 674         iowrite32(0x10, mhba->regs->reset_request);
 675         msleep(100);
 676         pci_disable_device(mhba->pdev);
 677 
 678         if (pci_enable_device(mhba->pdev)) {
 679                 dev_err(&mhba->pdev->dev, "enable device failed\n");
 680                 return FAILED;
 681         }
 682         if (mvumi_pci_set_master(mhba->pdev)) {
 683                 dev_err(&mhba->pdev->dev, "set master failed\n");
 684                 return FAILED;
 685         }
 686         mvumi_restore_bar_addr(mhba);
 687         if (mvumi_wait_for_fw(mhba) == FAILED)
 688                 return FAILED;
 689 
 690         return mvumi_wait_for_outstanding(mhba);
 691 }
 692 
 693 static int mvumi_reset_host_9143(struct mvumi_hba *mhba)
 694 {
 695         return mvumi_wait_for_outstanding(mhba);
 696 }
 697 
 698 static int mvumi_host_reset(struct scsi_cmnd *scmd)
 699 {
 700         struct mvumi_hba *mhba;
 701 
 702         mhba = (struct mvumi_hba *) scmd->device->host->hostdata;
 703 
 704         scmd_printk(KERN_NOTICE, scmd, "RESET -%u cmd=%x retries=%x\n",
 705                         scmd->request->tag, scmd->cmnd[0], scmd->retries);
 706 
 707         return mhba->instancet->reset_host(mhba);
 708 }
 709 
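      /*
       * Issue an internal command synchronously: mark the frame pending,
       * fire it under the host lock, then sleep on int_cmd_wait_q until
       * mvumi_complete_internal_cmd() clears sync_cmd or
       * MVUMI_INTERNAL_CMD_WAIT_TIME seconds elapse.  On timeout, the tag
       * and (if the frame never left the queue) the list entry are
       * reclaimed here.
       */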
 710 static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
 711                                                 struct mvumi_cmd *cmd)
 712 {
 713         unsigned long flags;
 714 
 715         cmd->cmd_status = REQ_STATUS_PENDING;
 716 
 717         if (atomic_read(&cmd->sync_cmd)) {
 718                 dev_err(&mhba->pdev->dev,
 719                         "last blocked cmd not finished, sync_cmd = %d\n",
 720                                                 atomic_read(&cmd->sync_cmd));
  721                 BUG();
 722                 return -1;
 723         }
 724         atomic_inc(&cmd->sync_cmd);
 725         spin_lock_irqsave(mhba->shost->host_lock, flags);
 726         mhba->instancet->fire_cmd(mhba, cmd);
 727         spin_unlock_irqrestore(mhba->shost->host_lock, flags);
 728 
 729         wait_event_timeout(mhba->int_cmd_wait_q,
 730                 (cmd->cmd_status != REQ_STATUS_PENDING),
 731                 MVUMI_INTERNAL_CMD_WAIT_TIME * HZ);
 732 
 733         /* command timeout */
 734         if (atomic_read(&cmd->sync_cmd)) {
 735                 spin_lock_irqsave(mhba->shost->host_lock, flags);
 736                 atomic_dec(&cmd->sync_cmd);
 737                 if (mhba->tag_cmd[cmd->frame->tag]) {
 738                         mhba->tag_cmd[cmd->frame->tag] = NULL;
  739                         dev_warn(&mhba->pdev->dev, "TIMEOUT: release tag [%d]\n",
 740                                                         cmd->frame->tag);
 741                         tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
 742                 }
 743                 if (!list_empty(&cmd->queue_pointer)) {
 744                         dev_warn(&mhba->pdev->dev,
  745                                 "TIMEOUT: an internal command was not sent!\n");
 746                         list_del_init(&cmd->queue_pointer);
 747                 } else
 748                         atomic_dec(&mhba->fw_outstanding);
 749 
 750                 spin_unlock_irqrestore(mhba->shost->host_lock, flags);
 751         }
 752         return 0;
 753 }
 754 
 755 static void mvumi_release_fw(struct mvumi_hba *mhba)
 756 {
 757         mvumi_free_cmds(mhba);
 758         mvumi_release_mem_resource(mhba);
 759         mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
 760         dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
 761                 mhba->handshake_page, mhba->handshake_page_phys);
 762         kfree(mhba->regs);
 763         pci_release_regions(mhba->pdev);
 764 }
 765 
 766 static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba)
 767 {
 768         struct mvumi_cmd *cmd;
 769         struct mvumi_msg_frame *frame;
 770         unsigned char device_id, retry = 0;
 771         unsigned char bitcount = sizeof(unsigned char) * 8;
 772 
 773         for (device_id = 0; device_id < mhba->max_target_id; device_id++) {
 774                 if (!(mhba->target_map[device_id / bitcount] &
 775                                 (1 << (device_id % bitcount))))
 776                         continue;
 777 get_cmd:        cmd = mvumi_create_internal_cmd(mhba, 0);
 778                 if (!cmd) {
 779                         if (retry++ >= 5) {
 780                                 dev_err(&mhba->pdev->dev, "failed to get memory"
 781                                         " for internal flush cache cmd for "
  782                                         "device %d\n", device_id);
 783                                 retry = 0;
 784                                 continue;
 785                         } else
 786                                 goto get_cmd;
 787                 }
 788                 cmd->scmd = NULL;
 789                 cmd->cmd_status = REQ_STATUS_PENDING;
 790                 atomic_set(&cmd->sync_cmd, 0);
 791                 frame = cmd->frame;
 792                 frame->req_function = CL_FUN_SCSI_CMD;
 793                 frame->device_id = device_id;
 794                 frame->cmd_flag = CMD_FLAG_NON_DATA;
 795                 frame->data_transfer_length = 0;
 796                 frame->cdb_length = MAX_COMMAND_SIZE;
 797                 memset(frame->cdb, 0, MAX_COMMAND_SIZE);
 798                 frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
 799                 frame->cdb[1] = CDB_CORE_MODULE;
 800                 frame->cdb[2] = CDB_CORE_SHUTDOWN;
 801 
 802                 mvumi_issue_blocked_cmd(mhba, cmd);
 803                 if (cmd->cmd_status != SAM_STAT_GOOD) {
 804                         dev_err(&mhba->pdev->dev,
 805                                 "device %d flush cache failed, status=0x%x.\n",
 806                                 device_id, cmd->cmd_status);
 807                 }
 808 
 809                 mvumi_delete_internal_cmd(mhba, cmd);
 810         }
 811         return 0;
 812 }
 813 
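      /*
       * Handshake pages carry a simple XOR checksum over the page payload
       * (frame_content).  Worked example: a two-byte payload
       * { 0x12, 0x34 } yields 0x12 ^ 0x34 = 0x26.
       */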
 814 static unsigned char
 815 mvumi_calculate_checksum(struct mvumi_hs_header *p_header,
 816                                                         unsigned short len)
 817 {
  818         unsigned char *ptr, ret = 0;
  819         unsigned short i;       /* must span the full 'len' range */
 820 
 821         ptr = (unsigned char *) p_header->frame_content;
 822         for (i = 0; i < len; i++) {
 823                 ret ^= *ptr;
 824                 ptr++;
 825         }
 826 
 827         return ret;
 828 }
 829 
 830 static void mvumi_hs_build_page(struct mvumi_hba *mhba,
 831                                 struct mvumi_hs_header *hs_header)
 832 {
 833         struct mvumi_hs_page2 *hs_page2;
 834         struct mvumi_hs_page4 *hs_page4;
 835         struct mvumi_hs_page3 *hs_page3;
 836         u64 time;
 837         u64 local_time;
 838 
 839         switch (hs_header->page_code) {
 840         case HS_PAGE_HOST_INFO:
 841                 hs_page2 = (struct mvumi_hs_page2 *) hs_header;
 842                 hs_header->frame_length = sizeof(*hs_page2) - 4;
 843                 memset(hs_header->frame_content, 0, hs_header->frame_length);
  844                 hs_page2->host_type = 3; /* 3 means Linux */
 845                 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
 846                         hs_page2->host_cap = 0x08;/* host dynamic source mode */
 847                 hs_page2->host_ver.ver_major = VER_MAJOR;
 848                 hs_page2->host_ver.ver_minor = VER_MINOR;
 849                 hs_page2->host_ver.ver_oem = VER_OEM;
 850                 hs_page2->host_ver.ver_build = VER_BUILD;
 851                 hs_page2->system_io_bus = 0;
 852                 hs_page2->slot_number = 0;
 853                 hs_page2->intr_level = 0;
 854                 hs_page2->intr_vector = 0;
 855                 time = ktime_get_real_seconds();
 856                 local_time = (time - (sys_tz.tz_minuteswest * 60));
 857                 hs_page2->seconds_since1970 = local_time;
 858                 hs_header->checksum = mvumi_calculate_checksum(hs_header,
 859                                                 hs_header->frame_length);
 860                 break;
 861 
 862         case HS_PAGE_FIRM_CTL:
 863                 hs_page3 = (struct mvumi_hs_page3 *) hs_header;
 864                 hs_header->frame_length = sizeof(*hs_page3) - 4;
 865                 memset(hs_header->frame_content, 0, hs_header->frame_length);
 866                 hs_header->checksum = mvumi_calculate_checksum(hs_header,
 867                                                 hs_header->frame_length);
 868                 break;
 869 
 870         case HS_PAGE_CL_INFO:
 871                 hs_page4 = (struct mvumi_hs_page4 *) hs_header;
 872                 hs_header->frame_length = sizeof(*hs_page4) - 4;
 873                 memset(hs_header->frame_content, 0, hs_header->frame_length);
 874                 hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys);
 875                 hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys);
 876 
 877                 hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys);
 878                 hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys);
 879                 hs_page4->ib_entry_size = mhba->ib_max_size_setting;
 880                 hs_page4->ob_entry_size = mhba->ob_max_size_setting;
 881                 if (mhba->hba_capability
 882                         & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF) {
 883                         hs_page4->ob_depth = find_first_bit((unsigned long *)
 884                                                             &mhba->list_num_io,
 885                                                             BITS_PER_LONG);
 886                         hs_page4->ib_depth = find_first_bit((unsigned long *)
 887                                                             &mhba->list_num_io,
 888                                                             BITS_PER_LONG);
 889                 } else {
 890                         hs_page4->ob_depth = (u8) mhba->list_num_io;
 891                         hs_page4->ib_depth = (u8) mhba->list_num_io;
 892                 }
 893                 hs_header->checksum = mvumi_calculate_checksum(hs_header,
 894                                                 hs_header->frame_length);
 895                 break;
 896 
 897         default:
 898                 dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n",
 899                         hs_header->page_code);
 900                 break;
 901         }
 902 }
 903 
 904 /**
  905  * mvumi_init_data -    Initialize requested data for FW
 906  * @mhba:                       Adapter soft state
 907  */
 908 static int mvumi_init_data(struct mvumi_hba *mhba)
 909 {
 910         struct mvumi_ob_data *ob_pool;
 911         struct mvumi_res *res_mgnt;
 912         unsigned int tmp_size, offset, i;
 913         void *virmem, *v;
 914         dma_addr_t p;
 915 
 916         if (mhba->fw_flag & MVUMI_FW_ALLOC)
 917                 return 0;
 918 
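              /*
               * Carve one DMA-coherent block into the firmware
               * communication structures.  Rough layout (with alignment
               * padding in between):
               *
               *   ib_list   - inbound list, 128-byte aligned (plus
               *               per-command frames in dynamic-source mode)
               *   ib_shadow - inbound available-count shadow, 8-byte
               *               aligned
               *   ob_shadow - outbound copy shadow (8 bytes on 9580,
               *               else 4)
               *   ob_list   - outbound list, 128-byte aligned
               *
               * The constants added to tmp_size below budget for this
               * padding.
               */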
 919         tmp_size = mhba->ib_max_size * mhba->max_io;
 920         if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
 921                 tmp_size += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
 922 
 923         tmp_size += 128 + mhba->ob_max_size * mhba->max_io;
 924         tmp_size += 8 + sizeof(u32)*2 + 16;
 925 
 926         res_mgnt = mvumi_alloc_mem_resource(mhba,
 927                                         RESOURCE_UNCACHED_MEMORY, tmp_size);
 928         if (!res_mgnt) {
 929                 dev_err(&mhba->pdev->dev,
 930                         "failed to allocate memory for inbound list\n");
 931                 goto fail_alloc_dma_buf;
 932         }
 933 
 934         p = res_mgnt->bus_addr;
 935         v = res_mgnt->virt_addr;
 936         /* ib_list */
 937         offset = round_up(p, 128) - p;
 938         p += offset;
 939         v += offset;
 940         mhba->ib_list = v;
 941         mhba->ib_list_phys = p;
 942         if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
 943                 v += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
 944                 p += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
 945                 mhba->ib_frame = v;
 946                 mhba->ib_frame_phys = p;
 947         }
 948         v += mhba->ib_max_size * mhba->max_io;
 949         p += mhba->ib_max_size * mhba->max_io;
 950 
 951         /* ib shadow */
 952         offset = round_up(p, 8) - p;
 953         p += offset;
 954         v += offset;
 955         mhba->ib_shadow = v;
 956         mhba->ib_shadow_phys = p;
 957         p += sizeof(u32)*2;
 958         v += sizeof(u32)*2;
 959         /* ob shadow */
 960         if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
 961                 offset = round_up(p, 8) - p;
 962                 p += offset;
 963                 v += offset;
 964                 mhba->ob_shadow = v;
 965                 mhba->ob_shadow_phys = p;
 966                 p += 8;
 967                 v += 8;
 968         } else {
 969                 offset = round_up(p, 4) - p;
 970                 p += offset;
 971                 v += offset;
 972                 mhba->ob_shadow = v;
 973                 mhba->ob_shadow_phys = p;
 974                 p += 4;
 975                 v += 4;
 976         }
 977 
 978         /* ob list */
 979         offset = round_up(p, 128) - p;
 980         p += offset;
 981         v += offset;
 982 
 983         mhba->ob_list = v;
 984         mhba->ob_list_phys = p;
 985 
 986         /* ob data pool */
 987         tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool));
 988         tmp_size = round_up(tmp_size, 8);
 989 
 990         res_mgnt = mvumi_alloc_mem_resource(mhba,
 991                                 RESOURCE_CACHED_MEMORY, tmp_size);
 992         if (!res_mgnt) {
 993                 dev_err(&mhba->pdev->dev,
 994                         "failed to allocate memory for outbound data buffer\n");
 995                 goto fail_alloc_dma_buf;
 996         }
 997         virmem = res_mgnt->virt_addr;
 998 
 999         for (i = mhba->max_io; i != 0; i--) {
1000                 ob_pool = (struct mvumi_ob_data *) virmem;
1001                 list_add_tail(&ob_pool->list, &mhba->ob_data_list);
1002                 virmem += mhba->ob_max_size + sizeof(*ob_pool);
1003         }
1004 
1005         tmp_size = sizeof(unsigned short) * mhba->max_io +
1006                                 sizeof(struct mvumi_cmd *) * mhba->max_io;
1007         tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) /
1008                                                 (sizeof(unsigned char) * 8);
1009 
1010         res_mgnt = mvumi_alloc_mem_resource(mhba,
1011                                 RESOURCE_CACHED_MEMORY, tmp_size);
1012         if (!res_mgnt) {
1013                 dev_err(&mhba->pdev->dev,
1014                         "failed to allocate memory for tag and target map\n");
1015                 goto fail_alloc_dma_buf;
1016         }
1017 
1018         virmem = res_mgnt->virt_addr;
1019         mhba->tag_pool.stack = virmem;
1020         mhba->tag_pool.size = mhba->max_io;
1021         tag_init(&mhba->tag_pool, mhba->max_io);
1022         virmem += sizeof(unsigned short) * mhba->max_io;
1023 
1024         mhba->tag_cmd = virmem;
1025         virmem += sizeof(struct mvumi_cmd *) * mhba->max_io;
1026 
1027         mhba->target_map = virmem;
1028 
1029         mhba->fw_flag |= MVUMI_FW_ALLOC;
1030         return 0;
1031 
1032 fail_alloc_dma_buf:
1033         mvumi_release_mem_resource(mhba);
1034         return -1;
1035 }
1036 
1037 static int mvumi_hs_process_page(struct mvumi_hba *mhba,
1038                                 struct mvumi_hs_header *hs_header)
1039 {
1040         struct mvumi_hs_page1 *hs_page1;
1041         unsigned char page_checksum;
1042 
1043         page_checksum = mvumi_calculate_checksum(hs_header,
1044                                                 hs_header->frame_length);
1045         if (page_checksum != hs_header->checksum) {
1046                 dev_err(&mhba->pdev->dev, "checksum error\n");
1047                 return -1;
1048         }
1049 
1050         switch (hs_header->page_code) {
1051         case HS_PAGE_FIRM_CAP:
1052                 hs_page1 = (struct mvumi_hs_page1 *) hs_header;
1053 
1054                 mhba->max_io = hs_page1->max_io_support;
1055                 mhba->list_num_io = hs_page1->cl_inout_list_depth;
1056                 mhba->max_transfer_size = hs_page1->max_transfer_size;
1057                 mhba->max_target_id = hs_page1->max_devices_support;
1058                 mhba->hba_capability = hs_page1->capability;
1059                 mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size;
1060                 mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2;
1061 
1062                 mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size;
1063                 mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2;
1064 
1065                 dev_dbg(&mhba->pdev->dev, "FW version:%d\n",
1066                                                 hs_page1->fw_ver.ver_build);
1067 
1068                 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG)
1069                         mhba->eot_flag = 22;
1070                 else
1071                         mhba->eot_flag = 27;
1072                 if (mhba->hba_capability & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF)
1073                         mhba->list_num_io = 1 << hs_page1->cl_inout_list_depth;
1074                 break;
1075         default:
1076                 dev_err(&mhba->pdev->dev, "handshake: page code error\n");
1077                 return -1;
1078         }
1079         return 0;
1080 }
1081 
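      /*
       * Handshake state machine driven by mvumi_handshake() below (state
       * is read from arm_to_pciea_msg0):
       *
       *   HS_S_START -> HS_S_RESET -> HS_S_PAGE_ADDR ->
       *   { HS_S_QUERY_PAGE | HS_S_SEND_PAGE } ... -> HS_S_END
       *
       * Pages are exchanged through the DMA handshake page; HS_S_END
       * programs the list/shadow base registers and marks the FW started.
       */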
1082 /**
1083  * mvumi_handshake -    Move the FW to READY state
1084  * @mhba:                               Adapter soft state
1085  *
 1086  * During initialization, the FW can be in any one of several possible
 1087  * states. If the FW is in the operational or waiting-for-handshake
 1088  * state, the driver must take steps to bring it to the ready state.
 1089  * Otherwise, it has to wait for the ready state.
1090  */
1091 static int mvumi_handshake(struct mvumi_hba *mhba)
1092 {
1093         unsigned int hs_state, tmp, hs_fun;
1094         struct mvumi_hs_header *hs_header;
1095         struct mvumi_hw_regs *regs = mhba->regs;
1096 
1097         if (mhba->fw_state == FW_STATE_STARTING)
1098                 hs_state = HS_S_START;
1099         else {
1100                 tmp = ioread32(regs->arm_to_pciea_msg0);
1101                 hs_state = HS_GET_STATE(tmp);
1102                 dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state);
1103                 if (HS_GET_STATUS(tmp) != HS_STATUS_OK) {
1104                         mhba->fw_state = FW_STATE_STARTING;
1105                         return -1;
1106                 }
1107         }
1108 
1109         hs_fun = 0;
1110         switch (hs_state) {
1111         case HS_S_START:
1112                 mhba->fw_state = FW_STATE_HANDSHAKING;
1113                 HS_SET_STATUS(hs_fun, HS_STATUS_OK);
1114                 HS_SET_STATE(hs_fun, HS_S_RESET);
1115                 iowrite32(HANDSHAKE_SIGNATURE, regs->pciea_to_arm_msg1);
1116                 iowrite32(hs_fun, regs->pciea_to_arm_msg0);
1117                 iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
1118                 break;
1119 
1120         case HS_S_RESET:
1121                 iowrite32(lower_32_bits(mhba->handshake_page_phys),
1122                                         regs->pciea_to_arm_msg1);
1123                 iowrite32(upper_32_bits(mhba->handshake_page_phys),
1124                                         regs->arm_to_pciea_msg1);
1125                 HS_SET_STATUS(hs_fun, HS_STATUS_OK);
1126                 HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR);
1127                 iowrite32(hs_fun, regs->pciea_to_arm_msg0);
1128                 iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
1129                 break;
1130 
1131         case HS_S_PAGE_ADDR:
1132         case HS_S_QUERY_PAGE:
1133         case HS_S_SEND_PAGE:
1134                 hs_header = (struct mvumi_hs_header *) mhba->handshake_page;
1135                 if (hs_header->page_code == HS_PAGE_FIRM_CAP) {
1136                         mhba->hba_total_pages =
1137                         ((struct mvumi_hs_page1 *) hs_header)->total_pages;
1138 
1139                         if (mhba->hba_total_pages == 0)
1140                                 mhba->hba_total_pages = HS_PAGE_TOTAL-1;
1141                 }
1142 
1143                 if (hs_state == HS_S_QUERY_PAGE) {
1144                         if (mvumi_hs_process_page(mhba, hs_header)) {
1145                                 HS_SET_STATE(hs_fun, HS_S_ABORT);
1146                                 return -1;
1147                         }
1148                         if (mvumi_init_data(mhba)) {
1149                                 HS_SET_STATE(hs_fun, HS_S_ABORT);
1150                                 return -1;
1151                         }
1152                 } else if (hs_state == HS_S_PAGE_ADDR) {
1153                         hs_header->page_code = 0;
1154                         mhba->hba_total_pages = HS_PAGE_TOTAL-1;
1155                 }
1156 
1157                 if ((hs_header->page_code + 1) <= mhba->hba_total_pages) {
1158                         hs_header->page_code++;
1159                         if (hs_header->page_code != HS_PAGE_FIRM_CAP) {
1160                                 mvumi_hs_build_page(mhba, hs_header);
1161                                 HS_SET_STATE(hs_fun, HS_S_SEND_PAGE);
1162                         } else
1163                                 HS_SET_STATE(hs_fun, HS_S_QUERY_PAGE);
1164                 } else
1165                         HS_SET_STATE(hs_fun, HS_S_END);
1166 
1167                 HS_SET_STATUS(hs_fun, HS_STATUS_OK);
1168                 iowrite32(hs_fun, regs->pciea_to_arm_msg0);
1169                 iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
1170                 break;
1171 
1172         case HS_S_END:
1173                 /* Set communication list ISR */
1174                 tmp = ioread32(regs->enpointa_mask_reg);
1175                 tmp |= regs->int_comaout | regs->int_comaerr;
1176                 iowrite32(tmp, regs->enpointa_mask_reg);
1177                 iowrite32(mhba->list_num_io, mhba->ib_shadow);
1178                 /* Set InBound List Available count shadow */
1179                 iowrite32(lower_32_bits(mhba->ib_shadow_phys),
1180                                         regs->inb_aval_count_basel);
1181                 iowrite32(upper_32_bits(mhba->ib_shadow_phys),
1182                                         regs->inb_aval_count_baseh);
1183 
1184                 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) {
1185                         /* Set OutBound List Available count shadow */
1186                         iowrite32((mhba->list_num_io-1) |
1187                                                         regs->cl_pointer_toggle,
1188                                                         mhba->ob_shadow);
1189                         iowrite32(lower_32_bits(mhba->ob_shadow_phys),
1190                                                         regs->outb_copy_basel);
1191                         iowrite32(upper_32_bits(mhba->ob_shadow_phys),
1192                                                         regs->outb_copy_baseh);
1193                 }
1194 
1195                 mhba->ib_cur_slot = (mhba->list_num_io - 1) |
1196                                                         regs->cl_pointer_toggle;
1197                 mhba->ob_cur_slot = (mhba->list_num_io - 1) |
1198                                                         regs->cl_pointer_toggle;
1199                 mhba->fw_state = FW_STATE_STARTED;
1200 
1201                 break;
1202         default:
1203                 dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n",
1204                                                                 hs_state);
1205                 return -1;
1206         }
1207         return 0;
1208 }
1209 
1210 static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
1211 {
1212         unsigned int isr_status;
1213         unsigned long before;
1214 
1215         before = jiffies;
1216         mvumi_handshake(mhba);
1217         do {
1218                 isr_status = mhba->instancet->read_fw_status_reg(mhba);
1219 
1220                 if (mhba->fw_state == FW_STATE_STARTED)
1221                         return 0;
1222                 if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
1223                         dev_err(&mhba->pdev->dev,
1224                                 "no handshake response at state 0x%x.\n",
1225                                   mhba->fw_state);
1226                         dev_err(&mhba->pdev->dev,
1227                                 "isr : global=0x%x,status=0x%x.\n",
1228                                         mhba->global_isr, isr_status);
1229                         return -1;
1230                 }
1231                 rmb();
1232                 usleep_range(1000, 2000);
1233         } while (!(isr_status & DRBL_HANDSHAKE_ISR));
1234 
1235         return 0;
1236 }
1237 
1238 static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
1239 {
1240         unsigned int tmp;
1241         unsigned long before;
1242 
1243         before = jiffies;
1244         tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
1245         while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) {
1246                 if (tmp != HANDSHAKE_READYSTATE)
1247                         iowrite32(DRBL_MU_RESET,
1248                                         mhba->regs->pciea_to_arm_drbl_reg);
1249                 if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
1250                         dev_err(&mhba->pdev->dev,
1251                                 "invalid signature [0x%x].\n", tmp);
1252                         return -1;
1253                 }
1254                 usleep_range(1000, 2000);
1255                 rmb();
1256                 tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
1257         }
1258 
1259         mhba->fw_state = FW_STATE_STARTING;
1260         dev_dbg(&mhba->pdev->dev, "start firmware handshake...\n");
1261         do {
1262                 if (mvumi_handshake_event(mhba)) {
1263                         dev_err(&mhba->pdev->dev,
1264                                         "handshake failed at state 0x%x.\n",
1265                                                 mhba->fw_state);
1266                         return -1;
1267                 }
1268         } while (mhba->fw_state != FW_STATE_STARTED);
1269 
1270         dev_dbg(&mhba->pdev->dev, "firmware handshake done\n");
1271 
1272         return 0;
1273 }
1274 
1275 static unsigned char mvumi_start(struct mvumi_hba *mhba)
1276 {
1277         unsigned int tmp;
1278         struct mvumi_hw_regs *regs = mhba->regs;
1279 
1280         /* clear Door bell */
1281         tmp = ioread32(regs->arm_to_pciea_drbl_reg);
1282         iowrite32(tmp, regs->arm_to_pciea_drbl_reg);
1283 
1284         iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
1285         tmp = ioread32(regs->enpointa_mask_reg) | regs->int_dl_cpu2pciea;
1286         iowrite32(tmp, regs->enpointa_mask_reg);
1287         msleep(100);
1288         if (mvumi_check_handshake(mhba))
1289                 return -1;
1290 
1291         return 0;
1292 }
1293 
1294 /**
1295  * mvumi_complete_cmd - Completes a command
1296  * @mhba:                       Adapter soft state
1297  * @cmd:                        Command to be completed
1298  */
1299 static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
1300                                         struct mvumi_rsp_frame *ob_frame)
1301 {
1302         struct scsi_cmnd *scmd = cmd->scmd;
1303 
1304         cmd->scmd->SCp.ptr = NULL;
1305         scmd->result = ob_frame->req_status;
1306 
1307         switch (ob_frame->req_status) {
1308         case SAM_STAT_GOOD:
1309                 scmd->result |= DID_OK << 16;
1310                 break;
1311         case SAM_STAT_BUSY:
1312                 scmd->result |= DID_BUS_BUSY << 16;
1313                 break;
1314         case SAM_STAT_CHECK_CONDITION:
1315                 scmd->result |= (DID_OK << 16);
1316                 if (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) {
1317                         memcpy(cmd->scmd->sense_buffer, ob_frame->payload,
1318                                 sizeof(struct mvumi_sense_data));
1319                         scmd->result |=  (DRIVER_SENSE << 24);
1320                 }
1321                 break;
1322         default:
1323                 scmd->result |= (DRIVER_INVALID << 24) | (DID_ABORT << 16);
1324                 break;
1325         }
1326 
1327         if (scsi_bufflen(scmd))
1328                 dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
1329                              scsi_sg_count(scmd),
1330                              scmd->sc_data_direction);
1331         cmd->scmd->scsi_done(scmd);
1332         mvumi_return_cmd(mhba, cmd);
1333 }
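
/*
 * For reference, scmd->result packs byte 0 = SAM status, byte 2 = host
 * byte, byte 3 = driver byte.  A CHECK CONDITION with valid sense data
 * therefore completes as
 *
 *	(DRIVER_SENSE << 24) | (DID_OK << 16) | SAM_STAT_CHECK_CONDITION
 *	= 0x08000002
 *
 * which tells the midlayer to consume scmd->sense_buffer.
 */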
1334 
1335 static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba,
1336                                                 struct mvumi_cmd *cmd,
1337                                         struct mvumi_rsp_frame *ob_frame)
1338 {
1339         if (atomic_read(&cmd->sync_cmd)) {
1340                 cmd->cmd_status = ob_frame->req_status;
1341 
1342                 if ((ob_frame->req_status == SAM_STAT_CHECK_CONDITION) &&
1343                                 (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) &&
1344                                 cmd->data_buf) {
1345                         memcpy(cmd->data_buf, ob_frame->payload,
1346                                         sizeof(struct mvumi_sense_data));
1347                 }
1348                 atomic_dec(&cmd->sync_cmd);
1349                 wake_up(&mhba->int_cmd_wait_q);
1350         }
1351 }
1352 
1353 static void mvumi_show_event(struct mvumi_hba *mhba,
1354                         struct mvumi_driver_event *ptr)
1355 {
1356         unsigned int i;
1357 
1358         dev_warn(&mhba->pdev->dev,
1359                 "Event[0x%x] id[0x%x] severity[0x%x] device id[0x%x]\n",
1360                 ptr->sequence_no, ptr->event_id, ptr->severity, ptr->device_id);
1361         if (ptr->param_count) {
1362                 printk(KERN_WARNING "Event param(len 0x%x): ",
1363                                                 ptr->param_count);
1364                 for (i = 0; i < ptr->param_count; i++)
1365                         printk(KERN_CONT "0x%x ", ptr->params[i]);
1366 
1367                 printk(KERN_CONT "\n");
1368         }
1369 
1370         if (ptr->sense_data_length) {
1371                 printk(KERN_WARNING "Event sense data(len 0x%x): ",
1372                                                 ptr->sense_data_length);
1373                 for (i = 0; i < ptr->sense_data_length; i++)
1374                         printk(KERN_CONT "0x%x ", ptr->sense_data[i]);
1375                 printk(KERN_CONT "\n");
1376         }
1377 }
1378 
1379 static int mvumi_handle_hotplug(struct mvumi_hba *mhba, u16 devid, int status)
1380 {
1381         struct scsi_device *sdev;
1382         int ret = -1;
1383 
1384         if (status == DEVICE_OFFLINE) {
1385                 sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
1386                 if (sdev) {
1387                         dev_dbg(&mhba->pdev->dev, "remove disk %d-%d-%d.\n", 0,
1388                                                                 sdev->id, 0);
1389                         scsi_remove_device(sdev);
1390                         scsi_device_put(sdev);
1391                         ret = 0;
1392                 } else
1393                         dev_err(&mhba->pdev->dev, " no disk[%d] to remove\n",
1394                                                                         devid);
1395         } else if (status == DEVICE_ONLINE) {
1396                 sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
1397                 if (!sdev) {
1398                         scsi_add_device(mhba->shost, 0, devid, 0);
1399                         dev_dbg(&mhba->pdev->dev, " add disk %d-%d-%d.\n", 0,
1400                                                                 devid, 0);
1401                         ret = 0;
1402                 } else {
1403                         dev_err(&mhba->pdev->dev, " disk %d-%d-%d already added.\n",
1404                                                                 0, devid, 0);
1405                         scsi_device_put(sdev);
1406                 }
1407         }
1408         return ret;
1409 }
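
/*
 * The controller presents a single bus: channel and LUN are always 0,
 * so hot-added and hot-removed disks are addressed by target id alone,
 * e.g. scsi_add_device(mhba->shost, 0, devid, 0) for "0:devid:0".
 */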
1410 
1411 static u64 mvumi_inquiry(struct mvumi_hba *mhba,
1412         unsigned int id, struct mvumi_cmd *cmd)
1413 {
1414         struct mvumi_msg_frame *frame;
1415         u64 wwid = 0;
1416         int cmd_alloc = 0;
1417         int data_buf_len = 64;
1418 
1419         if (!cmd) {
1420                 cmd = mvumi_create_internal_cmd(mhba, data_buf_len);
1421                 if (cmd)
1422                         cmd_alloc = 1;
1423                 else
1424                         return 0;
1425         } else {
1426                 memset(cmd->data_buf, 0, data_buf_len);
1427         }
1428         cmd->scmd = NULL;
1429         cmd->cmd_status = REQ_STATUS_PENDING;
1430         atomic_set(&cmd->sync_cmd, 0);
1431         frame = cmd->frame;
1432         frame->device_id = (u16) id;
1433         frame->cmd_flag = CMD_FLAG_DATA_IN;
1434         frame->req_function = CL_FUN_SCSI_CMD;
1435         frame->cdb_length = 6;
1436         frame->data_transfer_length = MVUMI_INQUIRY_LENGTH;
1437         memset(frame->cdb, 0, frame->cdb_length);
1438         frame->cdb[0] = INQUIRY;
1439         frame->cdb[4] = frame->data_transfer_length;
1440 
1441         mvumi_issue_blocked_cmd(mhba, cmd);
1442 
1443         if (cmd->cmd_status == SAM_STAT_GOOD) {
1444                 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
1445                         wwid = id + 1;
1446                 else
1447                         memcpy((void *)&wwid,
1448                                (cmd->data_buf + MVUMI_INQUIRY_UUID_OFF),
1449                                MVUMI_INQUIRY_UUID_LEN);
1450                 dev_dbg(&mhba->pdev->dev,
1451                         "inquiry device(0:%d:0) wwid(%llx)\n", id, wwid);
1452         } else {
1453                 wwid = 0;
1454         }
1455         if (cmd_alloc)
1456                 mvumi_delete_internal_cmd(mhba, cmd);
1457 
1458         return wwid;
1459 }
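
/*
 * WWID note: the 9143 has no per-device UUID in its INQUIRY payload, so
 * a stable pseudo-wwid (id + 1) is fabricated; the 9580 copies
 * MVUMI_INQUIRY_UUID_LEN bytes from offset MVUMI_INQUIRY_UUID_OFF of the
 * returned data.  A wwid of 0 means "no device responded".
 */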
1460 
1461 static void mvumi_detach_devices(struct mvumi_hba *mhba)
1462 {
1463         struct mvumi_device *mv_dev = NULL , *dev_next;
1464         struct scsi_device *sdev = NULL;
1465 
1466         mutex_lock(&mhba->device_lock);
1467 
1468         /* detach Hard Disk */
1469         list_for_each_entry_safe(mv_dev, dev_next,
1470                 &mhba->shost_dev_list, list) {
1471                 mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
1472                 list_del_init(&mv_dev->list);
1473                 dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
1474                         mv_dev->id, mv_dev->wwid);
1475                 kfree(mv_dev);
1476         }
1477         list_for_each_entry_safe(mv_dev, dev_next, &mhba->mhba_dev_list, list) {
1478                 list_del_init(&mv_dev->list);
1479                 dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
1480                         mv_dev->id, mv_dev->wwid);
1481                 kfree(mv_dev);
1482         }
1483 
1484         /* detach virtual device */
1485         if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
1486                 sdev = scsi_device_lookup(mhba->shost, 0,
1487                                                 mhba->max_target_id - 1, 0);
1488 
1489         if (sdev) {
1490                 scsi_remove_device(sdev);
1491                 scsi_device_put(sdev);
1492         }
1493 
1494         mutex_unlock(&mhba->device_lock);
1495 }
1496 
1497 static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id)
1498 {
1499         struct scsi_device *sdev;
1500 
1501         sdev = scsi_device_lookup(mhba->shost, 0, id, 0);
1502         if (sdev) {
1503                 scsi_rescan_device(&sdev->sdev_gendev);
1504                 scsi_device_put(sdev);
1505         }
1506 }
1507 
1508 static int mvumi_match_devices(struct mvumi_hba *mhba, int id, u64 wwid)
1509 {
1510         struct mvumi_device *mv_dev = NULL;
1511 
1512         list_for_each_entry(mv_dev, &mhba->shost_dev_list, list) {
1513                 if (mv_dev->wwid == wwid) {
1514                         if (mv_dev->id != id) {
1515                                 dev_err(&mhba->pdev->dev,
1516                                         "%s has same wwid[%llx],"
1517                                         " but different id[%d %d]\n",
1518                                         __func__, mv_dev->wwid, mv_dev->id, id);
1519                                 return -1;
1520                         } else {
1521                                 if (mhba->pdev->device ==
1522                                                 PCI_DEVICE_ID_MARVELL_MV9143)
1523                                         mvumi_rescan_devices(mhba, id);
1524                                 return 1;
1525                         }
1526                 }
1527         }
1528         return 0;
1529 }
1530 
1531 static void mvumi_remove_devices(struct mvumi_hba *mhba, int id)
1532 {
1533         struct mvumi_device *mv_dev = NULL, *dev_next;
1534 
1535         list_for_each_entry_safe(mv_dev, dev_next,
1536                                 &mhba->shost_dev_list, list) {
1537                 if (mv_dev->id == id) {
1538                         dev_dbg(&mhba->pdev->dev,
1539                                 "detach device(0:%d:0) wwid(%llx) from HOST\n",
1540                                 mv_dev->id, mv_dev->wwid);
1541                         mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
1542                         list_del_init(&mv_dev->list);
1543                         kfree(mv_dev);
1544                 }
1545         }
1546 }
1547 
1548 static int mvumi_probe_devices(struct mvumi_hba *mhba)
1549 {
1550         int id, maxid;
1551         u64 wwid = 0;
1552         struct mvumi_device *mv_dev = NULL;
1553         struct mvumi_cmd *cmd = NULL;
1554         int found = 0;
1555 
1556         cmd = mvumi_create_internal_cmd(mhba, 64);
1557         if (!cmd)
1558                 return -1;
1559 
1560         if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
1561                 maxid = mhba->max_target_id;
1562         else
1563                 maxid = mhba->max_target_id - 1;
1564 
1565         for (id = 0; id < maxid; id++) {
1566                 wwid = mvumi_inquiry(mhba, id, cmd);
1567                 if (!wwid) {
1568                         /* device no response, remove it */
1569                         mvumi_remove_devices(mhba, id);
1570                 } else {
1571                         /* device response, add it */
1572                         found = mvumi_match_devices(mhba, id, wwid);
1573                         if (!found) {
1574                                 mvumi_remove_devices(mhba, id);
1575                                 mv_dev = kzalloc(sizeof(struct mvumi_device),
1576                                                                 GFP_KERNEL);
1577                                 if (!mv_dev) {
1578                                         dev_err(&mhba->pdev->dev,
1579                                                 "%s alloc mv_dev failed\n",
1580                                                 __func__);
1581                                         continue;
1582                                 }
1583                                 mv_dev->id = id;
1584                                 mv_dev->wwid = wwid;
1585                                 mv_dev->sdev = NULL;
1586                                 INIT_LIST_HEAD(&mv_dev->list);
1587                                 list_add_tail(&mv_dev->list,
1588                                               &mhba->mhba_dev_list);
1589                                 dev_dbg(&mhba->pdev->dev,
1590                                         "probe a new device(0:%d:0)"
1591                                         " wwid(%llx)\n", id, mv_dev->wwid);
1592                         } else if (found == -1) {
1593                                 mvumi_delete_internal_cmd(mhba, cmd);
                                     return -1;
1594                         } else
1595                                 continue;
1596                 }
1597         }
1598 
1599         if (cmd)
1600                 mvumi_delete_internal_cmd(mhba, cmd);
1601 
1602         return 0;
1603 }
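
/*
 * Rescan algorithm in short: INQUIRY every target id.  No wwid -> the
 * device is gone, detach it.  Known wwid on the same id -> keep it (the
 * 9143 additionally rescans the matching sdev).  Known wwid on a
 * different id -> inconsistent topology, abort the scan.  New wwid ->
 * stage it on mhba_dev_list; mvumi_rescan_bus() below moves it to
 * shost_dev_list once the SCSI device has been added.
 */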
1604 
1605 static int mvumi_rescan_bus(void *data)
1606 {
1607         int ret = 0;
1608         struct mvumi_hba *mhba = (struct mvumi_hba *) data;
1609         struct mvumi_device *mv_dev = NULL , *dev_next;
1610 
1611         while (!kthread_should_stop()) {
1612 
1613                 set_current_state(TASK_INTERRUPTIBLE);
1614                 if (!atomic_read(&mhba->pnp_count))
1615                         schedule();
1616                 msleep(1000);
1617                 atomic_set(&mhba->pnp_count, 0);
1618                 __set_current_state(TASK_RUNNING);
1619 
1620                 mutex_lock(&mhba->device_lock);
1621                 ret = mvumi_probe_devices(mhba);
1622                 if (!ret) {
1623                         list_for_each_entry_safe(mv_dev, dev_next,
1624                                                  &mhba->mhba_dev_list, list) {
1625                                 if (mvumi_handle_hotplug(mhba, mv_dev->id,
1626                                                          DEVICE_ONLINE)) {
1627                                         dev_err(&mhba->pdev->dev,
1628                                                 "%s add device(0:%d:0) failed,"
1629                                                 " wwid(%llx) already exists\n",
1630                                                 __func__,
1631                                                 mv_dev->id, mv_dev->wwid);
1632                                         list_del_init(&mv_dev->list);
1633                                         kfree(mv_dev);
1634                                 } else {
1635                                         list_move_tail(&mv_dev->list,
1636                                                        &mhba->shost_dev_list);
1637                                 }
1638                         }
1639                 }
1640                 mutex_unlock(&mhba->device_lock);
1641         }
1642         return 0;
1643 }
1644 
1645 static void mvumi_proc_msg(struct mvumi_hba *mhba,
1646                                         struct mvumi_hotplug_event *param)
1647 {
1648         u16 size = param->size;
1649         const unsigned long *ar_bitmap;
1650         const unsigned long *re_bitmap;
1651         int index;
1652 
1653         if (mhba->fw_flag & MVUMI_FW_ATTACH) {
1654                 index = -1;
1655                 ar_bitmap = (const unsigned long *) param->bitmap;
1656                 re_bitmap = (const unsigned long *) &param->bitmap[size >> 3];
1657 
1658                 mutex_lock(&mhba->sas_discovery_mutex);
1659                 do {
1660                         index = find_next_zero_bit(ar_bitmap, size, index + 1);
1661                         if (index >= size)
1662                                 break;
1663                         mvumi_handle_hotplug(mhba, index, DEVICE_ONLINE);
1664                 } while (1);
1665 
1666                 index = -1;
1667                 do {
1668                         index = find_next_zero_bit(re_bitmap, size, index + 1);
1669                         if (index >= size)
1670                                 break;
1671                         mvumi_handle_hotplug(mhba, index, DEVICE_OFFLINE);
1672                 } while (1);
1673                 mutex_unlock(&mhba->sas_discovery_mutex);
1674         }
1675 }
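
/*
 * Hotplug bitmap layout, as consumed above: param->bitmap carries two
 * bitmaps of 'size' bits each, attach map first, remove map second, so
 * the remove map starts at byte offset size >> 3.  A *zero* bit marks an
 * affected target (hence find_next_zero_bit).  E.g. with size = 64 the
 * remove map begins at param->bitmap[8], and a cleared bit 5 in the
 * attach map onlines target 5.
 */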
1676 
1677 static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer)
1678 {
1679         if (msg == APICDB1_EVENT_GETEVENT) {
1680                 int i, count;
1681                 struct mvumi_driver_event *param = NULL;
1682                 struct mvumi_event_req *er = buffer;
1683                 count = er->count;
1684                 if (count > MAX_EVENTS_RETURNED) {
1685                         dev_err(&mhba->pdev->dev, "event count[0x%x] is bigger"
1686                                         " than max event count[0x%x].\n",
1687                                         count, MAX_EVENTS_RETURNED);
1688                         return;
1689                 }
1690                 for (i = 0; i < count; i++) {
1691                         param = &er->events[i];
1692                         mvumi_show_event(mhba, param);
1693                 }
1694         } else if (msg == APICDB1_HOST_GETEVENT) {
1695                 mvumi_proc_msg(mhba, buffer);
1696         }
1697 }
1698 
1699 static int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg)
1700 {
1701         struct mvumi_cmd *cmd;
1702         struct mvumi_msg_frame *frame;
1703 
1704         cmd = mvumi_create_internal_cmd(mhba, 512);
1705         if (!cmd)
1706                 return -1;
1707         cmd->scmd = NULL;
1708         cmd->cmd_status = REQ_STATUS_PENDING;
1709         atomic_set(&cmd->sync_cmd, 0);
1710         frame = cmd->frame;
1711         frame->device_id = 0;
1712         frame->cmd_flag = CMD_FLAG_DATA_IN;
1713         frame->req_function = CL_FUN_SCSI_CMD;
1714         frame->cdb_length = MAX_COMMAND_SIZE;
1715         frame->data_transfer_length = sizeof(struct mvumi_event_req);
1716         memset(frame->cdb, 0, MAX_COMMAND_SIZE);
1717         frame->cdb[0] = APICDB0_EVENT;
1718         frame->cdb[1] = msg;
1719         mvumi_issue_blocked_cmd(mhba, cmd);
1720 
1721         if (cmd->cmd_status != SAM_STAT_GOOD)
1722                 dev_err(&mhba->pdev->dev, "get event failed, status=0x%x.\n",
1723                                                         cmd->cmd_status);
1724         else
1725                 mvumi_notification(mhba, cmd->frame->cdb[1], cmd->data_buf);
1726 
1727         mvumi_delete_internal_cmd(mhba, cmd);
1728         return 0;
1729 }
1730 
1731 static void mvumi_scan_events(struct work_struct *work)
1732 {
1733         struct mvumi_events_wq *mu_ev =
1734                 container_of(work, struct mvumi_events_wq, work_q);
1735 
1736         mvumi_get_event(mu_ev->mhba, mu_ev->event);
1737         kfree(mu_ev);
1738 }
1739 
1740 static void mvumi_launch_events(struct mvumi_hba *mhba, u32 isr_status)
1741 {
1742         struct mvumi_events_wq *mu_ev;
1743 
1744         while (isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) {
1745                 if (isr_status & DRBL_BUS_CHANGE) {
1746                         atomic_inc(&mhba->pnp_count);
1747                         wake_up_process(mhba->dm_thread);
1748                         isr_status &= ~(DRBL_BUS_CHANGE);
1749                         continue;
1750                 }
1751 
1752                 mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC);
1753                 if (mu_ev) {
1754                         INIT_WORK(&mu_ev->work_q, mvumi_scan_events);
1755                         mu_ev->mhba = mhba;
1756                         mu_ev->event = APICDB1_EVENT_GETEVENT;
1757                         isr_status &= ~(DRBL_EVENT_NOTIFY);
1758                         mu_ev->param = NULL;
1759                         schedule_work(&mu_ev->work_q);
1760                 }
1761         }
1762 }
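
/*
 * Called from interrupt context, hence GFP_ATOMIC above: a bus-change
 * doorbell only bumps pnp_count and wakes the rescan thread, while an
 * event-notify doorbell defers the APICDB0_EVENT query to a workqueue
 * (mvumi_scan_events), since mvumi_get_event() issues a blocking
 * command and may sleep.
 */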
1763 
1764 static void mvumi_handle_clob(struct mvumi_hba *mhba)
1765 {
1766         struct mvumi_rsp_frame *ob_frame;
1767         struct mvumi_cmd *cmd;
1768         struct mvumi_ob_data *pool;
1769 
1770         while (!list_empty(&mhba->free_ob_list)) {
1771                 pool = list_first_entry(&mhba->free_ob_list,
1772                                                 struct mvumi_ob_data, list);
1773                 list_del_init(&pool->list);
1774                 list_add_tail(&pool->list, &mhba->ob_data_list);
1775 
1776                 ob_frame = (struct mvumi_rsp_frame *) &pool->data[0];
1777                 cmd = mhba->tag_cmd[ob_frame->tag];
1778 
1779                 atomic_dec(&mhba->fw_outstanding);
1780                 mhba->tag_cmd[ob_frame->tag] = NULL;
1781                 tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag);
1782                 if (cmd->scmd)
1783                         mvumi_complete_cmd(mhba, cmd, ob_frame);
1784                 else
1785                         mvumi_complete_internal_cmd(mhba, cmd, ob_frame);
1786         }
1787         mhba->instancet->fire_cmd(mhba, NULL);
1788 }
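
/*
 * Completion path: each response frame on free_ob_list carries the tag
 * allocated in mvumi_send_command(), which indexes tag_cmd[] back to the
 * owning mvumi_cmd.  Once all responses are consumed the inbound queue
 * may have free slots again, so fire_cmd(mhba, NULL) is called to drain
 * commands parked on waiting_req_list.
 */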
1789 
1790 static irqreturn_t mvumi_isr_handler(int irq, void *devp)
1791 {
1792         struct mvumi_hba *mhba = (struct mvumi_hba *) devp;
1793         unsigned long flags;
1794 
1795         spin_lock_irqsave(mhba->shost->host_lock, flags);
1796         if (unlikely(mhba->instancet->clear_intr(mhba) || !mhba->global_isr)) {
1797                 spin_unlock_irqrestore(mhba->shost->host_lock, flags);
1798                 return IRQ_NONE;
1799         }
1800 
1801         if (mhba->global_isr & mhba->regs->int_dl_cpu2pciea) {
1802                 if (mhba->isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY))
1803                         mvumi_launch_events(mhba, mhba->isr_status);
1804                 if (mhba->isr_status & DRBL_HANDSHAKE_ISR) {
1805                         dev_warn(&mhba->pdev->dev, "enter handshake again!\n");
1806                         mvumi_handshake(mhba);
1807                 }
1808 
1809         }
1810 
1811         if (mhba->global_isr & mhba->regs->int_comaout)
1812                 mvumi_receive_ob_list_entry(mhba);
1813 
1814         mhba->global_isr = 0;
1815         mhba->isr_status = 0;
1816         if (mhba->fw_state == FW_STATE_STARTED)
1817                 mvumi_handle_clob(mhba);
1818         spin_unlock_irqrestore(mhba->shost->host_lock, flags);
1819         return IRQ_HANDLED;
1820 }
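
/*
 * ISR flow, under shost->host_lock: clear_intr() latches the causes into
 * mhba->global_isr / mhba->isr_status and acknowledges them in hardware
 * (returning 1 lets this shared IRQ report IRQ_NONE).  Doorbell bits then
 * feed the event and handshake paths, int_comaout harvests the outbound
 * list, and the collected responses are completed via mvumi_handle_clob().
 */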
1821 
1822 static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
1823                                                 struct mvumi_cmd *cmd)
1824 {
1825         void *ib_entry;
1826         struct mvumi_msg_frame *ib_frame;
1827         unsigned int frame_len;
1828 
1829         ib_frame = cmd->frame;
1830         if (unlikely(mhba->fw_state != FW_STATE_STARTED)) {
1831                 dev_dbg(&mhba->pdev->dev, "firmware not ready.\n");
1832                 return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
1833         }
1834         if (tag_is_empty(&mhba->tag_pool)) {
1835                 dev_dbg(&mhba->pdev->dev, "no free tag.\n");
1836                 return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
1837         }
1838         mvumi_get_ib_list_entry(mhba, &ib_entry);
1839 
1840         cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool);
1841         cmd->frame->request_id = mhba->io_seq++;
1842         cmd->request_id = cmd->frame->request_id;
1843         mhba->tag_cmd[cmd->frame->tag] = cmd;
1844         frame_len = sizeof(*ib_frame) - 4 +
1845                                 ib_frame->sg_counts * sizeof(struct mvumi_sgl);
1846         if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
1847                 struct mvumi_dyn_list_entry *dle;
1848                 dle = ib_entry;
1849                 dle->src_low_addr =
1850                         cpu_to_le32(lower_32_bits(cmd->frame_phys));
1851                 dle->src_high_addr =
1852                         cpu_to_le32(upper_32_bits(cmd->frame_phys));
1853                 dle->if_length = (frame_len >> 2) & 0xFFF;
1854         } else {
1855                 memcpy(ib_entry, ib_frame, frame_len);
1856         }
1857         return MV_QUEUE_COMMAND_RESULT_SENT;
1858 }
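
/*
 * frame_len is sizeof(*ib_frame) minus the 4-byte payload placeholder
 * plus one mvumi_sgl per SG entry.  Purely for illustration (the real
 * sizes live in mvumi.h): with a 64-byte frame header and 16-byte SGLs,
 * a 2-entry command occupies 64 - 4 + 32 = 92 bytes.  In dynamic source
 * mode the frame is not copied at all; the IB slot only receives its DMA
 * address plus its length in dwords (frame_len >> 2).
 */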
1859 
1860 static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
1861 {
1862         unsigned short num_of_cl_sent = 0;
1863         unsigned int count;
1864         enum mvumi_qc_result result;
1865 
1866         if (cmd)
1867                 list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list);
1868         count = mhba->instancet->check_ib_list(mhba);
1869         if (list_empty(&mhba->waiting_req_list) || !count)
1870                 return;
1871 
1872         do {
1873                 cmd = list_first_entry(&mhba->waiting_req_list,
1874                                        struct mvumi_cmd, queue_pointer);
1875                 list_del_init(&cmd->queue_pointer);
1876                 result = mvumi_send_command(mhba, cmd);
1877                 switch (result) {
1878                 case MV_QUEUE_COMMAND_RESULT_SENT:
1879                         num_of_cl_sent++;
1880                         break;
1881                 case MV_QUEUE_COMMAND_RESULT_NO_RESOURCE:
1882                         list_add(&cmd->queue_pointer, &mhba->waiting_req_list);
1883                         if (num_of_cl_sent > 0)
1884                                 mvumi_send_ib_list_entry(mhba);
1885 
1886                         return;
1887                 }
1888         } while (!list_empty(&mhba->waiting_req_list) && count--);
1889 
1890         if (num_of_cl_sent > 0)
1891                 mvumi_send_ib_list_entry(mhba);
1892 }
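
/*
 * Batching note: commands are parked on waiting_req_list first, then as
 * many as the inbound list has room for (check_ib_list()) are copied in,
 * and a single doorbell (mvumi_send_ib_list_entry) publishes the whole
 * batch instead of ringing the hardware once per command.
 */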
1893 
1894 /**
1895  * mvumi_enable_intr -  Enables interrupts
1896  * @mhba:               Adapter soft state
1897  */
1898 static void mvumi_enable_intr(struct mvumi_hba *mhba)
1899 {
1900         unsigned int mask;
1901         struct mvumi_hw_regs *regs = mhba->regs;
1902 
1903         iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
1904         mask = ioread32(regs->enpointa_mask_reg);
1905         mask |= regs->int_dl_cpu2pciea | regs->int_comaout | regs->int_comaerr;
1906         iowrite32(mask, regs->enpointa_mask_reg);
1907 }
1908 
1909 /**
1910  * mvumi_disable_intr -Disables interrupt
1911  * @mhba:               Adapter soft state
1912  */
1913 static void mvumi_disable_intr(struct mvumi_hba *mhba)
1914 {
1915         unsigned int mask;
1916         struct mvumi_hw_regs *regs = mhba->regs;
1917 
1918         iowrite32(0, regs->arm_to_pciea_mask_reg);
1919         mask = ioread32(regs->enpointa_mask_reg);
1920         mask &= ~(regs->int_dl_cpu2pciea | regs->int_comaout |
1921                                                         regs->int_comaerr);
1922         iowrite32(mask, regs->enpointa_mask_reg);
1923 }
1924 
1925 static int mvumi_clear_intr(void *extend)
1926 {
1927         struct mvumi_hba *mhba = (struct mvumi_hba *) extend;
1928         unsigned int status, isr_status = 0, tmp = 0;
1929         struct mvumi_hw_regs *regs = mhba->regs;
1930 
1931         status = ioread32(regs->main_int_cause_reg);
1932         if (!(status & regs->int_mu) || status == 0xFFFFFFFF)
1933                 return 1;
1934         if (unlikely(status & regs->int_comaerr)) {
1935                 tmp = ioread32(regs->outb_isr_cause);
1936                 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
1937                         if (tmp & regs->clic_out_err) {
1938                                 iowrite32(tmp & regs->clic_out_err,
1939                                                         regs->outb_isr_cause);
1940                         }
1941                 } else {
1942                         if (tmp & (regs->clic_in_err | regs->clic_out_err))
1943                                 iowrite32(tmp & (regs->clic_in_err |
1944                                                 regs->clic_out_err),
1945                                                 regs->outb_isr_cause);
1946                 }
1947                 status ^= mhba->regs->int_comaerr;
1948                 /* inbound or outbound parity error, command will timeout */
1949         }
1950         if (status & regs->int_comaout) {
1951                 tmp = ioread32(regs->outb_isr_cause);
1952                 if (tmp & regs->clic_irq)
1953                         iowrite32(tmp & regs->clic_irq, regs->outb_isr_cause);
1954         }
1955         if (status & regs->int_dl_cpu2pciea) {
1956                 isr_status = ioread32(regs->arm_to_pciea_drbl_reg);
1957                 if (isr_status)
1958                         iowrite32(isr_status, regs->arm_to_pciea_drbl_reg);
1959         }
1960 
1961         mhba->global_isr = status;
1962         mhba->isr_status = isr_status;
1963 
1964         return 0;
1965 }
1966 
1967 /**
1968  * mvumi_read_fw_status_reg - returns the current FW status value
1969  * @mhba:               Adapter soft state
1970  */
1971 static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba)
1972 {
1973         unsigned int status;
1974 
1975         status = ioread32(mhba->regs->arm_to_pciea_drbl_reg);
1976         if (status)
1977                 iowrite32(status, mhba->regs->arm_to_pciea_drbl_reg);
1978         return status;
1979 }
1980 
1981 static struct mvumi_instance_template mvumi_instance_9143 = {
1982         .fire_cmd = mvumi_fire_cmd,
1983         .enable_intr = mvumi_enable_intr,
1984         .disable_intr = mvumi_disable_intr,
1985         .clear_intr = mvumi_clear_intr,
1986         .read_fw_status_reg = mvumi_read_fw_status_reg,
1987         .check_ib_list = mvumi_check_ib_list_9143,
1988         .check_ob_list = mvumi_check_ob_list_9143,
1989         .reset_host = mvumi_reset_host_9143,
1990 };
1991 
1992 static struct mvumi_instance_template mvumi_instance_9580 = {
1993         .fire_cmd = mvumi_fire_cmd,
1994         .enable_intr = mvumi_enable_intr,
1995         .disable_intr = mvumi_disable_intr,
1996         .clear_intr = mvumi_clear_intr,
1997         .read_fw_status_reg = mvumi_read_fw_status_reg,
1998         .check_ib_list = mvumi_check_ib_list_9580,
1999         .check_ob_list = mvumi_check_ob_list_9580,
2000         .reset_host = mvumi_reset_host_9580,
2001 };
2002 
2003 static int mvumi_slave_configure(struct scsi_device *sdev)
2004 {
2005         struct mvumi_hba *mhba;
2006         unsigned char bitcount = sizeof(unsigned char) * 8;
2007 
2008         mhba = (struct mvumi_hba *) sdev->host->hostdata;
2009         if (sdev->id >= mhba->max_target_id)
2010                 return -EINVAL;
2011 
2012         mhba->target_map[sdev->id / bitcount] |= (1 << (sdev->id % bitcount));
2013         return 0;
2014 }
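
/*
 * target_map is a byte-array bitmap with one bit per target id.  E.g.
 * for sdev->id == 10 and bitcount == 8:
 *
 *	target_map[10 / 8] |= 1 << (10 % 8);	-> bit 2 of byte 1
 */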
2015 
2016 /**
2017  * mvumi_build_frame -  Prepares a direct cdb (DCDB) command
2018  * @mhba:               Adapter soft state
2019  * @scmd:               SCSI command
2020  * @cmd:                Command to be prepared in
2021  *
2022  * This function prepares CDB commands. These are typically pass-through
2023  * commands to the devices.
2024  */
2025 static unsigned char mvumi_build_frame(struct mvumi_hba *mhba,
2026                                 struct scsi_cmnd *scmd, struct mvumi_cmd *cmd)
2027 {
2028         struct mvumi_msg_frame *pframe;
2029 
2030         cmd->scmd = scmd;
2031         cmd->cmd_status = REQ_STATUS_PENDING;
2032         pframe = cmd->frame;
2033         pframe->device_id = ((unsigned short) scmd->device->id) |
2034                                 (((unsigned short) scmd->device->lun) << 8);
2035         pframe->cmd_flag = 0;
2036 
2037         switch (scmd->sc_data_direction) {
2038         case DMA_NONE:
2039                 pframe->cmd_flag |= CMD_FLAG_NON_DATA;
2040                 break;
2041         case DMA_FROM_DEVICE:
2042                 pframe->cmd_flag |= CMD_FLAG_DATA_IN;
2043                 break;
2044         case DMA_TO_DEVICE:
2045                 pframe->cmd_flag |= CMD_FLAG_DATA_OUT;
2046                 break;
2047         case DMA_BIDIRECTIONAL:
2048         default:
2049                 dev_warn(&mhba->pdev->dev, "unexpected data direction[%d] "
2050                         "cmd[0x%x]\n", scmd->sc_data_direction, scmd->cmnd[0]);
2051                 goto error;
2052         }
2053 
2054         pframe->cdb_length = scmd->cmd_len;
2055         memcpy(pframe->cdb, scmd->cmnd, pframe->cdb_length);
2056         pframe->req_function = CL_FUN_SCSI_CMD;
2057         if (scsi_bufflen(scmd)) {
2058                 if (mvumi_make_sgl(mhba, scmd, &pframe->payload[0],
2059                         &pframe->sg_counts))
2060                         goto error;
2061 
2062                 pframe->data_transfer_length = scsi_bufflen(scmd);
2063         } else {
2064                 pframe->sg_counts = 0;
2065                 pframe->data_transfer_length = 0;
2066         }
2067         return 0;
2068 
2069 error:
2070         scmd->result = (DID_OK << 16) | (DRIVER_SENSE << 24) |
2071                 SAM_STAT_CHECK_CONDITION;
2072         scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
2073                                                                         0);
2074         return -1;
2075 }
2076 
2077 /**
2078  * mvumi_queue_command -        Queue entry point
2079  * @shost:                      SCSI host to which the command is queued
2080  * @scmd:                       SCSI command to be queued
2081  */
2082 static int mvumi_queue_command(struct Scsi_Host *shost,
2083                                         struct scsi_cmnd *scmd)
2084 {
2085         struct mvumi_cmd *cmd;
2086         struct mvumi_hba *mhba;
2087         unsigned long irq_flags;
2088 
2089         spin_lock_irqsave(shost->host_lock, irq_flags);
2090 
2091         mhba = (struct mvumi_hba *) shost->hostdata;
2092         scmd->result = 0;
2093         cmd = mvumi_get_cmd(mhba);
2094         if (unlikely(!cmd)) {
2095                 spin_unlock_irqrestore(shost->host_lock, irq_flags);
2096                 return SCSI_MLQUEUE_HOST_BUSY;
2097         }
2098 
2099         if (unlikely(mvumi_build_frame(mhba, scmd, cmd)))
2100                 goto out_return_cmd;
2101 
2102         cmd->scmd = scmd;
2103         scmd->SCp.ptr = (char *) cmd;
2104         mhba->instancet->fire_cmd(mhba, cmd);
2105         spin_unlock_irqrestore(shost->host_lock, irq_flags);
2106         return 0;
2107 
2108 out_return_cmd:
2109         mvumi_return_cmd(mhba, cmd);
2110         scmd->scsi_done(scmd);
2111         spin_unlock_irqrestore(shost->host_lock, irq_flags);
2112         return 0;
2113 }
2114 
2115 static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
2116 {
2117         struct mvumi_cmd *cmd = (struct mvumi_cmd *) scmd->SCp.ptr;
2118         struct Scsi_Host *host = scmd->device->host;
2119         struct mvumi_hba *mhba = shost_priv(host);
2120         unsigned long flags;
2121 
2122         spin_lock_irqsave(mhba->shost->host_lock, flags);
2123 
2124         if (mhba->tag_cmd[cmd->frame->tag]) {
2125                 mhba->tag_cmd[cmd->frame->tag] = NULL;
2126                 tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
2127         }
2128         if (!list_empty(&cmd->queue_pointer))
2129                 list_del_init(&cmd->queue_pointer);
2130         else
2131                 atomic_dec(&mhba->fw_outstanding);
2132 
2133         scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
2134         scmd->SCp.ptr = NULL;
2135         if (scsi_bufflen(scmd)) {
2136                 dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
2137                              scsi_sg_count(scmd),
2138                              scmd->sc_data_direction);
2139         }
2140         mvumi_return_cmd(mhba, cmd);
2141         spin_unlock_irqrestore(mhba->shost->host_lock, flags);
2142 
2143         return BLK_EH_DONE;
2144 }
2145 
2146 static int
2147 mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2148                         sector_t capacity, int geom[])
2149 {
2150         int heads, sectors;
2151         sector_t cylinders;
2152         unsigned long tmp;
2153 
2154         heads = 64;
2155         sectors = 32;
2156         tmp = heads * sectors;
2157         cylinders = capacity;
2158         sector_div(cylinders, tmp);
2159 
2160         if (capacity >= 0x200000) {
2161                 heads = 255;
2162                 sectors = 63;
2163                 tmp = heads * sectors;
2164                 cylinders = capacity;
2165                 sector_div(cylinders, tmp);
2166         }
2167         geom[0] = heads;
2168         geom[1] = sectors;
2169         geom[2] = cylinders;
2170 
2171         return 0;
2172 }
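
/*
 * Worked example for the geometry above: a 2 GiB disk has
 * capacity = 4194304 sectors >= 0x200000, so heads = 255, sectors = 63,
 * and cylinders = 4194304 / (255 * 63) = 261 (sector_div truncates).
 * Smaller disks use the 64-head/32-sector layout.
 */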
2173 
2174 static struct scsi_host_template mvumi_template = {
2175 
2176         .module = THIS_MODULE,
2177         .name = "Marvell Storage Controller",
2178         .slave_configure = mvumi_slave_configure,
2179         .queuecommand = mvumi_queue_command,
2180         .eh_timed_out = mvumi_timed_out,
2181         .eh_host_reset_handler = mvumi_host_reset,
2182         .bios_param = mvumi_bios_param,
2183         .dma_boundary = PAGE_SIZE - 1,
2184         .this_id = -1,
2185 };
2186 
2187 static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba)
2188 {
2189         void *base = NULL;
2190         struct mvumi_hw_regs *regs;
2191 
2192         switch (mhba->pdev->device) {
2193         case PCI_DEVICE_ID_MARVELL_MV9143:
2194                 mhba->mmio = mhba->base_addr[0];
2195                 base = mhba->mmio;
2196                 if (!mhba->regs) {
2197                         mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
2198                         if (mhba->regs == NULL)
2199                                 return -ENOMEM;
2200                 }
2201                 regs = mhba->regs;
2202 
2203                 /* For Arm */
2204                 regs->ctrl_sts_reg          = base + 0x20104;
2205                 regs->rstoutn_mask_reg      = base + 0x20108;
2206                 regs->sys_soft_rst_reg      = base + 0x2010C;
2207                 regs->main_int_cause_reg    = base + 0x20200;
2208                 regs->enpointa_mask_reg     = base + 0x2020C;
2209                 regs->rstoutn_en_reg        = base + 0xF1400;
2210                 /* For Doorbell */
2211                 regs->pciea_to_arm_drbl_reg = base + 0x20400;
2212                 regs->arm_to_pciea_drbl_reg = base + 0x20408;
2213                 regs->arm_to_pciea_mask_reg = base + 0x2040C;
2214                 regs->pciea_to_arm_msg0     = base + 0x20430;
2215                 regs->pciea_to_arm_msg1     = base + 0x20434;
2216                 regs->arm_to_pciea_msg0     = base + 0x20438;
2217                 regs->arm_to_pciea_msg1     = base + 0x2043C;
2218 
2219                 /* For Message Unit */
2220 
2221                 regs->inb_aval_count_basel  = base + 0x508;
2222                 regs->inb_aval_count_baseh  = base + 0x50C;
2223                 regs->inb_write_pointer     = base + 0x518;
2224                 regs->inb_read_pointer      = base + 0x51C;
2225                 regs->outb_coal_cfg         = base + 0x568;
2226                 regs->outb_copy_basel       = base + 0x5B0;
2227                 regs->outb_copy_baseh       = base + 0x5B4;
2228                 regs->outb_copy_pointer     = base + 0x544;
2229                 regs->outb_read_pointer     = base + 0x548;
2230                 regs->outb_isr_cause        = base + 0x560;
2232                 /* Bit setting for HW */
2233                 regs->int_comaout           = 1 << 8;
2234                 regs->int_comaerr           = 1 << 6;
2235                 regs->int_dl_cpu2pciea      = 1 << 1;
2236                 regs->cl_pointer_toggle     = 1 << 12;
2237                 regs->clic_irq              = 1 << 1;
2238                 regs->clic_in_err           = 1 << 8;
2239                 regs->clic_out_err          = 1 << 12;
2240                 regs->cl_slot_num_mask      = 0xFFF;
2241                 regs->int_drbl_int_mask     = 0x3FFFFFFF;
2242                 regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout |
2243                                                         regs->int_comaerr;
2244                 break;
2245         case PCI_DEVICE_ID_MARVELL_MV9580:
2246                 mhba->mmio = mhba->base_addr[2];
2247                 base = mhba->mmio;
2248                 if (!mhba->regs) {
2249                         mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
2250                         if (mhba->regs == NULL)
2251                                 return -ENOMEM;
2252                 }
2253                 regs = mhba->regs;
2254                 /* For Arm */
2255                 regs->ctrl_sts_reg          = base + 0x20104;
2256                 regs->rstoutn_mask_reg      = base + 0x1010C;
2257                 regs->sys_soft_rst_reg      = base + 0x10108;
2258                 regs->main_int_cause_reg    = base + 0x10200;
2259                 regs->enpointa_mask_reg     = base + 0x1020C;
2260                 regs->rstoutn_en_reg        = base + 0xF1400;
2261 
2262                 /* For Doorbell */
2263                 regs->pciea_to_arm_drbl_reg = base + 0x10460;
2264                 regs->arm_to_pciea_drbl_reg = base + 0x10480;
2265                 regs->arm_to_pciea_mask_reg = base + 0x10484;
2266                 regs->pciea_to_arm_msg0     = base + 0x10400;
2267                 regs->pciea_to_arm_msg1     = base + 0x10404;
2268                 regs->arm_to_pciea_msg0     = base + 0x10420;
2269                 regs->arm_to_pciea_msg1     = base + 0x10424;
2270 
2271                 /* For reset*/
2272                 regs->reset_request         = base + 0x10108;
2273                 regs->reset_enable          = base + 0x1010c;
2274 
2275                 /* For Message Unit */
2276                 regs->inb_aval_count_basel  = base + 0x4008;
2277                 regs->inb_aval_count_baseh  = base + 0x400C;
2278                 regs->inb_write_pointer     = base + 0x4018;
2279                 regs->inb_read_pointer      = base + 0x401C;
2280                 regs->outb_copy_basel       = base + 0x4058;
2281                 regs->outb_copy_baseh       = base + 0x405C;
2282                 regs->outb_copy_pointer     = base + 0x406C;
2283                 regs->outb_read_pointer     = base + 0x4070;
2284                 regs->outb_coal_cfg         = base + 0x4080;
2285                 regs->outb_isr_cause        = base + 0x4088;
2286                 /* Bit setting for HW */
2287                 regs->int_comaout           = 1 << 4;
2288                 regs->int_dl_cpu2pciea      = 1 << 12;
2289                 regs->int_comaerr           = 1 << 29;
2290                 regs->cl_pointer_toggle     = 1 << 14;
2291                 regs->cl_slot_num_mask      = 0x3FFF;
2292                 regs->clic_irq              = 1 << 0;
2293                 regs->clic_out_err          = 1 << 1;
2294                 regs->int_drbl_int_mask     = 0x3FFFFFFF;
2295                 regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout;
2296                 break;
2297         default:
2298                 return -1;
2300         }
2301 
2302         return 0;
2303 }
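
/*
 * Register map note: the 9143 exposes its registers through the window
 * mapped at mhba->base_addr[0], the 9580 through mhba->base_addr[2], and
 * the offsets above are relative to those different windows, so the two
 * layouts must not be mixed.
 */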
2304 
2305 /**
2306  * mvumi_init_fw -      Initializes the FW
2307  * @mhba:               Adapter soft state
2308  *
2309  * This is the main function for initializing firmware.
2310  */
2311 static int mvumi_init_fw(struct mvumi_hba *mhba)
2312 {
2313         int ret = 0;
2314 
2315         if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) {
2316                 dev_err(&mhba->pdev->dev, "IO memory region busy!\n");
2317                 return -EBUSY;
2318         }
2319         ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
2320         if (ret)
2321                 goto fail_ioremap;
2322 
2323         switch (mhba->pdev->device) {
2324         case PCI_DEVICE_ID_MARVELL_MV9143:
2325                 mhba->instancet = &mvumi_instance_9143;
2326                 mhba->io_seq = 0;
2327                 mhba->max_sge = MVUMI_MAX_SG_ENTRY;
2328                 mhba->request_id_enabled = 1;
2329                 break;
2330         case PCI_DEVICE_ID_MARVELL_MV9580:
2331                 mhba->instancet = &mvumi_instance_9580;
2332                 mhba->io_seq = 0;
2333                 mhba->max_sge = MVUMI_MAX_SG_ENTRY;
2334                 break;
2335         default:
2336                 dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n",
2337                                                         mhba->pdev->device);
2338                 mhba->instancet = NULL;
2339                 ret = -EINVAL;
2340                 goto fail_alloc_mem;
2341         }
2342         dev_dbg(&mhba->pdev->dev, "device id 0x%04X found.\n",
2343                                                         mhba->pdev->device);
2344         ret = mvumi_cfg_hw_reg(mhba);
2345         if (ret) {
2346                 dev_err(&mhba->pdev->dev,
2347                         "failed to allocate memory for reg\n");
2348                 ret = -ENOMEM;
2349                 goto fail_alloc_mem;
2350         }
2351         mhba->handshake_page = dma_alloc_coherent(&mhba->pdev->dev,
2352                         HSP_MAX_SIZE, &mhba->handshake_page_phys, GFP_KERNEL);
2353         if (!mhba->handshake_page) {
2354                 dev_err(&mhba->pdev->dev,
2355                         "failed to allocate memory for handshake\n");
2356                 ret = -ENOMEM;
2357                 goto fail_alloc_page;
2358         }
2359 
2360         if (mvumi_start(mhba)) {
2361                 ret = -EINVAL;
2362                 goto fail_ready_state;
2363         }
2364         ret = mvumi_alloc_cmds(mhba);
2365         if (ret)
2366                 goto fail_ready_state;
2367 
2368         return 0;
2369 
2370 fail_ready_state:
2371         mvumi_release_mem_resource(mhba);
2372         dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
2373                 mhba->handshake_page, mhba->handshake_page_phys);
2374 fail_alloc_page:
2375         kfree(mhba->regs);
2376 fail_alloc_mem:
2377         mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
2378 fail_ioremap:
2379         pci_release_regions(mhba->pdev);
2380 
2381         return ret;
2382 }
2383 
2384 /**
2385  * mvumi_io_attach -    Attaches this driver to SCSI mid-layer
2386  * @mhba:               Adapter soft state
2387  */
2388 static int mvumi_io_attach(struct mvumi_hba *mhba)
2389 {
2390         struct Scsi_Host *host = mhba->shost;
2391         struct scsi_device *sdev = NULL;
2392         int ret;
2393         unsigned int max_sg = (mhba->ib_max_size + 4 -
2394                 sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl);
2395 
2396         host->irq = mhba->pdev->irq;
2397         host->unique_id = mhba->unique_id;
2398         host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
2399         host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge;
2400         host->max_sectors = mhba->max_transfer_size / 512;
2401         host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
2402         host->max_id = mhba->max_target_id;
2403         host->max_cmd_len = MAX_COMMAND_SIZE;
2404 
2405         ret = scsi_add_host(host, &mhba->pdev->dev);
2406         if (ret) {
2407                 dev_err(&mhba->pdev->dev, "scsi_add_host failed\n");
2408                 return ret;
2409         }
2410         mhba->fw_flag |= MVUMI_FW_ATTACH;
2411 
2412         mutex_lock(&mhba->sas_discovery_mutex);
2413         if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
2414                 ret = scsi_add_device(host, 0, mhba->max_target_id - 1, 0);
2415         else
2416                 ret = 0;
2417         if (ret) {
2418                 dev_err(&mhba->pdev->dev, "add virtual device failed\n");
2419                 mutex_unlock(&mhba->sas_discovery_mutex);
2420                 goto fail_add_device;
2421         }
2422 
2423         mhba->dm_thread = kthread_create(mvumi_rescan_bus,
2424                                                 mhba, "mvumi_scanthread");
2425         if (IS_ERR(mhba->dm_thread)) {
2426                 dev_err(&mhba->pdev->dev,
2427                         "failed to create device scan thread\n");
2428                 mutex_unlock(&mhba->sas_discovery_mutex);
2429                 goto fail_create_thread;
2430         }
2431         atomic_set(&mhba->pnp_count, 1);
2432         wake_up_process(mhba->dm_thread);
2433 
2434         mutex_unlock(&mhba->sas_discovery_mutex);
2435         return 0;
2436 
2437 fail_create_thread:
2438         if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
2439                 sdev = scsi_device_lookup(mhba->shost, 0,
2440                                                 mhba->max_target_id - 1, 0);
2441         if (sdev) {
2442                 scsi_remove_device(sdev);
2443                 scsi_device_put(sdev);
2444         }
2445 fail_add_device:
2446         scsi_remove_host(mhba->shost);
2447         return ret;
2448 }
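
/*
 * Illustration of the max_sg clamp above (sizes assumed, see mvumi.h):
 * with ib_max_size = 256, a 64-byte mvumi_msg_frame and a 16-byte
 * mvumi_sgl, max_sg = (256 + 4 - 64) / 16 = 12, so sg_tablesize becomes
 * min(mhba->max_sge, 12).  The "+ 4" reclaims the payload placeholder at
 * the end of the frame header.
 */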
2449 
2450 /**
2451  * mvumi_probe_one -    PCI hotplug entry point
2452  * @pdev:               PCI device structure
2453  * @id:                 PCI ids of supported hotplugged adapter
2454  */
2455 static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2456 {
2457         struct Scsi_Host *host;
2458         struct mvumi_hba *mhba;
2459         int ret;
2460 
2461         dev_dbg(&pdev->dev, " %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
2462                         pdev->vendor, pdev->device, pdev->subsystem_vendor,
2463                         pdev->subsystem_device);
2464 
2465         ret = pci_enable_device(pdev);
2466         if (ret)
2467                 return ret;
2468 
2469         ret = mvumi_pci_set_master(pdev);
2470         if (ret)
2471                 goto fail_set_dma_mask;
2472 
2473         host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
2474         if (!host) {
2475                 dev_err(&pdev->dev, "scsi_host_alloc failed\n");
2476                 ret = -ENOMEM;
2477                 goto fail_alloc_instance;
2478         }
2479         mhba = shost_priv(host);
2480 
2481         INIT_LIST_HEAD(&mhba->cmd_pool);
2482         INIT_LIST_HEAD(&mhba->ob_data_list);
2483         INIT_LIST_HEAD(&mhba->free_ob_list);
2484         INIT_LIST_HEAD(&mhba->res_list);
2485         INIT_LIST_HEAD(&mhba->waiting_req_list);
2486         mutex_init(&mhba->device_lock);
2487         INIT_LIST_HEAD(&mhba->mhba_dev_list);
2488         INIT_LIST_HEAD(&mhba->shost_dev_list);
2489         atomic_set(&mhba->fw_outstanding, 0);
2490         init_waitqueue_head(&mhba->int_cmd_wait_q);
2491         mutex_init(&mhba->sas_discovery_mutex);
2492 
2493         mhba->pdev = pdev;
2494         mhba->shost = host;
2495         mhba->unique_id = pdev->bus->number << 8 | pdev->devfn;
2496 
2497         ret = mvumi_init_fw(mhba);
2498         if (ret)
2499                 goto fail_init_fw;
2500 
2501         ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
2502                                 "mvumi", mhba);
2503         if (ret) {
2504                 dev_err(&pdev->dev, "failed to register IRQ\n");
2505                 goto fail_init_irq;
2506         }
2507 
2508         mhba->instancet->enable_intr(mhba);
2509         pci_set_drvdata(pdev, mhba);
2510 
2511         ret = mvumi_io_attach(mhba);
2512         if (ret)
2513                 goto fail_io_attach;
2514 
2515         mvumi_backup_bar_addr(mhba);
2516         dev_dbg(&pdev->dev, "probe mvumi driver successfully.\n");
2517 
2518         return 0;
2519 
2520 fail_io_attach:
2521         mhba->instancet->disable_intr(mhba);
2522         free_irq(mhba->pdev->irq, mhba);
2523 fail_init_irq:
2524         mvumi_release_fw(mhba);
2525 fail_init_fw:
2526         scsi_host_put(host);
2527 
2528 fail_alloc_instance:
2529 fail_set_dma_mask:
2530         pci_disable_device(pdev);
2531 
2532         return ret;
2533 }
2534 
2535 static void mvumi_detach_one(struct pci_dev *pdev)
2536 {
2537         struct Scsi_Host *host;
2538         struct mvumi_hba *mhba;
2539 
2540         mhba = pci_get_drvdata(pdev);
2541         if (mhba->dm_thread) {
2542                 kthread_stop(mhba->dm_thread);
2543                 mhba->dm_thread = NULL;
2544         }
2545 
2546         mvumi_detach_devices(mhba);
2547         host = mhba->shost;
2548         scsi_remove_host(mhba->shost);
2549         mvumi_flush_cache(mhba);
2550 
2551         mhba->instancet->disable_intr(mhba);
2552         free_irq(mhba->pdev->irq, mhba);
2553         mvumi_release_fw(mhba);
2554         scsi_host_put(host);
2555         pci_disable_device(pdev);
2556         dev_dbg(&pdev->dev, "driver is removed!\n");
2557 }
2558 
2559 /**
2560  * mvumi_shutdown -     Shutdown entry point
2561  * @pdev:               PCI device structure
2562  */
2563 static void mvumi_shutdown(struct pci_dev *pdev)
2564 {
2565         struct mvumi_hba *mhba = pci_get_drvdata(pdev);
2566 
2567         mvumi_flush_cache(mhba);
2568 }
2569 
2570 static int __maybe_unused mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
2571 {
2572         struct mvumi_hba *mhba = NULL;
2573 
2574         mhba = pci_get_drvdata(pdev);
2575         mvumi_flush_cache(mhba);
2576 
2577         pci_set_drvdata(pdev, mhba);
2578         mhba->instancet->disable_intr(mhba);
2579         free_irq(mhba->pdev->irq, mhba);
2580         mvumi_unmap_pci_addr(pdev, mhba->base_addr);
2581         pci_release_regions(pdev);
2582         pci_save_state(pdev);
2583         pci_disable_device(pdev);
2584         pci_set_power_state(pdev, pci_choose_state(pdev, state));
2585 
2586         return 0;
2587 }
2588 
2589 static int __maybe_unused mvumi_resume(struct pci_dev *pdev)
2590 {
2591         int ret;
2592         struct mvumi_hba *mhba = NULL;
2593 
2594         mhba = pci_get_drvdata(pdev);
2595 
2596         pci_set_power_state(pdev, PCI_D0);
2597         pci_enable_wake(pdev, PCI_D0, 0);
2598         pci_restore_state(pdev);
2599 
2600         ret = pci_enable_device(pdev);
2601         if (ret) {
2602                 dev_err(&pdev->dev, "enable device failed\n");
2603                 return ret;
2604         }
2605 
2606         ret = mvumi_pci_set_master(pdev);
2607         if (ret)
                     goto fail;
             ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
2608         if (ret)
2609                 goto fail;
2610         ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
2611         if (ret)
2612                 goto fail;
2613         ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
2614         if (ret)
2615                 goto release_regions;
2616 
2617         if (mvumi_cfg_hw_reg(mhba)) {
2618                 ret = -EINVAL;
2619                 goto unmap_pci_addr;
2620         }
2621 
2622         mhba->mmio = mhba->base_addr[0];
2623         mvumi_reset(mhba);
2624 
2625         if (mvumi_start(mhba)) {
2626                 ret = -EINVAL;
2627                 goto unmap_pci_addr;
2628         }
2629 
2630         ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
2631                                 "mvumi", mhba);
2632         if (ret) {
2633                 dev_err(&pdev->dev, "failed to register IRQ\n");
2634                 goto unmap_pci_addr;
2635         }
2636         mhba->instancet->enable_intr(mhba);
2637 
2638         return 0;
2639 
2640 unmap_pci_addr:
2641         mvumi_unmap_pci_addr(pdev, mhba->base_addr);
2642 release_regions:
2643         pci_release_regions(pdev);
2644 fail:
2645         pci_disable_device(pdev);
2646 
2647         return ret;
2648 }
2649 
2650 static struct pci_driver mvumi_pci_driver = {
2651 
2652         .name = MV_DRIVER_NAME,
2653         .id_table = mvumi_pci_table,
2654         .probe = mvumi_probe_one,
2655         .remove = mvumi_detach_one,
2656         .shutdown = mvumi_shutdown,
2657 #ifdef CONFIG_PM
2658         .suspend = mvumi_suspend,
2659         .resume = mvumi_resume,
2660 #endif
2661 };
2662 
2663 module_pci_driver(mvumi_pci_driver);
