mhba 53 drivers/scsi/mvumi.c static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st)
mhba 59 drivers/scsi/mvumi.c static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
mhba 106 drivers/scsi/mvumi.c static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
mhba 112 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev,
mhba 121 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev,
mhba 130 drivers/scsi/mvumi.c res->virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size,
mhba 134 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev,
mhba 143 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type);
mhba 151 drivers/scsi/mvumi.c list_add_tail(&res->entry, &mhba->res_list);
mhba 156 drivers/scsi/mvumi.c static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
mhba 160 drivers/scsi/mvumi.c list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
mhba 163 drivers/scsi/mvumi.c dma_free_coherent(&mhba->pdev->dev, res->size,
mhba 170 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev,
mhba 177 drivers/scsi/mvumi.c mhba->fw_flag &= ~MVUMI_FW_ALLOC;
mhba 189 drivers/scsi/mvumi.c static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
mhba 198 drivers/scsi/mvumi.c *sg_count = dma_map_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum,
mhba 200 drivers/scsi/mvumi.c if (*sg_count > mhba->max_sge) {
mhba 201 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev,
mhba 203 drivers/scsi/mvumi.c *sg_count, mhba->max_sge);
mhba 204 drivers/scsi/mvumi.c dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum,
mhba 213 drivers/scsi/mvumi.c sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(sg)));
mhba 215 drivers/scsi/mvumi.c m_sg->flags |= 1U << mhba->eot_flag;
mhba 217 drivers/scsi/mvumi.c sgd_inc(mhba, m_sg);
mhba 223 drivers/scsi/mvumi.c static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
mhba 233 drivers/scsi/mvumi.c virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, &phy_addr,
mhba 244 drivers/scsi/mvumi.c m_sg->flags = 1U << mhba->eot_flag;
mhba 245 drivers/scsi/mvumi.c sgd_setsz(mhba, m_sg, cpu_to_le32(size));
mhba 250 drivers/scsi/mvumi.c static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
mhba 257 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev, "failed to create a internal cmd\n");
mhba 262 drivers/scsi/mvumi.c cmd->frame = dma_alloc_coherent(&mhba->pdev->dev, mhba->ib_max_size,
mhba 265 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
mhba 266 drivers/scsi/mvumi.c " frame,size = %d.\n", mhba->ib_max_size);
mhba 272 drivers/scsi/mvumi.c if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
mhba 273 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev, "failed to allocate memory"
mhba 275 drivers/scsi/mvumi.c dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
mhba 286 drivers/scsi/mvumi.c static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
mhba 296 drivers/scsi/mvumi.c sgd_getsz(mhba, m_sg, size);
mhba 301 drivers/scsi/mvumi.c dma_free_coherent(&mhba->pdev->dev, size, cmd->data_buf,
mhba 304 drivers/scsi/mvumi.c dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
mhba 316 drivers/scsi/mvumi.c static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba)
mhba 320 drivers/scsi/mvumi.c if (likely(!list_empty(&mhba->cmd_pool))) {
mhba 321 drivers/scsi/mvumi.c cmd = list_entry((&mhba->cmd_pool)->next,
mhba 325 drivers/scsi/mvumi.c dev_warn(&mhba->pdev->dev, "command pool is empty!\n");
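The tag_get_one()/tag_release_one() pair indexed above (lines 53 and 59) hands out per-command tags from a pool that mvumi_init_data() later carves out of coherent memory (tag_pool.stack, line 1019 below). A minimal standalone sketch of a LIFO tag stack with this interface; the field names top/size/stack and the init order are assumptions, not taken from mvumi.h:

    #include <assert.h>
    #include <stdio.h>

    /* hypothetical mirror of the driver's struct mvumi_tag */
    struct tag_stack {
        unsigned short *stack;  /* free tags, used as a LIFO */
        unsigned short top;     /* number of free tags on the stack */
        unsigned short size;    /* total number of tags */
    };

    static void tag_init(struct tag_stack *st, unsigned short size)
    {
        unsigned short i;

        st->size = size;
        st->top = size;
        for (i = 0; i < size; i++)
            st->stack[i] = size - 1 - i;    /* low tags pop first */
    }

    static unsigned short tag_get_one(struct tag_stack *st)
    {
        assert(st->top > 0);    /* callers check tag_is_empty() first */
        return st->stack[--st->top];
    }

    static void tag_release_one(struct tag_stack *st, unsigned short tag)
    {
        assert(st->top < st->size);
        st->stack[st->top++] = tag;
    }

    int main(void)
    {
        unsigned short slots[4];
        struct tag_stack st = { slots, 0, 0 };

        tag_init(&st, 4);
        unsigned short t = tag_get_one(&st);
        printf("got tag %u, %u left\n", t, st.top);
        tag_release_one(&st, t);
        return 0;
    }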
mhba 335 drivers/scsi/mvumi.c static inline void mvumi_return_cmd(struct mvumi_hba *mhba,
mhba 339 drivers/scsi/mvumi.c list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
mhba 346 drivers/scsi/mvumi.c static void mvumi_free_cmds(struct mvumi_hba *mhba)
mhba 350 drivers/scsi/mvumi.c while (!list_empty(&mhba->cmd_pool)) {
mhba 351 drivers/scsi/mvumi.c cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
mhba 354 drivers/scsi/mvumi.c if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
mhba 365 drivers/scsi/mvumi.c static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
mhba 370 drivers/scsi/mvumi.c for (i = 0; i < mhba->max_io; i++) {
mhba 376 drivers/scsi/mvumi.c list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
mhba 377 drivers/scsi/mvumi.c if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
mhba 378 drivers/scsi/mvumi.c cmd->frame = mhba->ib_frame + i * mhba->ib_max_size;
mhba 379 drivers/scsi/mvumi.c cmd->frame_phys = mhba->ib_frame_phys
mhba 380 drivers/scsi/mvumi.c + i * mhba->ib_max_size;
mhba 382 drivers/scsi/mvumi.c cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
mhba 389 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev,
mhba 391 drivers/scsi/mvumi.c while (!list_empty(&mhba->cmd_pool)) {
mhba 392 drivers/scsi/mvumi.c cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
mhba 395 drivers/scsi/mvumi.c if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
mhba 402 drivers/scsi/mvumi.c static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba)
mhba 405 drivers/scsi/mvumi.c struct mvumi_hw_regs *regs = mhba->regs;
mhba 407 drivers/scsi/mvumi.c ib_rp_reg = ioread32(mhba->regs->inb_read_pointer);
mhba 410 drivers/scsi/mvumi.c (mhba->ib_cur_slot & regs->cl_slot_num_mask)) &&
mhba 412 drivers/scsi/mvumi.c != (mhba->ib_cur_slot & regs->cl_pointer_toggle)))) {
mhba 413 drivers/scsi/mvumi.c dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
mhba 416 drivers/scsi/mvumi.c if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
mhba 417 drivers/scsi/mvumi.c dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
mhba 420 drivers/scsi/mvumi.c return mhba->max_io - atomic_read(&mhba->fw_outstanding);
mhba 424 drivers/scsi/mvumi.c static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba)
mhba 427 drivers/scsi/mvumi.c if (atomic_read(&mhba->fw_outstanding) >= (mhba->max_io - 1))
mhba 429 drivers/scsi/mvumi.c count = ioread32(mhba->ib_shadow);
mhba 435 drivers/scsi/mvumi.c static void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
mhba 439 drivers/scsi/mvumi.c cur_ib_entry = mhba->ib_cur_slot & mhba->regs->cl_slot_num_mask;
mhba 441 drivers/scsi/mvumi.c if (cur_ib_entry >= mhba->list_num_io) {
mhba 442 drivers/scsi/mvumi.c cur_ib_entry -= mhba->list_num_io;
mhba 443 drivers/scsi/mvumi.c mhba->ib_cur_slot ^= mhba->regs->cl_pointer_toggle;
mhba 445 drivers/scsi/mvumi.c mhba->ib_cur_slot &= ~mhba->regs->cl_slot_num_mask;
mhba 446 drivers/scsi/mvumi.c mhba->ib_cur_slot |= (cur_ib_entry & mhba->regs->cl_slot_num_mask);
mhba 447 drivers/scsi/mvumi.c if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
mhba 448 drivers/scsi/mvumi.c *ib_entry = mhba->ib_list + cur_ib_entry *
mhba 451 drivers/scsi/mvumi.c *ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
mhba 453 drivers/scsi/mvumi.c atomic_inc(&mhba->fw_outstanding);
mhba 456 drivers/scsi/mvumi.c static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
mhba 458 drivers/scsi/mvumi.c iowrite32(0xffff, mhba->ib_shadow);
mhba 459 drivers/scsi/mvumi.c iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer);
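mvumi_get_ib_list_entry() above (lines 435-453) advances ib_cur_slot, a register image that packs a slot index (cl_slot_num_mask) together with a wrap "toggle" bit (cl_pointer_toggle) that flips each time the ring wraps. A standalone sketch of that arithmetic; the mask values are illustrative, the real ones are programmed by mvumi_cfg_hw_reg():

    #include <stdio.h>

    #define SLOT_NUM_MASK  0x0fffU  /* assumed layout: low bits = slot index */
    #define POINTER_TOGGLE 0x4000U  /* assumed: one bit flips on each wrap */

    /* advance cur_slot by one entry in a ring of list_num_io slots */
    static unsigned int advance_slot(unsigned int cur_slot,
                                     unsigned int list_num_io)
    {
        unsigned int entry = (cur_slot & SLOT_NUM_MASK) + 1;

        if (entry >= list_num_io) {
            entry -= list_num_io;
            cur_slot ^= POINTER_TOGGLE;     /* record the wrap */
        }
        cur_slot &= ~SLOT_NUM_MASK;
        cur_slot |= entry & SLOT_NUM_MASK;
        return cur_slot;
    }

    int main(void)
    {
        unsigned int slot = 3;              /* last slot of a 4-slot ring */
        int i;

        for (i = 0; i < 6; i++) {
            slot = advance_slot(slot, 4);
            printf("slot=%u toggle=%d\n", slot & SLOT_NUM_MASK,
                   !!(slot & POINTER_TOGGLE));
        }
        return 0;
    }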
mhba 462 drivers/scsi/mvumi.c static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
mhba 468 drivers/scsi/mvumi.c p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
mhba 471 drivers/scsi/mvumi.c if (tag > mhba->tag_pool.size) {
mhba 472 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev, "ob frame data error\n");
mhba 475 drivers/scsi/mvumi.c if (mhba->tag_cmd[tag] == NULL) {
mhba 476 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag);
mhba 478 drivers/scsi/mvumi.c } else if (mhba->tag_cmd[tag]->request_id != request_id &&
mhba 479 drivers/scsi/mvumi.c mhba->request_id_enabled) {
mhba 480 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev, "request ID from FW:0x%x,"
mhba 482 drivers/scsi/mvumi.c mhba->tag_cmd[tag]->request_id);
mhba 489 drivers/scsi/mvumi.c static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba,
mhba 493 drivers/scsi/mvumi.c struct mvumi_hw_regs *regs = mhba->regs;
mhba 497 drivers/scsi/mvumi.c ob_write_shadow = ioread32(mhba->ob_shadow);
mhba 500 drivers/scsi/mvumi.c *cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
mhba 501 drivers/scsi/mvumi.c *assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
mhba 504 drivers/scsi/mvumi.c (mhba->ob_cur_slot & regs->cl_pointer_toggle)) {
mhba 505 drivers/scsi/mvumi.c *assign_obf_end += mhba->list_num_io;
mhba 510 drivers/scsi/mvumi.c static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba,
mhba 514 drivers/scsi/mvumi.c struct mvumi_hw_regs *regs = mhba->regs;
mhba 518 drivers/scsi/mvumi.c *cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
mhba 519 drivers/scsi/mvumi.c *assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
mhba 521 drivers/scsi/mvumi.c *assign_obf_end += mhba->list_num_io;
mhba 527 drivers/scsi/mvumi.c static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
mhba 532 drivers/scsi/mvumi.c struct mvumi_hw_regs *regs = mhba->regs;
mhba 534 drivers/scsi/mvumi.c if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end))
mhba 539 drivers/scsi/mvumi.c if (cur_obf >= mhba->list_num_io) {
mhba 540 drivers/scsi/mvumi.c cur_obf -= mhba->list_num_io;
mhba 541 drivers/scsi/mvumi.c mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
mhba 544 drivers/scsi/mvumi.c p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
mhba 549 drivers/scsi/mvumi.c if (unlikely(p_outb_frame->tag > mhba->tag_pool.size ||
mhba 550 drivers/scsi/mvumi.c mhba->tag_cmd[p_outb_frame->tag] == NULL ||
mhba 552 drivers/scsi/mvumi.c mhba->tag_cmd[p_outb_frame->tag]->request_id))
mhba 553 drivers/scsi/mvumi.c if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame))
mhba 556 drivers/scsi/mvumi.c if (!list_empty(&mhba->ob_data_list)) {
mhba 558 drivers/scsi/mvumi.c list_first_entry(&mhba->ob_data_list,
mhba 564 drivers/scsi/mvumi.c cur_obf = mhba->list_num_io - 1;
mhba 565 drivers/scsi/mvumi.c mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
mhba 571 drivers/scsi/mvumi.c memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size);
mhba 574 drivers/scsi/mvumi.c list_add_tail(&ob_data->list, &mhba->free_ob_list);
mhba 576 drivers/scsi/mvumi.c mhba->ob_cur_slot &= ~regs->cl_slot_num_mask;
mhba 577 drivers/scsi/mvumi.c mhba->ob_cur_slot |= (cur_obf & regs->cl_slot_num_mask);
mhba 578 drivers/scsi/mvumi.c iowrite32(mhba->ob_cur_slot, regs->outb_read_pointer);
mhba 581 drivers/scsi/mvumi.c static void mvumi_reset(struct mvumi_hba *mhba)
mhba 583 drivers/scsi/mvumi.c struct mvumi_hw_regs *regs = mhba->regs;
mhba 592 drivers/scsi/mvumi.c static unsigned char mvumi_start(struct mvumi_hba *mhba);
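The inbound counterpart, mvumi_check_ib_list_9143() (lines 402-420 above), decides whether a free slot exists by comparing the firmware's read pointer with the driver's ib_cur_slot: equal slot numbers with different toggle bits mean the writer has lapped the reader and the ring is full. A sketch of that test, reusing the illustrative masks from the previous example:

    #include <stdio.h>

    #define SLOT_NUM_MASK  0x0fffU  /* assumed register layout, as above */
    #define POINTER_TOGGLE 0x4000U

    /*
     * Returns the number of free inbound slots: 0 when the ring is full
     * (same slot, different toggle) or the firmware already holds max_io
     * outstanding commands.
     */
    static unsigned int free_slots(unsigned int read_ptr,
                                   unsigned int cur_slot,
                                   unsigned int outstanding,
                                   unsigned int max_io)
    {
        if ((read_ptr & SLOT_NUM_MASK) == (cur_slot & SLOT_NUM_MASK) &&
            (read_ptr & POINTER_TOGGLE) != (cur_slot & POINTER_TOGGLE))
            return 0;                   /* "no free slot to use" */
        if (outstanding >= max_io)
            return 0;                   /* "firmware io overflow" */
        return max_io - outstanding;
    }

    int main(void)
    {
        printf("%u\n", free_slots(5, 5 | POINTER_TOGGLE, 2, 32)); /* 0 */
        printf("%u\n", free_slots(5, 5, 2, 32));                  /* 30 */
        return 0;
    }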
mhba 594 drivers/scsi/mvumi.c static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
mhba 596 drivers/scsi/mvumi.c mhba->fw_state = FW_STATE_ABORT;
mhba 597 drivers/scsi/mvumi.c mvumi_reset(mhba);
mhba 599 drivers/scsi/mvumi.c if (mvumi_start(mhba))
mhba 605 drivers/scsi/mvumi.c static int mvumi_wait_for_fw(struct mvumi_hba *mhba)
mhba 607 drivers/scsi/mvumi.c struct mvumi_hw_regs *regs = mhba->regs;
mhba 617 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev,
mhba 630 drivers/scsi/mvumi.c static void mvumi_backup_bar_addr(struct mvumi_hba *mhba)
mhba 635 drivers/scsi/mvumi.c pci_read_config_dword(mhba->pdev, 0x10 + i * 4,
mhba 636 drivers/scsi/mvumi.c &mhba->pci_base[i]);
mhba 640 drivers/scsi/mvumi.c static void mvumi_restore_bar_addr(struct mvumi_hba *mhba)
mhba 645 drivers/scsi/mvumi.c if (mhba->pci_base[i])
mhba 646 drivers/scsi/mvumi.c pci_write_config_dword(mhba->pdev, 0x10 + i * 4,
mhba 647 drivers/scsi/mvumi.c mhba->pci_base[i]);
mhba 666 drivers/scsi/mvumi.c static int mvumi_reset_host_9580(struct mvumi_hba *mhba)
mhba 668 drivers/scsi/mvumi.c mhba->fw_state = FW_STATE_ABORT;
mhba 670 drivers/scsi/mvumi.c iowrite32(0, mhba->regs->reset_enable);
mhba 671 drivers/scsi/mvumi.c iowrite32(0xf, mhba->regs->reset_request);
mhba 673 drivers/scsi/mvumi.c iowrite32(0x10, mhba->regs->reset_enable);
mhba 674 drivers/scsi/mvumi.c iowrite32(0x10, mhba->regs->reset_request);
mhba 676 drivers/scsi/mvumi.c pci_disable_device(mhba->pdev);
mhba 678 drivers/scsi/mvumi.c if (pci_enable_device(mhba->pdev)) {
mhba 679 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev, "enable device failed\n");
mhba 682 drivers/scsi/mvumi.c if (mvumi_pci_set_master(mhba->pdev)) {
mhba 683 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev, "set master failed\n");
mhba 686 drivers/scsi/mvumi.c mvumi_restore_bar_addr(mhba);
mhba 687 drivers/scsi/mvumi.c if (mvumi_wait_for_fw(mhba) == FAILED)
mhba 690 drivers/scsi/mvumi.c return mvumi_wait_for_outstanding(mhba);
mhba 693 drivers/scsi/mvumi.c static int mvumi_reset_host_9143(struct mvumi_hba *mhba)
mhba 695 drivers/scsi/mvumi.c return mvumi_wait_for_outstanding(mhba);
mhba 700 drivers/scsi/mvumi.c struct mvumi_hba *mhba;
mhba 702 drivers/scsi/mvumi.c mhba = (struct mvumi_hba *) scmd->device->host->hostdata;
mhba 707 drivers/scsi/mvumi.c return mhba->instancet->reset_host(mhba);
mhba 710 drivers/scsi/mvumi.c static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
mhba 718 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev,
mhba 725 drivers/scsi/mvumi.c spin_lock_irqsave(mhba->shost->host_lock, flags);
mhba 726 drivers/scsi/mvumi.c mhba->instancet->fire_cmd(mhba, cmd);
mhba 727 drivers/scsi/mvumi.c spin_unlock_irqrestore(mhba->shost->host_lock, flags);
mhba 729 drivers/scsi/mvumi.c wait_event_timeout(mhba->int_cmd_wait_q,
mhba 735 drivers/scsi/mvumi.c spin_lock_irqsave(mhba->shost->host_lock, flags);
mhba 737 drivers/scsi/mvumi.c if (mhba->tag_cmd[cmd->frame->tag]) {
mhba 738 drivers/scsi/mvumi.c mhba->tag_cmd[cmd->frame->tag] = NULL;
mhba 739 drivers/scsi/mvumi.c dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n",
mhba 741 drivers/scsi/mvumi.c tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
mhba 744 drivers/scsi/mvumi.c dev_warn(&mhba->pdev->dev,
mhba 748 drivers/scsi/mvumi.c atomic_dec(&mhba->fw_outstanding);
mhba 750 drivers/scsi/mvumi.c spin_unlock_irqrestore(mhba->shost->host_lock, flags);
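mvumi_issue_blocked_cmd() (lines 710-750 above) fires an internal frame under the host lock, sleeps on int_cmd_wait_q, and on timeout takes the tag back under the lock so a late completion cannot land in a recycled slot. A userspace model of that shape, with a condition variable standing in for the wait queue; names and the one-second timeout are illustrative:

    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>

    enum { REQ_PENDING, REQ_DONE };

    struct blocked_cmd {
        pthread_mutex_t lock;   /* stands in for shost->host_lock */
        pthread_cond_t wait_q;  /* stands in for int_cmd_wait_q */
        int status;             /* stands in for cmd->cmd_status */
    };

    static int issue_blocked(struct blocked_cmd *cmd, int timeout_sec)
    {
        struct timespec deadline;
        int err = 0;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += timeout_sec;

        pthread_mutex_lock(&cmd->lock);
        /* fire_cmd(mhba, cmd) would go here */
        while (cmd->status == REQ_PENDING && err == 0)
            err = pthread_cond_timedwait(&cmd->wait_q, &cmd->lock,
                                         &deadline);
        if (cmd->status == REQ_PENDING)
            /* like: tag_release_one(mhba, &mhba->tag_pool, tag) */
            printf("TIMEOUT: release tag\n");
        pthread_mutex_unlock(&cmd->lock);
        return cmd->status == REQ_DONE ? 0 : -1;
    }

    int main(void)
    {
        struct blocked_cmd cmd = { PTHREAD_MUTEX_INITIALIZER,
                                   PTHREAD_COND_INITIALIZER, REQ_PENDING };
        /* no completer thread in this demo, so the wait times out */
        printf("result: %d\n", issue_blocked(&cmd, 1));
        return 0;
    }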
mhba 755 drivers/scsi/mvumi.c static void mvumi_release_fw(struct mvumi_hba *mhba)
mhba 757 drivers/scsi/mvumi.c mvumi_free_cmds(mhba);
mhba 758 drivers/scsi/mvumi.c mvumi_release_mem_resource(mhba);
mhba 759 drivers/scsi/mvumi.c mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
mhba 760 drivers/scsi/mvumi.c dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
mhba 761 drivers/scsi/mvumi.c mhba->handshake_page, mhba->handshake_page_phys);
mhba 762 drivers/scsi/mvumi.c kfree(mhba->regs);
mhba 763 drivers/scsi/mvumi.c pci_release_regions(mhba->pdev);
mhba 766 drivers/scsi/mvumi.c static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba)
mhba 773 drivers/scsi/mvumi.c for (device_id = 0; device_id < mhba->max_target_id; device_id++) {
mhba 774 drivers/scsi/mvumi.c if (!(mhba->target_map[device_id / bitcount] &
mhba 777 drivers/scsi/mvumi.c get_cmd: cmd = mvumi_create_internal_cmd(mhba, 0);
mhba 780 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev, "failed to get memory"
mhba 802 drivers/scsi/mvumi.c mvumi_issue_blocked_cmd(mhba, cmd);
mhba 804 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev,
mhba 809 drivers/scsi/mvumi.c mvumi_delete_internal_cmd(mhba, cmd);
mhba 830 drivers/scsi/mvumi.c static void mvumi_hs_build_page(struct mvumi_hba *mhba,
mhba 845 drivers/scsi/mvumi.c if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
mhba 874 drivers/scsi/mvumi.c hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys);
mhba 875 drivers/scsi/mvumi.c hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys);
mhba 877 drivers/scsi/mvumi.c hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys);
mhba 878 drivers/scsi/mvumi.c hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys);
mhba 879 drivers/scsi/mvumi.c hs_page4->ib_entry_size = mhba->ib_max_size_setting;
mhba 880 drivers/scsi/mvumi.c hs_page4->ob_entry_size = mhba->ob_max_size_setting;
mhba 881 drivers/scsi/mvumi.c if (mhba->hba_capability
mhba 884 drivers/scsi/mvumi.c &mhba->list_num_io,
mhba 887 drivers/scsi/mvumi.c &mhba->list_num_io,
mhba 890 drivers/scsi/mvumi.c hs_page4->ob_depth = (u8) mhba->list_num_io;
mhba 891 drivers/scsi/mvumi.c hs_page4->ib_depth = (u8) mhba->list_num_io;
mhba 898 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n",
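mvumi_flush_cache() (line 774 above) skips targets whose bit is clear in target_map, and mvumi_slave_configure() (line 2012, further down) is where the bit gets set. The map is one bit per target id packed into bytes; bitcount in the listing is the bits-per-slot divisor. A tiny sketch of the set/test pair, assuming 8-bit slots:

    #include <stdio.h>

    #define BITCOUNT 8      /* bits per map slot, as in the driver's bitcount */

    static void map_set(unsigned char *map, int id)
    {
        map[id / BITCOUNT] |= 1 << (id % BITCOUNT);
    }

    static int map_test(const unsigned char *map, int id)
    {
        return map[id / BITCOUNT] & (1 << (id % BITCOUNT));
    }

    int main(void)
    {
        unsigned char map[16] = { 0 };

        map_set(map, 10);
        printf("target 10: %d, target 11: %d\n",
               !!map_test(map, 10), !!map_test(map, 11));
        return 0;
    }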
mhba 908 drivers/scsi/mvumi.c static int mvumi_init_data(struct mvumi_hba *mhba)
mhba 916 drivers/scsi/mvumi.c if (mhba->fw_flag & MVUMI_FW_ALLOC)
mhba 919 drivers/scsi/mvumi.c tmp_size = mhba->ib_max_size * mhba->max_io;
mhba 920 drivers/scsi/mvumi.c if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
mhba 921 drivers/scsi/mvumi.c tmp_size += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
mhba 923 drivers/scsi/mvumi.c tmp_size += 128 + mhba->ob_max_size * mhba->max_io;
mhba 926 drivers/scsi/mvumi.c res_mgnt = mvumi_alloc_mem_resource(mhba,
mhba 929 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev,
mhba 940 drivers/scsi/mvumi.c mhba->ib_list = v;
mhba 941 drivers/scsi/mvumi.c mhba->ib_list_phys = p;
mhba 942 drivers/scsi/mvumi.c if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
mhba 943 drivers/scsi/mvumi.c v += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
mhba 944 drivers/scsi/mvumi.c p += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
mhba 945 drivers/scsi/mvumi.c mhba->ib_frame = v;
mhba 946 drivers/scsi/mvumi.c mhba->ib_frame_phys = p;
mhba 948 drivers/scsi/mvumi.c v += mhba->ib_max_size * mhba->max_io;
mhba 949 drivers/scsi/mvumi.c p += mhba->ib_max_size * mhba->max_io;
mhba 955 drivers/scsi/mvumi.c mhba->ib_shadow = v;
mhba 956 drivers/scsi/mvumi.c mhba->ib_shadow_phys = p;
mhba 960 drivers/scsi/mvumi.c if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
mhba 964 drivers/scsi/mvumi.c mhba->ob_shadow = v;
mhba 965 drivers/scsi/mvumi.c mhba->ob_shadow_phys = p;
mhba 972 drivers/scsi/mvumi.c mhba->ob_shadow = v;
mhba 973 drivers/scsi/mvumi.c mhba->ob_shadow_phys = p;
mhba 983 drivers/scsi/mvumi.c mhba->ob_list = v;
mhba 984 drivers/scsi/mvumi.c mhba->ob_list_phys = p;
mhba 987 drivers/scsi/mvumi.c tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool));
mhba 990 drivers/scsi/mvumi.c res_mgnt = mvumi_alloc_mem_resource(mhba,
mhba 993 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev,
mhba 999 drivers/scsi/mvumi.c for (i = mhba->max_io; i != 0; i--) {
mhba 1001 drivers/scsi/mvumi.c list_add_tail(&ob_pool->list, &mhba->ob_data_list);
mhba 1002 drivers/scsi/mvumi.c virmem += mhba->ob_max_size + sizeof(*ob_pool);
mhba 1005 drivers/scsi/mvumi.c tmp_size = sizeof(unsigned short) * mhba->max_io +
mhba 1006 drivers/scsi/mvumi.c sizeof(struct mvumi_cmd *) * mhba->max_io;
mhba 1007 drivers/scsi/mvumi.c tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) /
mhba 1010 drivers/scsi/mvumi.c res_mgnt = mvumi_alloc_mem_resource(mhba,
mhba 1013 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev,
mhba 1019 drivers/scsi/mvumi.c mhba->tag_pool.stack = virmem;
mhba 1020 drivers/scsi/mvumi.c mhba->tag_pool.size = mhba->max_io;
mhba 1021 drivers/scsi/mvumi.c tag_init(&mhba->tag_pool, mhba->max_io);
mhba 1022 drivers/scsi/mvumi.c virmem += sizeof(unsigned short) * mhba->max_io;
mhba 1024 drivers/scsi/mvumi.c mhba->tag_cmd = virmem;
mhba 1025 drivers/scsi/mvumi.c virmem += sizeof(struct mvumi_cmd *) * mhba->max_io;
mhba 1027 drivers/scsi/mvumi.c mhba->target_map = virmem;
mhba 1029 drivers/scsi/mvumi.c mhba->fw_flag |= MVUMI_FW_ALLOC;
mhba 1033 drivers/scsi/mvumi.c mvumi_release_mem_resource(mhba);
mhba 1037 drivers/scsi/mvumi.c static int mvumi_hs_process_page(struct mvumi_hba *mhba,
mhba 1046 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev, "checksum error\n");
mhba 1054 drivers/scsi/mvumi.c mhba->max_io = hs_page1->max_io_support;
mhba 1055 drivers/scsi/mvumi.c mhba->list_num_io = hs_page1->cl_inout_list_depth;
mhba 1056 drivers/scsi/mvumi.c mhba->max_transfer_size = hs_page1->max_transfer_size;
mhba 1057 drivers/scsi/mvumi.c mhba->max_target_id = hs_page1->max_devices_support;
mhba 1058 drivers/scsi/mvumi.c mhba->hba_capability = hs_page1->capability;
mhba 1059 drivers/scsi/mvumi.c mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size;
mhba 1060 drivers/scsi/mvumi.c mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2;
mhba 1062 drivers/scsi/mvumi.c mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size;
mhba 1063 drivers/scsi/mvumi.c mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2;
mhba 1065 drivers/scsi/mvumi.c dev_dbg(&mhba->pdev->dev, "FW version:%d\n",
mhba 1068 drivers/scsi/mvumi.c if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG)
mhba 1069 drivers/scsi/mvumi.c mhba->eot_flag = 22;
mhba 1071 drivers/scsi/mvumi.c mhba->eot_flag = 27;
mhba 1072 drivers/scsi/mvumi.c if (mhba->hba_capability & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF)
mhba 1073 drivers/scsi/mvumi.c mhba->list_num_io = 1 << hs_page1->cl_inout_list_depth;
mhba 1076 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev, "handshake: page code error\n");
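mvumi_init_data() above allocates one coherent block and then carves it by advancing a virtual and a physical cursor in lockstep: inbound list, optional dynamic-source frames, the two shadow words, then the outbound list inside the 128 bytes of slack added at line 923. A userspace sketch of the carving pattern; the sizes and alignments used here are assumptions for illustration, not the driver's exact values:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* align x up to the next multiple of a (a power of two), like round_up() */
    #define ROUND_UP(x, a) (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

    int main(void)
    {
        /* illustrative sizes; real values come from handshake page 1 */
        size_t max_io = 32, ib_max_size = 128, ob_max_size = 64;
        size_t tmp = ib_max_size * max_io + 128 + ob_max_size * max_io + 8;

        unsigned char *base = malloc(tmp);  /* dma_alloc_coherent() stand-in */
        if (!base)
            return 1;

        uintptr_t p = (uintptr_t)base;
        uintptr_t ib_list = p;      p += ib_max_size * max_io;
        p = ROUND_UP(p, 8);         /* assumed shadow alignment */
        uintptr_t ib_shadow = p;    p += sizeof(uint32_t);
        p = ROUND_UP(p, 8);
        uintptr_t ob_shadow = p;    p += sizeof(uint32_t);
        p = ROUND_UP(p, 128);       /* assumed outbound list alignment */
        uintptr_t ob_list = p;

        printf("ib=%#lx ib_shadow=%#lx ob_shadow=%#lx ob=%#lx\n",
               (unsigned long)ib_list, (unsigned long)ib_shadow,
               (unsigned long)ob_shadow, (unsigned long)ob_list);
        free(base);
        return 0;
    }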
mhba 1091 drivers/scsi/mvumi.c static int mvumi_handshake(struct mvumi_hba *mhba)
mhba 1095 drivers/scsi/mvumi.c struct mvumi_hw_regs *regs = mhba->regs;
mhba 1097 drivers/scsi/mvumi.c if (mhba->fw_state == FW_STATE_STARTING)
mhba 1102 drivers/scsi/mvumi.c dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state);
mhba 1104 drivers/scsi/mvumi.c mhba->fw_state = FW_STATE_STARTING;
mhba 1112 drivers/scsi/mvumi.c mhba->fw_state = FW_STATE_HANDSHAKING;
mhba 1121 drivers/scsi/mvumi.c iowrite32(lower_32_bits(mhba->handshake_page_phys),
mhba 1123 drivers/scsi/mvumi.c iowrite32(upper_32_bits(mhba->handshake_page_phys),
mhba 1134 drivers/scsi/mvumi.c hs_header = (struct mvumi_hs_header *) mhba->handshake_page;
mhba 1136 drivers/scsi/mvumi.c mhba->hba_total_pages =
mhba 1139 drivers/scsi/mvumi.c if (mhba->hba_total_pages == 0)
mhba 1140 drivers/scsi/mvumi.c mhba->hba_total_pages = HS_PAGE_TOTAL-1;
mhba 1144 drivers/scsi/mvumi.c if (mvumi_hs_process_page(mhba, hs_header)) {
mhba 1148 drivers/scsi/mvumi.c if (mvumi_init_data(mhba)) {
mhba 1154 drivers/scsi/mvumi.c mhba->hba_total_pages = HS_PAGE_TOTAL-1;
mhba 1157 drivers/scsi/mvumi.c if ((hs_header->page_code + 1) <= mhba->hba_total_pages) {
mhba 1160 drivers/scsi/mvumi.c mvumi_hs_build_page(mhba, hs_header);
mhba 1177 drivers/scsi/mvumi.c iowrite32(mhba->list_num_io, mhba->ib_shadow);
mhba 1179 drivers/scsi/mvumi.c iowrite32(lower_32_bits(mhba->ib_shadow_phys),
mhba 1181 drivers/scsi/mvumi.c iowrite32(upper_32_bits(mhba->ib_shadow_phys),
mhba 1184 drivers/scsi/mvumi.c if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) {
mhba 1186 drivers/scsi/mvumi.c iowrite32((mhba->list_num_io-1) |
mhba 1188 drivers/scsi/mvumi.c mhba->ob_shadow);
mhba 1189 drivers/scsi/mvumi.c iowrite32(lower_32_bits(mhba->ob_shadow_phys),
mhba 1191 drivers/scsi/mvumi.c iowrite32(upper_32_bits(mhba->ob_shadow_phys),
mhba 1195 drivers/scsi/mvumi.c mhba->ib_cur_slot = (mhba->list_num_io - 1) |
mhba 1197 drivers/scsi/mvumi.c mhba->ob_cur_slot = (mhba->list_num_io - 1) |
mhba 1199 drivers/scsi/mvumi.c mhba->fw_state = FW_STATE_STARTED;
mhba 1203 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n",
mhba 1210 drivers/scsi/mvumi.c static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
mhba 1216 drivers/scsi/mvumi.c mvumi_handshake(mhba);
mhba 1218 drivers/scsi/mvumi.c isr_status = mhba->instancet->read_fw_status_reg(mhba);
mhba 1220 drivers/scsi/mvumi.c if (mhba->fw_state == FW_STATE_STARTED)
mhba 1223 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev,
mhba 1225 drivers/scsi/mvumi.c mhba->fw_state);
mhba 1226 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev,
mhba 1228 drivers/scsi/mvumi.c mhba->global_isr, isr_status);
mhba 1238 drivers/scsi/mvumi.c static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
mhba 1244 drivers/scsi/mvumi.c tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
mhba 1248 drivers/scsi/mvumi.c mhba->regs->pciea_to_arm_drbl_reg);
mhba 1250 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev,
mhba 1256 drivers/scsi/mvumi.c tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
mhba 1259 drivers/scsi/mvumi.c mhba->fw_state = FW_STATE_STARTING;
mhba 1260 drivers/scsi/mvumi.c dev_dbg(&mhba->pdev->dev, "start firmware handshake...\n");
mhba 1262 drivers/scsi/mvumi.c if (mvumi_handshake_event(mhba)) {
mhba 1263 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev,
mhba 1265 drivers/scsi/mvumi.c mhba->fw_state);
mhba 1268 drivers/scsi/mvumi.c } while (mhba->fw_state != FW_STATE_STARTED);
mhba 1270 drivers/scsi/mvumi.c dev_dbg(&mhba->pdev->dev, "firmware handshake done\n");
mhba 1275 drivers/scsi/mvumi.c static unsigned char mvumi_start(struct mvumi_hba *mhba)
mhba 1278 drivers/scsi/mvumi.c struct mvumi_hw_regs *regs = mhba->regs;
mhba 1288 drivers/scsi/mvumi.c if (mvumi_check_handshake(mhba))
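mvumi_check_handshake() (lines 1238-1270 above) nudges the firmware with a doorbell, then repeatedly calls mvumi_handshake_event() until fw_state reaches FW_STATE_STARTED, failing out if a step errors. A toy state machine with that control flow; the transitions here are a simplification, not the real HS_S_* doorbell protocol:

    #include <stdio.h>

    enum fw_state { FW_STATE_STARTING, FW_STATE_HANDSHAKING, FW_STATE_STARTED };

    /* one mvumi_handshake() pass: service the current doorbell state */
    static enum fw_state handshake_step(enum fw_state s)
    {
        switch (s) {
        case FW_STATE_STARTING:
            return FW_STATE_HANDSHAKING;    /* pages exchanged */
        case FW_STATE_HANDSHAKING:
            return FW_STATE_STARTED;        /* lists programmed */
        default:
            return s;
        }
    }

    int main(void)
    {
        enum fw_state s = FW_STATE_STARTING;
        int retries = 5;

        do {
            s = handshake_step(s);
            printf("fw_state=%d\n", s);
        } while (s != FW_STATE_STARTED && retries--);
        return s == FW_STATE_STARTED ? 0 : 1;
    }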
mhba 1299 drivers/scsi/mvumi.c static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
mhba 1328 drivers/scsi/mvumi.c dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
mhba 1332 drivers/scsi/mvumi.c mvumi_return_cmd(mhba, cmd);
mhba 1335 drivers/scsi/mvumi.c static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba,
mhba 1349 drivers/scsi/mvumi.c wake_up(&mhba->int_cmd_wait_q);
mhba 1353 drivers/scsi/mvumi.c static void mvumi_show_event(struct mvumi_hba *mhba,
mhba 1358 drivers/scsi/mvumi.c dev_warn(&mhba->pdev->dev,
mhba 1379 drivers/scsi/mvumi.c static int mvumi_handle_hotplug(struct mvumi_hba *mhba, u16 devid, int status)
mhba 1385 drivers/scsi/mvumi.c sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
mhba 1387 drivers/scsi/mvumi.c dev_dbg(&mhba->pdev->dev, "remove disk %d-%d-%d.\n", 0,
mhba 1393 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev, " no disk[%d] to remove\n",
mhba 1396 drivers/scsi/mvumi.c sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
mhba 1398 drivers/scsi/mvumi.c scsi_add_device(mhba->shost, 0, devid, 0);
mhba 1399 drivers/scsi/mvumi.c dev_dbg(&mhba->pdev->dev, " add disk %d-%d-%d.\n", 0,
mhba 1403 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev, " don't add disk %d-%d-%d.\n",
mhba 1411 drivers/scsi/mvumi.c static u64 mvumi_inquiry(struct mvumi_hba *mhba,
mhba 1420 drivers/scsi/mvumi.c cmd = mvumi_create_internal_cmd(mhba, data_buf_len);
mhba 1441 drivers/scsi/mvumi.c mvumi_issue_blocked_cmd(mhba, cmd);
mhba 1444 drivers/scsi/mvumi.c if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
mhba 1450 drivers/scsi/mvumi.c dev_dbg(&mhba->pdev->dev,
mhba 1456 drivers/scsi/mvumi.c mvumi_delete_internal_cmd(mhba, cmd);
mhba 1461 drivers/scsi/mvumi.c static void mvumi_detach_devices(struct mvumi_hba *mhba)
mhba 1466 drivers/scsi/mvumi.c mutex_lock(&mhba->device_lock);
mhba 1470 drivers/scsi/mvumi.c &mhba->shost_dev_list, list) {
mhba 1471 drivers/scsi/mvumi.c mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
mhba 1473 drivers/scsi/mvumi.c dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
mhba 1477 drivers/scsi/mvumi.c list_for_each_entry_safe(mv_dev, dev_next, &mhba->mhba_dev_list, list) {
mhba 1479 drivers/scsi/mvumi.c dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
mhba 1485 drivers/scsi/mvumi.c if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
mhba 1486 drivers/scsi/mvumi.c sdev = scsi_device_lookup(mhba->shost, 0,
mhba 1487 drivers/scsi/mvumi.c mhba->max_target_id - 1, 0);
mhba 1494 drivers/scsi/mvumi.c mutex_unlock(&mhba->device_lock);
mhba 1497 drivers/scsi/mvumi.c static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id)
mhba 1501 drivers/scsi/mvumi.c sdev = scsi_device_lookup(mhba->shost, 0, id, 0);
mhba 1508 drivers/scsi/mvumi.c static int mvumi_match_devices(struct mvumi_hba *mhba, int id, u64 wwid)
mhba 1512 drivers/scsi/mvumi.c list_for_each_entry(mv_dev, &mhba->shost_dev_list, list) {
mhba 1515 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev,
mhba 1521 drivers/scsi/mvumi.c if (mhba->pdev->device ==
mhba 1523 drivers/scsi/mvumi.c mvumi_rescan_devices(mhba, id);
mhba 1531 drivers/scsi/mvumi.c static void mvumi_remove_devices(struct mvumi_hba *mhba, int id)
mhba 1536 drivers/scsi/mvumi.c &mhba->shost_dev_list, list) {
mhba 1538 drivers/scsi/mvumi.c dev_dbg(&mhba->pdev->dev,
mhba 1541 drivers/scsi/mvumi.c mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
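mvumi_match_devices() (lines 1508-1523 above) walks shost_dev_list comparing the WWID just read by INQUIRY against devices already exposed to the SCSI core; a known WWID that shows up under a different target id is flagged as an error. A standalone sketch of that lookup over a plain array; the struct fields and return convention are assumptions:

    #include <stdio.h>

    struct mv_dev {
        int id;
        unsigned long long wwid;
    };

    /* 1 = found in place, 0 = new device, -1 = wwid moved to another id */
    static int match_devices(const struct mv_dev *devs, int n,
                             int id, unsigned long long wwid)
    {
        int i;

        for (i = 0; i < n; i++) {
            if (devs[i].wwid != wwid)
                continue;
            if (devs[i].id != id) {
                fprintf(stderr, "wwid %llx moved: %d -> %d\n",
                        wwid, devs[i].id, id);
                return -1;
            }
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        struct mv_dev devs[] = { { 0, 0xabcULL }, { 1, 0xdefULL } };
        int a = match_devices(devs, 2, 1, 0xdefULL);    /* 1: in place */
        int b = match_devices(devs, 2, 2, 0xabcULL);    /* -1: moved */
        int c = match_devices(devs, 2, 3, 0x123ULL);    /* 0: new */

        printf("%d %d %d\n", a, b, c);
        return 0;
    }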
mhba 1548 drivers/scsi/mvumi.c static int mvumi_probe_devices(struct mvumi_hba *mhba)
mhba 1556 drivers/scsi/mvumi.c cmd = mvumi_create_internal_cmd(mhba, 64);
mhba 1560 drivers/scsi/mvumi.c if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
mhba 1561 drivers/scsi/mvumi.c maxid = mhba->max_target_id;
mhba 1563 drivers/scsi/mvumi.c maxid = mhba->max_target_id - 1;
mhba 1566 drivers/scsi/mvumi.c wwid = mvumi_inquiry(mhba, id, cmd);
mhba 1569 drivers/scsi/mvumi.c mvumi_remove_devices(mhba, id);
mhba 1572 drivers/scsi/mvumi.c found = mvumi_match_devices(mhba, id, wwid);
mhba 1574 drivers/scsi/mvumi.c mvumi_remove_devices(mhba, id);
mhba 1578 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev,
mhba 1588 drivers/scsi/mvumi.c &mhba->mhba_dev_list);
mhba 1589 drivers/scsi/mvumi.c dev_dbg(&mhba->pdev->dev,
mhba 1600 drivers/scsi/mvumi.c mvumi_delete_internal_cmd(mhba, cmd);
mhba 1608 drivers/scsi/mvumi.c struct mvumi_hba *mhba = (struct mvumi_hba *) data;
mhba 1614 drivers/scsi/mvumi.c if (!atomic_read(&mhba->pnp_count))
mhba 1617 drivers/scsi/mvumi.c atomic_set(&mhba->pnp_count, 0);
mhba 1620 drivers/scsi/mvumi.c mutex_lock(&mhba->device_lock);
mhba 1621 drivers/scsi/mvumi.c ret = mvumi_probe_devices(mhba);
mhba 1624 drivers/scsi/mvumi.c &mhba->mhba_dev_list, list) {
mhba 1625 drivers/scsi/mvumi.c if (mvumi_handle_hotplug(mhba, mv_dev->id,
mhba 1627 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev,
mhba 1636 drivers/scsi/mvumi.c &mhba->shost_dev_list);
mhba 1640 drivers/scsi/mvumi.c mutex_unlock(&mhba->device_lock);
mhba 1645 drivers/scsi/mvumi.c static void mvumi_proc_msg(struct mvumi_hba *mhba,
mhba 1653 drivers/scsi/mvumi.c if (mhba->fw_flag & MVUMI_FW_ATTACH) {
mhba 1658 drivers/scsi/mvumi.c mutex_lock(&mhba->sas_discovery_mutex);
mhba 1663 drivers/scsi/mvumi.c mvumi_handle_hotplug(mhba, index, DEVICE_ONLINE);
mhba 1671 drivers/scsi/mvumi.c mvumi_handle_hotplug(mhba, index, DEVICE_OFFLINE);
mhba 1673 drivers/scsi/mvumi.c mutex_unlock(&mhba->sas_discovery_mutex);
mhba 1677 drivers/scsi/mvumi.c static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer)
mhba 1685 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev, "event count[0x%x] is bigger"
mhba 1692 drivers/scsi/mvumi.c mvumi_show_event(mhba, param);
mhba 1695 drivers/scsi/mvumi.c mvumi_proc_msg(mhba, buffer);
mhba 1699 drivers/scsi/mvumi.c static int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg)
mhba 1704 drivers/scsi/mvumi.c cmd = mvumi_create_internal_cmd(mhba, 512);
mhba 1719 drivers/scsi/mvumi.c mvumi_issue_blocked_cmd(mhba, cmd);
mhba 1722 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev, "get event failed, status=0x%x.\n",
mhba 1725 drivers/scsi/mvumi.c mvumi_notification(mhba, cmd->frame->cdb[1], cmd->data_buf);
mhba 1727 drivers/scsi/mvumi.c mvumi_delete_internal_cmd(mhba, cmd);
mhba 1736 drivers/scsi/mvumi.c mvumi_get_event(mu_ev->mhba, mu_ev->event);
mhba 1740 drivers/scsi/mvumi.c static void mvumi_launch_events(struct mvumi_hba *mhba, u32 isr_status)
mhba 1746 drivers/scsi/mvumi.c atomic_inc(&mhba->pnp_count);
mhba 1747 drivers/scsi/mvumi.c wake_up_process(mhba->dm_thread);
mhba 1755 drivers/scsi/mvumi.c mu_ev->mhba = mhba;
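mvumi_rescan_bus() (lines 1608-1640 above) is a kernel thread that consumes pnp_count: mvumi_launch_events() (lines 1740-1747) bumps the counter from the interrupt path and wakes the thread, which zeroes it and reprobes. A userspace model of that producer/consumer shape; a condition variable stands in for wake_up_process()/schedule(), and the stop flag stands in for kthread_stop():

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
    static int pnp_count;       /* stands in for atomic pnp_count */
    static bool stop;

    static void *rescan_bus(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!stop) {
            if (!pnp_count) {               /* nothing pending: sleep */
                pthread_cond_wait(&wake, &lock);
                continue;
            }
            pnp_count = 0;                  /* atomic_set(&pnp_count, 0) */
            pthread_mutex_unlock(&lock);
            printf("probing devices...\n"); /* mvumi_probe_devices() */
            pthread_mutex_lock(&lock);
        }
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, rescan_bus, NULL);

        pthread_mutex_lock(&lock);
        pnp_count++;                        /* mvumi_launch_events() side */
        pthread_cond_signal(&wake);         /* wake_up_process(dm_thread) */
        pthread_mutex_unlock(&lock);

        pthread_mutex_lock(&lock);
        stop = true;                        /* kthread_stop() */
        pthread_cond_signal(&wake);
        pthread_mutex_unlock(&lock);

        pthread_join(t, NULL);
        return 0;
    }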
mhba 1764 drivers/scsi/mvumi.c static void mvumi_handle_clob(struct mvumi_hba *mhba)
mhba 1770 drivers/scsi/mvumi.c while (!list_empty(&mhba->free_ob_list)) {
mhba 1771 drivers/scsi/mvumi.c pool = list_first_entry(&mhba->free_ob_list,
mhba 1774 drivers/scsi/mvumi.c list_add_tail(&pool->list, &mhba->ob_data_list);
mhba 1777 drivers/scsi/mvumi.c cmd = mhba->tag_cmd[ob_frame->tag];
mhba 1779 drivers/scsi/mvumi.c atomic_dec(&mhba->fw_outstanding);
mhba 1780 drivers/scsi/mvumi.c mhba->tag_cmd[ob_frame->tag] = NULL;
mhba 1781 drivers/scsi/mvumi.c tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag);
mhba 1783 drivers/scsi/mvumi.c mvumi_complete_cmd(mhba, cmd, ob_frame);
mhba 1785 drivers/scsi/mvumi.c mvumi_complete_internal_cmd(mhba, cmd, ob_frame);
mhba 1787 drivers/scsi/mvumi.c mhba->instancet->fire_cmd(mhba, NULL);
mhba 1792 drivers/scsi/mvumi.c struct mvumi_hba *mhba = (struct mvumi_hba *) devp;
mhba 1795 drivers/scsi/mvumi.c spin_lock_irqsave(mhba->shost->host_lock, flags);
mhba 1796 drivers/scsi/mvumi.c if (unlikely(mhba->instancet->clear_intr(mhba) || !mhba->global_isr)) {
mhba 1797 drivers/scsi/mvumi.c spin_unlock_irqrestore(mhba->shost->host_lock, flags);
mhba 1801 drivers/scsi/mvumi.c if (mhba->global_isr & mhba->regs->int_dl_cpu2pciea) {
mhba 1802 drivers/scsi/mvumi.c if (mhba->isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY))
mhba 1803 drivers/scsi/mvumi.c mvumi_launch_events(mhba, mhba->isr_status);
mhba 1804 drivers/scsi/mvumi.c if (mhba->isr_status & DRBL_HANDSHAKE_ISR) {
mhba 1805 drivers/scsi/mvumi.c dev_warn(&mhba->pdev->dev, "enter handshake again!\n");
mhba 1806 drivers/scsi/mvumi.c mvumi_handshake(mhba);
mhba 1811 drivers/scsi/mvumi.c if (mhba->global_isr & mhba->regs->int_comaout)
mhba 1812 drivers/scsi/mvumi.c mvumi_receive_ob_list_entry(mhba);
mhba 1814 drivers/scsi/mvumi.c mhba->global_isr = 0;
mhba 1815 drivers/scsi/mvumi.c mhba->isr_status = 0;
mhba 1816 drivers/scsi/mvumi.c if (mhba->fw_state == FW_STATE_STARTED)
mhba 1817 drivers/scsi/mvumi.c mvumi_handle_clob(mhba);
mhba 1818 drivers/scsi/mvumi.c spin_unlock_irqrestore(mhba->shost->host_lock, flags);
mhba 1822 drivers/scsi/mvumi.c static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
mhba 1830 drivers/scsi/mvumi.c if (unlikely(mhba->fw_state != FW_STATE_STARTED)) {
mhba 1831 drivers/scsi/mvumi.c dev_dbg(&mhba->pdev->dev, "firmware not ready.\n");
mhba 1834 drivers/scsi/mvumi.c if (tag_is_empty(&mhba->tag_pool)) {
mhba 1835 drivers/scsi/mvumi.c dev_dbg(&mhba->pdev->dev, "no free tag.\n");
mhba 1838 drivers/scsi/mvumi.c mvumi_get_ib_list_entry(mhba, &ib_entry);
mhba 1840 drivers/scsi/mvumi.c cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool);
mhba 1841 drivers/scsi/mvumi.c cmd->frame->request_id = mhba->io_seq++;
mhba 1843 drivers/scsi/mvumi.c mhba->tag_cmd[cmd->frame->tag] = cmd;
mhba 1846 drivers/scsi/mvumi.c if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
mhba 1860 drivers/scsi/mvumi.c static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
mhba 1867 drivers/scsi/mvumi.c list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list);
mhba 1868 drivers/scsi/mvumi.c count = mhba->instancet->check_ib_list(mhba);
mhba 1869 drivers/scsi/mvumi.c if (list_empty(&mhba->waiting_req_list) || !count)
mhba 1873 drivers/scsi/mvumi.c cmd = list_first_entry(&mhba->waiting_req_list,
mhba 1876 drivers/scsi/mvumi.c result = mvumi_send_command(mhba, cmd);
mhba 1882 drivers/scsi/mvumi.c list_add(&cmd->queue_pointer, &mhba->waiting_req_list);
mhba 1884 drivers/scsi/mvumi.c mvumi_send_ib_list_entry(mhba);
mhba 1888 drivers/scsi/mvumi.c } while (!list_empty(&mhba->waiting_req_list) && count--);
mhba 1891 drivers/scsi/mvumi.c mvumi_send_ib_list_entry(mhba);
mhba 1898 drivers/scsi/mvumi.c static void mvumi_enable_intr(struct mvumi_hba *mhba)
mhba 1901 drivers/scsi/mvumi.c struct mvumi_hw_regs *regs = mhba->regs;
mhba 1913 drivers/scsi/mvumi.c static void mvumi_disable_intr(struct mvumi_hba *mhba)
mhba 1916 drivers/scsi/mvumi.c struct mvumi_hw_regs *regs = mhba->regs;
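mvumi_fire_cmd() (lines 1860-1891 above) appends the new command to waiting_req_list, asks check_ib_list() how many inbound slots are free, then drains the list until a send reports no resources, re-queuing that command at the head and ringing the inbound doorbell once if anything went out. A toy model of the drain loop; the FIFO and slot counter are illustrative:

    #include <stdio.h>

    enum qc_result { QC_SENT, QC_BUSY };

    #define MAXQ 8
    static int waiting[MAXQ];   /* tiny FIFO of command ids (demo only) */
    static int head, tail;

    static enum qc_result send_command(int id, int *free_slots)
    {
        if (*free_slots == 0)
            return QC_BUSY;             /* no free tag / inbound slot */
        (*free_slots)--;
        printf("sent cmd %d\n", id);
        return QC_SENT;
    }

    static void fire_cmd(int new_id, int free_slots)
    {
        int sent = 0;

        waiting[tail++] = new_id;       /* list_add_tail() */
        while (head < tail && free_slots) {
            int id = waiting[head];     /* peek the head */
            if (send_command(id, &free_slots) == QC_BUSY)
                break;                  /* keep it queued, stop draining */
            head++;                     /* consumed */
            sent = 1;
        }
        if (sent)                       /* mvumi_send_ib_list_entry() */
            printf("ring inbound doorbell\n");
    }

    int main(void)
    {
        fire_cmd(1, 2);
        fire_cmd(2, 0);     /* no slots: stays on waiting_req_list */
        fire_cmd(3, 2);     /* drains cmd 2, then cmd 3 */
        return 0;
    }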
mhba 1927 drivers/scsi/mvumi.c struct mvumi_hba *mhba = (struct mvumi_hba *) extend;
mhba 1929 drivers/scsi/mvumi.c struct mvumi_hw_regs *regs = mhba->regs;
mhba 1936 drivers/scsi/mvumi.c if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
mhba 1947 drivers/scsi/mvumi.c status ^= mhba->regs->int_comaerr;
mhba 1961 drivers/scsi/mvumi.c mhba->global_isr = status;
mhba 1962 drivers/scsi/mvumi.c mhba->isr_status = isr_status;
mhba 1971 drivers/scsi/mvumi.c static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba)
mhba 1975 drivers/scsi/mvumi.c status = ioread32(mhba->regs->arm_to_pciea_drbl_reg);
mhba 1977 drivers/scsi/mvumi.c iowrite32(status, mhba->regs->arm_to_pciea_drbl_reg);
mhba 2005 drivers/scsi/mvumi.c struct mvumi_hba *mhba;
mhba 2008 drivers/scsi/mvumi.c mhba = (struct mvumi_hba *) sdev->host->hostdata;
mhba 2009 drivers/scsi/mvumi.c if (sdev->id >= mhba->max_target_id)
mhba 2012 drivers/scsi/mvumi.c mhba->target_map[sdev->id / bitcount] |= (1 << (sdev->id % bitcount));
mhba 2025 drivers/scsi/mvumi.c static unsigned char mvumi_build_frame(struct mvumi_hba *mhba,
mhba 2049 drivers/scsi/mvumi.c dev_warn(&mhba->pdev->dev, "unexpected data direction[%d] "
mhba 2058 drivers/scsi/mvumi.c if (mvumi_make_sgl(mhba, scmd, &pframe->payload[0],
mhba 2086 drivers/scsi/mvumi.c struct mvumi_hba *mhba;
mhba 2091 drivers/scsi/mvumi.c mhba = (struct mvumi_hba *) shost->hostdata;
mhba 2093 drivers/scsi/mvumi.c cmd = mvumi_get_cmd(mhba);
mhba 2099 drivers/scsi/mvumi.c if (unlikely(mvumi_build_frame(mhba, scmd, cmd)))
mhba 2104 drivers/scsi/mvumi.c mhba->instancet->fire_cmd(mhba, cmd);
mhba 2109 drivers/scsi/mvumi.c mvumi_return_cmd(mhba, cmd);
mhba 2119 drivers/scsi/mvumi.c struct mvumi_hba *mhba = shost_priv(host);
mhba 2122 drivers/scsi/mvumi.c spin_lock_irqsave(mhba->shost->host_lock, flags);
mhba 2124 drivers/scsi/mvumi.c if (mhba->tag_cmd[cmd->frame->tag]) {
mhba 2125 drivers/scsi/mvumi.c mhba->tag_cmd[cmd->frame->tag] = NULL;
mhba 2126 drivers/scsi/mvumi.c tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
mhba 2131 drivers/scsi/mvumi.c atomic_dec(&mhba->fw_outstanding);
mhba 2136 drivers/scsi/mvumi.c dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
mhba 2140 drivers/scsi/mvumi.c mvumi_return_cmd(mhba, cmd);
mhba 2141 drivers/scsi/mvumi.c spin_unlock_irqrestore(mhba->shost->host_lock, flags);
mhba 2187 drivers/scsi/mvumi.c static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba)
mhba 2192 drivers/scsi/mvumi.c switch (mhba->pdev->device) {
mhba 2194 drivers/scsi/mvumi.c mhba->mmio = mhba->base_addr[0];
mhba 2195 drivers/scsi/mvumi.c base = mhba->mmio;
mhba 2196 drivers/scsi/mvumi.c if (!mhba->regs) {
mhba 2197 drivers/scsi/mvumi.c mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
mhba 2198 drivers/scsi/mvumi.c if (mhba->regs == NULL)
mhba 2201 drivers/scsi/mvumi.c regs = mhba->regs;
mhba 2246 drivers/scsi/mvumi.c mhba->mmio = mhba->base_addr[2];
mhba 2247 drivers/scsi/mvumi.c base = mhba->mmio;
mhba 2248 drivers/scsi/mvumi.c if (!mhba->regs) {
mhba 2249 drivers/scsi/mvumi.c mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
mhba 2250 drivers/scsi/mvumi.c if (mhba->regs == NULL)
mhba 2253 drivers/scsi/mvumi.c regs = mhba->regs;
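mvumi_read_fw_status_reg() (lines 1971-1977 above) reads the ARM-to-PCIe doorbell and immediately writes the same value back, the usual write-1-to-clear acknowledge, so a second read returns 0 until the firmware rings again. Modeled here with a plain variable in place of the register:

    #include <stdio.h>

    static unsigned int doorbell = 0x5;     /* two pending doorbell bits */

    static unsigned int read_fw_status(void)
    {
        unsigned int status = doorbell;     /* ioread32() */

        if (status)
            doorbell &= ~status;            /* iowrite32(status, reg): W1C */
        return status;
    }

    int main(void)
    {
        unsigned int first = read_fw_status();
        unsigned int second = read_fw_status();

        printf("first=%#x second=%#x\n", first, second);
        return 0;
    }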
mhba 2311 drivers/scsi/mvumi.c static int mvumi_init_fw(struct mvumi_hba *mhba)
mhba 2315 drivers/scsi/mvumi.c if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) {
mhba 2316 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev, "IO memory region busy!\n");
mhba 2319 drivers/scsi/mvumi.c ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
mhba 2323 drivers/scsi/mvumi.c switch (mhba->pdev->device) {
mhba 2325 drivers/scsi/mvumi.c mhba->instancet = &mvumi_instance_9143;
mhba 2326 drivers/scsi/mvumi.c mhba->io_seq = 0;
mhba 2327 drivers/scsi/mvumi.c mhba->max_sge = MVUMI_MAX_SG_ENTRY;
mhba 2328 drivers/scsi/mvumi.c mhba->request_id_enabled = 1;
mhba 2331 drivers/scsi/mvumi.c mhba->instancet = &mvumi_instance_9580;
mhba 2332 drivers/scsi/mvumi.c mhba->io_seq = 0;
mhba 2333 drivers/scsi/mvumi.c mhba->max_sge = MVUMI_MAX_SG_ENTRY;
mhba 2336 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n",
mhba 2337 drivers/scsi/mvumi.c mhba->pdev->device);
mhba 2338 drivers/scsi/mvumi.c mhba->instancet = NULL;
mhba 2342 drivers/scsi/mvumi.c dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n",
mhba 2343 drivers/scsi/mvumi.c mhba->pdev->device);
mhba 2344 drivers/scsi/mvumi.c ret = mvumi_cfg_hw_reg(mhba);
mhba 2346 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev,
mhba 2351 drivers/scsi/mvumi.c mhba->handshake_page = dma_alloc_coherent(&mhba->pdev->dev,
mhba 2352 drivers/scsi/mvumi.c HSP_MAX_SIZE, &mhba->handshake_page_phys, GFP_KERNEL);
mhba 2353 drivers/scsi/mvumi.c if (!mhba->handshake_page) {
mhba 2354 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev,
mhba 2360 drivers/scsi/mvumi.c if (mvumi_start(mhba)) {
mhba 2364 drivers/scsi/mvumi.c ret = mvumi_alloc_cmds(mhba);
mhba 2371 drivers/scsi/mvumi.c mvumi_release_mem_resource(mhba);
mhba 2372 drivers/scsi/mvumi.c dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
mhba 2373 drivers/scsi/mvumi.c mhba->handshake_page, mhba->handshake_page_phys);
mhba 2375 drivers/scsi/mvumi.c kfree(mhba->regs);
mhba 2377 drivers/scsi/mvumi.c mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
mhba 2379 drivers/scsi/mvumi.c pci_release_regions(mhba->pdev);
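mvumi_init_fw() above acquires PCI regions, BAR mappings, register storage, and the handshake page in order, and the error entries from line 2371 down release them in reverse. A standalone sketch of that goto-unwind idiom, with malloc() standing in for each acquisition:

    #include <stdio.h>
    #include <stdlib.h>

    static int init_fw_like(void)
    {
        void *regions, *regs, *page;

        regions = malloc(16);           /* pci_request_regions() stand-in */
        if (!regions)
            goto fail_regions;
        regs = malloc(32);              /* kzalloc(sizeof(*regs)) stand-in */
        if (!regs)
            goto fail_regs;
        page = malloc(64);              /* handshake page stand-in */
        if (!page)
            goto fail_page;

        printf("init ok\n");            /* normal teardown happens elsewhere */
        free(page);
        free(regs);
        free(regions);
        return 0;

    fail_page:
        free(regs);
    fail_regs:
        free(regions);
    fail_regions:
        return -1;
    }

    int main(void)
    {
        return init_fw_like();
    }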
mhba 2388 drivers/scsi/mvumi.c static int mvumi_io_attach(struct mvumi_hba *mhba)
mhba 2390 drivers/scsi/mvumi.c struct Scsi_Host *host = mhba->shost;
mhba 2393 drivers/scsi/mvumi.c unsigned int max_sg = (mhba->ib_max_size + 4 -
mhba 2396 drivers/scsi/mvumi.c host->irq = mhba->pdev->irq;
mhba 2397 drivers/scsi/mvumi.c host->unique_id = mhba->unique_id;
mhba 2398 drivers/scsi/mvumi.c host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
mhba 2399 drivers/scsi/mvumi.c host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge;
mhba 2400 drivers/scsi/mvumi.c host->max_sectors = mhba->max_transfer_size / 512;
mhba 2401 drivers/scsi/mvumi.c host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
mhba 2402 drivers/scsi/mvumi.c host->max_id = mhba->max_target_id;
mhba 2405 drivers/scsi/mvumi.c ret = scsi_add_host(host, &mhba->pdev->dev);
mhba 2407 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev, "scsi_add_host failed\n");
mhba 2410 drivers/scsi/mvumi.c mhba->fw_flag |= MVUMI_FW_ATTACH;
mhba 2412 drivers/scsi/mvumi.c mutex_lock(&mhba->sas_discovery_mutex);
mhba 2413 drivers/scsi/mvumi.c if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
mhba 2414 drivers/scsi/mvumi.c ret = scsi_add_device(host, 0, mhba->max_target_id - 1, 0);
mhba 2418 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev, "add virtual device failed\n");
mhba 2419 drivers/scsi/mvumi.c mutex_unlock(&mhba->sas_discovery_mutex);
mhba 2423 drivers/scsi/mvumi.c mhba->dm_thread = kthread_create(mvumi_rescan_bus,
mhba 2424 drivers/scsi/mvumi.c mhba, "mvumi_scanthread");
mhba 2425 drivers/scsi/mvumi.c if (IS_ERR(mhba->dm_thread)) {
mhba 2426 drivers/scsi/mvumi.c dev_err(&mhba->pdev->dev,
mhba 2428 drivers/scsi/mvumi.c mutex_unlock(&mhba->sas_discovery_mutex);
mhba 2431 drivers/scsi/mvumi.c atomic_set(&mhba->pnp_count, 1);
mhba 2432 drivers/scsi/mvumi.c wake_up_process(mhba->dm_thread);
mhba 2434 drivers/scsi/mvumi.c mutex_unlock(&mhba->sas_discovery_mutex);
mhba 2438 drivers/scsi/mvumi.c if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
mhba 2439 drivers/scsi/mvumi.c sdev = scsi_device_lookup(mhba->shost, 0,
mhba 2440 drivers/scsi/mvumi.c mhba->max_target_id - 1, 0);
mhba 2446 drivers/scsi/mvumi.c scsi_remove_host(mhba->shost);
mhba 2458 drivers/scsi/mvumi.c struct mvumi_hba *mhba;
mhba 2473 drivers/scsi/mvumi.c host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
mhba 2479 drivers/scsi/mvumi.c mhba = shost_priv(host);
mhba 2481 drivers/scsi/mvumi.c INIT_LIST_HEAD(&mhba->cmd_pool);
mhba 2482 drivers/scsi/mvumi.c INIT_LIST_HEAD(&mhba->ob_data_list);
mhba 2483 drivers/scsi/mvumi.c INIT_LIST_HEAD(&mhba->free_ob_list);
mhba 2484 drivers/scsi/mvumi.c INIT_LIST_HEAD(&mhba->res_list);
mhba 2485 drivers/scsi/mvumi.c INIT_LIST_HEAD(&mhba->waiting_req_list);
mhba 2486 drivers/scsi/mvumi.c mutex_init(&mhba->device_lock);
mhba 2487 drivers/scsi/mvumi.c INIT_LIST_HEAD(&mhba->mhba_dev_list);
mhba 2488 drivers/scsi/mvumi.c INIT_LIST_HEAD(&mhba->shost_dev_list);
mhba 2489 drivers/scsi/mvumi.c atomic_set(&mhba->fw_outstanding, 0);
mhba 2490 drivers/scsi/mvumi.c init_waitqueue_head(&mhba->int_cmd_wait_q);
mhba 2491 drivers/scsi/mvumi.c mutex_init(&mhba->sas_discovery_mutex);
mhba 2493 drivers/scsi/mvumi.c mhba->pdev = pdev;
mhba 2494 drivers/scsi/mvumi.c mhba->shost = host;
mhba 2495 drivers/scsi/mvumi.c mhba->unique_id = pdev->bus->number << 8 | pdev->devfn;
mhba 2497 drivers/scsi/mvumi.c ret = mvumi_init_fw(mhba);
mhba 2501 drivers/scsi/mvumi.c ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
mhba 2502 drivers/scsi/mvumi.c "mvumi", mhba);
mhba 2508 drivers/scsi/mvumi.c mhba->instancet->enable_intr(mhba);
mhba 2509 drivers/scsi/mvumi.c pci_set_drvdata(pdev, mhba);
mhba 2511 drivers/scsi/mvumi.c ret = mvumi_io_attach(mhba);
mhba 2515 drivers/scsi/mvumi.c mvumi_backup_bar_addr(mhba);
mhba 2521 drivers/scsi/mvumi.c mhba->instancet->disable_intr(mhba);
mhba 2522 drivers/scsi/mvumi.c free_irq(mhba->pdev->irq, mhba);
mhba 2524 drivers/scsi/mvumi.c mvumi_release_fw(mhba);
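mvumi_io_attach() (lines 2388-2402 above) derives the Scsi_Host limits from handshake results: queue depth is max_io minus one reserved slot, the SG table is capped by how many descriptors fit in an inbound frame, and max_sectors comes straight from max_transfer_size. A worked recomputation with made-up sizes, since the listing truncates the max_sg expression; the frame-header and descriptor sizes below are assumptions:

    #include <stdio.h>

    int main(void)
    {
        unsigned int ib_max_size = 128;         /* from handshake page 1 */
        unsigned int frame_hdr = 64, sgd = 16;  /* assumed sizes */
        unsigned int max_io = 32, max_sge = 24;
        unsigned int max_transfer_size = 1 << 20;

        unsigned int max_sg = (ib_max_size + 4 - frame_hdr) / sgd;
        unsigned int can_queue = (max_io - 1) ? (max_io - 1) : 1;
        unsigned int sg_tablesize = max_sge > max_sg ? max_sg : max_sge;
        unsigned int max_sectors = max_transfer_size / 512;

        printf("can_queue=%u sg_tablesize=%u max_sectors=%u\n",
               can_queue, sg_tablesize, max_sectors);
        return 0;
    }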
mhba 2538 drivers/scsi/mvumi.c struct mvumi_hba *mhba;
mhba 2540 drivers/scsi/mvumi.c mhba = pci_get_drvdata(pdev);
mhba 2541 drivers/scsi/mvumi.c if (mhba->dm_thread) {
mhba 2542 drivers/scsi/mvumi.c kthread_stop(mhba->dm_thread);
mhba 2543 drivers/scsi/mvumi.c mhba->dm_thread = NULL;
mhba 2546 drivers/scsi/mvumi.c mvumi_detach_devices(mhba);
mhba 2547 drivers/scsi/mvumi.c host = mhba->shost;
mhba 2548 drivers/scsi/mvumi.c scsi_remove_host(mhba->shost);
mhba 2549 drivers/scsi/mvumi.c mvumi_flush_cache(mhba);
mhba 2551 drivers/scsi/mvumi.c mhba->instancet->disable_intr(mhba);
mhba 2552 drivers/scsi/mvumi.c free_irq(mhba->pdev->irq, mhba);
mhba 2553 drivers/scsi/mvumi.c mvumi_release_fw(mhba);
mhba 2565 drivers/scsi/mvumi.c struct mvumi_hba *mhba = pci_get_drvdata(pdev);
mhba 2567 drivers/scsi/mvumi.c mvumi_flush_cache(mhba);
mhba 2572 drivers/scsi/mvumi.c struct mvumi_hba *mhba = NULL;
mhba 2574 drivers/scsi/mvumi.c mhba = pci_get_drvdata(pdev);
mhba 2575 drivers/scsi/mvumi.c mvumi_flush_cache(mhba);
mhba 2577 drivers/scsi/mvumi.c pci_set_drvdata(pdev, mhba);
mhba 2578 drivers/scsi/mvumi.c mhba->instancet->disable_intr(mhba);
mhba 2579 drivers/scsi/mvumi.c free_irq(mhba->pdev->irq, mhba);
mhba 2580 drivers/scsi/mvumi.c mvumi_unmap_pci_addr(pdev, mhba->base_addr);
mhba 2592 drivers/scsi/mvumi.c struct mvumi_hba *mhba = NULL;
mhba 2594 drivers/scsi/mvumi.c mhba = pci_get_drvdata(pdev);
mhba 2610 drivers/scsi/mvumi.c ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
mhba 2613 drivers/scsi/mvumi.c ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
mhba 2617 drivers/scsi/mvumi.c if (mvumi_cfg_hw_reg(mhba)) {
mhba 2622 drivers/scsi/mvumi.c mhba->mmio = mhba->base_addr[0];
mhba 2623 drivers/scsi/mvumi.c mvumi_reset(mhba);
mhba 2625 drivers/scsi/mvumi.c if (mvumi_start(mhba)) {
mhba 2630 drivers/scsi/mvumi.c ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
mhba 2631 drivers/scsi/mvumi.c "mvumi", mhba);
mhba 2636 drivers/scsi/mvumi.c mhba->instancet->enable_intr(mhba);
mhba 2641 drivers/scsi/mvumi.c mvumi_unmap_pci_addr(pdev, mhba->base_addr);
mhba 157 drivers/scsi/mvumi.h struct mvumi_hba *mhba;