Lines Matching refs:mhba (drivers/scsi/mvumi.c)
68 static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st) in tag_get_one() argument
74 static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st, in tag_release_one() argument
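
The two tag helpers above behave like a LIFO free-tag stack: tag_get_one() pops a free tag, tag_release_one() pushes it back, and tag_is_empty() (used in mvumi_send_command() further down) guards the pop. A minimal userspace model, with the struct layout assumed from usage (mhba->tag_pool.stack appears later in the listing) rather than quoted from mvumi.h:

    #include <assert.h>

    struct tag_stack_model {            /* hypothetical stand-in for struct mvumi_tag */
        unsigned short *stack;          /* pool of free tags */
        unsigned short top;             /* number of free tags currently stacked */
        unsigned short size;            /* total tags in the pool */
    };

    static unsigned short tag_get_one_model(struct tag_stack_model *st)
    {
        assert(st->top > 0);            /* callers check tag_is_empty() first */
        return st->stack[--st->top];    /* pop the most recently freed tag */
    }

    static void tag_release_one_model(struct tag_stack_model *st,
                                      unsigned short tag)
    {
        assert(st->top < st->size);
        st->stack[st->top++] = tag;     /* push the tag back for reuse */
    }
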
121 static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba, in mvumi_alloc_mem_resource() argument
127 dev_err(&mhba->pdev->dev, in mvumi_alloc_mem_resource()
136 dev_err(&mhba->pdev->dev, in mvumi_alloc_mem_resource()
145 res->virt_addr = pci_zalloc_consistent(mhba->pdev, size, in mvumi_alloc_mem_resource()
148 dev_err(&mhba->pdev->dev, in mvumi_alloc_mem_resource()
157 dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type); in mvumi_alloc_mem_resource()
165 list_add_tail(&res->entry, &mhba->res_list); in mvumi_alloc_mem_resource()
170 static void mvumi_release_mem_resource(struct mvumi_hba *mhba) in mvumi_release_mem_resource() argument
174 list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) { in mvumi_release_mem_resource()
177 pci_free_consistent(mhba->pdev, res->size, in mvumi_release_mem_resource()
184 dev_err(&mhba->pdev->dev, in mvumi_release_mem_resource()
191 mhba->fw_flag &= ~MVUMI_FW_ALLOC; in mvumi_release_mem_resource()
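
mvumi_alloc_mem_resource()/mvumi_release_mem_resource() follow a tracked-allocation pattern: every buffer is recorded on mhba->res_list together with its type and size, so teardown is one list walk that frees each entry the way it was allocated. A sketch of that shape (field and enum names assumed; real DMA handles omitted):

    #include <stdlib.h>

    enum res_type_model { RES_CACHED, RES_UNCACHED };  /* stands in for the driver's types */

    struct res_model {                  /* hypothetical stand-in for struct mvumi_res */
        struct res_model *next;
        enum res_type_model type;
        size_t size;
        void *virt_addr;                /* pci_zalloc_consistent() result in the driver */
    };

    static struct res_model *alloc_res(struct res_model **list,
                                       enum res_type_model type, size_t size)
    {
        struct res_model *res = calloc(1, sizeof(*res));
        if (!res)
            return NULL;
        res->virt_addr = calloc(1, size);
        if (!res->virt_addr) {
            free(res);
            return NULL;
        }
        res->type = type;
        res->size = size;
        res->next = *list;              /* list_add_tail(&res->entry, ...) in the driver */
        *list = res;
        return res;
    }

    static void release_res(struct res_model **list)
    {
        while (*list) {
            struct res_model *res = *list;
            *list = res->next;
            free(res->virt_addr);       /* pci_free_consistent() in the driver */
            free(res);
        }
    }
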
203 static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd, in mvumi_make_sgl() argument
214 *sg_count = pci_map_sg(mhba->pdev, sg, sgnum, in mvumi_make_sgl()
216 if (*sg_count > mhba->max_sge) { in mvumi_make_sgl()
217 dev_err(&mhba->pdev->dev, "sg count[0x%x] is bigger " in mvumi_make_sgl()
219 *sg_count, mhba->max_sge); in mvumi_make_sgl()
227 sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(&sg[i]))); in mvumi_make_sgl()
229 m_sg->flags |= 1U << mhba->eot_flag; in mvumi_make_sgl()
231 sgd_inc(mhba, m_sg); in mvumi_make_sgl()
235 pci_map_single(mhba->pdev, scsi_sglist(scmd), in mvumi_make_sgl()
242 m_sg->flags = 1U << mhba->eot_flag; in mvumi_make_sgl()
243 sgd_setsz(mhba, m_sg, cpu_to_le32(scsi_bufflen(scmd))); in mvumi_make_sgl()
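
mvumi_make_sgl() maps the scatterlist, writes one hardware SG descriptor per segment, and flags the last descriptor with an end-of-table bit (1U << mhba->eot_flag, bit 22 or 27 depending on the compact-SG capability set in mvumi_hs_process_page() below). A simplified model of that loop, with the descriptor layout assumed:

    #include <stdint.h>

    struct sg_desc_model {              /* hypothetical hardware SG descriptor */
        uint32_t baseaddr_l;
        uint32_t baseaddr_h;
        uint32_t flags;                 /* size bits plus the end-of-table flag */
    };

    static void make_sgl_model(struct sg_desc_model *m_sg,
                               const uint64_t *dma_addr, const uint32_t *dma_len,
                               unsigned int sg_count, unsigned int eot_flag)
    {
        for (unsigned int i = 0; i < sg_count; i++, m_sg++) {
            m_sg->baseaddr_l = (uint32_t)dma_addr[i];
            m_sg->baseaddr_h = (uint32_t)(dma_addr[i] >> 32);
            m_sg->flags = dma_len[i];           /* sgd_setsz() packs the length */
            if (i == sg_count - 1)
                m_sg->flags |= 1U << eot_flag;  /* mark the final entry */
        }
    }
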
250 static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd, in mvumi_internal_cmd_sgl() argument
260 virt_addr = pci_zalloc_consistent(mhba->pdev, size, &phy_addr); in mvumi_internal_cmd_sgl()
270 m_sg->flags = 1U << mhba->eot_flag; in mvumi_internal_cmd_sgl()
271 sgd_setsz(mhba, m_sg, cpu_to_le32(size)); in mvumi_internal_cmd_sgl()
276 static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba, in mvumi_create_internal_cmd() argument
283 dev_err(&mhba->pdev->dev, "failed to create a internal cmd\n"); in mvumi_create_internal_cmd()
288 cmd->frame = pci_alloc_consistent(mhba->pdev, in mvumi_create_internal_cmd()
289 mhba->ib_max_size, &cmd->frame_phys); in mvumi_create_internal_cmd()
291 dev_err(&mhba->pdev->dev, "failed to allocate memory for FW" in mvumi_create_internal_cmd()
292 " frame,size = %d.\n", mhba->ib_max_size); in mvumi_create_internal_cmd()
298 if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) { in mvumi_create_internal_cmd()
299 dev_err(&mhba->pdev->dev, "failed to allocate memory" in mvumi_create_internal_cmd()
301 pci_free_consistent(mhba->pdev, mhba->ib_max_size, in mvumi_create_internal_cmd()
312 static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba, in mvumi_delete_internal_cmd() argument
322 sgd_getsz(mhba, m_sg, size); in mvumi_delete_internal_cmd()
327 pci_free_consistent(mhba->pdev, size, cmd->data_buf, in mvumi_delete_internal_cmd()
330 pci_free_consistent(mhba->pdev, mhba->ib_max_size, in mvumi_delete_internal_cmd()
342 static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba) in mvumi_get_cmd() argument
346 if (likely(!list_empty(&mhba->cmd_pool))) { in mvumi_get_cmd()
347 cmd = list_entry((&mhba->cmd_pool)->next, in mvumi_get_cmd()
351 dev_warn(&mhba->pdev->dev, "command pool is empty!\n"); in mvumi_get_cmd()
361 static inline void mvumi_return_cmd(struct mvumi_hba *mhba, in mvumi_return_cmd() argument
365 list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool); in mvumi_return_cmd()
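
mvumi_get_cmd()/mvumi_return_cmd() are a plain free-list command pool: pop the head of mhba->cmd_pool, push finished commands back. A singly linked userspace model of the same idea (the driver uses the kernel list API under the host lock):

    struct cmd_model {                  /* hypothetical stand-in for struct mvumi_cmd */
        struct cmd_model *next;
    };

    /* Pop one command; returns NULL when the pool is exhausted,
     * which corresponds to the "command pool is empty!" warning above. */
    static struct cmd_model *get_cmd_model(struct cmd_model **pool)
    {
        struct cmd_model *cmd = *pool;
        if (cmd)
            *pool = cmd->next;          /* list_del_init() in the driver */
        return cmd;
    }

    static void return_cmd_model(struct cmd_model **pool, struct cmd_model *cmd)
    {
        cmd->next = *pool;              /* list_add_tail() in the driver */
        *pool = cmd;
    }
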
372 static void mvumi_free_cmds(struct mvumi_hba *mhba) in mvumi_free_cmds() argument
376 while (!list_empty(&mhba->cmd_pool)) { in mvumi_free_cmds()
377 cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd, in mvumi_free_cmds()
380 if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)) in mvumi_free_cmds()
391 static int mvumi_alloc_cmds(struct mvumi_hba *mhba) in mvumi_alloc_cmds() argument
396 for (i = 0; i < mhba->max_io; i++) { in mvumi_alloc_cmds()
402 list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool); in mvumi_alloc_cmds()
403 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) { in mvumi_alloc_cmds()
404 cmd->frame = mhba->ib_frame + i * mhba->ib_max_size; in mvumi_alloc_cmds()
405 cmd->frame_phys = mhba->ib_frame_phys in mvumi_alloc_cmds()
406 + i * mhba->ib_max_size; in mvumi_alloc_cmds()
408 cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL); in mvumi_alloc_cmds()
415 dev_err(&mhba->pdev->dev, in mvumi_alloc_cmds()
417 while (!list_empty(&mhba->cmd_pool)) { in mvumi_alloc_cmds()
418 cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd, in mvumi_alloc_cmds()
421 if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)) in mvumi_alloc_cmds()
428 static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba) in mvumi_check_ib_list_9143() argument
431 struct mvumi_hw_regs *regs = mhba->regs; in mvumi_check_ib_list_9143()
433 ib_rp_reg = ioread32(mhba->regs->inb_read_pointer); in mvumi_check_ib_list_9143()
436 (mhba->ib_cur_slot & regs->cl_slot_num_mask)) && in mvumi_check_ib_list_9143()
438 != (mhba->ib_cur_slot & regs->cl_pointer_toggle)))) { in mvumi_check_ib_list_9143()
439 dev_warn(&mhba->pdev->dev, "no free slot to use.\n"); in mvumi_check_ib_list_9143()
442 if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) { in mvumi_check_ib_list_9143()
443 dev_warn(&mhba->pdev->dev, "firmware io overflow.\n"); in mvumi_check_ib_list_9143()
446 return mhba->max_io - atomic_read(&mhba->fw_outstanding); in mvumi_check_ib_list_9143()
450 static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba) in mvumi_check_ib_list_9580() argument
453 if (atomic_read(&mhba->fw_outstanding) >= (mhba->max_io - 1)) in mvumi_check_ib_list_9580()
455 count = ioread32(mhba->ib_shadow); in mvumi_check_ib_list_9580()
461 static void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry) in mvumi_get_ib_list_entry() argument
465 cur_ib_entry = mhba->ib_cur_slot & mhba->regs->cl_slot_num_mask; in mvumi_get_ib_list_entry()
467 if (cur_ib_entry >= mhba->list_num_io) { in mvumi_get_ib_list_entry()
468 cur_ib_entry -= mhba->list_num_io; in mvumi_get_ib_list_entry()
469 mhba->ib_cur_slot ^= mhba->regs->cl_pointer_toggle; in mvumi_get_ib_list_entry()
471 mhba->ib_cur_slot &= ~mhba->regs->cl_slot_num_mask; in mvumi_get_ib_list_entry()
472 mhba->ib_cur_slot |= (cur_ib_entry & mhba->regs->cl_slot_num_mask); in mvumi_get_ib_list_entry()
473 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) { in mvumi_get_ib_list_entry()
474 *ib_entry = mhba->ib_list + cur_ib_entry * in mvumi_get_ib_list_entry()
477 *ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size; in mvumi_get_ib_list_entry()
479 atomic_inc(&mhba->fw_outstanding); in mvumi_get_ib_list_entry()
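
mvumi_get_ib_list_entry() advances a current-slot cookie whose low bits index the circular inbound list while a toggle bit flips on every wrap; mvumi_check_ib_list_9143() above uses the same cookie to tell a full ring (same index, different toggle) from an empty one (cookies equal). The arithmetic, with the two masks given example values:

    #include <stdint.h>

    #define SLOT_NUM_MASK   0x0fffU     /* example value for regs->cl_slot_num_mask */
    #define POINTER_TOGGLE  0x4000U     /* example value for regs->cl_pointer_toggle */

    /* Advance cur_slot by one entry in a ring of list_num_io slots,
     * flipping the toggle bit whenever the index wraps. */
    static uint32_t advance_slot(uint32_t cur_slot, uint32_t list_num_io)
    {
        uint32_t entry = (cur_slot & SLOT_NUM_MASK) + 1;

        if (entry >= list_num_io) {
            entry -= list_num_io;
            cur_slot ^= POINTER_TOGGLE;
        }
        cur_slot &= ~SLOT_NUM_MASK;
        cur_slot |= entry & SLOT_NUM_MASK;
        return cur_slot;
    }

    /* Full: same slot index but the toggle bits disagree. */
    static int ring_full(uint32_t read_ptr, uint32_t cur_slot)
    {
        return (read_ptr & SLOT_NUM_MASK) == (cur_slot & SLOT_NUM_MASK) &&
               (read_ptr & POINTER_TOGGLE) != (cur_slot & POINTER_TOGGLE);
    }
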
482 static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba) in mvumi_send_ib_list_entry() argument
484 iowrite32(0xffff, mhba->ib_shadow); in mvumi_send_ib_list_entry()
485 iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer); in mvumi_send_ib_list_entry()
488 static char mvumi_check_ob_frame(struct mvumi_hba *mhba, in mvumi_check_ob_frame() argument
494 p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size; in mvumi_check_ob_frame()
497 if (tag > mhba->tag_pool.size) { in mvumi_check_ob_frame()
498 dev_err(&mhba->pdev->dev, "ob frame data error\n"); in mvumi_check_ob_frame()
501 if (mhba->tag_cmd[tag] == NULL) { in mvumi_check_ob_frame()
502 dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag); in mvumi_check_ob_frame()
504 } else if (mhba->tag_cmd[tag]->request_id != request_id && in mvumi_check_ob_frame()
505 mhba->request_id_enabled) { in mvumi_check_ob_frame()
506 dev_err(&mhba->pdev->dev, "request ID from FW:0x%x," in mvumi_check_ob_frame()
508 mhba->tag_cmd[tag]->request_id); in mvumi_check_ob_frame()
515 static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba, in mvumi_check_ob_list_9143() argument
519 struct mvumi_hw_regs *regs = mhba->regs; in mvumi_check_ob_list_9143()
523 ob_write_shadow = ioread32(mhba->ob_shadow); in mvumi_check_ob_list_9143()
526 *cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask; in mvumi_check_ob_list_9143()
527 *assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask; in mvumi_check_ob_list_9143()
530 (mhba->ob_cur_slot & regs->cl_pointer_toggle)) { in mvumi_check_ob_list_9143()
531 *assign_obf_end += mhba->list_num_io; in mvumi_check_ob_list_9143()
536 static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba, in mvumi_check_ob_list_9580() argument
540 struct mvumi_hw_regs *regs = mhba->regs; in mvumi_check_ob_list_9580()
544 *cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask; in mvumi_check_ob_list_9580()
545 *assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask; in mvumi_check_ob_list_9580()
547 *assign_obf_end += mhba->list_num_io; in mvumi_check_ob_list_9580()
553 static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba) in mvumi_receive_ob_list_entry() argument
558 struct mvumi_hw_regs *regs = mhba->regs; in mvumi_receive_ob_list_entry()
560 if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end)) in mvumi_receive_ob_list_entry()
565 if (cur_obf >= mhba->list_num_io) { in mvumi_receive_ob_list_entry()
566 cur_obf -= mhba->list_num_io; in mvumi_receive_ob_list_entry()
567 mhba->ob_cur_slot ^= regs->cl_pointer_toggle; in mvumi_receive_ob_list_entry()
570 p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size; in mvumi_receive_ob_list_entry()
575 if (unlikely(p_outb_frame->tag > mhba->tag_pool.size || in mvumi_receive_ob_list_entry()
576 mhba->tag_cmd[p_outb_frame->tag] == NULL || in mvumi_receive_ob_list_entry()
578 mhba->tag_cmd[p_outb_frame->tag]->request_id)) in mvumi_receive_ob_list_entry()
579 if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame)) in mvumi_receive_ob_list_entry()
582 if (!list_empty(&mhba->ob_data_list)) { in mvumi_receive_ob_list_entry()
584 list_first_entry(&mhba->ob_data_list, in mvumi_receive_ob_list_entry()
590 cur_obf = mhba->list_num_io - 1; in mvumi_receive_ob_list_entry()
591 mhba->ob_cur_slot ^= regs->cl_pointer_toggle; in mvumi_receive_ob_list_entry()
597 memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size); in mvumi_receive_ob_list_entry()
600 list_add_tail(&ob_data->list, &mhba->free_ob_list); in mvumi_receive_ob_list_entry()
602 mhba->ob_cur_slot &= ~regs->cl_slot_num_mask; in mvumi_receive_ob_list_entry()
603 mhba->ob_cur_slot |= (cur_obf & regs->cl_slot_num_mask); in mvumi_receive_ob_list_entry()
604 iowrite32(mhba->ob_cur_slot, regs->outb_read_pointer); in mvumi_receive_ob_list_entry()
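
mvumi_receive_ob_list_entry() drains the outbound ring eagerly: each ready frame is copied into a spare buffer taken from mhba->ob_data_list and parked on mhba->free_ob_list, and only then is the slot acknowledged back through outb_read_pointer; the heavier completion work happens later in mvumi_handle_clob(). A rough drain-loop model (tag validation and index wrap elided; buffer size assumed):

    #include <string.h>

    struct ob_buf_model {               /* hypothetical stand-in for struct mvumi_ob_data */
        struct ob_buf_model *next;
        unsigned char data[128];        /* assumes frame_sz <= 128 in this model */
    };

    static void receive_ob_model(unsigned int cur, unsigned int end,
                                 const unsigned char *ob_list, unsigned int frame_sz,
                                 struct ob_buf_model **spare,
                                 struct ob_buf_model **done)
    {
        for (; cur != end; cur++) {
            struct ob_buf_model *buf = *spare;
            if (!buf)
                break;                  /* no spare buffer: stop draining for now */
            *spare = buf->next;
            memcpy(buf->data, ob_list + cur * frame_sz, frame_sz);
            buf->next = *done;          /* queued for mvumi_handle_clob() */
            *done = buf;
        }
    }
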
607 static void mvumi_reset(struct mvumi_hba *mhba) in mvumi_reset() argument
609 struct mvumi_hw_regs *regs = mhba->regs; in mvumi_reset()
618 static unsigned char mvumi_start(struct mvumi_hba *mhba);
620 static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba) in mvumi_wait_for_outstanding() argument
622 mhba->fw_state = FW_STATE_ABORT; in mvumi_wait_for_outstanding()
623 mvumi_reset(mhba); in mvumi_wait_for_outstanding()
625 if (mvumi_start(mhba)) in mvumi_wait_for_outstanding()
631 static int mvumi_wait_for_fw(struct mvumi_hba *mhba) in mvumi_wait_for_fw() argument
633 struct mvumi_hw_regs *regs = mhba->regs; in mvumi_wait_for_fw()
643 dev_err(&mhba->pdev->dev, in mvumi_wait_for_fw()
656 static void mvumi_backup_bar_addr(struct mvumi_hba *mhba) in mvumi_backup_bar_addr() argument
661 pci_read_config_dword(mhba->pdev, 0x10 + i * 4, in mvumi_backup_bar_addr()
662 &mhba->pci_base[i]); in mvumi_backup_bar_addr()
666 static void mvumi_restore_bar_addr(struct mvumi_hba *mhba) in mvumi_restore_bar_addr() argument
671 if (mhba->pci_base[i]) in mvumi_restore_bar_addr()
672 pci_write_config_dword(mhba->pdev, 0x10 + i * 4, in mvumi_restore_bar_addr()
673 mhba->pci_base[i]); in mvumi_restore_bar_addr()
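
mvumi_backup_bar_addr()/mvumi_restore_bar_addr() snapshot the PCI base address registers at config offsets 0x10 + i * 4 so they can be rewritten after the chip reset in mvumi_reset_host_9580() below; only BARs that held a nonzero value are restored. A model of the offset arithmetic (loop bound of six BARs assumed):

    #include <stdint.h>

    #define BAR0_OFFSET 0x10            /* first BAR in PCI config space */
    #define NUM_BARS    6               /* assumed loop bound */

    /* read32/write32 stand in for pci_read_config_dword()/pci_write_config_dword(). */
    static void backup_bars(uint32_t (*read32)(int off), uint32_t saved[NUM_BARS])
    {
        for (int i = 0; i < NUM_BARS; i++)
            saved[i] = read32(BAR0_OFFSET + i * 4);
    }

    static void restore_bars(void (*write32)(int off, uint32_t val),
                             const uint32_t saved[NUM_BARS])
    {
        for (int i = 0; i < NUM_BARS; i++)
            if (saved[i])               /* skip BARs that were empty */
                write32(BAR0_OFFSET + i * 4, saved[i]);
    }
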
691 static int mvumi_reset_host_9580(struct mvumi_hba *mhba) in mvumi_reset_host_9580() argument
693 mhba->fw_state = FW_STATE_ABORT; in mvumi_reset_host_9580()
695 iowrite32(0, mhba->regs->reset_enable); in mvumi_reset_host_9580()
696 iowrite32(0xf, mhba->regs->reset_request); in mvumi_reset_host_9580()
698 iowrite32(0x10, mhba->regs->reset_enable); in mvumi_reset_host_9580()
699 iowrite32(0x10, mhba->regs->reset_request); in mvumi_reset_host_9580()
701 pci_disable_device(mhba->pdev); in mvumi_reset_host_9580()
703 if (pci_enable_device(mhba->pdev)) { in mvumi_reset_host_9580()
704 dev_err(&mhba->pdev->dev, "enable device failed\n"); in mvumi_reset_host_9580()
707 if (mvumi_pci_set_master(mhba->pdev)) { in mvumi_reset_host_9580()
708 dev_err(&mhba->pdev->dev, "set master failed\n"); in mvumi_reset_host_9580()
711 mvumi_restore_bar_addr(mhba); in mvumi_reset_host_9580()
712 if (mvumi_wait_for_fw(mhba) == FAILED) in mvumi_reset_host_9580()
715 return mvumi_wait_for_outstanding(mhba); in mvumi_reset_host_9580()
718 static int mvumi_reset_host_9143(struct mvumi_hba *mhba) in mvumi_reset_host_9143() argument
720 return mvumi_wait_for_outstanding(mhba); in mvumi_reset_host_9143()
725 struct mvumi_hba *mhba; in mvumi_host_reset() local
727 mhba = (struct mvumi_hba *) scmd->device->host->hostdata; in mvumi_host_reset()
732 return mhba->instancet->reset_host(mhba); in mvumi_host_reset()
735 static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba, in mvumi_issue_blocked_cmd() argument
743 dev_err(&mhba->pdev->dev, in mvumi_issue_blocked_cmd()
750 spin_lock_irqsave(mhba->shost->host_lock, flags); in mvumi_issue_blocked_cmd()
751 mhba->instancet->fire_cmd(mhba, cmd); in mvumi_issue_blocked_cmd()
752 spin_unlock_irqrestore(mhba->shost->host_lock, flags); in mvumi_issue_blocked_cmd()
754 wait_event_timeout(mhba->int_cmd_wait_q, in mvumi_issue_blocked_cmd()
760 spin_lock_irqsave(mhba->shost->host_lock, flags); in mvumi_issue_blocked_cmd()
762 if (mhba->tag_cmd[cmd->frame->tag]) { in mvumi_issue_blocked_cmd()
763 mhba->tag_cmd[cmd->frame->tag] = 0; in mvumi_issue_blocked_cmd()
764 dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n", in mvumi_issue_blocked_cmd()
766 tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag); in mvumi_issue_blocked_cmd()
769 dev_warn(&mhba->pdev->dev, in mvumi_issue_blocked_cmd()
773 atomic_dec(&mhba->fw_outstanding); in mvumi_issue_blocked_cmd()
775 spin_unlock_irqrestore(mhba->shost->host_lock, flags); in mvumi_issue_blocked_cmd()
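
mvumi_issue_blocked_cmd() is the synchronous path used by the flush-cache, inquiry and event commands below: fire the frame under the host lock, sleep on mhba->int_cmd_wait_q until mvumi_complete_internal_cmd() wakes it, and on timeout reclaim the tag so the slot is not leaked. A userspace model of the wait/wake pairing (pthreads stand in for the kernel wait queue; the status constant's value is assumed):

    #include <pthread.h>

    #define REQ_STATUS_PENDING (-1)     /* value assumed for this model */

    struct blocked_cmd_model {
        pthread_mutex_t lock;           /* init with PTHREAD_MUTEX_INITIALIZER */
        pthread_cond_t  wait_q;         /* models mhba->int_cmd_wait_q */
        int cmd_status;                 /* set by the completion path */
    };

    static int issue_blocked_cmd_model(struct blocked_cmd_model *c,
                                       void (*fire_cmd)(void))
    {
        pthread_mutex_lock(&c->lock);
        c->cmd_status = REQ_STATUS_PENDING;
        fire_cmd();                         /* instancet->fire_cmd() in the driver */
        while (c->cmd_status == REQ_STATUS_PENDING)
            pthread_cond_wait(&c->wait_q, &c->lock);  /* driver adds a timeout */
        pthread_mutex_unlock(&c->lock);
        return c->cmd_status;
    }

    /* Completion side, called from the "ISR" in this model. */
    static void complete_blocked_cmd_model(struct blocked_cmd_model *c, int status)
    {
        pthread_mutex_lock(&c->lock);
        c->cmd_status = status;
        pthread_cond_signal(&c->wait_q);    /* wake_up(&mhba->int_cmd_wait_q) */
        pthread_mutex_unlock(&c->lock);
    }
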
780 static void mvumi_release_fw(struct mvumi_hba *mhba) in mvumi_release_fw() argument
782 mvumi_free_cmds(mhba); in mvumi_release_fw()
783 mvumi_release_mem_resource(mhba); in mvumi_release_fw()
784 mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr); in mvumi_release_fw()
785 pci_free_consistent(mhba->pdev, HSP_MAX_SIZE, in mvumi_release_fw()
786 mhba->handshake_page, mhba->handshake_page_phys); in mvumi_release_fw()
787 kfree(mhba->regs); in mvumi_release_fw()
788 pci_release_regions(mhba->pdev); in mvumi_release_fw()
791 static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba) in mvumi_flush_cache() argument
798 for (device_id = 0; device_id < mhba->max_target_id; device_id++) { in mvumi_flush_cache()
799 if (!(mhba->target_map[device_id / bitcount] & in mvumi_flush_cache()
802 get_cmd: cmd = mvumi_create_internal_cmd(mhba, 0); in mvumi_flush_cache()
805 dev_err(&mhba->pdev->dev, "failed to get memory" in mvumi_flush_cache()
827 mvumi_issue_blocked_cmd(mhba, cmd); in mvumi_flush_cache()
829 dev_err(&mhba->pdev->dev, in mvumi_flush_cache()
834 mvumi_delete_internal_cmd(mhba, cmd); in mvumi_flush_cache()
855 static void mvumi_hs_build_page(struct mvumi_hba *mhba, in mvumi_hs_build_page() argument
870 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) in mvumi_hs_build_page()
900 hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys); in mvumi_hs_build_page()
901 hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys); in mvumi_hs_build_page()
903 hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys); in mvumi_hs_build_page()
904 hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys); in mvumi_hs_build_page()
905 hs_page4->ib_entry_size = mhba->ib_max_size_setting; in mvumi_hs_build_page()
906 hs_page4->ob_entry_size = mhba->ob_max_size_setting; in mvumi_hs_build_page()
907 if (mhba->hba_capability in mvumi_hs_build_page()
910 &mhba->list_num_io, in mvumi_hs_build_page()
913 &mhba->list_num_io, in mvumi_hs_build_page()
916 hs_page4->ob_depth = (u8) mhba->list_num_io; in mvumi_hs_build_page()
917 hs_page4->ib_depth = (u8) mhba->list_num_io; in mvumi_hs_build_page()
924 dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n", in mvumi_hs_build_page()
934 static int mvumi_init_data(struct mvumi_hba *mhba) in mvumi_init_data() argument
942 if (mhba->fw_flag & MVUMI_FW_ALLOC) in mvumi_init_data()
945 tmp_size = mhba->ib_max_size * mhba->max_io; in mvumi_init_data()
946 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) in mvumi_init_data()
947 tmp_size += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io; in mvumi_init_data()
949 tmp_size += 128 + mhba->ob_max_size * mhba->max_io; in mvumi_init_data()
952 res_mgnt = mvumi_alloc_mem_resource(mhba, in mvumi_init_data()
955 dev_err(&mhba->pdev->dev, in mvumi_init_data()
966 mhba->ib_list = v; in mvumi_init_data()
967 mhba->ib_list_phys = p; in mvumi_init_data()
968 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) { in mvumi_init_data()
969 v += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io; in mvumi_init_data()
970 p += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io; in mvumi_init_data()
971 mhba->ib_frame = v; in mvumi_init_data()
972 mhba->ib_frame_phys = p; in mvumi_init_data()
974 v += mhba->ib_max_size * mhba->max_io; in mvumi_init_data()
975 p += mhba->ib_max_size * mhba->max_io; in mvumi_init_data()
981 mhba->ib_shadow = v; in mvumi_init_data()
982 mhba->ib_shadow_phys = p; in mvumi_init_data()
986 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) { in mvumi_init_data()
990 mhba->ob_shadow = v; in mvumi_init_data()
991 mhba->ob_shadow_phys = p; in mvumi_init_data()
998 mhba->ob_shadow = v; in mvumi_init_data()
999 mhba->ob_shadow_phys = p; in mvumi_init_data()
1009 mhba->ob_list = v; in mvumi_init_data()
1010 mhba->ob_list_phys = p; in mvumi_init_data()
1013 tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool)); in mvumi_init_data()
1016 res_mgnt = mvumi_alloc_mem_resource(mhba, in mvumi_init_data()
1019 dev_err(&mhba->pdev->dev, in mvumi_init_data()
1025 for (i = mhba->max_io; i != 0; i--) { in mvumi_init_data()
1027 list_add_tail(&ob_pool->list, &mhba->ob_data_list); in mvumi_init_data()
1028 virmem += mhba->ob_max_size + sizeof(*ob_pool); in mvumi_init_data()
1031 tmp_size = sizeof(unsigned short) * mhba->max_io + in mvumi_init_data()
1032 sizeof(struct mvumi_cmd *) * mhba->max_io; in mvumi_init_data()
1033 tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) / in mvumi_init_data()
1036 res_mgnt = mvumi_alloc_mem_resource(mhba, in mvumi_init_data()
1039 dev_err(&mhba->pdev->dev, in mvumi_init_data()
1045 mhba->tag_pool.stack = virmem; in mvumi_init_data()
1046 mhba->tag_pool.size = mhba->max_io; in mvumi_init_data()
1047 tag_init(&mhba->tag_pool, mhba->max_io); in mvumi_init_data()
1048 virmem += sizeof(unsigned short) * mhba->max_io; in mvumi_init_data()
1050 mhba->tag_cmd = virmem; in mvumi_init_data()
1051 virmem += sizeof(struct mvumi_cmd *) * mhba->max_io; in mvumi_init_data()
1053 mhba->target_map = virmem; in mvumi_init_data()
1055 mhba->fw_flag |= MVUMI_FW_ALLOC; in mvumi_init_data()
1059 mvumi_release_mem_resource(mhba); in mvumi_init_data()
1063 static int mvumi_hs_process_page(struct mvumi_hba *mhba, in mvumi_hs_process_page() argument
1072 dev_err(&mhba->pdev->dev, "checksum error\n"); in mvumi_hs_process_page()
1080 mhba->max_io = hs_page1->max_io_support; in mvumi_hs_process_page()
1081 mhba->list_num_io = hs_page1->cl_inout_list_depth; in mvumi_hs_process_page()
1082 mhba->max_transfer_size = hs_page1->max_transfer_size; in mvumi_hs_process_page()
1083 mhba->max_target_id = hs_page1->max_devices_support; in mvumi_hs_process_page()
1084 mhba->hba_capability = hs_page1->capability; in mvumi_hs_process_page()
1085 mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size; in mvumi_hs_process_page()
1086 mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2; in mvumi_hs_process_page()
1088 mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size; in mvumi_hs_process_page()
1089 mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2; in mvumi_hs_process_page()
1091 dev_dbg(&mhba->pdev->dev, "FW version:%d\n", in mvumi_hs_process_page()
1094 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG) in mvumi_hs_process_page()
1095 mhba->eot_flag = 22; in mvumi_hs_process_page()
1097 mhba->eot_flag = 27; in mvumi_hs_process_page()
1098 if (mhba->hba_capability & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF) in mvumi_hs_process_page()
1099 mhba->list_num_io = 1 << hs_page1->cl_inout_list_depth; in mvumi_hs_process_page()
1102 dev_err(&mhba->pdev->dev, "handshake: page code error\n"); in mvumi_hs_process_page()
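
Handshake page 1 delivers the list entry sizes as log2 encodings, and the driver expands them as (1 << setting) << 2, i.e. 4 * 2^setting bytes, as seen at the ib_max_size/ob_max_size assignments above. A tiny self-check of that formula:

    #include <assert.h>

    /* Byte size of a command-list entry from its handshake encoding. */
    static unsigned int entry_size(unsigned char setting)
    {
        return (1U << setting) << 2;    /* 4 * 2^setting */
    }

    int main(void)
    {
        assert(entry_size(4) == 64);
        assert(entry_size(5) == 128);
        return 0;
    }
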
1117 static int mvumi_handshake(struct mvumi_hba *mhba) in mvumi_handshake() argument
1121 struct mvumi_hw_regs *regs = mhba->regs; in mvumi_handshake()
1123 if (mhba->fw_state == FW_STATE_STARTING) in mvumi_handshake()
1128 dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state); in mvumi_handshake()
1130 mhba->fw_state = FW_STATE_STARTING; in mvumi_handshake()
1138 mhba->fw_state = FW_STATE_HANDSHAKING; in mvumi_handshake()
1147 iowrite32(lower_32_bits(mhba->handshake_page_phys), in mvumi_handshake()
1149 iowrite32(upper_32_bits(mhba->handshake_page_phys), in mvumi_handshake()
1160 hs_header = (struct mvumi_hs_header *) mhba->handshake_page; in mvumi_handshake()
1162 mhba->hba_total_pages = in mvumi_handshake()
1165 if (mhba->hba_total_pages == 0) in mvumi_handshake()
1166 mhba->hba_total_pages = HS_PAGE_TOTAL-1; in mvumi_handshake()
1170 if (mvumi_hs_process_page(mhba, hs_header)) { in mvumi_handshake()
1174 if (mvumi_init_data(mhba)) { in mvumi_handshake()
1180 mhba->hba_total_pages = HS_PAGE_TOTAL-1; in mvumi_handshake()
1183 if ((hs_header->page_code + 1) <= mhba->hba_total_pages) { in mvumi_handshake()
1186 mvumi_hs_build_page(mhba, hs_header); in mvumi_handshake()
1203 iowrite32(mhba->list_num_io, mhba->ib_shadow); in mvumi_handshake()
1205 iowrite32(lower_32_bits(mhba->ib_shadow_phys), in mvumi_handshake()
1207 iowrite32(upper_32_bits(mhba->ib_shadow_phys), in mvumi_handshake()
1210 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) { in mvumi_handshake()
1212 iowrite32((mhba->list_num_io-1) | in mvumi_handshake()
1214 mhba->ob_shadow); in mvumi_handshake()
1215 iowrite32(lower_32_bits(mhba->ob_shadow_phys), in mvumi_handshake()
1217 iowrite32(upper_32_bits(mhba->ob_shadow_phys), in mvumi_handshake()
1221 mhba->ib_cur_slot = (mhba->list_num_io - 1) | in mvumi_handshake()
1223 mhba->ob_cur_slot = (mhba->list_num_io - 1) | in mvumi_handshake()
1225 mhba->fw_state = FW_STATE_STARTED; in mvumi_handshake()
1229 dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n", in mvumi_handshake()
1236 static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba) in mvumi_handshake_event() argument
1242 mvumi_handshake(mhba); in mvumi_handshake_event()
1244 isr_status = mhba->instancet->read_fw_status_reg(mhba); in mvumi_handshake_event()
1246 if (mhba->fw_state == FW_STATE_STARTED) in mvumi_handshake_event()
1249 dev_err(&mhba->pdev->dev, in mvumi_handshake_event()
1251 mhba->fw_state); in mvumi_handshake_event()
1252 dev_err(&mhba->pdev->dev, in mvumi_handshake_event()
1254 mhba->global_isr, isr_status); in mvumi_handshake_event()
1264 static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba) in mvumi_check_handshake() argument
1270 tmp = ioread32(mhba->regs->arm_to_pciea_msg1); in mvumi_check_handshake()
1274 mhba->regs->pciea_to_arm_drbl_reg); in mvumi_check_handshake()
1276 dev_err(&mhba->pdev->dev, in mvumi_check_handshake()
1282 tmp = ioread32(mhba->regs->arm_to_pciea_msg1); in mvumi_check_handshake()
1285 mhba->fw_state = FW_STATE_STARTING; in mvumi_check_handshake()
1286 dev_dbg(&mhba->pdev->dev, "start firmware handshake...\n"); in mvumi_check_handshake()
1288 if (mvumi_handshake_event(mhba)) { in mvumi_check_handshake()
1289 dev_err(&mhba->pdev->dev, in mvumi_check_handshake()
1291 mhba->fw_state); in mvumi_check_handshake()
1294 } while (mhba->fw_state != FW_STATE_STARTED); in mvumi_check_handshake()
1296 dev_dbg(&mhba->pdev->dev, "firmware handshake done\n"); in mvumi_check_handshake()
1301 static unsigned char mvumi_start(struct mvumi_hba *mhba) in mvumi_start() argument
1304 struct mvumi_hw_regs *regs = mhba->regs; in mvumi_start()
1314 if (mvumi_check_handshake(mhba)) in mvumi_start()
1325 static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd, in mvumi_complete_cmd() argument
1355 pci_unmap_sg(mhba->pdev, in mvumi_complete_cmd()
1360 pci_unmap_single(mhba->pdev, in mvumi_complete_cmd()
1369 mvumi_return_cmd(mhba, cmd); in mvumi_complete_cmd()
1372 static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba, in mvumi_complete_internal_cmd() argument
1386 wake_up(&mhba->int_cmd_wait_q); in mvumi_complete_internal_cmd()
1390 static void mvumi_show_event(struct mvumi_hba *mhba, in mvumi_show_event() argument
1395 dev_warn(&mhba->pdev->dev, in mvumi_show_event()
1416 static int mvumi_handle_hotplug(struct mvumi_hba *mhba, u16 devid, int status) in mvumi_handle_hotplug() argument
1422 sdev = scsi_device_lookup(mhba->shost, 0, devid, 0); in mvumi_handle_hotplug()
1424 dev_dbg(&mhba->pdev->dev, "remove disk %d-%d-%d.\n", 0, in mvumi_handle_hotplug()
1430 dev_err(&mhba->pdev->dev, " no disk[%d] to remove\n", in mvumi_handle_hotplug()
1433 sdev = scsi_device_lookup(mhba->shost, 0, devid, 0); in mvumi_handle_hotplug()
1435 scsi_add_device(mhba->shost, 0, devid, 0); in mvumi_handle_hotplug()
1436 dev_dbg(&mhba->pdev->dev, " add disk %d-%d-%d.\n", 0, in mvumi_handle_hotplug()
1440 dev_err(&mhba->pdev->dev, " don't add disk %d-%d-%d.\n", in mvumi_handle_hotplug()
1448 static u64 mvumi_inquiry(struct mvumi_hba *mhba, in mvumi_inquiry() argument
1457 cmd = mvumi_create_internal_cmd(mhba, data_buf_len); in mvumi_inquiry()
1478 mvumi_issue_blocked_cmd(mhba, cmd); in mvumi_inquiry()
1481 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) in mvumi_inquiry()
1487 dev_dbg(&mhba->pdev->dev, in mvumi_inquiry()
1493 mvumi_delete_internal_cmd(mhba, cmd); in mvumi_inquiry()
1498 static void mvumi_detach_devices(struct mvumi_hba *mhba) in mvumi_detach_devices() argument
1503 mutex_lock(&mhba->device_lock); in mvumi_detach_devices()
1507 &mhba->shost_dev_list, list) { in mvumi_detach_devices()
1508 mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE); in mvumi_detach_devices()
1510 dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n", in mvumi_detach_devices()
1514 list_for_each_entry_safe(mv_dev, dev_next, &mhba->mhba_dev_list, list) { in mvumi_detach_devices()
1516 dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n", in mvumi_detach_devices()
1522 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) in mvumi_detach_devices()
1523 sdev = scsi_device_lookup(mhba->shost, 0, in mvumi_detach_devices()
1524 mhba->max_target_id - 1, 0); in mvumi_detach_devices()
1531 mutex_unlock(&mhba->device_lock); in mvumi_detach_devices()
1534 static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id) in mvumi_rescan_devices() argument
1538 sdev = scsi_device_lookup(mhba->shost, 0, id, 0); in mvumi_rescan_devices()
1545 static int mvumi_match_devices(struct mvumi_hba *mhba, int id, u64 wwid) in mvumi_match_devices() argument
1549 list_for_each_entry(mv_dev, &mhba->shost_dev_list, list) { in mvumi_match_devices()
1552 dev_err(&mhba->pdev->dev, in mvumi_match_devices()
1558 if (mhba->pdev->device == in mvumi_match_devices()
1560 mvumi_rescan_devices(mhba, id); in mvumi_match_devices()
1568 static void mvumi_remove_devices(struct mvumi_hba *mhba, int id) in mvumi_remove_devices() argument
1573 &mhba->shost_dev_list, list) { in mvumi_remove_devices()
1575 dev_dbg(&mhba->pdev->dev, in mvumi_remove_devices()
1578 mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE); in mvumi_remove_devices()
1585 static int mvumi_probe_devices(struct mvumi_hba *mhba) in mvumi_probe_devices() argument
1593 cmd = mvumi_create_internal_cmd(mhba, 64); in mvumi_probe_devices()
1597 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) in mvumi_probe_devices()
1598 maxid = mhba->max_target_id; in mvumi_probe_devices()
1600 maxid = mhba->max_target_id - 1; in mvumi_probe_devices()
1603 wwid = mvumi_inquiry(mhba, id, cmd); in mvumi_probe_devices()
1606 mvumi_remove_devices(mhba, id); in mvumi_probe_devices()
1609 found = mvumi_match_devices(mhba, id, wwid); in mvumi_probe_devices()
1611 mvumi_remove_devices(mhba, id); in mvumi_probe_devices()
1615 dev_err(&mhba->pdev->dev, in mvumi_probe_devices()
1625 &mhba->mhba_dev_list); in mvumi_probe_devices()
1626 dev_dbg(&mhba->pdev->dev, in mvumi_probe_devices()
1637 mvumi_delete_internal_cmd(mhba, cmd); in mvumi_probe_devices()
1645 struct mvumi_hba *mhba = (struct mvumi_hba *) data; in mvumi_rescan_bus() local
1651 if (!atomic_read(&mhba->pnp_count)) in mvumi_rescan_bus()
1654 atomic_set(&mhba->pnp_count, 0); in mvumi_rescan_bus()
1657 mutex_lock(&mhba->device_lock); in mvumi_rescan_bus()
1658 ret = mvumi_probe_devices(mhba); in mvumi_rescan_bus()
1661 &mhba->mhba_dev_list, list) { in mvumi_rescan_bus()
1662 if (mvumi_handle_hotplug(mhba, mv_dev->id, in mvumi_rescan_bus()
1664 dev_err(&mhba->pdev->dev, in mvumi_rescan_bus()
1673 &mhba->shost_dev_list); in mvumi_rescan_bus()
1677 mutex_unlock(&mhba->device_lock); in mvumi_rescan_bus()
1682 static void mvumi_proc_msg(struct mvumi_hba *mhba, in mvumi_proc_msg() argument
1690 if (mhba->fw_flag & MVUMI_FW_ATTACH) { in mvumi_proc_msg()
1695 mutex_lock(&mhba->sas_discovery_mutex); in mvumi_proc_msg()
1700 mvumi_handle_hotplug(mhba, index, DEVICE_ONLINE); in mvumi_proc_msg()
1708 mvumi_handle_hotplug(mhba, index, DEVICE_OFFLINE); in mvumi_proc_msg()
1710 mutex_unlock(&mhba->sas_discovery_mutex); in mvumi_proc_msg()
1714 static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer) in mvumi_notification() argument
1722 dev_err(&mhba->pdev->dev, "event count[0x%x] is bigger" in mvumi_notification()
1729 mvumi_show_event(mhba, param); in mvumi_notification()
1732 mvumi_proc_msg(mhba, buffer); in mvumi_notification()
1736 static int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg) in mvumi_get_event() argument
1741 cmd = mvumi_create_internal_cmd(mhba, 512); in mvumi_get_event()
1756 mvumi_issue_blocked_cmd(mhba, cmd); in mvumi_get_event()
1759 dev_err(&mhba->pdev->dev, "get event failed, status=0x%x.\n", in mvumi_get_event()
1762 mvumi_notification(mhba, cmd->frame->cdb[1], cmd->data_buf); in mvumi_get_event()
1764 mvumi_delete_internal_cmd(mhba, cmd); in mvumi_get_event()
1773 mvumi_get_event(mu_ev->mhba, mu_ev->event); in mvumi_scan_events()
1777 static void mvumi_launch_events(struct mvumi_hba *mhba, u32 isr_status) in mvumi_launch_events() argument
1783 atomic_inc(&mhba->pnp_count); in mvumi_launch_events()
1784 wake_up_process(mhba->dm_thread); in mvumi_launch_events()
1792 mu_ev->mhba = mhba; in mvumi_launch_events()
1801 static void mvumi_handle_clob(struct mvumi_hba *mhba) in mvumi_handle_clob() argument
1807 while (!list_empty(&mhba->free_ob_list)) { in mvumi_handle_clob()
1808 pool = list_first_entry(&mhba->free_ob_list, in mvumi_handle_clob()
1811 list_add_tail(&pool->list, &mhba->ob_data_list); in mvumi_handle_clob()
1814 cmd = mhba->tag_cmd[ob_frame->tag]; in mvumi_handle_clob()
1816 atomic_dec(&mhba->fw_outstanding); in mvumi_handle_clob()
1817 mhba->tag_cmd[ob_frame->tag] = 0; in mvumi_handle_clob()
1818 tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag); in mvumi_handle_clob()
1820 mvumi_complete_cmd(mhba, cmd, ob_frame); in mvumi_handle_clob()
1822 mvumi_complete_internal_cmd(mhba, cmd, ob_frame); in mvumi_handle_clob()
1824 mhba->instancet->fire_cmd(mhba, NULL); in mvumi_handle_clob()
1829 struct mvumi_hba *mhba = (struct mvumi_hba *) devp; in mvumi_isr_handler() local
1832 spin_lock_irqsave(mhba->shost->host_lock, flags); in mvumi_isr_handler()
1833 if (unlikely(mhba->instancet->clear_intr(mhba) || !mhba->global_isr)) { in mvumi_isr_handler()
1834 spin_unlock_irqrestore(mhba->shost->host_lock, flags); in mvumi_isr_handler()
1838 if (mhba->global_isr & mhba->regs->int_dl_cpu2pciea) { in mvumi_isr_handler()
1839 if (mhba->isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) in mvumi_isr_handler()
1840 mvumi_launch_events(mhba, mhba->isr_status); in mvumi_isr_handler()
1841 if (mhba->isr_status & DRBL_HANDSHAKE_ISR) { in mvumi_isr_handler()
1842 dev_warn(&mhba->pdev->dev, "enter handshake again!\n"); in mvumi_isr_handler()
1843 mvumi_handshake(mhba); in mvumi_isr_handler()
1848 if (mhba->global_isr & mhba->regs->int_comaout) in mvumi_isr_handler()
1849 mvumi_receive_ob_list_entry(mhba); in mvumi_isr_handler()
1851 mhba->global_isr = 0; in mvumi_isr_handler()
1852 mhba->isr_status = 0; in mvumi_isr_handler()
1853 if (mhba->fw_state == FW_STATE_STARTED) in mvumi_isr_handler()
1854 mvumi_handle_clob(mhba); in mvumi_isr_handler()
1855 spin_unlock_irqrestore(mhba->shost->host_lock, flags); in mvumi_isr_handler()
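
The interrupt handler is a short pipeline: clear_intr() latches the causes into mhba->global_isr/mhba->isr_status under the host lock (returning nonzero when the interrupt is not ours), doorbell bits dispatch event and handshake work, int_comaout drains the outbound ring, and mvumi_handle_clob() finishes completed commands once the firmware is started. A condensed control-flow model with stubbed helpers:

    struct isr_model {
        unsigned int global_isr, isr_status, fw_started;
        unsigned int int_dl_cpu2pciea, int_comaout;   /* cause masks (assumed) */
        unsigned int drbl_events, drbl_handshake;     /* doorbell masks (assumed) */
    };

    static int clear_intr(struct isr_model *m) { return m->global_isr == 0; }
    static void launch_events(struct isr_model *m) { (void)m; }
    static void handshake(struct isr_model *m) { (void)m; }
    static void receive_ob(struct isr_model *m) { (void)m; }
    static void handle_clob(struct isr_model *m) { (void)m; }

    static int isr_model_handler(struct isr_model *m)
    {
        if (clear_intr(m) || !m->global_isr)
            return 0;                              /* IRQ_NONE: not our interrupt */
        if (m->global_isr & m->int_dl_cpu2pciea) { /* doorbell from firmware */
            if (m->isr_status & m->drbl_events)
                launch_events(m);                  /* hotplug/event notifications */
            if (m->isr_status & m->drbl_handshake)
                handshake(m);                      /* firmware re-entered handshake */
        }
        if (m->global_isr & m->int_comaout)        /* outbound list has entries */
            receive_ob(m);
        m->global_isr = m->isr_status = 0;
        if (m->fw_started)
            handle_clob(m);                        /* complete finished commands */
        return 1;                                  /* IRQ_HANDLED */
    }
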
1859 static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba, in mvumi_send_command() argument
1867 if (unlikely(mhba->fw_state != FW_STATE_STARTED)) { in mvumi_send_command()
1868 dev_dbg(&mhba->pdev->dev, "firmware not ready.\n"); in mvumi_send_command()
1871 if (tag_is_empty(&mhba->tag_pool)) { in mvumi_send_command()
1872 dev_dbg(&mhba->pdev->dev, "no free tag.\n"); in mvumi_send_command()
1875 mvumi_get_ib_list_entry(mhba, &ib_entry); in mvumi_send_command()
1877 cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool); in mvumi_send_command()
1878 cmd->frame->request_id = mhba->io_seq++; in mvumi_send_command()
1880 mhba->tag_cmd[cmd->frame->tag] = cmd; in mvumi_send_command()
1883 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) { in mvumi_send_command()
1897 static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd) in mvumi_fire_cmd() argument
1904 list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list); in mvumi_fire_cmd()
1905 count = mhba->instancet->check_ib_list(mhba); in mvumi_fire_cmd()
1906 if (list_empty(&mhba->waiting_req_list) || !count) in mvumi_fire_cmd()
1910 cmd = list_first_entry(&mhba->waiting_req_list, in mvumi_fire_cmd()
1913 result = mvumi_send_command(mhba, cmd); in mvumi_fire_cmd()
1919 list_add(&cmd->queue_pointer, &mhba->waiting_req_list); in mvumi_fire_cmd()
1921 mvumi_send_ib_list_entry(mhba); in mvumi_fire_cmd()
1925 } while (!list_empty(&mhba->waiting_req_list) && count--); in mvumi_fire_cmd()
1928 mvumi_send_ib_list_entry(mhba); in mvumi_fire_cmd()
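
mvumi_fire_cmd() batches submissions: a new command joins mhba->waiting_req_list, up to check_ib_list() free slots are then dequeued and sent, a command that hits a resource limit goes back to the head of the queue (the list_add at line 1919 above), and the doorbell rings once per batch rather than once per command. The loop shape:

    struct q_model { struct q_model *next; };          /* queued command stand-in */

    static int send_one(struct q_model *cmd) { (void)cmd; return 0; } /* 0 = sent */
    static void ring_doorbell(void) { }                /* mvumi_send_ib_list_entry() */

    static void fire_cmds_model(struct q_model **waiting, unsigned int count)
    {
        int fired = 0;

        while (*waiting && count--) {
            struct q_model *cmd = *waiting;
            *waiting = cmd->next;
            if (send_one(cmd)) {        /* no resource: requeue at the head */
                cmd->next = *waiting;
                *waiting = cmd;
                break;
            }
            fired = 1;
        }
        if (fired)
            ring_doorbell();            /* one doorbell write for the whole batch */
    }
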
1935 static void mvumi_enable_intr(struct mvumi_hba *mhba) in mvumi_enable_intr() argument
1938 struct mvumi_hw_regs *regs = mhba->regs; in mvumi_enable_intr()
1950 static void mvumi_disable_intr(struct mvumi_hba *mhba) in mvumi_disable_intr() argument
1953 struct mvumi_hw_regs *regs = mhba->regs; in mvumi_disable_intr()
1964 struct mvumi_hba *mhba = (struct mvumi_hba *) extend; in mvumi_clear_intr() local
1966 struct mvumi_hw_regs *regs = mhba->regs; in mvumi_clear_intr()
1973 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) { in mvumi_clear_intr()
1984 status ^= mhba->regs->int_comaerr; in mvumi_clear_intr()
1998 mhba->global_isr = status; in mvumi_clear_intr()
1999 mhba->isr_status = isr_status; in mvumi_clear_intr()
2008 static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba) in mvumi_read_fw_status_reg() argument
2012 status = ioread32(mhba->regs->arm_to_pciea_drbl_reg); in mvumi_read_fw_status_reg()
2014 iowrite32(status, mhba->regs->arm_to_pciea_drbl_reg); in mvumi_read_fw_status_reg()
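
mvumi_read_fw_status_reg() shows the doorbell discipline used throughout: read the ARM-to-PCIe doorbell, and if any bits are set write the same value back to acknowledge them (write-1-to-clear), then hand the status to the caller. The pattern, with a volatile pointer standing in for the ioread32()/iowrite32() accessors:

    #include <stdint.h>

    /* Read-and-acknowledge a write-1-to-clear doorbell register. */
    static uint32_t read_fw_status_model(volatile uint32_t *drbl_reg)
    {
        uint32_t status = *drbl_reg;    /* ioread32() in the driver */
        if (status)
            *drbl_reg = status;         /* write the bits back to clear them */
        return status;
    }
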
2042 struct mvumi_hba *mhba; in mvumi_slave_configure() local
2045 mhba = (struct mvumi_hba *) sdev->host->hostdata; in mvumi_slave_configure()
2046 if (sdev->id >= mhba->max_target_id) in mvumi_slave_configure()
2049 mhba->target_map[sdev->id / bitcount] |= (1 << (sdev->id % bitcount)); in mvumi_slave_configure()
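
mvumi_slave_configure() records each discovered target in a byte-granular bitmap (bitcount is sizeof(unsigned char) * 8), and mvumi_flush_cache() above tests the same bits before flushing a device. The indexing:

    #define BITCOUNT 8                  /* sizeof(unsigned char) * 8 */

    /* Set and test a target id in a byte-array bitmap like mhba->target_map. */
    static void target_map_set(unsigned char *map, unsigned int id)
    {
        map[id / BITCOUNT] |= 1U << (id % BITCOUNT);
    }

    static int target_map_test(const unsigned char *map, unsigned int id)
    {
        return map[id / BITCOUNT] & (1U << (id % BITCOUNT));
    }
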
2062 static unsigned char mvumi_build_frame(struct mvumi_hba *mhba, in mvumi_build_frame() argument
2086 dev_warn(&mhba->pdev->dev, "unexpected data direction[%d] " in mvumi_build_frame()
2095 if (mvumi_make_sgl(mhba, scmd, &pframe->payload[0], in mvumi_build_frame()
2123 struct mvumi_hba *mhba; in mvumi_queue_command() local
2129 mhba = (struct mvumi_hba *) shost->hostdata; in mvumi_queue_command()
2131 cmd = mvumi_get_cmd(mhba); in mvumi_queue_command()
2137 if (unlikely(mvumi_build_frame(mhba, scmd, cmd))) in mvumi_queue_command()
2142 mhba->instancet->fire_cmd(mhba, cmd); in mvumi_queue_command()
2147 mvumi_return_cmd(mhba, cmd); in mvumi_queue_command()
2157 struct mvumi_hba *mhba = shost_priv(host); in mvumi_timed_out() local
2160 spin_lock_irqsave(mhba->shost->host_lock, flags); in mvumi_timed_out()
2162 if (mhba->tag_cmd[cmd->frame->tag]) { in mvumi_timed_out()
2163 mhba->tag_cmd[cmd->frame->tag] = 0; in mvumi_timed_out()
2164 tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag); in mvumi_timed_out()
2169 atomic_dec(&mhba->fw_outstanding); in mvumi_timed_out()
2175 pci_unmap_sg(mhba->pdev, in mvumi_timed_out()
2180 pci_unmap_single(mhba->pdev, in mvumi_timed_out()
2188 mvumi_return_cmd(mhba, cmd); in mvumi_timed_out()
2189 spin_unlock_irqrestore(mhba->shost->host_lock, flags); in mvumi_timed_out()
2237 static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba) in mvumi_cfg_hw_reg() argument
2242 switch (mhba->pdev->device) { in mvumi_cfg_hw_reg()
2244 mhba->mmio = mhba->base_addr[0]; in mvumi_cfg_hw_reg()
2245 base = mhba->mmio; in mvumi_cfg_hw_reg()
2246 if (!mhba->regs) { in mvumi_cfg_hw_reg()
2247 mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL); in mvumi_cfg_hw_reg()
2248 if (mhba->regs == NULL) in mvumi_cfg_hw_reg()
2251 regs = mhba->regs; in mvumi_cfg_hw_reg()
2296 mhba->mmio = mhba->base_addr[2]; in mvumi_cfg_hw_reg()
2297 base = mhba->mmio; in mvumi_cfg_hw_reg()
2298 if (!mhba->regs) { in mvumi_cfg_hw_reg()
2299 mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL); in mvumi_cfg_hw_reg()
2300 if (mhba->regs == NULL) in mvumi_cfg_hw_reg()
2303 regs = mhba->regs; in mvumi_cfg_hw_reg()
2361 static int mvumi_init_fw(struct mvumi_hba *mhba) in mvumi_init_fw() argument
2365 if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) { in mvumi_init_fw()
2366 dev_err(&mhba->pdev->dev, "IO memory region busy!\n"); in mvumi_init_fw()
2369 ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr); in mvumi_init_fw()
2373 switch (mhba->pdev->device) { in mvumi_init_fw()
2375 mhba->instancet = &mvumi_instance_9143; in mvumi_init_fw()
2376 mhba->io_seq = 0; in mvumi_init_fw()
2377 mhba->max_sge = MVUMI_MAX_SG_ENTRY; in mvumi_init_fw()
2378 mhba->request_id_enabled = 1; in mvumi_init_fw()
2381 mhba->instancet = &mvumi_instance_9580; in mvumi_init_fw()
2382 mhba->io_seq = 0; in mvumi_init_fw()
2383 mhba->max_sge = MVUMI_MAX_SG_ENTRY; in mvumi_init_fw()
2386 dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n", in mvumi_init_fw()
2387 mhba->pdev->device); in mvumi_init_fw()
2388 mhba->instancet = NULL; in mvumi_init_fw()
2392 dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n", in mvumi_init_fw()
2393 mhba->pdev->device); in mvumi_init_fw()
2394 ret = mvumi_cfg_hw_reg(mhba); in mvumi_init_fw()
2396 dev_err(&mhba->pdev->dev, in mvumi_init_fw()
2401 mhba->handshake_page = pci_alloc_consistent(mhba->pdev, HSP_MAX_SIZE, in mvumi_init_fw()
2402 &mhba->handshake_page_phys); in mvumi_init_fw()
2403 if (!mhba->handshake_page) { in mvumi_init_fw()
2404 dev_err(&mhba->pdev->dev, in mvumi_init_fw()
2410 if (mvumi_start(mhba)) { in mvumi_init_fw()
2414 ret = mvumi_alloc_cmds(mhba); in mvumi_init_fw()
2421 mvumi_release_mem_resource(mhba); in mvumi_init_fw()
2422 pci_free_consistent(mhba->pdev, HSP_MAX_SIZE, in mvumi_init_fw()
2423 mhba->handshake_page, mhba->handshake_page_phys); in mvumi_init_fw()
2425 kfree(mhba->regs); in mvumi_init_fw()
2427 mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr); in mvumi_init_fw()
2429 pci_release_regions(mhba->pdev); in mvumi_init_fw()
2438 static int mvumi_io_attach(struct mvumi_hba *mhba) in mvumi_io_attach() argument
2440 struct Scsi_Host *host = mhba->shost; in mvumi_io_attach()
2443 unsigned int max_sg = (mhba->ib_max_size + 4 - in mvumi_io_attach()
2446 host->irq = mhba->pdev->irq; in mvumi_io_attach()
2447 host->unique_id = mhba->unique_id; in mvumi_io_attach()
2448 host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1; in mvumi_io_attach()
2449 host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge; in mvumi_io_attach()
2450 host->max_sectors = mhba->max_transfer_size / 512; in mvumi_io_attach()
2451 host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1; in mvumi_io_attach()
2452 host->max_id = mhba->max_target_id; in mvumi_io_attach()
2456 ret = scsi_add_host(host, &mhba->pdev->dev); in mvumi_io_attach()
2458 dev_err(&mhba->pdev->dev, "scsi_add_host failed\n"); in mvumi_io_attach()
2461 mhba->fw_flag |= MVUMI_FW_ATTACH; in mvumi_io_attach()
2463 mutex_lock(&mhba->sas_discovery_mutex); in mvumi_io_attach()
2464 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) in mvumi_io_attach()
2465 ret = scsi_add_device(host, 0, mhba->max_target_id - 1, 0); in mvumi_io_attach()
2469 dev_err(&mhba->pdev->dev, "add virtual device failed\n"); in mvumi_io_attach()
2470 mutex_unlock(&mhba->sas_discovery_mutex); in mvumi_io_attach()
2474 mhba->dm_thread = kthread_create(mvumi_rescan_bus, in mvumi_io_attach()
2475 mhba, "mvumi_scanthread"); in mvumi_io_attach()
2476 if (IS_ERR(mhba->dm_thread)) { in mvumi_io_attach()
2477 dev_err(&mhba->pdev->dev, in mvumi_io_attach()
2479 mutex_unlock(&mhba->sas_discovery_mutex); in mvumi_io_attach()
2482 atomic_set(&mhba->pnp_count, 1); in mvumi_io_attach()
2483 wake_up_process(mhba->dm_thread); in mvumi_io_attach()
2485 mutex_unlock(&mhba->sas_discovery_mutex); in mvumi_io_attach()
2489 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) in mvumi_io_attach()
2490 sdev = scsi_device_lookup(mhba->shost, 0, in mvumi_io_attach()
2491 mhba->max_target_id - 1, 0); in mvumi_io_attach()
2497 scsi_remove_host(mhba->shost); in mvumi_io_attach()
2509 struct mvumi_hba *mhba; in mvumi_probe_one() local
2535 host = scsi_host_alloc(&mvumi_template, sizeof(*mhba)); in mvumi_probe_one()
2541 mhba = shost_priv(host); in mvumi_probe_one()
2543 INIT_LIST_HEAD(&mhba->cmd_pool); in mvumi_probe_one()
2544 INIT_LIST_HEAD(&mhba->ob_data_list); in mvumi_probe_one()
2545 INIT_LIST_HEAD(&mhba->free_ob_list); in mvumi_probe_one()
2546 INIT_LIST_HEAD(&mhba->res_list); in mvumi_probe_one()
2547 INIT_LIST_HEAD(&mhba->waiting_req_list); in mvumi_probe_one()
2548 mutex_init(&mhba->device_lock); in mvumi_probe_one()
2549 INIT_LIST_HEAD(&mhba->mhba_dev_list); in mvumi_probe_one()
2550 INIT_LIST_HEAD(&mhba->shost_dev_list); in mvumi_probe_one()
2551 atomic_set(&mhba->fw_outstanding, 0); in mvumi_probe_one()
2552 init_waitqueue_head(&mhba->int_cmd_wait_q); in mvumi_probe_one()
2553 mutex_init(&mhba->sas_discovery_mutex); in mvumi_probe_one()
2555 mhba->pdev = pdev; in mvumi_probe_one()
2556 mhba->shost = host; in mvumi_probe_one()
2557 mhba->unique_id = pdev->bus->number << 8 | pdev->devfn; in mvumi_probe_one()
2559 ret = mvumi_init_fw(mhba); in mvumi_probe_one()
2563 ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED, in mvumi_probe_one()
2564 "mvumi", mhba); in mvumi_probe_one()
2570 mhba->instancet->enable_intr(mhba); in mvumi_probe_one()
2571 pci_set_drvdata(pdev, mhba); in mvumi_probe_one()
2573 ret = mvumi_io_attach(mhba); in mvumi_probe_one()
2577 mvumi_backup_bar_addr(mhba); in mvumi_probe_one()
2583 mhba->instancet->disable_intr(mhba); in mvumi_probe_one()
2584 free_irq(mhba->pdev->irq, mhba); in mvumi_probe_one()
2586 mvumi_release_fw(mhba); in mvumi_probe_one()
2600 struct mvumi_hba *mhba; in mvumi_detach_one() local
2602 mhba = pci_get_drvdata(pdev); in mvumi_detach_one()
2603 if (mhba->dm_thread) { in mvumi_detach_one()
2604 kthread_stop(mhba->dm_thread); in mvumi_detach_one()
2605 mhba->dm_thread = NULL; in mvumi_detach_one()
2608 mvumi_detach_devices(mhba); in mvumi_detach_one()
2609 host = mhba->shost; in mvumi_detach_one()
2610 scsi_remove_host(mhba->shost); in mvumi_detach_one()
2611 mvumi_flush_cache(mhba); in mvumi_detach_one()
2613 mhba->instancet->disable_intr(mhba); in mvumi_detach_one()
2614 free_irq(mhba->pdev->irq, mhba); in mvumi_detach_one()
2615 mvumi_release_fw(mhba); in mvumi_detach_one()
2627 struct mvumi_hba *mhba = pci_get_drvdata(pdev); in mvumi_shutdown() local
2629 mvumi_flush_cache(mhba); in mvumi_shutdown()
2634 struct mvumi_hba *mhba = NULL; in mvumi_suspend() local
2636 mhba = pci_get_drvdata(pdev); in mvumi_suspend()
2637 mvumi_flush_cache(mhba); in mvumi_suspend()
2639 pci_set_drvdata(pdev, mhba); in mvumi_suspend()
2640 mhba->instancet->disable_intr(mhba); in mvumi_suspend()
2641 free_irq(mhba->pdev->irq, mhba); in mvumi_suspend()
2642 mvumi_unmap_pci_addr(pdev, mhba->base_addr); in mvumi_suspend()
2654 struct mvumi_hba *mhba = NULL; in mvumi_resume() local
2656 mhba = pci_get_drvdata(pdev); in mvumi_resume()
2680 ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME); in mvumi_resume()
2683 ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr); in mvumi_resume()
2687 if (mvumi_cfg_hw_reg(mhba)) { in mvumi_resume()
2692 mhba->mmio = mhba->base_addr[0]; in mvumi_resume()
2693 mvumi_reset(mhba); in mvumi_resume()
2695 if (mvumi_start(mhba)) { in mvumi_resume()
2700 ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED, in mvumi_resume()
2701 "mvumi", mhba); in mvumi_resume()
2706 mhba->instancet->enable_intr(mhba); in mvumi_resume()
2711 mvumi_unmap_pci_addr(pdev, mhba->base_addr); in mvumi_resume()