Lines matching refs: ha

52 struct qla_hw_data *ha = vha->hw; in qlafx00_mailbox_command() local
53 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); in qlafx00_mailbox_command()
55 if (ha->pdev->error_state > pci_channel_io_frozen) { in qlafx00_mailbox_command()
68 reg = ha->iobase; in qlafx00_mailbox_command()
74 if (ha->flags.pci_channel_io_perm_failure) { in qlafx00_mailbox_command()
80 if (ha->flags.isp82xx_fw_hung) { in qlafx00_mailbox_command()
84 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung); in qlafx00_mailbox_command()
94 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) { in qlafx00_mailbox_command()
102 ha->flags.mbox_busy = 1; in qlafx00_mailbox_command()
104 ha->mcp32 = mcp; in qlafx00_mailbox_command()
109 spin_lock_irqsave(&ha->hardware_lock, flags); in qlafx00_mailbox_command()
118 for (cnt = 0; cnt < ha->mbx_count; cnt++) { in qlafx00_mailbox_command()
128 ha->flags.mbox_int = 0; in qlafx00_mailbox_command()
129 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); in qlafx00_mailbox_command()
144 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) { in qlafx00_mailbox_command()
145 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); in qlafx00_mailbox_command()
147 QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code); in qlafx00_mailbox_command()
148 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlafx00_mailbox_command()
150 wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ); in qlafx00_mailbox_command()
155 QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code); in qlafx00_mailbox_command()
156 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlafx00_mailbox_command()
159 while (!ha->flags.mbox_int) { in qlafx00_mailbox_command()
164 qla2x00_poll(ha->rsp_q_map[0]); in qlafx00_mailbox_command()
166 if (!ha->flags.mbox_int && in qlafx00_mailbox_command()
167 !(IS_QLA2200(ha) && in qlafx00_mailbox_command()
177 if (ha->flags.mbox_int) { in qlafx00_mailbox_command()
184 ha->flags.mbox_int = 0; in qlafx00_mailbox_command()
185 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); in qlafx00_mailbox_command()
187 if (ha->mailbox_out32[0] != MBS_COMMAND_COMPLETE) in qlafx00_mailbox_command()
192 iptr = (uint32_t *)&ha->mailbox_out32[0]; in qlafx00_mailbox_command()
194 for (cnt = 0; cnt < ha->mbx_count; cnt++) { in qlafx00_mailbox_command()
207 ha->flags.mbox_busy = 0; in qlafx00_mailbox_command()
210 ha->mcp32 = NULL; in qlafx00_mailbox_command()
212 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) { in qlafx00_mailbox_command()
217 qla2x00_poll(ha->rsp_q_map[0]); in qlafx00_mailbox_command()
223 ha->flags.eeh_busy) { in qlafx00_mailbox_command()
236 ha->flags.eeh_busy); in qlafx00_mailbox_command()
256 if (ha->isp_ops->abort_isp(vha)) { in qlafx00_mailbox_command()
270 complete(&ha->mbx_cmd_comp); in qlafx00_mailbox_command()
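
The references above cover the whole FX00 mailbox command path: the caller serializes on ha->mbx_cmd_comp, sets ha->flags.mbox_busy, loads the outbound mailbox registers under hardware_lock, raises the host-to-firmware doorbell with QLAFX00_SET_HST_INTR(), and then either sleeps on mbx_intr_comp or polls ha->flags.mbox_int before copying ha->mailbox_out32[] back and clearing the busy flag. A minimal user-space sketch of that handshake follows; the fake_* types and the inline firmware stub are invented for illustration, and only the overall sequence mirrors the driver.

    /* Hedged sketch: single-threaded model of the qlafx00 mailbox handshake.
     * struct fake_hw and fake_firmware() are stand-ins, not driver API. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MBX_COUNT            32
    #define MBS_COMMAND_COMPLETE 0x4000   /* same status value the driver tests */

    struct fake_hw {
        uint32_t mbox_regs[MBX_COUNT];      /* outbound registers (reg->mailbox*) */
        uint32_t mailbox_out32[MBX_COUNT];  /* filled by the completion handler */
        int mbox_busy;
        int mbox_int;
    };

    /* Firmware stub: completes the command by echoing the request back. */
    static void fake_firmware(struct fake_hw *ha)
    {
        memcpy(ha->mailbox_out32, ha->mbox_regs, sizeof(ha->mailbox_out32));
        ha->mailbox_out32[0] = MBS_COMMAND_COMPLETE;
        ha->mbox_int = 1;
    }

    static int fake_mailbox_command(struct fake_hw *ha, const uint32_t *mb,
                                    uint32_t *out)
    {
        if (ha->mbox_busy)
            return -1;                      /* real code serializes on mbx_cmd_comp */
        ha->mbox_busy = 1;
        ha->mbox_int = 0;

        memcpy(ha->mbox_regs, mb, MBX_COUNT * sizeof(uint32_t));
        fake_firmware(ha);                  /* driver: QLAFX00_SET_HST_INTR() + wait */

        while (!ha->mbox_int)               /* polling branch of the real code */
            ;
        ha->mbox_int = 0;

        memcpy(out, ha->mailbox_out32, MBX_COUNT * sizeof(uint32_t));
        ha->mbox_busy = 0;
        return out[0] == MBS_COMMAND_COMPLETE ? 0 : -1;
    }

    int main(void)
    {
        struct fake_hw ha = {{0}};
        uint32_t mb[MBX_COUNT] = { 0x60 };  /* arbitrary example opcode */
        uint32_t out[MBX_COUNT];

        if (fake_mailbox_command(&ha, mb, out) == 0)
            printf("mailbox complete, status 0x%x\n", out[0]);
        return 0;
    }
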
396 struct qla_hw_data *ha = vha->hw; in qlafx00_init_firmware() local
404 mcp->mb[2] = MSD(ha->init_cb_dma); in qlafx00_init_firmware()
405 mcp->mb[3] = LSD(ha->init_cb_dma); in qlafx00_init_firmware()
499 struct qla_hw_data *ha = vha->hw; in qlafx00_pci_config() local
501 pci_set_master(ha->pdev); in qlafx00_pci_config()
502 pci_try_set_mwi(ha->pdev); in qlafx00_pci_config()
504 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); in qlafx00_pci_config()
507 pci_write_config_word(ha->pdev, PCI_COMMAND, w); in qlafx00_pci_config()
510 if (pci_is_pcie(ha->pdev)) in qlafx00_pci_config()
511 pcie_set_readrq(ha->pdev, 2048); in qlafx00_pci_config()
513 ha->chip_revision = ha->pdev->revision; in qlafx00_pci_config()
527 struct qla_hw_data *ha = vha->hw; in qlafx00_soc_cpu_reset() local
532 spin_lock_irqsave(&ha->hardware_lock, flags); in qlafx00_soc_cpu_reset()
534 QLAFX00_SET_HBA_SOC_REG(ha, 0x80004, 0); in qlafx00_soc_cpu_reset()
535 QLAFX00_SET_HBA_SOC_REG(ha, 0x82004, 0); in qlafx00_soc_cpu_reset()
538 QLAFX00_SET_HBA_SOC_REG(ha, 0x60920, 0x02); in qlafx00_soc_cpu_reset()
539 QLAFX00_SET_HBA_SOC_REG(ha, 0x60924, 0x02); in qlafx00_soc_cpu_reset()
540 QLAFX00_SET_HBA_SOC_REG(ha, 0xf0920, 0x02); in qlafx00_soc_cpu_reset()
541 QLAFX00_SET_HBA_SOC_REG(ha, 0xf0924, 0x02); in qlafx00_soc_cpu_reset()
544 reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60840); in qlafx00_soc_cpu_reset()
546 QLAFX00_SET_HBA_SOC_REG(ha, 0x60840, reg_val); in qlafx00_soc_cpu_reset()
548 reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60844); in qlafx00_soc_cpu_reset()
550 QLAFX00_SET_HBA_SOC_REG(ha, 0x60844, reg_val); in qlafx00_soc_cpu_reset()
552 reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60848); in qlafx00_soc_cpu_reset()
554 QLAFX00_SET_HBA_SOC_REG(ha, 0x60848, reg_val); in qlafx00_soc_cpu_reset()
556 reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x6084C); in qlafx00_soc_cpu_reset()
558 QLAFX00_SET_HBA_SOC_REG(ha, 0x6084C, reg_val); in qlafx00_soc_cpu_reset()
561 if ((QLAFX00_GET_HBA_SOC_REG(ha, 0xd0000) & 0x10000000) == 0 && in qlafx00_soc_cpu_reset()
562 (QLAFX00_GET_HBA_SOC_REG(ha, 0x10600) & 0x1) == 0) in qlafx00_soc_cpu_reset()
569 QLAFX00_SET_HBA_SOC_REG(ha, in qlafx00_soc_cpu_reset()
571 QLAFX00_SET_HBA_SOC_REG(ha, in qlafx00_soc_cpu_reset()
576 QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x011f0101)); in qlafx00_soc_cpu_reset()
579 QLAFX00_SET_HBA_SOC_REG(ha, 0x10610, 1); in qlafx00_soc_cpu_reset()
580 QLAFX00_SET_HBA_SOC_REG(ha, 0x10600, 0); in qlafx00_soc_cpu_reset()
584 QLAFX00_SET_HBA_SOC_REG(ha, in qlafx00_soc_cpu_reset()
590 QLAFX00_SET_HBA_SOC_REG(ha, in qlafx00_soc_cpu_reset()
597 QLAFX00_SET_HBA_SOC_REG(ha, in qlafx00_soc_cpu_reset()
602 QLAFX00_SET_HBA_SOC_REG(ha, in qlafx00_soc_cpu_reset()
606 QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONTROL_REG, (0x2)); in qlafx00_soc_cpu_reset()
607 QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONFIG_REG, (0x3)); in qlafx00_soc_cpu_reset()
610 QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x0)); in qlafx00_soc_cpu_reset()
613 QLAFX00_SET_HBA_SOC_REG(ha, SOC_SW_RST_CONTROL_REG_CORE0, (0xF00)); in qlafx00_soc_cpu_reset()
615 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlafx00_soc_cpu_reset()
633 struct qla_hw_data *ha = vha->hw; in qlafx00_soft_reset() local
635 if (unlikely(pci_channel_offline(ha->pdev) && in qlafx00_soft_reset()
636 ha->flags.pci_channel_io_perm_failure)) in qlafx00_soft_reset()
639 ha->isp_ops->disable_intrs(ha); in qlafx00_soft_reset()
653 struct qla_hw_data *ha = vha->hw; in qlafx00_chip_diag() local
654 struct req_que *req = ha->req_q_map[0]; in qlafx00_chip_diag()
656 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length; in qlafx00_chip_diag()
672 struct qla_hw_data *ha = vha->hw; in qlafx00_config_rings() local
673 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00; in qlafx00_config_rings()
688 struct qla_hw_data *ha = vha->hw; in qlafx00_pci_info_str() local
690 if (pci_is_pcie(ha->pdev)) { in qlafx00_pci_info_str()
700 struct qla_hw_data *ha = vha->hw; in qlafx00_fw_version_str() local
702 snprintf(str, size, "%s", ha->mr.fw_version); in qlafx00_fw_version_str()
707 qlafx00_enable_intrs(struct qla_hw_data *ha) in qlafx00_enable_intrs() argument
711 spin_lock_irqsave(&ha->hardware_lock, flags); in qlafx00_enable_intrs()
712 ha->interrupts_on = 1; in qlafx00_enable_intrs()
713 QLAFX00_ENABLE_ICNTRL_REG(ha); in qlafx00_enable_intrs()
714 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlafx00_enable_intrs()
718 qlafx00_disable_intrs(struct qla_hw_data *ha) in qlafx00_disable_intrs() argument
722 spin_lock_irqsave(&ha->hardware_lock, flags); in qlafx00_disable_intrs()
723 ha->interrupts_on = 0; in qlafx00_disable_intrs()
724 QLAFX00_DISABLE_ICNTRL_REG(ha); in qlafx00_disable_intrs()
725 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlafx00_disable_intrs()
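
qlafx00_enable_intrs() and qlafx00_disable_intrs() are the simplest pattern in the listing: take hardware_lock, flip ha->interrupts_on, and program the FX00 interrupt-control register. A pthread-based analogue of that lock-guarded toggle, with invented names, might look like this:

    /* Hedged analogue of the enable/disable pattern; struct intr_ctx and the
     * function names are illustrative, not the driver's. */
    #include <pthread.h>

    struct intr_ctx {
        pthread_mutex_t hardware_lock;
        int interrupts_on;
    };

    static void fake_enable_intrs(struct intr_ctx *ha)
    {
        pthread_mutex_lock(&ha->hardware_lock);
        ha->interrupts_on = 1;    /* driver also hits QLAFX00_ENABLE_ICNTRL_REG() */
        pthread_mutex_unlock(&ha->hardware_lock);
    }

    static void fake_disable_intrs(struct intr_ctx *ha)
    {
        pthread_mutex_lock(&ha->hardware_lock);
        ha->interrupts_on = 0;    /* driver: QLAFX00_DISABLE_ICNTRL_REG() */
        pthread_mutex_unlock(&ha->hardware_lock);
    }

    int main(void)
    {
        struct intr_ctx ha;

        pthread_mutex_init(&ha.hardware_lock, NULL);
        ha.interrupts_on = 0;
        fake_enable_intrs(&ha);
        fake_disable_intrs(&ha);
        pthread_mutex_destroy(&ha.hardware_lock);
        return 0;
    }
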
745 struct qla_hw_data *ha = vha->hw; in qlafx00_loop_reset() local
752 ret = ha->isp_ops->target_reset(fcport, 0, 0); in qlafx00_loop_reset()
764 qlafx00_iospace_config(struct qla_hw_data *ha) in qlafx00_iospace_config() argument
766 if (pci_request_selected_regions(ha->pdev, ha->bars, in qlafx00_iospace_config()
768 ql_log_pci(ql_log_fatal, ha->pdev, 0x014e, in qlafx00_iospace_config()
770 pci_name(ha->pdev)); in qlafx00_iospace_config()
775 if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) { in qlafx00_iospace_config()
776 ql_log_pci(ql_log_warn, ha->pdev, 0x014f, in qlafx00_iospace_config()
778 pci_name(ha->pdev)); in qlafx00_iospace_config()
781 if (pci_resource_len(ha->pdev, 0) < BAR0_LEN_FX00) { in qlafx00_iospace_config()
782 ql_log_pci(ql_log_warn, ha->pdev, 0x0127, in qlafx00_iospace_config()
784 pci_name(ha->pdev)); in qlafx00_iospace_config()
788 ha->cregbase = in qlafx00_iospace_config()
789 ioremap_nocache(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00); in qlafx00_iospace_config()
790 if (!ha->cregbase) { in qlafx00_iospace_config()
791 ql_log_pci(ql_log_fatal, ha->pdev, 0x0128, in qlafx00_iospace_config()
792 "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev)); in qlafx00_iospace_config()
796 if (!(pci_resource_flags(ha->pdev, 2) & IORESOURCE_MEM)) { in qlafx00_iospace_config()
797 ql_log_pci(ql_log_warn, ha->pdev, 0x0129, in qlafx00_iospace_config()
799 pci_name(ha->pdev)); in qlafx00_iospace_config()
802 if (pci_resource_len(ha->pdev, 2) < BAR2_LEN_FX00) { in qlafx00_iospace_config()
803 ql_log_pci(ql_log_warn, ha->pdev, 0x012a, in qlafx00_iospace_config()
805 pci_name(ha->pdev)); in qlafx00_iospace_config()
809 ha->iobase = in qlafx00_iospace_config()
810 ioremap_nocache(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00); in qlafx00_iospace_config()
811 if (!ha->iobase) { in qlafx00_iospace_config()
812 ql_log_pci(ql_log_fatal, ha->pdev, 0x012b, in qlafx00_iospace_config()
813 "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev)); in qlafx00_iospace_config()
818 ha->max_req_queues = ha->max_rsp_queues = 1; in qlafx00_iospace_config()
820 ql_log_pci(ql_log_info, ha->pdev, 0x012c, in qlafx00_iospace_config()
822 ha->bars, ha->cregbase, ha->iobase); in qlafx00_iospace_config()
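
qlafx00_iospace_config() validates each BAR before mapping it: the region must be memory-mapped (IORESOURCE_MEM) and at least BAR0_LEN_FX00/BAR2_LEN_FX00 long, and only then is it ioremap'ed into ha->cregbase and ha->iobase. A compressed, self-contained sketch of that validate-then-map shape, with stub helpers standing in for the PCI core:

    /* Hedged sketch: BAR validation before mapping, after the shape of
     * qlafx00_iospace_config(). fake_bar/fake_map are invented stubs. */
    #include <stdint.h>
    #include <stdio.h>

    #define IORESOURCE_MEM 0x1
    #define BAR_MIN_LEN    0x10000        /* placeholder for BAR0_LEN_FX00 */

    struct fake_bar { unsigned flags; unsigned long len; };

    static void *fake_map(const struct fake_bar *bar, unsigned long want)
    {
        if (!(bar->flags & IORESOURCE_MEM)) {
            fprintf(stderr, "region not an MMIO resource, aborting\n");
            return NULL;
        }
        if (bar->len < want) {
            fprintf(stderr, "region too small (%lu < %lu), aborting\n",
                    bar->len, want);
            return NULL;
        }
        return (void *)(uintptr_t)0x1000;  /* kernel code would ioremap() here */
    }

    int main(void)
    {
        struct fake_bar bar0 = { IORESOURCE_MEM, BAR_MIN_LEN };
        void *cregbase = fake_map(&bar0, BAR_MIN_LEN);

        printf("cregbase=%p\n", cregbase);
        return cregbase ? 0 : 1;
    }
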
833 struct qla_hw_data *ha = vha->hw; in qlafx00_save_queue_ptrs() local
834 struct req_que *req = ha->req_q_map[0]; in qlafx00_save_queue_ptrs()
835 struct rsp_que *rsp = ha->rsp_q_map[0]; in qlafx00_save_queue_ptrs()
859 struct qla_hw_data *ha = vha->hw; in qlafx00_config_queues() local
860 struct req_que *req = ha->req_q_map[0]; in qlafx00_config_queues()
861 struct rsp_que *rsp = ha->rsp_q_map[0]; in qlafx00_config_queues()
862 dma_addr_t bar2_hdl = pci_resource_start(ha->pdev, 2); in qlafx00_config_queues()
864 req->length = ha->req_que_len; in qlafx00_config_queues()
865 req->ring = (void __force *)ha->iobase + ha->req_que_off; in qlafx00_config_queues()
866 req->dma = bar2_hdl + ha->req_que_off; in qlafx00_config_queues()
868 ql_log_pci(ql_log_info, ha->pdev, 0x012f, in qlafx00_config_queues()
877 ha->req_que_off, (u64)req->dma); in qlafx00_config_queues()
879 rsp->length = ha->rsp_que_len; in qlafx00_config_queues()
880 rsp->ring = (void __force *)ha->iobase + ha->rsp_que_off; in qlafx00_config_queues()
881 rsp->dma = bar2_hdl + ha->rsp_que_off; in qlafx00_config_queues()
883 ql_log_pci(ql_log_info, ha->pdev, 0x0131, in qlafx00_config_queues()
892 ha->rsp_que_off, (u64)rsp->dma); in qlafx00_config_queues()
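
The qlafx00_config_queues() lines show an FX00-specific twist: the request and response rings live inside BAR2 rather than in host-allocated DMA memory, so the CPU pointer is ha->iobase plus the firmware-reported offset and the DMA address is the BAR2 bus address plus the same offset. A tiny arithmetic illustration with invented values (the real offsets and lengths come from the AEN mailboxes read in qlafx00_init_fw_ready()):

    /* Hedged sketch: ring addresses as BAR2 base + firmware-reported offset.
     * All numeric values below are made up for illustration. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uintptr_t iobase      = 0x12340000u;  /* ioremap'ed BAR2 (ha->iobase) */
        uint64_t  bar2_start  = 0xf0000000u;  /* pci_resource_start(pdev, 2) */
        uint32_t  req_que_off = 0x1000;       /* ha->req_que_off */

        uintptr_t req_ring = iobase + req_que_off;     /* CPU-visible ring */
        uint64_t  req_dma  = bar2_start + req_que_off; /* address handed to firmware */

        printf("req ring va=%#lx dma=%#llx\n",
               (unsigned long)req_ring, (unsigned long long)req_dma);
        return 0;
    }
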
903 struct qla_hw_data *ha = vha->hw; in qlafx00_init_fw_ready() local
904 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00; in qlafx00_init_fw_ready()
916 ha->mbx_intr_code = MSW(aenmbx7); in qlafx00_init_fw_ready()
917 ha->rqstq_intr_code = LSW(aenmbx7); in qlafx00_init_fw_ready()
946 ha->mbx_intr_code = MSW(aenmbx7); in qlafx00_init_fw_ready()
947 ha->rqstq_intr_code = LSW(aenmbx7); in qlafx00_init_fw_ready()
948 ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1); in qlafx00_init_fw_ready()
949 ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3); in qlafx00_init_fw_ready()
950 ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5); in qlafx00_init_fw_ready()
951 ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6); in qlafx00_init_fw_ready()
957 ha->mbx_intr_code, ha->rqstq_intr_code); in qlafx00_init_fw_ready()
958 QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS); in qlafx00_init_fw_ready()
984 ha->mbx_intr_code = MSW(aenmbx7); in qlafx00_init_fw_ready()
985 ha->rqstq_intr_code = LSW(aenmbx7); in qlafx00_init_fw_ready()
986 ha->req_que_off = RD_REG_DWORD(&reg->initval1); in qlafx00_init_fw_ready()
987 ha->rsp_que_off = RD_REG_DWORD(&reg->initval3); in qlafx00_init_fw_ready()
988 ha->req_que_len = RD_REG_DWORD(&reg->initval5); in qlafx00_init_fw_ready()
989 ha->rsp_que_len = RD_REG_DWORD(&reg->initval6); in qlafx00_init_fw_ready()
993 ha->mbx_intr_code, ha->rqstq_intr_code); in qlafx00_init_fw_ready()
994 QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS); in qlafx00_init_fw_ready()
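
Both the fast-path and the cold-start branch above derive two 16-bit interrupt codes from the single 32-bit aenmailbox7 value: MSW() yields ha->mbx_intr_code and LSW() yields ha->rqstq_intr_code. A standalone illustration of that split (the macro definitions are written out here with their usual upper/lower-halfword meaning):

    /* Hedged illustration of splitting aenmbx7 into the two interrupt codes. */
    #include <stdint.h>
    #include <stdio.h>

    #define LSW(x) ((uint16_t)(x))                    /* lower 16 bits */
    #define MSW(x) ((uint16_t)((uint32_t)(x) >> 16))  /* upper 16 bits */

    int main(void)
    {
        uint32_t aenmbx7 = 0x00120034;                /* example register value */
        uint16_t mbx_intr_code   = MSW(aenmbx7);      /* 0x0012 */
        uint16_t rqstq_intr_code = LSW(aenmbx7);      /* 0x0034 */

        printf("mbx_intr_code=%#x rqstq_intr_code=%#x\n",
               mbx_intr_code, rqstq_intr_code);
        return 0;
    }
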
1123 struct qla_hw_data *ha = vha->hw; in qlafx00_find_all_targets() local
1140 0x2089, (uint8_t *)ha->gid_list, 32); in qlafx00_find_all_targets()
1147 for_each_set_bit(tgt_id, (void *)ha->gid_list, in qlafx00_find_all_targets()
1353 struct qla_hw_data *ha = vha->hw; in qlafx00_abort_isp_cleanup() local
1357 ha->mr.fw_hbt_en = 0; in qlafx00_abort_isp_cleanup()
1360 ha->flags.chip_reset_done = 0; in qlafx00_abort_isp_cleanup()
1364 "Performing ISP error recovery - ha = %p.\n", ha); in qlafx00_abort_isp_cleanup()
1365 ha->isp_ops->reset_chip(vha); in qlafx00_abort_isp_cleanup()
1385 if (!ha->flags.eeh_busy) { in qlafx00_abort_isp_cleanup()
1401 QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS); in qlafx00_abort_isp_cleanup()
1404 "%s Done done - ha=%p.\n", __func__, ha); in qlafx00_abort_isp_cleanup()
1438 struct qla_hw_data *ha = vha->hw; in qlafx00_rescan_isp() local
1439 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00; in qlafx00_rescan_isp()
1442 qla2x00_request_irqs(ha, ha->rsp_q_map[0]); in qlafx00_rescan_isp()
1445 ha->mbx_intr_code = MSW(aenmbx7); in qlafx00_rescan_isp()
1446 ha->rqstq_intr_code = LSW(aenmbx7); in qlafx00_rescan_isp()
1447 ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1); in qlafx00_rescan_isp()
1448 ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3); in qlafx00_rescan_isp()
1449 ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5); in qlafx00_rescan_isp()
1450 ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6); in qlafx00_rescan_isp()
1455 ha->mbx_intr_code, ha->rqstq_intr_code, in qlafx00_rescan_isp()
1456 ha->req_que_off, ha->rsp_que_len); in qlafx00_rescan_isp()
1459 QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS); in qlafx00_rescan_isp()
1481 struct qla_hw_data *ha = vha->hw; in qlafx00_timer_routine() local
1484 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00; in qlafx00_timer_routine()
1488 if (ha->mr.fw_hbt_cnt) in qlafx00_timer_routine()
1489 ha->mr.fw_hbt_cnt--; in qlafx00_timer_routine()
1491 if ((!ha->flags.mr_reset_hdlr_active) && in qlafx00_timer_routine()
1494 (ha->mr.fw_hbt_en)) { in qlafx00_timer_routine()
1496 if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) { in qlafx00_timer_routine()
1497 ha->mr.old_fw_hbt_cnt = fw_heart_beat; in qlafx00_timer_routine()
1498 ha->mr.fw_hbt_miss_cnt = 0; in qlafx00_timer_routine()
1500 ha->mr.fw_hbt_miss_cnt++; in qlafx00_timer_routine()
1501 if (ha->mr.fw_hbt_miss_cnt == in qlafx00_timer_routine()
1506 ha->mr.fw_hbt_miss_cnt = 0; in qlafx00_timer_routine()
1510 ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL; in qlafx00_timer_routine()
1516 if (ha->mr.fw_reset_timer_exp) { in qlafx00_timer_routine()
1519 ha->mr.fw_reset_timer_exp = 0; in qlafx00_timer_routine()
1525 ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL; in qlafx00_timer_routine()
1527 (!ha->mr.fw_hbt_en)) { in qlafx00_timer_routine()
1528 ha->mr.fw_hbt_en = 1; in qlafx00_timer_routine()
1529 } else if (!ha->mr.fw_reset_timer_tick) { in qlafx00_timer_routine()
1530 if (aenmbx0 == ha->mr.old_aenmbx0_state) in qlafx00_timer_routine()
1531 ha->mr.fw_reset_timer_exp = 1; in qlafx00_timer_routine()
1532 ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL; in qlafx00_timer_routine()
1536 data0 = QLAFX00_RD_REG(ha, in qlafx00_timer_routine()
1538 data1 = QLAFX00_RD_REG(ha, in qlafx00_timer_routine()
1544 QLAFX00_WR_REG(ha, in qlafx00_timer_routine()
1548 ha->mr.fw_reset_timer_tick = in qlafx00_timer_routine()
1551 ha->mr.fw_reset_timer_tick = in qlafx00_timer_routine()
1554 if (ha->mr.old_aenmbx0_state != aenmbx0) { in qlafx00_timer_routine()
1555 ha->mr.old_aenmbx0_state = aenmbx0; in qlafx00_timer_routine()
1556 ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL; in qlafx00_timer_routine()
1558 ha->mr.fw_reset_timer_tick--; in qlafx00_timer_routine()
1565 if (ha->mr.fw_critemp_timer_tick == 0) { in qlafx00_timer_routine()
1566 tempc = QLAFX00_GET_TEMPERATURE(ha); in qlafx00_timer_routine()
1571 if (tempc < ha->mr.critical_temperature) { in qlafx00_timer_routine()
1577 ha->mr.fw_critemp_timer_tick = in qlafx00_timer_routine()
1580 ha->mr.fw_critemp_timer_tick--; in qlafx00_timer_routine()
1583 if (ha->mr.host_info_resend) { in qlafx00_timer_routine()
1588 if (ha->mr.hinfo_resend_timer_tick == 0) { in qlafx00_timer_routine()
1589 ha->mr.host_info_resend = false; in qlafx00_timer_routine()
1591 ha->mr.hinfo_resend_timer_tick = in qlafx00_timer_routine()
1595 ha->mr.hinfo_resend_timer_tick--; in qlafx00_timer_routine()
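
The heartbeat half of qlafx00_timer_routine() compares the counter read from the firmware against ha->mr.old_fw_hbt_cnt every QLAFX00_HEARTBEAT_INTERVAL ticks: a change resets the miss counter, no change bumps it, and enough consecutive misses schedules an ISP abort. A minimal model of that bookkeeping (the threshold constant below is a placeholder, not the driver's value):

    /* Hedged sketch of the heartbeat-miss accounting in the timer routine. */
    #include <stdbool.h>
    #include <stdio.h>

    #define MISS_LIMIT 3   /* placeholder threshold */

    struct hbt_state {
        unsigned old_cnt;   /* mirrors ha->mr.old_fw_hbt_cnt */
        unsigned miss_cnt;  /* mirrors ha->mr.fw_hbt_miss_cnt */
    };

    /* Returns true when the caller should schedule an ISP reset. */
    static bool heartbeat_check(struct hbt_state *s, unsigned fw_heart_beat)
    {
        if (fw_heart_beat != s->old_cnt) {
            s->old_cnt = fw_heart_beat;
            s->miss_cnt = 0;
            return false;
        }
        if (++s->miss_cnt == MISS_LIMIT) {
            s->miss_cnt = 0;
            return true;
        }
        return false;
    }

    int main(void)
    {
        struct hbt_state s = { 0, 0 };
        unsigned samples[] = { 1, 2, 2, 2, 2 };  /* heartbeat stalls after tick 1 */

        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
            if (heartbeat_check(&s, samples[i]))
                printf("tick %u: heartbeat lost, reset needed\n", i);
        return 0;
    }
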
1614 struct qla_hw_data *ha = vha->hw; in qlafx00_reset_initialize() local
1622 ha->flags.mr_reset_hdlr_active = 1; in qlafx00_reset_initialize()
1631 ha->flags.mr_reset_hdlr_active = 0; in qlafx00_reset_initialize()
1648 struct qla_hw_data *ha = vha->hw; in qlafx00_abort_isp() local
1651 if (unlikely(pci_channel_offline(ha->pdev) && in qlafx00_abort_isp()
1652 ha->flags.pci_channel_io_perm_failure)) { in qlafx00_abort_isp()
1663 ha->isp_ops->reset_chip(vha); in qlafx00_abort_isp()
1666 QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS); in qlafx00_abort_isp()
1811 struct qla_hw_data *ha = vha->hw; in qlafx00_fx_disc() local
1861 fdisc->u.fxiocb.req_addr = dma_alloc_coherent(&ha->pdev->dev, in qlafx00_fx_disc()
1878 ha->mr.host_info_resend = true; in qlafx00_fx_disc()
1893 ha->pdev->device); in qlafx00_fx_disc()
1916 fdisc->u.fxiocb.rsp_addr = dma_alloc_coherent(&ha->pdev->dev, in qlafx00_fx_disc()
1957 ha->mr.extended_io_enabled = (pinfo->enabled_capabilities & in qlafx00_fx_disc()
1994 dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.rsp_len, in qlafx00_fx_disc()
1999 dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len, in qlafx00_fx_disc()
2021 struct qla_hw_data *ha = vha->hw; in qlafx00_initialize_adapter() local
2026 ha->flags.chip_reset_done = 0; in qlafx00_initialize_adapter()
2028 ha->flags.pci_channel_io_perm_failure = 0; in qlafx00_initialize_adapter()
2029 ha->flags.eeh_busy = 0; in qlafx00_initialize_adapter()
2035 ha->isp_abort_cnt = 0; in qlafx00_initialize_adapter()
2036 ha->beacon_blink_led = 0; in qlafx00_initialize_adapter()
2038 set_bit(0, ha->req_qid_map); in qlafx00_initialize_adapter()
2039 set_bit(0, ha->rsp_qid_map); in qlafx00_initialize_adapter()
2044 rval = ha->isp_ops->pci_config(vha); in qlafx00_initialize_adapter()
2065 rval = qla2x00_alloc_outstanding_cmds(ha, vha->req); in qlafx00_initialize_adapter()
2070 ha->flags.chip_reset_done = 1; in qlafx00_initialize_adapter()
2072 tempc = QLAFX00_GET_TEMPERATURE(ha); in qlafx00_initialize_adapter()
2103 struct qla_hw_data *ha = ((struct scsi_qla_host *) in qlafx00_get_host_speed() local
2107 switch (ha->link_data_rate) { in qlafx00_get_host_speed()
2287 struct qla_hw_data *ha = vha->hw; in qlafx00_status_entry() local
2302 req = ha->req_q_map[que]; in qlafx00_status_entry()
2412 if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha)) in qlafx00_status_entry()
2418 if ((IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha)) in qlafx00_status_entry()
2538 sp->done(ha, sp, res); in qlafx00_status_entry()
2552 struct qla_hw_data *ha = rsp->hw; in qlafx00_status_cont_entry() local
2553 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); in qlafx00_status_cont_entry()
2615 sp->done(ha, sp, cp->result); in qlafx00_status_cont_entry()
2629 struct qla_hw_data *ha = vha->hw; in qlafx00_multistatus_entry() local
2653 req = ha->req_q_map[que]; in qlafx00_multistatus_entry()
2683 struct qla_hw_data *ha = vha->hw; in qlafx00_error_entry() local
2692 req = ha->req_q_map[que]; in qlafx00_error_entry()
2696 sp->done(ha, sp, res); in qlafx00_error_entry()
2787 struct qla_hw_data *ha = vha->hw; in qlafx00_async_event() local
2791 reg = &ha->iobase->ispfx00; in qlafx00_async_event()
2793 switch (ha->aenmb[0]) { in qlafx00_async_event()
2796 "ISP System Error - mbx1=%x\n", ha->aenmb[0]); in qlafx00_async_event()
2808 ha->aenmb[1] = RD_REG_DWORD(&reg->aenmailbox1); in qlafx00_async_event()
2809 ha->aenmb[2] = RD_REG_DWORD(&reg->aenmailbox2); in qlafx00_async_event()
2810 ha->aenmb[3] = RD_REG_DWORD(&reg->aenmailbox3); in qlafx00_async_event()
2814 ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3]); in qlafx00_async_event()
2822 ha->aenmb[0]); in qlafx00_async_event()
2829 ha->aenmb[0]); in qlafx00_async_event()
2836 ha->aenmb[0]); in qlafx00_async_event()
2840 ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1); in qlafx00_async_event()
2841 ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2); in qlafx00_async_event()
2842 ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3); in qlafx00_async_event()
2843 ha->aenmb[4] = RD_REG_WORD(&reg->aenmailbox4); in qlafx00_async_event()
2844 ha->aenmb[5] = RD_REG_WORD(&reg->aenmailbox5); in qlafx00_async_event()
2845 ha->aenmb[6] = RD_REG_WORD(&reg->aenmailbox6); in qlafx00_async_event()
2846 ha->aenmb[7] = RD_REG_WORD(&reg->aenmailbox7); in qlafx00_async_event()
2849 ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3], in qlafx00_async_event()
2850 ha->aenmb[4], ha->aenmb[5], ha->aenmb[6], ha->aenmb[7]); in qlafx00_async_event()
2853 qlafx00_post_aenfx_work(vha, ha->aenmb[0], in qlafx00_async_event()
2854 (uint32_t *)ha->aenmb, data_size); in qlafx00_async_event()
2868 struct qla_hw_data *ha = vha->hw; in qlafx00_mbx_completion() local
2869 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00; in qlafx00_mbx_completion()
2871 if (!ha->mcp32) in qlafx00_mbx_completion()
2875 ha->flags.mbox_int = 1; in qlafx00_mbx_completion()
2876 ha->mailbox_out32[0] = mb0; in qlafx00_mbx_completion()
2879 for (cnt = 1; cnt < ha->mbx_count; cnt++) { in qlafx00_mbx_completion()
2880 ha->mailbox_out32[cnt] = RD_REG_DWORD(wptr); in qlafx00_mbx_completion()
2898 struct qla_hw_data *ha; in qlafx00_intr_handler() local
2916 ha = rsp->hw; in qlafx00_intr_handler()
2917 reg = &ha->iobase->ispfx00; in qlafx00_intr_handler()
2920 if (unlikely(pci_channel_offline(ha->pdev))) in qlafx00_intr_handler()
2923 spin_lock_irqsave(&ha->hardware_lock, flags); in qlafx00_intr_handler()
2924 vha = pci_get_drvdata(ha->pdev); in qlafx00_intr_handler()
2926 stat = QLAFX00_RD_INTR_REG(ha); in qlafx00_intr_handler()
2940 ha->aenmb[0] = RD_REG_WORD(&reg->aenmailbox0); in qlafx00_intr_handler()
2949 QLAFX00_CLR_INTR_REG(ha, clr_intr); in qlafx00_intr_handler()
2950 QLAFX00_RD_INTR_REG(ha); in qlafx00_intr_handler()
2953 qla2x00_handle_mbx_completion(ha, status); in qlafx00_intr_handler()
2954 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlafx00_intr_handler()
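
qlafx00_intr_handler() runs under hardware_lock and repeatedly reads the aggregate interrupt status with QLAFX00_RD_INTR_REG(), dispatching mailbox completions, async events from aenmailbox0, and response-queue work before clearing the handled bits and reading the register back to flush the write. A simplified decode loop follows; the bit names and values are invented, and only the dispatch-then-clear shape follows the driver:

    /* Hedged sketch of the interrupt dispatch loop; status bits are made up. */
    #include <stdint.h>
    #include <stdio.h>

    #define INTR_MBX_DONE  0x1   /* invented stand-ins for the FX00 status bits */
    #define INTR_ASYNC_EVT 0x2
    #define INTR_RSP_DONE  0x4

    static void dispatch(uint32_t stat)
    {
        if (stat & INTR_MBX_DONE)
            printf("mailbox completion\n");        /* -> qlafx00_mbx_completion() */
        if (stat & INTR_ASYNC_EVT)
            printf("async event (aenmailbox0)\n"); /* -> qlafx00_async_event() */
        if (stat & INTR_RSP_DONE)
            printf("response queue work\n");
    }

    int main(void)
    {
        /* Pretend sequence of status-register reads; 0 means nothing pending. */
        uint32_t reads[] = { INTR_MBX_DONE | INTR_RSP_DONE, INTR_ASYNC_EVT, 0 };

        for (int i = 0; reads[i]; i++) {
            dispatch(reads[i]);
            /* driver: QLAFX00_CLR_INTR_REG(ha, clr_intr); QLAFX00_RD_INTR_REG(ha); */
        }
        return 0;
    }
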
3083 struct qla_hw_data *ha = vha->hw; in qlafx00_start_scsi() local
3089 rsp = ha->rsp_q_map[0]; in qlafx00_start_scsi()
3096 spin_lock_irqsave(&ha->hardware_lock, flags); in qlafx00_start_scsi()
3112 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), in qlafx00_start_scsi()
3188 QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code); in qlafx00_start_scsi()
3190 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlafx00_start_scsi()
3197 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlafx00_start_scsi()