Lines Matching refs: ha

51 struct qla_hw_data *ha = fcport->vha->hw; in qla2x00_sp_timeout() local
55 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_sp_timeout()
56 req = ha->req_q_map[0]; in qla2x00_sp_timeout()
61 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_sp_timeout()
81 struct qla_hw_data *ha = vha->hw; in qla2x00_get_async_timeout() local
84 tmo = ha->r_a_tov / 10 * 2; in qla2x00_get_async_timeout()
85 if (IS_QLAFX00(ha)) { in qla2x00_get_async_timeout()
87 } else if (!IS_FWI2_CAPABLE(ha)) { in qla2x00_get_async_timeout()
92 tmo = ha->login_timeout; in qla2x00_get_async_timeout()
420 struct qla_hw_data *ha = vha->hw; in qla24xx_async_abort_command() local
423 spin_lock_irqsave(&ha->hardware_lock, flags); in qla24xx_async_abort_command()
428 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla24xx_async_abort_command()
537 struct qla_hw_data *ha = vha->hw; in qla83xx_nic_core_fw_load() local
546 ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT; in qla83xx_nic_core_fw_load()
547 ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT; in qla83xx_nic_core_fw_load()
567 if (ha->flags.nic_core_reset_owner) { in qla83xx_nic_core_fw_load()
588 idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2)); in qla83xx_nic_core_fw_load()
591 if (ha->flags.nic_core_reset_owner) { in qla83xx_nic_core_fw_load()
620 struct qla_hw_data *ha = vha->hw; in qla2x00_initialize_adapter() local
621 struct req_que *req = ha->req_q_map[0]; in qla2x00_initialize_adapter()
625 ha->flags.chip_reset_done = 0; in qla2x00_initialize_adapter()
627 ha->flags.pci_channel_io_perm_failure = 0; in qla2x00_initialize_adapter()
628 ha->flags.eeh_busy = 0; in qla2x00_initialize_adapter()
636 ha->isp_abort_cnt = 0; in qla2x00_initialize_adapter()
637 ha->beacon_blink_led = 0; in qla2x00_initialize_adapter()
639 set_bit(0, ha->req_qid_map); in qla2x00_initialize_adapter()
640 set_bit(0, ha->rsp_qid_map); in qla2x00_initialize_adapter()
644 rval = ha->isp_ops->pci_config(vha); in qla2x00_initialize_adapter()
651 ha->isp_ops->reset_chip(vha); in qla2x00_initialize_adapter()
660 if (IS_QLA8044(ha)) { in qla2x00_initialize_adapter()
671 ha->isp_ops->get_flash_version(vha, req->ring); in qla2x00_initialize_adapter()
675 ha->isp_ops->nvram_config(vha); in qla2x00_initialize_adapter()
677 if (ha->flags.disable_serdes) { in qla2x00_initialize_adapter()
688 rval = ha->isp_ops->chip_diag(vha); in qla2x00_initialize_adapter()
696 if (IS_QLA84XX(ha)) { in qla2x00_initialize_adapter()
697 ha->cs84xx = qla84xx_get_chip(vha); in qla2x00_initialize_adapter()
698 if (!ha->cs84xx) { in qla2x00_initialize_adapter()
708 ha->flags.chip_reset_done = 1; in qla2x00_initialize_adapter()
710 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) { in qla2x00_initialize_adapter()
721 if (IS_QLA8031(ha)) { in qla2x00_initialize_adapter()
728 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) in qla2x00_initialize_adapter()
731 if (IS_P3P_TYPE(ha)) in qla2x00_initialize_adapter()
750 struct qla_hw_data *ha = vha->hw; in qla2100_pci_config() local
751 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2100_pci_config()
753 pci_set_master(ha->pdev); in qla2100_pci_config()
754 pci_try_set_mwi(ha->pdev); in qla2100_pci_config()
756 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); in qla2100_pci_config()
758 pci_write_config_word(ha->pdev, PCI_COMMAND, w); in qla2100_pci_config()
760 pci_disable_rom(ha->pdev); in qla2100_pci_config()
763 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2100_pci_config()
764 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status); in qla2100_pci_config()
765 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2100_pci_config()
782 struct qla_hw_data *ha = vha->hw; in qla2300_pci_config() local
783 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2300_pci_config()
785 pci_set_master(ha->pdev); in qla2300_pci_config()
786 pci_try_set_mwi(ha->pdev); in qla2300_pci_config()
788 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); in qla2300_pci_config()
791 if (IS_QLA2322(ha) || IS_QLA6322(ha)) in qla2300_pci_config()
793 pci_write_config_word(ha->pdev, PCI_COMMAND, w); in qla2300_pci_config()
802 if (IS_QLA2300(ha)) { in qla2300_pci_config()
803 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2300_pci_config()
819 ha->fb_rev = RD_FB_CMD_REG(ha, reg); in qla2300_pci_config()
821 if (ha->fb_rev == FPM_2300) in qla2300_pci_config()
822 pci_clear_mwi(ha->pdev); in qla2300_pci_config()
837 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2300_pci_config()
840 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80); in qla2300_pci_config()
842 pci_disable_rom(ha->pdev); in qla2300_pci_config()
845 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2300_pci_config()
846 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status); in qla2300_pci_config()
847 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2300_pci_config()
863 struct qla_hw_data *ha = vha->hw; in qla24xx_pci_config() local
864 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; in qla24xx_pci_config()
866 pci_set_master(ha->pdev); in qla24xx_pci_config()
867 pci_try_set_mwi(ha->pdev); in qla24xx_pci_config()
869 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); in qla24xx_pci_config()
872 pci_write_config_word(ha->pdev, PCI_COMMAND, w); in qla24xx_pci_config()
874 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80); in qla24xx_pci_config()
877 if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX)) in qla24xx_pci_config()
878 pcix_set_mmrbc(ha->pdev, 2048); in qla24xx_pci_config()
881 if (pci_is_pcie(ha->pdev)) in qla24xx_pci_config()
882 pcie_set_readrq(ha->pdev, 4096); in qla24xx_pci_config()
884 pci_disable_rom(ha->pdev); in qla24xx_pci_config()
886 ha->chip_revision = ha->pdev->revision; in qla24xx_pci_config()
889 spin_lock_irqsave(&ha->hardware_lock, flags); in qla24xx_pci_config()
890 ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status); in qla24xx_pci_config()
891 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla24xx_pci_config()
906 struct qla_hw_data *ha = vha->hw; in qla25xx_pci_config() local
908 pci_set_master(ha->pdev); in qla25xx_pci_config()
909 pci_try_set_mwi(ha->pdev); in qla25xx_pci_config()
911 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); in qla25xx_pci_config()
914 pci_write_config_word(ha->pdev, PCI_COMMAND, w); in qla25xx_pci_config()
917 if (pci_is_pcie(ha->pdev)) in qla25xx_pci_config()
918 pcie_set_readrq(ha->pdev, 4096); in qla25xx_pci_config()
920 pci_disable_rom(ha->pdev); in qla25xx_pci_config()
922 ha->chip_revision = ha->pdev->revision; in qla25xx_pci_config()
939 struct qla_hw_data *ha = vha->hw; in qla2x00_isp_firmware() local
944 if (ha->flags.disable_risc_code_load) { in qla2x00_isp_firmware()
948 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address); in qla2x00_isp_firmware()
973 struct qla_hw_data *ha = vha->hw; in qla2x00_reset_chip() local
974 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2x00_reset_chip()
978 if (unlikely(pci_channel_offline(ha->pdev))) in qla2x00_reset_chip()
981 ha->isp_ops->disable_intrs(ha); in qla2x00_reset_chip()
983 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_reset_chip()
987 pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd); in qla2x00_reset_chip()
989 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd); in qla2x00_reset_chip()
991 if (!IS_QLA2100(ha)) { in qla2x00_reset_chip()
994 if (IS_QLA2200(ha) || IS_QLA2300(ha)) { in qla2x00_reset_chip()
1015 if (!IS_QLA2200(ha)) { in qla2x00_reset_chip()
1025 if (IS_QLA2200(ha)) { in qla2x00_reset_chip()
1026 WRT_FB_CMD_REG(ha, reg, 0xa000); in qla2x00_reset_chip()
1027 RD_FB_CMD_REG(ha, reg); /* PCI Posting. */ in qla2x00_reset_chip()
1029 WRT_FB_CMD_REG(ha, reg, 0x00fc); in qla2x00_reset_chip()
1033 if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0) in qla2x00_reset_chip()
1059 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { in qla2x00_reset_chip()
1084 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { in qla2x00_reset_chip()
1086 if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY) in qla2x00_reset_chip()
1096 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd); in qla2x00_reset_chip()
1099 if (!IS_QLA2100(ha)) { in qla2x00_reset_chip()
1104 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_reset_chip()
1133 struct qla_hw_data *ha = vha->hw; in qla24xx_reset_risc() local
1134 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; in qla24xx_reset_risc()
1140 spin_lock_irqsave(&ha->hardware_lock, flags); in qla24xx_reset_risc()
1152 set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags); in qla24xx_reset_risc()
1162 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd); in qla24xx_reset_risc()
1178 set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags); in qla24xx_reset_risc()
1196 set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags); in qla24xx_reset_risc()
1239 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags); in qla24xx_reset_risc()
1246 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla24xx_reset_risc()
1250 IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling"); in qla24xx_reset_risc()
1252 if (IS_NOPOLLING_TYPE(ha)) in qla24xx_reset_risc()
1253 ha->isp_ops->enable_intrs(ha); in qla24xx_reset_risc()
1280 struct qla_hw_data *ha = vha->hw; in qla25xx_manipulate_risc_semaphore() local
1287 if (!IS_QLA25XX(ha) && !IS_QLA2031(ha)) in qla25xx_manipulate_risc_semaphore()
1344 struct qla_hw_data *ha = vha->hw; in qla24xx_reset_chip() local
1346 if (pci_channel_offline(ha->pdev) && in qla24xx_reset_chip()
1347 ha->flags.pci_channel_io_perm_failure) { in qla24xx_reset_chip()
1351 ha->isp_ops->disable_intrs(ha); in qla24xx_reset_chip()
1369 struct qla_hw_data *ha = vha->hw; in qla2x00_chip_diag() local
1370 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2x00_chip_diag()
1375 struct req_que *req = ha->req_q_map[0]; in qla2x00_chip_diag()
1383 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_chip_diag()
1411 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { in qla2x00_chip_diag()
1412 data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0)); in qla2x00_chip_diag()
1415 data = RD_MAILBOX_REG(ha, reg, 0); in qla2x00_chip_diag()
1427 mb[1] = RD_MAILBOX_REG(ha, reg, 1); in qla2x00_chip_diag()
1428 mb[2] = RD_MAILBOX_REG(ha, reg, 2); in qla2x00_chip_diag()
1429 mb[3] = RD_MAILBOX_REG(ha, reg, 3); in qla2x00_chip_diag()
1430 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4)); in qla2x00_chip_diag()
1439 ha->product_id[0] = mb[1]; in qla2x00_chip_diag()
1440 ha->product_id[1] = mb[2]; in qla2x00_chip_diag()
1441 ha->product_id[2] = mb[3]; in qla2x00_chip_diag()
1442 ha->product_id[3] = mb[4]; in qla2x00_chip_diag()
1446 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024; in qla2x00_chip_diag()
1448 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * in qla2x00_chip_diag()
1451 if (IS_QLA2200(ha) && in qla2x00_chip_diag()
1452 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) { in qla2x00_chip_diag()
1456 ha->device_type |= DT_ISP2200A; in qla2x00_chip_diag()
1457 ha->fw_transfer_size = 128; in qla2x00_chip_diag()
1461 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_chip_diag()
1471 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_chip_diag()
1478 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_chip_diag()
1493 struct qla_hw_data *ha = vha->hw; in qla24xx_chip_diag() local
1494 struct req_que *req = ha->req_q_map[0]; in qla24xx_chip_diag()
1496 if (IS_P3P_TYPE(ha)) in qla24xx_chip_diag()
1499 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length; in qla24xx_chip_diag()
1521 struct qla_hw_data *ha = vha->hw; in qla2x00_alloc_fw_dump() local
1522 struct req_que *req = ha->req_q_map[0]; in qla2x00_alloc_fw_dump()
1523 struct rsp_que *rsp = ha->rsp_q_map[0]; in qla2x00_alloc_fw_dump()
1525 if (ha->fw_dump) { in qla2x00_alloc_fw_dump()
1531 ha->fw_dumped = 0; in qla2x00_alloc_fw_dump()
1532 ha->fw_dump_cap_flags = 0; in qla2x00_alloc_fw_dump()
1536 if (IS_QLA27XX(ha)) in qla2x00_alloc_fw_dump()
1539 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { in qla2x00_alloc_fw_dump()
1541 } else if (IS_QLA23XX(ha)) { in qla2x00_alloc_fw_dump()
1543 mem_size = (ha->fw_memory_size - 0x11000 + 1) * in qla2x00_alloc_fw_dump()
1545 } else if (IS_FWI2_CAPABLE(ha)) { in qla2x00_alloc_fw_dump()
1546 if (IS_QLA83XX(ha)) in qla2x00_alloc_fw_dump()
1548 else if (IS_QLA81XX(ha)) in qla2x00_alloc_fw_dump()
1550 else if (IS_QLA25XX(ha)) in qla2x00_alloc_fw_dump()
1555 mem_size = (ha->fw_memory_size - 0x100000 + 1) * in qla2x00_alloc_fw_dump()
1557 if (ha->mqenable) { in qla2x00_alloc_fw_dump()
1558 if (!IS_QLA83XX(ha)) in qla2x00_alloc_fw_dump()
1564 mq_size += ha->max_req_queues * in qla2x00_alloc_fw_dump()
1566 mq_size += ha->max_rsp_queues * in qla2x00_alloc_fw_dump()
1569 if (ha->tgt.atio_ring) in qla2x00_alloc_fw_dump()
1570 mq_size += ha->tgt.atio_q_length * sizeof(request_t); in qla2x00_alloc_fw_dump()
1572 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && in qla2x00_alloc_fw_dump()
1573 !IS_QLA27XX(ha)) in qla2x00_alloc_fw_dump()
1577 if (ha->fce) in qla2x00_alloc_fw_dump()
1578 dma_free_coherent(&ha->pdev->dev, in qla2x00_alloc_fw_dump()
1579 FCE_SIZE, ha->fce, ha->fce_dma); in qla2x00_alloc_fw_dump()
1582 tc = dma_zalloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, in qla2x00_alloc_fw_dump()
1592 ha->fce_mb, &ha->fce_bufs); in qla2x00_alloc_fw_dump()
1596 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, in qla2x00_alloc_fw_dump()
1598 ha->flags.fce_enabled = 0; in qla2x00_alloc_fw_dump()
1605 ha->flags.fce_enabled = 1; in qla2x00_alloc_fw_dump()
1606 ha->fce_dma = tc_dma; in qla2x00_alloc_fw_dump()
1607 ha->fce = tc; in qla2x00_alloc_fw_dump()
1610 if (ha->eft) in qla2x00_alloc_fw_dump()
1611 dma_free_coherent(&ha->pdev->dev, in qla2x00_alloc_fw_dump()
1612 EFT_SIZE, ha->eft, ha->eft_dma); in qla2x00_alloc_fw_dump()
1615 tc = dma_zalloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, in qla2x00_alloc_fw_dump()
1628 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, in qla2x00_alloc_fw_dump()
1636 ha->eft_dma = tc_dma; in qla2x00_alloc_fw_dump()
1637 ha->eft = tc; in qla2x00_alloc_fw_dump()
1641 if (IS_QLA27XX(ha)) { in qla2x00_alloc_fw_dump()
1642 if (!ha->fw_dump_template) { in qla2x00_alloc_fw_dump()
1657 ha->chain_offset = dump_size; in qla2x00_alloc_fw_dump()
1661 ha->fw_dump = vmalloc(dump_size); in qla2x00_alloc_fw_dump()
1662 if (!ha->fw_dump) { in qla2x00_alloc_fw_dump()
1667 if (ha->fce) { in qla2x00_alloc_fw_dump()
1668 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, in qla2x00_alloc_fw_dump()
1669 ha->fce_dma); in qla2x00_alloc_fw_dump()
1670 ha->fce = NULL; in qla2x00_alloc_fw_dump()
1671 ha->fce_dma = 0; in qla2x00_alloc_fw_dump()
1674 if (ha->eft) { in qla2x00_alloc_fw_dump()
1675 dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft, in qla2x00_alloc_fw_dump()
1676 ha->eft_dma); in qla2x00_alloc_fw_dump()
1677 ha->eft = NULL; in qla2x00_alloc_fw_dump()
1678 ha->eft_dma = 0; in qla2x00_alloc_fw_dump()
1682 ha->fw_dump_len = dump_size; in qla2x00_alloc_fw_dump()
1686 if (IS_QLA27XX(ha)) in qla2x00_alloc_fw_dump()
1689 ha->fw_dump->signature[0] = 'Q'; in qla2x00_alloc_fw_dump()
1690 ha->fw_dump->signature[1] = 'L'; in qla2x00_alloc_fw_dump()
1691 ha->fw_dump->signature[2] = 'G'; in qla2x00_alloc_fw_dump()
1692 ha->fw_dump->signature[3] = 'C'; in qla2x00_alloc_fw_dump()
1693 ha->fw_dump->version = __constant_htonl(1); in qla2x00_alloc_fw_dump()
1695 ha->fw_dump->fixed_size = htonl(fixed_size); in qla2x00_alloc_fw_dump()
1696 ha->fw_dump->mem_size = htonl(mem_size); in qla2x00_alloc_fw_dump()
1697 ha->fw_dump->req_q_size = htonl(req_q_size); in qla2x00_alloc_fw_dump()
1698 ha->fw_dump->rsp_q_size = htonl(rsp_q_size); in qla2x00_alloc_fw_dump()
1700 ha->fw_dump->eft_size = htonl(eft_size); in qla2x00_alloc_fw_dump()
1701 ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma)); in qla2x00_alloc_fw_dump()
1702 ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma)); in qla2x00_alloc_fw_dump()
1704 ha->fw_dump->header_size = in qla2x00_alloc_fw_dump()
1756 qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req) in qla2x00_alloc_outstanding_cmds() argument
1762 if (!IS_FWI2_CAPABLE(ha) || (ha->mqiobase && in qla2x00_alloc_outstanding_cmds()
1766 if (ha->fw_xcb_count <= ha->fw_iocb_count) in qla2x00_alloc_outstanding_cmds()
1767 req->num_outstanding_cmds = ha->fw_xcb_count; in qla2x00_alloc_outstanding_cmds()
1769 req->num_outstanding_cmds = ha->fw_iocb_count; in qla2x00_alloc_outstanding_cmds()
1807 struct qla_hw_data *ha = vha->hw; in qla2x00_setup_chip() local
1808 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2x00_setup_chip()
1812 if (IS_P3P_TYPE(ha)) { in qla2x00_setup_chip()
1813 rval = ha->isp_ops->load_risc(vha, &srisc_address); in qla2x00_setup_chip()
1821 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) { in qla2x00_setup_chip()
1823 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_setup_chip()
1826 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_setup_chip()
1832 rval = ha->isp_ops->load_risc(vha, &srisc_address); in qla2x00_setup_chip()
1847 fw_major_version = ha->fw_major_version; in qla2x00_setup_chip()
1848 if (IS_P3P_TYPE(ha)) in qla2x00_setup_chip()
1854 ha->flags.npiv_supported = 0; in qla2x00_setup_chip()
1855 if (IS_QLA2XXX_MIDTYPE(ha) && in qla2x00_setup_chip()
1856 (ha->fw_attributes & BIT_2)) { in qla2x00_setup_chip()
1857 ha->flags.npiv_supported = 1; in qla2x00_setup_chip()
1858 if ((!ha->max_npiv_vports) || in qla2x00_setup_chip()
1859 ((ha->max_npiv_vports + 1) % in qla2x00_setup_chip()
1861 ha->max_npiv_vports = in qla2x00_setup_chip()
1865 &ha->fw_xcb_count, NULL, &ha->fw_iocb_count, in qla2x00_setup_chip()
1866 &ha->max_npiv_vports, NULL); in qla2x00_setup_chip()
1872 rval = qla2x00_alloc_outstanding_cmds(ha, in qla2x00_setup_chip()
1878 && !(IS_P3P_TYPE(ha))) in qla2x00_setup_chip()
1891 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) { in qla2x00_setup_chip()
1893 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_setup_chip()
1894 if (IS_QLA2300(ha)) in qla2x00_setup_chip()
1901 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_setup_chip()
1904 if (IS_QLA27XX(ha)) in qla2x00_setup_chip()
1905 ha->flags.fac_supported = 1; in qla2x00_setup_chip()
1906 else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) { in qla2x00_setup_chip()
1911 ha->flags.fac_supported = 1; in qla2x00_setup_chip()
1912 ha->fdt_block_size = size << 2; in qla2x00_setup_chip()
1916 ha->fw_major_version, ha->fw_minor_version, in qla2x00_setup_chip()
1917 ha->fw_subminor_version); in qla2x00_setup_chip()
1919 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { in qla2x00_setup_chip()
1920 ha->flags.fac_supported = 0; in qla2x00_setup_chip()
1969 struct qla_hw_data *ha = vha->hw; in qla2x00_update_fw_options() local
1971 memset(ha->fw_options, 0, sizeof(ha->fw_options)); in qla2x00_update_fw_options()
1972 qla2x00_get_fw_options(vha, ha->fw_options); in qla2x00_update_fw_options()
1974 if (IS_QLA2100(ha) || IS_QLA2200(ha)) in qla2x00_update_fw_options()
1981 (uint8_t *)&ha->fw_seriallink_options, in qla2x00_update_fw_options()
1982 sizeof(ha->fw_seriallink_options)); in qla2x00_update_fw_options()
1984 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING; in qla2x00_update_fw_options()
1985 if (ha->fw_seriallink_options[3] & BIT_2) { in qla2x00_update_fw_options()
1986 ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING; in qla2x00_update_fw_options()
1989 swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0); in qla2x00_update_fw_options()
1990 emphasis = (ha->fw_seriallink_options[2] & in qla2x00_update_fw_options()
1992 tx_sens = ha->fw_seriallink_options[0] & in qla2x00_update_fw_options()
1994 rx_sens = (ha->fw_seriallink_options[0] & in qla2x00_update_fw_options()
1996 ha->fw_options[10] = (emphasis << 14) | (swing << 8); in qla2x00_update_fw_options()
1997 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) { in qla2x00_update_fw_options()
2000 ha->fw_options[10] |= (tx_sens << 4) | rx_sens; in qla2x00_update_fw_options()
2001 } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) in qla2x00_update_fw_options()
2002 ha->fw_options[10] |= BIT_5 | in qla2x00_update_fw_options()
2007 swing = (ha->fw_seriallink_options[2] & in qla2x00_update_fw_options()
2009 emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0); in qla2x00_update_fw_options()
2010 tx_sens = ha->fw_seriallink_options[1] & in qla2x00_update_fw_options()
2012 rx_sens = (ha->fw_seriallink_options[1] & in qla2x00_update_fw_options()
2014 ha->fw_options[11] = (emphasis << 14) | (swing << 8); in qla2x00_update_fw_options()
2015 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) { in qla2x00_update_fw_options()
2018 ha->fw_options[11] |= (tx_sens << 4) | rx_sens; in qla2x00_update_fw_options()
2019 } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) in qla2x00_update_fw_options()
2020 ha->fw_options[11] |= BIT_5 | in qla2x00_update_fw_options()
2027 ha->fw_options[3] |= BIT_13; in qla2x00_update_fw_options()
2030 if (ha->flags.enable_led_scheme) in qla2x00_update_fw_options()
2031 ha->fw_options[2] |= BIT_12; in qla2x00_update_fw_options()
2034 if (IS_QLA6312(ha)) in qla2x00_update_fw_options()
2035 ha->fw_options[2] |= BIT_13; in qla2x00_update_fw_options()
2038 qla2x00_set_fw_options(vha, ha->fw_options); in qla2x00_update_fw_options()
2045 struct qla_hw_data *ha = vha->hw; in qla24xx_update_fw_options() local
2047 if (IS_P3P_TYPE(ha)) in qla24xx_update_fw_options()
2051 if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0) in qla24xx_update_fw_options()
2055 le16_to_cpu(ha->fw_seriallink_options24[1]), in qla24xx_update_fw_options()
2056 le16_to_cpu(ha->fw_seriallink_options24[2]), in qla24xx_update_fw_options()
2057 le16_to_cpu(ha->fw_seriallink_options24[3])); in qla24xx_update_fw_options()
2067 struct qla_hw_data *ha = vha->hw; in qla2x00_config_rings() local
2068 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2x00_config_rings()
2069 struct req_que *req = ha->req_q_map[0]; in qla2x00_config_rings()
2070 struct rsp_que *rsp = ha->rsp_q_map[0]; in qla2x00_config_rings()
2073 ha->init_cb->request_q_outpointer = __constant_cpu_to_le16(0); in qla2x00_config_rings()
2074 ha->init_cb->response_q_inpointer = __constant_cpu_to_le16(0); in qla2x00_config_rings()
2075 ha->init_cb->request_q_length = cpu_to_le16(req->length); in qla2x00_config_rings()
2076 ha->init_cb->response_q_length = cpu_to_le16(rsp->length); in qla2x00_config_rings()
2077 ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma)); in qla2x00_config_rings()
2078 ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma)); in qla2x00_config_rings()
2079 ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma)); in qla2x00_config_rings()
2080 ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma)); in qla2x00_config_rings()
2082 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0); in qla2x00_config_rings()
2083 WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0); in qla2x00_config_rings()
2084 WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0); in qla2x00_config_rings()
2085 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0); in qla2x00_config_rings()
2086 RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */ in qla2x00_config_rings()
2092 struct qla_hw_data *ha = vha->hw; in qla24xx_config_rings() local
2093 device_reg_t __iomem *reg = ISP_QUE_REG(ha, 0); in qla24xx_config_rings()
2094 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp; in qla24xx_config_rings()
2098 struct req_que *req = ha->req_q_map[0]; in qla24xx_config_rings()
2099 struct rsp_que *rsp = ha->rsp_q_map[0]; in qla24xx_config_rings()
2102 icb = (struct init_cb_24xx *)ha->init_cb; in qla24xx_config_rings()
2114 icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length); in qla24xx_config_rings()
2115 icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma)); in qla24xx_config_rings()
2116 icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma)); in qla24xx_config_rings()
2118 if (IS_SHADOW_REG_CAPABLE(ha)) in qla24xx_config_rings()
2122 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { in qla24xx_config_rings()
2125 if (ha->flags.msix_enabled) { in qla24xx_config_rings()
2126 msix = &ha->msix_entries[1]; in qla24xx_config_rings()
2142 if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) && in qla24xx_config_rings()
2143 (ha->flags.msix_enabled)) { in qla24xx_config_rings()
2146 ha->flags.disable_msix_handshake = 1; in qla24xx_config_rings()
2186 struct qla_hw_data *ha = vha->hw; in qla2x00_init_rings() local
2190 (struct mid_init_cb_24xx *) ha->init_cb; in qla2x00_init_rings()
2192 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_init_rings()
2195 for (que = 0; que < ha->max_req_queues; que++) { in qla2x00_init_rings()
2196 req = ha->req_q_map[que]; in qla2x00_init_rings()
2197 if (!req || !test_bit(que, ha->req_qid_map)) in qla2x00_init_rings()
2212 for (que = 0; que < ha->max_rsp_queues; que++) { in qla2x00_init_rings()
2213 rsp = ha->rsp_q_map[que]; in qla2x00_init_rings()
2214 if (!rsp || !test_bit(que, ha->rsp_qid_map)) in qla2x00_init_rings()
2219 if (IS_QLAFX00(ha)) in qla2x00_init_rings()
2225 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring; in qla2x00_init_rings()
2226 ha->tgt.atio_ring_index = 0; in qla2x00_init_rings()
2230 ha->isp_ops->config_rings(vha); in qla2x00_init_rings()
2232 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_init_rings()
2236 if (IS_QLAFX00(ha)) { in qla2x00_init_rings()
2237 rval = qlafx00_init_firmware(vha, ha->init_cb_size); in qla2x00_init_rings()
2242 ha->isp_ops->update_fw_options(vha); in qla2x00_init_rings()
2244 if (ha->flags.npiv_supported) { in qla2x00_init_rings()
2245 if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha)) in qla2x00_init_rings()
2246 ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1; in qla2x00_init_rings()
2247 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports); in qla2x00_init_rings()
2250 if (IS_FWI2_CAPABLE(ha)) { in qla2x00_init_rings()
2253 cpu_to_le16(ha->fw_xcb_count); in qla2x00_init_rings()
2255 if (IS_DPORT_CAPABLE(ha)) in qla2x00_init_rings()
2259 ha->flags.fawwpn_enabled = in qla2x00_init_rings()
2262 (ha->flags.fawwpn_enabled) ? "enabled" : "disabled"); in qla2x00_init_rings()
2265 rval = qla2x00_init_firmware(vha, ha->init_cb_size); in qla2x00_init_rings()
2292 struct qla_hw_data *ha = vha->hw; in qla2x00_fw_ready() local
2300 if (IS_P3P_TYPE(ha)) in qla2x00_fw_ready()
2309 if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) { in qla2x00_fw_ready()
2331 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) { in qla2x00_fw_ready()
2362 qla2x00_get_retry_cnt(vha, &ha->retry_count, in qla2x00_fw_ready()
2363 &ha->login_timeout, &ha->r_a_tov); in qla2x00_fw_ready()
2387 ha->flags.isp82xx_fw_hung) in qla2x00_fw_ready()
2434 struct qla_hw_data *ha = vha->hw; in qla2x00_configure_hba() local
2436 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); in qla2x00_configure_hba()
2442 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) || in qla2x00_configure_hba()
2443 IS_CNA_CAPABLE(ha) || in qla2x00_configure_hba()
2450 if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) && in qla2x00_configure_hba()
2471 ha->min_external_loopid = SNS_FIRST_LOOP_ID; in qla2x00_configure_hba()
2472 ha->operating_mode = LOOP; in qla2x00_configure_hba()
2473 ha->switch_cap = 0; in qla2x00_configure_hba()
2478 ha->current_topology = ISP_CFG_NL; in qla2x00_configure_hba()
2484 ha->switch_cap = sw_cap; in qla2x00_configure_hba()
2485 ha->current_topology = ISP_CFG_FL; in qla2x00_configure_hba()
2491 ha->operating_mode = P2P; in qla2x00_configure_hba()
2492 ha->current_topology = ISP_CFG_N; in qla2x00_configure_hba()
2498 ha->switch_cap = sw_cap; in qla2x00_configure_hba()
2499 ha->operating_mode = P2P; in qla2x00_configure_hba()
2500 ha->current_topology = ISP_CFG_F; in qla2x00_configure_hba()
2507 ha->current_topology = ISP_CFG_NL; in qla2x00_configure_hba()
2518 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_configure_hba()
2520 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_configure_hba()
2536 struct qla_hw_data *ha = vha->hw; in qla2x00_set_model_info() local
2537 int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && in qla2x00_set_model_info()
2538 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha); in qla2x00_set_model_info()
2541 strncpy(ha->model_number, model, len); in qla2x00_set_model_info()
2542 st = en = ha->model_number; in qla2x00_set_model_info()
2550 index = (ha->pdev->subsystem_device & 0xff); in qla2x00_set_model_info()
2552 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && in qla2x00_set_model_info()
2554 strncpy(ha->model_desc, in qla2x00_set_model_info()
2556 sizeof(ha->model_desc) - 1); in qla2x00_set_model_info()
2558 index = (ha->pdev->subsystem_device & 0xff); in qla2x00_set_model_info()
2560 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && in qla2x00_set_model_info()
2562 strcpy(ha->model_number, in qla2x00_set_model_info()
2564 strncpy(ha->model_desc, in qla2x00_set_model_info()
2566 sizeof(ha->model_desc) - 1); in qla2x00_set_model_info()
2568 strcpy(ha->model_number, def); in qla2x00_set_model_info()
2571 if (IS_FWI2_CAPABLE(ha)) in qla2x00_set_model_info()
2572 qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc, in qla2x00_set_model_info()
2573 sizeof(ha->model_desc)); in qla2x00_set_model_info()
2582 struct qla_hw_data *ha = vha->hw; in qla2xxx_nvram_wwn_from_ofw() local
2583 struct pci_dev *pdev = ha->pdev; in qla2xxx_nvram_wwn_from_ofw()
2618 struct qla_hw_data *ha = vha->hw; in qla2x00_nvram_config() local
2619 init_cb_t *icb = ha->init_cb; in qla2x00_nvram_config()
2620 nvram_t *nv = ha->nvram; in qla2x00_nvram_config()
2621 uint8_t *ptr = ha->nvram; in qla2x00_nvram_config()
2622 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2x00_nvram_config()
2627 ha->nvram_size = sizeof(nvram_t); in qla2x00_nvram_config()
2628 ha->nvram_base = 0; in qla2x00_nvram_config()
2629 if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) in qla2x00_nvram_config()
2631 ha->nvram_base = 0x80; in qla2x00_nvram_config()
2634 ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size); in qla2x00_nvram_config()
2635 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++) in qla2x00_nvram_config()
2641 (uint8_t *)nv, ha->nvram_size); in qla2x00_nvram_config()
2658 memset(nv, 0, ha->nvram_size); in qla2x00_nvram_config()
2661 if (IS_QLA23XX(ha)) { in qla2x00_nvram_config()
2668 } else if (IS_QLA2200(ha)) { in qla2x00_nvram_config()
2674 } else if (IS_QLA2100(ha)) { in qla2x00_nvram_config()
2714 if (IS_QLA23XX(ha)) in qla2x00_nvram_config()
2720 memset(icb, 0, ha->init_cb_size); in qla2x00_nvram_config()
2730 if (IS_QLA23XX(ha)) { in qla2x00_nvram_config()
2736 if (IS_QLA2300(ha)) { in qla2x00_nvram_config()
2737 if (ha->fb_rev == FPM_2310) { in qla2x00_nvram_config()
2738 strcpy(ha->model_number, "QLA2310"); in qla2x00_nvram_config()
2740 strcpy(ha->model_number, "QLA2300"); in qla2x00_nvram_config()
2746 } else if (IS_QLA2200(ha)) { in qla2x00_nvram_config()
2758 strcpy(ha->model_number, "QLA22xx"); in qla2x00_nvram_config()
2760 strcpy(ha->model_number, "QLA2100"); in qla2x00_nvram_config()
2804 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0); in qla2x00_nvram_config()
2806 if (!IS_QLA2100(ha) && !IS_QLA2200(ha)) in qla2x00_nvram_config()
2807 ha->flags.disable_risc_code_load = 0; in qla2x00_nvram_config()
2808 ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0); in qla2x00_nvram_config()
2809 ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0); in qla2x00_nvram_config()
2810 ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0); in qla2x00_nvram_config()
2811 ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0; in qla2x00_nvram_config()
2812 ha->flags.disable_serdes = 0; in qla2x00_nvram_config()
2814 ha->operating_mode = in qla2x00_nvram_config()
2817 memcpy(ha->fw_seriallink_options, nv->seriallink_options, in qla2x00_nvram_config()
2818 sizeof(ha->fw_seriallink_options)); in qla2x00_nvram_config()
2821 ha->serial0 = icb->port_name[5]; in qla2x00_nvram_config()
2822 ha->serial1 = icb->port_name[6]; in qla2x00_nvram_config()
2823 ha->serial2 = icb->port_name[7]; in qla2x00_nvram_config()
2829 ha->retry_count = nv->retry_count; in qla2x00_nvram_config()
2836 ha->login_timeout = nv->login_timeout; in qla2x00_nvram_config()
2840 ha->r_a_tov = 100; in qla2x00_nvram_config()
2842 ha->loop_reset_delay = nv->reset_delay; in qla2x00_nvram_config()
2855 ha->loop_down_abort_time = in qla2x00_nvram_config()
2858 ha->link_down_timeout = nv->link_down_timeout; in qla2x00_nvram_config()
2859 ha->loop_down_abort_time = in qla2x00_nvram_config()
2860 (LOOP_DOWN_TIME - ha->link_down_timeout); in qla2x00_nvram_config()
2866 ha->port_down_retry_count = nv->port_down_retry_count; in qla2x00_nvram_config()
2868 ha->port_down_retry_count = qlport_down_retry; in qla2x00_nvram_config()
2870 ha->login_retry_count = nv->retry_count; in qla2x00_nvram_config()
2871 if (ha->port_down_retry_count == nv->port_down_retry_count && in qla2x00_nvram_config()
2872 ha->port_down_retry_count > 3) in qla2x00_nvram_config()
2873 ha->login_retry_count = ha->port_down_retry_count; in qla2x00_nvram_config()
2874 else if (ha->port_down_retry_count > (int)ha->login_retry_count) in qla2x00_nvram_config()
2875 ha->login_retry_count = ha->port_down_retry_count; in qla2x00_nvram_config()
2877 ha->login_retry_count = ql2xloginretrycount; in qla2x00_nvram_config()
2884 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { in qla2x00_nvram_config()
2897 ha->zio_mode = icb->add_firmware_options[0] & in qla2x00_nvram_config()
2899 ha->zio_timer = icb->interrupt_delay_timer ? in qla2x00_nvram_config()
2905 if (ha->zio_mode != QLA_ZIO_DISABLED) { in qla2x00_nvram_config()
2906 ha->zio_mode = QLA_ZIO_MODE_6; in qla2x00_nvram_config()
2910 ha->zio_mode, ha->zio_timer * 100); in qla2x00_nvram_config()
2912 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode; in qla2x00_nvram_config()
2913 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer; in qla2x00_nvram_config()
2983 struct qla_hw_data *ha = vha->hw; in qla2x00_configure_loop() local
3010 if (ha->current_topology == ISP_CFG_FL && in qla2x00_configure_loop()
3015 } else if (ha->current_topology == ISP_CFG_F && in qla2x00_configure_loop()
3021 } else if (ha->current_topology == ISP_CFG_N) { in qla2x00_configure_loop()
3106 struct qla_hw_data *ha = vha->hw; in qla2x00_configure_local_loop() local
3113 memset(ha->gid_list, 0, qla2x00_gid_list_size(ha)); in qla2x00_configure_local_loop()
3114 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma, in qla2x00_configure_local_loop()
3122 (uint8_t *)ha->gid_list, in qla2x00_configure_local_loop()
3152 id_iter = (char *)ha->gid_list; in qla2x00_configure_local_loop()
3157 if (IS_QLA2100(ha) || IS_QLA2200(ha)) in qla2x00_configure_local_loop()
3163 id_iter += ha->gid_list_info_size; in qla2x00_configure_local_loop()
3233 fcport->fp_speed = ha->link_data_rate; in qla2x00_configure_local_loop()
3256 struct qla_hw_data *ha = vha->hw; in qla2x00_iidma_fcport() local
3258 if (!IS_IIDMA_CAPABLE(ha)) in qla2x00_iidma_fcport()
3265 fcport->fp_speed > ha->link_data_rate) in qla2x00_iidma_fcport()
3277 qla2x00_get_link_speed_str(ha, fcport->fp_speed), in qla2x00_iidma_fcport()
3375 struct qla_hw_data *ha = vha->hw; in qla2x00_configure_fabric() local
3376 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); in qla2x00_configure_fabric()
3380 if (IS_FWI2_CAPABLE(ha)) in qla2x00_configure_fabric()
3401 if (IS_FWI2_CAPABLE(ha)) in qla2x00_configure_fabric()
3405 rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff, in qla2x00_configure_fabric()
3480 ha->isp_ops->fabric_logout(vha, in qla2x00_configure_fabric()
3514 next_loopid = ha->min_external_loopid; in qla2x00_configure_fabric()
3654 struct qla_hw_data *ha = vha->hw; in qla2x00_find_all_fabric_devs() local
3655 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); in qla2x00_find_all_fabric_devs()
3660 if (!ha->swl) in qla2x00_find_all_fabric_devs()
3661 ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t), in qla2x00_find_all_fabric_devs()
3663 swl = ha->swl; in qla2x00_find_all_fabric_devs()
3669 memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t)); in qla2x00_find_all_fabric_devs()
3700 loop_id = ha->min_external_loopid; in qla2x00_find_all_fabric_devs()
3701 for (; loop_id <= ha->max_loop_id; loop_id++) { in qla2x00_find_all_fabric_devs()
3705 if (ha->current_topology == ISP_CFG_FL && in qla2x00_find_all_fabric_devs()
3773 (vha->d_id.b24 & 0xffff00)) && ha->current_topology == in qla2x00_find_all_fabric_devs()
3852 ha->isp_ops->fabric_logout(vha, fcport->loop_id, in qla2x00_find_all_fabric_devs()
3902 struct qla_hw_data *ha = vha->hw; in qla2x00_find_new_loop_id() local
3907 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_find_new_loop_id()
3909 dev->loop_id = find_first_zero_bit(ha->loop_id_map, in qla2x00_find_new_loop_id()
3916 set_bit(dev->loop_id, ha->loop_id_map); in qla2x00_find_new_loop_id()
3918 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_find_new_loop_id()
3955 struct qla_hw_data *ha = vha->hw; in qla2x00_fabric_dev_login() local
3960 if (IS_ALOGIO_CAPABLE(ha)) { in qla2x00_fabric_dev_login()
3978 ha->isp_ops->fabric_logout(vha, fcport->loop_id, in qla2x00_fabric_dev_login()
4015 struct qla_hw_data *ha = vha->hw; in qla2x00_fabric_login() local
4028 rval = ha->isp_ops->fabric_login(vha, fcport->loop_id, in qla2x00_fabric_login()
4081 if (IS_FWI2_CAPABLE(ha)) { in qla2x00_fabric_login()
4106 ha->isp_ops->fabric_logout(vha, fcport->loop_id, in qla2x00_fabric_login()
4124 ha->isp_ops->fabric_logout(vha, fcport->loop_id, in qla2x00_fabric_login()
4245 int qla2x00_perform_loop_resync(scsi_qla_host_t *ha) in qla2x00_perform_loop_resync() argument
4249 if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) { in qla2x00_perform_loop_resync()
4251 atomic_set(&ha->loop_down_timer, 0); in qla2x00_perform_loop_resync()
4252 if (!(ha->device_flags & DFLG_NO_CABLE)) { in qla2x00_perform_loop_resync()
4253 atomic_set(&ha->loop_state, LOOP_UP); in qla2x00_perform_loop_resync()
4254 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); in qla2x00_perform_loop_resync()
4255 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags); in qla2x00_perform_loop_resync()
4256 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); in qla2x00_perform_loop_resync()
4258 rval = qla2x00_loop_resync(ha); in qla2x00_perform_loop_resync()
4260 atomic_set(&ha->loop_state, LOOP_DEAD); in qla2x00_perform_loop_resync()
4262 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags); in qla2x00_perform_loop_resync()
4273 struct qla_hw_data *ha = base_vha->hw; in qla2x00_update_fcports() local
4276 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_update_fcports()
4283 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_update_fcports()
4293 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_update_fcports()
4298 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_update_fcports()
4305 struct qla_hw_data *ha = vha->hw; in qla83xx_reset_ownership() local
4311 if (IS_QLA8044(ha)) { in qla83xx_reset_ownership()
4326 (i != ha->portnum)) { in qla83xx_reset_ownership()
4336 ((i + 8) != ha->portnum)) { in qla83xx_reset_ownership()
4346 drv_presence_mask = ~((1 << (ha->portnum)) | in qla83xx_reset_ownership()
4354 (ha->portnum < fcoe_other_function)) { in qla83xx_reset_ownership()
4357 ha->flags.nic_core_reset_owner = 1; in qla83xx_reset_ownership()
4365 struct qla_hw_data *ha = vha->hw; in __qla83xx_set_drv_ack() local
4370 drv_ack |= (1 << ha->portnum); in __qla83xx_set_drv_ack()
4381 struct qla_hw_data *ha = vha->hw; in __qla83xx_clear_drv_ack() local
4386 drv_ack &= ~(1 << ha->portnum); in __qla83xx_clear_drv_ack()
4420 struct qla_hw_data *ha = vha->hw; in qla83xx_idc_audit() local
4425 ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000); in qla83xx_idc_audit()
4426 idc_audit_reg = (ha->portnum) | in qla83xx_idc_audit()
4427 (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8); in qla83xx_idc_audit()
4433 jiffies_to_msecs(ha->idc_audit_ts)) / 1000); in qla83xx_idc_audit()
4434 idc_audit_reg = (ha->portnum) | in qla83xx_idc_audit()
4450 struct qla_hw_data *ha = vha->hw; in qla83xx_initiating_reset() local
4463 if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) { in qla83xx_initiating_reset()
4504 struct qla_hw_data *ha = vha->hw; in qla83xx_check_driver_presence() local
4507 if (drv_presence & (1 << ha->portnum)) in qla83xx_check_driver_presence()
4517 struct qla_hw_data *ha = vha->hw; in qla83xx_nic_core_reset() local
4533 ha->portnum); in qla83xx_nic_core_reset()
4550 ha->flags.nic_core_hung = 0; in qla83xx_nic_core_reset()
4565 struct qla_hw_data *ha = vha->hw; in qla2xxx_mctp_dump() local
4568 if (!IS_MCTP_CAPABLE(ha)) { in qla2xxx_mctp_dump()
4575 if (!ha->mctp_dump) { in qla2xxx_mctp_dump()
4576 ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev, in qla2xxx_mctp_dump()
4577 MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL); in qla2xxx_mctp_dump()
4579 if (!ha->mctp_dump) { in qla2xxx_mctp_dump()
4587 rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma, in qla2xxx_mctp_dump()
4595 vha->host_no, ha->mctp_dump); in qla2xxx_mctp_dump()
4596 ha->mctp_dumped = 1; in qla2xxx_mctp_dump()
4599 if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) { in qla2xxx_mctp_dump()
4600 ha->flags.nic_core_reset_hdlr_active = 1; in qla2xxx_mctp_dump()
4609 ha->flags.nic_core_reset_hdlr_active = 0; in qla2xxx_mctp_dump()
4627 struct qla_hw_data *ha = vha->hw; in qla2x00_quiesce_io() local
4631 "Quiescing I/O - ha=%p.\n", ha); in qla2x00_quiesce_io()
4633 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); in qla2x00_quiesce_io()
4637 list_for_each_entry(vp, &ha->vp_list, list) in qla2x00_quiesce_io()
4651 struct qla_hw_data *ha = vha->hw; in qla2x00_abort_isp_cleanup() local
4659 if (!(IS_P3P_TYPE(ha))) in qla2x00_abort_isp_cleanup()
4661 ha->flags.chip_reset_done = 0; in qla2x00_abort_isp_cleanup()
4666 "Performing ISP error recovery - ha=%p.\n", ha); in qla2x00_abort_isp_cleanup()
4672 if (!(IS_P3P_TYPE(ha))) in qla2x00_abort_isp_cleanup()
4673 ha->isp_ops->reset_chip(vha); in qla2x00_abort_isp_cleanup()
4680 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_abort_isp_cleanup()
4681 list_for_each_entry(vp, &ha->vp_list, list) { in qla2x00_abort_isp_cleanup()
4683 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_abort_isp_cleanup()
4687 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_abort_isp_cleanup()
4690 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_abort_isp_cleanup()
4700 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_abort_isp_cleanup()
4701 list_for_each_entry(vp, &ha->vp_list, list) { in qla2x00_abort_isp_cleanup()
4703 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_abort_isp_cleanup()
4708 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_abort_isp_cleanup()
4711 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_abort_isp_cleanup()
4713 if (!ha->flags.eeh_busy) { in qla2x00_abort_isp_cleanup()
4715 if (IS_P3P_TYPE(ha)) { in qla2x00_abort_isp_cleanup()
4730 ha->chip_reset++; in qla2x00_abort_isp_cleanup()
4750 struct qla_hw_data *ha = vha->hw; in qla2x00_abort_isp() local
4752 struct req_que *req = ha->req_q_map[0]; in qla2x00_abort_isp()
4758 if (IS_QLA8031(ha)) { in qla2x00_abort_isp()
4766 if (unlikely(pci_channel_offline(ha->pdev) && in qla2x00_abort_isp()
4767 ha->flags.pci_channel_io_perm_failure)) { in qla2x00_abort_isp()
4773 ha->isp_ops->get_flash_version(vha, req->ring); in qla2x00_abort_isp()
4775 ha->isp_ops->nvram_config(vha); in qla2x00_abort_isp()
4790 ha->isp_ops->enable_intrs(ha); in qla2x00_abort_isp()
4792 ha->isp_abort_cnt = 0; in qla2x00_abort_isp()
4795 if (IS_QLA81XX(ha) || IS_QLA8031(ha)) in qla2x00_abort_isp()
4797 if (ha->fce) { in qla2x00_abort_isp()
4798 ha->flags.fce_enabled = 1; in qla2x00_abort_isp()
4799 memset(ha->fce, 0, in qla2x00_abort_isp()
4800 fce_calc_size(ha->fce_bufs)); in qla2x00_abort_isp()
4802 ha->fce_dma, ha->fce_bufs, ha->fce_mb, in qla2x00_abort_isp()
4803 &ha->fce_bufs); in qla2x00_abort_isp()
4808 ha->flags.fce_enabled = 0; in qla2x00_abort_isp()
4812 if (ha->eft) { in qla2x00_abort_isp()
4813 memset(ha->eft, 0, EFT_SIZE); in qla2x00_abort_isp()
4815 ha->eft_dma, EFT_NUM_BUFFERS); in qla2x00_abort_isp()
4825 if (ha->isp_abort_cnt == 0) { in qla2x00_abort_isp()
4833 ha->isp_ops->reset_adapter(vha); in qla2x00_abort_isp()
4839 ha->isp_abort_cnt--; in qla2x00_abort_isp()
4842 ha->isp_abort_cnt); in qla2x00_abort_isp()
4846 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT; in qla2x00_abort_isp()
4849 "more times.\n", ha->isp_abort_cnt); in qla2x00_abort_isp()
4860 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_abort_isp()
4861 list_for_each_entry(vp, &ha->vp_list, list) { in qla2x00_abort_isp()
4864 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_abort_isp()
4868 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_abort_isp()
4872 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_abort_isp()
4874 if (IS_QLA8031(ha)) { in qla2x00_abort_isp()
4903 struct qla_hw_data *ha = vha->hw; in qla2x00_restart_isp() local
4904 struct req_que *req = ha->req_q_map[0]; in qla2x00_restart_isp()
4905 struct rsp_que *rsp = ha->rsp_q_map[0]; in qla2x00_restart_isp()
4911 status = ha->isp_ops->chip_diag(vha); in qla2x00_restart_isp()
4918 ha->flags.chip_reset_done = 1; in qla2x00_restart_isp()
4921 qla25xx_init_queues(ha); in qla2x00_restart_isp()
4934 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_restart_isp()
4937 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_restart_isp()
4950 qla25xx_init_queues(struct qla_hw_data *ha) in qla25xx_init_queues() argument
4954 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); in qla25xx_init_queues()
4958 for (i = 1; i < ha->max_rsp_queues; i++) { in qla25xx_init_queues()
4959 rsp = ha->rsp_q_map[i]; in qla25xx_init_queues()
4960 if (rsp && test_bit(i, ha->rsp_qid_map)) { in qla25xx_init_queues()
4973 for (i = 1; i < ha->max_req_queues; i++) { in qla25xx_init_queues()
4974 req = ha->req_q_map[i]; in qla25xx_init_queues()
4975 if (req && test_bit(i, ha->req_qid_map)) { in qla25xx_init_queues()
5003 struct qla_hw_data *ha = vha->hw; in qla2x00_reset_adapter() local
5004 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2x00_reset_adapter()
5007 ha->isp_ops->disable_intrs(ha); in qla2x00_reset_adapter()
5009 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_reset_adapter()
5014 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_reset_adapter()
5021 struct qla_hw_data *ha = vha->hw; in qla24xx_reset_adapter() local
5022 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; in qla24xx_reset_adapter()
5024 if (IS_P3P_TYPE(ha)) in qla24xx_reset_adapter()
5028 ha->isp_ops->disable_intrs(ha); in qla24xx_reset_adapter()
5030 spin_lock_irqsave(&ha->hardware_lock, flags); in qla24xx_reset_adapter()
5035 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla24xx_reset_adapter()
5037 if (IS_NOPOLLING_TYPE(ha)) in qla24xx_reset_adapter()
5038 ha->isp_ops->enable_intrs(ha); in qla24xx_reset_adapter()
5048 struct qla_hw_data *ha = vha->hw; in qla24xx_nvram_wwn_from_ofw() local
5049 struct pci_dev *pdev = ha->pdev; in qla24xx_nvram_wwn_from_ofw()
5074 struct qla_hw_data *ha = vha->hw; in qla24xx_nvram_config() local
5077 icb = (struct init_cb_24xx *)ha->init_cb; in qla24xx_nvram_config()
5078 nv = ha->nvram; in qla24xx_nvram_config()
5081 if (ha->port_no == 0) { in qla24xx_nvram_config()
5082 ha->nvram_base = FA_NVRAM_FUNC0_ADDR; in qla24xx_nvram_config()
5083 ha->vpd_base = FA_NVRAM_VPD0_ADDR; in qla24xx_nvram_config()
5085 ha->nvram_base = FA_NVRAM_FUNC1_ADDR; in qla24xx_nvram_config()
5086 ha->vpd_base = FA_NVRAM_VPD1_ADDR; in qla24xx_nvram_config()
5089 ha->nvram_size = sizeof(struct nvram_24xx); in qla24xx_nvram_config()
5090 ha->vpd_size = FA_NVRAM_VPD_SIZE; in qla24xx_nvram_config()
5093 ha->vpd = ha->nvram + VPD_OFFSET; in qla24xx_nvram_config()
5094 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, in qla24xx_nvram_config()
5095 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4); in qla24xx_nvram_config()
5099 ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base, in qla24xx_nvram_config()
5100 ha->nvram_size); in qla24xx_nvram_config()
5101 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) in qla24xx_nvram_config()
5107 (uint8_t *)nv, ha->nvram_size); in qla24xx_nvram_config()
5124 memset(nv, 0, ha->nvram_size); in qla24xx_nvram_config()
5132 nv->port_name[1] = 0x00 + ha->port_no + 1; in qla24xx_nvram_config()
5176 memset(icb, 0, ha->init_cb_size); in qla24xx_nvram_config()
5221 ha->flags.disable_risc_code_load = 0; in qla24xx_nvram_config()
5222 ha->flags.enable_lip_reset = 0; in qla24xx_nvram_config()
5223 ha->flags.enable_lip_full_login = in qla24xx_nvram_config()
5225 ha->flags.enable_target_reset = in qla24xx_nvram_config()
5227 ha->flags.enable_led_scheme = 0; in qla24xx_nvram_config()
5228 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0; in qla24xx_nvram_config()
5230 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) & in qla24xx_nvram_config()
5233 memcpy(ha->fw_seriallink_options24, nv->seriallink_options, in qla24xx_nvram_config()
5234 sizeof(ha->fw_seriallink_options24)); in qla24xx_nvram_config()
5237 ha->serial0 = icb->port_name[5]; in qla24xx_nvram_config()
5238 ha->serial1 = icb->port_name[6]; in qla24xx_nvram_config()
5239 ha->serial2 = icb->port_name[7]; in qla24xx_nvram_config()
5245 ha->retry_count = le16_to_cpu(nv->login_retry_count); in qla24xx_nvram_config()
5252 ha->login_timeout = le16_to_cpu(nv->login_timeout); in qla24xx_nvram_config()
5256 ha->r_a_tov = 100; in qla24xx_nvram_config()
5258 ha->loop_reset_delay = nv->reset_delay; in qla24xx_nvram_config()
5271 ha->loop_down_abort_time = in qla24xx_nvram_config()
5274 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout); in qla24xx_nvram_config()
5275 ha->loop_down_abort_time = in qla24xx_nvram_config()
5276 (LOOP_DOWN_TIME - ha->link_down_timeout); in qla24xx_nvram_config()
5280 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count); in qla24xx_nvram_config()
5282 ha->port_down_retry_count = qlport_down_retry; in qla24xx_nvram_config()
5285 ha->login_retry_count = le16_to_cpu(nv->login_retry_count); in qla24xx_nvram_config()
5286 if (ha->port_down_retry_count == in qla24xx_nvram_config()
5288 ha->port_down_retry_count > 3) in qla24xx_nvram_config()
5289 ha->login_retry_count = ha->port_down_retry_count; in qla24xx_nvram_config()
5290 else if (ha->port_down_retry_count > (int)ha->login_retry_count) in qla24xx_nvram_config()
5291 ha->login_retry_count = ha->port_down_retry_count; in qla24xx_nvram_config()
5293 ha->login_retry_count = ql2xloginretrycount; in qla24xx_nvram_config()
5297 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & in qla24xx_nvram_config()
5299 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? in qla24xx_nvram_config()
5305 if (ha->zio_mode != QLA_ZIO_DISABLED) { in qla24xx_nvram_config()
5306 ha->zio_mode = QLA_ZIO_MODE_6; in qla24xx_nvram_config()
5310 ha->zio_mode, ha->zio_timer * 100); in qla24xx_nvram_config()
5313 (uint32_t)ha->zio_mode); in qla24xx_nvram_config()
5314 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer); in qla24xx_nvram_config()
5335 struct qla_hw_data *ha = vha->hw; in qla24xx_load_risc_flash() local
5336 struct req_que *req = ha->req_q_map[0]; in qla24xx_load_risc_flash()
5375 dlen = (uint32_t)(ha->fw_transfer_size >> 2); in qla24xx_load_risc_flash()
5407 if (!IS_QLA27XX(ha)) in qla24xx_load_risc_flash()
5410 if (ha->fw_dump_template) in qla24xx_load_risc_flash()
5411 vfree(ha->fw_dump_template); in qla24xx_load_risc_flash()
5412 ha->fw_dump_template = NULL; in qla24xx_load_risc_flash()
5413 ha->fw_dump_template_len = 0; in qla24xx_load_risc_flash()
5427 ha->fw_dump_template = vmalloc(dlen); in qla24xx_load_risc_flash()
5428 if (!ha->fw_dump_template) { in qla24xx_load_risc_flash()
5436 dcode = ha->fw_dump_template; in qla24xx_load_risc_flash()
5456 ha->fw_dump_template_len = dlen; in qla24xx_load_risc_flash()
5461 if (ha->fw_dump_template) in qla24xx_load_risc_flash()
5462 vfree(ha->fw_dump_template); in qla24xx_load_risc_flash()
5463 ha->fw_dump_template = NULL; in qla24xx_load_risc_flash()
5464 ha->fw_dump_template_len = 0; in qla24xx_load_risc_flash()
5469 ha->fw_dump_template = vmalloc(dlen); in qla24xx_load_risc_flash()
5470 if (!ha->fw_dump_template) { in qla24xx_load_risc_flash()
5476 dcode = ha->fw_dump_template; in qla24xx_load_risc_flash()
5482 if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) { in qla24xx_load_risc_flash()
5488 dlen = qla27xx_fwdt_template_size(ha->fw_dump_template); in qla24xx_load_risc_flash()
5491 ha->fw_dump_template_len = dlen; in qla24xx_load_risc_flash()
5496 if (ha->fw_dump_template) in qla24xx_load_risc_flash()
5497 vfree(ha->fw_dump_template); in qla24xx_load_risc_flash()
5498 ha->fw_dump_template = NULL; in qla24xx_load_risc_flash()
5499 ha->fw_dump_template_len = 0; in qla24xx_load_risc_flash()
5513 struct qla_hw_data *ha = vha->hw; in qla2x00_load_risc() local
5514 struct req_que *req = ha->req_q_map[0]; in qla2x00_load_risc()
5570 wlen = (uint16_t)(ha->fw_transfer_size >> 1); in qla2x00_load_risc()
5616 struct qla_hw_data *ha = vha->hw; in qla24xx_load_risc_blob() local
5617 struct req_que *req = ha->req_q_map[0]; in qla24xx_load_risc_blob()
5680 dlen = (uint32_t)(ha->fw_transfer_size >> 2); in qla24xx_load_risc_blob()
5710 if (!IS_QLA27XX(ha)) in qla24xx_load_risc_blob()
5713 if (ha->fw_dump_template) in qla24xx_load_risc_blob()
5714 vfree(ha->fw_dump_template); in qla24xx_load_risc_blob()
5715 ha->fw_dump_template = NULL; in qla24xx_load_risc_blob()
5716 ha->fw_dump_template_len = 0; in qla24xx_load_risc_blob()
5730 ha->fw_dump_template = vmalloc(dlen); in qla24xx_load_risc_blob()
5731 if (!ha->fw_dump_template) { in qla24xx_load_risc_blob()
5739 dcode = ha->fw_dump_template; in qla24xx_load_risc_blob()
5758 ha->fw_dump_template_len = dlen; in qla24xx_load_risc_blob()
5763 if (ha->fw_dump_template) in qla24xx_load_risc_blob()
5764 vfree(ha->fw_dump_template); in qla24xx_load_risc_blob()
5765 ha->fw_dump_template = NULL; in qla24xx_load_risc_blob()
5766 ha->fw_dump_template_len = 0; in qla24xx_load_risc_blob()
5771 ha->fw_dump_template = vmalloc(dlen); in qla24xx_load_risc_blob()
5772 if (!ha->fw_dump_template) { in qla24xx_load_risc_blob()
5778 dcode = ha->fw_dump_template; in qla24xx_load_risc_blob()
5784 if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) { in qla24xx_load_risc_blob()
5790 dlen = qla27xx_fwdt_template_size(ha->fw_dump_template); in qla24xx_load_risc_blob()
5793 ha->fw_dump_template_len = dlen; in qla24xx_load_risc_blob()
5798 if (ha->fw_dump_template) in qla24xx_load_risc_blob()
5799 vfree(ha->fw_dump_template); in qla24xx_load_risc_blob()
5800 ha->fw_dump_template = NULL; in qla24xx_load_risc_blob()
5801 ha->fw_dump_template_len = 0; in qla24xx_load_risc_blob()
5830 struct qla_hw_data *ha = vha->hw; in qla81xx_load_risc() local
5841 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw); in qla81xx_load_risc()
5847 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw) in qla81xx_load_risc()
5852 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw); in qla81xx_load_risc()
5857 ha->flags.running_gold_fw = 1; in qla81xx_load_risc()
5865 struct qla_hw_data *ha = vha->hw; in qla2x00_try_to_stop_firmware() local
5867 if (ha->flags.pci_channel_io_perm_failure) in qla2x00_try_to_stop_firmware()
5869 if (!IS_FWI2_CAPABLE(ha)) in qla2x00_try_to_stop_firmware()
5871 if (!ha->fw_major_version) in qla2x00_try_to_stop_firmware()
5877 ha->isp_ops->reset_chip(vha); in qla2x00_try_to_stop_firmware()
5878 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS) in qla2x00_try_to_stop_firmware()
5894 struct qla_hw_data *ha = vha->hw; in qla24xx_configure_vhba() local
5895 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); in qla24xx_configure_vhba()
5903 if (ha->flags.cpu_affinity_enabled) in qla24xx_configure_vhba()
5904 req = ha->req_q_map[0]; in qla24xx_configure_vhba()
5917 rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, in qla24xx_configure_vhba()
5950 struct qla_hw_data *ha = vha->hw; in qla84xx_get_chip() local
5956 if (cs84xx->bus == ha->pdev->bus) { in qla84xx_get_chip()
5969 cs84xx->bus = ha->pdev->bus; in qla84xx_get_chip()
5992 struct qla_hw_data *ha = vha->hw; in qla84xx_put_chip() local
5993 if (ha->cs84xx) in qla84xx_put_chip()
5994 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release); in qla84xx_put_chip()
6002 struct qla_hw_data *ha = vha->hw; in qla84xx_init_chip() local
6004 mutex_lock(&ha->cs84xx->fw_update_mutex); in qla84xx_init_chip()
6008 mutex_unlock(&ha->cs84xx->fw_update_mutex); in qla84xx_init_chip()
6026 struct qla_hw_data *ha = vha->hw; in qla81xx_nvram_config() local
6029 icb = (struct init_cb_81xx *)ha->init_cb; in qla81xx_nvram_config()
6030 nv = ha->nvram; in qla81xx_nvram_config()
6033 ha->nvram_size = sizeof(struct nvram_81xx); in qla81xx_nvram_config()
6034 ha->vpd_size = FA_NVRAM_VPD_SIZE; in qla81xx_nvram_config()
6035 if (IS_P3P_TYPE(ha) || IS_QLA8031(ha)) in qla81xx_nvram_config()
6036 ha->vpd_size = FA_VPD_SIZE_82XX; in qla81xx_nvram_config()
6039 ha->vpd = ha->nvram + VPD_OFFSET; in qla81xx_nvram_config()
6040 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2, in qla81xx_nvram_config()
6041 ha->vpd_size); in qla81xx_nvram_config()
6044 ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2, in qla81xx_nvram_config()
6045 ha->nvram_size); in qla81xx_nvram_config()
6047 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) in qla81xx_nvram_config()
6053 (uint8_t *)nv, ha->nvram_size); in qla81xx_nvram_config()
6071 memset(nv, 0, ha->nvram_size); in qla81xx_nvram_config()
6078 nv->port_name[1] = 0x00 + ha->port_no + 1; in qla81xx_nvram_config()
6112 nv->enode_mac[5] = 0x06 + ha->port_no + 1; in qla81xx_nvram_config()
6117 if (IS_T10_PI_CAPABLE(ha)) in qla81xx_nvram_config()
6123 memset(icb, 0, ha->init_cb_size); in qla81xx_nvram_config()
6150 icb->enode_mac[5] = 0x06 + ha->port_no + 1; in qla81xx_nvram_config()
6154 memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb)); in qla81xx_nvram_config()
6181 ha->flags.disable_risc_code_load = 0; in qla81xx_nvram_config()
6182 ha->flags.enable_lip_reset = 0; in qla81xx_nvram_config()
6183 ha->flags.enable_lip_full_login = in qla81xx_nvram_config()
6185 ha->flags.enable_target_reset = in qla81xx_nvram_config()
6187 ha->flags.enable_led_scheme = 0; in qla81xx_nvram_config()
6188 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0; in qla81xx_nvram_config()
6190 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) & in qla81xx_nvram_config()
6194 ha->serial0 = icb->port_name[5]; in qla81xx_nvram_config()
6195 ha->serial1 = icb->port_name[6]; in qla81xx_nvram_config()
6196 ha->serial2 = icb->port_name[7]; in qla81xx_nvram_config()
6202 ha->retry_count = le16_to_cpu(nv->login_retry_count); in qla81xx_nvram_config()
6209 ha->login_timeout = le16_to_cpu(nv->login_timeout); in qla81xx_nvram_config()
6213 ha->r_a_tov = 100; in qla81xx_nvram_config()
6215 ha->loop_reset_delay = nv->reset_delay; in qla81xx_nvram_config()
6228 ha->loop_down_abort_time = in qla81xx_nvram_config()
6231 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout); in qla81xx_nvram_config()
6232 ha->loop_down_abort_time = in qla81xx_nvram_config()
6233 (LOOP_DOWN_TIME - ha->link_down_timeout); in qla81xx_nvram_config()
6237 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count); in qla81xx_nvram_config()
6239 ha->port_down_retry_count = qlport_down_retry; in qla81xx_nvram_config()
6242 ha->login_retry_count = le16_to_cpu(nv->login_retry_count); in qla81xx_nvram_config()
6243 if (ha->port_down_retry_count == in qla81xx_nvram_config()
6245 ha->port_down_retry_count > 3) in qla81xx_nvram_config()
6246 ha->login_retry_count = ha->port_down_retry_count; in qla81xx_nvram_config()
6247 else if (ha->port_down_retry_count > (int)ha->login_retry_count) in qla81xx_nvram_config()
6248 ha->login_retry_count = ha->port_down_retry_count; in qla81xx_nvram_config()
6250 ha->login_retry_count = ql2xloginretrycount; in qla81xx_nvram_config()
6253 if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha))) in qla81xx_nvram_config()
6258 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & in qla81xx_nvram_config()
6260 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? in qla81xx_nvram_config()
6266 if (ha->zio_mode != QLA_ZIO_DISABLED) { in qla81xx_nvram_config()
6267 ha->zio_mode = QLA_ZIO_MODE_6; in qla81xx_nvram_config()
6271 ha->zio_mode, in qla81xx_nvram_config()
6272 ha->zio_timer * 100); in qla81xx_nvram_config()
6275 (uint32_t)ha->zio_mode); in qla81xx_nvram_config()
6276 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer); in qla81xx_nvram_config()
6291 struct qla_hw_data *ha = vha->hw; in qla82xx_restart_isp() local
6292 struct req_que *req = ha->req_q_map[0]; in qla82xx_restart_isp()
6293 struct rsp_que *rsp = ha->rsp_q_map[0]; in qla82xx_restart_isp()
6300 ha->flags.chip_reset_done = 1; in qla82xx_restart_isp()
6326 ha->isp_ops->enable_intrs(ha); in qla82xx_restart_isp()
6328 ha->isp_abort_cnt = 0; in qla82xx_restart_isp()
6334 if (ha->fce) { in qla82xx_restart_isp()
6335 ha->flags.fce_enabled = 1; in qla82xx_restart_isp()
6336 memset(ha->fce, 0, in qla82xx_restart_isp()
6337 fce_calc_size(ha->fce_bufs)); in qla82xx_restart_isp()
6339 ha->fce_dma, ha->fce_bufs, ha->fce_mb, in qla82xx_restart_isp()
6340 &ha->fce_bufs); in qla82xx_restart_isp()
6345 ha->flags.fce_enabled = 0; in qla82xx_restart_isp()
6349 if (ha->eft) { in qla82xx_restart_isp()
6350 memset(ha->eft, 0, EFT_SIZE); in qla82xx_restart_isp()
6352 ha->eft_dma, EFT_NUM_BUFFERS); in qla82xx_restart_isp()
6365 spin_lock_irqsave(&ha->vport_slock, flags); in qla82xx_restart_isp()
6366 list_for_each_entry(vp, &ha->vp_list, list) { in qla82xx_restart_isp()
6369 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla82xx_restart_isp()
6373 spin_lock_irqsave(&ha->vport_slock, flags); in qla82xx_restart_isp()
6377 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla82xx_restart_isp()
6390 struct qla_hw_data *ha = vha->hw; in qla81xx_update_fw_options() local
6396 memset(ha->fw_options, 0, sizeof(ha->fw_options)); in qla81xx_update_fw_options()
6397 ha->fw_options[2] |= BIT_9; in qla81xx_update_fw_options()
6398 qla2x00_set_fw_options(vha, ha->fw_options); in qla81xx_update_fw_options()
6428 struct qla_hw_data *ha = vha->hw; in qla24xx_get_fcp_prio() local
6430 if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled) in qla24xx_get_fcp_prio()
6434 entries = ha->fcp_prio_cfg->num_entries; in qla24xx_get_fcp_prio()
6435 pri_entry = &ha->fcp_prio_cfg->entry[0]; in qla24xx_get_fcp_prio()