Lines Matching refs:ha

51 struct qla_hw_data *ha = fcport->vha->hw; in qla2x00_sp_timeout() local
55 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_sp_timeout()
56 req = ha->req_q_map[0]; in qla2x00_sp_timeout()
61 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_sp_timeout()
81 struct qla_hw_data *ha = vha->hw; in qla2x00_get_async_timeout() local
84 tmo = ha->r_a_tov / 10 * 2; in qla2x00_get_async_timeout()
85 if (IS_QLAFX00(ha)) { in qla2x00_get_async_timeout()
87 } else if (!IS_FWI2_CAPABLE(ha)) { in qla2x00_get_async_timeout()
92 tmo = ha->login_timeout; in qla2x00_get_async_timeout()
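
The qla2x00_get_async_timeout() references above derive a default timeout of twice R_A_TOV and fall back to the NVRAM login timeout on pre-FWI2 adapters. A minimal user-space sketch of that selection follows; the field names mirror the listing, the QLAFX00 branch at line 85 is omitted because its body is not shown, and the unit handling is an assumption.

    #include <stdbool.h>

    struct qla_hw_data_sketch {
        unsigned int r_a_tov;       /* Resource Allocation Timeout Value   */
        unsigned int login_timeout; /* legacy fallback for older ISPs      */
        bool fwi2_capable;          /* stand-in for IS_FWI2_CAPABLE(ha)    */
    };

    static unsigned int get_async_timeout(const struct qla_hw_data_sketch *ha)
    {
        unsigned int tmo = ha->r_a_tov / 10 * 2;  /* default: 2 * R_A_TOV (line 84) */

        if (!ha->fwi2_capable)
            tmo = ha->login_timeout;              /* pre-FWI2 fallback (line 92)    */
        return tmo;
    }
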
420 struct qla_hw_data *ha = vha->hw; in qla24xx_async_abort_command() local
423 spin_lock_irqsave(&ha->hardware_lock, flags); in qla24xx_async_abort_command()
428 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla24xx_async_abort_command()
537 struct qla_hw_data *ha = vha->hw; in qla83xx_nic_core_fw_load() local
546 ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT; in qla83xx_nic_core_fw_load()
547 ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT; in qla83xx_nic_core_fw_load()
567 if (ha->flags.nic_core_reset_owner) { in qla83xx_nic_core_fw_load()
588 idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2)); in qla83xx_nic_core_fw_load()
591 if (ha->flags.nic_core_reset_owner) { in qla83xx_nic_core_fw_load()
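
Line 588 above packs the function's supported IDC minor version into a 2-bit field selected by ha->portnum. A stand-alone sketch of that packing, assuming a 32-bit register and that only the OR shown in the listing is performed (the 2-bit mask is an added safeguard, not from the source):

    #include <stdint.h>

    static uint32_t advertise_idc_minor_ver(uint32_t idc_minor_ver,
                                            unsigned int portnum,
                                            uint32_t supp_minor_ver)
    {
        /* idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2)); */
        return idc_minor_ver | ((supp_minor_ver & 0x3u) << (portnum * 2));
    }
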
620 struct qla_hw_data *ha = vha->hw; in qla2x00_initialize_adapter() local
621 struct req_que *req = ha->req_q_map[0]; in qla2x00_initialize_adapter()
625 ha->flags.chip_reset_done = 0; in qla2x00_initialize_adapter()
627 ha->flags.pci_channel_io_perm_failure = 0; in qla2x00_initialize_adapter()
628 ha->flags.eeh_busy = 0; in qla2x00_initialize_adapter()
636 ha->isp_abort_cnt = 0; in qla2x00_initialize_adapter()
637 ha->beacon_blink_led = 0; in qla2x00_initialize_adapter()
639 set_bit(0, ha->req_qid_map); in qla2x00_initialize_adapter()
640 set_bit(0, ha->rsp_qid_map); in qla2x00_initialize_adapter()
644 rval = ha->isp_ops->pci_config(vha); in qla2x00_initialize_adapter()
651 ha->isp_ops->reset_chip(vha); in qla2x00_initialize_adapter()
660 if (IS_QLA8044(ha)) { in qla2x00_initialize_adapter()
671 ha->isp_ops->get_flash_version(vha, req->ring); in qla2x00_initialize_adapter()
675 ha->isp_ops->nvram_config(vha); in qla2x00_initialize_adapter()
677 if (ha->flags.disable_serdes) { in qla2x00_initialize_adapter()
688 rval = ha->isp_ops->chip_diag(vha); in qla2x00_initialize_adapter()
696 if (IS_QLA84XX(ha)) { in qla2x00_initialize_adapter()
697 ha->cs84xx = qla84xx_get_chip(vha); in qla2x00_initialize_adapter()
698 if (!ha->cs84xx) { in qla2x00_initialize_adapter()
708 ha->flags.chip_reset_done = 1; in qla2x00_initialize_adapter()
710 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) { in qla2x00_initialize_adapter()
721 if (IS_QLA8031(ha)) { in qla2x00_initialize_adapter()
728 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) in qla2x00_initialize_adapter()
731 if (IS_P3P_TYPE(ha)) in qla2x00_initialize_adapter()
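
The qla2x00_initialize_adapter() references show per-chip behaviour reached through the ha->isp_ops function-pointer table (pci_config at line 644, reset_chip at 651, nvram_config at 675, chip_diag at 688). A reduced sketch of that indirection, with placeholder types, trimmed error handling, and get_flash_version and the chip-type branches omitted:

    struct scsi_qla_host_sketch;    /* opaque stand-in for scsi_qla_host_t */

    struct isp_operations_sketch {
        int  (*pci_config)(struct scsi_qla_host_sketch *);
        void (*reset_chip)(struct scsi_qla_host_sketch *);
        int  (*nvram_config)(struct scsi_qla_host_sketch *);
        int  (*chip_diag)(struct scsi_qla_host_sketch *);
    };

    static int initialize_adapter(struct scsi_qla_host_sketch *vha,
                                  const struct isp_operations_sketch *ops)
    {
        int rval = ops->pci_config(vha);    /* line 644 */
        if (rval)
            return rval;
        ops->reset_chip(vha);               /* line 651 */
        ops->nvram_config(vha);             /* line 675 */
        return ops->chip_diag(vha);         /* line 688 */
    }
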
750 struct qla_hw_data *ha = vha->hw; in qla2100_pci_config() local
751 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2100_pci_config()
753 pci_set_master(ha->pdev); in qla2100_pci_config()
754 pci_try_set_mwi(ha->pdev); in qla2100_pci_config()
756 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); in qla2100_pci_config()
758 pci_write_config_word(ha->pdev, PCI_COMMAND, w); in qla2100_pci_config()
760 pci_disable_rom(ha->pdev); in qla2100_pci_config()
763 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2100_pci_config()
764 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status); in qla2100_pci_config()
765 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2100_pci_config()
782 struct qla_hw_data *ha = vha->hw; in qla2300_pci_config() local
783 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2300_pci_config()
785 pci_set_master(ha->pdev); in qla2300_pci_config()
786 pci_try_set_mwi(ha->pdev); in qla2300_pci_config()
788 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); in qla2300_pci_config()
791 if (IS_QLA2322(ha) || IS_QLA6322(ha)) in qla2300_pci_config()
793 pci_write_config_word(ha->pdev, PCI_COMMAND, w); in qla2300_pci_config()
802 if (IS_QLA2300(ha)) { in qla2300_pci_config()
803 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2300_pci_config()
819 ha->fb_rev = RD_FB_CMD_REG(ha, reg); in qla2300_pci_config()
821 if (ha->fb_rev == FPM_2300) in qla2300_pci_config()
822 pci_clear_mwi(ha->pdev); in qla2300_pci_config()
837 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2300_pci_config()
840 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80); in qla2300_pci_config()
842 pci_disable_rom(ha->pdev); in qla2300_pci_config()
845 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2300_pci_config()
846 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status); in qla2300_pci_config()
847 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2300_pci_config()
863 struct qla_hw_data *ha = vha->hw; in qla24xx_pci_config() local
864 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; in qla24xx_pci_config()
866 pci_set_master(ha->pdev); in qla24xx_pci_config()
867 pci_try_set_mwi(ha->pdev); in qla24xx_pci_config()
869 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); in qla24xx_pci_config()
872 pci_write_config_word(ha->pdev, PCI_COMMAND, w); in qla24xx_pci_config()
874 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80); in qla24xx_pci_config()
877 if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX)) in qla24xx_pci_config()
878 pcix_set_mmrbc(ha->pdev, 2048); in qla24xx_pci_config()
881 if (pci_is_pcie(ha->pdev)) in qla24xx_pci_config()
882 pcie_set_readrq(ha->pdev, 4096); in qla24xx_pci_config()
884 pci_disable_rom(ha->pdev); in qla24xx_pci_config()
886 ha->chip_revision = ha->pdev->revision; in qla24xx_pci_config()
889 spin_lock_irqsave(&ha->hardware_lock, flags); in qla24xx_pci_config()
890 ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status); in qla24xx_pci_config()
891 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla24xx_pci_config()
906 struct qla_hw_data *ha = vha->hw; in qla25xx_pci_config() local
908 pci_set_master(ha->pdev); in qla25xx_pci_config()
909 pci_try_set_mwi(ha->pdev); in qla25xx_pci_config()
911 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); in qla25xx_pci_config()
914 pci_write_config_word(ha->pdev, PCI_COMMAND, w); in qla25xx_pci_config()
917 if (pci_is_pcie(ha->pdev)) in qla25xx_pci_config()
918 pcie_set_readrq(ha->pdev, 4096); in qla25xx_pci_config()
920 pci_disable_rom(ha->pdev); in qla25xx_pci_config()
922 ha->chip_revision = ha->pdev->revision; in qla25xx_pci_config()
939 struct qla_hw_data *ha = vha->hw; in qla2x00_isp_firmware() local
944 if (ha->flags.disable_risc_code_load) { in qla2x00_isp_firmware()
948 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address); in qla2x00_isp_firmware()
973 struct qla_hw_data *ha = vha->hw; in qla2x00_reset_chip() local
974 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2x00_reset_chip()
978 if (unlikely(pci_channel_offline(ha->pdev))) in qla2x00_reset_chip()
981 ha->isp_ops->disable_intrs(ha); in qla2x00_reset_chip()
983 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_reset_chip()
987 pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd); in qla2x00_reset_chip()
989 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd); in qla2x00_reset_chip()
991 if (!IS_QLA2100(ha)) { in qla2x00_reset_chip()
994 if (IS_QLA2200(ha) || IS_QLA2300(ha)) { in qla2x00_reset_chip()
1015 if (!IS_QLA2200(ha)) { in qla2x00_reset_chip()
1025 if (IS_QLA2200(ha)) { in qla2x00_reset_chip()
1026 WRT_FB_CMD_REG(ha, reg, 0xa000); in qla2x00_reset_chip()
1027 RD_FB_CMD_REG(ha, reg); /* PCI Posting. */ in qla2x00_reset_chip()
1029 WRT_FB_CMD_REG(ha, reg, 0x00fc); in qla2x00_reset_chip()
1033 if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0) in qla2x00_reset_chip()
1059 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { in qla2x00_reset_chip()
1084 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { in qla2x00_reset_chip()
1086 if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY) in qla2x00_reset_chip()
1096 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd); in qla2x00_reset_chip()
1099 if (!IS_QLA2100(ha)) { in qla2x00_reset_chip()
1104 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_reset_chip()
1133 struct qla_hw_data *ha = vha->hw; in qla24xx_reset_risc() local
1134 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; in qla24xx_reset_risc()
1140 spin_lock_irqsave(&ha->hardware_lock, flags); in qla24xx_reset_risc()
1152 set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags); in qla24xx_reset_risc()
1162 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd); in qla24xx_reset_risc()
1178 set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags); in qla24xx_reset_risc()
1196 set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags); in qla24xx_reset_risc()
1239 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags); in qla24xx_reset_risc()
1246 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla24xx_reset_risc()
1250 IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling"); in qla24xx_reset_risc()
1252 if (IS_NOPOLLING_TYPE(ha)) in qla24xx_reset_risc()
1253 ha->isp_ops->enable_intrs(ha); in qla24xx_reset_risc()
1347 struct qla_hw_data *ha = vha->hw; in qla24xx_reset_chip() local
1349 if (pci_channel_offline(ha->pdev) && in qla24xx_reset_chip()
1350 ha->flags.pci_channel_io_perm_failure) { in qla24xx_reset_chip()
1354 ha->isp_ops->disable_intrs(ha); in qla24xx_reset_chip()
1372 struct qla_hw_data *ha = vha->hw; in qla2x00_chip_diag() local
1373 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2x00_chip_diag()
1378 struct req_que *req = ha->req_q_map[0]; in qla2x00_chip_diag()
1386 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_chip_diag()
1414 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { in qla2x00_chip_diag()
1415 data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0)); in qla2x00_chip_diag()
1418 data = RD_MAILBOX_REG(ha, reg, 0); in qla2x00_chip_diag()
1430 mb[1] = RD_MAILBOX_REG(ha, reg, 1); in qla2x00_chip_diag()
1431 mb[2] = RD_MAILBOX_REG(ha, reg, 2); in qla2x00_chip_diag()
1432 mb[3] = RD_MAILBOX_REG(ha, reg, 3); in qla2x00_chip_diag()
1433 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4)); in qla2x00_chip_diag()
1442 ha->product_id[0] = mb[1]; in qla2x00_chip_diag()
1443 ha->product_id[1] = mb[2]; in qla2x00_chip_diag()
1444 ha->product_id[2] = mb[3]; in qla2x00_chip_diag()
1445 ha->product_id[3] = mb[4]; in qla2x00_chip_diag()
1449 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024; in qla2x00_chip_diag()
1451 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * in qla2x00_chip_diag()
1454 if (IS_QLA2200(ha) && in qla2x00_chip_diag()
1455 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) { in qla2x00_chip_diag()
1459 ha->device_type |= DT_ISP2200A; in qla2x00_chip_diag()
1460 ha->fw_transfer_size = 128; in qla2x00_chip_diag()
1464 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_chip_diag()
1474 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_chip_diag()
1481 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_chip_diag()
1496 struct qla_hw_data *ha = vha->hw; in qla24xx_chip_diag() local
1497 struct req_que *req = ha->req_q_map[0]; in qla24xx_chip_diag()
1499 if (IS_P3P_TYPE(ha)) in qla24xx_chip_diag()
1502 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length; in qla24xx_chip_diag()
1524 struct qla_hw_data *ha = vha->hw; in qla2x00_alloc_fw_dump() local
1525 struct req_que *req = ha->req_q_map[0]; in qla2x00_alloc_fw_dump()
1526 struct rsp_que *rsp = ha->rsp_q_map[0]; in qla2x00_alloc_fw_dump()
1528 if (ha->fw_dump) { in qla2x00_alloc_fw_dump()
1534 ha->fw_dumped = 0; in qla2x00_alloc_fw_dump()
1535 ha->fw_dump_cap_flags = 0; in qla2x00_alloc_fw_dump()
1539 if (IS_QLA27XX(ha)) in qla2x00_alloc_fw_dump()
1542 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { in qla2x00_alloc_fw_dump()
1544 } else if (IS_QLA23XX(ha)) { in qla2x00_alloc_fw_dump()
1546 mem_size = (ha->fw_memory_size - 0x11000 + 1) * in qla2x00_alloc_fw_dump()
1548 } else if (IS_FWI2_CAPABLE(ha)) { in qla2x00_alloc_fw_dump()
1549 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) in qla2x00_alloc_fw_dump()
1551 else if (IS_QLA81XX(ha)) in qla2x00_alloc_fw_dump()
1553 else if (IS_QLA25XX(ha)) in qla2x00_alloc_fw_dump()
1558 mem_size = (ha->fw_memory_size - 0x100000 + 1) * in qla2x00_alloc_fw_dump()
1560 if (ha->mqenable) { in qla2x00_alloc_fw_dump()
1561 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) in qla2x00_alloc_fw_dump()
1567 mq_size += ha->max_req_queues * in qla2x00_alloc_fw_dump()
1569 mq_size += ha->max_rsp_queues * in qla2x00_alloc_fw_dump()
1572 if (ha->tgt.atio_ring) in qla2x00_alloc_fw_dump()
1573 mq_size += ha->tgt.atio_q_length * sizeof(request_t); in qla2x00_alloc_fw_dump()
1575 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && in qla2x00_alloc_fw_dump()
1576 !IS_QLA27XX(ha)) in qla2x00_alloc_fw_dump()
1580 if (ha->fce) in qla2x00_alloc_fw_dump()
1581 dma_free_coherent(&ha->pdev->dev, in qla2x00_alloc_fw_dump()
1582 FCE_SIZE, ha->fce, ha->fce_dma); in qla2x00_alloc_fw_dump()
1585 tc = dma_zalloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, in qla2x00_alloc_fw_dump()
1595 ha->fce_mb, &ha->fce_bufs); in qla2x00_alloc_fw_dump()
1599 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, in qla2x00_alloc_fw_dump()
1601 ha->flags.fce_enabled = 0; in qla2x00_alloc_fw_dump()
1608 ha->flags.fce_enabled = 1; in qla2x00_alloc_fw_dump()
1609 ha->fce_dma = tc_dma; in qla2x00_alloc_fw_dump()
1610 ha->fce = tc; in qla2x00_alloc_fw_dump()
1613 if (ha->eft) in qla2x00_alloc_fw_dump()
1614 dma_free_coherent(&ha->pdev->dev, in qla2x00_alloc_fw_dump()
1615 EFT_SIZE, ha->eft, ha->eft_dma); in qla2x00_alloc_fw_dump()
1618 tc = dma_zalloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, in qla2x00_alloc_fw_dump()
1631 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, in qla2x00_alloc_fw_dump()
1639 ha->eft_dma = tc_dma; in qla2x00_alloc_fw_dump()
1640 ha->eft = tc; in qla2x00_alloc_fw_dump()
1644 if (IS_QLA27XX(ha)) { in qla2x00_alloc_fw_dump()
1645 if (!ha->fw_dump_template) { in qla2x00_alloc_fw_dump()
1660 ha->chain_offset = dump_size; in qla2x00_alloc_fw_dump()
1664 ha->fw_dump = vmalloc(dump_size); in qla2x00_alloc_fw_dump()
1665 if (!ha->fw_dump) { in qla2x00_alloc_fw_dump()
1670 if (ha->fce) { in qla2x00_alloc_fw_dump()
1671 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, in qla2x00_alloc_fw_dump()
1672 ha->fce_dma); in qla2x00_alloc_fw_dump()
1673 ha->fce = NULL; in qla2x00_alloc_fw_dump()
1674 ha->fce_dma = 0; in qla2x00_alloc_fw_dump()
1677 if (ha->eft) { in qla2x00_alloc_fw_dump()
1678 dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft, in qla2x00_alloc_fw_dump()
1679 ha->eft_dma); in qla2x00_alloc_fw_dump()
1680 ha->eft = NULL; in qla2x00_alloc_fw_dump()
1681 ha->eft_dma = 0; in qla2x00_alloc_fw_dump()
1685 ha->fw_dump_len = dump_size; in qla2x00_alloc_fw_dump()
1689 if (IS_QLA27XX(ha)) in qla2x00_alloc_fw_dump()
1692 ha->fw_dump->signature[0] = 'Q'; in qla2x00_alloc_fw_dump()
1693 ha->fw_dump->signature[1] = 'L'; in qla2x00_alloc_fw_dump()
1694 ha->fw_dump->signature[2] = 'G'; in qla2x00_alloc_fw_dump()
1695 ha->fw_dump->signature[3] = 'C'; in qla2x00_alloc_fw_dump()
1696 ha->fw_dump->version = htonl(1); in qla2x00_alloc_fw_dump()
1698 ha->fw_dump->fixed_size = htonl(fixed_size); in qla2x00_alloc_fw_dump()
1699 ha->fw_dump->mem_size = htonl(mem_size); in qla2x00_alloc_fw_dump()
1700 ha->fw_dump->req_q_size = htonl(req_q_size); in qla2x00_alloc_fw_dump()
1701 ha->fw_dump->rsp_q_size = htonl(rsp_q_size); in qla2x00_alloc_fw_dump()
1703 ha->fw_dump->eft_size = htonl(eft_size); in qla2x00_alloc_fw_dump()
1704 ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma)); in qla2x00_alloc_fw_dump()
1705 ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma)); in qla2x00_alloc_fw_dump()
1707 ha->fw_dump->header_size = in qla2x00_alloc_fw_dump()
1759 qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req) in qla2x00_alloc_outstanding_cmds() argument
1765 if (!IS_FWI2_CAPABLE(ha) || (ha->mqiobase && in qla2x00_alloc_outstanding_cmds()
1769 if (ha->fw_xcb_count <= ha->fw_iocb_count) in qla2x00_alloc_outstanding_cmds()
1770 req->num_outstanding_cmds = ha->fw_xcb_count; in qla2x00_alloc_outstanding_cmds()
1772 req->num_outstanding_cmds = ha->fw_iocb_count; in qla2x00_alloc_outstanding_cmds()
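
Lines 1769-1772 size the request queue's outstanding-command array to the smaller of the firmware exchange (XCB) and IOCB counts. The same choice as a one-line helper:

    static unsigned int pick_num_outstanding_cmds(unsigned int fw_xcb_count,
                                                  unsigned int fw_iocb_count)
    {
        return fw_xcb_count <= fw_iocb_count ? fw_xcb_count : fw_iocb_count;
    }
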
1810 struct qla_hw_data *ha = vha->hw; in qla2x00_setup_chip() local
1811 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2x00_setup_chip()
1815 if (IS_P3P_TYPE(ha)) { in qla2x00_setup_chip()
1816 rval = ha->isp_ops->load_risc(vha, &srisc_address); in qla2x00_setup_chip()
1824 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) { in qla2x00_setup_chip()
1826 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_setup_chip()
1829 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_setup_chip()
1835 rval = ha->isp_ops->load_risc(vha, &srisc_address); in qla2x00_setup_chip()
1850 fw_major_version = ha->fw_major_version; in qla2x00_setup_chip()
1851 if (IS_P3P_TYPE(ha)) in qla2x00_setup_chip()
1857 ha->flags.npiv_supported = 0; in qla2x00_setup_chip()
1858 if (IS_QLA2XXX_MIDTYPE(ha) && in qla2x00_setup_chip()
1859 (ha->fw_attributes & BIT_2)) { in qla2x00_setup_chip()
1860 ha->flags.npiv_supported = 1; in qla2x00_setup_chip()
1861 if ((!ha->max_npiv_vports) || in qla2x00_setup_chip()
1862 ((ha->max_npiv_vports + 1) % in qla2x00_setup_chip()
1864 ha->max_npiv_vports = in qla2x00_setup_chip()
1868 &ha->fw_xcb_count, NULL, &ha->fw_iocb_count, in qla2x00_setup_chip()
1869 &ha->max_npiv_vports, NULL); in qla2x00_setup_chip()
1875 rval = qla2x00_alloc_outstanding_cmds(ha, in qla2x00_setup_chip()
1881 && !(IS_P3P_TYPE(ha))) in qla2x00_setup_chip()
1894 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) { in qla2x00_setup_chip()
1896 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_setup_chip()
1897 if (IS_QLA2300(ha)) in qla2x00_setup_chip()
1904 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_setup_chip()
1907 if (IS_QLA27XX(ha)) in qla2x00_setup_chip()
1908 ha->flags.fac_supported = 1; in qla2x00_setup_chip()
1909 else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) { in qla2x00_setup_chip()
1914 ha->flags.fac_supported = 1; in qla2x00_setup_chip()
1915 ha->fdt_block_size = size << 2; in qla2x00_setup_chip()
1919 ha->fw_major_version, ha->fw_minor_version, in qla2x00_setup_chip()
1920 ha->fw_subminor_version); in qla2x00_setup_chip()
1922 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { in qla2x00_setup_chip()
1923 ha->flags.fac_supported = 0; in qla2x00_setup_chip()
1972 struct qla_hw_data *ha = vha->hw; in qla2x00_update_fw_options() local
1974 memset(ha->fw_options, 0, sizeof(ha->fw_options)); in qla2x00_update_fw_options()
1975 qla2x00_get_fw_options(vha, ha->fw_options); in qla2x00_update_fw_options()
1977 if (IS_QLA2100(ha) || IS_QLA2200(ha)) in qla2x00_update_fw_options()
1984 (uint8_t *)&ha->fw_seriallink_options, in qla2x00_update_fw_options()
1985 sizeof(ha->fw_seriallink_options)); in qla2x00_update_fw_options()
1987 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING; in qla2x00_update_fw_options()
1988 if (ha->fw_seriallink_options[3] & BIT_2) { in qla2x00_update_fw_options()
1989 ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING; in qla2x00_update_fw_options()
1992 swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0); in qla2x00_update_fw_options()
1993 emphasis = (ha->fw_seriallink_options[2] & in qla2x00_update_fw_options()
1995 tx_sens = ha->fw_seriallink_options[0] & in qla2x00_update_fw_options()
1997 rx_sens = (ha->fw_seriallink_options[0] & in qla2x00_update_fw_options()
1999 ha->fw_options[10] = (emphasis << 14) | (swing << 8); in qla2x00_update_fw_options()
2000 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) { in qla2x00_update_fw_options()
2003 ha->fw_options[10] |= (tx_sens << 4) | rx_sens; in qla2x00_update_fw_options()
2004 } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) in qla2x00_update_fw_options()
2005 ha->fw_options[10] |= BIT_5 | in qla2x00_update_fw_options()
2010 swing = (ha->fw_seriallink_options[2] & in qla2x00_update_fw_options()
2012 emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0); in qla2x00_update_fw_options()
2013 tx_sens = ha->fw_seriallink_options[1] & in qla2x00_update_fw_options()
2015 rx_sens = (ha->fw_seriallink_options[1] & in qla2x00_update_fw_options()
2017 ha->fw_options[11] = (emphasis << 14) | (swing << 8); in qla2x00_update_fw_options()
2018 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) { in qla2x00_update_fw_options()
2021 ha->fw_options[11] |= (tx_sens << 4) | rx_sens; in qla2x00_update_fw_options()
2022 } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) in qla2x00_update_fw_options()
2023 ha->fw_options[11] |= BIT_5 | in qla2x00_update_fw_options()
2030 ha->fw_options[3] |= BIT_13; in qla2x00_update_fw_options()
2033 if (ha->flags.enable_led_scheme) in qla2x00_update_fw_options()
2034 ha->fw_options[2] |= BIT_12; in qla2x00_update_fw_options()
2037 if (IS_QLA6312(ha)) in qla2x00_update_fw_options()
2038 ha->fw_options[2] |= BIT_13; in qla2x00_update_fw_options()
2041 qla2x00_set_fw_options(vha, ha->fw_options); in qla2x00_update_fw_options()
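
Lines 1999 and 2003 pack the serial-link swing, emphasis, and transmit/receive sensitivity fields into fw_options[10] (and lines 2017/2021 apply the same layout to fw_options[11]); the sensitivity OR appears to be limited to certain ISP types per the listing. A sketch of the packing with already-extracted field values; the field widths are assumptions, only the bit positions (14, 8, 4, 0) come from the listing:

    static unsigned short pack_seriallink_options(unsigned char swing,
                                                  unsigned char emphasis,
                                                  unsigned char tx_sens,
                                                  unsigned char rx_sens)
    {
        /* ha->fw_options[10] = (emphasis << 14) | (swing << 8)
         *                    | (tx_sens << 4) | rx_sens;            */
        return (unsigned short)((emphasis << 14) | (swing << 8) |
                                (tx_sens << 4) | rx_sens);
    }
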
2048 struct qla_hw_data *ha = vha->hw; in qla24xx_update_fw_options() local
2050 if (IS_P3P_TYPE(ha)) in qla24xx_update_fw_options()
2054 if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0) in qla24xx_update_fw_options()
2058 le16_to_cpu(ha->fw_seriallink_options24[1]), in qla24xx_update_fw_options()
2059 le16_to_cpu(ha->fw_seriallink_options24[2]), in qla24xx_update_fw_options()
2060 le16_to_cpu(ha->fw_seriallink_options24[3])); in qla24xx_update_fw_options()
2070 struct qla_hw_data *ha = vha->hw; in qla2x00_config_rings() local
2071 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2x00_config_rings()
2072 struct req_que *req = ha->req_q_map[0]; in qla2x00_config_rings()
2073 struct rsp_que *rsp = ha->rsp_q_map[0]; in qla2x00_config_rings()
2076 ha->init_cb->request_q_outpointer = cpu_to_le16(0); in qla2x00_config_rings()
2077 ha->init_cb->response_q_inpointer = cpu_to_le16(0); in qla2x00_config_rings()
2078 ha->init_cb->request_q_length = cpu_to_le16(req->length); in qla2x00_config_rings()
2079 ha->init_cb->response_q_length = cpu_to_le16(rsp->length); in qla2x00_config_rings()
2080 ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma)); in qla2x00_config_rings()
2081 ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma)); in qla2x00_config_rings()
2082 ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma)); in qla2x00_config_rings()
2083 ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma)); in qla2x00_config_rings()
2085 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0); in qla2x00_config_rings()
2086 WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0); in qla2x00_config_rings()
2087 WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0); in qla2x00_config_rings()
2088 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0); in qla2x00_config_rings()
2089 RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */ in qla2x00_config_rings()
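
The qla2x00_config_rings() references program the init control block with ring lengths and 64-bit DMA addresses split into low/high 32-bit words (LSD/MSD), stored little-endian. A simplified sketch of that split; the in-kernel cpu_to_le16/cpu_to_le32 conversions are noted but not performed here:

    #include <stdint.h>

    struct init_cb_sketch {
        uint16_t request_q_length;
        uint32_t request_q_address[2];   /* [0] = low dword, [1] = high dword */
    };

    static void set_req_ring(struct init_cb_sketch *icb,
                             uint64_t dma, uint16_t length)
    {
        icb->request_q_length     = length;                /* cpu_to_le16(req->length) */
        icb->request_q_address[0] = (uint32_t)dma;         /* cpu_to_le32(LSD(req->dma)) */
        icb->request_q_address[1] = (uint32_t)(dma >> 32); /* cpu_to_le32(MSD(req->dma)) */
    }
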
2095 struct qla_hw_data *ha = vha->hw; in qla24xx_config_rings() local
2096 device_reg_t *reg = ISP_QUE_REG(ha, 0); in qla24xx_config_rings()
2097 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp; in qla24xx_config_rings()
2101 struct req_que *req = ha->req_q_map[0]; in qla24xx_config_rings()
2102 struct rsp_que *rsp = ha->rsp_q_map[0]; in qla24xx_config_rings()
2105 icb = (struct init_cb_24xx *)ha->init_cb; in qla24xx_config_rings()
2117 icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length); in qla24xx_config_rings()
2118 icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma)); in qla24xx_config_rings()
2119 icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma)); in qla24xx_config_rings()
2121 if (IS_SHADOW_REG_CAPABLE(ha)) in qla24xx_config_rings()
2124 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { in qla24xx_config_rings()
2127 if (ha->flags.msix_enabled) { in qla24xx_config_rings()
2128 msix = &ha->msix_entries[1]; in qla24xx_config_rings()
2142 if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) && in qla24xx_config_rings()
2143 (ha->flags.msix_enabled)) { in qla24xx_config_rings()
2145 ha->flags.disable_msix_handshake = 1; in qla24xx_config_rings()
2184 struct qla_hw_data *ha = vha->hw; in qla2x00_init_rings() local
2188 (struct mid_init_cb_24xx *) ha->init_cb; in qla2x00_init_rings()
2190 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_init_rings()
2193 for (que = 0; que < ha->max_req_queues; que++) { in qla2x00_init_rings()
2194 req = ha->req_q_map[que]; in qla2x00_init_rings()
2195 if (!req || !test_bit(que, ha->req_qid_map)) in qla2x00_init_rings()
2210 for (que = 0; que < ha->max_rsp_queues; que++) { in qla2x00_init_rings()
2211 rsp = ha->rsp_q_map[que]; in qla2x00_init_rings()
2212 if (!rsp || !test_bit(que, ha->rsp_qid_map)) in qla2x00_init_rings()
2217 if (IS_QLAFX00(ha)) in qla2x00_init_rings()
2223 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring; in qla2x00_init_rings()
2224 ha->tgt.atio_ring_index = 0; in qla2x00_init_rings()
2228 ha->isp_ops->config_rings(vha); in qla2x00_init_rings()
2230 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_init_rings()
2234 if (IS_QLAFX00(ha)) { in qla2x00_init_rings()
2235 rval = qlafx00_init_firmware(vha, ha->init_cb_size); in qla2x00_init_rings()
2240 ha->isp_ops->update_fw_options(vha); in qla2x00_init_rings()
2242 if (ha->flags.npiv_supported) { in qla2x00_init_rings()
2243 if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha)) in qla2x00_init_rings()
2244 ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1; in qla2x00_init_rings()
2245 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports); in qla2x00_init_rings()
2248 if (IS_FWI2_CAPABLE(ha)) { in qla2x00_init_rings()
2251 cpu_to_le16(ha->fw_xcb_count); in qla2x00_init_rings()
2253 if (IS_DPORT_CAPABLE(ha)) in qla2x00_init_rings()
2257 ha->flags.fawwpn_enabled = in qla2x00_init_rings()
2260 (ha->flags.fawwpn_enabled) ? "enabled" : "disabled"); in qla2x00_init_rings()
2263 rval = qla2x00_init_firmware(vha, ha->init_cb_size); in qla2x00_init_rings()
2290 struct qla_hw_data *ha = vha->hw; in qla2x00_fw_ready() local
2298 if (IS_P3P_TYPE(ha)) in qla2x00_fw_ready()
2307 if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) { in qla2x00_fw_ready()
2329 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) { in qla2x00_fw_ready()
2360 qla2x00_get_retry_cnt(vha, &ha->retry_count, in qla2x00_fw_ready()
2361 &ha->login_timeout, &ha->r_a_tov); in qla2x00_fw_ready()
2385 ha->flags.isp82xx_fw_hung) in qla2x00_fw_ready()
2432 struct qla_hw_data *ha = vha->hw; in qla2x00_configure_hba() local
2434 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); in qla2x00_configure_hba()
2440 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) || in qla2x00_configure_hba()
2441 IS_CNA_CAPABLE(ha) || in qla2x00_configure_hba()
2448 if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) && in qla2x00_configure_hba()
2469 ha->min_external_loopid = SNS_FIRST_LOOP_ID; in qla2x00_configure_hba()
2470 ha->operating_mode = LOOP; in qla2x00_configure_hba()
2471 ha->switch_cap = 0; in qla2x00_configure_hba()
2476 ha->current_topology = ISP_CFG_NL; in qla2x00_configure_hba()
2482 ha->switch_cap = sw_cap; in qla2x00_configure_hba()
2483 ha->current_topology = ISP_CFG_FL; in qla2x00_configure_hba()
2489 ha->operating_mode = P2P; in qla2x00_configure_hba()
2490 ha->current_topology = ISP_CFG_N; in qla2x00_configure_hba()
2496 ha->switch_cap = sw_cap; in qla2x00_configure_hba()
2497 ha->operating_mode = P2P; in qla2x00_configure_hba()
2498 ha->current_topology = ISP_CFG_F; in qla2x00_configure_hba()
2505 ha->current_topology = ISP_CFG_NL; in qla2x00_configure_hba()
2516 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_configure_hba()
2518 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_configure_hba()
2534 struct qla_hw_data *ha = vha->hw; in qla2x00_set_model_info() local
2535 int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && in qla2x00_set_model_info()
2536 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha); in qla2x00_set_model_info()
2539 strncpy(ha->model_number, model, len); in qla2x00_set_model_info()
2540 st = en = ha->model_number; in qla2x00_set_model_info()
2548 index = (ha->pdev->subsystem_device & 0xff); in qla2x00_set_model_info()
2550 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && in qla2x00_set_model_info()
2552 strncpy(ha->model_desc, in qla2x00_set_model_info()
2554 sizeof(ha->model_desc) - 1); in qla2x00_set_model_info()
2556 index = (ha->pdev->subsystem_device & 0xff); in qla2x00_set_model_info()
2558 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && in qla2x00_set_model_info()
2560 strcpy(ha->model_number, in qla2x00_set_model_info()
2562 strncpy(ha->model_desc, in qla2x00_set_model_info()
2564 sizeof(ha->model_desc) - 1); in qla2x00_set_model_info()
2566 strcpy(ha->model_number, def); in qla2x00_set_model_info()
2569 if (IS_FWI2_CAPABLE(ha)) in qla2x00_set_model_info()
2570 qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc, in qla2x00_set_model_info()
2571 sizeof(ha->model_desc)); in qla2x00_set_model_info()
2580 struct qla_hw_data *ha = vha->hw; in qla2xxx_nvram_wwn_from_ofw() local
2581 struct pci_dev *pdev = ha->pdev; in qla2xxx_nvram_wwn_from_ofw()
2616 struct qla_hw_data *ha = vha->hw; in qla2x00_nvram_config() local
2617 init_cb_t *icb = ha->init_cb; in qla2x00_nvram_config()
2618 nvram_t *nv = ha->nvram; in qla2x00_nvram_config()
2619 uint8_t *ptr = ha->nvram; in qla2x00_nvram_config()
2620 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2x00_nvram_config()
2625 ha->nvram_size = sizeof(nvram_t); in qla2x00_nvram_config()
2626 ha->nvram_base = 0; in qla2x00_nvram_config()
2627 if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) in qla2x00_nvram_config()
2629 ha->nvram_base = 0x80; in qla2x00_nvram_config()
2632 ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size); in qla2x00_nvram_config()
2633 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++) in qla2x00_nvram_config()
2639 (uint8_t *)nv, ha->nvram_size); in qla2x00_nvram_config()
2656 memset(nv, 0, ha->nvram_size); in qla2x00_nvram_config()
2659 if (IS_QLA23XX(ha)) { in qla2x00_nvram_config()
2666 } else if (IS_QLA2200(ha)) { in qla2x00_nvram_config()
2672 } else if (IS_QLA2100(ha)) { in qla2x00_nvram_config()
2712 if (IS_QLA23XX(ha)) in qla2x00_nvram_config()
2718 memset(icb, 0, ha->init_cb_size); in qla2x00_nvram_config()
2728 if (IS_QLA23XX(ha)) { in qla2x00_nvram_config()
2734 if (IS_QLA2300(ha)) { in qla2x00_nvram_config()
2735 if (ha->fb_rev == FPM_2310) { in qla2x00_nvram_config()
2736 strcpy(ha->model_number, "QLA2310"); in qla2x00_nvram_config()
2738 strcpy(ha->model_number, "QLA2300"); in qla2x00_nvram_config()
2744 } else if (IS_QLA2200(ha)) { in qla2x00_nvram_config()
2756 strcpy(ha->model_number, "QLA22xx"); in qla2x00_nvram_config()
2758 strcpy(ha->model_number, "QLA2100"); in qla2x00_nvram_config()
2802 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0); in qla2x00_nvram_config()
2804 if (!IS_QLA2100(ha) && !IS_QLA2200(ha)) in qla2x00_nvram_config()
2805 ha->flags.disable_risc_code_load = 0; in qla2x00_nvram_config()
2806 ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0); in qla2x00_nvram_config()
2807 ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0); in qla2x00_nvram_config()
2808 ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0); in qla2x00_nvram_config()
2809 ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0; in qla2x00_nvram_config()
2810 ha->flags.disable_serdes = 0; in qla2x00_nvram_config()
2812 ha->operating_mode = in qla2x00_nvram_config()
2815 memcpy(ha->fw_seriallink_options, nv->seriallink_options, in qla2x00_nvram_config()
2816 sizeof(ha->fw_seriallink_options)); in qla2x00_nvram_config()
2819 ha->serial0 = icb->port_name[5]; in qla2x00_nvram_config()
2820 ha->serial1 = icb->port_name[6]; in qla2x00_nvram_config()
2821 ha->serial2 = icb->port_name[7]; in qla2x00_nvram_config()
2827 ha->retry_count = nv->retry_count; in qla2x00_nvram_config()
2834 ha->login_timeout = nv->login_timeout; in qla2x00_nvram_config()
2838 ha->r_a_tov = 100; in qla2x00_nvram_config()
2840 ha->loop_reset_delay = nv->reset_delay; in qla2x00_nvram_config()
2853 ha->loop_down_abort_time = in qla2x00_nvram_config()
2856 ha->link_down_timeout = nv->link_down_timeout; in qla2x00_nvram_config()
2857 ha->loop_down_abort_time = in qla2x00_nvram_config()
2858 (LOOP_DOWN_TIME - ha->link_down_timeout); in qla2x00_nvram_config()
2864 ha->port_down_retry_count = nv->port_down_retry_count; in qla2x00_nvram_config()
2866 ha->port_down_retry_count = qlport_down_retry; in qla2x00_nvram_config()
2868 ha->login_retry_count = nv->retry_count; in qla2x00_nvram_config()
2869 if (ha->port_down_retry_count == nv->port_down_retry_count && in qla2x00_nvram_config()
2870 ha->port_down_retry_count > 3) in qla2x00_nvram_config()
2871 ha->login_retry_count = ha->port_down_retry_count; in qla2x00_nvram_config()
2872 else if (ha->port_down_retry_count > (int)ha->login_retry_count) in qla2x00_nvram_config()
2873 ha->login_retry_count = ha->port_down_retry_count; in qla2x00_nvram_config()
2875 ha->login_retry_count = ql2xloginretrycount; in qla2x00_nvram_config()
2882 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { in qla2x00_nvram_config()
2895 ha->zio_mode = icb->add_firmware_options[0] & in qla2x00_nvram_config()
2897 ha->zio_timer = icb->interrupt_delay_timer ? in qla2x00_nvram_config()
2903 if (ha->zio_mode != QLA_ZIO_DISABLED) { in qla2x00_nvram_config()
2904 ha->zio_mode = QLA_ZIO_MODE_6; in qla2x00_nvram_config()
2908 ha->zio_mode, ha->zio_timer * 100); in qla2x00_nvram_config()
2910 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode; in qla2x00_nvram_config()
2911 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer; in qla2x00_nvram_config()
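
Line 2633 walks every byte of the NVRAM image while accumulating a checksum; the image is treated as valid only when the sum wraps to zero. The loop body is not shown in the listing, so the accumulation below is an assumption consistent with the loop header:

    #include <stdint.h>
    #include <stddef.h>

    static int nvram_checksum_ok(const uint8_t *nv, size_t nvram_size)
    {
        uint8_t chksum = 0;
        size_t  cnt;

        for (cnt = 0; cnt < nvram_size; cnt++)   /* mirrors line 2633 */
            chksum += nv[cnt];
        return chksum == 0;                      /* bad checksum forces NVRAM defaults */
    }
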
2981 struct qla_hw_data *ha = vha->hw; in qla2x00_configure_loop() local
3008 if (ha->current_topology == ISP_CFG_FL && in qla2x00_configure_loop()
3013 } else if (ha->current_topology == ISP_CFG_F && in qla2x00_configure_loop()
3019 } else if (ha->current_topology == ISP_CFG_N) { in qla2x00_configure_loop()
3104 struct qla_hw_data *ha = vha->hw; in qla2x00_configure_local_loop() local
3111 memset(ha->gid_list, 0, qla2x00_gid_list_size(ha)); in qla2x00_configure_local_loop()
3112 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma, in qla2x00_configure_local_loop()
3120 (uint8_t *)ha->gid_list, in qla2x00_configure_local_loop()
3150 id_iter = (char *)ha->gid_list; in qla2x00_configure_local_loop()
3155 if (IS_QLA2100(ha) || IS_QLA2200(ha)) in qla2x00_configure_local_loop()
3161 id_iter += ha->gid_list_info_size; in qla2x00_configure_local_loop()
3231 fcport->fp_speed = ha->link_data_rate; in qla2x00_configure_local_loop()
3254 struct qla_hw_data *ha = vha->hw; in qla2x00_iidma_fcport() local
3256 if (!IS_IIDMA_CAPABLE(ha)) in qla2x00_iidma_fcport()
3263 fcport->fp_speed > ha->link_data_rate) in qla2x00_iidma_fcport()
3275 qla2x00_get_link_speed_str(ha, fcport->fp_speed), in qla2x00_iidma_fcport()
3381 struct qla_hw_data *ha = vha->hw; in qla2x00_configure_fabric() local
3382 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); in qla2x00_configure_fabric()
3386 if (IS_FWI2_CAPABLE(ha)) in qla2x00_configure_fabric()
3407 if (IS_FWI2_CAPABLE(ha)) in qla2x00_configure_fabric()
3411 rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff, in qla2x00_configure_fabric()
3486 ha->isp_ops->fabric_logout(vha, in qla2x00_configure_fabric()
3520 next_loopid = ha->min_external_loopid; in qla2x00_configure_fabric()
3660 struct qla_hw_data *ha = vha->hw; in qla2x00_find_all_fabric_devs() local
3661 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); in qla2x00_find_all_fabric_devs()
3666 if (!ha->swl) in qla2x00_find_all_fabric_devs()
3667 ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t), in qla2x00_find_all_fabric_devs()
3669 swl = ha->swl; in qla2x00_find_all_fabric_devs()
3675 memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t)); in qla2x00_find_all_fabric_devs()
3706 loop_id = ha->min_external_loopid; in qla2x00_find_all_fabric_devs()
3707 for (; loop_id <= ha->max_loop_id; loop_id++) { in qla2x00_find_all_fabric_devs()
3711 if (ha->current_topology == ISP_CFG_FL && in qla2x00_find_all_fabric_devs()
3779 (vha->d_id.b24 & 0xffff00)) && ha->current_topology == in qla2x00_find_all_fabric_devs()
3858 ha->isp_ops->fabric_logout(vha, fcport->loop_id, in qla2x00_find_all_fabric_devs()
3908 struct qla_hw_data *ha = vha->hw; in qla2x00_find_new_loop_id() local
3913 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_find_new_loop_id()
3915 dev->loop_id = find_first_zero_bit(ha->loop_id_map, in qla2x00_find_new_loop_id()
3922 set_bit(dev->loop_id, ha->loop_id_map); in qla2x00_find_new_loop_id()
3924 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_find_new_loop_id()
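
The qla2x00_find_new_loop_id() references allocate a loop ID by finding the first clear bit in ha->loop_id_map and setting it, all under ha->vport_slock. A stand-alone sketch using plain bit operations in place of the kernel's find_first_zero_bit()/set_bit() helpers; locking and the real map size are omitted as assumptions:

    #include <limits.h>

    #define LOOPID_BITS 256   /* placeholder size, not taken from the listing */

    static int alloc_loop_id(unsigned long *map)
    {
        const unsigned int bpw = sizeof(unsigned long) * CHAR_BIT;
        unsigned int id;

        for (id = 0; id < LOOPID_BITS; id++) {
            unsigned long bit = 1UL << (id % bpw);

            if (!(map[id / bpw] & bit)) {
                map[id / bpw] |= bit;   /* set_bit(dev->loop_id, ha->loop_id_map) */
                return (int)id;         /* first free loop ID */
            }
        }
        return -1;                      /* map exhausted */
    }
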
3960 struct qla_hw_data *ha = vha->hw; in qla2x00_fabric_dev_login() local
3964 if (IS_ALOGIO_CAPABLE(ha)) { in qla2x00_fabric_dev_login()
3982 ha->isp_ops->fabric_logout(vha, fcport->loop_id, in qla2x00_fabric_dev_login()
4019 struct qla_hw_data *ha = vha->hw; in qla2x00_fabric_login() local
4032 rval = ha->isp_ops->fabric_login(vha, fcport->loop_id, in qla2x00_fabric_login()
4085 if (IS_FWI2_CAPABLE(ha)) { in qla2x00_fabric_login()
4110 ha->isp_ops->fabric_logout(vha, fcport->loop_id, in qla2x00_fabric_login()
4128 ha->isp_ops->fabric_logout(vha, fcport->loop_id, in qla2x00_fabric_login()
4249 int qla2x00_perform_loop_resync(scsi_qla_host_t *ha) in qla2x00_perform_loop_resync() argument
4253 if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) { in qla2x00_perform_loop_resync()
4255 atomic_set(&ha->loop_down_timer, 0); in qla2x00_perform_loop_resync()
4256 if (!(ha->device_flags & DFLG_NO_CABLE)) { in qla2x00_perform_loop_resync()
4257 atomic_set(&ha->loop_state, LOOP_UP); in qla2x00_perform_loop_resync()
4258 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); in qla2x00_perform_loop_resync()
4259 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags); in qla2x00_perform_loop_resync()
4260 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); in qla2x00_perform_loop_resync()
4262 rval = qla2x00_loop_resync(ha); in qla2x00_perform_loop_resync()
4264 atomic_set(&ha->loop_state, LOOP_DEAD); in qla2x00_perform_loop_resync()
4266 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags); in qla2x00_perform_loop_resync()
4277 struct qla_hw_data *ha = base_vha->hw; in qla2x00_update_fcports() local
4280 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_update_fcports()
4287 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_update_fcports()
4297 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_update_fcports()
4302 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_update_fcports()
4309 struct qla_hw_data *ha = vha->hw; in qla83xx_reset_ownership() local
4315 if (IS_QLA8044(ha)) { in qla83xx_reset_ownership()
4330 (i != ha->portnum)) { in qla83xx_reset_ownership()
4340 ((i + 8) != ha->portnum)) { in qla83xx_reset_ownership()
4350 drv_presence_mask = ~((1 << (ha->portnum)) | in qla83xx_reset_ownership()
4358 (ha->portnum < fcoe_other_function)) { in qla83xx_reset_ownership()
4361 ha->flags.nic_core_reset_owner = 1; in qla83xx_reset_ownership()
4369 struct qla_hw_data *ha = vha->hw; in __qla83xx_set_drv_ack() local
4374 drv_ack |= (1 << ha->portnum); in __qla83xx_set_drv_ack()
4385 struct qla_hw_data *ha = vha->hw; in __qla83xx_clear_drv_ack() local
4390 drv_ack &= ~(1 << ha->portnum); in __qla83xx_clear_drv_ack()
4424 struct qla_hw_data *ha = vha->hw; in qla83xx_idc_audit() local
4429 ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000); in qla83xx_idc_audit()
4430 idc_audit_reg = (ha->portnum) | in qla83xx_idc_audit()
4431 (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8); in qla83xx_idc_audit()
4437 jiffies_to_msecs(ha->idc_audit_ts)) / 1000); in qla83xx_idc_audit()
4438 idc_audit_reg = (ha->portnum) | in qla83xx_idc_audit()
4454 struct qla_hw_data *ha = vha->hw; in qla83xx_initiating_reset() local
4467 if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) { in qla83xx_initiating_reset()
4508 struct qla_hw_data *ha = vha->hw; in qla83xx_check_driver_presence() local
4511 if (drv_presence & (1 << ha->portnum)) in qla83xx_check_driver_presence()
4521 struct qla_hw_data *ha = vha->hw; in qla83xx_nic_core_reset() local
4537 ha->portnum); in qla83xx_nic_core_reset()
4554 ha->flags.nic_core_hung = 0; in qla83xx_nic_core_reset()
4569 struct qla_hw_data *ha = vha->hw; in qla2xxx_mctp_dump() local
4572 if (!IS_MCTP_CAPABLE(ha)) { in qla2xxx_mctp_dump()
4579 if (!ha->mctp_dump) { in qla2xxx_mctp_dump()
4580 ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev, in qla2xxx_mctp_dump()
4581 MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL); in qla2xxx_mctp_dump()
4583 if (!ha->mctp_dump) { in qla2xxx_mctp_dump()
4591 rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma, in qla2xxx_mctp_dump()
4599 vha->host_no, ha->mctp_dump); in qla2xxx_mctp_dump()
4600 ha->mctp_dumped = 1; in qla2xxx_mctp_dump()
4603 if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) { in qla2xxx_mctp_dump()
4604 ha->flags.nic_core_reset_hdlr_active = 1; in qla2xxx_mctp_dump()
4613 ha->flags.nic_core_reset_hdlr_active = 0; in qla2xxx_mctp_dump()
4631 struct qla_hw_data *ha = vha->hw; in qla2x00_quiesce_io() local
4635 "Quiescing I/O - ha=%p.\n", ha); in qla2x00_quiesce_io()
4637 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); in qla2x00_quiesce_io()
4641 list_for_each_entry(vp, &ha->vp_list, list) in qla2x00_quiesce_io()
4655 struct qla_hw_data *ha = vha->hw; in qla2x00_abort_isp_cleanup() local
4663 if (!(IS_P3P_TYPE(ha))) in qla2x00_abort_isp_cleanup()
4665 ha->flags.chip_reset_done = 0; in qla2x00_abort_isp_cleanup()
4670 "Performing ISP error recovery - ha=%p.\n", ha); in qla2x00_abort_isp_cleanup()
4676 if (!(IS_P3P_TYPE(ha))) in qla2x00_abort_isp_cleanup()
4677 ha->isp_ops->reset_chip(vha); in qla2x00_abort_isp_cleanup()
4684 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_abort_isp_cleanup()
4685 list_for_each_entry(vp, &ha->vp_list, list) { in qla2x00_abort_isp_cleanup()
4687 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_abort_isp_cleanup()
4691 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_abort_isp_cleanup()
4694 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_abort_isp_cleanup()
4704 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_abort_isp_cleanup()
4705 list_for_each_entry(vp, &ha->vp_list, list) { in qla2x00_abort_isp_cleanup()
4707 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_abort_isp_cleanup()
4712 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_abort_isp_cleanup()
4715 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_abort_isp_cleanup()
4717 if (!ha->flags.eeh_busy) { in qla2x00_abort_isp_cleanup()
4719 if (IS_P3P_TYPE(ha)) { in qla2x00_abort_isp_cleanup()
4734 ha->chip_reset++; in qla2x00_abort_isp_cleanup()
4754 struct qla_hw_data *ha = vha->hw; in qla2x00_abort_isp() local
4756 struct req_que *req = ha->req_q_map[0]; in qla2x00_abort_isp()
4762 if (IS_QLA8031(ha)) { in qla2x00_abort_isp()
4770 if (unlikely(pci_channel_offline(ha->pdev) && in qla2x00_abort_isp()
4771 ha->flags.pci_channel_io_perm_failure)) { in qla2x00_abort_isp()
4777 ha->isp_ops->get_flash_version(vha, req->ring); in qla2x00_abort_isp()
4779 ha->isp_ops->nvram_config(vha); in qla2x00_abort_isp()
4794 ha->isp_ops->enable_intrs(ha); in qla2x00_abort_isp()
4796 ha->isp_abort_cnt = 0; in qla2x00_abort_isp()
4799 if (IS_QLA81XX(ha) || IS_QLA8031(ha)) in qla2x00_abort_isp()
4801 if (ha->fce) { in qla2x00_abort_isp()
4802 ha->flags.fce_enabled = 1; in qla2x00_abort_isp()
4803 memset(ha->fce, 0, in qla2x00_abort_isp()
4804 fce_calc_size(ha->fce_bufs)); in qla2x00_abort_isp()
4806 ha->fce_dma, ha->fce_bufs, ha->fce_mb, in qla2x00_abort_isp()
4807 &ha->fce_bufs); in qla2x00_abort_isp()
4812 ha->flags.fce_enabled = 0; in qla2x00_abort_isp()
4816 if (ha->eft) { in qla2x00_abort_isp()
4817 memset(ha->eft, 0, EFT_SIZE); in qla2x00_abort_isp()
4819 ha->eft_dma, EFT_NUM_BUFFERS); in qla2x00_abort_isp()
4829 if (ha->isp_abort_cnt == 0) { in qla2x00_abort_isp()
4837 ha->isp_ops->reset_adapter(vha); in qla2x00_abort_isp()
4843 ha->isp_abort_cnt--; in qla2x00_abort_isp()
4846 ha->isp_abort_cnt); in qla2x00_abort_isp()
4850 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT; in qla2x00_abort_isp()
4853 "more times.\n", ha->isp_abort_cnt); in qla2x00_abort_isp()
4864 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_abort_isp()
4865 list_for_each_entry(vp, &ha->vp_list, list) { in qla2x00_abort_isp()
4868 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_abort_isp()
4872 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_abort_isp()
4876 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_abort_isp()
4878 if (IS_QLA8031(ha)) { in qla2x00_abort_isp()
4907 struct qla_hw_data *ha = vha->hw; in qla2x00_restart_isp() local
4908 struct req_que *req = ha->req_q_map[0]; in qla2x00_restart_isp()
4909 struct rsp_que *rsp = ha->rsp_q_map[0]; in qla2x00_restart_isp()
4915 status = ha->isp_ops->chip_diag(vha); in qla2x00_restart_isp()
4922 ha->flags.chip_reset_done = 1; in qla2x00_restart_isp()
4925 qla25xx_init_queues(ha); in qla2x00_restart_isp()
4938 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_restart_isp()
4941 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_restart_isp()
4954 qla25xx_init_queues(struct qla_hw_data *ha) in qla25xx_init_queues() argument
4958 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); in qla25xx_init_queues()
4962 for (i = 1; i < ha->max_rsp_queues; i++) { in qla25xx_init_queues()
4963 rsp = ha->rsp_q_map[i]; in qla25xx_init_queues()
4964 if (rsp && test_bit(i, ha->rsp_qid_map)) { in qla25xx_init_queues()
4977 for (i = 1; i < ha->max_req_queues; i++) { in qla25xx_init_queues()
4978 req = ha->req_q_map[i]; in qla25xx_init_queues()
4979 if (req && test_bit(i, ha->req_qid_map)) { in qla25xx_init_queues()
5007 struct qla_hw_data *ha = vha->hw; in qla2x00_reset_adapter() local
5008 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2x00_reset_adapter()
5011 ha->isp_ops->disable_intrs(ha); in qla2x00_reset_adapter()
5013 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_reset_adapter()
5018 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_reset_adapter()
5025 struct qla_hw_data *ha = vha->hw; in qla24xx_reset_adapter() local
5026 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; in qla24xx_reset_adapter()
5028 if (IS_P3P_TYPE(ha)) in qla24xx_reset_adapter()
5032 ha->isp_ops->disable_intrs(ha); in qla24xx_reset_adapter()
5034 spin_lock_irqsave(&ha->hardware_lock, flags); in qla24xx_reset_adapter()
5039 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla24xx_reset_adapter()
5041 if (IS_NOPOLLING_TYPE(ha)) in qla24xx_reset_adapter()
5042 ha->isp_ops->enable_intrs(ha); in qla24xx_reset_adapter()
5052 struct qla_hw_data *ha = vha->hw; in qla24xx_nvram_wwn_from_ofw() local
5053 struct pci_dev *pdev = ha->pdev; in qla24xx_nvram_wwn_from_ofw()
5078 struct qla_hw_data *ha = vha->hw; in qla24xx_nvram_config() local
5081 icb = (struct init_cb_24xx *)ha->init_cb; in qla24xx_nvram_config()
5082 nv = ha->nvram; in qla24xx_nvram_config()
5085 if (ha->port_no == 0) { in qla24xx_nvram_config()
5086 ha->nvram_base = FA_NVRAM_FUNC0_ADDR; in qla24xx_nvram_config()
5087 ha->vpd_base = FA_NVRAM_VPD0_ADDR; in qla24xx_nvram_config()
5089 ha->nvram_base = FA_NVRAM_FUNC1_ADDR; in qla24xx_nvram_config()
5090 ha->vpd_base = FA_NVRAM_VPD1_ADDR; in qla24xx_nvram_config()
5093 ha->nvram_size = sizeof(struct nvram_24xx); in qla24xx_nvram_config()
5094 ha->vpd_size = FA_NVRAM_VPD_SIZE; in qla24xx_nvram_config()
5097 ha->vpd = ha->nvram + VPD_OFFSET; in qla24xx_nvram_config()
5098 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, in qla24xx_nvram_config()
5099 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4); in qla24xx_nvram_config()
5103 ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base, in qla24xx_nvram_config()
5104 ha->nvram_size); in qla24xx_nvram_config()
5105 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) in qla24xx_nvram_config()
5111 (uint8_t *)nv, ha->nvram_size); in qla24xx_nvram_config()
5128 memset(nv, 0, ha->nvram_size); in qla24xx_nvram_config()
5136 nv->port_name[1] = 0x00 + ha->port_no + 1; in qla24xx_nvram_config()
5180 memset(icb, 0, ha->init_cb_size); in qla24xx_nvram_config()
5225 ha->flags.disable_risc_code_load = 0; in qla24xx_nvram_config()
5226 ha->flags.enable_lip_reset = 0; in qla24xx_nvram_config()
5227 ha->flags.enable_lip_full_login = in qla24xx_nvram_config()
5229 ha->flags.enable_target_reset = in qla24xx_nvram_config()
5231 ha->flags.enable_led_scheme = 0; in qla24xx_nvram_config()
5232 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0; in qla24xx_nvram_config()
5234 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) & in qla24xx_nvram_config()
5237 memcpy(ha->fw_seriallink_options24, nv->seriallink_options, in qla24xx_nvram_config()
5238 sizeof(ha->fw_seriallink_options24)); in qla24xx_nvram_config()
5241 ha->serial0 = icb->port_name[5]; in qla24xx_nvram_config()
5242 ha->serial1 = icb->port_name[6]; in qla24xx_nvram_config()
5243 ha->serial2 = icb->port_name[7]; in qla24xx_nvram_config()
5249 ha->retry_count = le16_to_cpu(nv->login_retry_count); in qla24xx_nvram_config()
5256 ha->login_timeout = le16_to_cpu(nv->login_timeout); in qla24xx_nvram_config()
5260 ha->r_a_tov = 100; in qla24xx_nvram_config()
5262 ha->loop_reset_delay = nv->reset_delay; in qla24xx_nvram_config()
5275 ha->loop_down_abort_time = in qla24xx_nvram_config()
5278 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout); in qla24xx_nvram_config()
5279 ha->loop_down_abort_time = in qla24xx_nvram_config()
5280 (LOOP_DOWN_TIME - ha->link_down_timeout); in qla24xx_nvram_config()
5284 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count); in qla24xx_nvram_config()
5286 ha->port_down_retry_count = qlport_down_retry; in qla24xx_nvram_config()
5289 ha->login_retry_count = le16_to_cpu(nv->login_retry_count); in qla24xx_nvram_config()
5290 if (ha->port_down_retry_count == in qla24xx_nvram_config()
5292 ha->port_down_retry_count > 3) in qla24xx_nvram_config()
5293 ha->login_retry_count = ha->port_down_retry_count; in qla24xx_nvram_config()
5294 else if (ha->port_down_retry_count > (int)ha->login_retry_count) in qla24xx_nvram_config()
5295 ha->login_retry_count = ha->port_down_retry_count; in qla24xx_nvram_config()
5297 ha->login_retry_count = ql2xloginretrycount; in qla24xx_nvram_config()
5301 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & in qla24xx_nvram_config()
5303 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? in qla24xx_nvram_config()
5309 if (ha->zio_mode != QLA_ZIO_DISABLED) { in qla24xx_nvram_config()
5310 ha->zio_mode = QLA_ZIO_MODE_6; in qla24xx_nvram_config()
5314 ha->zio_mode, ha->zio_timer * 100); in qla24xx_nvram_config()
5317 (uint32_t)ha->zio_mode); in qla24xx_nvram_config()
5318 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer); in qla24xx_nvram_config()
5339 struct qla_hw_data *ha = vha->hw; in qla24xx_load_risc_flash() local
5340 struct req_que *req = ha->req_q_map[0]; in qla24xx_load_risc_flash()
5379 dlen = (uint32_t)(ha->fw_transfer_size >> 2); in qla24xx_load_risc_flash()
5411 if (!IS_QLA27XX(ha)) in qla24xx_load_risc_flash()
5414 if (ha->fw_dump_template) in qla24xx_load_risc_flash()
5415 vfree(ha->fw_dump_template); in qla24xx_load_risc_flash()
5416 ha->fw_dump_template = NULL; in qla24xx_load_risc_flash()
5417 ha->fw_dump_template_len = 0; in qla24xx_load_risc_flash()
5431 ha->fw_dump_template = vmalloc(dlen); in qla24xx_load_risc_flash()
5432 if (!ha->fw_dump_template) { in qla24xx_load_risc_flash()
5440 dcode = ha->fw_dump_template; in qla24xx_load_risc_flash()
5460 ha->fw_dump_template_len = dlen; in qla24xx_load_risc_flash()
5465 if (ha->fw_dump_template) in qla24xx_load_risc_flash()
5466 vfree(ha->fw_dump_template); in qla24xx_load_risc_flash()
5467 ha->fw_dump_template = NULL; in qla24xx_load_risc_flash()
5468 ha->fw_dump_template_len = 0; in qla24xx_load_risc_flash()
5473 ha->fw_dump_template = vmalloc(dlen); in qla24xx_load_risc_flash()
5474 if (!ha->fw_dump_template) { in qla24xx_load_risc_flash()
5480 dcode = ha->fw_dump_template; in qla24xx_load_risc_flash()
5486 if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) { in qla24xx_load_risc_flash()
5492 dlen = qla27xx_fwdt_template_size(ha->fw_dump_template); in qla24xx_load_risc_flash()
5495 ha->fw_dump_template_len = dlen; in qla24xx_load_risc_flash()
5500 if (ha->fw_dump_template) in qla24xx_load_risc_flash()
5501 vfree(ha->fw_dump_template); in qla24xx_load_risc_flash()
5502 ha->fw_dump_template = NULL; in qla24xx_load_risc_flash()
5503 ha->fw_dump_template_len = 0; in qla24xx_load_risc_flash()
5517 struct qla_hw_data *ha = vha->hw; in qla2x00_load_risc() local
5518 struct req_que *req = ha->req_q_map[0]; in qla2x00_load_risc()
5574 wlen = (uint16_t)(ha->fw_transfer_size >> 1); in qla2x00_load_risc()
5620 struct qla_hw_data *ha = vha->hw; in qla24xx_load_risc_blob() local
5621 struct req_que *req = ha->req_q_map[0]; in qla24xx_load_risc_blob()
5684 dlen = (uint32_t)(ha->fw_transfer_size >> 2); in qla24xx_load_risc_blob()
5714 if (!IS_QLA27XX(ha)) in qla24xx_load_risc_blob()
5717 if (ha->fw_dump_template) in qla24xx_load_risc_blob()
5718 vfree(ha->fw_dump_template); in qla24xx_load_risc_blob()
5719 ha->fw_dump_template = NULL; in qla24xx_load_risc_blob()
5720 ha->fw_dump_template_len = 0; in qla24xx_load_risc_blob()
5734 ha->fw_dump_template = vmalloc(dlen); in qla24xx_load_risc_blob()
5735 if (!ha->fw_dump_template) { in qla24xx_load_risc_blob()
5743 dcode = ha->fw_dump_template; in qla24xx_load_risc_blob()
5762 ha->fw_dump_template_len = dlen; in qla24xx_load_risc_blob()
5767 if (ha->fw_dump_template) in qla24xx_load_risc_blob()
5768 vfree(ha->fw_dump_template); in qla24xx_load_risc_blob()
5769 ha->fw_dump_template = NULL; in qla24xx_load_risc_blob()
5770 ha->fw_dump_template_len = 0; in qla24xx_load_risc_blob()
5775 ha->fw_dump_template = vmalloc(dlen); in qla24xx_load_risc_blob()
5776 if (!ha->fw_dump_template) { in qla24xx_load_risc_blob()
5782 dcode = ha->fw_dump_template; in qla24xx_load_risc_blob()
5788 if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) { in qla24xx_load_risc_blob()
5794 dlen = qla27xx_fwdt_template_size(ha->fw_dump_template); in qla24xx_load_risc_blob()
5797 ha->fw_dump_template_len = dlen; in qla24xx_load_risc_blob()
5802 if (ha->fw_dump_template) in qla24xx_load_risc_blob()
5803 vfree(ha->fw_dump_template); in qla24xx_load_risc_blob()
5804 ha->fw_dump_template = NULL; in qla24xx_load_risc_blob()
5805 ha->fw_dump_template_len = 0; in qla24xx_load_risc_blob()
5834 struct qla_hw_data *ha = vha->hw; in qla81xx_load_risc() local
5845 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw); in qla81xx_load_risc()
5851 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw) in qla81xx_load_risc()
5856 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw); in qla81xx_load_risc()
5861 ha->flags.running_gold_fw = 1; in qla81xx_load_risc()
5869 struct qla_hw_data *ha = vha->hw; in qla2x00_try_to_stop_firmware() local
5871 if (ha->flags.pci_channel_io_perm_failure) in qla2x00_try_to_stop_firmware()
5873 if (!IS_FWI2_CAPABLE(ha)) in qla2x00_try_to_stop_firmware()
5875 if (!ha->fw_major_version) in qla2x00_try_to_stop_firmware()
5881 ha->isp_ops->reset_chip(vha); in qla2x00_try_to_stop_firmware()
5882 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS) in qla2x00_try_to_stop_firmware()
5898 struct qla_hw_data *ha = vha->hw; in qla24xx_configure_vhba() local
5899 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); in qla24xx_configure_vhba()
5907 if (ha->flags.cpu_affinity_enabled) in qla24xx_configure_vhba()
5908 req = ha->req_q_map[0]; in qla24xx_configure_vhba()
5921 rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, in qla24xx_configure_vhba()
5954 struct qla_hw_data *ha = vha->hw; in qla84xx_get_chip() local
5960 if (cs84xx->bus == ha->pdev->bus) { in qla84xx_get_chip()
5973 cs84xx->bus = ha->pdev->bus; in qla84xx_get_chip()
5996 struct qla_hw_data *ha = vha->hw; in qla84xx_put_chip() local
5997 if (ha->cs84xx) in qla84xx_put_chip()
5998 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release); in qla84xx_put_chip()
6006 struct qla_hw_data *ha = vha->hw; in qla84xx_init_chip() local
6008 mutex_lock(&ha->cs84xx->fw_update_mutex); in qla84xx_init_chip()
6012 mutex_unlock(&ha->cs84xx->fw_update_mutex); in qla84xx_init_chip()
6030 struct qla_hw_data *ha = vha->hw; in qla81xx_nvram_config() local
6033 icb = (struct init_cb_81xx *)ha->init_cb; in qla81xx_nvram_config()
6034 nv = ha->nvram; in qla81xx_nvram_config()
6037 ha->nvram_size = sizeof(struct nvram_81xx); in qla81xx_nvram_config()
6038 ha->vpd_size = FA_NVRAM_VPD_SIZE; in qla81xx_nvram_config()
6039 if (IS_P3P_TYPE(ha) || IS_QLA8031(ha)) in qla81xx_nvram_config()
6040 ha->vpd_size = FA_VPD_SIZE_82XX; in qla81xx_nvram_config()
6043 ha->vpd = ha->nvram + VPD_OFFSET; in qla81xx_nvram_config()
6044 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2, in qla81xx_nvram_config()
6045 ha->vpd_size); in qla81xx_nvram_config()
6048 ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2, in qla81xx_nvram_config()
6049 ha->nvram_size); in qla81xx_nvram_config()
6051 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) in qla81xx_nvram_config()
6057 (uint8_t *)nv, ha->nvram_size); in qla81xx_nvram_config()
6075 memset(nv, 0, ha->nvram_size); in qla81xx_nvram_config()
6082 nv->port_name[1] = 0x00 + ha->port_no + 1; in qla81xx_nvram_config()
6116 nv->enode_mac[5] = 0x06 + ha->port_no + 1; in qla81xx_nvram_config()
6121 if (IS_T10_PI_CAPABLE(ha)) in qla81xx_nvram_config()
6127 memset(icb, 0, ha->init_cb_size); in qla81xx_nvram_config()
6154 icb->enode_mac[5] = 0x06 + ha->port_no + 1; in qla81xx_nvram_config()
6158 memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb)); in qla81xx_nvram_config()
6185 ha->flags.disable_risc_code_load = 0; in qla81xx_nvram_config()
6186 ha->flags.enable_lip_reset = 0; in qla81xx_nvram_config()
6187 ha->flags.enable_lip_full_login = in qla81xx_nvram_config()
6189 ha->flags.enable_target_reset = in qla81xx_nvram_config()
6191 ha->flags.enable_led_scheme = 0; in qla81xx_nvram_config()
6192 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0; in qla81xx_nvram_config()
6194 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) & in qla81xx_nvram_config()
6198 ha->serial0 = icb->port_name[5]; in qla81xx_nvram_config()
6199 ha->serial1 = icb->port_name[6]; in qla81xx_nvram_config()
6200 ha->serial2 = icb->port_name[7]; in qla81xx_nvram_config()
6206 ha->retry_count = le16_to_cpu(nv->login_retry_count); in qla81xx_nvram_config()
6213 ha->login_timeout = le16_to_cpu(nv->login_timeout); in qla81xx_nvram_config()
6217 ha->r_a_tov = 100; in qla81xx_nvram_config()
6219 ha->loop_reset_delay = nv->reset_delay; in qla81xx_nvram_config()
6232 ha->loop_down_abort_time = in qla81xx_nvram_config()
6235 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout); in qla81xx_nvram_config()
6236 ha->loop_down_abort_time = in qla81xx_nvram_config()
6237 (LOOP_DOWN_TIME - ha->link_down_timeout); in qla81xx_nvram_config()
6241 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count); in qla81xx_nvram_config()
6243 ha->port_down_retry_count = qlport_down_retry; in qla81xx_nvram_config()
6246 ha->login_retry_count = le16_to_cpu(nv->login_retry_count); in qla81xx_nvram_config()
6247 if (ha->port_down_retry_count == in qla81xx_nvram_config()
6249 ha->port_down_retry_count > 3) in qla81xx_nvram_config()
6250 ha->login_retry_count = ha->port_down_retry_count; in qla81xx_nvram_config()
6251 else if (ha->port_down_retry_count > (int)ha->login_retry_count) in qla81xx_nvram_config()
6252 ha->login_retry_count = ha->port_down_retry_count; in qla81xx_nvram_config()
6254 ha->login_retry_count = ql2xloginretrycount; in qla81xx_nvram_config()
6257 if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha))) in qla81xx_nvram_config()
6262 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & in qla81xx_nvram_config()
6264 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? in qla81xx_nvram_config()
6270 if (ha->zio_mode != QLA_ZIO_DISABLED) { in qla81xx_nvram_config()
6271 ha->zio_mode = QLA_ZIO_MODE_6; in qla81xx_nvram_config()
6275 ha->zio_mode, in qla81xx_nvram_config()
6276 ha->zio_timer * 100); in qla81xx_nvram_config()
6279 (uint32_t)ha->zio_mode); in qla81xx_nvram_config()
6280 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer); in qla81xx_nvram_config()
6295 struct qla_hw_data *ha = vha->hw; in qla82xx_restart_isp() local
6296 struct req_que *req = ha->req_q_map[0]; in qla82xx_restart_isp()
6297 struct rsp_que *rsp = ha->rsp_q_map[0]; in qla82xx_restart_isp()
6304 ha->flags.chip_reset_done = 1; in qla82xx_restart_isp()
6330 ha->isp_ops->enable_intrs(ha); in qla82xx_restart_isp()
6332 ha->isp_abort_cnt = 0; in qla82xx_restart_isp()
6338 if (ha->fce) { in qla82xx_restart_isp()
6339 ha->flags.fce_enabled = 1; in qla82xx_restart_isp()
6340 memset(ha->fce, 0, in qla82xx_restart_isp()
6341 fce_calc_size(ha->fce_bufs)); in qla82xx_restart_isp()
6343 ha->fce_dma, ha->fce_bufs, ha->fce_mb, in qla82xx_restart_isp()
6344 &ha->fce_bufs); in qla82xx_restart_isp()
6349 ha->flags.fce_enabled = 0; in qla82xx_restart_isp()
6353 if (ha->eft) { in qla82xx_restart_isp()
6354 memset(ha->eft, 0, EFT_SIZE); in qla82xx_restart_isp()
6356 ha->eft_dma, EFT_NUM_BUFFERS); in qla82xx_restart_isp()
6369 spin_lock_irqsave(&ha->vport_slock, flags); in qla82xx_restart_isp()
6370 list_for_each_entry(vp, &ha->vp_list, list) { in qla82xx_restart_isp()
6373 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla82xx_restart_isp()
6377 spin_lock_irqsave(&ha->vport_slock, flags); in qla82xx_restart_isp()
6381 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla82xx_restart_isp()
6394 struct qla_hw_data *ha = vha->hw; in qla81xx_update_fw_options() local
6400 memset(ha->fw_options, 0, sizeof(ha->fw_options)); in qla81xx_update_fw_options()
6401 ha->fw_options[2] |= BIT_9; in qla81xx_update_fw_options()
6402 qla2x00_set_fw_options(vha, ha->fw_options); in qla81xx_update_fw_options()
6432 struct qla_hw_data *ha = vha->hw; in qla24xx_get_fcp_prio() local
6434 if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled) in qla24xx_get_fcp_prio()
6438 entries = ha->fcp_prio_cfg->num_entries; in qla24xx_get_fcp_prio()
6439 pri_entry = &ha->fcp_prio_cfg->entry[0]; in qla24xx_get_fcp_prio()