hba 111 arch/parisc/include/asm/pci.h u8 (*inb) (struct pci_hba_data *hba, u16 port);
hba 112 arch/parisc/include/asm/pci.h u16 (*inw) (struct pci_hba_data *hba, u16 port);
hba 113 arch/parisc/include/asm/pci.h u32 (*inl) (struct pci_hba_data *hba, u16 port);
hba 114 arch/parisc/include/asm/pci.h void (*outb) (struct pci_hba_data *hba, u16 port, u8 data);
hba 115 arch/parisc/include/asm/pci.h void (*outw) (struct pci_hba_data *hba, u16 port, u16 data);
hba 116 arch/parisc/include/asm/pci.h void (*outl) (struct pci_hba_data *hba, u16 port, u32 data);
hba 208 arch/parisc/include/asm/ropes.h struct pci_hba_data hba;
hba 257 arch/parisc/kernel/pci.c void pcibios_register_hba(struct pci_hba_data *hba)
hba 264 arch/parisc/kernel/pci.c parisc_pci_hba[pci_hba_count] = hba;
hba 265 arch/parisc/kernel/pci.c hba->hba_num = pci_hba_count++;
hba 411 drivers/char/agp/parisc-agp.c parisc_agp_setup(sba_list->ioc[0].ioc_hpa, lbadev->hba.base_addr);
hba 3281 drivers/message/fusion/mptsas.c struct mptsas_portinfo *port_info, *hba;
hba 3284 drivers/message/fusion/mptsas.c hba = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
hba 3285 drivers/message/fusion/mptsas.c if (! hba)
hba 3288 drivers/message/fusion/mptsas.c error = mptsas_sas_io_unit_pg0(ioc, hba);
hba 3296 drivers/message/fusion/mptsas.c ioc->hba_port_info = port_info = hba;
hba 3300 drivers/message/fusion/mptsas.c for (i = 0; i < hba->num_phys; i++) {
hba 3302 drivers/message/fusion/mptsas.c hba->phy_info[i].negotiated_link_rate;
hba 3304 drivers/message/fusion/mptsas.c hba->phy_info[i].handle;
hba 3306 drivers/message/fusion/mptsas.c hba->phy_info[i].port_id;
hba 3308 drivers/message/fusion/mptsas.c kfree(hba->phy_info);
hba 3309 drivers/message/fusion/mptsas.c kfree(hba);
hba 3310 drivers/message/fusion/mptsas.c hba = NULL;
hba 3348 drivers/message/fusion/mptsas.c kfree(hba);
hba 1518 drivers/parisc/ccio-dma.c struct pci_hba_data *hba;
hba 1546 drivers/parisc/ccio-dma.c hba = kzalloc(sizeof(*hba), GFP_KERNEL);
hba 1548 drivers/parisc/ccio-dma.c BUG_ON(hba == NULL);
hba 1550 drivers/parisc/ccio-dma.c hba->iommu = ioc;
hba 1551 drivers/parisc/ccio-dma.c dev->dev.platform_data = hba;
hba 143 drivers/parisc/dino.c struct pci_hba_data hba; /* 'C' inheritance - must be first */
hba 154 drivers/parisc/dino.c static inline struct dino_device *DINO_DEV(struct pci_hba_data *hba)
hba 156 drivers/parisc/dino.c return container_of(hba, struct dino_device, hba);
hba 165 drivers/parisc/dino.c return is_card_dino(&dino_dev->hba.dev->id);
hba 187 drivers/parisc/dino.c void __iomem *base_addr = d->hba.base_addr;
hba 222 drivers/parisc/dino.c void __iomem *base_addr = d->hba.base_addr;
hba 315 drivers/parisc/dino.c __raw_writel(dino_dev->imr, dino_dev->hba.base_addr+DINO_IMR);
hba 332 drivers/parisc/dino.c __raw_readl(dino_dev->hba.base_addr+DINO_IPR);
hba 336 drivers/parisc/dino.c __raw_writel( dino_dev->imr, dino_dev->hba.base_addr+DINO_IMR);
hba 347 drivers/parisc/dino.c tmp = __raw_readl(dino_dev->hba.base_addr+DINO_ILR);
hba 378 drivers/parisc/dino.c mask = __raw_readl(dino_dev->hba.base_addr+DINO_IRR0) & DINO_IRR_MASK;
hba 400 drivers/parisc/dino.c mask = __raw_readl(dino_dev->hba.base_addr+DINO_ILR) & dino_dev->imr;
hba 405 drivers/parisc/dino.c dino_dev->hba.base_addr, mask);
hba 488 drivers/parisc/dino.c res = &dino_dev->hba.lmmio_space;
hba 496 drivers/parisc/dino.c res->name = dino_dev->hba.lmmio_space.name;
hba 499 drivers/parisc/dino.c if (ccio_allocate_resource(dino_dev->hba.dev, res, _8MB,
hba 514 drivers/parisc/dino.c bus->resource[0] = &(dino_dev->hba.io_space);
hba 580 drivers/parisc/dino.c if (is_card_dino(&dino_dev->hba.dev->id)) {
hba 581 drivers/parisc/dino.c dino_card_setup(bus, dino_dev->hba.base_addr);
hba 617 drivers/parisc/dino.c if (is_card_dino(&dino_dev->hba.dev->id))
hba 682 drivers/parisc/dino.c status = __raw_readl(dino_dev->hba.base_addr+DINO_IO_STATUS);
hba 685 drivers/parisc/dino.c dino_dev->hba.base_addr+DINO_IO_COMMAND);
hba 689 drivers/parisc/dino.c __raw_writel(0x00000000, dino_dev->hba.base_addr+DINO_GMASK);
hba 690 drivers/parisc/dino.c __raw_writel(0x00000001, dino_dev->hba.base_addr+DINO_IO_FBB_EN);
hba 691 drivers/parisc/dino.c __raw_writel(0x00000000, dino_dev->hba.base_addr+DINO_ICR);
hba 701 drivers/parisc/dino.c __raw_writel( brdg_feat, dino_dev->hba.base_addr+DINO_BRDG_FEAT);
hba 708 drivers/parisc/dino.c __raw_writel(0x00000000, dino_dev->hba.base_addr+DINO_IO_ADDR_EN);
hba 710 drivers/parisc/dino.c __raw_writel(0x00000000, dino_dev->hba.base_addr+DINO_DAMODE);
hba 711 drivers/parisc/dino.c __raw_writel(0x00222222, dino_dev->hba.base_addr+DINO_PCIROR);
hba 712 drivers/parisc/dino.c __raw_writel(0x00222222, dino_dev->hba.base_addr+DINO_PCIWOR);
hba 714 drivers/parisc/dino.c __raw_writel(0x00000040, dino_dev->hba.base_addr+DINO_MLTIM);
hba 715 drivers/parisc/dino.c __raw_writel(0x00000080, dino_dev->hba.base_addr+DINO_IO_CONTROL);
hba 716 drivers/parisc/dino.c __raw_writel(0x0000008c, dino_dev->hba.base_addr+DINO_TLTIM);
hba 719 drivers/parisc/dino.c __raw_writel(0x0000007e, dino_dev->hba.base_addr+DINO_PAMR);
hba 720 drivers/parisc/dino.c __raw_writel(0x0000007f, dino_dev->hba.base_addr+DINO_PAPR);
hba 721 drivers/parisc/dino.c __raw_writel(0x00000000, dino_dev->hba.base_addr+DINO_PAMR);
hba 728 drivers/parisc/dino.c __raw_writel(0x0000004f, dino_dev->hba.base_addr+DINO_PCICMD);
hba 749 drivers/parisc/dino.c io_addr = __raw_readl(dino_dev->hba.base_addr + DINO_IO_ADDR_EN);
hba 755 drivers/parisc/dino.c res = &dino_dev->hba.lmmio_space;
hba 788 drivers/parisc/dino.c res = &dino_dev->hba.lmmio_space;
hba 794 drivers/parisc/dino.c result = ccio_request_resource(dino_dev->hba.dev, &res[i]);
hba 812 drivers/parisc/dino.c pcibios_register_hba(&dino_dev->hba);
hba 856 drivers/parisc/dino.c __raw_writel(eim, dino_dev->hba.base_addr+DINO_IAR0);
hba 862 drivers/parisc/dino.c __raw_readl(dino_dev->hba.base_addr+DINO_IRR0);
hba 865 drivers/parisc/dino.c res = &dino_dev->hba.io_space;
hba 871 drivers/parisc/dino.c res->start = HBA_PORT_BASE(dino_dev->hba.hba_num);
hba 878 drivers/parisc/dino.c dino_dev->hba.base_addr);
hba 976 drivers/parisc/dino.c dino_dev->hba.dev = dev;
hba 977 drivers/parisc/dino.c dino_dev->hba.base_addr = ioremap_nocache(hpa, 4096);
hba 978 drivers/parisc/dino.c dino_dev->hba.lmmio_space_offset = PCI_F_EXTEND;
hba 980 drivers/parisc/dino.c dino_dev->hba.iommu = ccio_get_iommu(dev);
hba 993 drivers/parisc/dino.c pci_add_resource_offset(&resources, &dino_dev->hba.io_space,
hba 994 drivers/parisc/dino.c HBA_PORT_BASE(dino_dev->hba.hba_num));
hba 995 drivers/parisc/dino.c if (dino_dev->hba.lmmio_space.flags)
hba 996 drivers/parisc/dino.c pci_add_resource_offset(&resources, &dino_dev->hba.lmmio_space,
hba 997 drivers/parisc/dino.c dino_dev->hba.lmmio_space_offset);
hba 998 drivers/parisc/dino.c if (dino_dev->hba.elmmio_space.flags)
hba 999 drivers/parisc/dino.c pci_add_resource_offset(&resources, &dino_dev->hba.elmmio_space,
hba 1000 drivers/parisc/dino.c dino_dev->hba.lmmio_space_offset);
hba 1001 drivers/parisc/dino.c if (dino_dev->hba.gmmio_space.flags)
hba 1002 drivers/parisc/dino.c pci_add_resource(&resources, &dino_dev->hba.gmmio_space);
hba 1004 drivers/parisc/dino.c dino_dev->hba.bus_num.start = dino_current_bus;
hba 1005 drivers/parisc/dino.c dino_dev->hba.bus_num.end = 255;
hba 1006 drivers/parisc/dino.c dino_dev->hba.bus_num.flags = IORESOURCE_BUS;
hba 1007 drivers/parisc/dino.c pci_add_resource(&resources, &dino_dev->hba.bus_num);
hba 1012 drivers/parisc/dino.c dino_dev->hba.hba_bus = bus = pci_create_root_bus(&dev->dev,
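The dino.c hits above (lines 143, 154, 156) show the parisc "'C' inheritance" idiom: struct pci_hba_data is embedded as the first member of each bridge-specific device, and container_of() recovers the outer structure from a pointer to the generic part. A minimal self-contained sketch of the pattern follows; the struct fields are hypothetical stand-ins for the real dino_device layout.

#include <stddef.h>

/* Generic HBA data, embedded first in each bridge driver's device. */
struct pci_hba_data {
	void *base_addr;
	int hba_num;
};

/* Bridge-specific device; the embedded member comes first so the
 * generic and specific views share the same address. */
struct dino_device_sketch {
	struct pci_hba_data hba;	/* 'C' inheritance - must be first */
	unsigned int imr;		/* hypothetical private state */
};

/* Portable container_of: recover the outer struct from the inner one. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static inline struct dino_device_sketch *
DINO_DEV_SKETCH(struct pci_hba_data *hba)
{
	return container_of(hba, struct dino_device_sketch, hba);
}

Because the member is first, the conversion is free at runtime; container_of still documents the relationship and keeps working even if the member ever moves.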
hba 62 drivers/parisc/eisa.c struct pci_hba_data hba;
hba 308 drivers/parisc/eisa.c eisa_dev.hba.dev = dev;
hba 309 drivers/parisc/eisa.c eisa_dev.hba.iommu = ccio_get_iommu(dev);
hba 311 drivers/parisc/eisa.c eisa_dev.hba.lmmio_space.name = "EISA";
hba 312 drivers/parisc/eisa.c eisa_dev.hba.lmmio_space.start = F_EXTEND(0xfc000000);
hba 313 drivers/parisc/eisa.c eisa_dev.hba.lmmio_space.end = F_EXTEND(0xffbfffff);
hba 314 drivers/parisc/eisa.c eisa_dev.hba.lmmio_space.flags = IORESOURCE_MEM;
hba 315 drivers/parisc/eisa.c result = ccio_request_resource(dev, &eisa_dev.hba.lmmio_space);
hba 320 drivers/parisc/eisa.c eisa_dev.hba.io_space.name = "EISA";
hba 321 drivers/parisc/eisa.c eisa_dev.hba.io_space.start = 0;
hba 322 drivers/parisc/eisa.c eisa_dev.hba.io_space.end = 0xffff;
hba 323 drivers/parisc/eisa.c eisa_dev.hba.lmmio_space.flags = IORESOURCE_IO;
hba 324 drivers/parisc/eisa.c result = request_resource(&ioport_resource, &eisa_dev.hba.io_space);
hba 329 drivers/parisc/eisa.c pcibios_register_hba(&eisa_dev.hba);
hba 363 drivers/parisc/eisa.c result = eisa_enumerator(eisa_dev.eeprom_addr, &eisa_dev.hba.io_space,
hba 364 drivers/parisc/eisa.c &eisa_dev.hba.lmmio_space);
hba 372 drivers/parisc/eisa.c eisa_dev.root.res = &eisa_dev.hba.io_space;
hba 389 drivers/parisc/eisa.c release_resource(&eisa_dev.hba.io_space);
hba 111 drivers/parisc/lba_pci.c static inline struct lba_device *LBA_DEV(struct pci_hba_data *hba)
hba 113 drivers/parisc/lba_pci.c return container_of(hba, struct lba_device, hba);
hba 191 drivers/parisc/lba_pci.c u8 first_bus = d->hba.hba_bus->busn_res.start;
hba 192 drivers/parisc/lba_pci.c u8 last_sub_bus = d->hba.hba_bus->busn_res.end;
hba 207 drivers/parisc/lba_pci.c error_config = READ_REG32(d->hba.base_addr + LBA_ERROR_CONFIG); \
hba 210 drivers/parisc/lba_pci.c status_control = READ_REG32(d->hba.base_addr + LBA_STAT_CTL); \
hba 216 drivers/parisc/lba_pci.c arb_mask = READ_REG32(d->hba.base_addr + LBA_ARB_MASK); \
hba 222 drivers/parisc/lba_pci.c WRITE_REG32(0x1, d->hba.base_addr + LBA_ARB_MASK); \
hba 228 drivers/parisc/lba_pci.c WRITE_REG32(error_config | LBA_SMART_MODE, d->hba.base_addr + LBA_ERROR_CONFIG); \
hba 237 drivers/parisc/lba_pci.c WRITE_REG32(tok | PCI_VENDOR_ID, (d)->hba.base_addr + LBA_PCI_CFG_ADDR);\
hba 242 drivers/parisc/lba_pci.c lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR); \
hba 247 drivers/parisc/lba_pci.c WRITE_REG32(~0, (d)->hba.base_addr + LBA_PCI_CFG_DATA); \
hba 252 drivers/parisc/lba_pci.c lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR); \
hba 307 drivers/parisc/lba_pci.c WRITE_REG32(((addr) & ~3), (d)->hba.base_addr + LBA_PCI_CFG_ADDR);
hba 310 drivers/parisc/lba_pci.c WRITE_REG32(((addr) & ~3), (d)->hba.base_addr + LBA_PCI_CFG_ADDR); \
hba 315 drivers/parisc/lba_pci.c lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR); \
hba 347 drivers/parisc/lba_pci.c LBA_CFG_MASTER_ABORT_CHECK(d, d->hba.base_addr, tok, error);
hba 349 drivers/parisc/lba_pci.c void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
hba 358 drivers/parisc/lba_pci.c LBA_CFG_RESTORE(d, d->hba.base_addr);
hba 368 drivers/parisc/lba_pci.c void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
hba 411 drivers/parisc/lba_pci.c void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
hba 420 drivers/parisc/lba_pci.c LBA_CFG_MASTER_ABORT_CHECK(d, d->hba.base_addr, tok, error);
hba 421 drivers/parisc/lba_pci.c LBA_CFG_RESTORE(d, d->hba.base_addr);
hba 456 drivers/parisc/lba_pci.c case 1: WRITE_REG8 (data, d->hba.base_addr + LBA_PCI_CFG_DATA + (pos & 3));
hba 458 drivers/parisc/lba_pci.c case 2: WRITE_REG16(data, d->hba.base_addr + LBA_PCI_CFG_DATA + (pos & 2));
hba 460 drivers/parisc/lba_pci.c case 4: WRITE_REG32(data, d->hba.base_addr + LBA_PCI_CFG_DATA);
hba 464 drivers/parisc/lba_pci.c lba_t32 = READ_REG32(d->hba.base_addr + LBA_PCI_CFG_ADDR);
hba 485 drivers/parisc/lba_pci.c void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
hba 515 drivers/parisc/lba_pci.c void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
hba 538 drivers/parisc/lba_pci.c lba_t32 = READ_U32(d->hba.base_addr + LBA_PCI_CFG_ADDR);
hba 739 drivers/parisc/lba_pci.c ldev->hba.io_space.name,
hba 740 drivers/parisc/lba_pci.c ldev->hba.io_space.start, ldev->hba.io_space.end,
hba 741 drivers/parisc/lba_pci.c ldev->hba.io_space.flags);
hba 743 drivers/parisc/lba_pci.c ldev->hba.lmmio_space.name,
hba 744 drivers/parisc/lba_pci.c ldev->hba.lmmio_space.start, ldev->hba.lmmio_space.end,
hba 745 drivers/parisc/lba_pci.c ldev->hba.lmmio_space.flags);
hba 747 drivers/parisc/lba_pci.c err = request_resource(&ioport_resource, &(ldev->hba.io_space));
hba 753 drivers/parisc/lba_pci.c if (ldev->hba.elmmio_space.flags) {
hba 755 drivers/parisc/lba_pci.c &(ldev->hba.elmmio_space));
hba 760 drivers/parisc/lba_pci.c (long)ldev->hba.elmmio_space.start,
hba 761 drivers/parisc/lba_pci.c (long)ldev->hba.elmmio_space.end);
hba 768 drivers/parisc/lba_pci.c if (ldev->hba.lmmio_space.flags) {
hba 769 drivers/parisc/lba_pci.c err = request_resource(&iomem_resource, &(ldev->hba.lmmio_space));
hba 773 drivers/parisc/lba_pci.c (long)ldev->hba.lmmio_space.start,
hba 774 drivers/parisc/lba_pci.c (long)ldev->hba.lmmio_space.end);
hba 780 drivers/parisc/lba_pci.c if (ldev->hba.gmmio_space.flags) {
hba 781 drivers/parisc/lba_pci.c err = request_resource(&iomem_resource, &(ldev->hba.gmmio_space));
hba 785 drivers/parisc/lba_pci.c (long)ldev->hba.gmmio_space.start,
hba 786 drivers/parisc/lba_pci.c (long)ldev->hba.gmmio_space.end);
hba 1073 drivers/parisc/lba_pci.c lba_dev->hba.bus_num.start = p->start;
hba 1074 drivers/parisc/lba_pci.c lba_dev->hba.bus_num.end = p->end;
hba 1075 drivers/parisc/lba_pci.c lba_dev->hba.bus_num.flags = IORESOURCE_BUS;
hba 1080 drivers/parisc/lba_pci.c if (!lba_dev->hba.lmmio_space.flags) {
hba 1083 drivers/parisc/lba_pci.c lba_len = ~READ_REG32(lba_dev->hba.base_addr
hba 1089 drivers/parisc/lba_pci.c sprintf(lba_dev->hba.lmmio_name,
hba 1091 drivers/parisc/lba_pci.c (int)lba_dev->hba.bus_num.start);
hba 1092 drivers/parisc/lba_pci.c lba_dev->hba.lmmio_space_offset = p->start -
hba 1094 drivers/parisc/lba_pci.c r = &lba_dev->hba.lmmio_space;
hba 1095 drivers/parisc/lba_pci.c r->name = lba_dev->hba.lmmio_name;
hba 1096 drivers/parisc/lba_pci.c } else if (!lba_dev->hba.elmmio_space.flags) {
hba 1097 drivers/parisc/lba_pci.c sprintf(lba_dev->hba.elmmio_name,
hba 1099 drivers/parisc/lba_pci.c (int)lba_dev->hba.bus_num.start);
hba 1100 drivers/parisc/lba_pci.c r = &lba_dev->hba.elmmio_space;
hba 1101 drivers/parisc/lba_pci.c r->name = lba_dev->hba.elmmio_name;
hba 1116 drivers/parisc/lba_pci.c sprintf(lba_dev->hba.gmmio_name, "PCI%02x GMMIO",
hba 1117 drivers/parisc/lba_pci.c (int)lba_dev->hba.bus_num.start);
hba 1118 drivers/parisc/lba_pci.c r = &lba_dev->hba.gmmio_space;
hba 1119 drivers/parisc/lba_pci.c r->name = lba_dev->hba.gmmio_name;
hba 1139 drivers/parisc/lba_pci.c sprintf(lba_dev->hba.io_name, "PCI%02x Ports",
hba 1140 drivers/parisc/lba_pci.c (int)lba_dev->hba.bus_num.start);
hba 1141 drivers/parisc/lba_pci.c r = &lba_dev->hba.io_space;
hba 1142 drivers/parisc/lba_pci.c r->name = lba_dev->hba.io_name;
hba 1143 drivers/parisc/lba_pci.c r->start = HBA_PORT_BASE(lba_dev->hba.hba_num);
hba 1177 drivers/parisc/lba_pci.c lba_dev->hba.lmmio_space_offset = PCI_F_EXTEND;
hba 1186 drivers/parisc/lba_pci.c lba_num = READ_REG32(lba_dev->hba.base_addr + LBA_FW_SCRATCH);
hba 1187 drivers/parisc/lba_pci.c r = &(lba_dev->hba.bus_num);
hba 1196 drivers/parisc/lba_pci.c r = &(lba_dev->hba.lmmio_space);
hba 1197 drivers/parisc/lba_pci.c sprintf(lba_dev->hba.lmmio_name, "PCI%02x LMMIO",
hba 1198 drivers/parisc/lba_pci.c (int)lba_dev->hba.bus_num.start);
hba 1199 drivers/parisc/lba_pci.c r->name = lba_dev->hba.lmmio_name;
hba 1268 drivers/parisc/lba_pci.c r->start = READ_REG32(lba_dev->hba.base_addr + LBA_LMMIO_BASE);
hba 1275 drivers/parisc/lba_pci.c r->start = PCI_HOST_ADDR(&lba_dev->hba, r->start);
hba 1276 drivers/parisc/lba_pci.c rsize = ~ READ_REG32(lba_dev->hba.base_addr + LBA_LMMIO_MASK);
hba 1305 drivers/parisc/lba_pci.c r = &(lba_dev->hba.elmmio_space);
hba 1306 drivers/parisc/lba_pci.c sprintf(lba_dev->hba.elmmio_name, "PCI%02x ELMMIO",
hba 1307 drivers/parisc/lba_pci.c (int)lba_dev->hba.bus_num.start);
hba 1308 drivers/parisc/lba_pci.c r->name = lba_dev->hba.elmmio_name;
hba 1314 drivers/parisc/lba_pci.c r->start = READ_REG32(lba_dev->hba.base_addr + LBA_ELMMIO_BASE);
hba 1321 drivers/parisc/lba_pci.c r->start = PCI_HOST_ADDR(&lba_dev->hba, r->start);
hba 1322 drivers/parisc/lba_pci.c rsize = READ_REG32(lba_dev->hba.base_addr + LBA_ELMMIO_MASK);
hba 1327 drivers/parisc/lba_pci.c r = &(lba_dev->hba.io_space);
hba 1328 drivers/parisc/lba_pci.c sprintf(lba_dev->hba.io_name, "PCI%02x Ports",
hba 1329 drivers/parisc/lba_pci.c (int)lba_dev->hba.bus_num.start);
hba 1330 drivers/parisc/lba_pci.c r->name = lba_dev->hba.io_name;
hba 1332 drivers/parisc/lba_pci.c r->start = READ_REG32(lba_dev->hba.base_addr + LBA_IOS_BASE) & ~1L;
hba 1333 drivers/parisc/lba_pci.c r->end = r->start + (READ_REG32(lba_dev->hba.base_addr + LBA_IOS_MASK) ^ (HBA_PORT_SPACE_SIZE - 1));
hba 1336 drivers/parisc/lba_pci.c lba_num = HBA_PORT_BASE(lba_dev->hba.hba_num);
hba 1362 drivers/parisc/lba_pci.c d->hba.base_addr,
hba 1363 drivers/parisc/lba_pci.c READ_REG64(d->hba.base_addr + LBA_STAT_CTL),
hba 1364 drivers/parisc/lba_pci.c READ_REG64(d->hba.base_addr + LBA_ERROR_CONFIG),
hba 1365 drivers/parisc/lba_pci.c READ_REG64(d->hba.base_addr + LBA_ERROR_STATUS),
hba 1366 drivers/parisc/lba_pci.c READ_REG64(d->hba.base_addr + LBA_DMA_CTL) );
hba 1368 drivers/parisc/lba_pci.c READ_REG64(d->hba.base_addr + LBA_ARB_MASK),
hba 1369 drivers/parisc/lba_pci.c READ_REG64(d->hba.base_addr + LBA_ARB_PRI),
hba 1370 drivers/parisc/lba_pci.c READ_REG64(d->hba.base_addr + LBA_ARB_MODE),
hba 1371 drivers/parisc/lba_pci.c READ_REG64(d->hba.base_addr + LBA_ARB_MTLT) );
hba 1373 drivers/parisc/lba_pci.c READ_REG64(d->hba.base_addr + LBA_HINT_CFG));
hba 1377 drivers/parisc/lba_pci.c printk(" %Lx", READ_REG64(d->hba.base_addr + i));
hba 1391 drivers/parisc/lba_pci.c bus_reset = READ_REG32(d->hba.base_addr + LBA_STAT_CTL + 4) & 1;
hba 1396 drivers/parisc/lba_pci.c stat = READ_REG32(d->hba.base_addr + LBA_ERROR_CONFIG);
hba 1400 drivers/parisc/lba_pci.c WRITE_REG32(stat, d->hba.base_addr + LBA_ERROR_CONFIG);
hba 1418 drivers/parisc/lba_pci.c stat = READ_REG32(d->hba.base_addr + LBA_STAT_CTL);
hba 1420 drivers/parisc/lba_pci.c WRITE_REG32(stat | HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL);
hba 1422 drivers/parisc/lba_pci.c WRITE_REG32(stat & ~HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL);
hba 1433 drivers/parisc/lba_pci.c if (0 == READ_REG32(d->hba.base_addr + LBA_ARB_MASK)) {
hba 1444 drivers/parisc/lba_pci.c WRITE_REG32(0x3, d->hba.base_addr + LBA_ARB_MASK);
hba 1554 drivers/parisc/lba_pci.c lba_dev->hba.base_addr = addr;
hba 1555 drivers/parisc/lba_pci.c lba_dev->hba.dev = dev;
hba 1557 drivers/parisc/lba_pci.c lba_dev->hba.iommu = sba_get_iommu(dev); /* get iommu data */
hba 1562 drivers/parisc/lba_pci.c pcibios_register_hba(&lba_dev->hba);
hba 1586 drivers/parisc/lba_pci.c if (lba_dev->hba.bus_num.start < lba_next_bus)
hba 1587 drivers/parisc/lba_pci.c lba_dev->hba.bus_num.start = lba_next_bus;
hba 1599 drivers/parisc/lba_pci.c &(lba_dev->hba.lmmio_space))) {
hba 1601 drivers/parisc/lba_pci.c (long)lba_dev->hba.lmmio_space.start,
hba 1602 drivers/parisc/lba_pci.c (long)lba_dev->hba.lmmio_space.end);
hba 1603 drivers/parisc/lba_pci.c lba_dev->hba.lmmio_space.flags = 0;
hba 1606 drivers/parisc/lba_pci.c pci_add_resource_offset(&resources, &lba_dev->hba.io_space,
hba 1607 drivers/parisc/lba_pci.c HBA_PORT_BASE(lba_dev->hba.hba_num));
hba 1608 drivers/parisc/lba_pci.c if (lba_dev->hba.elmmio_space.flags)
hba 1609 drivers/parisc/lba_pci.c pci_add_resource_offset(&resources, &lba_dev->hba.elmmio_space,
hba 1610 drivers/parisc/lba_pci.c lba_dev->hba.lmmio_space_offset);
hba 1611 drivers/parisc/lba_pci.c if (lba_dev->hba.lmmio_space.flags)
hba 1612 drivers/parisc/lba_pci.c pci_add_resource_offset(&resources, &lba_dev->hba.lmmio_space,
hba 1613 drivers/parisc/lba_pci.c lba_dev->hba.lmmio_space_offset);
hba 1614 drivers/parisc/lba_pci.c if (lba_dev->hba.gmmio_space.flags) {
hba 1620 drivers/parisc/lba_pci.c pci_add_resource(&resources, &lba_dev->hba.bus_num);
hba 1623 drivers/parisc/lba_pci.c lba_bus = lba_dev->hba.hba_bus =
hba 1624 drivers/parisc/lba_pci.c pci_create_root_bus(&dev->dev, lba_dev->hba.bus_num.start,
hba 1645 drivers/parisc/lba_pci.c lba_dump_res(&lba_dev->hba.io_space, 2);
hba 1647 drivers/parisc/lba_pci.c lba_dump_res(&lba_dev->hba.lmmio_space, 2);
hba 102 drivers/s390/scsi/zfcp_dbf.c debug_event(dbf->hba, level, rec, sizeof(*rec));
hba 119 drivers/s390/scsi/zfcp_dbf.c if (unlikely(!debug_level_enabled(dbf->hba, level)))
hba 149 drivers/s390/scsi/zfcp_dbf.c debug_event(dbf->hba, level, rec, sizeof(*rec));
hba 166 drivers/s390/scsi/zfcp_dbf.c if (unlikely(!debug_level_enabled(dbf->hba, level)))
hba 180 drivers/s390/scsi/zfcp_dbf.c debug_event(dbf->hba, level, rec, sizeof(*rec));
hba 236 drivers/s390/scsi/zfcp_dbf.c if (unlikely(!debug_level_enabled(dbf->hba, level)))
hba 245 drivers/s390/scsi/zfcp_dbf.c debug_event(dbf->hba, level, rec, sizeof(*rec));
hba 745 drivers/s390/scsi/zfcp_dbf.c debug_unregister(dbf->hba);
hba 779 drivers/s390/scsi/zfcp_dbf.c dbf->hba = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_hba));
hba 780 drivers/s390/scsi/zfcp_dbf.c if (!dbf->hba)
hba 282 drivers/s390/scsi/zfcp_dbf.h debug_info_t *hba;
hba 324 drivers/s390/scsi/zfcp_dbf.h if (debug_level_enabled(req->adapter->dbf->hba, level))
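The zfcp_dbf.c hits above share one trace-record shape: bail out early when the trace level is disabled, then build the record and hand it to debug_event(). A reduced sketch of that guard-then-record flow; the record type is hypothetical and the two declarations stand in for the s390 debug-feature API used by zfcp.

/* Stand-ins for the s390 debug feature (asm/debug.h) used by zfcp. */
struct debug_info;
extern int debug_level_enabled(struct debug_info *dbf, int level);
extern void debug_event(struct debug_info *dbf, int level,
			void *data, int len);

struct zfcp_dbf_rec_sketch {	/* hypothetical trace record */
	unsigned long tag;
	unsigned long payload;
};

/* Guard first: when the level is off, skip record assembly entirely,
 * which is what the unlikely() hints in zfcp_dbf.c optimize for. */
static void trace_hba_event(struct debug_info *hba_dbf, int level,
			    unsigned long tag, unsigned long payload)
{
	struct zfcp_dbf_rec_sketch rec;

	if (!debug_level_enabled(hba_dbf, level))
		return;

	rec.tag = tag;
	rec.payload = payload;
	debug_event(hba_dbf, level, &rec, sizeof(rec));
}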
hba 155 drivers/scsi/bnx2fc/bnx2fc.h #define BNX2FC_STATS(hba, stat, cnt) \
hba 160 drivers/scsi/bnx2fc/bnx2fc.h if (hba->prev_stats.stat.cnt <= val) \
hba 161 drivers/scsi/bnx2fc/bnx2fc.h val -= hba->prev_stats.stat.cnt; \
hba 163 drivers/scsi/bnx2fc/bnx2fc.h val += (0xfffffff - hba->prev_stats.stat.cnt); \
hba 164 drivers/scsi/bnx2fc/bnx2fc.h hba->bfw_stats.cnt += val; \
hba 258 drivers/scsi/bnx2fc/bnx2fc.h struct bnx2fc_hba *hba;
hba 282 drivers/scsi/bnx2fc/bnx2fc.h struct bnx2fc_hba *hba;
hba 490 drivers/scsi/bnx2fc/bnx2fc.h struct bnx2fc_hba *hba;
hba 500 drivers/scsi/bnx2fc/bnx2fc.h int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba);
hba 501 drivers/scsi/bnx2fc/bnx2fc.h int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba);
hba 508 drivers/scsi/bnx2fc/bnx2fc.h int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
hba 513 drivers/scsi/bnx2fc/bnx2fc.h int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba);
hba 514 drivers/scsi/bnx2fc/bnx2fc.h void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba);
hba 515 drivers/scsi/bnx2fc/bnx2fc.h int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba);
hba 516 drivers/scsi/bnx2fc/bnx2fc.h void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba);
hba 517 drivers/scsi/bnx2fc/bnx2fc.h struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba);
hba 519 drivers/scsi/bnx2fc/bnx2fc.h void bnx2fc_get_link_state(struct bnx2fc_hba *hba);
hba 586 drivers/scsi/bnx2fc/bnx2fc.h int bnx2fc_send_stat_req(struct bnx2fc_hba *hba);
hba 775 drivers/scsi/bnx2fc/bnx2fc_els.c interface->hba->task_ctx[task_idx];
hba 76 drivers/scsi/bnx2fc/bnx2fc_fcoe.c static int bnx2fc_em_config(struct fc_lport *lport, struct bnx2fc_hba *hba);
hba 77 drivers/scsi/bnx2fc/bnx2fc_fcoe.c static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba);
hba 78 drivers/scsi/bnx2fc/bnx2fc_fcoe.c static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba);
hba 79 drivers/scsi/bnx2fc/bnx2fc_fcoe.c static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
hba 80 drivers/scsi/bnx2fc/bnx2fc_fcoe.c static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba);
hba 91 drivers/scsi/bnx2fc/bnx2fc_fcoe.c static int bnx2fc_fw_init(struct bnx2fc_hba *hba);
hba 92 drivers/scsi/bnx2fc/bnx2fc_fcoe.c static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba);
hba 191 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct bnx2fc_hba *hba = interface->hba;
hba 196 drivers/scsi/bnx2fc/bnx2fc_fcoe.c mutex_lock(&hba->hba_mutex);
hba 197 drivers/scsi/bnx2fc/bnx2fc_fcoe.c spin_lock_bh(&hba->hba_lock);
hba 199 drivers/scsi/bnx2fc/bnx2fc_fcoe.c tgt = hba->tgt_ofld_list[i];
hba 203 drivers/scsi/bnx2fc/bnx2fc_fcoe.c spin_unlock_bh(&hba->hba_lock);
hba 206 drivers/scsi/bnx2fc/bnx2fc_fcoe.c spin_lock_bh(&hba->hba_lock);
hba 210 drivers/scsi/bnx2fc/bnx2fc_fcoe.c spin_unlock_bh(&hba->hba_lock);
hba 211 drivers/scsi/bnx2fc/bnx2fc_fcoe.c mutex_unlock(&hba->hba_mutex);
hba 270 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct bnx2fc_hba *hba;
hba 283 drivers/scsi/bnx2fc/bnx2fc_fcoe.c hba = interface->hba;
hba 317 drivers/scsi/bnx2fc/bnx2fc_fcoe.c spin_lock_bh(&hba->hba_lock);
hba 323 drivers/scsi/bnx2fc/bnx2fc_fcoe.c spin_unlock_bh(&hba->hba_lock);
hba 330 drivers/scsi/bnx2fc/bnx2fc_fcoe.c spin_unlock_bh(&hba->hba_lock);
hba 683 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct bnx2fc_hba *hba = interface->hba;
hba 687 drivers/scsi/bnx2fc/bnx2fc_fcoe.c fw_stats = (struct fcoe_statistics_params *)hba->stats_buffer;
hba 691 drivers/scsi/bnx2fc/bnx2fc_fcoe.c mutex_lock(&hba->hba_stats_mutex);
hba 695 drivers/scsi/bnx2fc/bnx2fc_fcoe.c init_completion(&hba->stat_req_done);
hba 696 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (bnx2fc_send_stat_req(hba))
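BNX2FC_STATS (bnx2fc.h:155-164 above) accumulates deltas from firmware counters that wrap at 28 bits: if the current sample is below the previous one, the counter wrapped and the span up to 0xfffffff is added back. The usages that follow apply it once per counter. A plain-function sketch of the same arithmetic, with hypothetical names:

#include <stdint.h>

#define FW_STAT_MAX 0xfffffffu	/* firmware counters wrap at 28 bits */

/* Return how much a wrapping firmware counter advanced since the last
 * sample, mirroring the arithmetic in the BNX2FC_STATS() macro. */
static uint64_t fw_stat_delta(uint32_t cur, uint32_t prev)
{
	uint64_t val = cur;

	if (prev <= val)
		val -= prev;			/* no wrap: simple difference */
	else
		val += FW_STAT_MAX - prev;	/* wrapped past 0xfffffff */
	return val;
}

/* Usage sketch: bfw_stats.cnt += fw_stat_delta(sample, prev_sample); */

The accumulated value lives in a 64-bit total, so the driver-visible statistic keeps growing even though the hardware counter keeps wrapping.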
hba 698 drivers/scsi/bnx2fc/bnx2fc_fcoe.c rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ));
hba 703 drivers/scsi/bnx2fc/bnx2fc_fcoe.c BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt);
hba 704 drivers/scsi/bnx2fc/bnx2fc_fcoe.c bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt;
hba 705 drivers/scsi/bnx2fc/bnx2fc_fcoe.c BNX2FC_STATS(hba, tx_stat, fcoe_tx_pkt_cnt);
hba 706 drivers/scsi/bnx2fc/bnx2fc_fcoe.c bnx2fc_stats->tx_frames += hba->bfw_stats.fcoe_tx_pkt_cnt;
hba 707 drivers/scsi/bnx2fc/bnx2fc_fcoe.c BNX2FC_STATS(hba, tx_stat, fcoe_tx_byte_cnt);
hba 708 drivers/scsi/bnx2fc/bnx2fc_fcoe.c bnx2fc_stats->tx_words += ((hba->bfw_stats.fcoe_tx_byte_cnt) / 4);
hba 709 drivers/scsi/bnx2fc/bnx2fc_fcoe.c BNX2FC_STATS(hba, rx_stat0, fcoe_rx_pkt_cnt);
hba 710 drivers/scsi/bnx2fc/bnx2fc_fcoe.c bnx2fc_stats->rx_frames += hba->bfw_stats.fcoe_rx_pkt_cnt;
hba 711 drivers/scsi/bnx2fc/bnx2fc_fcoe.c BNX2FC_STATS(hba, rx_stat0, fcoe_rx_byte_cnt);
hba 712 drivers/scsi/bnx2fc/bnx2fc_fcoe.c bnx2fc_stats->rx_words += ((hba->bfw_stats.fcoe_rx_byte_cnt) / 4);
hba 721 drivers/scsi/bnx2fc/bnx2fc_fcoe.c memcpy(&hba->prev_stats, hba->stats_buffer,
hba 725 drivers/scsi/bnx2fc/bnx2fc_fcoe.c mutex_unlock(&hba->hba_stats_mutex);
hba 733 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct bnx2fc_hba *hba = interface->hba;
hba 756 drivers/scsi/bnx2fc/bnx2fc_fcoe.c BNX2FC_NAME, hba->chip_num, BNX2FC_VERSION,
hba 766 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct bnx2fc_hba *hba = interface->hba;
hba 767 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct net_device *dev = hba->phys_dev;
hba 771 drivers/scsi/bnx2fc/bnx2fc_fcoe.c clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
hba 773 drivers/scsi/bnx2fc/bnx2fc_fcoe.c set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
hba 786 drivers/scsi/bnx2fc/bnx2fc_fcoe.c void bnx2fc_get_link_state(struct bnx2fc_hba *hba)
hba 788 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (test_bit(__LINK_STATE_NOCARRIER, &hba->phys_dev->state))
hba 789 drivers/scsi/bnx2fc/bnx2fc_fcoe.c set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
hba 791 drivers/scsi/bnx2fc/bnx2fc_fcoe.c clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
hba 796 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct bnx2fc_hba *hba;
hba 805 drivers/scsi/bnx2fc/bnx2fc_fcoe.c hba = interface->hba;
hba 808 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (!hba->phys_dev->ethtool_ops ||
hba 809 drivers/scsi/bnx2fc/bnx2fc_fcoe.c !hba->phys_dev->ethtool_ops->get_pauseparam)
hba 841 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct bnx2fc_hba *hba = from_timer(hba, t, destroy_timer);
hba 845 drivers/scsi/bnx2fc/bnx2fc_fcoe.c set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
hba 846 drivers/scsi/bnx2fc/bnx2fc_fcoe.c wake_up_interruptible(&hba->destroy_wait);
hba 862 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
hba 876 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
hba 882 drivers/scsi/bnx2fc/bnx2fc_fcoe.c clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
hba 883 drivers/scsi/bnx2fc/bnx2fc_fcoe.c clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
hba 888 drivers/scsi/bnx2fc/bnx2fc_fcoe.c set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
hba 900 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (interface->hba == hba &&
hba 917 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (interface->hba != hba)
hba 971 drivers/scsi/bnx2fc/bnx2fc_fcoe.c clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
hba 972 drivers/scsi/bnx2fc/bnx2fc_fcoe.c init_waitqueue_head(&hba->shutdown_wait);
hba 975 drivers/scsi/bnx2fc/bnx2fc_fcoe.c hba->num_ofld_sess);
hba 976 drivers/scsi/bnx2fc/bnx2fc_fcoe.c hba->wait_for_link_down = 1;
hba 977 drivers/scsi/bnx2fc/bnx2fc_fcoe.c wait_event_interruptible(hba->shutdown_wait,
hba 978 drivers/scsi/bnx2fc/bnx2fc_fcoe.c (hba->num_ofld_sess == 0));
hba 980 drivers/scsi/bnx2fc/bnx2fc_fcoe.c hba->num_ofld_sess);
hba 981 drivers/scsi/bnx2fc/bnx2fc_fcoe.c hba->wait_for_link_down = 0;
hba 1001 drivers/scsi/bnx2fc/bnx2fc_fcoe.c static int bnx2fc_em_config(struct fc_lport *lport, struct bnx2fc_hba *hba)
hba 1005 drivers/scsi/bnx2fc/bnx2fc_fcoe.c fcoe_min_xid = hba->max_xid + 1;
hba 1007 drivers/scsi/bnx2fc/bnx2fc_fcoe.c fcoe_max_xid = hba->max_xid + FCOE_XIDS_PER_CPU_OFFSET;
hba 1009 drivers/scsi/bnx2fc/bnx2fc_fcoe.c fcoe_max_xid = hba->max_xid + FCOE_MAX_XID_OFFSET;
hba 1144 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) {
hba 1175 drivers/scsi/bnx2fc/bnx2fc_fcoe.c static void bnx2fc_free_vport(struct bnx2fc_hba *hba, struct fc_lport *lport)
hba 1179 drivers/scsi/bnx2fc/bnx2fc_fcoe.c spin_lock_bh(&hba->hba_lock);
hba 1180 drivers/scsi/bnx2fc/bnx2fc_fcoe.c list_for_each_entry_safe(blport, tmp, &hba->vports, list) {
hba 1186 drivers/scsi/bnx2fc/bnx2fc_fcoe.c spin_unlock_bh(&hba->hba_lock);
hba 1212 drivers/scsi/bnx2fc/bnx2fc_fcoe.c bnx2fc_free_vport(interface->hba, port->lport);
hba 1238 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct net_device *physdev = interface->hba->phys_dev;
hba 1340 drivers/scsi/bnx2fc/bnx2fc_fcoe.c static void bnx2fc_hba_destroy(struct bnx2fc_hba *hba)
hba 1343 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (hba->cmd_mgr) {
hba 1344 drivers/scsi/bnx2fc/bnx2fc_fcoe.c bnx2fc_cmd_mgr_free(hba->cmd_mgr);
hba 1345 drivers/scsi/bnx2fc/bnx2fc_fcoe.c hba->cmd_mgr = NULL;
hba 1347 drivers/scsi/bnx2fc/bnx2fc_fcoe.c kfree(hba->tgt_ofld_list);
hba 1348 drivers/scsi/bnx2fc/bnx2fc_fcoe.c bnx2fc_unbind_pcidev(hba);
hba 1349 drivers/scsi/bnx2fc/bnx2fc_fcoe.c kfree(hba);
hba 1362 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct bnx2fc_hba *hba;
hba 1366 drivers/scsi/bnx2fc/bnx2fc_fcoe.c hba = kzalloc(sizeof(*hba), GFP_KERNEL);
hba 1367 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (!hba) {
hba 1371 drivers/scsi/bnx2fc/bnx2fc_fcoe.c spin_lock_init(&hba->hba_lock);
hba 1372 drivers/scsi/bnx2fc/bnx2fc_fcoe.c mutex_init(&hba->hba_mutex);
hba 1373 drivers/scsi/bnx2fc/bnx2fc_fcoe.c mutex_init(&hba->hba_stats_mutex);
hba 1375 drivers/scsi/bnx2fc/bnx2fc_fcoe.c hba->cnic = cnic;
hba 1377 drivers/scsi/bnx2fc/bnx2fc_fcoe.c hba->max_tasks = cnic->max_fcoe_exchanges;
hba 1378 drivers/scsi/bnx2fc/bnx2fc_fcoe.c hba->elstm_xids = (hba->max_tasks / 2);
hba 1379 drivers/scsi/bnx2fc/bnx2fc_fcoe.c hba->max_outstanding_cmds = hba->elstm_xids;
hba 1380 drivers/scsi/bnx2fc/bnx2fc_fcoe.c hba->max_xid = (hba->max_tasks - 1);
hba 1382 drivers/scsi/bnx2fc/bnx2fc_fcoe.c rc = bnx2fc_bind_pcidev(hba);
hba 1387 drivers/scsi/bnx2fc/bnx2fc_fcoe.c hba->phys_dev = cnic->netdev;
hba 1388 drivers/scsi/bnx2fc/bnx2fc_fcoe.c hba->next_conn_id = 0;
hba 1390 drivers/scsi/bnx2fc/bnx2fc_fcoe.c hba->tgt_ofld_list =
hba 1393 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (!hba->tgt_ofld_list) {
hba 1398 drivers/scsi/bnx2fc/bnx2fc_fcoe.c hba->num_ofld_sess = 0;
hba 1400 drivers/scsi/bnx2fc/bnx2fc_fcoe.c hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba);
hba 1401 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (!hba->cmd_mgr) {
hba 1405 drivers/scsi/bnx2fc/bnx2fc_fcoe.c fcoe_cap = &hba->fcoe_cap;
hba 1411 drivers/scsi/bnx2fc/bnx2fc_fcoe.c fcoe_cap->capability2 = hba->max_outstanding_cmds <<
hba 1417 drivers/scsi/bnx2fc/bnx2fc_fcoe.c fcoe_cap->capability3 |= hba->max_outstanding_cmds <<
hba 1421 drivers/scsi/bnx2fc/bnx2fc_fcoe.c init_waitqueue_head(&hba->shutdown_wait);
hba 1422 drivers/scsi/bnx2fc/bnx2fc_fcoe.c init_waitqueue_head(&hba->destroy_wait);
hba 1423 drivers/scsi/bnx2fc/bnx2fc_fcoe.c INIT_LIST_HEAD(&hba->vports);
hba 1425 drivers/scsi/bnx2fc/bnx2fc_fcoe.c return hba;
hba 1428 drivers/scsi/bnx2fc/bnx2fc_fcoe.c kfree(hba->tgt_ofld_list);
hba 1430 drivers/scsi/bnx2fc/bnx2fc_fcoe.c bnx2fc_unbind_pcidev(hba);
hba 1432 drivers/scsi/bnx2fc/bnx2fc_fcoe.c kfree(hba);
hba 1437 drivers/scsi/bnx2fc/bnx2fc_fcoe.c bnx2fc_interface_create(struct bnx2fc_hba *hba,
hba 1459 drivers/scsi/bnx2fc/bnx2fc_fcoe.c interface->hba = hba;
hba 1499 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct bnx2fc_hba *hba = interface->hba;
hba 1509 drivers/scsi/bnx2fc/bnx2fc_fcoe.c bnx2fc_shost_template.can_queue = hba->max_outstanding_cmds;
hba 1564 drivers/scsi/bnx2fc/bnx2fc_fcoe.c rc = bnx2fc_em_config(lport, hba);
hba 1578 drivers/scsi/bnx2fc/bnx2fc_fcoe.c spin_lock_bh(&hba->hba_lock);
hba 1580 drivers/scsi/bnx2fc/bnx2fc_fcoe.c list_add_tail(&blport->list, &hba->vports);
hba 1581 drivers/scsi/bnx2fc/bnx2fc_fcoe.c spin_unlock_bh(&hba->hba_lock);
hba 1607 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct bnx2fc_hba *hba = interface->hba;
hba 1617 drivers/scsi/bnx2fc/bnx2fc_fcoe.c bnx2fc_free_vport(hba, lport);
hba 1707 drivers/scsi/bnx2fc/bnx2fc_fcoe.c static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba)
hba 1709 drivers/scsi/bnx2fc/bnx2fc_fcoe.c bnx2fc_free_fw_resc(hba);
hba 1710 drivers/scsi/bnx2fc/bnx2fc_fcoe.c bnx2fc_free_task_ctx(hba);
hba 1719 drivers/scsi/bnx2fc/bnx2fc_fcoe.c static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba)
hba 1721 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (bnx2fc_setup_task_ctx(hba))
hba 1724 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (bnx2fc_setup_fw_resc(hba))
hba 1729 drivers/scsi/bnx2fc/bnx2fc_fcoe.c bnx2fc_unbind_adapter_devices(hba);
hba 1733 drivers/scsi/bnx2fc/bnx2fc_fcoe.c static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba)
hba 1738 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (!hba->cnic) {
hba 1742 drivers/scsi/bnx2fc/bnx2fc_fcoe.c cnic = hba->cnic;
hba 1743 drivers/scsi/bnx2fc/bnx2fc_fcoe.c pdev = hba->pcidev = cnic->pcidev;
hba 1744 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (!hba->pcidev)
hba 1749 drivers/scsi/bnx2fc/bnx2fc_fcoe.c strncpy(hba->chip_num, "BCM57710", BCM_CHIP_LEN);
hba 1752 drivers/scsi/bnx2fc/bnx2fc_fcoe.c strncpy(hba->chip_num, "BCM57711", BCM_CHIP_LEN);
hba 1757 drivers/scsi/bnx2fc/bnx2fc_fcoe.c strncpy(hba->chip_num, "BCM57712", BCM_CHIP_LEN);
hba 1762 drivers/scsi/bnx2fc/bnx2fc_fcoe.c strncpy(hba->chip_num, "BCM57800", BCM_CHIP_LEN);
hba 1767 drivers/scsi/bnx2fc/bnx2fc_fcoe.c strncpy(hba->chip_num, "BCM57810", BCM_CHIP_LEN);
hba 1774 drivers/scsi/bnx2fc/bnx2fc_fcoe.c strncpy(hba->chip_num, "BCM57840", BCM_CHIP_LEN);
hba 1780 drivers/scsi/bnx2fc/bnx2fc_fcoe.c pci_dev_get(hba->pcidev);
hba 1784 drivers/scsi/bnx2fc/bnx2fc_fcoe.c static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
hba 1786 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (hba->pcidev) {
hba 1787 drivers/scsi/bnx2fc/bnx2fc_fcoe.c hba->chip_num[0] = '\0';
hba 1788 drivers/scsi/bnx2fc/bnx2fc_fcoe.c pci_dev_put(hba->pcidev);
hba 1790 drivers/scsi/bnx2fc/bnx2fc_fcoe.c hba->pcidev = NULL;
hba 1800 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct bnx2fc_hba *hba = handle;
hba 1804 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (!hba)
hba 1807 drivers/scsi/bnx2fc/bnx2fc_fcoe.c cnic = hba->cnic;
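bnx2fc_hba_create() and bnx2fc_hba_destroy() (fcoe.c:1340-1432 above) follow the usual kernel construction pattern: allocate, initialize locks, acquire sub-resources in order, and unwind in reverse order through goto labels on failure. A compact userspace sketch of that shape; the resource helpers are hypothetical stand-ins:

#include <stdlib.h>

struct hba_sketch {
	void *tgt_ofld_list;
	void *cmd_mgr;
};

static void *cmd_mgr_alloc_sketch(struct hba_sketch *hba)
{
	(void)hba;
	return calloc(1, 64);	/* stand-in for bnx2fc_cmd_mgr_alloc() */
}

/* Acquire resources in order; on failure, fall through labels that
 * release everything acquired so far, in reverse order. */
static struct hba_sketch *hba_create_sketch(size_t list_sz)
{
	struct hba_sketch *hba = calloc(1, sizeof(*hba));

	if (!hba)
		return NULL;
	hba->tgt_ofld_list = calloc(1, list_sz);
	if (!hba->tgt_ofld_list)
		goto free_hba;
	hba->cmd_mgr = cmd_mgr_alloc_sketch(hba);
	if (!hba->cmd_mgr)
		goto free_list;
	return hba;

free_list:
	free(hba->tgt_ofld_list);
free_hba:
	free(hba);
	return NULL;
}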
hba 1836 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct bnx2fc_hba *hba = handle;
hba 1843 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags))
hba 1844 drivers/scsi/bnx2fc/bnx2fc_fcoe.c bnx2fc_fw_init(hba);
hba 1849 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (interface->hba == hba) {
hba 1875 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags))
hba 1891 drivers/scsi/bnx2fc/bnx2fc_fcoe.c static int bnx2fc_fw_init(struct bnx2fc_hba *hba)
hba 1897 drivers/scsi/bnx2fc/bnx2fc_fcoe.c rc = bnx2fc_bind_adapter_devices(hba);
hba 1904 drivers/scsi/bnx2fc/bnx2fc_fcoe.c rc = bnx2fc_send_fw_fcoe_init_msg(hba);
hba 1915 drivers/scsi/bnx2fc/bnx2fc_fcoe.c while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
hba 1918 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) {
hba 1921 drivers/scsi/bnx2fc/bnx2fc_fcoe.c hba->cnic->netdev->name);
hba 1927 drivers/scsi/bnx2fc/bnx2fc_fcoe.c set_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags);
hba 1931 drivers/scsi/bnx2fc/bnx2fc_fcoe.c bnx2fc_unbind_adapter_devices(hba);
hba 1936 drivers/scsi/bnx2fc/bnx2fc_fcoe.c static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba)
hba 1938 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (test_and_clear_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags)) {
hba 1939 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (bnx2fc_send_fw_fcoe_destroy_msg(hba) == 0) {
hba 1940 drivers/scsi/bnx2fc/bnx2fc_fcoe.c timer_setup(&hba->destroy_timer, bnx2fc_destroy_timer,
hba 1942 drivers/scsi/bnx2fc/bnx2fc_fcoe.c hba->destroy_timer.expires = BNX2FC_FW_TIMEOUT +
hba 1944 drivers/scsi/bnx2fc/bnx2fc_fcoe.c add_timer(&hba->destroy_timer);
hba 1945 drivers/scsi/bnx2fc/bnx2fc_fcoe.c wait_event_interruptible(hba->destroy_wait,
hba 1947 drivers/scsi/bnx2fc/bnx2fc_fcoe.c &hba->flags));
hba 1948 drivers/scsi/bnx2fc/bnx2fc_fcoe.c clear_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
hba 1953 drivers/scsi/bnx2fc/bnx2fc_fcoe.c del_timer_sync(&hba->destroy_timer);
hba 1955 drivers/scsi/bnx2fc/bnx2fc_fcoe.c bnx2fc_unbind_adapter_devices(hba);
hba 1969 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct bnx2fc_hba *hba = handle;
hba 1975 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags))
hba 1978 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (interface->hba == hba)
hba 1981 drivers/scsi/bnx2fc/bnx2fc_fcoe.c BUG_ON(hba->num_ofld_sess != 0);
hba 1983 drivers/scsi/bnx2fc/bnx2fc_fcoe.c mutex_lock(&hba->hba_mutex);
hba 1984 drivers/scsi/bnx2fc/bnx2fc_fcoe.c clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
hba 1986 drivers/scsi/bnx2fc/bnx2fc_fcoe.c &hba->adapter_state);
hba 1988 drivers/scsi/bnx2fc/bnx2fc_fcoe.c clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
hba 1989 drivers/scsi/bnx2fc/bnx2fc_fcoe.c mutex_unlock(&hba->hba_mutex);
hba 1991 drivers/scsi/bnx2fc/bnx2fc_fcoe.c bnx2fc_fw_destroy(hba);
hba 2004 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) {
hba 2016 drivers/scsi/bnx2fc/bnx2fc_fcoe.c set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
hba 2046 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct bnx2fc_hba *hba;
hba 2059 drivers/scsi/bnx2fc/bnx2fc_fcoe.c hba = bnx2fc_hba_create(dev);
hba 2060 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (!hba) {
hba 2069 drivers/scsi/bnx2fc/bnx2fc_fcoe.c list_add_tail(&hba->list, &adapter_list);
hba 2073 drivers/scsi/bnx2fc/bnx2fc_fcoe.c dev->fcoe_cap = &hba->fcoe_cap;
hba 2074 drivers/scsi/bnx2fc/bnx2fc_fcoe.c clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
hba 2076 drivers/scsi/bnx2fc/bnx2fc_fcoe.c (void *) hba);
hba 2080 drivers/scsi/bnx2fc/bnx2fc_fcoe.c set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
hba 2182 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct bnx2fc_hba *hba;
hba 2197 drivers/scsi/bnx2fc/bnx2fc_fcoe.c hba = interface->hba;
hba 2200 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (!hba)
hba 2203 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (!hba->cnic)
hba 2212 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (!hba->cnic->get_fc_npiv_tbl)
hba 2219 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (hba->cnic->get_fc_npiv_tbl(hba->cnic, npiv_tbl))
hba 2307 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct bnx2fc_hba *hba;
hba 2349 drivers/scsi/bnx2fc/bnx2fc_fcoe.c hba = bnx2fc_hba_lookup(phys_dev);
hba 2350 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (!hba) {
hba 2361 drivers/scsi/bnx2fc/bnx2fc_fcoe.c interface = bnx2fc_interface_create(hba, netdev, fip_mode);
hba 2411 drivers/scsi/bnx2fc/bnx2fc_fcoe.c set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
hba 2483 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct bnx2fc_hba *hba;
hba 2486 drivers/scsi/bnx2fc/bnx2fc_fcoe.c list_for_each_entry(hba, &adapter_list, list) {
hba 2487 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (hba->cnic == cnic)
hba 2488 drivers/scsi/bnx2fc/bnx2fc_fcoe.c return hba;
hba 2509 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct bnx2fc_hba *hba;
hba 2512 drivers/scsi/bnx2fc/bnx2fc_fcoe.c list_for_each_entry(hba, &adapter_list, list) {
hba 2513 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (hba->phys_dev == phys_dev)
hba 2514 drivers/scsi/bnx2fc/bnx2fc_fcoe.c return hba;
hba 2527 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct bnx2fc_hba *hba;
hba 2539 drivers/scsi/bnx2fc/bnx2fc_fcoe.c hba = bnx2fc_find_hba_for_cnic(dev);
hba 2540 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (!hba) {
hba 2547 drivers/scsi/bnx2fc/bnx2fc_fcoe.c list_del_init(&hba->list);
hba 2552 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (interface->hba == hba)
hba 2559 drivers/scsi/bnx2fc/bnx2fc_fcoe.c bnx2fc_ulp_stop(hba);
hba 2561 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
hba 2562 drivers/scsi/bnx2fc/bnx2fc_fcoe.c hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE);
hba 2563 drivers/scsi/bnx2fc/bnx2fc_fcoe.c bnx2fc_hba_destroy(hba);
hba 2764 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct bnx2fc_hba *hba, *next;
hba 2781 drivers/scsi/bnx2fc/bnx2fc_fcoe.c list_for_each_entry_safe(hba, next, &to_be_deleted, list) {
hba 2782 drivers/scsi/bnx2fc/bnx2fc_fcoe.c list_del_init(&hba->list);
hba 2784 drivers/scsi/bnx2fc/bnx2fc_fcoe.c hba);
hba 2785 drivers/scsi/bnx2fc/bnx2fc_fcoe.c bnx2fc_ulp_stop(hba);
hba 2788 drivers/scsi/bnx2fc/bnx2fc_fcoe.c &hba->reg_with_cnic))
hba 2789 drivers/scsi/bnx2fc/bnx2fc_fcoe.c hba->cnic->unregister_device(hba->cnic,
hba 2791 drivers/scsi/bnx2fc/bnx2fc_fcoe.c bnx2fc_hba_destroy(hba);
hba 20 drivers/scsi/bnx2fc/bnx2fc_hwi.c static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
hba 22 drivers/scsi/bnx2fc/bnx2fc_hwi.c static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
hba 24 drivers/scsi/bnx2fc/bnx2fc_hwi.c static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
hba 26 drivers/scsi/bnx2fc/bnx2fc_hwi.c static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
hba 27 drivers/scsi/bnx2fc/bnx2fc_hwi.c static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
hba 30 drivers/scsi/bnx2fc/bnx2fc_hwi.c int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
hba 42 drivers/scsi/bnx2fc/bnx2fc_hwi.c stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma;
hba 43 drivers/scsi/bnx2fc/bnx2fc_hwi.c stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32);
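bnx2fc_send_stat_req() above, and the fcoe_init KWQEs that follow, pass 64-bit DMA addresses to firmware as two u32 halves (stat_params_addr_lo/hi, dummy_buffer_addr_lo/hi, and so on). A small sketch of the split, with a hypothetical request struct:

#include <stdint.h>

struct fw_req_sketch {		/* hypothetical firmware request block */
	uint32_t addr_lo;
	uint32_t addr_hi;
};

/* Firmware interfaces without native 64-bit fields carry DMA addresses
 * as explicit low/high 32-bit words, exactly as the
 * stat_params_addr_lo/hi assignments above do. */
static void set_req_addr(struct fw_req_sketch *req, uint64_t dma_addr)
{
	req->addr_lo = (uint32_t)dma_addr;
	req->addr_hi = (uint32_t)(dma_addr >> 32);
}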
hba 47 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (hba->cnic && hba->cnic->submit_kwqes)
hba 48 drivers/scsi/bnx2fc/bnx2fc_hwi.c rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
hba 62 drivers/scsi/bnx2fc/bnx2fc_hwi.c int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
hba 71 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (!hba->cnic) {
hba 82 drivers/scsi/bnx2fc/bnx2fc_hwi.c fcoe_init1.num_tasks = hba->max_tasks;
hba 87 drivers/scsi/bnx2fc/bnx2fc_hwi.c fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
hba 88 drivers/scsi/bnx2fc/bnx2fc_hwi.c fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32);
hba 89 drivers/scsi/bnx2fc/bnx2fc_hwi.c fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma;
hba 91 drivers/scsi/bnx2fc/bnx2fc_hwi.c (u32) ((u64) hba->task_ctx_bd_dma >> 32);
hba 109 drivers/scsi/bnx2fc/bnx2fc_hwi.c fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
hba 111 drivers/scsi/bnx2fc/bnx2fc_hwi.c ((u64) hba->hash_tbl_pbl_dma >> 32);
hba 113 drivers/scsi/bnx2fc/bnx2fc_hwi.c fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma;
hba 115 drivers/scsi/bnx2fc/bnx2fc_hwi.c ((u64) hba->t2_hash_tbl_dma >> 32);
hba 117 drivers/scsi/bnx2fc/bnx2fc_hwi.c fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma;
hba 119 drivers/scsi/bnx2fc/bnx2fc_hwi.c ((u64) hba->t2_hash_tbl_ptr_dma >> 32);
hba 141 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (hba->cnic && hba->cnic->submit_kwqes)
hba 142 drivers/scsi/bnx2fc/bnx2fc_hwi.c rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
hba 146 drivers/scsi/bnx2fc/bnx2fc_hwi.c int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba)
hba 160 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (hba->cnic && hba->cnic->submit_kwqes)
hba 161 drivers/scsi/bnx2fc/bnx2fc_hwi.c rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
hba 177 drivers/scsi/bnx2fc/bnx2fc_hwi.c struct bnx2fc_hba *hba = interface->hba;
hba 344 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (hba->cnic && hba->cnic->submit_kwqes)
hba 345 drivers/scsi/bnx2fc/bnx2fc_hwi.c rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
hba 362 drivers/scsi/bnx2fc/bnx2fc_hwi.c struct bnx2fc_hba *hba = interface->hba;
hba 415 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (hba->cnic && hba->cnic->submit_kwqes)
hba 416 drivers/scsi/bnx2fc/bnx2fc_hwi.c rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
hba 431 drivers/scsi/bnx2fc/bnx2fc_hwi.c struct bnx2fc_hba *hba = interface->hba;
hba 479 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (hba->cnic && hba->cnic->submit_kwqes)
hba 480 drivers/scsi/bnx2fc/bnx2fc_hwi.c rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
hba 491 drivers/scsi/bnx2fc/bnx2fc_hwi.c int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
hba 509 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (hba->cnic && hba->cnic->submit_kwqes)
hba 510 drivers/scsi/bnx2fc/bnx2fc_hwi.c rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
hba 515 drivers/scsi/bnx2fc/bnx2fc_hwi.c static bool is_valid_lport(struct bnx2fc_hba *hba, struct fc_lport *lport)
hba 519 drivers/scsi/bnx2fc/bnx2fc_hwi.c spin_lock_bh(&hba->hba_lock);
hba 520 drivers/scsi/bnx2fc/bnx2fc_hwi.c list_for_each_entry(blport, &hba->vports, list) {
hba 522 drivers/scsi/bnx2fc/bnx2fc_hwi.c spin_unlock_bh(&hba->hba_lock);
hba 526 drivers/scsi/bnx2fc/bnx2fc_hwi.c spin_unlock_bh(&hba->hba_lock);
hba 536 drivers/scsi/bnx2fc/bnx2fc_hwi.c struct bnx2fc_hba *hba;
hba 542 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba = unsol_els->hba;
hba 543 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (is_valid_lport(hba, lport))
hba 615 drivers/scsi/bnx2fc/bnx2fc_hwi.c unsol_els->hba = interface->hba;
hba 637 drivers/scsi/bnx2fc/bnx2fc_hwi.c struct bnx2fc_hba *hba = interface->hba;
hba 705 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (xid > hba->max_xid) {
hba 714 drivers/scsi/bnx2fc/bnx2fc_hwi.c io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
hba 820 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (xid > hba->max_xid) {
hba 838 drivers/scsi/bnx2fc/bnx2fc_hwi.c io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
hba 872 drivers/scsi/bnx2fc/bnx2fc_hwi.c struct bnx2fc_hba *hba = interface->hba;
hba 882 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (xid >= hba->max_tasks) {
hba 889 drivers/scsi/bnx2fc/bnx2fc_hwi.c task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
hba 896 drivers/scsi/bnx2fc/bnx2fc_hwi.c io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
hba 1091 drivers/scsi/bnx2fc/bnx2fc_hwi.c static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
hba 1095 drivers/scsi/bnx2fc/bnx2fc_hwi.c struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];
hba 1114 drivers/scsi/bnx2fc/bnx2fc_hwi.c static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
hba 1124 drivers/scsi/bnx2fc/bnx2fc_hwi.c tgt = hba->tgt_ofld_list[conn_id];
hba 1132 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (hba != interface->hba) {
hba 1166 drivers/scsi/bnx2fc/bnx2fc_hwi.c static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
hba 1176 drivers/scsi/bnx2fc/bnx2fc_hwi.c tgt = hba->tgt_ofld_list[conn_id];
hba 1194 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (hba != interface->hba) {
hba 1207 drivers/scsi/bnx2fc/bnx2fc_hwi.c static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
hba 1215 drivers/scsi/bnx2fc/bnx2fc_hwi.c tgt = hba->tgt_ofld_list[conn_id];
hba 1240 drivers/scsi/bnx2fc/bnx2fc_hwi.c static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
hba 1247 drivers/scsi/bnx2fc/bnx2fc_hwi.c tgt = hba->tgt_ofld_list[conn_id];
hba 1269 drivers/scsi/bnx2fc/bnx2fc_hwi.c static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
hba 1306 drivers/scsi/bnx2fc/bnx2fc_hwi.c struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
hba 1315 drivers/scsi/bnx2fc/bnx2fc_hwi.c bnx2fc_fastpath_notification(hba, kcqe);
hba 1319 drivers/scsi/bnx2fc/bnx2fc_hwi.c bnx2fc_process_ofld_cmpl(hba, kcqe);
hba 1323 drivers/scsi/bnx2fc/bnx2fc_hwi.c bnx2fc_process_enable_conn_cmpl(hba, kcqe);
hba 1329 drivers/scsi/bnx2fc/bnx2fc_hwi.c bnx2fc_init_failure(hba,
hba 1332 drivers/scsi/bnx2fc/bnx2fc_hwi.c set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
hba 1333 drivers/scsi/bnx2fc/bnx2fc_hwi.c bnx2fc_get_link_state(hba);
hba 1335 drivers/scsi/bnx2fc/bnx2fc_hwi.c (u8)hba->pcidev->bus->number);
hba 1347 drivers/scsi/bnx2fc/bnx2fc_hwi.c set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
hba 1348 drivers/scsi/bnx2fc/bnx2fc_hwi.c wake_up_interruptible(&hba->destroy_wait);
hba 1352 drivers/scsi/bnx2fc/bnx2fc_hwi.c bnx2fc_process_conn_disable_cmpl(hba, kcqe);
hba 1356 drivers/scsi/bnx2fc/bnx2fc_hwi.c bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
hba 1363 drivers/scsi/bnx2fc/bnx2fc_hwi.c complete(&hba->stat_req_done);
hba 1412 drivers/scsi/bnx2fc/bnx2fc_hwi.c struct bnx2fc_hba *hba = interface->hba;
hba 1414 drivers/scsi/bnx2fc/bnx2fc_hwi.c reg_base = pci_resource_start(hba->pcidev,
hba 1828 drivers/scsi/bnx2fc/bnx2fc_hwi.c int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
hba 1842 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
hba 1844 drivers/scsi/bnx2fc/bnx2fc_hwi.c &hba->task_ctx_bd_dma,
hba 1846 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (!hba->task_ctx_bd_tbl) {
hba 1856 drivers/scsi/bnx2fc/bnx2fc_hwi.c task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
hba 1857 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->task_ctx = kzalloc((task_ctx_arr_sz * sizeof(void *)),
hba 1859 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (!hba->task_ctx) {
hba 1868 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->task_ctx_dma = kmalloc((task_ctx_arr_sz *
hba 1870 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (!hba->task_ctx_dma) {
hba 1876 drivers/scsi/bnx2fc/bnx2fc_hwi.c task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
hba 1879 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
hba 1881 drivers/scsi/bnx2fc/bnx2fc_hwi.c &hba->task_ctx_dma[i],
hba 1883 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (!hba->task_ctx[i]) {
hba 1888 drivers/scsi/bnx2fc/bnx2fc_hwi.c addr = (u64)hba->task_ctx_dma[i];
hba 1897 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (hba->task_ctx[i]) {
hba 1899 drivers/scsi/bnx2fc/bnx2fc_hwi.c dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
hba 1900 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->task_ctx[i], hba->task_ctx_dma[i]);
hba 1901 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->task_ctx[i] = NULL;
hba 1905 drivers/scsi/bnx2fc/bnx2fc_hwi.c kfree(hba->task_ctx_dma);
hba 1906 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->task_ctx_dma = NULL;
hba 1908 drivers/scsi/bnx2fc/bnx2fc_hwi.c kfree(hba->task_ctx);
hba 1909 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->task_ctx = NULL;
hba 1911 drivers/scsi/bnx2fc/bnx2fc_hwi.c dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
hba 1912 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma);
hba 1913 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->task_ctx_bd_tbl = NULL;
hba 1918 drivers/scsi/bnx2fc/bnx2fc_hwi.c void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
hba 1923 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (hba->task_ctx_bd_tbl) {
hba 1924 drivers/scsi/bnx2fc/bnx2fc_hwi.c dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
hba 1925 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->task_ctx_bd_tbl,
hba 1926 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->task_ctx_bd_dma);
hba 1927 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->task_ctx_bd_tbl = NULL;
hba 1930 drivers/scsi/bnx2fc/bnx2fc_hwi.c task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
hba 1931 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (hba->task_ctx) {
hba 1933 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (hba->task_ctx[i]) {
hba 1934 drivers/scsi/bnx2fc/bnx2fc_hwi.c dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
hba 1935 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->task_ctx[i],
hba 1936 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->task_ctx_dma[i]);
hba 1937 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->task_ctx[i] = NULL;
hba 1940 drivers/scsi/bnx2fc/bnx2fc_hwi.c kfree(hba->task_ctx);
hba 1941 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->task_ctx = NULL;
hba 1944 drivers/scsi/bnx2fc/bnx2fc_hwi.c kfree(hba->task_ctx_dma);
hba 1945 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->task_ctx_dma = NULL;
hba 1948 drivers/scsi/bnx2fc/bnx2fc_hwi.c static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
hba 1954 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (hba->hash_tbl_segments) {
hba 1956 drivers/scsi/bnx2fc/bnx2fc_hwi.c pbl = hba->hash_tbl_pbl;
hba 1958 drivers/scsi/bnx2fc/bnx2fc_hwi.c segment_count = hba->hash_tbl_segment_count;
hba 1966 drivers/scsi/bnx2fc/bnx2fc_hwi.c dma_free_coherent(&hba->pcidev->dev,
hba 1968 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->hash_tbl_segments[i],
hba 1973 drivers/scsi/bnx2fc/bnx2fc_hwi.c kfree(hba->hash_tbl_segments);
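bnx2fc_setup_task_ctx() (hwi.c:1828-1913 above) builds a two-level layout: one DMA page per chunk of task contexts, plus a buffer-descriptor table whose entries hold each page's DMA address as {lo, hi} pairs for the firmware to walk. A sketch of filling such a table; the regpair layout is taken from the listing, while the allocation helper is a stand-in for dma_alloc_coherent():

#include <stdint.h>
#include <stdlib.h>

struct regpair_sketch {		/* {lo, hi} halves of a DMA address */
	uint32_t lo;
	uint32_t hi;
};

/* Stand-in for dma_alloc_coherent(): returns a page and its bus address. */
static void *page_alloc_sketch(uint64_t *dma_out)
{
	void *p = calloc(1, 4096);

	*dma_out = (uint64_t)(uintptr_t)p;	/* pretend CPU addr == bus addr */
	return p;
}

/* Allocate npages of context memory and record each page's DMA address
 * in the descriptor table that the firmware walks. */
static int fill_bd_table_sketch(struct regpair_sketch *bdt, void **pages,
				uint64_t *dma, int npages)
{
	for (int i = 0; i < npages; i++) {
		pages[i] = page_alloc_sketch(&dma[i]);
		if (!pages[i])
			return -1;	/* caller unwinds earlier pages */
		bdt[i].lo = (uint32_t)dma[i];
		bdt[i].hi = (uint32_t)(dma[i] >> 32);
	}
	return 0;
}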
hba 1974 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->hash_tbl_segments = NULL;
hba 1977 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (hba->hash_tbl_pbl) {
hba 1978 drivers/scsi/bnx2fc/bnx2fc_hwi.c dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
hba 1979 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->hash_tbl_pbl,
hba 1980 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->hash_tbl_pbl_dma);
hba 1981 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->hash_tbl_pbl = NULL;
hba 1985 drivers/scsi/bnx2fc/bnx2fc_hwi.c static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
hba 2000 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->hash_tbl_segment_count = segment_count;
hba 2002 drivers/scsi/bnx2fc/bnx2fc_hwi.c segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments);
hba 2003 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL);
hba 2004 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (!hba->hash_tbl_segments) {
hba 2016 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->hash_tbl_segments[i] = dma_alloc_coherent(&hba->pcidev->dev,
hba 2020 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (!hba->hash_tbl_segments[i]) {
hba 2026 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
hba 2027 drivers/scsi/bnx2fc/bnx2fc_hwi.c &hba->hash_tbl_pbl_dma,
hba 2029 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (!hba->hash_tbl_pbl) {
hba 2034 drivers/scsi/bnx2fc/bnx2fc_hwi.c pbl = hba->hash_tbl_pbl;
hba 2042 drivers/scsi/bnx2fc/bnx2fc_hwi.c pbl = hba->hash_tbl_pbl;
hba 2058 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (hba->hash_tbl_segments[i])
hba 2059 drivers/scsi/bnx2fc/bnx2fc_hwi.c dma_free_coherent(&hba->pcidev->dev,
hba 2061 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->hash_tbl_segments[i],
hba 2068 drivers/scsi/bnx2fc/bnx2fc_hwi.c kfree(hba->hash_tbl_segments);
hba 2069 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->hash_tbl_segments = NULL;
hba 2079 drivers/scsi/bnx2fc/bnx2fc_hwi.c int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
hba 2085 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (bnx2fc_allocate_hash_table(hba))
hba 2089 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
hba 2090 drivers/scsi/bnx2fc/bnx2fc_hwi.c &hba->t2_hash_tbl_ptr_dma,
hba 2092 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (!hba->t2_hash_tbl_ptr) {
hba 2094 drivers/scsi/bnx2fc/bnx2fc_hwi.c bnx2fc_free_fw_resc(hba);
hba 2100 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
hba 2101 drivers/scsi/bnx2fc/bnx2fc_hwi.c &hba->t2_hash_tbl_dma,
hba 2103 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (!hba->t2_hash_tbl) {
hba 2105 drivers/scsi/bnx2fc/bnx2fc_hwi.c bnx2fc_free_fw_resc(hba);
hba 2109 drivers/scsi/bnx2fc/bnx2fc_hwi.c addr = (unsigned long) hba->t2_hash_tbl_dma +
hba 2111 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff;
hba 2112 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->t2_hash_tbl[i].next.hi = addr >> 32;
hba 2115 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
hba 2116 drivers/scsi/bnx2fc/bnx2fc_hwi.c PAGE_SIZE, &hba->dummy_buf_dma,
hba 2118 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (!hba->dummy_buffer) {
hba 2120 drivers/scsi/bnx2fc/bnx2fc_hwi.c bnx2fc_free_fw_resc(hba);
hba 2124 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
hba 2125 drivers/scsi/bnx2fc/bnx2fc_hwi.c &hba->stats_buf_dma,
hba 2127 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (!hba->stats_buffer) {
hba 2129 drivers/scsi/bnx2fc/bnx2fc_hwi.c bnx2fc_free_fw_resc(hba);
hba 2136 drivers/scsi/bnx2fc/bnx2fc_hwi.c void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba)
hba 2140 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (hba->stats_buffer) {
hba 2141 drivers/scsi/bnx2fc/bnx2fc_hwi.c dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
hba 2142 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->stats_buffer, hba->stats_buf_dma);
hba 2143 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->stats_buffer = NULL;
hba 2146 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (hba->dummy_buffer) {
hba 2147 drivers/scsi/bnx2fc/bnx2fc_hwi.c dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
hba 2148 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->dummy_buffer, hba->dummy_buf_dma);
hba 2149 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->dummy_buffer = NULL;
hba 2152 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (hba->t2_hash_tbl_ptr) {
hba 2154 drivers/scsi/bnx2fc/bnx2fc_hwi.c dma_free_coherent(&hba->pcidev->dev, mem_size,
hba 2155 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->t2_hash_tbl_ptr,
hba 2156 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->t2_hash_tbl_ptr_dma);
hba 2157 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->t2_hash_tbl_ptr = NULL;
hba 2160 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (hba->t2_hash_tbl) {
hba 2163 drivers/scsi/bnx2fc/bnx2fc_hwi.c dma_free_coherent(&hba->pcidev->dev, mem_size,
hba 2164 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
hba 2165 drivers/scsi/bnx2fc/bnx2fc_hwi.c hba->t2_hash_tbl = NULL;
hba 2167 drivers/scsi/bnx2fc/bnx2fc_hwi.c bnx2fc_free_hash_table(hba);
hba 211 drivers/scsi/bnx2fc/bnx2fc_io.c struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
hba 224 drivers/scsi/bnx2fc/bnx2fc_io.c u16 max_xid = hba->max_xid;
hba 243 drivers/scsi/bnx2fc/bnx2fc_io.c cmgr->hba = hba;
hba 273 drivers/scsi/bnx2fc/bnx2fc_io.c num_pri_ios = num_ios - hba->elstm_xids;
hba 318 drivers/scsi/bnx2fc/bnx2fc_io.c bdt_info->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
hba 339 drivers/scsi/bnx2fc/bnx2fc_io.c struct bnx2fc_hba *hba = cmgr->hba;
hba 342 drivers/scsi/bnx2fc/bnx2fc_io.c u16 max_xid = hba->max_xid;
hba 356 drivers/scsi/bnx2fc/bnx2fc_io.c dma_free_coherent(&hba->pcidev->dev, bd_tbl_sz,
hba 398 drivers/scsi/bnx2fc/bnx2fc_io.c struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
hba 468 drivers/scsi/bnx2fc/bnx2fc_io.c struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
hba 550 drivers/scsi/bnx2fc/bnx2fc_io.c struct bnx2fc_hba *hba = interface->hba;
hba 556 drivers/scsi/bnx2fc/bnx2fc_io.c dma_free_coherent(&hba->pcidev->dev, sz,
hba 562 drivers/scsi/bnx2fc/bnx2fc_io.c dma_free_coherent(&hba->pcidev->dev, sz,
hba 568 drivers/scsi/bnx2fc/bnx2fc_io.c dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
hba 574 drivers/scsi/bnx2fc/bnx2fc_io.c dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
hba 587 drivers/scsi/bnx2fc/bnx2fc_io.c struct bnx2fc_hba *hba = interface->hba;
hba 600 drivers/scsi/bnx2fc/bnx2fc_io.c mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
hba 609 drivers/scsi/bnx2fc/bnx2fc_io.c mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
hba 622 drivers/scsi/bnx2fc/bnx2fc_io.c mp_req->mp_req_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
hba 630 drivers/scsi/bnx2fc/bnx2fc_io.c mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
hba 764 drivers/scsi/bnx2fc/bnx2fc_io.c interface->hba->task_ctx[task_idx];
hba 902 drivers/scsi/bnx2fc/bnx2fc_io.c interface->hba->task_ctx[task_idx];
hba 978 drivers/scsi/bnx2fc/bnx2fc_io.c interface->hba->task_ctx[task_idx];
hba 1036 drivers/scsi/bnx2fc/bnx2fc_io.c interface->hba->task_ctx[task_idx];
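bnx2fc_free_fw_resc() (hwi.c:2136-2167 above) frees each buffer only if its pointer is set and NULLs it afterwards, so the teardown is safe to call from any partially-completed point in bnx2fc_setup_fw_resc() and safe to call twice. A sketch of that idempotent-teardown discipline; free_dma_sketch stands in for dma_free_coherent():

#include <stdlib.h>

struct fw_resc_sketch {
	void *stats_buffer;
	void *dummy_buffer;
};

static void free_dma_sketch(void *buf)
{
	free(buf);	/* stand-in for dma_free_coherent() */
}

/* Check, free, and clear each pointer so this can run after any
 * partial setup failure, and running it twice is harmless. */
static void free_fw_resc_sketch(struct fw_resc_sketch *r)
{
	if (r->stats_buffer) {
		free_dma_sketch(r->stats_buffer);
		r->stats_buffer = NULL;
	}
	if (r->dummy_buffer) {
		free_dma_sketch(r->dummy_buffer);
		r->dummy_buffer = NULL;
	}
}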
interface->hba->task_ctx[task_idx]; hba 1647 drivers/scsi/bnx2fc/bnx2fc_io.c struct bnx2fc_hba *hba = interface->hba; hba 1664 drivers/scsi/bnx2fc/bnx2fc_io.c sg_count = dma_map_sg(&hba->pcidev->dev, scsi_sglist(sc), hba 1724 drivers/scsi/bnx2fc/bnx2fc_io.c struct bnx2fc_hba *hba = interface->hba; hba 1731 drivers/scsi/bnx2fc/bnx2fc_io.c dma_unmap_sg(&hba->pcidev->dev, scsi_sglist(sc), hba 2040 drivers/scsi/bnx2fc/bnx2fc_io.c struct bnx2fc_hba *hba = interface->hba; hba 2083 drivers/scsi/bnx2fc/bnx2fc_io.c task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; hba 22 drivers/scsi/bnx2fc/bnx2fc_tgt.c static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba, hba 24 drivers/scsi/bnx2fc/bnx2fc_tgt.c static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, hba 26 drivers/scsi/bnx2fc/bnx2fc_tgt.c static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba, hba 28 drivers/scsi/bnx2fc/bnx2fc_tgt.c static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id); hba 86 drivers/scsi/bnx2fc/bnx2fc_tgt.c struct bnx2fc_hba *hba = interface->hba; hba 100 drivers/scsi/bnx2fc/bnx2fc_tgt.c rval = bnx2fc_alloc_session_resc(hba, tgt); hba 159 drivers/scsi/bnx2fc/bnx2fc_tgt.c bnx2fc_free_session_resc(hba, tgt); hba 162 drivers/scsi/bnx2fc/bnx2fc_tgt.c bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id); hba 295 drivers/scsi/bnx2fc/bnx2fc_tgt.c struct bnx2fc_hba *hba = interface->hba; hba 326 drivers/scsi/bnx2fc/bnx2fc_tgt.c bnx2fc_send_session_destroy_req(hba, tgt); hba 346 drivers/scsi/bnx2fc/bnx2fc_tgt.c bnx2fc_free_session_resc(hba, tgt); hba 347 drivers/scsi/bnx2fc/bnx2fc_tgt.c bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id); hba 357 drivers/scsi/bnx2fc/bnx2fc_tgt.c struct bnx2fc_hba *hba = interface->hba; hba 365 drivers/scsi/bnx2fc/bnx2fc_tgt.c if (hba->num_ofld_sess >= BNX2FC_NUM_MAX_SESS) { hba 371 drivers/scsi/bnx2fc/bnx2fc_tgt.c tgt->fcoe_conn_id = bnx2fc_alloc_conn_id(hba, tgt); hba 445 drivers/scsi/bnx2fc/bnx2fc_tgt.c struct bnx2fc_hba *hba = interface->hba; hba 488 drivers/scsi/bnx2fc/bnx2fc_tgt.c mutex_lock(&hba->hba_mutex); hba 494 drivers/scsi/bnx2fc/bnx2fc_tgt.c mutex_unlock(&hba->hba_mutex); hba 505 drivers/scsi/bnx2fc/bnx2fc_tgt.c hba->num_ofld_sess); hba 511 drivers/scsi/bnx2fc/bnx2fc_tgt.c hba->num_ofld_sess++; hba 523 drivers/scsi/bnx2fc/bnx2fc_tgt.c mutex_unlock(&hba->hba_mutex); hba 538 drivers/scsi/bnx2fc/bnx2fc_tgt.c mutex_lock(&hba->hba_mutex); hba 546 drivers/scsi/bnx2fc/bnx2fc_tgt.c mutex_unlock(&hba->hba_mutex); hba 552 drivers/scsi/bnx2fc/bnx2fc_tgt.c hba->num_ofld_sess--; hba 554 drivers/scsi/bnx2fc/bnx2fc_tgt.c hba->num_ofld_sess); hba 559 drivers/scsi/bnx2fc/bnx2fc_tgt.c if ((hba->wait_for_link_down) && hba 560 drivers/scsi/bnx2fc/bnx2fc_tgt.c (hba->num_ofld_sess == 0)) { hba 561 drivers/scsi/bnx2fc/bnx2fc_tgt.c wake_up_interruptible(&hba->shutdown_wait); hba 563 drivers/scsi/bnx2fc/bnx2fc_tgt.c mutex_unlock(&hba->hba_mutex); hba 582 drivers/scsi/bnx2fc/bnx2fc_tgt.c struct bnx2fc_hba *hba = interface->hba; hba 588 drivers/scsi/bnx2fc/bnx2fc_tgt.c tgt = hba->tgt_ofld_list[i]; hba 615 drivers/scsi/bnx2fc/bnx2fc_tgt.c static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba, hba 628 drivers/scsi/bnx2fc/bnx2fc_tgt.c spin_lock_bh(&hba->hba_lock); hba 629 drivers/scsi/bnx2fc/bnx2fc_tgt.c next = hba->next_conn_id; hba 630 drivers/scsi/bnx2fc/bnx2fc_tgt.c conn_id = hba->next_conn_id++; hba 631 drivers/scsi/bnx2fc/bnx2fc_tgt.c if (hba->next_conn_id == BNX2FC_NUM_MAX_SESS) hba 632 drivers/scsi/bnx2fc/bnx2fc_tgt.c hba->next_conn_id = 0; hba 634 drivers/scsi/bnx2fc/bnx2fc_tgt.c 
while (hba->tgt_ofld_list[conn_id] != NULL) { hba 641 drivers/scsi/bnx2fc/bnx2fc_tgt.c spin_unlock_bh(&hba->hba_lock); hba 645 drivers/scsi/bnx2fc/bnx2fc_tgt.c hba->tgt_ofld_list[conn_id] = tgt; hba 647 drivers/scsi/bnx2fc/bnx2fc_tgt.c spin_unlock_bh(&hba->hba_lock); hba 651 drivers/scsi/bnx2fc/bnx2fc_tgt.c static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id) hba 654 drivers/scsi/bnx2fc/bnx2fc_tgt.c spin_lock_bh(&hba->hba_lock); hba 655 drivers/scsi/bnx2fc/bnx2fc_tgt.c hba->tgt_ofld_list[conn_id] = NULL; hba 656 drivers/scsi/bnx2fc/bnx2fc_tgt.c spin_unlock_bh(&hba->hba_lock); hba 663 drivers/scsi/bnx2fc/bnx2fc_tgt.c static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, hba 675 drivers/scsi/bnx2fc/bnx2fc_tgt.c tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size, hba 688 drivers/scsi/bnx2fc/bnx2fc_tgt.c tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, hba 701 drivers/scsi/bnx2fc/bnx2fc_tgt.c tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, hba 713 drivers/scsi/bnx2fc/bnx2fc_tgt.c tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, hba 738 drivers/scsi/bnx2fc/bnx2fc_tgt.c tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, hba 752 drivers/scsi/bnx2fc/bnx2fc_tgt.c tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, hba 766 drivers/scsi/bnx2fc/bnx2fc_tgt.c tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev, hba 790 drivers/scsi/bnx2fc/bnx2fc_tgt.c tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev, hba 805 drivers/scsi/bnx2fc/bnx2fc_tgt.c tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, hba 830 drivers/scsi/bnx2fc/bnx2fc_tgt.c static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba, hba 843 drivers/scsi/bnx2fc/bnx2fc_tgt.c dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, hba 849 drivers/scsi/bnx2fc/bnx2fc_tgt.c dma_free_coherent(&hba->pcidev->dev, tgt->conn_db_mem_size, hba 855 drivers/scsi/bnx2fc/bnx2fc_tgt.c dma_free_coherent(&hba->pcidev->dev, tgt->confq_pbl_size, hba 860 drivers/scsi/bnx2fc/bnx2fc_tgt.c dma_free_coherent(&hba->pcidev->dev, tgt->confq_mem_size, hba 866 drivers/scsi/bnx2fc/bnx2fc_tgt.c dma_free_coherent(&hba->pcidev->dev, tgt->xferq_mem_size, hba 872 drivers/scsi/bnx2fc/bnx2fc_tgt.c dma_free_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, hba 877 drivers/scsi/bnx2fc/bnx2fc_tgt.c dma_free_coherent(&hba->pcidev->dev, tgt->rq_mem_size, hba 883 drivers/scsi/bnx2fc/bnx2fc_tgt.c dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size, hba 889 drivers/scsi/bnx2fc/bnx2fc_tgt.c dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size, hba 285 drivers/scsi/bnx2i/bnx2i.h struct bnx2i_hba *hba; hba 750 drivers/scsi/bnx2i/bnx2i.h struct bnx2i_hba *hba; hba 805 drivers/scsi/bnx2i/bnx2i.h extern void bnx2i_identify_device(struct bnx2i_hba *hba, struct cnic_dev *dev); hba 815 drivers/scsi/bnx2i/bnx2i.h struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba, hba 820 drivers/scsi/bnx2i/bnx2i.h struct bnx2i_endpoint *bnx2i_ep_ofld_list_next(struct bnx2i_hba *hba); hba 821 drivers/scsi/bnx2i/bnx2i.h struct bnx2i_endpoint *bnx2i_ep_destroy_list_next(struct bnx2i_hba *hba); hba 826 drivers/scsi/bnx2i/bnx2i.h void bnx2i_free_hba(struct bnx2i_hba *hba); hba 835 drivers/scsi/bnx2i/bnx2i.h extern int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba); hba 849 drivers/scsi/bnx2i/bnx2i.h extern void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, hba 851 drivers/scsi/bnx2i/bnx2i.h extern int bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, hba 854 drivers/scsi/bnx2i/bnx2i.h extern 
int bnx2i_send_conn_destroy(struct bnx2i_hba *hba, hba 857 drivers/scsi/bnx2i/bnx2i.h extern int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, hba 859 drivers/scsi/bnx2i/bnx2i.h extern void bnx2i_free_qp_resc(struct bnx2i_hba *hba, hba 863 drivers/scsi/bnx2i/bnx2i.h struct bnx2i_hba *hba, u32 iscsi_cid); hba 865 drivers/scsi/bnx2i/bnx2i.h struct bnx2i_hba *hba, u32 iscsi_cid); hba 34 drivers/scsi/bnx2i/bnx2i_hwi.c if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) hba 48 drivers/scsi/bnx2i/bnx2i_hwi.c static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba) hba 52 drivers/scsi/bnx2i/bnx2i_hwi.c if (test_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type) || hba 53 drivers/scsi/bnx2i/bnx2i_hwi.c test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type) || hba 54 drivers/scsi/bnx2i/bnx2i_hwi.c test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) { hba 55 drivers/scsi/bnx2i/bnx2i_hwi.c if (!is_power_of_2(hba->max_sqes)) hba 56 drivers/scsi/bnx2i/bnx2i_hwi.c hba->max_sqes = rounddown_pow_of_two(hba->max_sqes); hba 58 drivers/scsi/bnx2i/bnx2i_hwi.c if (!is_power_of_2(hba->max_rqes)) hba 59 drivers/scsi/bnx2i/bnx2i_hwi.c hba->max_rqes = rounddown_pow_of_two(hba->max_rqes); hba 67 drivers/scsi/bnx2i/bnx2i_hwi.c if (hba->max_sqes < num_elements_per_pg) hba 68 drivers/scsi/bnx2i/bnx2i_hwi.c hba->max_sqes = num_elements_per_pg; hba 69 drivers/scsi/bnx2i/bnx2i_hwi.c else if (hba->max_sqes % num_elements_per_pg) hba 70 drivers/scsi/bnx2i/bnx2i_hwi.c hba->max_sqes = (hba->max_sqes + num_elements_per_pg - 1) & hba 75 drivers/scsi/bnx2i/bnx2i_hwi.c if (hba->max_cqes < num_elements_per_pg) hba 76 drivers/scsi/bnx2i/bnx2i_hwi.c hba->max_cqes = num_elements_per_pg; hba 77 drivers/scsi/bnx2i/bnx2i_hwi.c else if (hba->max_cqes % num_elements_per_pg) hba 78 drivers/scsi/bnx2i/bnx2i_hwi.c hba->max_cqes = (hba->max_cqes + num_elements_per_pg - 1) & hba 83 drivers/scsi/bnx2i/bnx2i_hwi.c if (hba->max_rqes < num_elements_per_pg) hba 84 drivers/scsi/bnx2i/bnx2i_hwi.c hba->max_rqes = num_elements_per_pg; hba 85 drivers/scsi/bnx2i/bnx2i_hwi.c else if (hba->max_rqes % num_elements_per_pg) hba 86 drivers/scsi/bnx2i/bnx2i_hwi.c hba->max_rqes = (hba->max_rqes + num_elements_per_pg - 1) & hba 97 drivers/scsi/bnx2i/bnx2i_hwi.c static void bnx2i_get_link_state(struct bnx2i_hba *hba) hba 99 drivers/scsi/bnx2i/bnx2i_hwi.c if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state)) hba 100 drivers/scsi/bnx2i/bnx2i_hwi.c set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); hba 102 drivers/scsi/bnx2i/bnx2i_hwi.c clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); hba 114 drivers/scsi/bnx2i/bnx2i_hwi.c static void bnx2i_iscsi_license_error(struct bnx2i_hba *hba, u32 error_code) hba 119 drivers/scsi/bnx2i/bnx2i_hwi.c hba->netdev->name); hba 124 drivers/scsi/bnx2i/bnx2i_hwi.c hba->netdev->name); hba 125 drivers/scsi/bnx2i/bnx2i_hwi.c set_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state); hba 146 drivers/scsi/bnx2i/bnx2i_hwi.c if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) hba 241 drivers/scsi/bnx2i/bnx2i_hwi.c if (ep->qp.rq_prod_idx > bnx2i_conn->hba->max_rqes) { hba 242 drivers/scsi/bnx2i/bnx2i_hwi.c ep->qp.rq_prod_idx %= bnx2i_conn->hba->max_rqes; hba 248 drivers/scsi/bnx2i/bnx2i_hwi.c if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { hba 275 drivers/scsi/bnx2i/bnx2i_hwi.c if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { hba 438 drivers/scsi/bnx2i/bnx2i_hwi.c tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma; hba 440 drivers/scsi/bnx2i/bnx2i_hwi.c ((u64) bnx2i_conn->hba->mp_bd_dma 
>> 32); hba 548 drivers/scsi/bnx2i/bnx2i_hwi.c if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) hba 568 drivers/scsi/bnx2i/bnx2i_hwi.c bnx2i_conn->hba->mp_bd_dma; hba 570 drivers/scsi/bnx2i/bnx2i_hwi.c (u32) ((u64) bnx2i_conn->hba->mp_bd_dma >> 32); hba 610 drivers/scsi/bnx2i/bnx2i_hwi.c logout_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma; hba 612 drivers/scsi/bnx2i/bnx2i_hwi.c ((u64) bnx2i_conn->hba->mp_bd_dma >> 32); hba 632 drivers/scsi/bnx2i/bnx2i_hwi.c struct bnx2i_hba *hba = bnx2i_conn->hba; hba 644 drivers/scsi/bnx2i/bnx2i_hwi.c if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_conn->ep->hba->cnic_dev_type)) hba 674 drivers/scsi/bnx2i/bnx2i_hwi.c if (hba->cnic && hba->cnic->submit_kwqes) hba 675 drivers/scsi/bnx2i/bnx2i_hwi.c hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1); hba 726 drivers/scsi/bnx2i/bnx2i_hwi.c void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd) hba 750 drivers/scsi/bnx2i/bnx2i_hwi.c int bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) hba 762 drivers/scsi/bnx2i/bnx2i_hwi.c if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) hba 770 drivers/scsi/bnx2i/bnx2i_hwi.c if (hba->cnic && hba->cnic->submit_kwqes) hba 771 drivers/scsi/bnx2i/bnx2i_hwi.c rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1); hba 784 drivers/scsi/bnx2i/bnx2i_hwi.c static int bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba, hba 830 drivers/scsi/bnx2i/bnx2i_hwi.c if (hba->cnic && hba->cnic->submit_kwqes) hba 831 drivers/scsi/bnx2i/bnx2i_hwi.c rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); hba 844 drivers/scsi/bnx2i/bnx2i_hwi.c static int bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba, hba 899 drivers/scsi/bnx2i/bnx2i_hwi.c if (hba->cnic && hba->cnic->submit_kwqes) hba 900 drivers/scsi/bnx2i/bnx2i_hwi.c rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); hba 913 drivers/scsi/bnx2i/bnx2i_hwi.c int bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) hba 917 drivers/scsi/bnx2i/bnx2i_hwi.c if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) hba 918 drivers/scsi/bnx2i/bnx2i_hwi.c rc = bnx2i_5771x_send_conn_ofld_req(hba, ep); hba 920 drivers/scsi/bnx2i/bnx2i_hwi.c rc = bnx2i_570x_send_conn_ofld_req(hba, ep); hba 941 drivers/scsi/bnx2i/bnx2i_hwi.c if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) hba 1042 drivers/scsi/bnx2i/bnx2i_hwi.c int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) hba 1046 drivers/scsi/bnx2i/bnx2i_hwi.c ep->hba = hba; hba 1051 drivers/scsi/bnx2i/bnx2i_hwi.c ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE; hba 1060 drivers/scsi/bnx2i/bnx2i_hwi.c dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size, hba 1070 drivers/scsi/bnx2i/bnx2i_hwi.c dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size, hba 1081 drivers/scsi/bnx2i/bnx2i_hwi.c ep->qp.sq_last_qe = &ep->qp.sq_first_qe[hba->max_sqes - 1]; hba 1084 drivers/scsi/bnx2i/bnx2i_hwi.c ep->qp.sqe_left = hba->max_sqes; hba 1087 drivers/scsi/bnx2i/bnx2i_hwi.c ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE; hba 1096 drivers/scsi/bnx2i/bnx2i_hwi.c dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size, hba 1106 drivers/scsi/bnx2i/bnx2i_hwi.c dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size, hba 1117 drivers/scsi/bnx2i/bnx2i_hwi.c ep->qp.cq_last_qe = &ep->qp.cq_first_qe[hba->max_cqes - 1]; hba 1120 drivers/scsi/bnx2i/bnx2i_hwi.c ep->qp.cqe_left = hba->max_cqes; hba 1122 drivers/scsi/bnx2i/bnx2i_hwi.c ep->qp.cqe_size = hba->max_cqes; hba 1129 
drivers/scsi/bnx2i/bnx2i_hwi.c ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE; hba 1138 drivers/scsi/bnx2i/bnx2i_hwi.c dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size, hba 1148 drivers/scsi/bnx2i/bnx2i_hwi.c dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size, hba 1159 drivers/scsi/bnx2i/bnx2i_hwi.c ep->qp.rq_last_qe = &ep->qp.rq_first_qe[hba->max_rqes - 1]; hba 1162 drivers/scsi/bnx2i/bnx2i_hwi.c ep->qp.rqe_left = hba->max_rqes; hba 1169 drivers/scsi/bnx2i/bnx2i_hwi.c bnx2i_free_qp_resc(hba, ep); hba 1182 drivers/scsi/bnx2i/bnx2i_hwi.c void bnx2i_free_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) hba 1190 drivers/scsi/bnx2i/bnx2i_hwi.c dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size, hba 1196 drivers/scsi/bnx2i/bnx2i_hwi.c dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size, hba 1204 drivers/scsi/bnx2i/bnx2i_hwi.c dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size, hba 1210 drivers/scsi/bnx2i/bnx2i_hwi.c dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size, hba 1218 drivers/scsi/bnx2i/bnx2i_hwi.c dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size, hba 1224 drivers/scsi/bnx2i/bnx2i_hwi.c dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size, hba 1243 drivers/scsi/bnx2i/bnx2i_hwi.c int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba) hba 1254 drivers/scsi/bnx2i/bnx2i_hwi.c bnx2i_adjust_qp_size(hba); hba 1266 drivers/scsi/bnx2i/bnx2i_hwi.c iscsi_init.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma; hba 1268 drivers/scsi/bnx2i/bnx2i_hwi.c (u32) ((u64) hba->dummy_buf_dma >> 32); hba 1270 drivers/scsi/bnx2i/bnx2i_hwi.c hba->num_ccell = hba->max_sqes >> 1; hba 1271 drivers/scsi/bnx2i/bnx2i_hwi.c hba->ctx_ccell_tasks = hba 1272 drivers/scsi/bnx2i/bnx2i_hwi.c ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16)); hba 1273 drivers/scsi/bnx2i/bnx2i_hwi.c iscsi_init.num_ccells_per_conn = hba->num_ccell; hba 1274 drivers/scsi/bnx2i/bnx2i_hwi.c iscsi_init.num_tasks_per_conn = hba->max_sqes; hba 1276 drivers/scsi/bnx2i/bnx2i_hwi.c iscsi_init.sq_num_wqes = hba->max_sqes; hba 1279 drivers/scsi/bnx2i/bnx2i_hwi.c iscsi_init.cq_num_wqes = hba->max_cqes; hba 1280 drivers/scsi/bnx2i/bnx2i_hwi.c iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE + hba 1282 drivers/scsi/bnx2i/bnx2i_hwi.c iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE + hba 1285 drivers/scsi/bnx2i/bnx2i_hwi.c iscsi_init.rq_num_wqes = hba->max_rqes; hba 1291 drivers/scsi/bnx2i/bnx2i_hwi.c iscsi_init2.max_cq_sqn = hba->max_cqes * 2 + 1; hba 1321 drivers/scsi/bnx2i/bnx2i_hwi.c if (hba->cnic && hba->cnic->submit_kwqes) hba 1322 drivers/scsi/bnx2i/bnx2i_hwi.c rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 2); hba 1340 drivers/scsi/bnx2i/bnx2i_hwi.c struct bnx2i_hba *hba = bnx2i_conn->hba; hba 1361 drivers/scsi/bnx2i/bnx2i_hwi.c ADD_STATS_64(hba, rx_pdus, hba 1363 drivers/scsi/bnx2i/bnx2i_hwi.c ADD_STATS_64(hba, rx_bytes, hba 1372 drivers/scsi/bnx2i/bnx2i_hwi.c ADD_STATS_64(hba, tx_pdus, hba 1374 drivers/scsi/bnx2i/bnx2i_hwi.c ADD_STATS_64(hba, tx_bytes, hba 1376 drivers/scsi/bnx2i/bnx2i_hwi.c ADD_STATS_64(hba, rx_pdus, hba 1956 drivers/scsi/bnx2i/bnx2i_hwi.c struct bnx2i_hba *hba = bnx2i_conn->hba; hba 1969 drivers/scsi/bnx2i/bnx2i_hwi.c hba->netdev->name); hba 1983 drivers/scsi/bnx2i/bnx2i_hwi.c hba->netdev->name); hba 2041 drivers/scsi/bnx2i/bnx2i_hwi.c ADD_STATS_64(hba, rx_pdus, 1); hba 2042 drivers/scsi/bnx2i/bnx2i_hwi.c ADD_STATS_64(hba, rx_bytes, nopin->data_length); hba 2048 drivers/scsi/bnx2i/bnx2i_hwi.c hba->netdev->name, hba 2083 
drivers/scsi/bnx2i/bnx2i_hwi.c static void bnx2i_fastpath_notification(struct bnx2i_hba *hba, hba 2091 drivers/scsi/bnx2i/bnx2i_hwi.c bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid); hba 2117 drivers/scsi/bnx2i/bnx2i_hwi.c static void bnx2i_process_update_conn_cmpl(struct bnx2i_hba *hba, hba 2124 drivers/scsi/bnx2i/bnx2i_hwi.c conn = bnx2i_get_conn_from_id(hba, iscsi_cid); hba 2152 drivers/scsi/bnx2i/bnx2i_hwi.c static void bnx2i_recovery_que_add_conn(struct bnx2i_hba *hba, hba 2168 drivers/scsi/bnx2i/bnx2i_hwi.c static void bnx2i_process_tcp_error(struct bnx2i_hba *hba, hba 2175 drivers/scsi/bnx2i/bnx2i_hwi.c bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid); hba 2184 drivers/scsi/bnx2i/bnx2i_hwi.c bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn); hba 2201 drivers/scsi/bnx2i/bnx2i_hwi.c static void bnx2i_process_iscsi_error(struct bnx2i_hba *hba, hba 2214 drivers/scsi/bnx2i/bnx2i_hwi.c bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid); hba 2367 drivers/scsi/bnx2i/bnx2i_hwi.c bnx2i_conn->hba->shost->host_no, hba 2370 drivers/scsi/bnx2i/bnx2i_hwi.c bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn); hba 2388 drivers/scsi/bnx2i/bnx2i_hwi.c static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba, hba 2393 drivers/scsi/bnx2i/bnx2i_hwi.c ep = bnx2i_find_ep_in_destroy_list(hba, conn_destroy->iscsi_conn_id); hba 2400 drivers/scsi/bnx2i/bnx2i_hwi.c if (hba != ep->hba) { hba 2422 drivers/scsi/bnx2i/bnx2i_hwi.c static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba, hba 2428 drivers/scsi/bnx2i/bnx2i_hwi.c ep = bnx2i_find_ep_in_ofld_list(hba, ofld_kcqe->iscsi_conn_id); hba 2434 drivers/scsi/bnx2i/bnx2i_hwi.c if (hba != ep->hba) { hba 2445 drivers/scsi/bnx2i/bnx2i_hwi.c hba->netdev->name); hba 2449 drivers/scsi/bnx2i/bnx2i_hwi.c "opcode\n", hba->netdev->name); hba 2456 drivers/scsi/bnx2i/bnx2i_hwi.c "error code %d\n", hba->netdev->name, hba 2477 drivers/scsi/bnx2i/bnx2i_hwi.c struct bnx2i_hba *hba = context; hba 2486 drivers/scsi/bnx2i/bnx2i_hwi.c bnx2i_fastpath_notification(hba, ikcqe); hba 2488 drivers/scsi/bnx2i/bnx2i_hwi.c bnx2i_process_ofld_cmpl(hba, ikcqe); hba 2490 drivers/scsi/bnx2i/bnx2i_hwi.c bnx2i_process_update_conn_cmpl(hba, ikcqe); hba 2494 drivers/scsi/bnx2i/bnx2i_hwi.c bnx2i_iscsi_license_error(hba, ikcqe->\ hba 2497 drivers/scsi/bnx2i/bnx2i_hwi.c set_bit(ADAPTER_STATE_UP, &hba->adapter_state); hba 2498 drivers/scsi/bnx2i/bnx2i_hwi.c bnx2i_get_link_state(hba); hba 2501 drivers/scsi/bnx2i/bnx2i_hwi.c (u8)hba->pcidev->bus->number, hba 2502 drivers/scsi/bnx2i/bnx2i_hwi.c hba->pci_devno, hba 2503 drivers/scsi/bnx2i/bnx2i_hwi.c (u8)hba->pci_func); hba 2508 drivers/scsi/bnx2i/bnx2i_hwi.c bnx2i_process_conn_destroy_cmpl(hba, ikcqe); hba 2510 drivers/scsi/bnx2i/bnx2i_hwi.c bnx2i_process_iscsi_error(hba, ikcqe); hba 2512 drivers/scsi/bnx2i/bnx2i_hwi.c bnx2i_process_tcp_error(hba, ikcqe); hba 2532 drivers/scsi/bnx2i/bnx2i_hwi.c struct bnx2i_hba *hba = context; hba 2540 drivers/scsi/bnx2i/bnx2i_hwi.c if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) hba 2541 drivers/scsi/bnx2i/bnx2i_hwi.c bnx2i_send_fw_iscsi_init_msg(hba); hba 2544 drivers/scsi/bnx2i/bnx2i_hwi.c clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); hba 2545 drivers/scsi/bnx2i/bnx2i_hwi.c clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); hba 2548 drivers/scsi/bnx2i/bnx2i_hwi.c set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); hba 2549 drivers/scsi/bnx2i/bnx2i_hwi.c iscsi_host_for_each_session(hba->shost, hba 2553 drivers/scsi/bnx2i/bnx2i_hwi.c bnx2i_get_link_state(hba); hba 2572 
drivers/scsi/bnx2i/bnx2i_hwi.c if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state)) hba 2629 drivers/scsi/bnx2i/bnx2i_hwi.c bnx2i_recovery_que_add_conn(ep->hba, ep->conn); hba 2650 drivers/scsi/bnx2i/bnx2i_hwi.c bnx2i_recovery_que_add_conn(ep->hba, ep->conn); hba 2657 drivers/scsi/bnx2i/bnx2i_hwi.c struct bnx2i_hba *hba = context; hba 2660 drivers/scsi/bnx2i/bnx2i_hwi.c if (!hba) hba 2663 drivers/scsi/bnx2i/bnx2i_hwi.c rc = iscsi_offload_mesg(hba->shost, &bnx2i_iscsi_transport, hba 2714 drivers/scsi/bnx2i/bnx2i_hwi.c if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { hba 2715 drivers/scsi/bnx2i/bnx2i_hwi.c reg_base = pci_resource_start(ep->hba->pcidev, hba 2724 drivers/scsi/bnx2i/bnx2i_hwi.c if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) && hba 2725 drivers/scsi/bnx2i/bnx2i_hwi.c (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) { hba 2726 drivers/scsi/bnx2i/bnx2i_hwi.c config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2); hba 2739 drivers/scsi/bnx2i/bnx2i_hwi.c ep->qp.ctx_base = ioremap_nocache(ep->hba->reg_base + reg_off, hba 82 drivers/scsi/bnx2i/bnx2i_init.c void bnx2i_identify_device(struct bnx2i_hba *hba, struct cnic_dev *dev) hba 84 drivers/scsi/bnx2i/bnx2i_init.c hba->cnic_dev_type = 0; hba 86 drivers/scsi/bnx2i/bnx2i_init.c if (hba->pci_did == PCI_DEVICE_ID_NX2_5706 || hba 87 drivers/scsi/bnx2i/bnx2i_init.c hba->pci_did == PCI_DEVICE_ID_NX2_5706S) { hba 88 drivers/scsi/bnx2i/bnx2i_init.c set_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type); hba 89 drivers/scsi/bnx2i/bnx2i_init.c } else if (hba->pci_did == PCI_DEVICE_ID_NX2_5708 || hba 90 drivers/scsi/bnx2i/bnx2i_init.c hba->pci_did == PCI_DEVICE_ID_NX2_5708S) { hba 91 drivers/scsi/bnx2i/bnx2i_init.c set_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type); hba 92 drivers/scsi/bnx2i/bnx2i_init.c } else if (hba->pci_did == PCI_DEVICE_ID_NX2_5709 || hba 93 drivers/scsi/bnx2i/bnx2i_init.c hba->pci_did == PCI_DEVICE_ID_NX2_5709S) { hba 94 drivers/scsi/bnx2i/bnx2i_init.c set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type); hba 95 drivers/scsi/bnx2i/bnx2i_init.c hba->mail_queue_access = BNX2I_MQ_BIN_MODE; hba 98 drivers/scsi/bnx2i/bnx2i_init.c set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type); hba 101 drivers/scsi/bnx2i/bnx2i_init.c hba->pci_did); hba 111 drivers/scsi/bnx2i/bnx2i_init.c struct bnx2i_hba *hba = NULL; hba 120 drivers/scsi/bnx2i/bnx2i_init.c hba = tmp_hba; hba 126 drivers/scsi/bnx2i/bnx2i_init.c return hba; hba 137 drivers/scsi/bnx2i/bnx2i_init.c struct bnx2i_hba *hba, *temp; hba 140 drivers/scsi/bnx2i/bnx2i_init.c list_for_each_entry_safe(hba, temp, &adapter_list, link) { hba 141 drivers/scsi/bnx2i/bnx2i_init.c if (hba->cnic == cnic) { hba 143 drivers/scsi/bnx2i/bnx2i_init.c return hba; hba 166 drivers/scsi/bnx2i/bnx2i_init.c struct bnx2i_hba *hba = handle; hba 174 drivers/scsi/bnx2i/bnx2i_init.c bnx2i_send_fw_iscsi_init_msg(hba); hba 175 drivers/scsi/bnx2i/bnx2i_init.c while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && hba 176 drivers/scsi/bnx2i/bnx2i_init.c !test_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state) && i--) hba 188 drivers/scsi/bnx2i/bnx2i_init.c static void bnx2i_chip_cleanup(struct bnx2i_hba *hba) hba 193 drivers/scsi/bnx2i/bnx2i_init.c if (hba->ofld_conns_active) { hba 199 drivers/scsi/bnx2i/bnx2i_init.c "connections\n", hba->netdev->name, hba 200 drivers/scsi/bnx2i/bnx2i_init.c hba->ofld_conns_active); hba 201 drivers/scsi/bnx2i/bnx2i_init.c mutex_lock(&hba->net_dev_lock); hba 202 drivers/scsi/bnx2i/bnx2i_init.c list_for_each_safe(pos, tmp, &hba->ep_active_list) { hba 208 
drivers/scsi/bnx2i/bnx2i_init.c mutex_unlock(&hba->net_dev_lock); hba 222 drivers/scsi/bnx2i/bnx2i_init.c struct bnx2i_hba *hba = handle; hba 228 drivers/scsi/bnx2i/bnx2i_init.c &hba->adapter_state)) { hba 229 drivers/scsi/bnx2i/bnx2i_init.c iscsi_host_for_each_session(hba->shost, hba 231 drivers/scsi/bnx2i/bnx2i_init.c wait_delay = hba->hba_shutdown_tmo; hba 238 drivers/scsi/bnx2i/bnx2i_init.c wait_event_interruptible_timeout(hba->eh_wait, hba 239 drivers/scsi/bnx2i/bnx2i_init.c (list_empty(&hba->ep_ofld_list) && hba 240 drivers/scsi/bnx2i/bnx2i_init.c list_empty(&hba->ep_destroy_list)), hba 246 drivers/scsi/bnx2i/bnx2i_init.c while (hba->ofld_conns_active) { hba 247 drivers/scsi/bnx2i/bnx2i_init.c conns_active = hba->ofld_conns_active; hba 248 drivers/scsi/bnx2i/bnx2i_init.c wait_event_interruptible_timeout(hba->eh_wait, hba 249 drivers/scsi/bnx2i/bnx2i_init.c (hba->ofld_conns_active != conns_active), hba 251 drivers/scsi/bnx2i/bnx2i_init.c if (hba->ofld_conns_active == conns_active) hba 254 drivers/scsi/bnx2i/bnx2i_init.c bnx2i_chip_cleanup(hba); hba 259 drivers/scsi/bnx2i/bnx2i_init.c clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); hba 260 drivers/scsi/bnx2i/bnx2i_init.c clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); hba 273 drivers/scsi/bnx2i/bnx2i_init.c static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic) hba 280 drivers/scsi/bnx2i/bnx2i_init.c "iSCSI\n", hba->netdev->name); hba 285 drivers/scsi/bnx2i/bnx2i_init.c hba->cnic = cnic; hba 286 drivers/scsi/bnx2i/bnx2i_init.c rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba); hba 288 drivers/scsi/bnx2i/bnx2i_init.c hba->age++; hba 289 drivers/scsi/bnx2i/bnx2i_init.c set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); hba 290 drivers/scsi/bnx2i/bnx2i_init.c list_add_tail(&hba->link, &adapter_list); hba 294 drivers/scsi/bnx2i/bnx2i_init.c "hba=%p, cnic=%p\n", hba, cnic); hba 319 drivers/scsi/bnx2i/bnx2i_init.c struct bnx2i_hba *hba; hba 322 drivers/scsi/bnx2i/bnx2i_init.c hba = bnx2i_alloc_hba(dev); hba 323 drivers/scsi/bnx2i/bnx2i_init.c if (!hba) { hba 329 drivers/scsi/bnx2i/bnx2i_init.c clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); hba 330 drivers/scsi/bnx2i/bnx2i_init.c if (bnx2i_init_one(hba, dev)) { hba 331 drivers/scsi/bnx2i/bnx2i_init.c printk(KERN_ERR "bnx2i - hba %p init failed\n", hba); hba 332 drivers/scsi/bnx2i/bnx2i_init.c bnx2i_free_hba(hba); hba 344 drivers/scsi/bnx2i/bnx2i_init.c struct bnx2i_hba *hba; hba 346 drivers/scsi/bnx2i/bnx2i_init.c hba = bnx2i_find_hba_for_cnic(dev); hba 347 drivers/scsi/bnx2i/bnx2i_init.c if (!hba) { hba 353 drivers/scsi/bnx2i/bnx2i_init.c list_del_init(&hba->link); hba 356 drivers/scsi/bnx2i/bnx2i_init.c if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { hba 357 drivers/scsi/bnx2i/bnx2i_init.c hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); hba 358 drivers/scsi/bnx2i/bnx2i_init.c clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); hba 362 drivers/scsi/bnx2i/bnx2i_init.c bnx2i_free_hba(hba); hba 375 drivers/scsi/bnx2i/bnx2i_init.c struct bnx2i_hba *hba = handle; hba 378 drivers/scsi/bnx2i/bnx2i_init.c if (!hba) hba 381 drivers/scsi/bnx2i/bnx2i_init.c stats = (struct iscsi_stats_info *)hba->cnic->stats_addr; hba 387 drivers/scsi/bnx2i/bnx2i_init.c memcpy(stats->mac_add1 + 2, hba->cnic->mac_addr, ETH_ALEN); hba 389 drivers/scsi/bnx2i/bnx2i_init.c stats->max_frame_size = hba->netdev->mtu; hba 390 drivers/scsi/bnx2i/bnx2i_init.c stats->txq_size = hba->max_sqes; hba 391 drivers/scsi/bnx2i/bnx2i_init.c stats->rxq_size = hba->max_cqes; 
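Note: the bnx2i_init.c fragments above (lines 137-143 and 290) show each HBA being linked into a global adapter_list and later looked up by its owning cnic device. A minimal C sketch of that lookup pattern follows; the mutex guarding the list is an assumption, since the locking lines themselves are not captured in the fragments, and the struct is trimmed to the fields the fragments use.

	#include <linux/list.h>
	#include <linux/mutex.h>

	struct cnic_dev;			/* opaque here; defined by the cnic layer */

	struct bnx2i_hba {
		struct list_head link;		/* threads the hba onto adapter_list */
		struct cnic_dev *cnic;		/* the cnic device this hba serves */
		/* ... remaining fields trimmed for the sketch ... */
	};

	static LIST_HEAD(adapter_list);
	static DEFINE_MUTEX(bnx2i_dev_lock);	/* assumed guard for adapter_list */

	/* Walk the registered adapters and return the hba bound to @cnic,
	 * or NULL if none is registered; mirrors the fragment at line 137. */
	static struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic)
	{
		struct bnx2i_hba *hba, *temp;

		mutex_lock(&bnx2i_dev_lock);
		list_for_each_entry_safe(hba, temp, &adapter_list, link) {
			if (hba->cnic == cnic) {
				mutex_unlock(&bnx2i_dev_lock);
				return hba;
			}
		}
		mutex_unlock(&bnx2i_dev_lock);
		return NULL;
	}

Registration is the inverse, visible at line 290: list_add_tail(&hba->link, &adapter_list) under the same lock.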
hba 396 drivers/scsi/bnx2i/bnx2i_init.c GET_STATS_64(hba, stats, rx_pdus); hba 397 drivers/scsi/bnx2i/bnx2i_init.c GET_STATS_64(hba, stats, rx_bytes); hba 399 drivers/scsi/bnx2i/bnx2i_init.c GET_STATS_64(hba, stats, tx_pdus); hba 400 drivers/scsi/bnx2i/bnx2i_init.c GET_STATS_64(hba, stats, tx_bytes); hba 527 drivers/scsi/bnx2i/bnx2i_init.c struct bnx2i_hba *hba; hba 531 drivers/scsi/bnx2i/bnx2i_init.c hba = list_entry(adapter_list.next, struct bnx2i_hba, link); hba 532 drivers/scsi/bnx2i/bnx2i_init.c list_del(&hba->link); hba 535 drivers/scsi/bnx2i/bnx2i_init.c if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { hba 536 drivers/scsi/bnx2i/bnx2i_init.c bnx2i_chip_cleanup(hba); hba 537 drivers/scsi/bnx2i/bnx2i_init.c hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); hba 538 drivers/scsi/bnx2i/bnx2i_init.c clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); hba 541 drivers/scsi/bnx2i/bnx2i_init.c bnx2i_free_hba(hba); hba 34 drivers/scsi/bnx2i/bnx2i_iscsi.c static int bnx2i_adapter_ready(struct bnx2i_hba *hba) hba 38 drivers/scsi/bnx2i/bnx2i_iscsi.c if (!hba || !test_bit(ADAPTER_STATE_UP, &hba->adapter_state) || hba 39 drivers/scsi/bnx2i/bnx2i_iscsi.c test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) || hba 40 drivers/scsi/bnx2i/bnx2i_iscsi.c test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state)) hba 143 drivers/scsi/bnx2i/bnx2i_iscsi.c static int bnx2i_map_scsi_sg(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd) hba 190 drivers/scsi/bnx2i/bnx2i_iscsi.c bd_count = bnx2i_map_scsi_sg(cmd->conn->hba, cmd); hba 238 drivers/scsi/bnx2i/bnx2i_iscsi.c static int bnx2i_bind_conn_to_iscsi_cid(struct bnx2i_hba *hba, hba 242 drivers/scsi/bnx2i/bnx2i_iscsi.c if (hba && hba->cid_que.conn_cid_tbl[iscsi_cid]) { hba 248 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->cid_que.conn_cid_tbl[iscsi_cid] = bnx2i_conn; hba 258 drivers/scsi/bnx2i/bnx2i_iscsi.c struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba, hba 261 drivers/scsi/bnx2i/bnx2i_iscsi.c if (!hba->cid_que.conn_cid_tbl) { hba 265 drivers/scsi/bnx2i/bnx2i_iscsi.c } else if (iscsi_cid >= hba->max_active_conns) { hba 269 drivers/scsi/bnx2i/bnx2i_iscsi.c return hba->cid_que.conn_cid_tbl[iscsi_cid]; hba 277 drivers/scsi/bnx2i/bnx2i_iscsi.c static u32 bnx2i_alloc_iscsi_cid(struct bnx2i_hba *hba) hba 281 drivers/scsi/bnx2i/bnx2i_iscsi.c if (!hba->cid_que.cid_free_cnt) hba 284 drivers/scsi/bnx2i/bnx2i_iscsi.c idx = hba->cid_que.cid_q_cons_idx; hba 285 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->cid_que.cid_q_cons_idx++; hba 286 drivers/scsi/bnx2i/bnx2i_iscsi.c if (hba->cid_que.cid_q_cons_idx == hba->cid_que.cid_q_max_idx) hba 287 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->cid_que.cid_q_cons_idx = 0; hba 289 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->cid_que.cid_free_cnt--; hba 290 drivers/scsi/bnx2i/bnx2i_iscsi.c return hba->cid_que.cid_que[idx]; hba 299 drivers/scsi/bnx2i/bnx2i_iscsi.c static void bnx2i_free_iscsi_cid(struct bnx2i_hba *hba, u16 iscsi_cid) hba 306 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->cid_que.cid_free_cnt++; hba 308 drivers/scsi/bnx2i/bnx2i_iscsi.c idx = hba->cid_que.cid_q_prod_idx; hba 309 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->cid_que.cid_que[idx] = iscsi_cid; hba 310 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->cid_que.conn_cid_tbl[iscsi_cid] = NULL; hba 311 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->cid_que.cid_q_prod_idx++; hba 312 drivers/scsi/bnx2i/bnx2i_iscsi.c if (hba->cid_que.cid_q_prod_idx == hba->cid_que.cid_q_max_idx) hba 313 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->cid_que.cid_q_prod_idx = 0; hba 324 drivers/scsi/bnx2i/bnx2i_iscsi.c static 
int bnx2i_setup_free_cid_que(struct bnx2i_hba *hba) hba 329 drivers/scsi/bnx2i/bnx2i_iscsi.c mem_size = hba->max_active_conns * sizeof(u32); hba 332 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->cid_que.cid_que_base = kmalloc(mem_size, GFP_KERNEL); hba 333 drivers/scsi/bnx2i/bnx2i_iscsi.c if (!hba->cid_que.cid_que_base) hba 336 drivers/scsi/bnx2i/bnx2i_iscsi.c mem_size = hba->max_active_conns * sizeof(struct bnx2i_conn *); hba 338 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->cid_que.conn_cid_tbl = kmalloc(mem_size, GFP_KERNEL); hba 339 drivers/scsi/bnx2i/bnx2i_iscsi.c if (!hba->cid_que.conn_cid_tbl) { hba 340 drivers/scsi/bnx2i/bnx2i_iscsi.c kfree(hba->cid_que.cid_que_base); hba 341 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->cid_que.cid_que_base = NULL; hba 345 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->cid_que.cid_que = (u32 *)hba->cid_que.cid_que_base; hba 346 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->cid_que.cid_q_prod_idx = 0; hba 347 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->cid_que.cid_q_cons_idx = 0; hba 348 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->cid_que.cid_q_max_idx = hba->max_active_conns; hba 349 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->cid_que.cid_free_cnt = hba->max_active_conns; hba 351 drivers/scsi/bnx2i/bnx2i_iscsi.c for (i = 0; i < hba->max_active_conns; i++) { hba 352 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->cid_que.cid_que[i] = i; hba 353 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->cid_que.conn_cid_tbl[i] = NULL; hba 363 drivers/scsi/bnx2i/bnx2i_iscsi.c static void bnx2i_release_free_cid_que(struct bnx2i_hba *hba) hba 365 drivers/scsi/bnx2i/bnx2i_iscsi.c kfree(hba->cid_que.cid_que_base); hba 366 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->cid_que.cid_que_base = NULL; hba 368 drivers/scsi/bnx2i/bnx2i_iscsi.c kfree(hba->cid_que.conn_cid_tbl); hba 369 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->cid_que.conn_cid_tbl = NULL; hba 381 drivers/scsi/bnx2i/bnx2i_iscsi.c static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba) hba 398 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_ep->hba = hba; hba 399 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_ep->hba_age = hba->age; hba 405 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->ofld_conns_active++; hba 422 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_ep->hba->ofld_conns_active--; hba 425 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid); hba 432 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_ep->hba = NULL; hba 444 drivers/scsi/bnx2i/bnx2i_iscsi.c static int bnx2i_alloc_bdt(struct bnx2i_hba *hba, struct iscsi_session *session, hba 450 drivers/scsi/bnx2i/bnx2i_iscsi.c io->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, hba 468 drivers/scsi/bnx2i/bnx2i_iscsi.c static void bnx2i_destroy_cmd_pool(struct bnx2i_hba *hba, hba 478 drivers/scsi/bnx2i/bnx2i_iscsi.c dma_free_coherent(&hba->pcidev->dev, hba 493 drivers/scsi/bnx2i/bnx2i_iscsi.c static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba, hba 505 drivers/scsi/bnx2i/bnx2i_iscsi.c if (bnx2i_alloc_bdt(hba, session, cmd)) hba 512 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_destroy_cmd_pool(hba, session); hba 524 drivers/scsi/bnx2i/bnx2i_iscsi.c static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba) hba 530 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, hba 531 drivers/scsi/bnx2i/bnx2i_iscsi.c &hba->mp_bd_dma, GFP_KERNEL); hba 532 drivers/scsi/bnx2i/bnx2i_iscsi.c if (!hba->mp_bd_tbl) { hba 538 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, hba 540 drivers/scsi/bnx2i/bnx2i_iscsi.c &hba->dummy_buf_dma, GFP_KERNEL); hba 541 
drivers/scsi/bnx2i/bnx2i_iscsi.c if (!hba->dummy_buffer) { hba 543 drivers/scsi/bnx2i/bnx2i_iscsi.c dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, hba 544 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->mp_bd_tbl, hba->mp_bd_dma); hba 545 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->mp_bd_tbl = NULL; hba 550 drivers/scsi/bnx2i/bnx2i_iscsi.c mp_bdt = (struct iscsi_bd *) hba->mp_bd_tbl; hba 551 drivers/scsi/bnx2i/bnx2i_iscsi.c addr = (unsigned long) hba->dummy_buf_dma; hba 568 drivers/scsi/bnx2i/bnx2i_iscsi.c static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba) hba 570 drivers/scsi/bnx2i/bnx2i_iscsi.c if (hba->mp_bd_tbl) { hba 571 drivers/scsi/bnx2i/bnx2i_iscsi.c dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, hba 572 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->mp_bd_tbl, hba->mp_bd_dma); hba 573 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->mp_bd_tbl = NULL; hba 575 drivers/scsi/bnx2i/bnx2i_iscsi.c if (hba->dummy_buffer) { hba 576 drivers/scsi/bnx2i/bnx2i_iscsi.c dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, hba 577 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->dummy_buffer, hba->dummy_buf_dma); hba 578 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->dummy_buffer = NULL; hba 606 drivers/scsi/bnx2i/bnx2i_iscsi.c static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba, hba 609 drivers/scsi/bnx2i/bnx2i_iscsi.c write_lock_bh(&hba->ep_rdwr_lock); hba 610 drivers/scsi/bnx2i/bnx2i_iscsi.c list_add_tail(&ep->link, &hba->ep_destroy_list); hba 611 drivers/scsi/bnx2i/bnx2i_iscsi.c write_unlock_bh(&hba->ep_rdwr_lock); hba 623 drivers/scsi/bnx2i/bnx2i_iscsi.c static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba, hba 626 drivers/scsi/bnx2i/bnx2i_iscsi.c write_lock_bh(&hba->ep_rdwr_lock); hba 628 drivers/scsi/bnx2i/bnx2i_iscsi.c write_unlock_bh(&hba->ep_rdwr_lock); hba 640 drivers/scsi/bnx2i/bnx2i_iscsi.c static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba, hba 643 drivers/scsi/bnx2i/bnx2i_iscsi.c write_lock_bh(&hba->ep_rdwr_lock); hba 644 drivers/scsi/bnx2i/bnx2i_iscsi.c list_add_tail(&ep->link, &hba->ep_ofld_list); hba 645 drivers/scsi/bnx2i/bnx2i_iscsi.c write_unlock_bh(&hba->ep_rdwr_lock); hba 656 drivers/scsi/bnx2i/bnx2i_iscsi.c static int bnx2i_ep_ofld_list_del(struct bnx2i_hba *hba, hba 659 drivers/scsi/bnx2i/bnx2i_iscsi.c write_lock_bh(&hba->ep_rdwr_lock); hba 661 drivers/scsi/bnx2i/bnx2i_iscsi.c write_unlock_bh(&hba->ep_rdwr_lock); hba 674 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid) hba 680 drivers/scsi/bnx2i/bnx2i_iscsi.c read_lock_bh(&hba->ep_rdwr_lock); hba 681 drivers/scsi/bnx2i/bnx2i_iscsi.c list_for_each_safe(list, tmp, &hba->ep_ofld_list) { hba 688 drivers/scsi/bnx2i/bnx2i_iscsi.c read_unlock_bh(&hba->ep_rdwr_lock); hba 702 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid) hba 708 drivers/scsi/bnx2i/bnx2i_iscsi.c read_lock_bh(&hba->ep_rdwr_lock); hba 709 drivers/scsi/bnx2i/bnx2i_iscsi.c list_for_each_safe(list, tmp, &hba->ep_destroy_list) { hba 716 drivers/scsi/bnx2i/bnx2i_iscsi.c read_unlock_bh(&hba->ep_rdwr_lock); hba 731 drivers/scsi/bnx2i/bnx2i_iscsi.c static void bnx2i_ep_active_list_add(struct bnx2i_hba *hba, hba 734 drivers/scsi/bnx2i/bnx2i_iscsi.c write_lock_bh(&hba->ep_rdwr_lock); hba 735 drivers/scsi/bnx2i/bnx2i_iscsi.c list_add_tail(&ep->link, &hba->ep_active_list); hba 736 drivers/scsi/bnx2i/bnx2i_iscsi.c write_unlock_bh(&hba->ep_rdwr_lock); hba 747 drivers/scsi/bnx2i/bnx2i_iscsi.c static void bnx2i_ep_active_list_del(struct bnx2i_hba *hba, hba 750 drivers/scsi/bnx2i/bnx2i_iscsi.c 
write_lock_bh(&hba->ep_rdwr_lock); hba 752 drivers/scsi/bnx2i/bnx2i_iscsi.c write_unlock_bh(&hba->ep_rdwr_lock); hba 765 drivers/scsi/bnx2i/bnx2i_iscsi.c static void bnx2i_setup_host_queue_size(struct bnx2i_hba *hba, hba 768 drivers/scsi/bnx2i/bnx2i_iscsi.c if (test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type)) hba 770 drivers/scsi/bnx2i/bnx2i_iscsi.c else if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) hba 772 drivers/scsi/bnx2i/bnx2i_iscsi.c else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) hba 789 drivers/scsi/bnx2i/bnx2i_iscsi.c struct bnx2i_hba *hba; hba 791 drivers/scsi/bnx2i/bnx2i_iscsi.c shost = iscsi_host_alloc(&bnx2i_host_template, sizeof(*hba), 0); hba 801 drivers/scsi/bnx2i/bnx2i_iscsi.c hba = iscsi_host_priv(shost); hba 802 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->shost = shost; hba 803 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->netdev = cnic->netdev; hba 805 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->pcidev = cnic->pcidev; hba 806 drivers/scsi/bnx2i/bnx2i_iscsi.c pci_dev_get(hba->pcidev); hba 807 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->pci_did = hba->pcidev->device; hba 808 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->pci_vid = hba->pcidev->vendor; hba 809 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->pci_sdid = hba->pcidev->subsystem_device; hba 810 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->pci_svid = hba->pcidev->subsystem_vendor; hba 811 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->pci_func = PCI_FUNC(hba->pcidev->devfn); hba 812 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->pci_devno = PCI_SLOT(hba->pcidev->devfn); hba 814 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_identify_device(hba, cnic); hba 815 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_setup_host_queue_size(hba, shost); hba 817 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->reg_base = pci_resource_start(hba->pcidev, 0); hba 818 drivers/scsi/bnx2i/bnx2i_iscsi.c if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) { hba 819 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->regview = pci_iomap(hba->pcidev, 0, BNX2_MQ_CONFIG2); hba 820 drivers/scsi/bnx2i/bnx2i_iscsi.c if (!hba->regview) hba 822 drivers/scsi/bnx2i/bnx2i_iscsi.c } else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) { hba 823 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->regview = pci_iomap(hba->pcidev, 0, 4096); hba 824 drivers/scsi/bnx2i/bnx2i_iscsi.c if (!hba->regview) hba 828 drivers/scsi/bnx2i/bnx2i_iscsi.c if (bnx2i_setup_mp_bdt(hba)) hba 831 drivers/scsi/bnx2i/bnx2i_iscsi.c INIT_LIST_HEAD(&hba->ep_ofld_list); hba 832 drivers/scsi/bnx2i/bnx2i_iscsi.c INIT_LIST_HEAD(&hba->ep_active_list); hba 833 drivers/scsi/bnx2i/bnx2i_iscsi.c INIT_LIST_HEAD(&hba->ep_destroy_list); hba 834 drivers/scsi/bnx2i/bnx2i_iscsi.c rwlock_init(&hba->ep_rdwr_lock); hba 836 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->mtu_supported = BNX2I_MAX_MTU_SUPPORTED; hba 839 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->max_active_conns = ISCSI_MAX_CONNS_PER_HBA; hba 841 drivers/scsi/bnx2i/bnx2i_iscsi.c if (bnx2i_setup_free_cid_que(hba)) hba 845 drivers/scsi/bnx2i/bnx2i_iscsi.c if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) { hba 847 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->max_sqes = sq_size; hba 849 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->max_sqes = BNX2I_5770X_SQ_WQES_DEFAULT; hba 852 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->max_sqes = sq_size; hba 854 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->max_sqes = BNX2I_570X_SQ_WQES_DEFAULT; hba 857 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->max_rqes = rq_size; hba 858 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->max_cqes = hba->max_sqes + rq_size; hba 859 drivers/scsi/bnx2i/bnx2i_iscsi.c if (test_bit(BNX2I_NX2_DEV_57710, 
&hba->cnic_dev_type)) { hba 860 drivers/scsi/bnx2i/bnx2i_iscsi.c if (hba->max_cqes > BNX2I_5770X_CQ_WQES_MAX) hba 861 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->max_cqes = BNX2I_5770X_CQ_WQES_MAX; hba 862 drivers/scsi/bnx2i/bnx2i_iscsi.c } else if (hba->max_cqes > BNX2I_570X_CQ_WQES_MAX) hba 863 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->max_cqes = BNX2I_570X_CQ_WQES_MAX; hba 865 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->num_ccell = hba->max_sqes / 2; hba 867 drivers/scsi/bnx2i/bnx2i_iscsi.c spin_lock_init(&hba->lock); hba 868 drivers/scsi/bnx2i/bnx2i_iscsi.c mutex_init(&hba->net_dev_lock); hba 869 drivers/scsi/bnx2i/bnx2i_iscsi.c init_waitqueue_head(&hba->eh_wait); hba 870 drivers/scsi/bnx2i/bnx2i_iscsi.c if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) { hba 871 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->hba_shutdown_tmo = 30 * HZ; hba 872 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->conn_teardown_tmo = 20 * HZ; hba 873 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->conn_ctx_destroy_tmo = 6 * HZ; hba 875 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->hba_shutdown_tmo = 20 * HZ; hba 876 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->conn_teardown_tmo = 10 * HZ; hba 877 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->conn_ctx_destroy_tmo = 2 * HZ; hba 881 drivers/scsi/bnx2i/bnx2i_iscsi.c spin_lock_init(&hba->stat_lock); hba 883 drivers/scsi/bnx2i/bnx2i_iscsi.c memset(&hba->stats, 0, sizeof(struct iscsi_stats_info)); hba 885 drivers/scsi/bnx2i/bnx2i_iscsi.c if (iscsi_host_add(shost, &hba->pcidev->dev)) hba 887 drivers/scsi/bnx2i/bnx2i_iscsi.c return hba; hba 890 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_release_free_cid_que(hba); hba 892 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_free_mp_bdt(hba); hba 894 drivers/scsi/bnx2i/bnx2i_iscsi.c if (hba->regview) { hba 895 drivers/scsi/bnx2i/bnx2i_iscsi.c pci_iounmap(hba->pcidev, hba->regview); hba 896 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->regview = NULL; hba 899 drivers/scsi/bnx2i/bnx2i_iscsi.c pci_dev_put(hba->pcidev); hba 910 drivers/scsi/bnx2i/bnx2i_iscsi.c void bnx2i_free_hba(struct bnx2i_hba *hba) hba 912 drivers/scsi/bnx2i/bnx2i_iscsi.c struct Scsi_Host *shost = hba->shost; hba 915 drivers/scsi/bnx2i/bnx2i_iscsi.c INIT_LIST_HEAD(&hba->ep_ofld_list); hba 916 drivers/scsi/bnx2i/bnx2i_iscsi.c INIT_LIST_HEAD(&hba->ep_active_list); hba 917 drivers/scsi/bnx2i/bnx2i_iscsi.c INIT_LIST_HEAD(&hba->ep_destroy_list); hba 919 drivers/scsi/bnx2i/bnx2i_iscsi.c if (hba->regview) { hba 920 drivers/scsi/bnx2i/bnx2i_iscsi.c pci_iounmap(hba->pcidev, hba->regview); hba 921 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->regview = NULL; hba 923 drivers/scsi/bnx2i/bnx2i_iscsi.c pci_dev_put(hba->pcidev); hba 924 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_free_mp_bdt(hba); hba 925 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_release_free_cid_que(hba); hba 936 drivers/scsi/bnx2i/bnx2i_iscsi.c static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba, hba 940 drivers/scsi/bnx2i/bnx2i_iscsi.c dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, hba 947 drivers/scsi/bnx2i/bnx2i_iscsi.c dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, hba 954 drivers/scsi/bnx2i/bnx2i_iscsi.c dma_free_coherent(&hba->pcidev->dev, hba 962 drivers/scsi/bnx2i/bnx2i_iscsi.c dma_free_coherent(&hba->pcidev->dev, hba 977 drivers/scsi/bnx2i/bnx2i_iscsi.c static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba, hba 982 drivers/scsi/bnx2i/bnx2i_iscsi.c dma_alloc_coherent(&hba->pcidev->dev, hba 993 drivers/scsi/bnx2i/bnx2i_iscsi.c dma_alloc_coherent(&hba->pcidev->dev, hba 1004 drivers/scsi/bnx2i/bnx2i_iscsi.c dma_alloc_coherent(&hba->pcidev->dev, 
CNIC_PAGE_SIZE, hba 1010 drivers/scsi/bnx2i/bnx2i_iscsi.c dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, hba 1019 drivers/scsi/bnx2i/bnx2i_iscsi.c dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, hba 1025 drivers/scsi/bnx2i/bnx2i_iscsi.c dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, hba 1030 drivers/scsi/bnx2i/bnx2i_iscsi.c dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, hba 1162 drivers/scsi/bnx2i/bnx2i_iscsi.c struct bnx2i_hba *hba = bnx2i_conn->hba; hba 1173 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_send_cmd_cleanup_req(hba, task->dd_data); hba 1194 drivers/scsi/bnx2i/bnx2i_iscsi.c struct bnx2i_hba *hba = bnx2i_conn->hba; hba 1203 drivers/scsi/bnx2i/bnx2i_iscsi.c ADD_STATS_64(hba, tx_pdus, 1); hba 1204 drivers/scsi/bnx2i/bnx2i_iscsi.c ADD_STATS_64(hba, tx_bytes, task->data_count); hba 1228 drivers/scsi/bnx2i/bnx2i_iscsi.c struct bnx2i_hba *hba = iscsi_host_priv(shost); hba 1235 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->max_sqes) hba 1269 drivers/scsi/bnx2i/bnx2i_iscsi.c cmd->req.bd_list_addr_lo = (u32) hba->mp_bd_dma; hba 1270 drivers/scsi/bnx2i/bnx2i_iscsi.c cmd->req.bd_list_addr_hi = (u32) ((u64) hba->mp_bd_dma >> 32); hba 1293 drivers/scsi/bnx2i/bnx2i_iscsi.c struct bnx2i_hba *hba; hba 1302 drivers/scsi/bnx2i/bnx2i_iscsi.c shost = bnx2i_ep->hba->shost; hba 1303 drivers/scsi/bnx2i/bnx2i_iscsi.c hba = iscsi_host_priv(shost); hba 1304 drivers/scsi/bnx2i/bnx2i_iscsi.c if (bnx2i_adapter_ready(hba)) hba 1311 drivers/scsi/bnx2i/bnx2i_iscsi.c if (cmds_max > hba->max_sqes) hba 1312 drivers/scsi/bnx2i/bnx2i_iscsi.c cmds_max = hba->max_sqes; hba 1322 drivers/scsi/bnx2i/bnx2i_iscsi.c if (bnx2i_setup_cmd_pool(hba, cls_session->dd_data)) hba 1343 drivers/scsi/bnx2i/bnx2i_iscsi.c struct bnx2i_hba *hba = iscsi_host_priv(shost); hba 1345 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_destroy_cmd_pool(hba, session); hba 1361 drivers/scsi/bnx2i/bnx2i_iscsi.c struct bnx2i_hba *hba = iscsi_host_priv(shost); hba 1374 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_conn->hba = hba; hba 1382 drivers/scsi/bnx2i/bnx2i_iscsi.c if (bnx2i_conn_alloc_login_resources(hba, bnx2i_conn)) { hba 1414 drivers/scsi/bnx2i/bnx2i_iscsi.c struct bnx2i_hba *hba = iscsi_host_priv(shost); hba 1426 drivers/scsi/bnx2i/bnx2i_iscsi.c if (bnx2i_adapter_ready(hba)) hba 1438 drivers/scsi/bnx2i/bnx2i_iscsi.c if (bnx2i_ep->hba != hba) { hba 1443 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_ep, bnx2i_ep->hba->netdev->name); hba 1446 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->netdev->name); hba 1454 drivers/scsi/bnx2i/bnx2i_iscsi.c ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn, hba 1460 drivers/scsi/bnx2i/bnx2i_iscsi.c if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type)) hba 1480 drivers/scsi/bnx2i/bnx2i_iscsi.c struct bnx2i_hba *hba; hba 1486 drivers/scsi/bnx2i/bnx2i_iscsi.c hba = iscsi_host_priv(shost); hba 1488 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_conn_free_login_resources(hba, bnx2i_conn); hba 1525 drivers/scsi/bnx2i/bnx2i_iscsi.c struct bnx2i_hba *hba = bnx2i_ep->hba; hba 1528 drivers/scsi/bnx2i/bnx2i_iscsi.c if (!hba) hba 1533 drivers/scsi/bnx2i/bnx2i_iscsi.c mutex_lock(&hba->net_dev_lock); hba 1536 drivers/scsi/bnx2i/bnx2i_iscsi.c mutex_unlock(&hba->net_dev_lock); hba 1539 drivers/scsi/bnx2i/bnx2i_iscsi.c mutex_lock(&hba->net_dev_lock); hba 1542 drivers/scsi/bnx2i/bnx2i_iscsi.c mutex_unlock(&hba->net_dev_lock); hba 1560 drivers/scsi/bnx2i/bnx2i_iscsi.c struct bnx2i_hba *hba = iscsi_host_priv(shost); hba 1565 drivers/scsi/bnx2i/bnx2i_iscsi.c len = sysfs_format_mac(buf, hba->cnic->mac_addr, 6); 
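Note: the bnx2i_iscsi.c cid_que fragments earlier in this listing (lines 277-353) describe a circular ring of free iSCSI connection IDs with producer/consumer indices, plus a conn_cid_tbl mapping an in-use CID back to its connection. A sketch reconstructed from those fragments; the struct layout and the -1 exhaustion return are assumptions where the corresponding lines are not shown.

	#include <linux/types.h>

	struct bnx2i_conn;

	struct bnx2i_cid_queue {
		u32 *cid_que;			/* ring of free CIDs */
		u32 cid_q_prod_idx;		/* where freed CIDs are pushed */
		u32 cid_q_cons_idx;		/* where free CIDs are popped */
		u32 cid_q_max_idx;		/* ring size; wrap point */
		u32 cid_free_cnt;		/* CIDs currently in the ring */
		struct bnx2i_conn **conn_cid_tbl; /* CID -> active connection */
	};

	struct bnx2i_hba {
		struct bnx2i_cid_queue cid_que;
		/* ... remaining fields trimmed for the sketch ... */
	};

	/* Pop a free CID from the ring (fragments at lines 277-290). */
	static u32 bnx2i_alloc_iscsi_cid(struct bnx2i_hba *hba)
	{
		u32 idx;

		if (!hba->cid_que.cid_free_cnt)
			return -1;	/* assumed: ring empty, no CID available */

		idx = hba->cid_que.cid_q_cons_idx;
		if (++hba->cid_que.cid_q_cons_idx == hba->cid_que.cid_q_max_idx)
			hba->cid_que.cid_q_cons_idx = 0;	/* wrap consumer */
		hba->cid_que.cid_free_cnt--;
		return hba->cid_que.cid_que[idx];
	}

	/* Push a CID back and clear its connection slot (lines 299-313). */
	static void bnx2i_free_iscsi_cid(struct bnx2i_hba *hba, u16 iscsi_cid)
	{
		u32 idx;

		hba->cid_que.cid_free_cnt++;
		idx = hba->cid_que.cid_q_prod_idx;
		hba->cid_que.cid_que[idx] = iscsi_cid;
		hba->cid_que.conn_cid_tbl[iscsi_cid] = NULL;
		if (++hba->cid_que.cid_q_prod_idx == hba->cid_que.cid_q_max_idx)
			hba->cid_que.cid_q_prod_idx = 0;	/* wrap producer */
	}

Setup (lines 324-353) simply fills the ring with the identity sequence 0..max_active_conns-1 and NULLs conn_cid_tbl, so the ring starts full.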
hba 1568 drivers/scsi/bnx2i/bnx2i_iscsi.c len = sprintf(buf, "%s\n", hba->netdev->name); hba 1571 drivers/scsi/bnx2i/bnx2i_iscsi.c struct list_head *active_list = &hba->ep_active_list; hba 1573 drivers/scsi/bnx2i/bnx2i_iscsi.c read_lock_bh(&hba->ep_rdwr_lock); hba 1574 drivers/scsi/bnx2i/bnx2i_iscsi.c if (!list_empty(&hba->ep_active_list)) { hba 1587 drivers/scsi/bnx2i/bnx2i_iscsi.c read_unlock_bh(&hba->ep_rdwr_lock); hba 1666 drivers/scsi/bnx2i/bnx2i_iscsi.c struct bnx2i_hba *hba; hba 1669 drivers/scsi/bnx2i/bnx2i_iscsi.c hba = get_adapter_list_head(); hba 1670 drivers/scsi/bnx2i/bnx2i_iscsi.c if (hba && hba->cnic) hba 1671 drivers/scsi/bnx2i/bnx2i_iscsi.c cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI); hba 1677 drivers/scsi/bnx2i/bnx2i_iscsi.c hba = bnx2i_find_hba_for_cnic(cnic); hba 1678 drivers/scsi/bnx2i/bnx2i_iscsi.c if (!hba) hba 1681 drivers/scsi/bnx2i/bnx2i_iscsi.c if (bnx2i_adapter_ready(hba)) { hba 1685 drivers/scsi/bnx2i/bnx2i_iscsi.c if (hba->netdev->mtu > hba->mtu_supported) { hba 1687 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->netdev->name, hba->netdev->mtu); hba 1689 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->mtu_supported); hba 1692 drivers/scsi/bnx2i/bnx2i_iscsi.c return hba; hba 1705 drivers/scsi/bnx2i/bnx2i_iscsi.c static int bnx2i_tear_down_conn(struct bnx2i_hba *hba, hba 1708 drivers/scsi/bnx2i/bnx2i_iscsi.c if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) && ep->cm_sk) hba 1709 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->cnic->cm_destroy(ep->cm_sk); hba 1711 drivers/scsi/bnx2i/bnx2i_iscsi.c if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) && hba 1727 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->netdev->name); hba 1732 drivers/scsi/bnx2i/bnx2i_iscsi.c ep->ofld_timer.expires = hba->conn_ctx_destroy_tmo + jiffies; hba 1735 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_ep_destroy_list_add(hba, ep); hba 1738 drivers/scsi/bnx2i/bnx2i_iscsi.c if (bnx2i_send_conn_destroy(hba, ep)) hba 1748 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_ep_destroy_list_del(hba, ep); hba 1778 drivers/scsi/bnx2i/bnx2i_iscsi.c struct bnx2i_hba *hba; hba 1786 drivers/scsi/bnx2i/bnx2i_iscsi.c hba = iscsi_host_priv(shost); hba 1792 drivers/scsi/bnx2i/bnx2i_iscsi.c hba = bnx2i_check_route(dst_addr); hba 1794 drivers/scsi/bnx2i/bnx2i_iscsi.c if (!hba) { hba 1798 drivers/scsi/bnx2i/bnx2i_iscsi.c mutex_lock(&hba->net_dev_lock); hba 1800 drivers/scsi/bnx2i/bnx2i_iscsi.c if (bnx2i_adapter_ready(hba) || !hba->cid_que.cid_free_cnt) { hba 1804 drivers/scsi/bnx2i/bnx2i_iscsi.c cnic = hba->cnic; hba 1805 drivers/scsi/bnx2i/bnx2i_iscsi.c ep = bnx2i_alloc_ep(hba); hba 1813 drivers/scsi/bnx2i/bnx2i_iscsi.c iscsi_cid = bnx2i_alloc_iscsi_cid(hba); hba 1816 drivers/scsi/bnx2i/bnx2i_iscsi.c "iscsi cid\n", hba->netdev->name); hba 1821 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_ep->hba_age = hba->age; hba 1823 drivers/scsi/bnx2i/bnx2i_iscsi.c rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep); hba 1826 drivers/scsi/bnx2i/bnx2i_iscsi.c "\n", hba->netdev->name); hba 1833 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_ep_ofld_list_add(hba, bnx2i_ep); hba 1839 drivers/scsi/bnx2i/bnx2i_iscsi.c if (bnx2i_send_conn_ofld_req(hba, bnx2i_ep)) { hba 1842 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->netdev->name, bnx2i_ep->ep_iscsi_cid); hba 1847 drivers/scsi/bnx2i/bnx2i_iscsi.c "\n", hba->netdev->name); hba 1848 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_ep_ofld_list_del(hba, bnx2i_ep); hba 1860 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_ep_ofld_list_del(hba, bnx2i_ep); hba 1865 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->netdev->name, bnx2i_ep->ep_iscsi_cid); hba 1897 
drivers/scsi/bnx2i/bnx2i_iscsi.c if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { hba 1905 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_ep_active_list_add(hba, bnx2i_ep); hba 1911 drivers/scsi/bnx2i/bnx2i_iscsi.c mutex_unlock(&hba->net_dev_lock); hba 1915 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_ep_active_list_del(hba, bnx2i_ep); hba 1917 drivers/scsi/bnx2i/bnx2i_iscsi.c if (bnx2i_tear_down_conn(hba, bnx2i_ep)) { hba 1918 drivers/scsi/bnx2i/bnx2i_iscsi.c mutex_unlock(&hba->net_dev_lock); hba 1922 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_free_qp_resc(hba, bnx2i_ep); hba 1926 drivers/scsi/bnx2i/bnx2i_iscsi.c mutex_unlock(&hba->net_dev_lock); hba 1983 drivers/scsi/bnx2i/bnx2i_iscsi.c if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type)) hba 2025 drivers/scsi/bnx2i/bnx2i_iscsi.c struct bnx2i_hba *hba = bnx2i_ep->hba; hba 2033 drivers/scsi/bnx2i/bnx2i_iscsi.c if (!hba) hba 2036 drivers/scsi/bnx2i/bnx2i_iscsi.c cnic = hba->cnic; hba 2053 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_ep->ofld_timer.expires = hba->conn_teardown_tmo + jiffies; hba 2056 drivers/scsi/bnx2i/bnx2i_iscsi.c if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) hba 2068 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_ep->hba->netdev->name); hba 2088 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_ep->hba->netdev->name, close, close_ret); hba 2100 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_ep_active_list_del(hba, bnx2i_ep); hba 2101 drivers/scsi/bnx2i/bnx2i_iscsi.c if (bnx2i_tear_down_conn(hba, bnx2i_ep)) hba 2120 drivers/scsi/bnx2i/bnx2i_iscsi.c struct bnx2i_hba *hba; hba 2137 drivers/scsi/bnx2i/bnx2i_iscsi.c hba = bnx2i_ep->hba; hba 2139 drivers/scsi/bnx2i/bnx2i_iscsi.c mutex_lock(&hba->net_dev_lock); hba 2147 drivers/scsi/bnx2i/bnx2i_iscsi.c if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) || hba 2148 drivers/scsi/bnx2i/bnx2i_iscsi.c (bnx2i_ep->hba_age != hba->age)) { hba 2149 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_ep_active_list_del(hba, bnx2i_ep); hba 2155 drivers/scsi/bnx2i/bnx2i_iscsi.c mutex_unlock(&hba->net_dev_lock); hba 2159 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_free_qp_resc(hba, bnx2i_ep); hba 2166 drivers/scsi/bnx2i/bnx2i_iscsi.c mutex_unlock(&hba->net_dev_lock); hba 2168 drivers/scsi/bnx2i/bnx2i_iscsi.c wake_up_interruptible(&hba->eh_wait); hba 2179 drivers/scsi/bnx2i/bnx2i_iscsi.c struct bnx2i_hba *hba = iscsi_host_priv(shost); hba 2184 drivers/scsi/bnx2i/bnx2i_iscsi.c hba->cnic->iscsi_nl_msg_recv(hba->cnic, ISCSI_UEVENT_PATH_UPDATE, buf, hba 41 drivers/scsi/bnx2i/bnx2i_sysfs.c struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev); hba 43 drivers/scsi/bnx2i/bnx2i_sysfs.c return sprintf(buf, "0x%x\n", hba->max_sqes); hba 61 drivers/scsi/bnx2i/bnx2i_sysfs.c struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev); hba 65 drivers/scsi/bnx2i/bnx2i_sysfs.c if (hba->ofld_conns_active) hba 68 drivers/scsi/bnx2i/bnx2i_sysfs.c if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) hba 76 drivers/scsi/bnx2i/bnx2i_sysfs.c hba->max_sqes = val; hba 97 drivers/scsi/bnx2i/bnx2i_sysfs.c struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev); hba 99 drivers/scsi/bnx2i/bnx2i_sysfs.c return sprintf(buf, "0x%x\n", hba->num_ccell); hba 116 drivers/scsi/bnx2i/bnx2i_sysfs.c struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev); hba 118 drivers/scsi/bnx2i/bnx2i_sysfs.c if (hba->ofld_conns_active) hba 124 drivers/scsi/bnx2i/bnx2i_sysfs.c hba->num_ccell = val; hba 2558 drivers/scsi/cxgbi/libcxgbi.c struct cxgbi_hba *hba = NULL; hba 2568 drivers/scsi/cxgbi/libcxgbi.c hba = iscsi_host_priv(shost); hba 2569 drivers/scsi/cxgbi/libcxgbi.c if (!hba) { hba 2593 
drivers/scsi/cxgbi/libcxgbi.c if (!hba) hba 2594 drivers/scsi/cxgbi/libcxgbi.c hba = csk->cdev->hbas[csk->port_id]; hba 2595 drivers/scsi/cxgbi/libcxgbi.c else if (hba != csk->cdev->hbas[csk->port_id]) { hba 2596 drivers/scsi/cxgbi/libcxgbi.c if (ifindex != hba->ndev->ifindex) { hba 2599 drivers/scsi/cxgbi/libcxgbi.c ifindex = hba->ndev->ifindex; hba 2605 drivers/scsi/cxgbi/libcxgbi.c shost->host_no, hba, hba 2635 drivers/scsi/cxgbi/libcxgbi.c cep->chba = hba; hba 2639 drivers/scsi/cxgbi/libcxgbi.c ep, cep, csk, hba, hba->ndev->name); hba 39 drivers/scsi/hptiop.c static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec); hba 40 drivers/scsi/hptiop.c static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag, hba 42 drivers/scsi/hptiop.c static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 tag); hba 43 drivers/scsi/hptiop.c static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag); hba 44 drivers/scsi/hptiop.c static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg); hba 46 drivers/scsi/hptiop.c static int iop_wait_ready_itl(struct hptiop_hba *hba, u32 millisec) hba 52 drivers/scsi/hptiop.c req = readl(&hba->u.itl.iop->inbound_queue); hba 59 drivers/scsi/hptiop.c writel(req, &hba->u.itl.iop->outbound_queue); hba 60 drivers/scsi/hptiop.c readl(&hba->u.itl.iop->outbound_intstatus); hba 67 drivers/scsi/hptiop.c static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec) hba 69 drivers/scsi/hptiop.c return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec); hba 72 drivers/scsi/hptiop.c static int iop_wait_ready_mvfrey(struct hptiop_hba *hba, u32 millisec) hba 74 drivers/scsi/hptiop.c return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec); hba 77 drivers/scsi/hptiop.c static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag) hba 80 drivers/scsi/hptiop.c hptiop_host_request_callback_itl(hba, hba 83 drivers/scsi/hptiop.c hptiop_iop_request_callback_itl(hba, tag); hba 86 drivers/scsi/hptiop.c static void hptiop_drain_outbound_queue_itl(struct hptiop_hba *hba) hba 90 drivers/scsi/hptiop.c while ((req = readl(&hba->u.itl.iop->outbound_queue)) != hba 94 drivers/scsi/hptiop.c hptiop_request_callback_itl(hba, req); hba 99 drivers/scsi/hptiop.c ((char __iomem *)hba->u.itl.iop + req); hba 103 drivers/scsi/hptiop.c hptiop_request_callback_itl(hba, req); hba 108 drivers/scsi/hptiop.c hptiop_request_callback_itl(hba, req); hba 113 drivers/scsi/hptiop.c static int iop_intr_itl(struct hptiop_hba *hba) hba 115 drivers/scsi/hptiop.c struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop; hba 116 drivers/scsi/hptiop.c void __iomem *plx = hba->u.itl.plx; hba 130 drivers/scsi/hptiop.c hptiop_message_callback(hba, msg); hba 135 drivers/scsi/hptiop.c hptiop_drain_outbound_queue_itl(hba); hba 161 drivers/scsi/hptiop.c static void mv_inbound_write(u64 p, struct hptiop_hba *hba) hba 163 drivers/scsi/hptiop.c u32 inbound_head = readl(&hba->u.mv.mu->inbound_head); hba 169 drivers/scsi/hptiop.c memcpy_toio(&hba->u.mv.mu->inbound_q[inbound_head], &p, 8); hba 170 drivers/scsi/hptiop.c writel(head, &hba->u.mv.mu->inbound_head); hba 172 drivers/scsi/hptiop.c &hba->u.mv.regs->inbound_doorbell); hba 175 drivers/scsi/hptiop.c static void hptiop_request_callback_mv(struct hptiop_hba *hba, u64 tag) hba 187 drivers/scsi/hptiop.c hba->msg_done = 1; hba 191 drivers/scsi/hptiop.c req = hba->reqs[tag >> 8].req_virt; hba 195 drivers/scsi/hptiop.c hptiop_finish_scsi_req(hba, tag>>8, req); hba 203 drivers/scsi/hptiop.c static int 
iop_intr_mv(struct hptiop_hba *hba) hba 208 drivers/scsi/hptiop.c status = readl(&hba->u.mv.regs->outbound_doorbell); hba 209 drivers/scsi/hptiop.c writel(~status, &hba->u.mv.regs->outbound_doorbell); hba 213 drivers/scsi/hptiop.c msg = readl(&hba->u.mv.mu->outbound_msg); hba 215 drivers/scsi/hptiop.c hptiop_message_callback(hba, msg); hba 222 drivers/scsi/hptiop.c while ((tag = mv_outbound_read(hba->u.mv.mu))) hba 223 drivers/scsi/hptiop.c hptiop_request_callback_mv(hba, tag); hba 230 drivers/scsi/hptiop.c static void hptiop_request_callback_mvfrey(struct hptiop_hba *hba, u32 _tag) hba 238 drivers/scsi/hptiop.c hba->msg_done = 1; hba 242 drivers/scsi/hptiop.c req = hba->reqs[(_tag >> 4) & 0xff].req_virt; hba 245 drivers/scsi/hptiop.c hptiop_finish_scsi_req(hba, (_tag >> 4) & 0xff, req); hba 253 drivers/scsi/hptiop.c static int iop_intr_mvfrey(struct hptiop_hba *hba) hba 258 drivers/scsi/hptiop.c if (hba->initialized) hba 259 drivers/scsi/hptiop.c writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable)); hba 261 drivers/scsi/hptiop.c status = readl(&(hba->u.mvfrey.mu->f0_doorbell)); hba 263 drivers/scsi/hptiop.c writel(status, &(hba->u.mvfrey.mu->f0_doorbell)); hba 265 drivers/scsi/hptiop.c u32 msg = readl(&(hba->u.mvfrey.mu->cpu_to_f0_msg_a)); hba 267 drivers/scsi/hptiop.c hptiop_message_callback(hba, msg); hba 272 drivers/scsi/hptiop.c status = readl(&(hba->u.mvfrey.mu->isr_cause)); hba 274 drivers/scsi/hptiop.c writel(status, &(hba->u.mvfrey.mu->isr_cause)); hba 276 drivers/scsi/hptiop.c cptr = *hba->u.mvfrey.outlist_cptr & 0xff; hba 277 drivers/scsi/hptiop.c cur_rptr = hba->u.mvfrey.outlist_rptr; hba 280 drivers/scsi/hptiop.c if (cur_rptr == hba->u.mvfrey.list_count) hba 283 drivers/scsi/hptiop.c _tag = hba->u.mvfrey.outlist[cur_rptr].val; hba 285 drivers/scsi/hptiop.c hptiop_request_callback_mvfrey(hba, _tag); hba 288 drivers/scsi/hptiop.c hba->u.mvfrey.outlist_rptr = cur_rptr; hba 289 drivers/scsi/hptiop.c } while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff)); hba 292 drivers/scsi/hptiop.c if (hba->initialized) hba 293 drivers/scsi/hptiop.c writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable)); hba 298 drivers/scsi/hptiop.c static int iop_send_sync_request_itl(struct hptiop_hba *hba, hba 306 drivers/scsi/hptiop.c writel((unsigned long)req - (unsigned long)hba->u.itl.iop, hba 307 drivers/scsi/hptiop.c &hba->u.itl.iop->inbound_queue); hba 308 drivers/scsi/hptiop.c readl(&hba->u.itl.iop->outbound_intstatus); hba 311 drivers/scsi/hptiop.c iop_intr_itl(hba); hba 320 drivers/scsi/hptiop.c static int iop_send_sync_request_mv(struct hptiop_hba *hba, hba 323 drivers/scsi/hptiop.c struct hpt_iop_request_header *reqhdr = hba->u.mv.internal_req; hba 326 drivers/scsi/hptiop.c hba->msg_done = 0; hba 328 drivers/scsi/hptiop.c mv_inbound_write(hba->u.mv.internal_req_phy | hba 329 drivers/scsi/hptiop.c MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bits, hba); hba 332 drivers/scsi/hptiop.c iop_intr_mv(hba); hba 333 drivers/scsi/hptiop.c if (hba->msg_done) hba 340 drivers/scsi/hptiop.c static int iop_send_sync_request_mvfrey(struct hptiop_hba *hba, hba 344 drivers/scsi/hptiop.c hba->u.mvfrey.internal_req.req_virt; hba 347 drivers/scsi/hptiop.c hba->msg_done = 0; hba 349 drivers/scsi/hptiop.c hba->ops->post_req(hba, &(hba->u.mvfrey.internal_req)); hba 352 drivers/scsi/hptiop.c iop_intr_mvfrey(hba); hba 353 drivers/scsi/hptiop.c if (hba->msg_done) hba 357 drivers/scsi/hptiop.c return hba->msg_done ? 
0 : -1; hba 360 drivers/scsi/hptiop.c static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg) hba 362 drivers/scsi/hptiop.c writel(msg, &hba->u.itl.iop->inbound_msgaddr0); hba 363 drivers/scsi/hptiop.c readl(&hba->u.itl.iop->outbound_intstatus); hba 366 drivers/scsi/hptiop.c static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg) hba 368 drivers/scsi/hptiop.c writel(msg, &hba->u.mv.mu->inbound_msg); hba 369 drivers/scsi/hptiop.c writel(MVIOP_MU_INBOUND_INT_MSG, &hba->u.mv.regs->inbound_doorbell); hba 370 drivers/scsi/hptiop.c readl(&hba->u.mv.regs->inbound_doorbell); hba 373 drivers/scsi/hptiop.c static void hptiop_post_msg_mvfrey(struct hptiop_hba *hba, u32 msg) hba 375 drivers/scsi/hptiop.c writel(msg, &(hba->u.mvfrey.mu->f0_to_cpu_msg_a)); hba 376 drivers/scsi/hptiop.c readl(&(hba->u.mvfrey.mu->f0_to_cpu_msg_a)); hba 379 drivers/scsi/hptiop.c static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec) hba 383 drivers/scsi/hptiop.c hba->msg_done = 0; hba 384 drivers/scsi/hptiop.c hba->ops->disable_intr(hba); hba 385 drivers/scsi/hptiop.c hba->ops->post_msg(hba, msg); hba 388 drivers/scsi/hptiop.c spin_lock_irq(hba->host->host_lock); hba 389 drivers/scsi/hptiop.c hba->ops->iop_intr(hba); hba 390 drivers/scsi/hptiop.c spin_unlock_irq(hba->host->host_lock); hba 391 drivers/scsi/hptiop.c if (hba->msg_done) hba 396 drivers/scsi/hptiop.c hba->ops->enable_intr(hba); hba 397 drivers/scsi/hptiop.c return hba->msg_done? 0 : -1; hba 400 drivers/scsi/hptiop.c static int iop_get_config_itl(struct hptiop_hba *hba, hba 406 drivers/scsi/hptiop.c req32 = readl(&hba->u.itl.iop->inbound_queue); hba 411 drivers/scsi/hptiop.c ((unsigned long)hba->u.itl.iop + req32); hba 418 drivers/scsi/hptiop.c if (iop_send_sync_request_itl(hba, req, 20000)) { hba 424 drivers/scsi/hptiop.c writel(req32, &hba->u.itl.iop->outbound_queue); hba 428 drivers/scsi/hptiop.c static int iop_get_config_mv(struct hptiop_hba *hba, hba 431 drivers/scsi/hptiop.c struct hpt_iop_request_get_config *req = hba->u.mv.internal_req; hba 441 drivers/scsi/hptiop.c if (iop_send_sync_request_mv(hba, 0, 20000)) { hba 450 drivers/scsi/hptiop.c static int iop_get_config_mvfrey(struct hptiop_hba *hba, hba 453 drivers/scsi/hptiop.c struct hpt_iop_request_get_config *info = hba->u.mvfrey.config; hba 472 drivers/scsi/hptiop.c static int iop_set_config_itl(struct hptiop_hba *hba, hba 478 drivers/scsi/hptiop.c req32 = readl(&hba->u.itl.iop->inbound_queue); hba 483 drivers/scsi/hptiop.c ((unsigned long)hba->u.itl.iop + req32); hba 495 drivers/scsi/hptiop.c if (iop_send_sync_request_itl(hba, req, 20000)) { hba 500 drivers/scsi/hptiop.c writel(req32, &hba->u.itl.iop->outbound_queue); hba 504 drivers/scsi/hptiop.c static int iop_set_config_mv(struct hptiop_hba *hba, hba 507 drivers/scsi/hptiop.c struct hpt_iop_request_set_config *req = hba->u.mv.internal_req; hba 518 drivers/scsi/hptiop.c if (iop_send_sync_request_mv(hba, 0, 20000)) { hba 526 drivers/scsi/hptiop.c static int iop_set_config_mvfrey(struct hptiop_hba *hba, hba 530 drivers/scsi/hptiop.c hba->u.mvfrey.internal_req.req_virt; hba 541 drivers/scsi/hptiop.c if (iop_send_sync_request_mvfrey(hba, 0, 20000)) { hba 549 drivers/scsi/hptiop.c static void hptiop_enable_intr_itl(struct hptiop_hba *hba) hba 552 drivers/scsi/hptiop.c &hba->u.itl.iop->outbound_intmask); hba 555 drivers/scsi/hptiop.c static void hptiop_enable_intr_mv(struct hptiop_hba *hba) hba 558 drivers/scsi/hptiop.c &hba->u.mv.regs->outbound_intmask); hba 561 drivers/scsi/hptiop.c static void 
hptiop_enable_intr_mvfrey(struct hptiop_hba *hba) hba 563 drivers/scsi/hptiop.c writel(CPU_TO_F0_DRBL_MSG_BIT, &(hba->u.mvfrey.mu->f0_doorbell_enable)); hba 564 drivers/scsi/hptiop.c writel(0x1, &(hba->u.mvfrey.mu->isr_enable)); hba 565 drivers/scsi/hptiop.c writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable)); hba 568 drivers/scsi/hptiop.c static int hptiop_initialize_iop(struct hptiop_hba *hba) hba 571 drivers/scsi/hptiop.c hba->ops->enable_intr(hba); hba 573 drivers/scsi/hptiop.c hba->initialized = 1; hba 576 drivers/scsi/hptiop.c if (iop_send_sync_msg(hba, hba 579 drivers/scsi/hptiop.c hba->host->host_no); hba 585 drivers/scsi/hptiop.c static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index) hba 590 drivers/scsi/hptiop.c struct pci_dev *pcidev = hba->pcidev; hba 595 drivers/scsi/hptiop.c hba->host->host_no); hba 605 drivers/scsi/hptiop.c hba->host->host_no); hba 611 drivers/scsi/hptiop.c static int hptiop_map_pci_bar_itl(struct hptiop_hba *hba) hba 613 drivers/scsi/hptiop.c struct pci_dev *pcidev = hba->pcidev; hba 614 drivers/scsi/hptiop.c hba->u.itl.iop = hptiop_map_pci_bar(hba, 0); hba 615 drivers/scsi/hptiop.c if (hba->u.itl.iop == NULL) hba 618 drivers/scsi/hptiop.c hba->u.itl.plx = hba->u.itl.iop; hba 619 drivers/scsi/hptiop.c hba->u.itl.iop = hptiop_map_pci_bar(hba, 2); hba 620 drivers/scsi/hptiop.c if (hba->u.itl.iop == NULL) { hba 621 drivers/scsi/hptiop.c iounmap(hba->u.itl.plx); hba 628 drivers/scsi/hptiop.c static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba) hba 630 drivers/scsi/hptiop.c if (hba->u.itl.plx) hba 631 drivers/scsi/hptiop.c iounmap(hba->u.itl.plx); hba 632 drivers/scsi/hptiop.c iounmap(hba->u.itl.iop); hba 635 drivers/scsi/hptiop.c static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba) hba 637 drivers/scsi/hptiop.c hba->u.mv.regs = hptiop_map_pci_bar(hba, 0); hba 638 drivers/scsi/hptiop.c if (hba->u.mv.regs == NULL) hba 641 drivers/scsi/hptiop.c hba->u.mv.mu = hptiop_map_pci_bar(hba, 2); hba 642 drivers/scsi/hptiop.c if (hba->u.mv.mu == NULL) { hba 643 drivers/scsi/hptiop.c iounmap(hba->u.mv.regs); hba 650 drivers/scsi/hptiop.c static int hptiop_map_pci_bar_mvfrey(struct hptiop_hba *hba) hba 652 drivers/scsi/hptiop.c hba->u.mvfrey.config = hptiop_map_pci_bar(hba, 0); hba 653 drivers/scsi/hptiop.c if (hba->u.mvfrey.config == NULL) hba 656 drivers/scsi/hptiop.c hba->u.mvfrey.mu = hptiop_map_pci_bar(hba, 2); hba 657 drivers/scsi/hptiop.c if (hba->u.mvfrey.mu == NULL) { hba 658 drivers/scsi/hptiop.c iounmap(hba->u.mvfrey.config); hba 665 drivers/scsi/hptiop.c static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba) hba 667 drivers/scsi/hptiop.c iounmap(hba->u.mv.regs); hba 668 drivers/scsi/hptiop.c iounmap(hba->u.mv.mu); hba 671 drivers/scsi/hptiop.c static void hptiop_unmap_pci_bar_mvfrey(struct hptiop_hba *hba) hba 673 drivers/scsi/hptiop.c iounmap(hba->u.mvfrey.config); hba 674 drivers/scsi/hptiop.c iounmap(hba->u.mvfrey.mu); hba 677 drivers/scsi/hptiop.c static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg) hba 683 drivers/scsi/hptiop.c hba->msg_done = 1; hba 685 drivers/scsi/hptiop.c if (!hba->initialized) hba 689 drivers/scsi/hptiop.c atomic_set(&hba->resetting, 0); hba 690 drivers/scsi/hptiop.c wake_up(&hba->reset_wq); hba 693 drivers/scsi/hptiop.c hba->msg_done = 1; hba 696 drivers/scsi/hptiop.c static struct hptiop_request *get_req(struct hptiop_hba *hba) hba 700 drivers/scsi/hptiop.c dprintk("get_req : req=%p\n", hba->req_list); hba 702 drivers/scsi/hptiop.c ret = hba->req_list; hba 704 
drivers/scsi/hptiop.c hba->req_list = ret->next; hba 709 drivers/scsi/hptiop.c static void free_req(struct hptiop_hba *hba, struct hptiop_request *req) hba 712 drivers/scsi/hptiop.c req->next = hba->req_list; hba 713 drivers/scsi/hptiop.c hba->req_list = req; hba 716 drivers/scsi/hptiop.c static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag, hba 729 drivers/scsi/hptiop.c scp = hba->reqs[tag].scp; hba 774 drivers/scsi/hptiop.c free_req(hba, &hba->reqs[tag]); hba 777 drivers/scsi/hptiop.c static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag) hba 782 drivers/scsi/hptiop.c if (hba->iopintf_v2) { hba 784 drivers/scsi/hptiop.c req = hba->reqs[tag].req_virt; hba 789 drivers/scsi/hptiop.c req = hba->reqs[tag].req_virt; hba 792 drivers/scsi/hptiop.c hptiop_finish_scsi_req(hba, tag, req); hba 795 drivers/scsi/hptiop.c static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag) hba 802 drivers/scsi/hptiop.c ((unsigned long)hba->u.itl.iop + tag); hba 831 drivers/scsi/hptiop.c writel(tag, &hba->u.itl.iop->outbound_queue); hba 836 drivers/scsi/hptiop.c struct hptiop_hba *hba = dev_id; hba 840 drivers/scsi/hptiop.c spin_lock_irqsave(hba->host->host_lock, flags); hba 841 drivers/scsi/hptiop.c handled = hba->ops->iop_intr(hba); hba 842 drivers/scsi/hptiop.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 850 drivers/scsi/hptiop.c struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata; hba 862 drivers/scsi/hptiop.c BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors); hba 866 drivers/scsi/hptiop.c hba->ops->host_phy_flag; hba 874 drivers/scsi/hptiop.c static void hptiop_post_req_itl(struct hptiop_hba *hba, hba 883 drivers/scsi/hptiop.c if (hba->iopintf_v2) { hba 895 drivers/scsi/hptiop.c &hba->u.itl.iop->inbound_queue); hba 898 drivers/scsi/hptiop.c &hba->u.itl.iop->inbound_queue); hba 901 drivers/scsi/hptiop.c static void hptiop_post_req_mv(struct hptiop_hba *hba, hba 922 drivers/scsi/hptiop.c MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba); hba 925 drivers/scsi/hptiop.c static void hptiop_post_req_mvfrey(struct hptiop_hba *hba, hba 939 drivers/scsi/hptiop.c hba->u.mvfrey.inlist_wptr++; hba 940 drivers/scsi/hptiop.c index = hba->u.mvfrey.inlist_wptr & 0x3fff; hba 942 drivers/scsi/hptiop.c if (index == hba->u.mvfrey.list_count) { hba 944 drivers/scsi/hptiop.c hba->u.mvfrey.inlist_wptr &= ~0x3fff; hba 945 drivers/scsi/hptiop.c hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE; hba 948 drivers/scsi/hptiop.c hba->u.mvfrey.inlist[index].addr = hba 950 drivers/scsi/hptiop.c hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4; hba 951 drivers/scsi/hptiop.c writel(hba->u.mvfrey.inlist_wptr, hba 952 drivers/scsi/hptiop.c &(hba->u.mvfrey.mu->inbound_write_ptr)); hba 953 drivers/scsi/hptiop.c readl(&(hba->u.mvfrey.mu->inbound_write_ptr)); hba 956 drivers/scsi/hptiop.c static int hptiop_reset_comm_itl(struct hptiop_hba *hba) hba 961 drivers/scsi/hptiop.c static int hptiop_reset_comm_mv(struct hptiop_hba *hba) hba 966 drivers/scsi/hptiop.c static int hptiop_reset_comm_mvfrey(struct hptiop_hba *hba) hba 968 drivers/scsi/hptiop.c u32 list_count = hba->u.mvfrey.list_count; hba 970 drivers/scsi/hptiop.c if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000)) hba 976 drivers/scsi/hptiop.c writel(cpu_to_le32(hba->u.mvfrey.inlist_phy & 0xffffffff), hba 977 drivers/scsi/hptiop.c &(hba->u.mvfrey.mu->inbound_base)); hba 978 drivers/scsi/hptiop.c writel(cpu_to_le32((hba->u.mvfrey.inlist_phy >> 16) >> 16), hba 979 drivers/scsi/hptiop.c 
&(hba->u.mvfrey.mu->inbound_base_high)); hba 981 drivers/scsi/hptiop.c writel(cpu_to_le32(hba->u.mvfrey.outlist_phy & 0xffffffff), hba 982 drivers/scsi/hptiop.c &(hba->u.mvfrey.mu->outbound_base)); hba 983 drivers/scsi/hptiop.c writel(cpu_to_le32((hba->u.mvfrey.outlist_phy >> 16) >> 16), hba 984 drivers/scsi/hptiop.c &(hba->u.mvfrey.mu->outbound_base_high)); hba 986 drivers/scsi/hptiop.c writel(cpu_to_le32(hba->u.mvfrey.outlist_cptr_phy & 0xffffffff), hba 987 drivers/scsi/hptiop.c &(hba->u.mvfrey.mu->outbound_shadow_base)); hba 988 drivers/scsi/hptiop.c writel(cpu_to_le32((hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16), hba 989 drivers/scsi/hptiop.c &(hba->u.mvfrey.mu->outbound_shadow_base_high)); hba 991 drivers/scsi/hptiop.c hba->u.mvfrey.inlist_wptr = (list_count - 1) | CL_POINTER_TOGGLE; hba 992 drivers/scsi/hptiop.c *hba->u.mvfrey.outlist_cptr = (list_count - 1) | CL_POINTER_TOGGLE; hba 993 drivers/scsi/hptiop.c hba->u.mvfrey.outlist_rptr = list_count - 1; hba 1001 drivers/scsi/hptiop.c struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata; hba 1009 drivers/scsi/hptiop.c _req = get_req(hba); hba 1031 drivers/scsi/hptiop.c (scp->device->id > hba->max_devices) || hba 1032 drivers/scsi/hptiop.c ((scp->device->id == (hba->max_devices-1)) && scp->device->lun)) { hba 1034 drivers/scsi/hptiop.c free_req(hba, _req); hba 1058 drivers/scsi/hptiop.c hba->ops->post_req(hba, _req); hba 1074 drivers/scsi/hptiop.c static int hptiop_reset_hba(struct hptiop_hba *hba) hba 1076 drivers/scsi/hptiop.c if (atomic_xchg(&hba->resetting, 1) == 0) { hba 1077 drivers/scsi/hptiop.c atomic_inc(&hba->reset_count); hba 1078 drivers/scsi/hptiop.c hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET); hba 1081 drivers/scsi/hptiop.c wait_event_timeout(hba->reset_wq, hba 1082 drivers/scsi/hptiop.c atomic_read(&hba->resetting) == 0, 60 * HZ); hba 1084 drivers/scsi/hptiop.c if (atomic_read(&hba->resetting)) { hba 1086 drivers/scsi/hptiop.c printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no); hba 1090 drivers/scsi/hptiop.c if (iop_send_sync_msg(hba, hba 1093 drivers/scsi/hptiop.c hba->host->host_no); hba 1101 drivers/scsi/hptiop.c struct hptiop_hba * hba = (struct hptiop_hba *)scp->device->host->hostdata; hba 1106 drivers/scsi/hptiop.c return hptiop_reset_hba(hba)? 
FAILED : SUCCESS; hba 1112 drivers/scsi/hptiop.c struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata; hba 1114 drivers/scsi/hptiop.c if (queue_depth > hba->max_requests) hba 1115 drivers/scsi/hptiop.c queue_depth = hba->max_requests; hba 1129 drivers/scsi/hptiop.c struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata; hba 1132 drivers/scsi/hptiop.c hba->firmware_version >> 24, hba 1133 drivers/scsi/hptiop.c (hba->firmware_version >> 16) & 0xff, hba 1134 drivers/scsi/hptiop.c (hba->firmware_version >> 8) & 0xff, hba 1135 drivers/scsi/hptiop.c hba->firmware_version & 0xff); hba 1182 drivers/scsi/hptiop.c static int hptiop_internal_memalloc_itl(struct hptiop_hba *hba) hba 1187 drivers/scsi/hptiop.c static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba) hba 1189 drivers/scsi/hptiop.c hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev, hba 1190 drivers/scsi/hptiop.c 0x800, &hba->u.mv.internal_req_phy, GFP_KERNEL); hba 1191 drivers/scsi/hptiop.c if (hba->u.mv.internal_req) hba 1197 drivers/scsi/hptiop.c static int hptiop_internal_memalloc_mvfrey(struct hptiop_hba *hba) hba 1199 drivers/scsi/hptiop.c u32 list_count = readl(&hba->u.mvfrey.mu->inbound_conf_ctl); hba 1203 drivers/scsi/hptiop.c BUG_ON(hba->max_request_size == 0); hba 1212 drivers/scsi/hptiop.c hba->u.mvfrey.list_count = list_count; hba 1213 drivers/scsi/hptiop.c hba->u.mvfrey.internal_mem_size = 0x800 + hba 1218 drivers/scsi/hptiop.c p = dma_alloc_coherent(&hba->pcidev->dev, hba 1219 drivers/scsi/hptiop.c hba->u.mvfrey.internal_mem_size, &phy, GFP_KERNEL); hba 1223 drivers/scsi/hptiop.c hba->u.mvfrey.internal_req.req_virt = p; hba 1224 drivers/scsi/hptiop.c hba->u.mvfrey.internal_req.req_shifted_phy = phy >> 5; hba 1225 drivers/scsi/hptiop.c hba->u.mvfrey.internal_req.scp = NULL; hba 1226 drivers/scsi/hptiop.c hba->u.mvfrey.internal_req.next = NULL; hba 1231 drivers/scsi/hptiop.c hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p; hba 1232 drivers/scsi/hptiop.c hba->u.mvfrey.inlist_phy = phy; hba 1237 drivers/scsi/hptiop.c hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p; hba 1238 drivers/scsi/hptiop.c hba->u.mvfrey.outlist_phy = phy; hba 1243 drivers/scsi/hptiop.c hba->u.mvfrey.outlist_cptr = (__le32 *)p; hba 1244 drivers/scsi/hptiop.c hba->u.mvfrey.outlist_cptr_phy = phy; hba 1249 drivers/scsi/hptiop.c static int hptiop_internal_memfree_itl(struct hptiop_hba *hba) hba 1254 drivers/scsi/hptiop.c static int hptiop_internal_memfree_mv(struct hptiop_hba *hba) hba 1256 drivers/scsi/hptiop.c if (hba->u.mv.internal_req) { hba 1257 drivers/scsi/hptiop.c dma_free_coherent(&hba->pcidev->dev, 0x800, hba 1258 drivers/scsi/hptiop.c hba->u.mv.internal_req, hba->u.mv.internal_req_phy); hba 1264 drivers/scsi/hptiop.c static int hptiop_internal_memfree_mvfrey(struct hptiop_hba *hba) hba 1266 drivers/scsi/hptiop.c if (hba->u.mvfrey.internal_req.req_virt) { hba 1267 drivers/scsi/hptiop.c dma_free_coherent(&hba->pcidev->dev, hba 1268 drivers/scsi/hptiop.c hba->u.mvfrey.internal_mem_size, hba 1269 drivers/scsi/hptiop.c hba->u.mvfrey.internal_req.req_virt, hba 1271 drivers/scsi/hptiop.c hba->u.mvfrey.internal_req.req_shifted_phy << 5); hba 1280 drivers/scsi/hptiop.c struct hptiop_hba *hba; hba 1325 drivers/scsi/hptiop.c hba = (struct hptiop_hba *)host->hostdata; hba 1326 drivers/scsi/hptiop.c memset(hba, 0, sizeof(struct hptiop_hba)); hba 1328 drivers/scsi/hptiop.c hba->ops = iop_ops; hba 1329 drivers/scsi/hptiop.c hba->pcidev = pcidev; hba 1330 drivers/scsi/hptiop.c hba->host = host; hba 1331 
drivers/scsi/hptiop.c hba->initialized = 0; hba 1332 drivers/scsi/hptiop.c hba->iopintf_v2 = 0; hba 1334 drivers/scsi/hptiop.c atomic_set(&hba->resetting, 0); hba 1335 drivers/scsi/hptiop.c atomic_set(&hba->reset_count, 0); hba 1337 drivers/scsi/hptiop.c init_waitqueue_head(&hba->reset_wq); hba 1338 drivers/scsi/hptiop.c init_waitqueue_head(&hba->ioctl_wq); hba 1346 drivers/scsi/hptiop.c if (hba->ops->map_pci_bar(hba)) hba 1349 drivers/scsi/hptiop.c if (hba->ops->iop_wait_ready(hba, 20000)) { hba 1351 drivers/scsi/hptiop.c hba->host->host_no); hba 1355 drivers/scsi/hptiop.c if (hba->ops->family == MV_BASED_IOP) { hba 1356 drivers/scsi/hptiop.c if (hba->ops->internal_memalloc(hba)) { hba 1358 drivers/scsi/hptiop.c hba->host->host_no); hba 1363 drivers/scsi/hptiop.c if (hba->ops->get_config(hba, &iop_config)) { hba 1365 drivers/scsi/hptiop.c hba->host->host_no); hba 1369 drivers/scsi/hptiop.c hba->max_requests = min(le32_to_cpu(iop_config.max_requests), hba 1371 drivers/scsi/hptiop.c hba->max_devices = le32_to_cpu(iop_config.max_devices); hba 1372 drivers/scsi/hptiop.c hba->max_request_size = le32_to_cpu(iop_config.request_size); hba 1373 drivers/scsi/hptiop.c hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count); hba 1374 drivers/scsi/hptiop.c hba->firmware_version = le32_to_cpu(iop_config.firmware_version); hba 1375 drivers/scsi/hptiop.c hba->interface_version = le32_to_cpu(iop_config.interface_version); hba 1376 drivers/scsi/hptiop.c hba->sdram_size = le32_to_cpu(iop_config.sdram_size); hba 1378 drivers/scsi/hptiop.c if (hba->ops->family == MVFREY_BASED_IOP) { hba 1379 drivers/scsi/hptiop.c if (hba->ops->internal_memalloc(hba)) { hba 1381 drivers/scsi/hptiop.c hba->host->host_no); hba 1384 drivers/scsi/hptiop.c if (hba->ops->reset_comm(hba)) { hba 1386 drivers/scsi/hptiop.c hba->host->host_no); hba 1391 drivers/scsi/hptiop.c if (hba->firmware_version > 0x01020000 || hba 1392 drivers/scsi/hptiop.c hba->interface_version > 0x01020000) hba 1393 drivers/scsi/hptiop.c hba->iopintf_v2 = 1; hba 1403 drivers/scsi/hptiop.c + sizeof(struct hpt_iopsg) * (hba->max_sg_descriptors - 1); hba 1412 drivers/scsi/hptiop.c if (hba->ops->set_config(hba, &set_config)) { hba 1414 drivers/scsi/hptiop.c hba->host->host_no); hba 1421 drivers/scsi/hptiop.c driver_name, hba)) { hba 1423 drivers/scsi/hptiop.c hba->host->host_no, pcidev->irq); hba 1429 drivers/scsi/hptiop.c dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests); hba 1431 drivers/scsi/hptiop.c hba->req_size = req_size; hba 1432 drivers/scsi/hptiop.c hba->req_list = NULL; hba 1434 drivers/scsi/hptiop.c for (i = 0; i < hba->max_requests; i++) { hba 1436 drivers/scsi/hptiop.c hba->req_size + 0x20, hba 1441 drivers/scsi/hptiop.c hba->host->host_no); hba 1445 drivers/scsi/hptiop.c hba->dma_coherent[i] = start_virt; hba 1446 drivers/scsi/hptiop.c hba->dma_coherent_handle[i] = start_phy; hba 1454 drivers/scsi/hptiop.c hba->reqs[i].next = NULL; hba 1455 drivers/scsi/hptiop.c hba->reqs[i].req_virt = start_virt; hba 1456 drivers/scsi/hptiop.c hba->reqs[i].req_shifted_phy = start_phy >> 5; hba 1457 drivers/scsi/hptiop.c hba->reqs[i].index = i; hba 1458 drivers/scsi/hptiop.c free_req(hba, &hba->reqs[i]); hba 1462 drivers/scsi/hptiop.c if (hptiop_initialize_iop(hba)) hba 1467 drivers/scsi/hptiop.c hba->host->host_no); hba 1473 drivers/scsi/hptiop.c dprintk("scsi%d: hptiop_probe successfully\n", hba->host->host_no); hba 1477 drivers/scsi/hptiop.c for (i = 0; i < hba->max_requests; i++) { hba 1478 drivers/scsi/hptiop.c if 
(hba->dma_coherent[i] && hba->dma_coherent_handle[i]) hba 1479 drivers/scsi/hptiop.c dma_free_coherent(&hba->pcidev->dev, hba 1480 drivers/scsi/hptiop.c hba->req_size + 0x20, hba 1481 drivers/scsi/hptiop.c hba->dma_coherent[i], hba 1482 drivers/scsi/hptiop.c hba->dma_coherent_handle[i]); hba 1487 drivers/scsi/hptiop.c free_irq(hba->pcidev->irq, hba); hba 1490 drivers/scsi/hptiop.c hba->ops->internal_memfree(hba); hba 1492 drivers/scsi/hptiop.c hba->ops->unmap_pci_bar(hba); hba 1510 drivers/scsi/hptiop.c struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata; hba 1512 drivers/scsi/hptiop.c dprintk("hptiop_shutdown(%p)\n", hba); hba 1515 drivers/scsi/hptiop.c if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000)) hba 1517 drivers/scsi/hptiop.c hba->host->host_no); hba 1520 drivers/scsi/hptiop.c hba->ops->disable_intr(hba); hba 1523 drivers/scsi/hptiop.c static void hptiop_disable_intr_itl(struct hptiop_hba *hba) hba 1527 drivers/scsi/hptiop.c int_mask = readl(&hba->u.itl.iop->outbound_intmask); hba 1530 drivers/scsi/hptiop.c &hba->u.itl.iop->outbound_intmask); hba 1531 drivers/scsi/hptiop.c readl(&hba->u.itl.iop->outbound_intmask); hba 1534 drivers/scsi/hptiop.c static void hptiop_disable_intr_mv(struct hptiop_hba *hba) hba 1536 drivers/scsi/hptiop.c writel(0, &hba->u.mv.regs->outbound_intmask); hba 1537 drivers/scsi/hptiop.c readl(&hba->u.mv.regs->outbound_intmask); hba 1540 drivers/scsi/hptiop.c static void hptiop_disable_intr_mvfrey(struct hptiop_hba *hba) hba 1542 drivers/scsi/hptiop.c writel(0, &(hba->u.mvfrey.mu->f0_doorbell_enable)); hba 1543 drivers/scsi/hptiop.c readl(&(hba->u.mvfrey.mu->f0_doorbell_enable)); hba 1544 drivers/scsi/hptiop.c writel(0, &(hba->u.mvfrey.mu->isr_enable)); hba 1545 drivers/scsi/hptiop.c readl(&(hba->u.mvfrey.mu->isr_enable)); hba 1546 drivers/scsi/hptiop.c writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable)); hba 1547 drivers/scsi/hptiop.c readl(&(hba->u.mvfrey.mu->pcie_f0_int_enable)); hba 1553 drivers/scsi/hptiop.c struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata; hba 1556 drivers/scsi/hptiop.c dprintk("scsi%d: hptiop_remove\n", hba->host->host_no); hba 1562 drivers/scsi/hptiop.c free_irq(hba->pcidev->irq, hba); hba 1564 drivers/scsi/hptiop.c for (i = 0; i < hba->max_requests; i++) { hba 1565 drivers/scsi/hptiop.c if (hba->dma_coherent[i] && hba->dma_coherent_handle[i]) hba 1566 drivers/scsi/hptiop.c dma_free_coherent(&hba->pcidev->dev, hba 1567 drivers/scsi/hptiop.c hba->req_size + 0x20, hba 1568 drivers/scsi/hptiop.c hba->dma_coherent[i], hba 1569 drivers/scsi/hptiop.c hba->dma_coherent_handle[i]); hba 1574 drivers/scsi/hptiop.c hba->ops->internal_memfree(hba); hba 1576 drivers/scsi/hptiop.c hba->ops->unmap_pci_bar(hba); hba 1578 drivers/scsi/hptiop.c pci_release_regions(hba->pcidev); hba 1579 drivers/scsi/hptiop.c pci_set_drvdata(hba->pcidev, NULL); hba 1580 drivers/scsi/hptiop.c pci_disable_device(hba->pcidev); hba 333 drivers/scsi/hptiop.h struct hptiop_hba * hba; hba 346 drivers/scsi/hptiop.h int (*iop_wait_ready)(struct hptiop_hba *hba, u32 millisec); hba 347 drivers/scsi/hptiop.h int (*internal_memalloc)(struct hptiop_hba *hba); hba 348 drivers/scsi/hptiop.h int (*internal_memfree)(struct hptiop_hba *hba); hba 349 drivers/scsi/hptiop.h int (*map_pci_bar)(struct hptiop_hba *hba); hba 350 drivers/scsi/hptiop.h void (*unmap_pci_bar)(struct hptiop_hba *hba); hba 351 drivers/scsi/hptiop.h void (*enable_intr)(struct hptiop_hba *hba); hba 352 drivers/scsi/hptiop.h void (*disable_intr)(struct hptiop_hba *hba); hba 353 
drivers/scsi/hptiop.h int (*get_config)(struct hptiop_hba *hba, hba 355 drivers/scsi/hptiop.h int (*set_config)(struct hptiop_hba *hba, hba 357 drivers/scsi/hptiop.h int (*iop_intr)(struct hptiop_hba *hba); hba 358 drivers/scsi/hptiop.h void (*post_msg)(struct hptiop_hba *hba, u32 msg); hba 359 drivers/scsi/hptiop.h void (*post_req)(struct hptiop_hba *hba, struct hptiop_request *_req); hba 361 drivers/scsi/hptiop.h int (*reset_comm)(struct hptiop_hba *hba); hba 117 drivers/scsi/megaraid.c #define IS_RAID_CH(hba, ch) (((hba)->mega_ch_class >> (ch)) & 0x01) hba 388 drivers/scsi/stex.c static struct status_msg *stex_get_status(struct st_hba *hba) hba 390 drivers/scsi/stex.c struct status_msg *status = hba->status_buffer + hba->status_tail; hba 392 drivers/scsi/stex.c ++hba->status_tail; hba 393 drivers/scsi/stex.c hba->status_tail %= hba->sts_count+1; hba 409 drivers/scsi/stex.c static struct req_msg *stex_alloc_req(struct st_hba *hba) hba 411 drivers/scsi/stex.c struct req_msg *req = hba->dma_mem + hba->req_head * hba->rq_size; hba 413 drivers/scsi/stex.c ++hba->req_head; hba 414 drivers/scsi/stex.c hba->req_head %= hba->rq_count+1; hba 419 drivers/scsi/stex.c static struct req_msg *stex_ss_alloc_req(struct st_hba *hba) hba 421 drivers/scsi/stex.c return (struct req_msg *)(hba->dma_mem + hba 422 drivers/scsi/stex.c hba->req_head * hba->rq_size + sizeof(struct st_msg_header)); hba 425 drivers/scsi/stex.c static int stex_map_sg(struct st_hba *hba, hba 442 drivers/scsi/stex.c dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize); hba 457 drivers/scsi/stex.c static int stex_ss_map_sg(struct st_hba *hba, hba 474 drivers/scsi/stex.c dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize); hba 490 drivers/scsi/stex.c static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb) hba 495 drivers/scsi/stex.c p = hba->copy_buffer; hba 498 drivers/scsi/stex.c *(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0); hba 506 drivers/scsi/stex.c p->bus = hba->pdev->bus->number; hba 507 drivers/scsi/stex.c p->slot = hba->pdev->devfn; hba 509 drivers/scsi/stex.c p->irq_vec = hba->pdev->irq; hba 510 drivers/scsi/stex.c p->id = hba->pdev->vendor << 16 | hba->pdev->device; hba 512 drivers/scsi/stex.c hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device; hba 518 drivers/scsi/stex.c stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag) hba 522 drivers/scsi/stex.c hba->ccb[tag].req = req; hba 523 drivers/scsi/stex.c hba->out_req_cnt++; hba 525 drivers/scsi/stex.c writel(hba->req_head, hba->mmio_base + IMR0); hba 526 drivers/scsi/stex.c writel(MU_INBOUND_DOORBELL_REQHEADCHANGED, hba->mmio_base + IDBL); hba 527 drivers/scsi/stex.c readl(hba->mmio_base + IDBL); /* flush */ hba 531 drivers/scsi/stex.c stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag) hba 539 drivers/scsi/stex.c hba->ccb[tag].req = req; hba 540 drivers/scsi/stex.c hba->out_req_cnt++; hba 542 drivers/scsi/stex.c cmd = hba->ccb[tag].cmd; hba 548 drivers/scsi/stex.c addr = hba->dma_handle + hba->req_head * hba->rq_size; hba 549 drivers/scsi/stex.c addr += (hba->ccb[tag].sg_count+4)/11; hba 552 drivers/scsi/stex.c ++hba->req_head; hba 553 drivers/scsi/stex.c hba->req_head %= hba->rq_count+1; hba 554 drivers/scsi/stex.c if (hba->cardtype == st_P3) { hba 555 drivers/scsi/stex.c writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI); hba 556 drivers/scsi/stex.c writel(addr, hba->mmio_base + YH2I_REQ); hba 558 drivers/scsi/stex.c writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI); hba 559 
drivers/scsi/stex.c readl(hba->mmio_base + YH2I_REQ_HI); /* flush */ hba 560 drivers/scsi/stex.c writel(addr, hba->mmio_base + YH2I_REQ); hba 561 drivers/scsi/stex.c readl(hba->mmio_base + YH2I_REQ); /* flush */ hba 565 drivers/scsi/stex.c static void return_abnormal_state(struct st_hba *hba, int status) hba 571 drivers/scsi/stex.c spin_lock_irqsave(hba->host->host_lock, flags); hba 572 drivers/scsi/stex.c for (tag = 0; tag < hba->host->can_queue; tag++) { hba 573 drivers/scsi/stex.c ccb = &hba->ccb[tag]; hba 584 drivers/scsi/stex.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 599 drivers/scsi/stex.c struct st_hba *hba; hba 608 drivers/scsi/stex.c hba = (struct st_hba *) &host->hostdata[0]; hba 609 drivers/scsi/stex.c if (hba->mu_status == MU_STATE_NOCONNECT) { hba 614 drivers/scsi/stex.c if (unlikely(hba->mu_status != MU_STATE_STARTED)) hba 640 drivers/scsi/stex.c if (hba->cardtype == st_shasta || id == host->max_id - 1) { hba 680 drivers/scsi/stex.c ver.host_no = hba->host->host_no; hba 699 drivers/scsi/stex.c req = hba->alloc_rq(hba); hba 714 drivers/scsi/stex.c hba->ccb[tag].cmd = cmd; hba 715 drivers/scsi/stex.c hba->ccb[tag].sense_bufflen = SCSI_SENSE_BUFFERSIZE; hba 716 drivers/scsi/stex.c hba->ccb[tag].sense_buffer = cmd->sense_buffer; hba 718 drivers/scsi/stex.c if (!hba->map_sg(hba, req, &hba->ccb[tag])) { hba 719 drivers/scsi/stex.c hba->ccb[tag].sg_count = 0; hba 723 drivers/scsi/stex.c hba->send(hba, req, tag); hba 786 drivers/scsi/stex.c static void stex_check_cmd(struct st_hba *hba, hba 795 drivers/scsi/stex.c static void stex_mu_intr(struct st_hba *hba, u32 doorbell) hba 797 drivers/scsi/stex.c void __iomem *base = hba->mmio_base; hba 807 drivers/scsi/stex.c hba->status_head = readl(base + OMR1); hba 808 drivers/scsi/stex.c if (unlikely(hba->status_head > hba->sts_count)) { hba 810 drivers/scsi/stex.c pci_name(hba->pdev)); hba 822 drivers/scsi/stex.c if (unlikely(hba->out_req_cnt <= 0 || hba 823 drivers/scsi/stex.c (hba->mu_status == MU_STATE_RESETTING && hba 824 drivers/scsi/stex.c hba->cardtype != st_yosemite))) { hba 825 drivers/scsi/stex.c hba->status_tail = hba->status_head; hba 829 drivers/scsi/stex.c while (hba->status_tail != hba->status_head) { hba 830 drivers/scsi/stex.c resp = stex_get_status(hba); hba 832 drivers/scsi/stex.c if (unlikely(tag >= hba->host->can_queue)) { hba 834 drivers/scsi/stex.c "(%s): invalid tag\n", pci_name(hba->pdev)); hba 838 drivers/scsi/stex.c hba->out_req_cnt--; hba 839 drivers/scsi/stex.c ccb = &hba->ccb[tag]; hba 840 drivers/scsi/stex.c if (unlikely(hba->wait_ccb == ccb)) hba 841 drivers/scsi/stex.c hba->wait_ccb = NULL; hba 844 drivers/scsi/stex.c "(%s): lagging req\n", pci_name(hba->pdev)); hba 852 drivers/scsi/stex.c pci_name(hba->pdev)); hba 864 drivers/scsi/stex.c if (hba->cardtype == st_yosemite) hba 865 drivers/scsi/stex.c stex_check_cmd(hba, ccb, resp); hba 869 drivers/scsi/stex.c stex_controller_info(hba, ccb); hba 878 drivers/scsi/stex.c writel(hba->status_head, base + IMR1); hba 884 drivers/scsi/stex.c struct st_hba *hba = __hba; hba 885 drivers/scsi/stex.c void __iomem *base = hba->mmio_base; hba 889 drivers/scsi/stex.c spin_lock_irqsave(hba->host->host_lock, flags); hba 897 drivers/scsi/stex.c stex_mu_intr(hba, data); hba 898 drivers/scsi/stex.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 900 drivers/scsi/stex.c hba->cardtype == st_shasta)) hba 901 drivers/scsi/stex.c queue_work(hba->work_q, &hba->reset_work); hba 905 drivers/scsi/stex.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 910 
drivers/scsi/stex.c static void stex_ss_mu_intr(struct st_hba *hba) hba 920 drivers/scsi/stex.c if (unlikely(hba->out_req_cnt <= 0 || hba 921 drivers/scsi/stex.c hba->mu_status == MU_STATE_RESETTING)) hba 924 drivers/scsi/stex.c while (count < hba->sts_count) { hba 925 drivers/scsi/stex.c scratch = hba->scratch + hba->status_tail; hba 930 drivers/scsi/stex.c resp = hba->status_buffer + hba->status_tail; hba 933 drivers/scsi/stex.c ++hba->status_tail; hba 934 drivers/scsi/stex.c hba->status_tail %= hba->sts_count+1; hba 937 drivers/scsi/stex.c if (unlikely(tag >= hba->host->can_queue)) { hba 939 drivers/scsi/stex.c "(%s): invalid tag\n", pci_name(hba->pdev)); hba 943 drivers/scsi/stex.c hba->out_req_cnt--; hba 944 drivers/scsi/stex.c ccb = &hba->ccb[tag]; hba 945 drivers/scsi/stex.c if (unlikely(hba->wait_ccb == ccb)) hba 946 drivers/scsi/stex.c hba->wait_ccb = NULL; hba 949 drivers/scsi/stex.c "(%s): lagging req\n", pci_name(hba->pdev)); hba 965 drivers/scsi/stex.c pci_name(hba->pdev)); hba 972 drivers/scsi/stex.c stex_check_cmd(hba, ccb, resp); hba 985 drivers/scsi/stex.c struct st_hba *hba = __hba; hba 986 drivers/scsi/stex.c void __iomem *base = hba->mmio_base; hba 990 drivers/scsi/stex.c spin_lock_irqsave(hba->host->host_lock, flags); hba 992 drivers/scsi/stex.c if (hba->cardtype == st_yel) { hba 997 drivers/scsi/stex.c stex_ss_mu_intr(hba); hba 998 drivers/scsi/stex.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 1000 drivers/scsi/stex.c queue_work(hba->work_q, &hba->reset_work); hba 1011 drivers/scsi/stex.c stex_ss_mu_intr(hba); hba 1012 drivers/scsi/stex.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 1014 drivers/scsi/stex.c queue_work(hba->work_q, &hba->reset_work); hba 1019 drivers/scsi/stex.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 1024 drivers/scsi/stex.c static int stex_common_handshake(struct st_hba *hba) hba 1026 drivers/scsi/stex.c void __iomem *base = hba->mmio_base; hba 1040 drivers/scsi/stex.c pci_name(hba->pdev)); hba 1053 drivers/scsi/stex.c if (hba->host->can_queue > data) { hba 1054 drivers/scsi/stex.c hba->host->can_queue = data; hba 1055 drivers/scsi/stex.c hba->host->cmd_per_lun = data; hba 1059 drivers/scsi/stex.c h = (struct handshake_frame *)hba->status_buffer; hba 1060 drivers/scsi/stex.c h->rb_phy = cpu_to_le64(hba->dma_handle); hba 1061 drivers/scsi/stex.c h->req_sz = cpu_to_le16(hba->rq_size); hba 1062 drivers/scsi/stex.c h->req_cnt = cpu_to_le16(hba->rq_count+1); hba 1064 drivers/scsi/stex.c h->status_cnt = cpu_to_le16(hba->sts_count+1); hba 1067 drivers/scsi/stex.c if (hba->extra_offset) { hba 1068 drivers/scsi/stex.c h->extra_offset = cpu_to_le32(hba->extra_offset); hba 1069 drivers/scsi/stex.c h->extra_size = cpu_to_le32(hba->dma_size - hba->extra_offset); hba 1073 drivers/scsi/stex.c status_phys = hba->dma_handle + (hba->rq_count+1) * hba->rq_size; hba 1090 drivers/scsi/stex.c pci_name(hba->pdev)); hba 1108 drivers/scsi/stex.c static int stex_ss_handshake(struct st_hba *hba) hba 1110 drivers/scsi/stex.c void __iomem *base = hba->mmio_base; hba 1120 drivers/scsi/stex.c if (hba->cardtype == st_yel) { hba 1126 drivers/scsi/stex.c pci_name(hba->pdev)); hba 1138 drivers/scsi/stex.c pci_name(hba->pdev)); hba 1146 drivers/scsi/stex.c msg_h = (struct st_msg_header *)hba->dma_mem; hba 1147 drivers/scsi/stex.c msg_h->handle = cpu_to_le64(hba->dma_handle); hba 1151 drivers/scsi/stex.c h->rb_phy = cpu_to_le64(hba->dma_handle); hba 1152 drivers/scsi/stex.c h->req_sz = cpu_to_le16(hba->rq_size); hba 1153 drivers/scsi/stex.c 
h->req_cnt = cpu_to_le16(hba->rq_count+1); hba 1155 drivers/scsi/stex.c h->status_cnt = cpu_to_le16(hba->sts_count+1); hba 1159 drivers/scsi/stex.c scratch_size = (hba->sts_count+1)*sizeof(u32); hba 1162 drivers/scsi/stex.c if (hba->cardtype == st_yel) { hba 1166 drivers/scsi/stex.c writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI); hba 1168 drivers/scsi/stex.c writel(hba->dma_handle, base + YH2I_REQ); hba 1175 drivers/scsi/stex.c if (hba->msi_lock == 0) { hba 1178 drivers/scsi/stex.c hba->msi_lock = 1; hba 1180 drivers/scsi/stex.c writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI); hba 1181 drivers/scsi/stex.c writel(hba->dma_handle, base + YH2I_REQ); hba 1185 drivers/scsi/stex.c scratch = hba->scratch; hba 1186 drivers/scsi/stex.c if (hba->cardtype == st_yel) { hba 1191 drivers/scsi/stex.c pci_name(hba->pdev)); hba 1204 drivers/scsi/stex.c pci_name(hba->pdev)); hba 1219 drivers/scsi/stex.c static int stex_handshake(struct st_hba *hba) hba 1225 drivers/scsi/stex.c if (hba->cardtype == st_yel || hba->cardtype == st_P3) hba 1226 drivers/scsi/stex.c err = stex_ss_handshake(hba); hba 1228 drivers/scsi/stex.c err = stex_common_handshake(hba); hba 1229 drivers/scsi/stex.c spin_lock_irqsave(hba->host->host_lock, flags); hba 1230 drivers/scsi/stex.c mu_status = hba->mu_status; hba 1232 drivers/scsi/stex.c hba->req_head = 0; hba 1233 drivers/scsi/stex.c hba->req_tail = 0; hba 1234 drivers/scsi/stex.c hba->status_head = 0; hba 1235 drivers/scsi/stex.c hba->status_tail = 0; hba 1236 drivers/scsi/stex.c hba->out_req_cnt = 0; hba 1237 drivers/scsi/stex.c hba->mu_status = MU_STATE_STARTED; hba 1239 drivers/scsi/stex.c hba->mu_status = MU_STATE_FAILED; hba 1241 drivers/scsi/stex.c wake_up_all(&hba->reset_waitq); hba 1242 drivers/scsi/stex.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 1249 drivers/scsi/stex.c struct st_hba *hba = (struct st_hba *)host->hostdata; hba 1258 drivers/scsi/stex.c base = hba->mmio_base; hba 1261 drivers/scsi/stex.c hba->ccb[tag].req && hba->ccb[tag].cmd == cmd) hba 1262 drivers/scsi/stex.c hba->wait_ccb = &hba->ccb[tag]; hba 1266 drivers/scsi/stex.c if (hba->cardtype == st_yel) { hba 1272 drivers/scsi/stex.c stex_ss_mu_intr(hba); hba 1273 drivers/scsi/stex.c } else if (hba->cardtype == st_P3) { hba 1281 drivers/scsi/stex.c stex_ss_mu_intr(hba); hba 1289 drivers/scsi/stex.c stex_mu_intr(hba, data); hba 1291 drivers/scsi/stex.c if (hba->wait_ccb == NULL) { hba 1293 drivers/scsi/stex.c "(%s): lost interrupt\n", pci_name(hba->pdev)); hba 1299 drivers/scsi/stex.c hba->wait_ccb->req = NULL; /* nullify the req's future return */ hba 1300 drivers/scsi/stex.c hba->wait_ccb = NULL; hba 1307 drivers/scsi/stex.c static void stex_hard_reset(struct st_hba *hba) hba 1315 drivers/scsi/stex.c pci_read_config_dword(hba->pdev, i * 4, hba 1316 drivers/scsi/stex.c &hba->pdev->saved_config_space[i]); hba 1320 drivers/scsi/stex.c bus = hba->pdev->bus; hba 1334 drivers/scsi/stex.c pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd); hba 1342 drivers/scsi/stex.c pci_write_config_dword(hba->pdev, i * 4, hba 1343 drivers/scsi/stex.c hba->pdev->saved_config_space[i]); hba 1346 drivers/scsi/stex.c static int stex_yos_reset(struct st_hba *hba) hba 1352 drivers/scsi/stex.c base = hba->mmio_base; hba 1356 drivers/scsi/stex.c while (hba->out_req_cnt > 0) { hba 1359 drivers/scsi/stex.c "(%s): reset timeout\n", pci_name(hba->pdev)); hba 1366 drivers/scsi/stex.c spin_lock_irqsave(hba->host->host_lock, flags); hba 1368 drivers/scsi/stex.c hba->mu_status = MU_STATE_FAILED; hba 1370 
drivers/scsi/stex.c hba->mu_status = MU_STATE_STARTED; hba 1371 drivers/scsi/stex.c wake_up_all(&hba->reset_waitq); hba 1372 drivers/scsi/stex.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 1377 drivers/scsi/stex.c static void stex_ss_reset(struct st_hba *hba) hba 1379 drivers/scsi/stex.c writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT); hba 1380 drivers/scsi/stex.c readl(hba->mmio_base + YH2I_INT); hba 1384 drivers/scsi/stex.c static void stex_p3_reset(struct st_hba *hba) hba 1386 drivers/scsi/stex.c writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT); hba 1390 drivers/scsi/stex.c static int stex_do_reset(struct st_hba *hba) hba 1395 drivers/scsi/stex.c spin_lock_irqsave(hba->host->host_lock, flags); hba 1396 drivers/scsi/stex.c if (hba->mu_status == MU_STATE_STARTING) { hba 1397 drivers/scsi/stex.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 1399 drivers/scsi/stex.c pci_name(hba->pdev)); hba 1402 drivers/scsi/stex.c while (hba->mu_status == MU_STATE_RESETTING) { hba 1403 drivers/scsi/stex.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 1404 drivers/scsi/stex.c wait_event_timeout(hba->reset_waitq, hba 1405 drivers/scsi/stex.c hba->mu_status != MU_STATE_RESETTING, hba 1407 drivers/scsi/stex.c spin_lock_irqsave(hba->host->host_lock, flags); hba 1408 drivers/scsi/stex.c mu_status = hba->mu_status; hba 1412 drivers/scsi/stex.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 1416 drivers/scsi/stex.c hba->mu_status = MU_STATE_RESETTING; hba 1417 drivers/scsi/stex.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 1419 drivers/scsi/stex.c if (hba->cardtype == st_yosemite) hba 1420 drivers/scsi/stex.c return stex_yos_reset(hba); hba 1422 drivers/scsi/stex.c if (hba->cardtype == st_shasta) hba 1423 drivers/scsi/stex.c stex_hard_reset(hba); hba 1424 drivers/scsi/stex.c else if (hba->cardtype == st_yel) hba 1425 drivers/scsi/stex.c stex_ss_reset(hba); hba 1426 drivers/scsi/stex.c else if (hba->cardtype == st_P3) hba 1427 drivers/scsi/stex.c stex_p3_reset(hba); hba 1429 drivers/scsi/stex.c return_abnormal_state(hba, DID_RESET); hba 1431 drivers/scsi/stex.c if (stex_handshake(hba) == 0) hba 1435 drivers/scsi/stex.c pci_name(hba->pdev)); hba 1441 drivers/scsi/stex.c struct st_hba *hba; hba 1443 drivers/scsi/stex.c hba = (struct st_hba *) &cmd->device->host->hostdata[0]; hba 1448 drivers/scsi/stex.c return stex_do_reset(hba) ? FAILED : SUCCESS; hba 1453 drivers/scsi/stex.c struct st_hba *hba = container_of(work, struct st_hba, reset_work); hba 1455 drivers/scsi/stex.c stex_do_reset(hba); hba 1616 drivers/scsi/stex.c static int stex_request_irq(struct st_hba *hba) hba 1618 drivers/scsi/stex.c struct pci_dev *pdev = hba->pdev; hba 1621 drivers/scsi/stex.c if (msi || hba->cardtype == st_P3) { hba 1628 drivers/scsi/stex.c hba->msi_enabled = 1; hba 1630 drivers/scsi/stex.c hba->msi_enabled = 0; hba 1633 drivers/scsi/stex.c (hba->cardtype == st_yel || hba->cardtype == st_P3) ? 
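The stex entries above bind per-card callbacks out of a card-info table at probe time (hba->alloc_rq = ci->alloc_rq; hba->map_sg = ci->map_sg; hba->send = ci->send;) and then dispatch through hba->send on the command path. Below is a minimal user-space sketch of that dispatch-table pattern; every name in it (fake_hba, card_ops, send_doorbell, send_ss) is a hypothetical stand-in, not the driver's real types.

#include <stdio.h>

struct fake_hba;

/* one ops struct per card type, like stex's st_card_info table */
struct card_ops {
	void (*send)(struct fake_hba *hba);
};

struct fake_hba {
	const char *name;
	void (*send)(struct fake_hba *hba);	/* copied from card_ops at probe */
};

static void send_doorbell(struct fake_hba *hba)
{
	printf("%s: doorbell-style submit\n", hba->name);
}

static void send_ss(struct fake_hba *hba)
{
	printf("%s: ss-style submit\n", hba->name);
}

static const struct card_ops card_info[] = {
	{ .send = send_doorbell },	/* older card class */
	{ .send = send_ss },		/* newer card class */
};

int main(void)
{
	struct fake_hba hba = { .name = "hba0" };
	int cardtype = 1;			/* hypothetical: derived from PCI id */

	hba.send = card_info[cardtype].send;	/* probe-time binding */
	hba.send(&hba);				/* hot path: no cardtype test */
	return 0;
}

Binding the pointers once at probe keeps the per-command path free of cardtype branches, which is why the queuecommand path above can call hba->send(hba, req, tag) unconditionally.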
hba 1634 drivers/scsi/stex.c stex_ss_intr : stex_intr, IRQF_SHARED, DRV_NAME, hba); hba 1637 drivers/scsi/stex.c if (hba->msi_enabled) hba 1643 drivers/scsi/stex.c static void stex_free_irq(struct st_hba *hba) hba 1645 drivers/scsi/stex.c struct pci_dev *pdev = hba->pdev; hba 1647 drivers/scsi/stex.c free_irq(pdev->irq, hba); hba 1648 drivers/scsi/stex.c if (hba->msi_enabled) hba 1654 drivers/scsi/stex.c struct st_hba *hba; hba 1678 drivers/scsi/stex.c hba = (struct st_hba *)host->hostdata; hba 1679 drivers/scsi/stex.c memset(hba, 0, sizeof(struct st_hba)); hba 1688 drivers/scsi/stex.c hba->mmio_base = pci_ioremap_bar(pdev, 0); hba 1689 drivers/scsi/stex.c if ( !hba->mmio_base) { hba 1705 drivers/scsi/stex.c hba->cardtype = (unsigned int) id->driver_data; hba 1706 drivers/scsi/stex.c ci = &stex_card_info[hba->cardtype]; hba 1722 drivers/scsi/stex.c if (hba->cardtype == st_yel || hba->cardtype == st_P3) hba 1723 drivers/scsi/stex.c hba->supports_pm = 1; hba 1727 drivers/scsi/stex.c if (hba->cardtype == st_yel || hba->cardtype == st_P3) hba 1730 drivers/scsi/stex.c hba->dma_size = cp_offset + sizeof(struct st_frame); hba 1731 drivers/scsi/stex.c if (hba->cardtype == st_seq || hba 1732 drivers/scsi/stex.c (hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) { hba 1733 drivers/scsi/stex.c hba->extra_offset = hba->dma_size; hba 1734 drivers/scsi/stex.c hba->dma_size += ST_ADDITIONAL_MEM; hba 1736 drivers/scsi/stex.c hba->dma_mem = dma_alloc_coherent(&pdev->dev, hba 1737 drivers/scsi/stex.c hba->dma_size, &hba->dma_handle, GFP_KERNEL); hba 1738 drivers/scsi/stex.c if (!hba->dma_mem) { hba 1740 drivers/scsi/stex.c if (hba->cardtype == st_seq || hba 1741 drivers/scsi/stex.c (hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) { hba 1745 drivers/scsi/stex.c hba->dma_size = hba->extra_offset hba 1747 drivers/scsi/stex.c hba->dma_mem = dma_alloc_coherent(&pdev->dev, hba 1748 drivers/scsi/stex.c hba->dma_size, &hba->dma_handle, GFP_KERNEL); hba 1751 drivers/scsi/stex.c if (!hba->dma_mem) { hba 1759 drivers/scsi/stex.c hba->ccb = kcalloc(ci->rq_count, sizeof(struct st_ccb), GFP_KERNEL); hba 1760 drivers/scsi/stex.c if (!hba->ccb) { hba 1767 drivers/scsi/stex.c if (hba->cardtype == st_yel || hba->cardtype == st_P3) hba 1768 drivers/scsi/stex.c hba->scratch = (__le32 *)(hba->dma_mem + scratch_offset); hba 1769 drivers/scsi/stex.c hba->status_buffer = (struct status_msg *)(hba->dma_mem + sts_offset); hba 1770 drivers/scsi/stex.c hba->copy_buffer = hba->dma_mem + cp_offset; hba 1771 drivers/scsi/stex.c hba->rq_count = ci->rq_count; hba 1772 drivers/scsi/stex.c hba->rq_size = ci->rq_size; hba 1773 drivers/scsi/stex.c hba->sts_count = ci->sts_count; hba 1774 drivers/scsi/stex.c hba->alloc_rq = ci->alloc_rq; hba 1775 drivers/scsi/stex.c hba->map_sg = ci->map_sg; hba 1776 drivers/scsi/stex.c hba->send = ci->send; hba 1777 drivers/scsi/stex.c hba->mu_status = MU_STATE_STARTING; hba 1778 drivers/scsi/stex.c hba->msi_lock = 0; hba 1780 drivers/scsi/stex.c if (hba->cardtype == st_yel || hba->cardtype == st_P3) hba 1792 drivers/scsi/stex.c hba->host = host; hba 1793 drivers/scsi/stex.c hba->pdev = pdev; hba 1794 drivers/scsi/stex.c init_waitqueue_head(&hba->reset_waitq); hba 1796 drivers/scsi/stex.c snprintf(hba->work_q_name, sizeof(hba->work_q_name), hba 1798 drivers/scsi/stex.c hba->work_q = create_singlethread_workqueue(hba->work_q_name); hba 1799 drivers/scsi/stex.c if (!hba->work_q) { hba 1805 drivers/scsi/stex.c INIT_WORK(&hba->reset_work, stex_reset_work); hba 1807 drivers/scsi/stex.c err = 
stex_request_irq(hba); hba 1814 drivers/scsi/stex.c err = stex_handshake(hba); hba 1818 drivers/scsi/stex.c pci_set_drvdata(pdev, hba); hba 1832 drivers/scsi/stex.c stex_free_irq(hba); hba 1834 drivers/scsi/stex.c destroy_workqueue(hba->work_q); hba 1836 drivers/scsi/stex.c kfree(hba->ccb); hba 1838 drivers/scsi/stex.c dma_free_coherent(&pdev->dev, hba->dma_size, hba 1839 drivers/scsi/stex.c hba->dma_mem, hba->dma_handle); hba 1841 drivers/scsi/stex.c iounmap(hba->mmio_base); hba 1852 drivers/scsi/stex.c static void stex_hba_stop(struct st_hba *hba, int st_sleep_mic) hba 1860 drivers/scsi/stex.c spin_lock_irqsave(hba->host->host_lock, flags); hba 1862 drivers/scsi/stex.c if ((hba->cardtype == st_yel || hba->cardtype == st_P3) && hba 1863 drivers/scsi/stex.c hba->supports_pm == 1) { hba 1865 drivers/scsi/stex.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 1869 drivers/scsi/stex.c req = hba->alloc_rq(hba); hba 1870 drivers/scsi/stex.c if (hba->cardtype == st_yel || hba->cardtype == st_P3) { hba 1872 drivers/scsi/stex.c memset(msg_h, 0, hba->rq_size); hba 1874 drivers/scsi/stex.c memset(req, 0, hba->rq_size); hba 1876 drivers/scsi/stex.c if ((hba->cardtype == st_yosemite || hba->cardtype == st_yel hba 1877 drivers/scsi/stex.c || hba->cardtype == st_P3) hba 1883 drivers/scsi/stex.c } else if ((hba->cardtype == st_yel || hba->cardtype == st_P3) hba 1895 drivers/scsi/stex.c hba->ccb[tag].cmd = NULL; hba 1896 drivers/scsi/stex.c hba->ccb[tag].sg_count = 0; hba 1897 drivers/scsi/stex.c hba->ccb[tag].sense_bufflen = 0; hba 1898 drivers/scsi/stex.c hba->ccb[tag].sense_buffer = NULL; hba 1899 drivers/scsi/stex.c hba->ccb[tag].req_type = PASSTHRU_REQ_TYPE; hba 1900 drivers/scsi/stex.c hba->send(hba, req, tag); hba 1901 drivers/scsi/stex.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 1903 drivers/scsi/stex.c while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) { hba 1905 drivers/scsi/stex.c hba->ccb[tag].req_type = 0; hba 1906 drivers/scsi/stex.c hba->mu_status = MU_STATE_STOP; hba 1911 drivers/scsi/stex.c hba->mu_status = MU_STATE_STOP; hba 1914 drivers/scsi/stex.c static void stex_hba_free(struct st_hba *hba) hba 1916 drivers/scsi/stex.c stex_free_irq(hba); hba 1918 drivers/scsi/stex.c destroy_workqueue(hba->work_q); hba 1920 drivers/scsi/stex.c iounmap(hba->mmio_base); hba 1922 drivers/scsi/stex.c pci_release_regions(hba->pdev); hba 1924 drivers/scsi/stex.c kfree(hba->ccb); hba 1926 drivers/scsi/stex.c dma_free_coherent(&hba->pdev->dev, hba->dma_size, hba 1927 drivers/scsi/stex.c hba->dma_mem, hba->dma_handle); hba 1932 drivers/scsi/stex.c struct st_hba *hba = pci_get_drvdata(pdev); hba 1934 drivers/scsi/stex.c hba->mu_status = MU_STATE_NOCONNECT; hba 1935 drivers/scsi/stex.c return_abnormal_state(hba, DID_NO_CONNECT); hba 1936 drivers/scsi/stex.c scsi_remove_host(hba->host); hba 1938 drivers/scsi/stex.c scsi_block_requests(hba->host); hba 1940 drivers/scsi/stex.c stex_hba_free(hba); hba 1942 drivers/scsi/stex.c scsi_host_put(hba->host); hba 1951 drivers/scsi/stex.c struct st_hba *hba = pci_get_drvdata(pdev); hba 1953 drivers/scsi/stex.c if (hba->supports_pm == 0) { hba 1954 drivers/scsi/stex.c stex_hba_stop(hba, ST_IGNORED); hba 1955 drivers/scsi/stex.c } else if (hba->supports_pm == 1 && S6flag) { hba 1957 drivers/scsi/stex.c stex_hba_stop(hba, ST_S6); hba 1959 drivers/scsi/stex.c stex_hba_stop(hba, ST_S5); hba 1962 drivers/scsi/stex.c static int stex_choice_sleep_mic(struct st_hba *hba, pm_message_t state) hba 1968 drivers/scsi/stex.c hba->msi_lock = 0; hba 1977 
drivers/scsi/stex.c struct st_hba *hba = pci_get_drvdata(pdev); hba 1979 drivers/scsi/stex.c if ((hba->cardtype == st_yel || hba->cardtype == st_P3) hba 1980 drivers/scsi/stex.c && hba->supports_pm == 1) hba 1981 drivers/scsi/stex.c stex_hba_stop(hba, stex_choice_sleep_mic(hba, state)); hba 1983 drivers/scsi/stex.c stex_hba_stop(hba, ST_IGNORED); hba 1989 drivers/scsi/stex.c struct st_hba *hba = pci_get_drvdata(pdev); hba 1991 drivers/scsi/stex.c hba->mu_status = MU_STATE_STARTING; hba 1992 drivers/scsi/stex.c stex_handshake(hba); hba 29 drivers/scsi/ufs/cdns-pltfrm.c static int cdns_ufs_set_hclkdiv(struct ufs_hba *hba) hba 32 drivers/scsi/ufs/cdns-pltfrm.c struct list_head *head = &hba->clk_list_head; hba 47 drivers/scsi/ufs/cdns-pltfrm.c dev_err(hba->dev, "%s: unable to find core_clk rate\n", hba 54 drivers/scsi/ufs/cdns-pltfrm.c ufshcd_writel(hba, core_clk_div, CDNS_UFS_REG_HCLKDIV); hba 71 drivers/scsi/ufs/cdns-pltfrm.c static int cdns_ufs_hce_enable_notify(struct ufs_hba *hba, hba 77 drivers/scsi/ufs/cdns-pltfrm.c return cdns_ufs_set_hclkdiv(hba); hba 87 drivers/scsi/ufs/cdns-pltfrm.c static int cdns_ufs_link_startup_notify(struct ufs_hba *hba, hba 100 drivers/scsi/ufs/cdns-pltfrm.c ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0); hba 106 drivers/scsi/ufs/cdns-pltfrm.c hba->ahit = 0; hba 117 drivers/scsi/ufs/cdns-pltfrm.c static int cdns_ufs_init(struct ufs_hba *hba) hba 121 drivers/scsi/ufs/cdns-pltfrm.c if (hba->vops && hba->vops->phy_initialization) hba 122 drivers/scsi/ufs/cdns-pltfrm.c status = hba->vops->phy_initialization(hba); hba 133 drivers/scsi/ufs/cdns-pltfrm.c static int cdns_ufs_m31_16nm_phy_initialization(struct ufs_hba *hba) hba 138 drivers/scsi/ufs/cdns-pltfrm.c data = ufshcd_readl(hba, CDNS_UFS_REG_PHY_XCFGD1); hba 140 drivers/scsi/ufs/cdns-pltfrm.c ufshcd_writel(hba, data, CDNS_UFS_REG_PHY_XCFGD1); hba 205 drivers/scsi/ufs/cdns-pltfrm.c struct ufs_hba *hba = platform_get_drvdata(pdev); hba 207 drivers/scsi/ufs/cdns-pltfrm.c ufshcd_remove(hba); hba 75 drivers/scsi/ufs/tc-dwc-g210-pci.c struct ufs_hba *hba = pci_get_drvdata(pdev); hba 79 drivers/scsi/ufs/tc-dwc-g210-pci.c ufshcd_remove(hba); hba 92 drivers/scsi/ufs/tc-dwc-g210-pci.c struct ufs_hba *hba; hba 124 drivers/scsi/ufs/tc-dwc-g210-pci.c err = ufshcd_alloc_host(&pdev->dev, &hba); hba 130 drivers/scsi/ufs/tc-dwc-g210-pci.c hba->vops = &tc_dwc_g210_pci_hba_vops; hba 132 drivers/scsi/ufs/tc-dwc-g210-pci.c err = ufshcd_init(hba, mmio_base, pdev->irq); hba 138 drivers/scsi/ufs/tc-dwc-g210-pci.c pci_set_drvdata(pdev, hba); hba 78 drivers/scsi/ufs/tc-dwc-g210-pltfrm.c struct ufs_hba *hba = platform_get_drvdata(pdev); hba 81 drivers/scsi/ufs/tc-dwc-g210-pltfrm.c ufshcd_remove(hba); hba 24 drivers/scsi/ufs/tc-dwc-g210.c static int tc_dwc_g210_setup_40bit_rmmi(struct ufs_hba *hba) hba 77 drivers/scsi/ufs/tc-dwc-g210.c return ufshcd_dwc_dme_set_attrs(hba, setup_attrs, hba 88 drivers/scsi/ufs/tc-dwc-g210.c static int tc_dwc_g210_setup_20bit_rmmi_lane0(struct ufs_hba *hba) hba 130 drivers/scsi/ufs/tc-dwc-g210.c return ufshcd_dwc_dme_set_attrs(hba, setup_attrs, hba 141 drivers/scsi/ufs/tc-dwc-g210.c static int tc_dwc_g210_setup_20bit_rmmi_lane1(struct ufs_hba *hba) hba 188 drivers/scsi/ufs/tc-dwc-g210.c ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILRXDATALANES), hba 190 drivers/scsi/ufs/tc-dwc-g210.c ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILTXDATALANES), hba 195 drivers/scsi/ufs/tc-dwc-g210.c ret = ufshcd_dwc_dme_set_attrs(hba, setup_tx_attrs, hba 203 drivers/scsi/ufs/tc-dwc-g210.c ret = ufshcd_dwc_dme_set_attrs(hba, 
setup_rx_attrs, hba 218 drivers/scsi/ufs/tc-dwc-g210.c static int tc_dwc_g210_setup_20bit_rmmi(struct ufs_hba *hba) hba 232 drivers/scsi/ufs/tc-dwc-g210.c ret = ufshcd_dwc_dme_set_attrs(hba, setup_attrs, hba 238 drivers/scsi/ufs/tc-dwc-g210.c ret = tc_dwc_g210_setup_20bit_rmmi_lane0(hba); hba 243 drivers/scsi/ufs/tc-dwc-g210.c ret = tc_dwc_g210_setup_20bit_rmmi_lane1(hba); hba 259 drivers/scsi/ufs/tc-dwc-g210.c int tc_dwc_g210_config_40_bit(struct ufs_hba *hba) hba 263 drivers/scsi/ufs/tc-dwc-g210.c dev_info(hba->dev, "Configuring Test Chip 40-bit RMMI\n"); hba 264 drivers/scsi/ufs/tc-dwc-g210.c ret = tc_dwc_g210_setup_40bit_rmmi(hba); hba 266 drivers/scsi/ufs/tc-dwc-g210.c dev_err(hba->dev, "Configuration failed\n"); hba 271 drivers/scsi/ufs/tc-dwc-g210.c ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 0x01); hba 276 drivers/scsi/ufs/tc-dwc-g210.c ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), 0x01); hba 291 drivers/scsi/ufs/tc-dwc-g210.c int tc_dwc_g210_config_20_bit(struct ufs_hba *hba) hba 295 drivers/scsi/ufs/tc-dwc-g210.c dev_info(hba->dev, "Configuring Test Chip 20-bit RMMI\n"); hba 296 drivers/scsi/ufs/tc-dwc-g210.c ret = tc_dwc_g210_setup_20bit_rmmi(hba); hba 298 drivers/scsi/ufs/tc-dwc-g210.c dev_err(hba->dev, "Configuration failed\n"); hba 303 drivers/scsi/ufs/tc-dwc-g210.c ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 0x01); hba 308 drivers/scsi/ufs/tc-dwc-g210.c ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), 0x01); hba 13 drivers/scsi/ufs/tc-dwc-g210.h int tc_dwc_g210_config_40_bit(struct ufs_hba *hba); hba 14 drivers/scsi/ufs/tc-dwc-g210.h int tc_dwc_g210_config_20_bit(struct ufs_hba *hba); hba 23 drivers/scsi/ufs/ufs-hisi.c static int ufs_hisi_check_hibern8(struct ufs_hba *hba) hba 31 drivers/scsi/ufs/ufs-hisi.c err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0), hba 33 drivers/scsi/ufs/ufs-hisi.c err |= ufshcd_dme_get(hba, hba 48 drivers/scsi/ufs/ufs-hisi.c err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0), hba 50 drivers/scsi/ufs/ufs-hisi.c err |= ufshcd_dme_get(hba, hba 55 drivers/scsi/ufs/ufs-hisi.c dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n", hba 60 drivers/scsi/ufs/ufs-hisi.c dev_err(hba->dev, "%s: invalid TX_FSM_STATE, lane0 = %d, lane1 = %d\n", hba 67 drivers/scsi/ufs/ufs-hisi.c static void ufs_hisi_clk_init(struct ufs_hba *hba) hba 69 drivers/scsi/ufs/ufs-hisi.c struct ufs_hisi_host *host = ufshcd_get_variant(hba); hba 81 drivers/scsi/ufs/ufs-hisi.c static void ufs_hisi_soc_init(struct ufs_hba *hba) hba 83 drivers/scsi/ufs/ufs-hisi.c struct ufs_hisi_host *host = ufshcd_get_variant(hba); hba 138 drivers/scsi/ufs/ufs-hisi.c static int ufs_hisi_link_startup_pre_change(struct ufs_hba *hba) hba 140 drivers/scsi/ufs/ufs-hisi.c struct ufs_hisi_host *host = ufshcd_get_variant(hba); hba 146 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x1); hba 148 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x156A, 0x0), 0x2); hba 150 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8114, 0x0), 0x1); hba 152 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8121, 0x0), 0x2D); hba 154 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8122, 0x0), 0x1); hba 158 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8127, 0x0), 0x98); hba 160 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8128, 0x0), 0x1); hba 164 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1); 
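The tc-dwc-g210 and ufs-hisi entries above program long runs of M-PHY/UniPro attributes through ufshcd_dme_set() and the table helper ufshcd_dwc_dme_set_attrs(hba, setup_attrs, n). A minimal sketch of that table-driven style follows; dme_set() is a printf stub standing in for the real UIC DME_SET command, and the attribute words are illustrative values, not real register settings.

#include <stdio.h>

struct dme_attr {
	unsigned int attr_sel;	/* what UIC_ARG_MIB()/UIC_ARG_MIB_SEL() would build */
	unsigned int mib_val;
};

/* stub: the real call issues a UIC DME_SET and may fail */
static int dme_set(unsigned int attr_sel, unsigned int mib_val)
{
	printf("DME_SET attr 0x%08x = 0x%x\n", attr_sel, mib_val);
	return 0;
}

/* apply a whole attribute table, stopping at the first failure */
static int dme_set_attrs(const struct dme_attr *v, size_t n)
{
	size_t i;
	int ret = 0;

	for (i = 0; i < n && !ret; i++)
		ret = dme_set(v[i].attr_sel, v[i].mib_val);
	return ret;
}

int main(void)
{
	static const struct dme_attr pre_change[] = {
		{ 0xD0C10000, 0x1 },	/* hypothetical attribute/value pair */
		{ 0x81140000, 0x1 },
	};

	return dme_set_attrs(pre_change, sizeof(pre_change) / sizeof(pre_change[0]));
}

Keeping the attributes in a table rather than as straight-line calls makes the first failing attribute the natural stopping point and keeps per-board differences in data instead of code.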
hba 166 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x4), 0x58); hba 168 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x5), 0x58); hba 170 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x4), 0xB); hba 172 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x5), 0xB); hba 174 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x4), 0x1); hba 176 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x5), 0x1); hba 178 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1); hba 180 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8113, 0x0), 0x1); hba 181 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1); hba 185 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0092, 0x4), 0xA); hba 187 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0092, 0x5), 0xA); hba 189 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008f, 0x4), 0xA); hba 191 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008f, 0x5), 0xA); hba 194 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x4), 0x7); hba 196 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x5), 0x7); hba 200 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x4), 0x4F); hba 202 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x5), 0x4F); hba 204 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x4), 0x4F); hba 206 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x5), 0x4F); hba 208 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008B, 0x4), 0x4F); hba 210 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008B, 0x5), 0x4F); hba 212 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x0), 0x5); hba 214 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x1), 0x5); hba 216 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1); hba 218 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), &value); hba 220 drivers/scsi/ufs/ufs-hisi.c dev_info(hba->dev, hba 224 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x0); hba 225 drivers/scsi/ufs/ufs-hisi.c err = ufs_hisi_check_hibern8(hba); hba 227 drivers/scsi/ufs/ufs-hisi.c dev_err(hba->dev, "ufs_hisi_check_hibern8 error\n"); hba 230 drivers/scsi/ufs/ufs-hisi.c ufshcd_writel(hba, UFS_HCLKDIV_NORMAL_VALUE, UFS_REG_HCLKDIV); hba 233 drivers/scsi/ufs/ufs-hisi.c reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER); hba 235 drivers/scsi/ufs/ufs-hisi.c ufshcd_writel(hba, reg, REG_AUTO_HIBERNATE_IDLE_TIMER); hba 238 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x155E, 0x0), 0x0); hba 240 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), 0x0); hba 241 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), &value); hba 244 drivers/scsi/ufs/ufs-hisi.c dev_info(hba->dev, "WARN: close VS_Mk2ExtnSupport failed\n"); hba 250 drivers/scsi/ufs/ufs-hisi.c static int ufs_hisi_link_startup_post_change(struct ufs_hba *hba) hba 252 drivers/scsi/ufs/ufs-hisi.c struct ufs_hisi_host *host = ufshcd_get_variant(hba); hba 255 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB(0x2044), 0x0); 
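The link_startup_notify entries that follow (ufs-hisi, ufs-mediatek, ufs-qcom) all share the same two-phase shape: the core calls the vendor hook once with PRE_CHANGE before issuing DME_LINKSTARTUP and once with POST_CHANGE after the link is up. A minimal sketch of that dispatch, assuming the usual struct ufs_hba_variant_ops wiring; the example_* helpers are hypothetical stand-ins for the pre/post routines indexed here:

	#include "ufshcd.h"	/* struct ufs_hba, enum ufs_notify_change_status */

	/* Hypothetical helpers; a real driver does its attribute tuning here. */
	static int example_pre_change(struct ufs_hba *hba)  { return 0; }
	static int example_post_change(struct ufs_hba *hba) { return 0; }

	static int example_link_startup_notify(struct ufs_hba *hba,
					       enum ufs_notify_change_status status)
	{
		int err = 0;

		switch (status) {
		case PRE_CHANGE:
			/* Runs before the core issues DME_LINKSTARTUP. */
			err = example_pre_change(hba);
			break;
		case POST_CHANGE:
			/* Runs once the link is up; peer attributes are reachable. */
			err = example_post_change(hba);
			break;
		default:
			break;
		}

		return err;
	}

The same PRE_CHANGE/POST_CHANGE split recurs in the pwr_change_notify and hce_enable_notify entries indexed further down, so one switch per hook is the idiom throughout these drivers.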
hba 257 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB(0x2045), 0x0); hba 259 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB(0x2040), 0x9); hba 268 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB(0xd09a), 0x80000000); hba 270 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB(0xd09c), 0x00000005); hba 275 drivers/scsi/ufs/ufs-hisi.c static int ufs_hisi_link_startup_notify(struct ufs_hba *hba, hba 282 drivers/scsi/ufs/ufs-hisi.c err = ufs_hisi_link_startup_pre_change(hba); hba 285 drivers/scsi/ufs/ufs-hisi.c err = ufs_hisi_link_startup_post_change(hba); hba 310 drivers/scsi/ufs/ufs-hisi.c static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba) hba 312 drivers/scsi/ufs/ufs-hisi.c struct ufs_hisi_host *host = ufshcd_get_variant(hba); hba 320 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0xD0A0), 0x13); hba 322 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1552), 0x4f); hba 324 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1554), 0x4f); hba 326 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1556), 0x4f); hba 328 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x15a7), 0xA); hba 330 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x15a8), 0xA); hba 331 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xd085, 0x0), 0x01); hba 334 drivers/scsi/ufs/ufs-hisi.c if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME) { hba 337 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB(0xD0A0), 0x10); hba 339 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB(0x1556), 0x48); hba 343 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB(0x15A8), 0x1); hba 345 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB(0x155c), 0x0); hba 347 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b0), 8191); hba 349 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b1), 65535); hba 351 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b2), 32767); hba 353 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB(0xd041), 8191); hba 355 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB(0xd042), 65535); hba 357 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB(0xd043), 32767); hba 359 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b3), 8191); hba 361 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b4), 65535); hba 363 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b5), 32767); hba 365 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB(0xd044), 8191); hba 367 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB(0xd045), 65535); hba 369 drivers/scsi/ufs/ufs-hisi.c ufshcd_dme_set(hba, UIC_ARG_MIB(0xd046), 32767); hba 372 drivers/scsi/ufs/ufs-hisi.c static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba, hba 381 drivers/scsi/ufs/ufs-hisi.c dev_err(hba->dev, hba 393 drivers/scsi/ufs/ufs-hisi.c dev_err(hba->dev, hba 398 drivers/scsi/ufs/ufs-hisi.c ufs_hisi_pwr_change_pre_change(hba); hba 410 drivers/scsi/ufs/ufs-hisi.c static int ufs_hisi_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) hba 412 drivers/scsi/ufs/ufs-hisi.c struct ufs_hisi_host *host = ufshcd_get_variant(hba); hba 432 drivers/scsi/ufs/ufs-hisi.c static int ufs_hisi_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) hba 434 drivers/scsi/ufs/ufs-hisi.c struct ufs_hisi_host *host = ufshcd_get_variant(hba); hba 450 
drivers/scsi/ufs/ufs-hisi.c struct device *dev = host->hba->dev; hba 461 drivers/scsi/ufs/ufs-hisi.c static void ufs_hisi_set_pm_lvl(struct ufs_hba *hba) hba 463 drivers/scsi/ufs/ufs-hisi.c hba->rpm_lvl = UFS_PM_LVL_1; hba 464 drivers/scsi/ufs/ufs-hisi.c hba->spm_lvl = UFS_PM_LVL_3; hba 471 drivers/scsi/ufs/ufs-hisi.c static int ufs_hisi_init_common(struct ufs_hba *hba) hba 474 drivers/scsi/ufs/ufs-hisi.c struct device *dev = hba->dev; hba 481 drivers/scsi/ufs/ufs-hisi.c host->hba = hba; hba 482 drivers/scsi/ufs/ufs-hisi.c ufshcd_set_variant(hba, host); hba 490 drivers/scsi/ufs/ufs-hisi.c ufs_hisi_set_pm_lvl(hba); hba 494 drivers/scsi/ufs/ufs-hisi.c ufshcd_set_variant(hba, NULL); hba 501 drivers/scsi/ufs/ufs-hisi.c static int ufs_hi3660_init(struct ufs_hba *hba) hba 504 drivers/scsi/ufs/ufs-hisi.c struct device *dev = hba->dev; hba 506 drivers/scsi/ufs/ufs-hisi.c ret = ufs_hisi_init_common(hba); hba 512 drivers/scsi/ufs/ufs-hisi.c ufs_hisi_clk_init(hba); hba 514 drivers/scsi/ufs/ufs-hisi.c ufs_hisi_soc_init(hba); hba 519 drivers/scsi/ufs/ufs-hisi.c static int ufs_hi3670_init(struct ufs_hba *hba) hba 522 drivers/scsi/ufs/ufs-hisi.c struct device *dev = hba->dev; hba 525 drivers/scsi/ufs/ufs-hisi.c ret = ufs_hisi_init_common(hba); hba 531 drivers/scsi/ufs/ufs-hisi.c ufs_hisi_clk_init(hba); hba 533 drivers/scsi/ufs/ufs-hisi.c ufs_hisi_soc_init(hba); hba 536 drivers/scsi/ufs/ufs-hisi.c host = ufshcd_get_variant(hba); hba 579 drivers/scsi/ufs/ufs-hisi.c struct ufs_hba *hba = platform_get_drvdata(pdev); hba 581 drivers/scsi/ufs/ufs-hisi.c ufshcd_remove(hba); hba 96 drivers/scsi/ufs/ufs-hisi.h struct ufs_hba *hba; hba 20 drivers/scsi/ufs/ufs-mediatek.c static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable) hba 25 drivers/scsi/ufs/ufs-mediatek.c ufshcd_dme_get(hba, hba 31 drivers/scsi/ufs/ufs-mediatek.c ufshcd_dme_set(hba, hba 34 drivers/scsi/ufs/ufs-mediatek.c ufshcd_dme_get(hba, hba 37 drivers/scsi/ufs/ufs-mediatek.c ufshcd_dme_set(hba, hba 40 drivers/scsi/ufs/ufs-mediatek.c ufshcd_dme_get(hba, hba 45 drivers/scsi/ufs/ufs-mediatek.c ufshcd_dme_set(hba, hba 48 drivers/scsi/ufs/ufs-mediatek.c ufshcd_dme_get(hba, hba 51 drivers/scsi/ufs/ufs-mediatek.c ufshcd_dme_set(hba, hba 56 drivers/scsi/ufs/ufs-mediatek.c static int ufs_mtk_bind_mphy(struct ufs_hba *hba) hba 58 drivers/scsi/ufs/ufs-mediatek.c struct ufs_mtk_host *host = ufshcd_get_variant(hba); hba 59 drivers/scsi/ufs/ufs-mediatek.c struct device *dev = hba->dev; hba 93 drivers/scsi/ufs/ufs-mediatek.c static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on, hba 96 drivers/scsi/ufs/ufs-mediatek.c struct ufs_mtk_host *host = ufshcd_get_variant(hba); hba 131 drivers/scsi/ufs/ufs-mediatek.c static int ufs_mtk_init(struct ufs_hba *hba) hba 134 drivers/scsi/ufs/ufs-mediatek.c struct device *dev = hba->dev; hba 144 drivers/scsi/ufs/ufs-mediatek.c host->hba = hba; hba 145 drivers/scsi/ufs/ufs-mediatek.c ufshcd_set_variant(hba, host); hba 147 drivers/scsi/ufs/ufs-mediatek.c err = ufs_mtk_bind_mphy(hba); hba 158 drivers/scsi/ufs/ufs-mediatek.c ufs_mtk_setup_clocks(hba, true, POST_CHANGE); hba 163 drivers/scsi/ufs/ufs-mediatek.c ufshcd_set_variant(hba, NULL); hba 168 drivers/scsi/ufs/ufs-mediatek.c static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba, hba 200 drivers/scsi/ufs/ufs-mediatek.c static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba, hba 209 drivers/scsi/ufs/ufs-mediatek.c ret = ufs_mtk_pre_pwr_change(hba, dev_max_params, hba 222 drivers/scsi/ufs/ufs-mediatek.c static int ufs_mtk_pre_link(struct ufs_hba *hba) hba 228 
drivers/scsi/ufs/ufs-mediatek.c ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp); hba 234 drivers/scsi/ufs/ufs-mediatek.c ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp); hba 239 drivers/scsi/ufs/ufs-mediatek.c static int ufs_mtk_post_link(struct ufs_hba *hba) hba 242 drivers/scsi/ufs/ufs-mediatek.c ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0); hba 245 drivers/scsi/ufs/ufs-mediatek.c ufs_mtk_cfg_unipro_cg(hba, true); hba 250 drivers/scsi/ufs/ufs-mediatek.c static int ufs_mtk_link_startup_notify(struct ufs_hba *hba, hba 257 drivers/scsi/ufs/ufs-mediatek.c ret = ufs_mtk_pre_link(hba); hba 260 drivers/scsi/ufs/ufs-mediatek.c ret = ufs_mtk_post_link(hba); hba 270 drivers/scsi/ufs/ufs-mediatek.c static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) hba 272 drivers/scsi/ufs/ufs-mediatek.c struct ufs_mtk_host *host = ufshcd_get_variant(hba); hba 274 drivers/scsi/ufs/ufs-mediatek.c if (ufshcd_is_link_hibern8(hba)) hba 280 drivers/scsi/ufs/ufs-mediatek.c static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) hba 282 drivers/scsi/ufs/ufs-mediatek.c struct ufs_mtk_host *host = ufshcd_get_variant(hba); hba 284 drivers/scsi/ufs/ufs-mediatek.c if (ufshcd_is_link_hibern8(hba)) hba 290 drivers/scsi/ufs/ufs-mediatek.c static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba, hba 294 drivers/scsi/ufs/ufs-mediatek.c ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6); hba 343 drivers/scsi/ufs/ufs-mediatek.c struct ufs_hba *hba = platform_get_drvdata(pdev); hba 346 drivers/scsi/ufs/ufs-mediatek.c ufshcd_remove(hba); hba 49 drivers/scsi/ufs/ufs-mediatek.h struct ufs_hba *hba; hba 43 drivers/scsi/ufs/ufs-qcom.c static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba, hba 51 drivers/scsi/ufs/ufs-qcom.c static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len, hba 54 drivers/scsi/ufs/ufs-qcom.c ufshcd_dump_regs(hba, offset, len * 4, prefix); hba 57 drivers/scsi/ufs/ufs-qcom.c static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes) hba 61 drivers/scsi/ufs/ufs-qcom.c err = ufshcd_dme_get(hba, hba 64 drivers/scsi/ufs/ufs-qcom.c dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n", hba 123 drivers/scsi/ufs/ufs-qcom.c struct device *dev = host->hba->dev; hba 164 drivers/scsi/ufs/ufs-qcom.c struct device *dev = host->hba->dev; hba 180 drivers/scsi/ufs/ufs-qcom.c if (host->hba->lanes_per_direction > 1) { hba 193 drivers/scsi/ufs/ufs-qcom.c static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba) hba 197 drivers/scsi/ufs/ufs-qcom.c return ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes); hba 200 drivers/scsi/ufs/ufs-qcom.c static int ufs_qcom_check_hibern8(struct ufs_hba *hba) hba 207 drivers/scsi/ufs/ufs-qcom.c err = ufshcd_dme_get(hba, hba 223 drivers/scsi/ufs/ufs-qcom.c err = ufshcd_dme_get(hba, hba 229 drivers/scsi/ufs/ufs-qcom.c dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n", hba 233 drivers/scsi/ufs/ufs-qcom.c dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n", hba 242 drivers/scsi/ufs/ufs-qcom.c ufshcd_rmwl(host->hba, QUNIPRO_SEL, hba 249 drivers/scsi/ufs/ufs-qcom.c static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) hba 251 drivers/scsi/ufs/ufs-qcom.c struct ufs_qcom_host *host = ufshcd_get_variant(hba); hba 263 drivers/scsi/ufs/ufs-qcom.c dev_err(hba->dev, "%s: phy init failed, ret = %d\n", hba 271 drivers/scsi/ufs/ufs-qcom.c dev_err(hba->dev, "%s: phy power on failed, ret = %d\n", hba 294 drivers/scsi/ufs/ufs-qcom.c static void 
ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba) hba 296 drivers/scsi/ufs/ufs-qcom.c ufshcd_writel(hba, hba 297 drivers/scsi/ufs/ufs-qcom.c ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL, hba 304 drivers/scsi/ufs/ufs-qcom.c static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, hba 307 drivers/scsi/ufs/ufs-qcom.c struct ufs_qcom_host *host = ufshcd_get_variant(hba); hba 312 drivers/scsi/ufs/ufs-qcom.c ufs_qcom_power_up_sequence(hba); hba 322 drivers/scsi/ufs/ufs-qcom.c err = ufs_qcom_check_hibern8(hba); hba 323 drivers/scsi/ufs/ufs-qcom.c ufs_qcom_enable_hw_clk_gating(hba); hba 327 drivers/scsi/ufs/ufs-qcom.c dev_err(hba->dev, "%s: invalid status %d\n", __func__, status); hba 337 drivers/scsi/ufs/ufs-qcom.c static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, hba 341 drivers/scsi/ufs/ufs-qcom.c struct ufs_qcom_host *host = ufshcd_get_variant(hba); hba 374 drivers/scsi/ufs/ufs-qcom.c if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba)) hba 378 drivers/scsi/ufs/ufs-qcom.c dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear); hba 382 drivers/scsi/ufs/ufs-qcom.c list_for_each_entry(clki, &hba->clk_list_head, list) { hba 392 drivers/scsi/ufs/ufs-qcom.c if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) { hba 393 drivers/scsi/ufs/ufs-qcom.c ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US); hba 413 drivers/scsi/ufs/ufs-qcom.c dev_err(hba->dev, hba 422 drivers/scsi/ufs/ufs-qcom.c dev_err(hba->dev, hba 430 drivers/scsi/ufs/ufs-qcom.c dev_err(hba->dev, "%s: invalid rate = %d\n", hba 438 drivers/scsi/ufs/ufs-qcom.c dev_err(hba->dev, hba 448 drivers/scsi/ufs/ufs-qcom.c dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs); hba 452 drivers/scsi/ufs/ufs-qcom.c if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) != hba 455 drivers/scsi/ufs/ufs-qcom.c ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us, hba 465 drivers/scsi/ufs/ufs-qcom.c ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100), hba 481 drivers/scsi/ufs/ufs-qcom.c static int ufs_qcom_link_startup_notify(struct ufs_hba *hba, hba 485 drivers/scsi/ufs/ufs-qcom.c struct ufs_qcom_host *host = ufshcd_get_variant(hba); hba 489 drivers/scsi/ufs/ufs-qcom.c if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE, hba 491 drivers/scsi/ufs/ufs-qcom.c dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", hba 502 drivers/scsi/ufs/ufs-qcom.c err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, hba 512 drivers/scsi/ufs/ufs-qcom.c if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41) hba 513 drivers/scsi/ufs/ufs-qcom.c err = ufshcd_dme_set(hba, hba 519 drivers/scsi/ufs/ufs-qcom.c ufs_qcom_link_startup_post_change(hba); hba 529 drivers/scsi/ufs/ufs-qcom.c static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) hba 531 drivers/scsi/ufs/ufs-qcom.c struct ufs_qcom_host *host = ufshcd_get_variant(hba); hba 535 drivers/scsi/ufs/ufs-qcom.c if (ufs_qcom_is_link_off(hba)) { hba 544 drivers/scsi/ufs/ufs-qcom.c } else if (!ufs_qcom_is_link_active(hba)) { hba 551 drivers/scsi/ufs/ufs-qcom.c static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) hba 553 drivers/scsi/ufs/ufs-qcom.c struct ufs_qcom_host *host = ufshcd_get_variant(hba); hba 557 drivers/scsi/ufs/ufs-qcom.c if (ufs_qcom_is_link_off(hba)) { hba 560 drivers/scsi/ufs/ufs-qcom.c dev_err(hba->dev, "%s: failed PHY power on: %d\n", hba 569 drivers/scsi/ufs/ufs-qcom.c } else if (!ufs_qcom_is_link_active(hba)) { hba 575 drivers/scsi/ufs/ufs-qcom.c hba->is_sys_suspended = 
false; hba 583 drivers/scsi/ufs/ufs-qcom.c struct device *dev = host->hba->dev; hba 641 drivers/scsi/ufs/ufs-qcom.c dev_err(host->hba->dev, hba 669 drivers/scsi/ufs/ufs-qcom.c dev_err(host->hba->dev, "%s: failed %d\n", __func__, err); hba 679 drivers/scsi/ufs/ufs-qcom.c struct ufs_hba *hba = dev_get_drvdata(dev); hba 680 drivers/scsi/ufs/ufs-qcom.c struct ufs_qcom_host *host = ufshcd_get_variant(hba); hba 690 drivers/scsi/ufs/ufs-qcom.c struct ufs_hba *hba = dev_get_drvdata(dev); hba 691 drivers/scsi/ufs/ufs-qcom.c struct ufs_qcom_host *host = ufshcd_get_variant(hba); hba 706 drivers/scsi/ufs/ufs-qcom.c struct device *dev = host->hba->dev; hba 799 drivers/scsi/ufs/ufs-qcom.c static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, hba 804 drivers/scsi/ufs/ufs-qcom.c struct ufs_qcom_host *host = ufshcd_get_variant(hba); hba 854 drivers/scsi/ufs/ufs-qcom.c if (!ufshcd_is_hs_mode(&hba->pwr_info) && hba 859 drivers/scsi/ufs/ufs-qcom.c if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx, hba 862 drivers/scsi/ufs/ufs-qcom.c dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", hba 878 drivers/scsi/ufs/ufs-qcom.c if (ufshcd_is_hs_mode(&hba->pwr_info) && hba 890 drivers/scsi/ufs/ufs-qcom.c static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba) hba 895 drivers/scsi/ufs/ufs-qcom.c err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1), hba 901 drivers/scsi/ufs/ufs-qcom.c err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1), hba 908 drivers/scsi/ufs/ufs-qcom.c static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba, hba 913 drivers/scsi/ufs/ufs-qcom.c if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME) hba 914 drivers/scsi/ufs/ufs-qcom.c err = ufs_qcom_quirk_host_pa_saveconfigtime(hba); hba 919 drivers/scsi/ufs/ufs-qcom.c static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba) hba 921 drivers/scsi/ufs/ufs-qcom.c struct ufs_qcom_host *host = ufshcd_get_variant(hba); hba 938 drivers/scsi/ufs/ufs-qcom.c static void ufs_qcom_advertise_quirks(struct ufs_hba *hba) hba 940 drivers/scsi/ufs/ufs-qcom.c struct ufs_qcom_host *host = ufshcd_get_variant(hba); hba 943 drivers/scsi/ufs/ufs-qcom.c hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS hba 948 drivers/scsi/ufs/ufs-qcom.c hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR; hba 950 drivers/scsi/ufs/ufs-qcom.c hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC; hba 954 drivers/scsi/ufs/ufs-qcom.c hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION; hba 958 drivers/scsi/ufs/ufs-qcom.c hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS hba 964 drivers/scsi/ufs/ufs-qcom.c static void ufs_qcom_set_caps(struct ufs_hba *hba) hba 966 drivers/scsi/ufs/ufs-qcom.c struct ufs_qcom_host *host = ufshcd_get_variant(hba); hba 968 drivers/scsi/ufs/ufs-qcom.c hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING; hba 969 drivers/scsi/ufs/ufs-qcom.c hba->caps |= UFSHCD_CAP_CLK_SCALING; hba 970 drivers/scsi/ufs/ufs-qcom.c hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND; hba 986 drivers/scsi/ufs/ufs-qcom.c static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on, hba 989 drivers/scsi/ufs/ufs-qcom.c struct ufs_qcom_host *host = ufshcd_get_variant(hba); hba 1003 drivers/scsi/ufs/ufs-qcom.c if (ufshcd_is_hs_mode(&hba->pwr_info)) hba 1010 drivers/scsi/ufs/ufs-qcom.c if (!ufs_qcom_is_link_active(hba)) { hba 1020 drivers/scsi/ufs/ufs-qcom.c dev_err(hba->dev, "%s: set bus vote failed %d\n", hba 1033 drivers/scsi/ufs/ufs-qcom.c ufs_qcom_assert_reset(host->hba); hba 1046 drivers/scsi/ufs/ufs-qcom.c ufs_qcom_deassert_reset(host->hba); hba 1083 
drivers/scsi/ufs/ufs-qcom.c static int ufs_qcom_init(struct ufs_hba *hba) hba 1086 drivers/scsi/ufs/ufs-qcom.c struct device *dev = hba->dev; hba 1102 drivers/scsi/ufs/ufs-qcom.c host->hba = hba; hba 1103 drivers/scsi/ufs/ufs-qcom.c ufshcd_set_variant(hba, host); hba 1155 drivers/scsi/ufs/ufs-qcom.c ufs_qcom_get_controller_revision(hba, &host->hw_ver.major, hba 1163 drivers/scsi/ufs/ufs-qcom.c host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1; hba 1186 drivers/scsi/ufs/ufs-qcom.c ufs_qcom_set_caps(hba); hba 1187 drivers/scsi/ufs/ufs-qcom.c ufs_qcom_advertise_quirks(hba); hba 1189 drivers/scsi/ufs/ufs-qcom.c ufs_qcom_setup_clocks(hba, true, POST_CHANGE); hba 1191 drivers/scsi/ufs/ufs-qcom.c if (hba->dev->id < MAX_UFS_QCOM_HOSTS) hba 1192 drivers/scsi/ufs/ufs-qcom.c ufs_qcom_hosts[hba->dev->id] = host; hba 1206 drivers/scsi/ufs/ufs-qcom.c ufshcd_set_variant(hba, NULL); hba 1211 drivers/scsi/ufs/ufs-qcom.c static void ufs_qcom_exit(struct ufs_hba *hba) hba 1213 drivers/scsi/ufs/ufs-qcom.c struct ufs_qcom_host *host = ufshcd_get_variant(hba); hba 1220 drivers/scsi/ufs/ufs-qcom.c static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba, hba 1229 drivers/scsi/ufs/ufs-qcom.c err = ufshcd_dme_get(hba, hba 1241 drivers/scsi/ufs/ufs-qcom.c err = ufshcd_dme_set(hba, hba 1248 drivers/scsi/ufs/ufs-qcom.c static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba) hba 1254 drivers/scsi/ufs/ufs-qcom.c static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba) hba 1256 drivers/scsi/ufs/ufs-qcom.c struct ufs_qcom_host *host = ufshcd_get_variant(hba); hba 1262 drivers/scsi/ufs/ufs-qcom.c return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150); hba 1265 drivers/scsi/ufs/ufs-qcom.c static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba) hba 1267 drivers/scsi/ufs/ufs-qcom.c struct ufs_qcom_host *host = ufshcd_get_variant(hba); hba 1274 drivers/scsi/ufs/ufs-qcom.c err = ufshcd_dme_get(hba, hba 1282 drivers/scsi/ufs/ufs-qcom.c err = ufshcd_dme_set(hba, hba 1290 drivers/scsi/ufs/ufs-qcom.c static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba) hba 1292 drivers/scsi/ufs/ufs-qcom.c struct ufs_qcom_host *host = ufshcd_get_variant(hba); hba 1298 drivers/scsi/ufs/ufs-qcom.c return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75); hba 1301 drivers/scsi/ufs/ufs-qcom.c static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba, hba 1304 drivers/scsi/ufs/ufs-qcom.c struct ufs_qcom_host *host = ufshcd_get_variant(hba); hba 1310 drivers/scsi/ufs/ufs-qcom.c err = ufs_qcom_clk_scale_up_pre_change(hba); hba 1312 drivers/scsi/ufs/ufs-qcom.c err = ufs_qcom_clk_scale_down_pre_change(hba); hba 1315 drivers/scsi/ufs/ufs-qcom.c err = ufs_qcom_clk_scale_up_post_change(hba); hba 1317 drivers/scsi/ufs/ufs-qcom.c err = ufs_qcom_clk_scale_down_post_change(hba); hba 1322 drivers/scsi/ufs/ufs-qcom.c ufs_qcom_cfg_timers(hba, hba 1334 drivers/scsi/ufs/ufs-qcom.c static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, hba 1335 drivers/scsi/ufs/ufs-qcom.c void *priv, void (*print_fn)(struct ufs_hba *hba, hba 1341 drivers/scsi/ufs/ufs-qcom.c if (unlikely(!hba)) { hba 1346 drivers/scsi/ufs/ufs-qcom.c dev_err(hba->dev, "%s: print_fn is NULL\n", __func__); hba 1350 drivers/scsi/ufs/ufs-qcom.c host = ufshcd_get_variant(hba); hba 1355 drivers/scsi/ufs/ufs-qcom.c print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv); hba 1357 drivers/scsi/ufs/ufs-qcom.c reg = ufshcd_readl(hba, REG_UFS_CFG1); hba 1359 drivers/scsi/ufs/ufs-qcom.c ufshcd_writel(hba, reg, REG_UFS_CFG1); 
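The ufs-qcom debug-dump entries above toggle bits in REG_UFS_CFG1 with ufshcd_rmwl(), the driver core's read-modify-write helper over ufshcd_readl()/ufshcd_writel(). A sketch of that pattern; both macro names below are invented for illustration, and only the rmwl usage itself mirrors the indexed code:

	#include <linux/bits.h>
	#include <linux/types.h>
	#include "ufshcd.h"	/* ufshcd_readl/ufshcd_writel/ufshcd_rmwl */

	#define EXAMPLE_DBG_REG		0x100		/* hypothetical register offset */
	#define EXAMPLE_DBG_EN		BIT(17)		/* hypothetical enable bit */

	static void example_set_debug_enable(struct ufs_hba *hba, bool enable)
	{
		/* Touch only the enable bit; ufshcd_rmwl() masks in the new value
		 * and preserves the register's other fields. */
		ufshcd_rmwl(hba, EXAMPLE_DBG_EN,
			    enable ? EXAMPLE_DBG_EN : 0, EXAMPLE_DBG_REG);

		/* A dummy read is a common way to flush the posted MMIO write
		 * before any dependent register access. */
		ufshcd_readl(hba, EXAMPLE_DBG_REG);
	}

This is why the indexed dump routine reads REG_UFS_CFG1, ORs in the debug-RAM enable, writes it back, and clears it again with ufshcd_rmwl() once the dump is done.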
hba 1362 drivers/scsi/ufs/ufs-qcom.c print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv); hba 1365 drivers/scsi/ufs/ufs-qcom.c print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv); hba 1368 drivers/scsi/ufs/ufs-qcom.c print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv); hba 1371 drivers/scsi/ufs/ufs-qcom.c ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1); hba 1374 drivers/scsi/ufs/ufs-qcom.c print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv); hba 1377 drivers/scsi/ufs/ufs-qcom.c print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv); hba 1380 drivers/scsi/ufs/ufs-qcom.c print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv); hba 1383 drivers/scsi/ufs/ufs-qcom.c print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv); hba 1386 drivers/scsi/ufs/ufs-qcom.c print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv); hba 1389 drivers/scsi/ufs/ufs-qcom.c print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv); hba 1392 drivers/scsi/ufs/ufs-qcom.c print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv); hba 1398 drivers/scsi/ufs/ufs-qcom.c ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, hba 1400 drivers/scsi/ufs/ufs-qcom.c ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1); hba 1402 drivers/scsi/ufs/ufs-qcom.c ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1); hba 1403 drivers/scsi/ufs/ufs-qcom.c ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1); hba 1417 drivers/scsi/ufs/ufs-qcom.c dev_err(host->hba->dev, hba 1496 drivers/scsi/ufs/ufs-qcom.c pm_runtime_get_sync(host->hba->dev); hba 1497 drivers/scsi/ufs/ufs-qcom.c ufshcd_hold(host->hba, false); hba 1498 drivers/scsi/ufs/ufs-qcom.c ufshcd_rmwl(host->hba, TEST_BUS_SEL, hba 1501 drivers/scsi/ufs/ufs-qcom.c ufshcd_rmwl(host->hba, mask, hba 1510 drivers/scsi/ufs/ufs-qcom.c ufshcd_release(host->hba); hba 1511 drivers/scsi/ufs/ufs-qcom.c pm_runtime_put_sync(host->hba->dev); hba 1516 drivers/scsi/ufs/ufs-qcom.c static void ufs_qcom_testbus_read(struct ufs_hba *hba) hba 1518 drivers/scsi/ufs/ufs-qcom.c ufshcd_dump_regs(hba, UFS_TEST_BUS, 4, "UFS_TEST_BUS "); hba 1521 drivers/scsi/ufs/ufs-qcom.c static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba) hba 1523 drivers/scsi/ufs/ufs-qcom.c struct ufs_qcom_host *host = ufshcd_get_variant(hba); hba 1535 drivers/scsi/ufs/ufs-qcom.c testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS); hba 1542 drivers/scsi/ufs/ufs-qcom.c static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) hba 1544 drivers/scsi/ufs/ufs-qcom.c ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4, hba 1548 drivers/scsi/ufs/ufs-qcom.c ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper); hba 1550 drivers/scsi/ufs/ufs-qcom.c ufs_qcom_testbus_read(hba); hba 1552 drivers/scsi/ufs/ufs-qcom.c ufs_qcom_print_unipro_testbus(hba); hba 1562 drivers/scsi/ufs/ufs-qcom.c static void ufs_qcom_device_reset(struct ufs_hba *hba) hba 1564 drivers/scsi/ufs/ufs-qcom.c struct ufs_qcom_host *host = ufshcd_get_variant(hba); hba 1631 drivers/scsi/ufs/ufs-qcom.c struct ufs_hba *hba = platform_get_drvdata(pdev); hba 1634 drivers/scsi/ufs/ufs-qcom.c ufshcd_remove(hba); hba 142 drivers/scsi/ufs/ufs-qcom.h ufs_qcom_get_controller_revision(struct ufs_hba *hba, hba 145 drivers/scsi/ufs/ufs-qcom.h u32 ver = ufshcd_readl(hba, REG_UFS_HW_VERSION); hba 152 drivers/scsi/ufs/ufs-qcom.h static inline void ufs_qcom_assert_reset(struct ufs_hba *hba) hba 154 drivers/scsi/ufs/ufs-qcom.h ufshcd_rmwl(hba, MASK_UFS_PHY_SOFT_RESET, hba 164 drivers/scsi/ufs/ufs-qcom.h static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba) hba 166 
drivers/scsi/ufs/ufs-qcom.h ufshcd_rmwl(hba, MASK_UFS_PHY_SOFT_RESET, hba 217 drivers/scsi/ufs/ufs-qcom.h struct ufs_hba *hba; hba 250 drivers/scsi/ufs/ufs-qcom.h #define ufs_qcom_is_link_off(hba) ufshcd_is_link_off(hba) hba 251 drivers/scsi/ufs/ufs-qcom.h #define ufs_qcom_is_link_active(hba) ufshcd_is_link_active(hba) hba 252 drivers/scsi/ufs/ufs-qcom.h #define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba) hba 39 drivers/scsi/ufs/ufs-sysfs.c struct ufs_hba *hba = dev_get_drvdata(dev); hba 48 drivers/scsi/ufs/ufs-sysfs.c spin_lock_irqsave(hba->host->host_lock, flags); hba 50 drivers/scsi/ufs/ufs-sysfs.c hba->rpm_lvl = value; hba 52 drivers/scsi/ufs/ufs-sysfs.c hba->spm_lvl = value; hba 53 drivers/scsi/ufs/ufs-sysfs.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 60 drivers/scsi/ufs/ufs-sysfs.c struct ufs_hba *hba = dev_get_drvdata(dev); hba 62 drivers/scsi/ufs/ufs-sysfs.c return sprintf(buf, "%d\n", hba->rpm_lvl); hba 74 drivers/scsi/ufs/ufs-sysfs.c struct ufs_hba *hba = dev_get_drvdata(dev); hba 77 drivers/scsi/ufs/ufs-sysfs.c ufs_pm_lvl_states[hba->rpm_lvl].dev_state)); hba 83 drivers/scsi/ufs/ufs-sysfs.c struct ufs_hba *hba = dev_get_drvdata(dev); hba 86 drivers/scsi/ufs/ufs-sysfs.c ufs_pm_lvl_states[hba->rpm_lvl].link_state)); hba 92 drivers/scsi/ufs/ufs-sysfs.c struct ufs_hba *hba = dev_get_drvdata(dev); hba 94 drivers/scsi/ufs/ufs-sysfs.c return sprintf(buf, "%d\n", hba->spm_lvl); hba 106 drivers/scsi/ufs/ufs-sysfs.c struct ufs_hba *hba = dev_get_drvdata(dev); hba 109 drivers/scsi/ufs/ufs-sysfs.c ufs_pm_lvl_states[hba->spm_lvl].dev_state)); hba 115 drivers/scsi/ufs/ufs-sysfs.c struct ufs_hba *hba = dev_get_drvdata(dev); hba 118 drivers/scsi/ufs/ufs-sysfs.c ufs_pm_lvl_states[hba->spm_lvl].link_state)); hba 121 drivers/scsi/ufs/ufs-sysfs.c static void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit) hba 125 drivers/scsi/ufs/ufs-sysfs.c if (!ufshcd_is_auto_hibern8_supported(hba)) hba 128 drivers/scsi/ufs/ufs-sysfs.c spin_lock_irqsave(hba->host->host_lock, flags); hba 129 drivers/scsi/ufs/ufs-sysfs.c if (hba->ahit != ahit) hba 130 drivers/scsi/ufs/ufs-sysfs.c hba->ahit = ahit; hba 131 drivers/scsi/ufs/ufs-sysfs.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 132 drivers/scsi/ufs/ufs-sysfs.c if (!pm_runtime_suspended(hba->dev)) { hba 133 drivers/scsi/ufs/ufs-sysfs.c pm_runtime_get_sync(hba->dev); hba 134 drivers/scsi/ufs/ufs-sysfs.c ufshcd_hold(hba, false); hba 135 drivers/scsi/ufs/ufs-sysfs.c ufshcd_auto_hibern8_enable(hba); hba 136 drivers/scsi/ufs/ufs-sysfs.c ufshcd_release(hba); hba 137 drivers/scsi/ufs/ufs-sysfs.c pm_runtime_put(hba->dev); hba 168 drivers/scsi/ufs/ufs-sysfs.c struct ufs_hba *hba = dev_get_drvdata(dev); hba 170 drivers/scsi/ufs/ufs-sysfs.c if (!ufshcd_is_auto_hibern8_supported(hba)) hba 173 drivers/scsi/ufs/ufs-sysfs.c return snprintf(buf, PAGE_SIZE, "%d\n", ufshcd_ahit_to_us(hba->ahit)); hba 180 drivers/scsi/ufs/ufs-sysfs.c struct ufs_hba *hba = dev_get_drvdata(dev); hba 183 drivers/scsi/ufs/ufs-sysfs.c if (!ufshcd_is_auto_hibern8_supported(hba)) hba 192 drivers/scsi/ufs/ufs-sysfs.c ufshcd_auto_hibern8_update(hba, ufshcd_us_to_ahit(timer)); hba 220 drivers/scsi/ufs/ufs-sysfs.c static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba, hba 233 drivers/scsi/ufs/ufs-sysfs.c ret = ufshcd_read_desc_param(hba, desc_id, desc_index, hba 262 drivers/scsi/ufs/ufs-sysfs.c struct ufs_hba *hba = dev_get_drvdata(dev); \ hba 263 drivers/scsi/ufs/ufs-sysfs.c return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \ hba 456 
drivers/scsi/ufs/ufs-sysfs.c struct ufs_hba *hba = dev_get_drvdata(dev); \ hba 457 drivers/scsi/ufs/ufs-sysfs.c return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, \ hba 573 drivers/scsi/ufs/ufs-sysfs.c struct ufs_hba *hba = dev_get_drvdata(dev); \ hba 581 drivers/scsi/ufs/ufs-sysfs.c ret = ufshcd_query_descriptor_retry(hba, \ hba 591 drivers/scsi/ufs/ufs-sysfs.c ret = ufshcd_read_string_desc(hba, index, &desc_buf, \ hba 627 drivers/scsi/ufs/ufs-sysfs.c struct ufs_hba *hba = dev_get_drvdata(dev); \ hba 628 drivers/scsi/ufs/ufs-sysfs.c if (ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, \ hba 665 drivers/scsi/ufs/ufs-sysfs.c struct ufs_hba *hba = dev_get_drvdata(dev); \ hba 667 drivers/scsi/ufs/ufs-sysfs.c if (ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, \ hba 734 drivers/scsi/ufs/ufs-sysfs.c struct ufs_hba *hba = shost_priv(sdev->host); \ hba 738 drivers/scsi/ufs/ufs-sysfs.c return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \ hba 787 drivers/scsi/ufs/ufs-sysfs.c struct ufs_hba *hba = shost_priv(sdev->host); hba 790 drivers/scsi/ufs/ufs-sysfs.c if (ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, hba 9 drivers/scsi/ufs/ufs_bsg.c static int ufs_bsg_get_query_desc_size(struct ufs_hba *hba, int *desc_len, hba 19 drivers/scsi/ufs/ufs_bsg.c ret = ufshcd_map_desc_id_to_length(hba, desc_id, desc_len); hba 28 drivers/scsi/ufs/ufs_bsg.c static int ufs_bsg_verify_query_size(struct ufs_hba *hba, hba 36 drivers/scsi/ufs/ufs_bsg.c dev_err(hba->dev, "not enough space assigned\n"); hba 43 drivers/scsi/ufs/ufs_bsg.c static int ufs_bsg_alloc_desc_buffer(struct ufs_hba *hba, struct bsg_job *job, hba 56 drivers/scsi/ufs/ufs_bsg.c if (ufs_bsg_get_query_desc_size(hba, desc_len, qr)) { hba 57 drivers/scsi/ufs/ufs_bsg.c dev_err(hba->dev, "Illegal desc size\n"); hba 62 drivers/scsi/ufs/ufs_bsg.c dev_err(hba->dev, "Illegal desc size\n"); hba 85 drivers/scsi/ufs/ufs_bsg.c struct ufs_hba *hba = shost_priv(dev_to_shost(job->dev->parent)); hba 95 drivers/scsi/ufs/ufs_bsg.c ret = ufs_bsg_verify_query_size(hba, req_len, reply_len); hba 101 drivers/scsi/ufs/ufs_bsg.c pm_runtime_get_sync(hba->dev); hba 107 drivers/scsi/ufs/ufs_bsg.c ret = ufs_bsg_alloc_desc_buffer(hba, job, &desc_buff, hba 115 drivers/scsi/ufs/ufs_bsg.c ret = ufshcd_exec_raw_upiu_cmd(hba, &bsg_request->upiu_req, hba 119 drivers/scsi/ufs/ufs_bsg.c dev_err(hba->dev, hba 125 drivers/scsi/ufs/ufs_bsg.c ret = ufshcd_send_uic_cmd(hba, &uc); hba 127 drivers/scsi/ufs/ufs_bsg.c dev_err(hba->dev, hba 135 drivers/scsi/ufs/ufs_bsg.c dev_err(hba->dev, "unsupported msgcode 0x%x\n", msgcode); hba 140 drivers/scsi/ufs/ufs_bsg.c pm_runtime_put_sync(hba->dev); hba 168 drivers/scsi/ufs/ufs_bsg.c void ufs_bsg_remove(struct ufs_hba *hba) hba 170 drivers/scsi/ufs/ufs_bsg.c struct device *bsg_dev = &hba->bsg_dev; hba 172 drivers/scsi/ufs/ufs_bsg.c if (!hba->bsg_queue) hba 175 drivers/scsi/ufs/ufs_bsg.c bsg_remove_queue(hba->bsg_queue); hba 192 drivers/scsi/ufs/ufs_bsg.c int ufs_bsg_probe(struct ufs_hba *hba) hba 194 drivers/scsi/ufs/ufs_bsg.c struct device *bsg_dev = &hba->bsg_dev; hba 195 drivers/scsi/ufs/ufs_bsg.c struct Scsi_Host *shost = hba->host; hba 217 drivers/scsi/ufs/ufs_bsg.c hba->bsg_queue = q; hba 16 drivers/scsi/ufs/ufs_bsg.h void ufs_bsg_remove(struct ufs_hba *hba); hba 17 drivers/scsi/ufs/ufs_bsg.h int ufs_bsg_probe(struct ufs_hba *hba); hba 19 drivers/scsi/ufs/ufs_bsg.h static inline void ufs_bsg_remove(struct ufs_hba *hba) {} hba 20 drivers/scsi/ufs/ufs_bsg.h static inline int ufs_bsg_probe(struct ufs_hba *hba) {return 0; 
} hba 16 drivers/scsi/ufs/ufshcd-dwc.c int ufshcd_dwc_dme_set_attrs(struct ufs_hba *hba, hba 23 drivers/scsi/ufs/ufshcd-dwc.c ret = ufshcd_dme_set_attr(hba, v[attr_node].attr_sel, hba 42 drivers/scsi/ufs/ufshcd-dwc.c static void ufshcd_dwc_program_clk_div(struct ufs_hba *hba, u32 divider_val) hba 44 drivers/scsi/ufs/ufshcd-dwc.c ufshcd_writel(hba, divider_val, DWC_UFS_REG_HCLKDIV); hba 54 drivers/scsi/ufs/ufshcd-dwc.c static int ufshcd_dwc_link_is_up(struct ufs_hba *hba) hba 58 drivers/scsi/ufs/ufshcd-dwc.c ufshcd_dme_get(hba, UIC_ARG_MIB(VS_POWERSTATE), &dme_result); hba 61 drivers/scsi/ufs/ufshcd-dwc.c ufshcd_set_link_active(hba); hba 81 drivers/scsi/ufs/ufshcd-dwc.c static int ufshcd_dwc_connection_setup(struct ufs_hba *hba) hba 104 drivers/scsi/ufs/ufshcd-dwc.c return ufshcd_dwc_dme_set_attrs(hba, setup_attrs, ARRAY_SIZE(setup_attrs)); hba 115 drivers/scsi/ufs/ufshcd-dwc.c int ufshcd_dwc_link_startup_notify(struct ufs_hba *hba, hba 121 drivers/scsi/ufs/ufshcd-dwc.c ufshcd_dwc_program_clk_div(hba, DWC_UFS_REG_HCLKDIV_DIV_125); hba 123 drivers/scsi/ufs/ufshcd-dwc.c if (hba->vops->phy_initialization) { hba 124 drivers/scsi/ufs/ufshcd-dwc.c err = hba->vops->phy_initialization(hba); hba 126 drivers/scsi/ufs/ufshcd-dwc.c dev_err(hba->dev, "Phy setup failed (%d)\n", hba 132 drivers/scsi/ufs/ufshcd-dwc.c err = ufshcd_dwc_link_is_up(hba); hba 134 drivers/scsi/ufs/ufshcd-dwc.c dev_err(hba->dev, "Link is not up\n"); hba 138 drivers/scsi/ufs/ufshcd-dwc.c err = ufshcd_dwc_connection_setup(hba); hba 140 drivers/scsi/ufs/ufshcd-dwc.c dev_err(hba->dev, "Connection setup failed (%d)\n", hba 19 drivers/scsi/ufs/ufshcd-dwc.h int ufshcd_dwc_link_startup_notify(struct ufs_hba *hba, hba 21 drivers/scsi/ufs/ufshcd-dwc.h int ufshcd_dwc_dme_set_attrs(struct ufs_hba *hba, hba 40 drivers/scsi/ufs/ufshcd-pci.c static int ufs_intel_disable_lcc(struct ufs_hba *hba) hba 45 drivers/scsi/ufs/ufshcd-pci.c ufshcd_dme_get(hba, attr, &lcc_enable); hba 47 drivers/scsi/ufs/ufshcd-pci.c ufshcd_dme_set(hba, attr, 0); hba 52 drivers/scsi/ufs/ufshcd-pci.c static int ufs_intel_link_startup_notify(struct ufs_hba *hba, hba 59 drivers/scsi/ufs/ufshcd-pci.c err = ufs_intel_disable_lcc(hba); hba 132 drivers/scsi/ufs/ufshcd-pci.c struct ufs_hba *hba = pci_get_drvdata(pdev); hba 136 drivers/scsi/ufs/ufshcd-pci.c ufshcd_remove(hba); hba 137 drivers/scsi/ufs/ufshcd-pci.c ufshcd_dealloc_host(hba); hba 150 drivers/scsi/ufs/ufshcd-pci.c struct ufs_hba *hba; hba 170 drivers/scsi/ufs/ufshcd-pci.c err = ufshcd_alloc_host(&pdev->dev, &hba); hba 176 drivers/scsi/ufs/ufshcd-pci.c hba->vops = (struct ufs_hba_variant_ops *)id->driver_data; hba 178 drivers/scsi/ufs/ufshcd-pci.c err = ufshcd_init(hba, mmio_base, pdev->irq); hba 181 drivers/scsi/ufs/ufshcd-pci.c ufshcd_dealloc_host(hba); hba 185 drivers/scsi/ufs/ufshcd-pci.c pci_set_drvdata(pdev, hba); hba 46 drivers/scsi/ufs/ufshcd-pltfrm.c static int ufshcd_parse_clock_info(struct ufs_hba *hba) hba 51 drivers/scsi/ufs/ufshcd-pltfrm.c struct device *dev = hba->dev; hba 122 drivers/scsi/ufs/ufshcd-pltfrm.c list_add_tail(&clki->list, &hba->clk_list_head); hba 194 drivers/scsi/ufs/ufshcd-pltfrm.c static int ufshcd_parse_regulator_info(struct ufs_hba *hba) hba 197 drivers/scsi/ufs/ufshcd-pltfrm.c struct device *dev = hba->dev; hba 198 drivers/scsi/ufs/ufshcd-pltfrm.c struct ufs_vreg_info *info = &hba->vreg_info; hba 270 drivers/scsi/ufs/ufshcd-pltfrm.c static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba) hba 272 drivers/scsi/ufs/ufshcd-pltfrm.c struct device *dev = hba->dev; hba 276 
drivers/scsi/ufs/ufshcd-pltfrm.c &hba->lanes_per_direction); hba 278 drivers/scsi/ufs/ufshcd-pltfrm.c dev_dbg(hba->dev, hba 281 drivers/scsi/ufs/ufshcd-pltfrm.c hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION; hba 392 drivers/scsi/ufs/ufshcd-pltfrm.c struct ufs_hba *hba; hba 410 drivers/scsi/ufs/ufshcd-pltfrm.c err = ufshcd_alloc_host(dev, &hba); hba 416 drivers/scsi/ufs/ufshcd-pltfrm.c hba->vops = vops; hba 418 drivers/scsi/ufs/ufshcd-pltfrm.c err = ufshcd_parse_clock_info(hba); hba 424 drivers/scsi/ufs/ufshcd-pltfrm.c err = ufshcd_parse_regulator_info(hba); hba 431 drivers/scsi/ufs/ufshcd-pltfrm.c ufshcd_init_lanes_per_dir(hba); hba 433 drivers/scsi/ufs/ufshcd-pltfrm.c err = ufshcd_init(hba, mmio_base, irq); hba 439 drivers/scsi/ufs/ufshcd-pltfrm.c platform_set_drvdata(pdev, hba); hba 447 drivers/scsi/ufs/ufshcd-pltfrm.c ufshcd_dealloc_host(hba); hba 108 drivers/scsi/ufs/ufshcd.c int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, hba 122 drivers/scsi/ufs/ufshcd.c regs[pos / 4] = ufshcd_readl(hba, offset + pos); hba 240 drivers/scsi/ufs/ufshcd.c static void ufshcd_tmc_handler(struct ufs_hba *hba); hba 242 drivers/scsi/ufs/ufshcd.c static int ufshcd_reset_and_restore(struct ufs_hba *hba); hba 244 drivers/scsi/ufs/ufshcd.c static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag); hba 245 drivers/scsi/ufs/ufshcd.c static void ufshcd_hba_exit(struct ufs_hba *hba); hba 246 drivers/scsi/ufs/ufshcd.c static int ufshcd_probe_hba(struct ufs_hba *hba); hba 247 drivers/scsi/ufs/ufshcd.c static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, hba 249 drivers/scsi/ufs/ufshcd.c static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on); hba 250 drivers/scsi/ufs/ufshcd.c static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba); hba 251 drivers/scsi/ufs/ufshcd.c static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba); hba 252 drivers/scsi/ufs/ufshcd.c static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba); hba 253 drivers/scsi/ufs/ufshcd.c static int ufshcd_host_reset_and_restore(struct ufs_hba *hba); hba 254 drivers/scsi/ufs/ufshcd.c static void ufshcd_resume_clkscaling(struct ufs_hba *hba); hba 255 drivers/scsi/ufs/ufshcd.c static void ufshcd_suspend_clkscaling(struct ufs_hba *hba); hba 256 drivers/scsi/ufs/ufshcd.c static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba); hba 257 drivers/scsi/ufs/ufshcd.c static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up); hba 259 drivers/scsi/ufs/ufshcd.c static int ufshcd_change_power_mode(struct ufs_hba *hba, hba 261 drivers/scsi/ufs/ufshcd.c static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag) hba 263 drivers/scsi/ufs/ufshcd.c return tag >= 0 && tag < hba->nutrs; hba 266 drivers/scsi/ufs/ufshcd.c static inline int ufshcd_enable_irq(struct ufs_hba *hba) hba 270 drivers/scsi/ufs/ufshcd.c if (!hba->is_irq_enabled) { hba 271 drivers/scsi/ufs/ufshcd.c ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba 272 drivers/scsi/ufs/ufshcd.c hba); hba 274 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: request_irq failed, ret=%d\n", hba 276 drivers/scsi/ufs/ufshcd.c hba->is_irq_enabled = true; hba 282 drivers/scsi/ufs/ufshcd.c static inline void ufshcd_disable_irq(struct ufs_hba *hba) hba 284 drivers/scsi/ufs/ufshcd.c if (hba->is_irq_enabled) { hba 285 drivers/scsi/ufs/ufshcd.c free_irq(hba->irq, hba); hba 286 drivers/scsi/ufs/ufshcd.c hba->is_irq_enabled = false; hba 290 drivers/scsi/ufs/ufshcd.c static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba) hba 292 
drivers/scsi/ufs/ufshcd.c if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt)) hba 293 drivers/scsi/ufs/ufshcd.c scsi_unblock_requests(hba->host); hba 296 drivers/scsi/ufs/ufshcd.c static void ufshcd_scsi_block_requests(struct ufs_hba *hba) hba 298 drivers/scsi/ufs/ufshcd.c if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1) hba 299 drivers/scsi/ufs/ufshcd.c scsi_block_requests(hba->host); hba 302 drivers/scsi/ufs/ufshcd.c static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag, hba 305 drivers/scsi/ufs/ufshcd.c struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr; hba 307 drivers/scsi/ufs/ufshcd.c trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb); hba 310 drivers/scsi/ufs/ufshcd.c static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag, hba 313 drivers/scsi/ufs/ufshcd.c struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr; hba 315 drivers/scsi/ufs/ufshcd.c trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr); hba 318 drivers/scsi/ufs/ufshcd.c static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag, hba 321 drivers/scsi/ufs/ufshcd.c int off = (int)tag - hba->nutrs; hba 322 drivers/scsi/ufs/ufshcd.c struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off]; hba 324 drivers/scsi/ufs/ufshcd.c trace_ufshcd_upiu(dev_name(hba->dev), str, &descp->req_header, hba 328 drivers/scsi/ufs/ufshcd.c static void ufshcd_add_command_trace(struct ufs_hba *hba, hba 334 drivers/scsi/ufs/ufshcd.c struct ufshcd_lrb *lrbp = &hba->lrb[tag]; hba 340 drivers/scsi/ufs/ufshcd.c ufshcd_add_cmd_upiu_trace(hba, tag, str); hba 346 drivers/scsi/ufs/ufshcd.c ufshcd_add_cmd_upiu_trace(hba, tag, str); hba 361 drivers/scsi/ufs/ufshcd.c intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS); hba 362 drivers/scsi/ufs/ufshcd.c doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); hba 363 drivers/scsi/ufs/ufshcd.c trace_ufshcd_command(dev_name(hba->dev), str, tag, hba 367 drivers/scsi/ufs/ufshcd.c static void ufshcd_print_clk_freqs(struct ufs_hba *hba) hba 370 drivers/scsi/ufs/ufshcd.c struct list_head *head = &hba->clk_list_head; hba 378 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "clk: %s, rate: %u\n", hba 383 drivers/scsi/ufs/ufshcd.c static void ufshcd_print_err_hist(struct ufs_hba *hba, hba 395 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p, hba 401 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "No record of %s errors\n", err_name); hba 404 drivers/scsi/ufs/ufshcd.c static void ufshcd_print_host_regs(struct ufs_hba *hba) hba 406 drivers/scsi/ufs/ufshcd.c ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: "); hba 407 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n", hba 408 drivers/scsi/ufs/ufshcd.c hba->ufs_version, hba->capabilities); hba 409 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, hba 411 drivers/scsi/ufs/ufshcd.c (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks); hba 412 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, hba 414 drivers/scsi/ufs/ufshcd.c ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp), hba 415 drivers/scsi/ufs/ufshcd.c hba->ufs_stats.hibern8_exit_cnt); hba 417 drivers/scsi/ufs/ufshcd.c ufshcd_print_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err"); hba 418 drivers/scsi/ufs/ufshcd.c ufshcd_print_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err"); hba 419 drivers/scsi/ufs/ufshcd.c ufshcd_print_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err"); hba 420 drivers/scsi/ufs/ufshcd.c ufshcd_print_err_hist(hba, 
&hba->ufs_stats.tl_err, "tl_err"); hba 421 drivers/scsi/ufs/ufshcd.c ufshcd_print_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err"); hba 422 drivers/scsi/ufs/ufshcd.c ufshcd_print_err_hist(hba, &hba->ufs_stats.auto_hibern8_err, hba 424 drivers/scsi/ufs/ufshcd.c ufshcd_print_err_hist(hba, &hba->ufs_stats.fatal_err, "fatal_err"); hba 425 drivers/scsi/ufs/ufshcd.c ufshcd_print_err_hist(hba, &hba->ufs_stats.link_startup_err, hba 427 drivers/scsi/ufs/ufshcd.c ufshcd_print_err_hist(hba, &hba->ufs_stats.resume_err, "resume_fail"); hba 428 drivers/scsi/ufs/ufshcd.c ufshcd_print_err_hist(hba, &hba->ufs_stats.suspend_err, hba 430 drivers/scsi/ufs/ufshcd.c ufshcd_print_err_hist(hba, &hba->ufs_stats.dev_reset, "dev_reset"); hba 431 drivers/scsi/ufs/ufshcd.c ufshcd_print_err_hist(hba, &hba->ufs_stats.host_reset, "host_reset"); hba 432 drivers/scsi/ufs/ufshcd.c ufshcd_print_err_hist(hba, &hba->ufs_stats.task_abort, "task_abort"); hba 434 drivers/scsi/ufs/ufshcd.c ufshcd_print_clk_freqs(hba); hba 436 drivers/scsi/ufs/ufshcd.c if (hba->vops && hba->vops->dbg_register_dump) hba 437 drivers/scsi/ufs/ufshcd.c hba->vops->dbg_register_dump(hba); hba 441 drivers/scsi/ufs/ufshcd.c void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt) hba 447 drivers/scsi/ufs/ufshcd.c for_each_set_bit(tag, &bitmap, hba->nutrs) { hba 448 drivers/scsi/ufs/ufshcd.c lrbp = &hba->lrb[tag]; hba 450 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n", hba 452 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n", hba 454 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, hba 460 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag, hba 464 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag, hba 471 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, hba 482 drivers/scsi/ufs/ufshcd.c static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap) hba 486 drivers/scsi/ufs/ufshcd.c for_each_set_bit(tag, &bitmap, hba->nutmrs) { hba 487 drivers/scsi/ufs/ufshcd.c struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag]; hba 489 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag); hba 494 drivers/scsi/ufs/ufshcd.c static void ufshcd_print_host_state(struct ufs_hba *hba) hba 496 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state); hba 497 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n", hba 498 drivers/scsi/ufs/ufshcd.c hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks); hba 499 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n", hba 500 drivers/scsi/ufs/ufshcd.c hba->saved_err, hba->saved_uic_err); hba 501 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n", hba 502 drivers/scsi/ufs/ufshcd.c hba->curr_dev_pwr_mode, hba->uic_link_state); hba 503 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n", hba 504 drivers/scsi/ufs/ufshcd.c hba->pm_op_in_progress, hba->is_sys_suspended); hba 505 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n", hba 506 drivers/scsi/ufs/ufshcd.c hba->auto_bkops_enabled, hba->host->host_self_blocked); hba 507 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state); hba 508 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "error handling flags=0x%x, req. 
abort count=%d\n", hba 509 drivers/scsi/ufs/ufshcd.c hba->eh_flags, hba->req_abort_count); hba 510 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n", hba 511 drivers/scsi/ufs/ufshcd.c hba->capabilities, hba->caps); hba 512 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks, hba 513 drivers/scsi/ufs/ufshcd.c hba->dev_quirks); hba 521 drivers/scsi/ufs/ufshcd.c static void ufshcd_print_pwr_info(struct ufs_hba *hba) hba 533 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n", hba 535 drivers/scsi/ufs/ufshcd.c hba->pwr_info.gear_rx, hba->pwr_info.gear_tx, hba 536 drivers/scsi/ufs/ufshcd.c hba->pwr_info.lane_rx, hba->pwr_info.lane_tx, hba 537 drivers/scsi/ufs/ufshcd.c names[hba->pwr_info.pwr_rx], hba 538 drivers/scsi/ufs/ufshcd.c names[hba->pwr_info.pwr_tx], hba 539 drivers/scsi/ufs/ufshcd.c hba->pwr_info.hs_rate); hba 554 drivers/scsi/ufs/ufshcd.c int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, hba 564 drivers/scsi/ufs/ufshcd.c while ((ufshcd_readl(hba, reg) & mask) != val) { hba 570 drivers/scsi/ufs/ufshcd.c if ((ufshcd_readl(hba, reg) & mask) != val) hba 585 drivers/scsi/ufs/ufshcd.c static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba) hba 589 drivers/scsi/ufs/ufshcd.c switch (hba->ufs_version) { hba 612 drivers/scsi/ufs/ufshcd.c static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba) hba 614 drivers/scsi/ufs/ufshcd.c if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION) hba 615 drivers/scsi/ufs/ufshcd.c return ufshcd_vops_get_ufs_hci_version(hba); hba 617 drivers/scsi/ufs/ufshcd.c return ufshcd_readl(hba, REG_UFS_VERSION); hba 627 drivers/scsi/ufs/ufshcd.c static inline bool ufshcd_is_device_present(struct ufs_hba *hba) hba 629 drivers/scsi/ufs/ufshcd.c return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & hba 654 drivers/scsi/ufs/ufshcd.c static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot) hba 663 drivers/scsi/ufs/ufshcd.c tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs); hba 664 drivers/scsi/ufs/ufshcd.c if (tag >= hba->nutmrs) hba 666 drivers/scsi/ufs/ufshcd.c } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use)); hba 674 drivers/scsi/ufs/ufshcd.c static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot) hba 676 drivers/scsi/ufs/ufshcd.c clear_bit_unlock(slot, &hba->tm_slots_in_use); hba 684 drivers/scsi/ufs/ufshcd.c static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos) hba 686 drivers/scsi/ufs/ufshcd.c if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR) hba 687 drivers/scsi/ufs/ufshcd.c ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR); hba 689 drivers/scsi/ufs/ufshcd.c ufshcd_writel(hba, ~(1 << pos), hba 698 drivers/scsi/ufs/ufshcd.c static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos) hba 700 drivers/scsi/ufs/ufshcd.c if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR) hba 701 drivers/scsi/ufs/ufshcd.c ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR); hba 703 drivers/scsi/ufs/ufshcd.c ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR); hba 711 drivers/scsi/ufs/ufshcd.c static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag) hba 713 drivers/scsi/ufs/ufshcd.c __clear_bit(tag, &hba->outstanding_reqs); hba 734 drivers/scsi/ufs/ufshcd.c static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba) hba 736 drivers/scsi/ufs/ufshcd.c return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) & hba 747 
drivers/scsi/ufs/ufshcd.c static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba) hba 749 drivers/scsi/ufs/ufshcd.c return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3); hba 809 drivers/scsi/ufs/ufshcd.c ufshcd_reset_intr_aggr(struct ufs_hba *hba) hba 811 drivers/scsi/ufs/ufshcd.c ufshcd_writel(hba, INT_AGGR_ENABLE | hba 823 drivers/scsi/ufs/ufshcd.c ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout) hba 825 drivers/scsi/ufs/ufshcd.c ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE | hba 835 drivers/scsi/ufs/ufshcd.c static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba) hba 837 drivers/scsi/ufs/ufshcd.c ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); hba 846 drivers/scsi/ufs/ufshcd.c static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba) hba 848 drivers/scsi/ufs/ufshcd.c ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT, hba 850 drivers/scsi/ufs/ufshcd.c ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT, hba 858 drivers/scsi/ufs/ufshcd.c static inline void ufshcd_hba_start(struct ufs_hba *hba) hba 860 drivers/scsi/ufs/ufshcd.c ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE); hba 869 drivers/scsi/ufs/ufshcd.c static inline bool ufshcd_is_hba_active(struct ufs_hba *hba) hba 871 drivers/scsi/ufs/ufshcd.c return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE) hba 875 drivers/scsi/ufs/ufshcd.c u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba) hba 878 drivers/scsi/ufs/ufshcd.c if ((hba->ufs_version == UFSHCI_VERSION_10) || hba 879 drivers/scsi/ufs/ufshcd.c (hba->ufs_version == UFSHCI_VERSION_11)) hba 886 drivers/scsi/ufs/ufshcd.c static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba) hba 897 drivers/scsi/ufs/ufshcd.c if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6) hba 903 drivers/scsi/ufs/ufshcd.c static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up) hba 907 drivers/scsi/ufs/ufshcd.c struct list_head *head = &hba->clk_list_head; hba 914 drivers/scsi/ufs/ufshcd.c ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE); hba 927 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", hba 932 drivers/scsi/ufs/ufshcd.c trace_ufshcd_clk_scaling(dev_name(hba->dev), hba 946 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", hba 951 drivers/scsi/ufs/ufshcd.c trace_ufshcd_clk_scaling(dev_name(hba->dev), hba 958 drivers/scsi/ufs/ufshcd.c dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__, hba 962 drivers/scsi/ufs/ufshcd.c ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE); hba 966 drivers/scsi/ufs/ufshcd.c trace_ufshcd_profile_clk_scaling(dev_name(hba->dev), hba 979 drivers/scsi/ufs/ufshcd.c static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba, hba 983 drivers/scsi/ufs/ufshcd.c struct list_head *head = &hba->clk_list_head; hba 1005 drivers/scsi/ufs/ufshcd.c static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, hba 1015 drivers/scsi/ufs/ufshcd.c ufshcd_hold(hba, false); hba 1016 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags); hba 1023 drivers/scsi/ufs/ufshcd.c if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) { hba 1028 drivers/scsi/ufs/ufshcd.c tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); hba 1029 drivers/scsi/ufs/ufshcd.c tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); hba 1037 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 1049 drivers/scsi/ufs/ufshcd.c 
spin_lock_irqsave(hba->host->host_lock, flags); hba 1053 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, hba 1059 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 1060 drivers/scsi/ufs/ufshcd.c ufshcd_release(hba); hba 1073 drivers/scsi/ufs/ufshcd.c static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up) hba 1080 drivers/scsi/ufs/ufshcd.c memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info, hba 1083 drivers/scsi/ufs/ufshcd.c memcpy(&new_pwr_info, &hba->pwr_info, hba 1086 drivers/scsi/ufs/ufshcd.c if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN hba 1087 drivers/scsi/ufs/ufshcd.c || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) { hba 1089 drivers/scsi/ufs/ufshcd.c memcpy(&hba->clk_scaling.saved_pwr_info.info, hba 1090 drivers/scsi/ufs/ufshcd.c &hba->pwr_info, hba 1100 drivers/scsi/ufs/ufshcd.c ret = ufshcd_change_power_mode(hba, &new_pwr_info); hba 1103 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)", hba 1105 drivers/scsi/ufs/ufshcd.c hba->pwr_info.gear_tx, hba->pwr_info.gear_rx, hba 1111 drivers/scsi/ufs/ufshcd.c static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba) hba 1119 drivers/scsi/ufs/ufshcd.c ufshcd_scsi_block_requests(hba); hba 1120 drivers/scsi/ufs/ufshcd.c down_write(&hba->clk_scaling_lock); hba 1121 drivers/scsi/ufs/ufshcd.c if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) { hba 1123 drivers/scsi/ufs/ufshcd.c up_write(&hba->clk_scaling_lock); hba 1124 drivers/scsi/ufs/ufshcd.c ufshcd_scsi_unblock_requests(hba); hba 1130 drivers/scsi/ufs/ufshcd.c static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba) hba 1132 drivers/scsi/ufs/ufshcd.c up_write(&hba->clk_scaling_lock); hba 1133 drivers/scsi/ufs/ufshcd.c ufshcd_scsi_unblock_requests(hba); hba 1145 drivers/scsi/ufs/ufshcd.c static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up) hba 1150 drivers/scsi/ufs/ufshcd.c ufshcd_hold(hba, false); hba 1152 drivers/scsi/ufs/ufshcd.c ret = ufshcd_clock_scaling_prepare(hba); hba 1158 drivers/scsi/ufs/ufshcd.c ret = ufshcd_scale_gear(hba, false); hba 1163 drivers/scsi/ufs/ufshcd.c ret = ufshcd_scale_clks(hba, scale_up); hba 1166 drivers/scsi/ufs/ufshcd.c ufshcd_scale_gear(hba, true); hba 1172 drivers/scsi/ufs/ufshcd.c ret = ufshcd_scale_gear(hba, true); hba 1174 drivers/scsi/ufs/ufshcd.c ufshcd_scale_clks(hba, false); hba 1179 drivers/scsi/ufs/ufshcd.c ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE); hba 1182 drivers/scsi/ufs/ufshcd.c ufshcd_clock_scaling_unprepare(hba); hba 1183 drivers/scsi/ufs/ufshcd.c ufshcd_release(hba); hba 1189 drivers/scsi/ufs/ufshcd.c struct ufs_hba *hba = container_of(work, struct ufs_hba, hba 1193 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, irq_flags); hba 1194 drivers/scsi/ufs/ufshcd.c if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) { hba 1195 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, irq_flags); hba 1198 drivers/scsi/ufs/ufshcd.c hba->clk_scaling.is_suspended = true; hba 1199 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, irq_flags); hba 1201 drivers/scsi/ufs/ufshcd.c __ufshcd_suspend_clkscaling(hba); hba 1206 drivers/scsi/ufs/ufshcd.c struct ufs_hba *hba = container_of(work, struct ufs_hba, hba 1210 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, irq_flags); hba 1211 drivers/scsi/ufs/ufshcd.c if (!hba->clk_scaling.is_suspended) { hba 1212 drivers/scsi/ufs/ufshcd.c 
spin_unlock_irqrestore(hba->host->host_lock, irq_flags); hba 1215 drivers/scsi/ufs/ufshcd.c hba->clk_scaling.is_suspended = false; hba 1216 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, irq_flags); hba 1218 drivers/scsi/ufs/ufshcd.c devfreq_resume_device(hba->devfreq); hba 1225 drivers/scsi/ufs/ufshcd.c struct ufs_hba *hba = dev_get_drvdata(dev); hba 1228 drivers/scsi/ufs/ufshcd.c struct list_head *clk_list = &hba->clk_list_head; hba 1232 drivers/scsi/ufs/ufshcd.c if (!ufshcd_is_clkscaling_supported(hba)) hba 1235 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, irq_flags); hba 1236 drivers/scsi/ufs/ufshcd.c if (ufshcd_eh_in_progress(hba)) { hba 1237 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, irq_flags); hba 1241 drivers/scsi/ufs/ufshcd.c if (!hba->clk_scaling.active_reqs) hba 1245 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, irq_flags); hba 1249 drivers/scsi/ufs/ufshcd.c clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list); hba 1251 drivers/scsi/ufs/ufshcd.c if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) { hba 1252 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, irq_flags); hba 1256 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, irq_flags); hba 1259 drivers/scsi/ufs/ufshcd.c ret = ufshcd_devfreq_scale(hba, scale_up); hba 1261 drivers/scsi/ufs/ufshcd.c trace_ufshcd_profile_clk_scaling(dev_name(hba->dev), hba 1267 drivers/scsi/ufs/ufshcd.c queue_work(hba->clk_scaling.workq, hba 1268 drivers/scsi/ufs/ufshcd.c &hba->clk_scaling.suspend_work); hba 1277 drivers/scsi/ufs/ufshcd.c struct ufs_hba *hba = dev_get_drvdata(dev); hba 1278 drivers/scsi/ufs/ufshcd.c struct ufs_clk_scaling *scaling = &hba->clk_scaling; hba 1281 drivers/scsi/ufs/ufshcd.c if (!ufshcd_is_clkscaling_supported(hba)) hba 1286 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags); hba 1301 drivers/scsi/ufs/ufshcd.c if (hba->outstanding_reqs) { hba 1308 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 1318 drivers/scsi/ufs/ufshcd.c static int ufshcd_devfreq_init(struct ufs_hba *hba) hba 1320 drivers/scsi/ufs/ufshcd.c struct list_head *clk_list = &hba->clk_list_head; hba 1330 drivers/scsi/ufs/ufshcd.c dev_pm_opp_add(hba->dev, clki->min_freq, 0); hba 1331 drivers/scsi/ufs/ufshcd.c dev_pm_opp_add(hba->dev, clki->max_freq, 0); hba 1333 drivers/scsi/ufs/ufshcd.c devfreq = devfreq_add_device(hba->dev, hba 1339 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "Unable to register with devfreq %d\n", ret); hba 1341 drivers/scsi/ufs/ufshcd.c dev_pm_opp_remove(hba->dev, clki->min_freq); hba 1342 drivers/scsi/ufs/ufshcd.c dev_pm_opp_remove(hba->dev, clki->max_freq); hba 1346 drivers/scsi/ufs/ufshcd.c hba->devfreq = devfreq; hba 1351 drivers/scsi/ufs/ufshcd.c static void ufshcd_devfreq_remove(struct ufs_hba *hba) hba 1353 drivers/scsi/ufs/ufshcd.c struct list_head *clk_list = &hba->clk_list_head; hba 1356 drivers/scsi/ufs/ufshcd.c if (!hba->devfreq) hba 1359 drivers/scsi/ufs/ufshcd.c devfreq_remove_device(hba->devfreq); hba 1360 drivers/scsi/ufs/ufshcd.c hba->devfreq = NULL; hba 1363 drivers/scsi/ufs/ufshcd.c dev_pm_opp_remove(hba->dev, clki->min_freq); hba 1364 drivers/scsi/ufs/ufshcd.c dev_pm_opp_remove(hba->dev, clki->max_freq); hba 1367 drivers/scsi/ufs/ufshcd.c static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba) hba 1371 drivers/scsi/ufs/ufshcd.c devfreq_suspend_device(hba->devfreq); hba 1372 
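
ufshcd_devfreq_target() above only enters the scaling machinery when ufshcd_is_devfreq_scaling_required() says the request would change anything (hba 1251). A toy single-clock model of that check; the frequencies are illustrative, not values from the driver:

#include <stdbool.h>
#include <stdio.h>

struct clk_info { unsigned long cur, min, max; };

static bool scaling_required(const struct clk_info *c, bool scale_up)
{
    if (scale_up)
        return c->cur < c->max;  /* room to go faster */
    return c->cur > c->min;      /* room to go slower */
}

int main(void)
{
    struct clk_info ref = { .cur = 200000000, .min = 100000000, .max = 200000000 };

    printf("scale up needed:   %d\n", scaling_required(&ref, true));   /* 0: already max */
    printf("scale down needed: %d\n", scaling_required(&ref, false));  /* 1 */
    return 0;
}
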
drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags); hba 1373 drivers/scsi/ufs/ufshcd.c hba->clk_scaling.window_start_t = 0; hba 1374 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 1377 drivers/scsi/ufs/ufshcd.c static void ufshcd_suspend_clkscaling(struct ufs_hba *hba) hba 1382 drivers/scsi/ufs/ufshcd.c if (!ufshcd_is_clkscaling_supported(hba)) hba 1385 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags); hba 1386 drivers/scsi/ufs/ufshcd.c if (!hba->clk_scaling.is_suspended) { hba 1388 drivers/scsi/ufs/ufshcd.c hba->clk_scaling.is_suspended = true; hba 1390 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 1393 drivers/scsi/ufs/ufshcd.c __ufshcd_suspend_clkscaling(hba); hba 1396 drivers/scsi/ufs/ufshcd.c static void ufshcd_resume_clkscaling(struct ufs_hba *hba) hba 1401 drivers/scsi/ufs/ufshcd.c if (!ufshcd_is_clkscaling_supported(hba)) hba 1404 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags); hba 1405 drivers/scsi/ufs/ufshcd.c if (hba->clk_scaling.is_suspended) { hba 1407 drivers/scsi/ufs/ufshcd.c hba->clk_scaling.is_suspended = false; hba 1409 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 1412 drivers/scsi/ufs/ufshcd.c devfreq_resume_device(hba->devfreq); hba 1418 drivers/scsi/ufs/ufshcd.c struct ufs_hba *hba = dev_get_drvdata(dev); hba 1420 drivers/scsi/ufs/ufshcd.c return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed); hba 1426 drivers/scsi/ufs/ufshcd.c struct ufs_hba *hba = dev_get_drvdata(dev); hba 1434 drivers/scsi/ufs/ufshcd.c if (value == hba->clk_scaling.is_allowed) hba 1437 drivers/scsi/ufs/ufshcd.c pm_runtime_get_sync(hba->dev); hba 1438 drivers/scsi/ufs/ufshcd.c ufshcd_hold(hba, false); hba 1440 drivers/scsi/ufs/ufshcd.c cancel_work_sync(&hba->clk_scaling.suspend_work); hba 1441 drivers/scsi/ufs/ufshcd.c cancel_work_sync(&hba->clk_scaling.resume_work); hba 1443 drivers/scsi/ufs/ufshcd.c hba->clk_scaling.is_allowed = value; hba 1446 drivers/scsi/ufs/ufshcd.c ufshcd_resume_clkscaling(hba); hba 1448 drivers/scsi/ufs/ufshcd.c ufshcd_suspend_clkscaling(hba); hba 1449 drivers/scsi/ufs/ufshcd.c err = ufshcd_devfreq_scale(hba, true); hba 1451 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: failed to scale clocks up %d\n", hba 1455 drivers/scsi/ufs/ufshcd.c ufshcd_release(hba); hba 1456 drivers/scsi/ufs/ufshcd.c pm_runtime_put_sync(hba->dev); hba 1461 drivers/scsi/ufs/ufshcd.c static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba) hba 1463 drivers/scsi/ufs/ufshcd.c hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show; hba 1464 drivers/scsi/ufs/ufshcd.c hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store; hba 1465 drivers/scsi/ufs/ufshcd.c sysfs_attr_init(&hba->clk_scaling.enable_attr.attr); hba 1466 drivers/scsi/ufs/ufshcd.c hba->clk_scaling.enable_attr.attr.name = "clkscale_enable"; hba 1467 drivers/scsi/ufs/ufshcd.c hba->clk_scaling.enable_attr.attr.mode = 0644; hba 1468 drivers/scsi/ufs/ufshcd.c if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr)) hba 1469 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n"); hba 1476 drivers/scsi/ufs/ufshcd.c struct ufs_hba *hba = container_of(work, struct ufs_hba, hba 1479 drivers/scsi/ufs/ufshcd.c cancel_delayed_work_sync(&hba->clk_gating.gate_work); hba 1481 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags); hba 1482 drivers/scsi/ufs/ufshcd.c if 
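
The clkscale_enable attribute (hba 1418-1456) follows the usual sysfs store shape: parse, normalise to a boolean, and bail out early when the value does not change, since the change path is expensive (runtime resume, hold, work cancellation). A user-space analogue with strtoul standing in for kstrtou32:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool clk_scaling_allowed = true;

static int clkscale_enable_store(const char *buf)
{
    char *end;
    unsigned long value = strtoul(buf, &end, 0);

    if (end == buf)
        return -1;                     /* not a number: reject the write */

    value = !!value;                   /* anything nonzero means "on" */
    if (value == clk_scaling_allowed)
        return 0;                      /* no change, skip the heavy path */

    clk_scaling_allowed = value;       /* the driver suspends/resumes scaling here */
    return 0;
}

int main(void)
{
    clkscale_enable_store("0");
    printf("allowed=%d\n", clk_scaling_allowed);
    return 0;
}
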
(hba->clk_gating.state == CLKS_ON) { hba 1483 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 1487 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 1488 drivers/scsi/ufs/ufshcd.c ufshcd_setup_clocks(hba, true); hba 1491 drivers/scsi/ufs/ufshcd.c if (ufshcd_can_hibern8_during_gating(hba)) { hba 1493 drivers/scsi/ufs/ufshcd.c hba->clk_gating.is_suspended = true; hba 1494 drivers/scsi/ufs/ufshcd.c if (ufshcd_is_link_hibern8(hba)) { hba 1495 drivers/scsi/ufs/ufshcd.c ret = ufshcd_uic_hibern8_exit(hba); hba 1497 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: hibern8 exit failed %d\n", hba 1500 drivers/scsi/ufs/ufshcd.c ufshcd_set_link_active(hba); hba 1502 drivers/scsi/ufs/ufshcd.c hba->clk_gating.is_suspended = false; hba 1505 drivers/scsi/ufs/ufshcd.c ufshcd_scsi_unblock_requests(hba); hba 1514 drivers/scsi/ufs/ufshcd.c int ufshcd_hold(struct ufs_hba *hba, bool async) hba 1519 drivers/scsi/ufs/ufshcd.c if (!ufshcd_is_clkgating_allowed(hba)) hba 1521 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags); hba 1522 drivers/scsi/ufs/ufshcd.c hba->clk_gating.active_reqs++; hba 1524 drivers/scsi/ufs/ufshcd.c if (ufshcd_eh_in_progress(hba)) { hba 1525 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 1530 drivers/scsi/ufs/ufshcd.c switch (hba->clk_gating.state) { hba 1540 drivers/scsi/ufs/ufshcd.c if (ufshcd_can_hibern8_during_gating(hba) && hba 1541 drivers/scsi/ufs/ufshcd.c ufshcd_is_link_hibern8(hba)) { hba 1544 drivers/scsi/ufs/ufshcd.c hba->clk_gating.active_reqs--; hba 1547 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 1548 drivers/scsi/ufs/ufshcd.c flush_work(&hba->clk_gating.ungate_work); hba 1549 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags); hba 1554 drivers/scsi/ufs/ufshcd.c if (cancel_delayed_work(&hba->clk_gating.gate_work)) { hba 1555 drivers/scsi/ufs/ufshcd.c hba->clk_gating.state = CLKS_ON; hba 1556 drivers/scsi/ufs/ufshcd.c trace_ufshcd_clk_gating(dev_name(hba->dev), hba 1557 drivers/scsi/ufs/ufshcd.c hba->clk_gating.state); hba 1567 drivers/scsi/ufs/ufshcd.c ufshcd_scsi_block_requests(hba); hba 1568 drivers/scsi/ufs/ufshcd.c hba->clk_gating.state = REQ_CLKS_ON; hba 1569 drivers/scsi/ufs/ufshcd.c trace_ufshcd_clk_gating(dev_name(hba->dev), hba 1570 drivers/scsi/ufs/ufshcd.c hba->clk_gating.state); hba 1571 drivers/scsi/ufs/ufshcd.c queue_work(hba->clk_gating.clk_gating_workq, hba 1572 drivers/scsi/ufs/ufshcd.c &hba->clk_gating.ungate_work); hba 1581 drivers/scsi/ufs/ufshcd.c hba->clk_gating.active_reqs--; hba 1585 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 1586 drivers/scsi/ufs/ufshcd.c flush_work(&hba->clk_gating.ungate_work); hba 1588 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags); hba 1591 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: clk gating is in invalid state %d\n", hba 1592 drivers/scsi/ufs/ufshcd.c __func__, hba->clk_gating.state); hba 1595 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 1603 drivers/scsi/ufs/ufshcd.c struct ufs_hba *hba = container_of(work, struct ufs_hba, hba 1607 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags); hba 1614 drivers/scsi/ufs/ufshcd.c if (hba->clk_gating.is_suspended || hba 1615 drivers/scsi/ufs/ufshcd.c (hba->clk_gating.state == REQ_CLKS_ON)) { hba 1616 drivers/scsi/ufs/ufshcd.c hba->clk_gating.state = CLKS_ON; hba 1617 
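
ufshcd_hold() (hba 1514 onward) is a small state machine over clk_gating.state. A compact model of its four states; the ungate worker and the sleeping flush_work() are collapsed into one synchronous call, and the async/error paths are dropped:

#include <stdio.h>

enum gate_state { CLKS_OFF, REQ_CLKS_ON, CLKS_ON, REQ_CLKS_OFF };

static enum gate_state state = CLKS_OFF;
static int active_reqs;

static void run_ungate_work(void) { state = CLKS_ON; }  /* worker body, simplified */

static void hold(void)
{
    active_reqs++;
    switch (state) {
    case CLKS_ON:
        break;                   /* fast path: clocks already running */
    case REQ_CLKS_OFF:
        state = CLKS_ON;         /* pending gate work cancelled in time */
        break;
    case CLKS_OFF:
        state = REQ_CLKS_ON;     /* queue the ungate worker... */
        /* fall through */
    case REQ_CLKS_ON:
        run_ungate_work();       /* ...and wait for it (synchronous hold) */
        break;
    }
}

int main(void)
{
    hold();
    printf("state=%d active=%d\n", state, active_reqs);  /* CLKS_ON(2), 1 */
    return 0;
}
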
drivers/scsi/ufs/ufshcd.c trace_ufshcd_clk_gating(dev_name(hba->dev), hba 1618 drivers/scsi/ufs/ufshcd.c hba->clk_gating.state); hba 1622 drivers/scsi/ufs/ufshcd.c if (hba->clk_gating.active_reqs hba 1623 drivers/scsi/ufs/ufshcd.c || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL hba 1624 drivers/scsi/ufs/ufshcd.c || hba->lrb_in_use || hba->outstanding_tasks hba 1625 drivers/scsi/ufs/ufshcd.c || hba->active_uic_cmd || hba->uic_async_done) hba 1628 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 1631 drivers/scsi/ufs/ufshcd.c if (ufshcd_can_hibern8_during_gating(hba)) { hba 1632 drivers/scsi/ufs/ufshcd.c if (ufshcd_uic_hibern8_enter(hba)) { hba 1633 drivers/scsi/ufs/ufshcd.c hba->clk_gating.state = CLKS_ON; hba 1634 drivers/scsi/ufs/ufshcd.c trace_ufshcd_clk_gating(dev_name(hba->dev), hba 1635 drivers/scsi/ufs/ufshcd.c hba->clk_gating.state); hba 1638 drivers/scsi/ufs/ufshcd.c ufshcd_set_link_hibern8(hba); hba 1641 drivers/scsi/ufs/ufshcd.c if (!ufshcd_is_link_active(hba)) hba 1642 drivers/scsi/ufs/ufshcd.c ufshcd_setup_clocks(hba, false); hba 1645 drivers/scsi/ufs/ufshcd.c __ufshcd_setup_clocks(hba, false, true); hba 1656 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags); hba 1657 drivers/scsi/ufs/ufshcd.c if (hba->clk_gating.state == REQ_CLKS_OFF) { hba 1658 drivers/scsi/ufs/ufshcd.c hba->clk_gating.state = CLKS_OFF; hba 1659 drivers/scsi/ufs/ufshcd.c trace_ufshcd_clk_gating(dev_name(hba->dev), hba 1660 drivers/scsi/ufs/ufshcd.c hba->clk_gating.state); hba 1663 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 1669 drivers/scsi/ufs/ufshcd.c static void __ufshcd_release(struct ufs_hba *hba) hba 1671 drivers/scsi/ufs/ufshcd.c if (!ufshcd_is_clkgating_allowed(hba)) hba 1674 drivers/scsi/ufs/ufshcd.c hba->clk_gating.active_reqs--; hba 1676 drivers/scsi/ufs/ufshcd.c if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended hba 1677 drivers/scsi/ufs/ufshcd.c || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL hba 1678 drivers/scsi/ufs/ufshcd.c || hba->lrb_in_use || hba->outstanding_tasks hba 1679 drivers/scsi/ufs/ufshcd.c || hba->active_uic_cmd || hba->uic_async_done hba 1680 drivers/scsi/ufs/ufshcd.c || ufshcd_eh_in_progress(hba)) hba 1683 drivers/scsi/ufs/ufshcd.c hba->clk_gating.state = REQ_CLKS_OFF; hba 1684 drivers/scsi/ufs/ufshcd.c trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); hba 1685 drivers/scsi/ufs/ufshcd.c queue_delayed_work(hba->clk_gating.clk_gating_workq, hba 1686 drivers/scsi/ufs/ufshcd.c &hba->clk_gating.gate_work, hba 1687 drivers/scsi/ufs/ufshcd.c msecs_to_jiffies(hba->clk_gating.delay_ms)); hba 1690 drivers/scsi/ufs/ufshcd.c void ufshcd_release(struct ufs_hba *hba) hba 1694 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags); hba 1695 drivers/scsi/ufs/ufshcd.c __ufshcd_release(hba); hba 1696 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 1703 drivers/scsi/ufs/ufshcd.c struct ufs_hba *hba = dev_get_drvdata(dev); hba 1705 drivers/scsi/ufs/ufshcd.c return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms); hba 1711 drivers/scsi/ufs/ufshcd.c struct ufs_hba *hba = dev_get_drvdata(dev); hba 1717 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags); hba 1718 drivers/scsi/ufs/ufshcd.c hba->clk_gating.delay_ms = value; hba 1719 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 1726 drivers/scsi/ufs/ufshcd.c struct ufs_hba *hba = dev_get_drvdata(dev); hba 
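
__ufshcd_release() (hba 1669-1687) only *requests* gating, and only when every busy indicator is clear; the actual gating runs later from delayed work after delay_ms. The idle test, flattened into plain variables:

#include <stdbool.h>
#include <stdio.h>

struct gating {
    int active_reqs;
    bool is_suspended;
    int outstanding_tasks;
    bool uic_busy;           /* stands in for active_uic_cmd/uic_async_done */
    unsigned long delay_ms;
};

static bool release_requests_gate(struct gating *g)
{
    g->active_reqs--;
    if (g->active_reqs || g->is_suspended || g->outstanding_tasks || g->uic_busy)
        return false;        /* still busy: leave clocks on */
    return true;             /* caller queues gate_work after delay_ms */
}

int main(void)
{
    struct gating g = { .active_reqs = 1, .delay_ms = 150 };

    printf("gate now? %d (after %lums)\n", release_requests_gate(&g), g.delay_ms);
    return 0;
}
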
1728 drivers/scsi/ufs/ufshcd.c return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled); hba 1734 drivers/scsi/ufs/ufshcd.c struct ufs_hba *hba = dev_get_drvdata(dev); hba 1742 drivers/scsi/ufs/ufshcd.c if (value == hba->clk_gating.is_enabled) hba 1746 drivers/scsi/ufs/ufshcd.c ufshcd_release(hba); hba 1748 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags); hba 1749 drivers/scsi/ufs/ufshcd.c hba->clk_gating.active_reqs++; hba 1750 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 1753 drivers/scsi/ufs/ufshcd.c hba->clk_gating.is_enabled = value; hba 1758 drivers/scsi/ufs/ufshcd.c static void ufshcd_init_clk_scaling(struct ufs_hba *hba) hba 1762 drivers/scsi/ufs/ufshcd.c if (!ufshcd_is_clkscaling_supported(hba)) hba 1765 drivers/scsi/ufs/ufshcd.c INIT_WORK(&hba->clk_scaling.suspend_work, hba 1767 drivers/scsi/ufs/ufshcd.c INIT_WORK(&hba->clk_scaling.resume_work, hba 1771 drivers/scsi/ufs/ufshcd.c hba->host->host_no); hba 1772 drivers/scsi/ufs/ufshcd.c hba->clk_scaling.workq = create_singlethread_workqueue(wq_name); hba 1774 drivers/scsi/ufs/ufshcd.c ufshcd_clkscaling_init_sysfs(hba); hba 1777 drivers/scsi/ufs/ufshcd.c static void ufshcd_exit_clk_scaling(struct ufs_hba *hba) hba 1779 drivers/scsi/ufs/ufshcd.c if (!ufshcd_is_clkscaling_supported(hba)) hba 1782 drivers/scsi/ufs/ufshcd.c destroy_workqueue(hba->clk_scaling.workq); hba 1783 drivers/scsi/ufs/ufshcd.c ufshcd_devfreq_remove(hba); hba 1786 drivers/scsi/ufs/ufshcd.c static void ufshcd_init_clk_gating(struct ufs_hba *hba) hba 1790 drivers/scsi/ufs/ufshcd.c if (!ufshcd_is_clkgating_allowed(hba)) hba 1793 drivers/scsi/ufs/ufshcd.c hba->clk_gating.delay_ms = 150; hba 1794 drivers/scsi/ufs/ufshcd.c INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work); hba 1795 drivers/scsi/ufs/ufshcd.c INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work); hba 1798 drivers/scsi/ufs/ufshcd.c hba->host->host_no); hba 1799 drivers/scsi/ufs/ufshcd.c hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name, hba 1802 drivers/scsi/ufs/ufshcd.c hba->clk_gating.is_enabled = true; hba 1804 drivers/scsi/ufs/ufshcd.c hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show; hba 1805 drivers/scsi/ufs/ufshcd.c hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store; hba 1806 drivers/scsi/ufs/ufshcd.c sysfs_attr_init(&hba->clk_gating.delay_attr.attr); hba 1807 drivers/scsi/ufs/ufshcd.c hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms"; hba 1808 drivers/scsi/ufs/ufshcd.c hba->clk_gating.delay_attr.attr.mode = 0644; hba 1809 drivers/scsi/ufs/ufshcd.c if (device_create_file(hba->dev, &hba->clk_gating.delay_attr)) hba 1810 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n"); hba 1812 drivers/scsi/ufs/ufshcd.c hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show; hba 1813 drivers/scsi/ufs/ufshcd.c hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store; hba 1814 drivers/scsi/ufs/ufshcd.c sysfs_attr_init(&hba->clk_gating.enable_attr.attr); hba 1815 drivers/scsi/ufs/ufshcd.c hba->clk_gating.enable_attr.attr.name = "clkgate_enable"; hba 1816 drivers/scsi/ufs/ufshcd.c hba->clk_gating.enable_attr.attr.mode = 0644; hba 1817 drivers/scsi/ufs/ufshcd.c if (device_create_file(hba->dev, &hba->clk_gating.enable_attr)) hba 1818 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n"); hba 1821 drivers/scsi/ufs/ufshcd.c static void ufshcd_exit_clk_gating(struct ufs_hba *hba) hba 1823 
drivers/scsi/ufs/ufshcd.c if (!ufshcd_is_clkgating_allowed(hba)) hba 1825 drivers/scsi/ufs/ufshcd.c device_remove_file(hba->dev, &hba->clk_gating.delay_attr); hba 1826 drivers/scsi/ufs/ufshcd.c device_remove_file(hba->dev, &hba->clk_gating.enable_attr); hba 1827 drivers/scsi/ufs/ufshcd.c cancel_work_sync(&hba->clk_gating.ungate_work); hba 1828 drivers/scsi/ufs/ufshcd.c cancel_delayed_work_sync(&hba->clk_gating.gate_work); hba 1829 drivers/scsi/ufs/ufshcd.c destroy_workqueue(hba->clk_gating.clk_gating_workq); hba 1833 drivers/scsi/ufs/ufshcd.c static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba) hba 1837 drivers/scsi/ufs/ufshcd.c if (!ufshcd_is_clkscaling_supported(hba)) hba 1840 drivers/scsi/ufs/ufshcd.c if (!hba->clk_scaling.active_reqs++) hba 1843 drivers/scsi/ufs/ufshcd.c if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress) hba 1847 drivers/scsi/ufs/ufshcd.c queue_work(hba->clk_scaling.workq, hba 1848 drivers/scsi/ufs/ufshcd.c &hba->clk_scaling.resume_work); hba 1850 drivers/scsi/ufs/ufshcd.c if (!hba->clk_scaling.window_start_t) { hba 1851 drivers/scsi/ufs/ufshcd.c hba->clk_scaling.window_start_t = jiffies; hba 1852 drivers/scsi/ufs/ufshcd.c hba->clk_scaling.tot_busy_t = 0; hba 1853 drivers/scsi/ufs/ufshcd.c hba->clk_scaling.is_busy_started = false; hba 1856 drivers/scsi/ufs/ufshcd.c if (!hba->clk_scaling.is_busy_started) { hba 1857 drivers/scsi/ufs/ufshcd.c hba->clk_scaling.busy_start_t = ktime_get(); hba 1858 drivers/scsi/ufs/ufshcd.c hba->clk_scaling.is_busy_started = true; hba 1862 drivers/scsi/ufs/ufshcd.c static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba) hba 1864 drivers/scsi/ufs/ufshcd.c struct ufs_clk_scaling *scaling = &hba->clk_scaling; hba 1866 drivers/scsi/ufs/ufshcd.c if (!ufshcd_is_clkscaling_supported(hba)) hba 1869 drivers/scsi/ufs/ufshcd.c if (!hba->outstanding_reqs && scaling->is_busy_started) { hba 1882 drivers/scsi/ufs/ufshcd.c void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) hba 1884 drivers/scsi/ufs/ufshcd.c hba->lrb[task_tag].issue_time_stamp = ktime_get(); hba 1885 drivers/scsi/ufs/ufshcd.c hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0); hba 1886 drivers/scsi/ufs/ufshcd.c ufshcd_clk_scaling_start_busy(hba); hba 1887 drivers/scsi/ufs/ufshcd.c __set_bit(task_tag, &hba->outstanding_reqs); hba 1888 drivers/scsi/ufs/ufshcd.c ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); hba 1891 drivers/scsi/ufs/ufshcd.c ufshcd_add_command_trace(hba, task_tag, "send"); hba 1920 drivers/scsi/ufs/ufshcd.c int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) hba 1922 drivers/scsi/ufs/ufshcd.c struct ufs_query_res *query_res = &hba->dev_cmd.query.response; hba 1927 drivers/scsi/ufs/ufshcd.c if (hba->dev_cmd.query.descriptor && hba 1938 drivers/scsi/ufs/ufshcd.c hba->dev_cmd.query.request.upiu_req.length); hba 1940 drivers/scsi/ufs/ufshcd.c memcpy(hba->dev_cmd.query.descriptor, descp, resp_len); hba 1942 drivers/scsi/ufs/ufshcd.c dev_warn(hba->dev, hba 1956 drivers/scsi/ufs/ufshcd.c static inline void ufshcd_hba_capabilities(struct ufs_hba *hba) hba 1958 drivers/scsi/ufs/ufshcd.c hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES); hba 1961 drivers/scsi/ufs/ufshcd.c hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1; hba 1962 drivers/scsi/ufs/ufshcd.c hba->nutmrs = hba 1963 drivers/scsi/ufs/ufshcd.c ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1; hba 1972 drivers/scsi/ufs/ufshcd.c static inline bool ufshcd_ready_for_uic_cmd(struct 
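
ufshcd_send_command() (hba 1882-1891) boils down to two bit operations: record the tag in the software outstanding mask, then ring the hardware doorbell for that tag. Modelled with the register as a plain variable (the real doorbell has write-one-to-set semantics, which |= reproduces):

#include <stdint.h>
#include <stdio.h>

static unsigned long outstanding_reqs;  /* software view of in-flight tags */
static uint32_t doorbell_reg;           /* stand-in for REG_UTP_TRANSFER_REQ_DOOR_BELL */

static void send_command(unsigned int tag)
{
    outstanding_reqs |= 1UL << tag;     /* __set_bit(tag, &outstanding_reqs) */
    doorbell_reg |= 1U << tag;          /* one write starts one request */
}

int main(void)
{
    send_command(3);
    send_command(7);
    printf("outstanding=0x%lx doorbell=0x%x\n", outstanding_reqs, doorbell_reg);
    return 0;
}
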
ufs_hba *hba) hba 1974 drivers/scsi/ufs/ufshcd.c if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY) hba 1987 drivers/scsi/ufs/ufshcd.c static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba) hba 1989 drivers/scsi/ufs/ufshcd.c return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7; hba 2000 drivers/scsi/ufs/ufshcd.c ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) hba 2002 drivers/scsi/ufs/ufshcd.c WARN_ON(hba->active_uic_cmd); hba 2004 drivers/scsi/ufs/ufshcd.c hba->active_uic_cmd = uic_cmd; hba 2007 drivers/scsi/ufs/ufshcd.c ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1); hba 2008 drivers/scsi/ufs/ufshcd.c ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2); hba 2009 drivers/scsi/ufs/ufshcd.c ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3); hba 2012 drivers/scsi/ufs/ufshcd.c ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK, hba 2025 drivers/scsi/ufs/ufshcd.c ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) hba 2036 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags); hba 2037 drivers/scsi/ufs/ufshcd.c hba->active_uic_cmd = NULL; hba 2038 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 2054 drivers/scsi/ufs/ufshcd.c __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd, hba 2057 drivers/scsi/ufs/ufshcd.c if (!ufshcd_ready_for_uic_cmd(hba)) { hba 2058 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, hba 2066 drivers/scsi/ufs/ufshcd.c ufshcd_dispatch_uic_cmd(hba, uic_cmd); hba 2078 drivers/scsi/ufs/ufshcd.c int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) hba 2083 drivers/scsi/ufs/ufshcd.c ufshcd_hold(hba, false); hba 2084 drivers/scsi/ufs/ufshcd.c mutex_lock(&hba->uic_cmd_mutex); hba 2085 drivers/scsi/ufs/ufshcd.c ufshcd_add_delay_before_dme_cmd(hba); hba 2087 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags); hba 2088 drivers/scsi/ufs/ufshcd.c ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true); hba 2089 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 2091 drivers/scsi/ufs/ufshcd.c ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); hba 2093 drivers/scsi/ufs/ufshcd.c mutex_unlock(&hba->uic_cmd_mutex); hba 2095 drivers/scsi/ufs/ufshcd.c ufshcd_release(hba); hba 2106 drivers/scsi/ufs/ufshcd.c static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) hba 2120 drivers/scsi/ufs/ufshcd.c if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) hba 2151 drivers/scsi/ufs/ufshcd.c static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs) hba 2153 drivers/scsi/ufs/ufshcd.c u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); hba 2155 drivers/scsi/ufs/ufshcd.c if (hba->ufs_version == UFSHCI_VERSION_10) { hba 2163 drivers/scsi/ufs/ufshcd.c ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); hba 2171 drivers/scsi/ufs/ufshcd.c static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs) hba 2173 drivers/scsi/ufs/ufshcd.c u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); hba 2175 drivers/scsi/ufs/ufshcd.c if (hba->ufs_version == UFSHCI_VERSION_10) { hba 2185 drivers/scsi/ufs/ufshcd.c ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); hba 2274 drivers/scsi/ufs/ufshcd.c static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba, hba 2278 drivers/scsi/ufs/ufshcd.c struct ufs_query *query = &hba->dev_cmd.query; hba 2329 drivers/scsi/ufs/ufshcd.c static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) hba 2334 
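
ufshcd_dispatch_uic_cmd() (hba 2000-2012) programs the three argument registers before the opcode register, because the opcode write is what triggers the command. The same ordering against an emulated register file; the opcode value is illustrative:

#include <stdint.h>
#include <stdio.h>

enum { ARG1, ARG2, ARG3, CMD, NREGS };

struct uic_command { uint32_t command, argument1, argument2, argument3; };

static uint32_t regs[NREGS];

static void dispatch_uic_cmd(const struct uic_command *c)
{
    regs[ARG1] = c->argument1;
    regs[ARG2] = c->argument2;
    regs[ARG3] = c->argument3;
    regs[CMD]  = c->command & 0xff;  /* COMMAND_OPCODE_MASK; this write fires it */
}

int main(void)
{
    struct uic_command cmd = { .command = 0x16, .argument1 = 0 };

    dispatch_uic_cmd(&cmd);
    printf("cmd=0x%x arg1=0x%x\n", regs[CMD], regs[ARG1]);
    return 0;
}
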
drivers/scsi/ufs/ufshcd.c if ((hba->ufs_version == UFSHCI_VERSION_10) || hba 2335 drivers/scsi/ufs/ufshcd.c (hba->ufs_version == UFSHCI_VERSION_11)) hba 2341 drivers/scsi/ufs/ufshcd.c if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) hba 2342 drivers/scsi/ufs/ufshcd.c ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags); hba 2343 drivers/scsi/ufs/ufshcd.c else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) hba 2357 drivers/scsi/ufs/ufshcd.c static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) hba 2362 drivers/scsi/ufs/ufshcd.c if ((hba->ufs_version == UFSHCI_VERSION_10) || hba 2363 drivers/scsi/ufs/ufshcd.c (hba->ufs_version == UFSHCI_VERSION_11)) hba 2400 drivers/scsi/ufs/ufshcd.c struct ufs_hba *hba; hba 2405 drivers/scsi/ufs/ufshcd.c hba = shost_priv(host); hba 2408 drivers/scsi/ufs/ufshcd.c if (!ufshcd_valid_tag(hba, tag)) { hba 2409 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, hba 2415 drivers/scsi/ufs/ufshcd.c if (!down_read_trylock(&hba->clk_scaling_lock)) hba 2418 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags); hba 2419 drivers/scsi/ufs/ufshcd.c switch (hba->ufshcd_state) { hba 2431 drivers/scsi/ufs/ufshcd.c dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n", hba 2432 drivers/scsi/ufs/ufshcd.c __func__, hba->ufshcd_state); hba 2439 drivers/scsi/ufs/ufshcd.c if (ufshcd_eh_in_progress(hba)) { hba 2444 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 2446 drivers/scsi/ufs/ufshcd.c hba->req_abort_count = 0; hba 2449 drivers/scsi/ufs/ufshcd.c if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) { hba 2460 drivers/scsi/ufs/ufshcd.c err = ufshcd_hold(hba, true); hba 2463 drivers/scsi/ufs/ufshcd.c clear_bit_unlock(tag, &hba->lrb_in_use); hba 2466 drivers/scsi/ufs/ufshcd.c WARN_ON(hba->clk_gating.state != CLKS_ON); hba 2468 drivers/scsi/ufs/ufshcd.c lrbp = &hba->lrb[tag]; hba 2476 drivers/scsi/ufs/ufshcd.c lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false; hba 2479 drivers/scsi/ufs/ufshcd.c ufshcd_comp_scsi_upiu(hba, lrbp); hba 2481 drivers/scsi/ufs/ufshcd.c err = ufshcd_map_sg(hba, lrbp); hba 2483 drivers/scsi/ufs/ufshcd.c ufshcd_release(hba); hba 2485 drivers/scsi/ufs/ufshcd.c clear_bit_unlock(tag, &hba->lrb_in_use); hba 2492 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags); hba 2493 drivers/scsi/ufs/ufshcd.c ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? 
true : false)); hba 2494 drivers/scsi/ufs/ufshcd.c ufshcd_send_command(hba, tag); hba 2496 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 2498 drivers/scsi/ufs/ufshcd.c up_read(&hba->clk_scaling_lock); hba 2502 drivers/scsi/ufs/ufshcd.c static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, hba 2511 drivers/scsi/ufs/ufshcd.c hba->dev_cmd.type = cmd_type; hba 2513 drivers/scsi/ufs/ufshcd.c return ufshcd_comp_devman_upiu(hba, lrbp); hba 2517 drivers/scsi/ufs/ufshcd.c ufshcd_clear_cmd(struct ufs_hba *hba, int tag) hba 2524 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags); hba 2525 drivers/scsi/ufs/ufshcd.c ufshcd_utrl_clear(hba, tag); hba 2526 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 2532 drivers/scsi/ufs/ufshcd.c err = ufshcd_wait_for_register(hba, hba 2540 drivers/scsi/ufs/ufshcd.c ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) hba 2542 drivers/scsi/ufs/ufshcd.c struct ufs_query_res *query_res = &hba->dev_cmd.query.response; hba 2556 drivers/scsi/ufs/ufshcd.c ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) hba 2561 drivers/scsi/ufs/ufshcd.c hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); hba 2566 drivers/scsi/ufs/ufshcd.c if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) { hba 2568 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: unexpected response %x\n", hba 2573 drivers/scsi/ufs/ufshcd.c err = ufshcd_check_query_response(hba, lrbp); hba 2575 drivers/scsi/ufs/ufshcd.c err = ufshcd_copy_query_response(hba, lrbp); hba 2580 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n", hba 2585 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n", hba 2593 drivers/scsi/ufs/ufshcd.c static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, hba 2600 drivers/scsi/ufs/ufshcd.c time_left = wait_for_completion_timeout(hba->dev_cmd.complete, hba 2605 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags); hba 2606 drivers/scsi/ufs/ufshcd.c hba->dev_cmd.complete = NULL; hba 2610 drivers/scsi/ufs/ufshcd.c err = ufshcd_dev_cmd_completion(hba, lrbp); hba 2612 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 2616 drivers/scsi/ufs/ufshcd.c dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n", hba 2618 drivers/scsi/ufs/ufshcd.c if (!ufshcd_clear_cmd(hba, lrbp->task_tag)) hba 2626 drivers/scsi/ufs/ufshcd.c ufshcd_outstanding_req_clear(hba, lrbp->task_tag); hba 2643 drivers/scsi/ufs/ufshcd.c static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out) hba 2653 drivers/scsi/ufs/ufshcd.c tmp = ~hba->lrb_in_use; hba 2654 drivers/scsi/ufs/ufshcd.c tag = find_last_bit(&tmp, hba->nutrs); hba 2655 drivers/scsi/ufs/ufshcd.c if (tag >= hba->nutrs) hba 2657 drivers/scsi/ufs/ufshcd.c } while (test_and_set_bit_lock(tag, &hba->lrb_in_use)); hba 2665 drivers/scsi/ufs/ufshcd.c static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag) hba 2667 drivers/scsi/ufs/ufshcd.c clear_bit_unlock(tag, &hba->lrb_in_use); hba 2679 drivers/scsi/ufs/ufshcd.c static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, hba 2688 drivers/scsi/ufs/ufshcd.c down_read(&hba->clk_scaling_lock); hba 2695 drivers/scsi/ufs/ufshcd.c wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag)); hba 2698 drivers/scsi/ufs/ufshcd.c lrbp = &hba->lrb[tag]; hba 2700 drivers/scsi/ufs/ufshcd.c err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag); hba 2704 
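
ufshcd_get_dev_cmd_tag() (hba 2643-2657) hands device-management commands the highest free slot: invert the in-use mask, find_last_bit() over it, then claim the slot with test_and_set_bit_lock(). A single-threaded model where the atomic claim is a plain read-modify-write:

#include <stdbool.h>
#include <stdio.h>

static unsigned long lrb_in_use;
static const int nutrs = 32;   /* queue depth from the controller capabilities */

static bool get_dev_cmd_tag(int *tag_out)
{
    unsigned long tmp = ~lrb_in_use;
    int tag;

    for (tag = nutrs - 1; tag >= 0; tag--)  /* find_last_bit(&tmp, nutrs) */
        if (tmp & (1UL << tag))
            break;
    if (tag < 0)
        return false;                       /* every slot is busy */

    lrb_in_use |= 1UL << tag;               /* test_and_set_bit_lock() in the driver */
    *tag_out = tag;
    return true;
}

int main(void)
{
    int tag;

    if (get_dev_cmd_tag(&tag))
        printf("claimed tag %d\n", tag);    /* 31: device commands take the top slot */
    return 0;
}
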
drivers/scsi/ufs/ufshcd.c hba->dev_cmd.complete = &wait; hba 2706 drivers/scsi/ufs/ufshcd.c ufshcd_add_query_upiu_trace(hba, tag, "query_send"); hba 2709 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags); hba 2710 drivers/scsi/ufs/ufshcd.c ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false)); hba 2711 drivers/scsi/ufs/ufshcd.c ufshcd_send_command(hba, tag); hba 2712 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 2714 drivers/scsi/ufs/ufshcd.c err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout); hba 2716 drivers/scsi/ufs/ufshcd.c ufshcd_add_query_upiu_trace(hba, tag, hba 2720 drivers/scsi/ufs/ufshcd.c ufshcd_put_dev_cmd_tag(hba, tag); hba 2721 drivers/scsi/ufs/ufshcd.c wake_up(&hba->dev_cmd.tag_wq); hba 2722 drivers/scsi/ufs/ufshcd.c up_read(&hba->clk_scaling_lock); hba 2736 drivers/scsi/ufs/ufshcd.c static inline void ufshcd_init_query(struct ufs_hba *hba, hba 2740 drivers/scsi/ufs/ufshcd.c *request = &hba->dev_cmd.query.request; hba 2741 drivers/scsi/ufs/ufshcd.c *response = &hba->dev_cmd.query.response; hba 2750 drivers/scsi/ufs/ufshcd.c static int ufshcd_query_flag_retry(struct ufs_hba *hba, hba 2757 drivers/scsi/ufs/ufshcd.c ret = ufshcd_query_flag(hba, opcode, idn, flag_res); hba 2759 drivers/scsi/ufs/ufshcd.c dev_dbg(hba->dev, hba 2767 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, hba 2782 drivers/scsi/ufs/ufshcd.c int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, hba 2790 drivers/scsi/ufs/ufshcd.c BUG_ON(!hba); hba 2792 drivers/scsi/ufs/ufshcd.c ufshcd_hold(hba, false); hba 2793 drivers/scsi/ufs/ufshcd.c mutex_lock(&hba->dev_cmd.lock); hba 2794 drivers/scsi/ufs/ufshcd.c ufshcd_init_query(hba, &request, &response, opcode, idn, index, hba 2807 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: Invalid argument for read request\n", hba 2814 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, hba 2821 drivers/scsi/ufs/ufshcd.c err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout); hba 2824 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, hba 2835 drivers/scsi/ufs/ufshcd.c mutex_unlock(&hba->dev_cmd.lock); hba 2836 drivers/scsi/ufs/ufshcd.c ufshcd_release(hba); hba 2851 drivers/scsi/ufs/ufshcd.c int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, hba 2858 drivers/scsi/ufs/ufshcd.c BUG_ON(!hba); hba 2860 drivers/scsi/ufs/ufshcd.c ufshcd_hold(hba, false); hba 2862 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n", hba 2868 drivers/scsi/ufs/ufshcd.c mutex_lock(&hba->dev_cmd.lock); hba 2869 drivers/scsi/ufs/ufshcd.c ufshcd_init_query(hba, &request, &response, opcode, idn, index, hba 2881 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n", hba 2887 drivers/scsi/ufs/ufshcd.c err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); hba 2890 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", hba 2898 drivers/scsi/ufs/ufshcd.c mutex_unlock(&hba->dev_cmd.lock); hba 2900 drivers/scsi/ufs/ufshcd.c ufshcd_release(hba); hba 2917 drivers/scsi/ufs/ufshcd.c static int ufshcd_query_attr_retry(struct ufs_hba *hba, hba 2925 drivers/scsi/ufs/ufshcd.c ret = ufshcd_query_attr(hba, opcode, idn, index, hba 2928 drivers/scsi/ufs/ufshcd.c dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n", hba 2935 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, hba 2941 drivers/scsi/ufs/ufshcd.c static int __ufshcd_query_descriptor(struct ufs_hba *hba, hba 2949 
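
The *_retry wrappers above (ufshcd_query_flag_retry, ufshcd_query_attr_retry) share one shape: reissue the query up to a fixed count (QUERY_REQ_RETRIES in the driver), log each failure quietly with dev_dbg, and only escalate to dev_err after the last attempt. A sketch with the query stubbed to fail twice:

#include <stdio.h>

#define QUERY_REQ_RETRIES 3

static int attempts;
static int query_flag(void) { return ++attempts < 3 ? -1 : 0; }  /* fails twice */

static int query_flag_retry(void)
{
    int ret = -1;

    for (int retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
        ret = query_flag();
        if (!ret)
            break;
        fprintf(stderr, "query failed, retries %d\n", retries);   /* dev_dbg */
    }
    if (ret)
        fprintf(stderr, "query failed after %d retries\n", QUERY_REQ_RETRIES);
    return ret;
}

int main(void) { return query_flag_retry(); }
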
drivers/scsi/ufs/ufshcd.c BUG_ON(!hba); hba 2951 drivers/scsi/ufs/ufshcd.c ufshcd_hold(hba, false); hba 2953 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n", hba 2960 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n", hba 2966 drivers/scsi/ufs/ufshcd.c mutex_lock(&hba->dev_cmd.lock); hba 2967 drivers/scsi/ufs/ufshcd.c ufshcd_init_query(hba, &request, &response, opcode, idn, index, hba 2969 drivers/scsi/ufs/ufshcd.c hba->dev_cmd.query.descriptor = desc_buf; hba 2980 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, hba 2987 drivers/scsi/ufs/ufshcd.c err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); hba 2990 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", hba 2998 drivers/scsi/ufs/ufshcd.c hba->dev_cmd.query.descriptor = NULL; hba 2999 drivers/scsi/ufs/ufshcd.c mutex_unlock(&hba->dev_cmd.lock); hba 3001 drivers/scsi/ufs/ufshcd.c ufshcd_release(hba); hba 3019 drivers/scsi/ufs/ufshcd.c int ufshcd_query_descriptor_retry(struct ufs_hba *hba, hba 3029 drivers/scsi/ufs/ufshcd.c err = __ufshcd_query_descriptor(hba, opcode, idn, index, hba 3047 drivers/scsi/ufs/ufshcd.c static int ufshcd_read_desc_length(struct ufs_hba *hba, hba 3059 drivers/scsi/ufs/ufshcd.c ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, hba 3064 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: Failed to get descriptor header id %d", hba 3068 drivers/scsi/ufs/ufshcd.c dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch", hba 3087 drivers/scsi/ufs/ufshcd.c int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, hba 3092 drivers/scsi/ufs/ufshcd.c *desc_len = hba->desc_size.dev_desc; hba 3095 drivers/scsi/ufs/ufshcd.c *desc_len = hba->desc_size.pwr_desc; hba 3098 drivers/scsi/ufs/ufshcd.c *desc_len = hba->desc_size.geom_desc; hba 3101 drivers/scsi/ufs/ufshcd.c *desc_len = hba->desc_size.conf_desc; hba 3104 drivers/scsi/ufs/ufshcd.c *desc_len = hba->desc_size.unit_desc; hba 3107 drivers/scsi/ufs/ufshcd.c *desc_len = hba->desc_size.interc_desc; hba 3113 drivers/scsi/ufs/ufshcd.c *desc_len = hba->desc_size.hlth_desc; hba 3138 drivers/scsi/ufs/ufshcd.c int ufshcd_read_desc_param(struct ufs_hba *hba, hba 3157 drivers/scsi/ufs/ufshcd.c ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len); hba 3161 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: Failed to get full descriptor length", hba 3177 drivers/scsi/ufs/ufshcd.c ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, hba 3182 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: Failed reading descriptor. 
desc_id %d, desc_index %d, param_offset %d, ret %d", hba 3189 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header", hba 3207 drivers/scsi/ufs/ufshcd.c static inline int ufshcd_read_desc(struct ufs_hba *hba, hba 3213 drivers/scsi/ufs/ufshcd.c return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size); hba 3216 drivers/scsi/ufs/ufshcd.c static inline int ufshcd_read_power_desc(struct ufs_hba *hba, hba 3220 drivers/scsi/ufs/ufshcd.c return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size); hba 3223 drivers/scsi/ufs/ufshcd.c static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size) hba 3225 drivers/scsi/ufs/ufshcd.c return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size); hba 3261 drivers/scsi/ufs/ufshcd.c int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, hba 3275 drivers/scsi/ufs/ufshcd.c ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING, hba 3279 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n", hba 3286 drivers/scsi/ufs/ufshcd.c dev_dbg(hba->dev, "String Desc is of zero length\n"); hba 3341 drivers/scsi/ufs/ufshcd.c static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba, hba 3354 drivers/scsi/ufs/ufshcd.c return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun, hba 3371 drivers/scsi/ufs/ufshcd.c static int ufshcd_memory_alloc(struct ufs_hba *hba) hba 3376 drivers/scsi/ufs/ufshcd.c ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs); hba 3377 drivers/scsi/ufs/ufshcd.c hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev, hba 3379 drivers/scsi/ufs/ufshcd.c &hba->ucdl_dma_addr, hba 3388 drivers/scsi/ufs/ufshcd.c if (!hba->ucdl_base_addr || hba 3389 drivers/scsi/ufs/ufshcd.c WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) { hba 3390 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, hba 3399 drivers/scsi/ufs/ufshcd.c utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs); hba 3400 drivers/scsi/ufs/ufshcd.c hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev, hba 3402 drivers/scsi/ufs/ufshcd.c &hba->utrdl_dma_addr, hba 3404 drivers/scsi/ufs/ufshcd.c if (!hba->utrdl_base_addr || hba 3405 drivers/scsi/ufs/ufshcd.c WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) { hba 3406 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, hba 3415 drivers/scsi/ufs/ufshcd.c utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs; hba 3416 drivers/scsi/ufs/ufshcd.c hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev, hba 3418 drivers/scsi/ufs/ufshcd.c &hba->utmrdl_dma_addr, hba 3420 drivers/scsi/ufs/ufshcd.c if (!hba->utmrdl_base_addr || hba 3421 drivers/scsi/ufs/ufshcd.c WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) { hba 3422 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, hba 3428 drivers/scsi/ufs/ufshcd.c hba->lrb = devm_kcalloc(hba->dev, hba 3429 drivers/scsi/ufs/ufshcd.c hba->nutrs, sizeof(struct ufshcd_lrb), hba 3431 drivers/scsi/ufs/ufshcd.c if (!hba->lrb) { hba 3432 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "LRB Memory allocation failed\n"); hba 3453 drivers/scsi/ufs/ufshcd.c static void ufshcd_host_memory_configure(struct ufs_hba *hba) hba 3464 drivers/scsi/ufs/ufshcd.c utrdlp = hba->utrdl_base_addr; hba 3465 drivers/scsi/ufs/ufshcd.c cmd_descp = hba->ucdl_base_addr; hba 3473 drivers/scsi/ufs/ufshcd.c cmd_desc_dma_addr = hba->ucdl_dma_addr; hba 3475 drivers/scsi/ufs/ufshcd.c for (i = 0; i < hba->nutrs; i++) { hba 3485 drivers/scsi/ufs/ufshcd.c if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) { hba 3501 drivers/scsi/ufs/ufshcd.c 
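
ufshcd_memory_alloc() (hba 3371 onward) sizes each list as sizeof(descriptor) * slots and insists the DMA base is page aligned, asserting WARN_ON(addr & (PAGE_SIZE - 1)). The same sizing and check in user space, with aligned_alloc standing in for dmam_alloc_coherent; the 32-byte record matches the UFSHCI transfer request descriptor (8 dwords):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

struct utp_transfer_req_desc { uint32_t dw[8]; };  /* 32-byte UTRD */

int main(void)
{
    int nutrs = 32;
    size_t utrdl_size = sizeof(struct utp_transfer_req_desc) * nutrs;
    /* aligned_alloc wants size to be a multiple of the alignment */
    size_t rounded = ((utrdl_size + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE;
    void *utrdl = aligned_alloc(PAGE_SIZE, rounded);

    if (!utrdl || ((uintptr_t)utrdl & (PAGE_SIZE - 1))) {
        fprintf(stderr, "UTRD list allocation/alignment failed\n");
        return 1;
    }
    printf("UTRD list: %zu bytes at %p\n", utrdl_size, utrdl);
    free(utrdl);
    return 0;
}
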
hba->lrb[i].utr_descriptor_ptr = (utrdlp + i); hba 3502 drivers/scsi/ufs/ufshcd.c hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr + hba 3504 drivers/scsi/ufs/ufshcd.c hba->lrb[i].ucd_req_ptr = hba 3506 drivers/scsi/ufs/ufshcd.c hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr; hba 3507 drivers/scsi/ufs/ufshcd.c hba->lrb[i].ucd_rsp_ptr = hba 3509 drivers/scsi/ufs/ufshcd.c hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr + hba 3511 drivers/scsi/ufs/ufshcd.c hba->lrb[i].ucd_prdt_ptr = hba 3513 drivers/scsi/ufs/ufshcd.c hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr + hba 3529 drivers/scsi/ufs/ufshcd.c static int ufshcd_dme_link_startup(struct ufs_hba *hba) hba 3536 drivers/scsi/ufs/ufshcd.c ret = ufshcd_send_uic_cmd(hba, &uic_cmd); hba 3538 drivers/scsi/ufs/ufshcd.c dev_dbg(hba->dev, hba 3551 drivers/scsi/ufs/ufshcd.c static int ufshcd_dme_reset(struct ufs_hba *hba) hba 3558 drivers/scsi/ufs/ufshcd.c ret = ufshcd_send_uic_cmd(hba, &uic_cmd); hba 3560 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, hba 3574 drivers/scsi/ufs/ufshcd.c static int ufshcd_dme_enable(struct ufs_hba *hba) hba 3581 drivers/scsi/ufs/ufshcd.c ret = ufshcd_send_uic_cmd(hba, &uic_cmd); hba 3583 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, hba 3589 drivers/scsi/ufs/ufshcd.c static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba) hba 3594 drivers/scsi/ufs/ufshcd.c if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS)) hba 3601 drivers/scsi/ufs/ufshcd.c if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) { hba 3607 drivers/scsi/ufs/ufshcd.c hba->last_dme_cmd_tstamp)); hba 3630 drivers/scsi/ufs/ufshcd.c int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, hba 3650 drivers/scsi/ufs/ufshcd.c ret = ufshcd_send_uic_cmd(hba, &uic_cmd); hba 3652 drivers/scsi/ufs/ufshcd.c dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n", hba 3657 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n", hba 3674 drivers/scsi/ufs/ufshcd.c int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, hba 3689 drivers/scsi/ufs/ufshcd.c if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) { hba 3690 drivers/scsi/ufs/ufshcd.c orig_pwr_info = hba->pwr_info; hba 3705 drivers/scsi/ufs/ufshcd.c ret = ufshcd_change_power_mode(hba, &temp_pwr_info); hba 3717 drivers/scsi/ufs/ufshcd.c ret = ufshcd_send_uic_cmd(hba, &uic_cmd); hba 3719 drivers/scsi/ufs/ufshcd.c dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n", hba 3724 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n", hba 3731 drivers/scsi/ufs/ufshcd.c if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE) hba 3733 drivers/scsi/ufs/ufshcd.c ufshcd_change_power_mode(hba, &orig_pwr_info); hba 3755 drivers/scsi/ufs/ufshcd.c static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) hba 3763 drivers/scsi/ufs/ufshcd.c mutex_lock(&hba->uic_cmd_mutex); hba 3765 drivers/scsi/ufs/ufshcd.c ufshcd_add_delay_before_dme_cmd(hba); hba 3767 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags); hba 3768 drivers/scsi/ufs/ufshcd.c hba->uic_async_done = &uic_async_done; hba 3769 drivers/scsi/ufs/ufshcd.c if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) { hba 3770 drivers/scsi/ufs/ufshcd.c ufshcd_disable_intr(hba, UIC_COMMAND_COMPL); hba 3778 drivers/scsi/ufs/ufshcd.c ret = __ufshcd_send_uic_cmd(hba, cmd, false); hba 3779 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 3781 drivers/scsi/ufs/ufshcd.c 
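
ufshcd_host_memory_configure() (hba 3501-3513) is mostly address arithmetic: slot i's transfer request descriptor sits at base + i * sizeof(desc), and the response and PRDT pointers are fixed offsets inside slot i's command descriptor. A sketch of that arithmetic; UCD_SIZE and RSP_OFFSET here are assumed stand-ins, not the spec values:

#include <stdint.h>
#include <stdio.h>

#define UTRD_SIZE  32u    /* one transfer request descriptor */
#define UCD_SIZE   1024u  /* one command descriptor element (assumed) */
#define RSP_OFFSET 256u   /* response UPIU offset inside a UCD (assumed) */

int main(void)
{
    uint64_t utrdl_base = 0x80000000, ucdl_base = 0x80100000;

    for (int i = 0; i < 4; i++)
        printf("tag %d: utrd=%#llx ucd_rsp=%#llx\n", i,
               (unsigned long long)(utrdl_base + (uint64_t)i * UTRD_SIZE),
               (unsigned long long)(ucdl_base + (uint64_t)i * UCD_SIZE + RSP_OFFSET));
    return 0;
}
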
dev_err(hba->dev, hba 3787 drivers/scsi/ufs/ufshcd.c if (!wait_for_completion_timeout(hba->uic_async_done, hba 3789 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, hba 3796 drivers/scsi/ufs/ufshcd.c status = ufshcd_get_upmcrs(hba); hba 3798 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, hba 3805 drivers/scsi/ufs/ufshcd.c ufshcd_print_host_state(hba); hba 3806 drivers/scsi/ufs/ufshcd.c ufshcd_print_pwr_info(hba); hba 3807 drivers/scsi/ufs/ufshcd.c ufshcd_print_host_regs(hba); hba 3810 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags); hba 3811 drivers/scsi/ufs/ufshcd.c hba->active_uic_cmd = NULL; hba 3812 drivers/scsi/ufs/ufshcd.c hba->uic_async_done = NULL; hba 3814 drivers/scsi/ufs/ufshcd.c ufshcd_enable_intr(hba, UIC_COMMAND_COMPL); hba 3815 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 3816 drivers/scsi/ufs/ufshcd.c mutex_unlock(&hba->uic_cmd_mutex); hba 3829 drivers/scsi/ufs/ufshcd.c static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) hba 3834 drivers/scsi/ufs/ufshcd.c if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) { hba 3835 drivers/scsi/ufs/ufshcd.c ret = ufshcd_dme_set(hba, hba 3838 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n", hba 3847 drivers/scsi/ufs/ufshcd.c ufshcd_hold(hba, false); hba 3848 drivers/scsi/ufs/ufshcd.c ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); hba 3849 drivers/scsi/ufs/ufshcd.c ufshcd_release(hba); hba 3855 drivers/scsi/ufs/ufshcd.c static int ufshcd_link_recovery(struct ufs_hba *hba) hba 3860 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags); hba 3861 drivers/scsi/ufs/ufshcd.c hba->ufshcd_state = UFSHCD_STATE_RESET; hba 3862 drivers/scsi/ufs/ufshcd.c ufshcd_set_eh_in_progress(hba); hba 3863 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 3865 drivers/scsi/ufs/ufshcd.c ret = ufshcd_host_reset_and_restore(hba); hba 3867 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags); hba 3869 drivers/scsi/ufs/ufshcd.c hba->ufshcd_state = UFSHCD_STATE_ERROR; hba 3870 drivers/scsi/ufs/ufshcd.c ufshcd_clear_eh_in_progress(hba); hba 3871 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 3874 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: link recovery failed, err %d", hba 3880 drivers/scsi/ufs/ufshcd.c static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba) hba 3886 drivers/scsi/ufs/ufshcd.c ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE); hba 3889 drivers/scsi/ufs/ufshcd.c ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); hba 3890 drivers/scsi/ufs/ufshcd.c trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter", hba 3896 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: hibern8 enter failed. 
ret = %d\n", hba 3905 drivers/scsi/ufs/ufshcd.c err = ufshcd_link_recovery(hba); hba 3907 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: link recovery failed", __func__); hba 3913 drivers/scsi/ufs/ufshcd.c ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, hba 3919 drivers/scsi/ufs/ufshcd.c static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba) hba 3924 drivers/scsi/ufs/ufshcd.c ret = __ufshcd_uic_hibern8_enter(hba); hba 3932 drivers/scsi/ufs/ufshcd.c static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) hba 3938 drivers/scsi/ufs/ufshcd.c ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE); hba 3941 drivers/scsi/ufs/ufshcd.c ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); hba 3942 drivers/scsi/ufs/ufshcd.c trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit", hba 3946 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n", hba 3948 drivers/scsi/ufs/ufshcd.c ret = ufshcd_link_recovery(hba); hba 3950 drivers/scsi/ufs/ufshcd.c ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, hba 3952 drivers/scsi/ufs/ufshcd.c hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get(); hba 3953 drivers/scsi/ufs/ufshcd.c hba->ufs_stats.hibern8_exit_cnt++; hba 3959 drivers/scsi/ufs/ufshcd.c void ufshcd_auto_hibern8_enable(struct ufs_hba *hba) hba 3963 drivers/scsi/ufs/ufshcd.c if (!ufshcd_is_auto_hibern8_supported(hba) || !hba->ahit) hba 3966 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags); hba 3967 drivers/scsi/ufs/ufshcd.c ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER); hba 3968 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags); hba 3976 drivers/scsi/ufs/ufshcd.c static void ufshcd_init_pwr_info(struct ufs_hba *hba) hba 3978 drivers/scsi/ufs/ufshcd.c hba->pwr_info.gear_rx = UFS_PWM_G1; hba 3979 drivers/scsi/ufs/ufshcd.c hba->pwr_info.gear_tx = UFS_PWM_G1; hba 3980 drivers/scsi/ufs/ufshcd.c hba->pwr_info.lane_rx = 1; hba 3981 drivers/scsi/ufs/ufshcd.c hba->pwr_info.lane_tx = 1; hba 3982 drivers/scsi/ufs/ufshcd.c hba->pwr_info.pwr_rx = SLOWAUTO_MODE; hba 3983 drivers/scsi/ufs/ufshcd.c hba->pwr_info.pwr_tx = SLOWAUTO_MODE; hba 3984 drivers/scsi/ufs/ufshcd.c hba->pwr_info.hs_rate = 0; hba 3991 drivers/scsi/ufs/ufshcd.c static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) hba 3993 drivers/scsi/ufs/ufshcd.c struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; hba 3995 drivers/scsi/ufs/ufshcd.c if (hba->max_pwr_info.is_valid) hba 4003 drivers/scsi/ufs/ufshcd.c ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), hba 4005 drivers/scsi/ufs/ufshcd.c ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), hba 4009 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: invalid connected lanes value. 
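
__ufshcd_uic_hibern8_enter() (hba 3880 onward) never leaves a failed hibernate hanging: a failed UIC step is followed by a full link recovery, and the original error is still returned to the caller. Control flow only, with both steps stubbed:

#include <stdio.h>

static int uic_hibern8_cmd(void) { return -1; }  /* pretend the UIC step failed */
static int link_recovery(void)   { return 0; }

static int hibern8_enter(void)
{
    int ret = uic_hibern8_cmd();

    if (ret) {
        fprintf(stderr, "hibern8 enter failed, ret = %d\n", ret);
        if (link_recovery())
            fprintf(stderr, "link recovery failed\n");
        return ret;   /* still an error even if the link recovered */
    }
    return 0;
}

int main(void) { return hibern8_enter(); }
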
rx=%d, tx=%d\n", hba 4021 drivers/scsi/ufs/ufshcd.c ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx); hba 4023 drivers/scsi/ufs/ufshcd.c ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), hba 4026 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n", hba 4033 drivers/scsi/ufs/ufshcd.c ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), hba 4036 drivers/scsi/ufs/ufshcd.c ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), hba 4039 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n", hba 4046 drivers/scsi/ufs/ufshcd.c hba->max_pwr_info.is_valid = true; hba 4050 drivers/scsi/ufs/ufshcd.c static int ufshcd_change_power_mode(struct ufs_hba *hba, hba 4056 drivers/scsi/ufs/ufshcd.c if (pwr_mode->gear_rx == hba->pwr_info.gear_rx && hba 4057 drivers/scsi/ufs/ufshcd.c pwr_mode->gear_tx == hba->pwr_info.gear_tx && hba 4058 drivers/scsi/ufs/ufshcd.c pwr_mode->lane_rx == hba->pwr_info.lane_rx && hba 4059 drivers/scsi/ufs/ufshcd.c pwr_mode->lane_tx == hba->pwr_info.lane_tx && hba 4060 drivers/scsi/ufs/ufshcd.c pwr_mode->pwr_rx == hba->pwr_info.pwr_rx && hba 4061 drivers/scsi/ufs/ufshcd.c pwr_mode->pwr_tx == hba->pwr_info.pwr_tx && hba 4062 drivers/scsi/ufs/ufshcd.c pwr_mode->hs_rate == hba->pwr_info.hs_rate) { hba 4063 drivers/scsi/ufs/ufshcd.c dev_dbg(hba->dev, "%s: power already configured\n", __func__); hba 4073 drivers/scsi/ufs/ufshcd.c ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx); hba 4074 drivers/scsi/ufs/ufshcd.c ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), hba 4078 drivers/scsi/ufs/ufshcd.c ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE); hba 4080 drivers/scsi/ufs/ufshcd.c ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE); hba 4082 drivers/scsi/ufs/ufshcd.c ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx); hba 4083 drivers/scsi/ufs/ufshcd.c ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), hba 4087 drivers/scsi/ufs/ufshcd.c ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE); hba 4089 drivers/scsi/ufs/ufshcd.c ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE); hba 4095 drivers/scsi/ufs/ufshcd.c ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), hba 4098 drivers/scsi/ufs/ufshcd.c ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 hba 4102 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, hba 4105 drivers/scsi/ufs/ufshcd.c ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL, hba 4108 drivers/scsi/ufs/ufshcd.c memcpy(&hba->pwr_info, pwr_mode, hba 4120 drivers/scsi/ufs/ufshcd.c int ufshcd_config_pwr_mode(struct ufs_hba *hba, hba 4126 drivers/scsi/ufs/ufshcd.c ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE, hba 4132 drivers/scsi/ufs/ufshcd.c ret = ufshcd_change_power_mode(hba, &final_params); hba 4134 drivers/scsi/ufs/ufshcd.c ufshcd_print_pwr_info(hba); hba 4146 drivers/scsi/ufs/ufshcd.c static int ufshcd_complete_dev_init(struct ufs_hba *hba) hba 4152 drivers/scsi/ufs/ufshcd.c err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, hba 4155 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, hba 4163 drivers/scsi/ufs/ufshcd.c err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, hba 4167 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, hba 4171 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, hba 4191 drivers/scsi/ufs/ufshcd.c static int ufshcd_make_hba_operational(struct ufs_hba *hba) hba 4197 drivers/scsi/ufs/ufshcd.c ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS); hba 4200 drivers/scsi/ufs/ufshcd.c if 
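
ufshcd_change_power_mode() (hba 4050-4063) starts with a field-by-field early-out: if every negotiated parameter (gear, lane count, power mode, HS rate, both directions) already matches, the whole UIC sequence is skipped. The same comparison in isolation:

#include <stdbool.h>
#include <stdio.h>

struct pa_layer_attr {
    int gear_rx, gear_tx, lane_rx, lane_tx, pwr_rx, pwr_tx, hs_rate;
};

static bool already_configured(const struct pa_layer_attr *cur,
                               const struct pa_layer_attr *req)
{
    return req->gear_rx == cur->gear_rx && req->gear_tx == cur->gear_tx &&
           req->lane_rx == cur->lane_rx && req->lane_tx == cur->lane_tx &&
           req->pwr_rx  == cur->pwr_rx  && req->pwr_tx  == cur->pwr_tx  &&
           req->hs_rate == cur->hs_rate;
}

int main(void)
{
    struct pa_layer_attr cur = { 3, 3, 2, 2, 1, 1, 1 };

    printf("skip reconfig: %d\n", already_configured(&cur, &cur));  /* 1 */
    return 0;
}
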
(ufshcd_is_intr_aggr_allowed(hba)) hba 4201 drivers/scsi/ufs/ufshcd.c ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO); hba 4203 drivers/scsi/ufs/ufshcd.c ufshcd_disable_intr_aggr(hba); hba 4206 drivers/scsi/ufs/ufshcd.c ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr), hba 4208 drivers/scsi/ufs/ufshcd.c ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr), hba 4210 drivers/scsi/ufs/ufshcd.c ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr), hba 4212 drivers/scsi/ufs/ufshcd.c ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr), hba 4224 drivers/scsi/ufs/ufshcd.c reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS); hba 4226 drivers/scsi/ufs/ufshcd.c ufshcd_enable_run_stop_reg(hba); hba 4228 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, hba 4243 drivers/scsi/ufs/ufshcd.c static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep) hba 4247 drivers/scsi/ufs/ufshcd.c ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE); hba 4248 drivers/scsi/ufs/ufshcd.c err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, hba 4252 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: Controller disable failed\n", __func__); hba 4265 drivers/scsi/ufs/ufshcd.c static int ufshcd_hba_execute_hce(struct ufs_hba *hba) hba 4269 drivers/scsi/ufs/ufshcd.c if (!ufshcd_is_hba_active(hba)) hba 4271 drivers/scsi/ufs/ufshcd.c ufshcd_hba_stop(hba, true); hba 4274 drivers/scsi/ufs/ufshcd.c ufshcd_set_link_off(hba); hba 4276 drivers/scsi/ufs/ufshcd.c ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); hba 4279 drivers/scsi/ufs/ufshcd.c ufshcd_hba_start(hba); hba 4295 drivers/scsi/ufs/ufshcd.c while (ufshcd_is_hba_active(hba)) { hba 4299 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, hba 4307 drivers/scsi/ufs/ufshcd.c ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); hba 4309 drivers/scsi/ufs/ufshcd.c ufshcd_vops_hce_enable_notify(hba, POST_CHANGE); hba 4314 drivers/scsi/ufs/ufshcd.c static int ufshcd_hba_enable(struct ufs_hba *hba) hba 4318 drivers/scsi/ufs/ufshcd.c if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) { hba 4319 drivers/scsi/ufs/ufshcd.c ufshcd_set_link_off(hba); hba 4320 drivers/scsi/ufs/ufshcd.c ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); hba 4323 drivers/scsi/ufs/ufshcd.c ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); hba 4324 drivers/scsi/ufs/ufshcd.c ret = ufshcd_dme_reset(hba); hba 4326 drivers/scsi/ufs/ufshcd.c ret = ufshcd_dme_enable(hba); hba 4328 drivers/scsi/ufs/ufshcd.c ufshcd_vops_hce_enable_notify(hba, POST_CHANGE); hba 4330 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, hba 4334 drivers/scsi/ufs/ufshcd.c ret = ufshcd_hba_execute_hce(hba); hba 4339 drivers/scsi/ufs/ufshcd.c static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer) hba 4344 drivers/scsi/ufs/ufshcd.c ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), hba 4347 drivers/scsi/ufs/ufshcd.c ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), hba 4351 drivers/scsi/ufs/ufshcd.c err = ufshcd_dme_set(hba, hba 4356 drivers/scsi/ufs/ufshcd.c err = ufshcd_dme_peer_set(hba, hba 4361 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d", hba 4370 drivers/scsi/ufs/ufshcd.c static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba) hba 4372 drivers/scsi/ufs/ufshcd.c return ufshcd_disable_tx_lcc(hba, true); hba 4389 drivers/scsi/ufs/ufshcd.c static int ufshcd_link_startup(struct ufs_hba *hba) hba 4399 drivers/scsi/ufs/ufshcd.c if (!ufshcd_is_ufs_dev_active(hba)) hba 4404 drivers/scsi/ufs/ufshcd.c ufshcd_vops_link_startup_notify(hba, PRE_CHANGE); hba 4406 
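
ufshcd_hba_execute_hce() (hba 4265 onward) writes CONTROLLER_ENABLE and then polls it back until the reset completes or a retry budget runs out (ufshcd_is_hba_active() returns true while the bit still reads zero, hence the driver's while loop). A deterministic model where the "hardware" needs two polls to settle:

#include <stdbool.h>
#include <stdio.h>

#define CONTROLLER_ENABLE 0x1u

static unsigned int hce_reg;  /* stand-in for REG_CONTROLLER_ENABLE */
static int settle = 2;        /* reads as zero for two polls after the write */

static void hba_start(void) { hce_reg = CONTROLLER_ENABLE; }

static bool hce_reads_back_set(void)
{
    return settle-- <= 0 && (hce_reg & CONTROLLER_ENABLE);
}

static int hba_execute_hce(void)
{
    int retry = 10;

    hba_start();
    while (!hce_reads_back_set()) {
        if (!retry--) {
            fprintf(stderr, "controller enable failed\n");
            return -1;
        }
    }
    return 0;
}

int main(void) { return hba_execute_hce(); }
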
drivers/scsi/ufs/ufshcd.c ret = ufshcd_dme_link_startup(hba); hba 4409 drivers/scsi/ufs/ufshcd.c if (!ret && !ufshcd_is_device_present(hba)) { hba 4410 drivers/scsi/ufs/ufshcd.c ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err, hba 4412 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: Device not present\n", __func__); hba 4422 drivers/scsi/ufs/ufshcd.c if (ret && ufshcd_hba_enable(hba)) { hba 4423 drivers/scsi/ufs/ufshcd.c ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err, hba 4431 drivers/scsi/ufs/ufshcd.c ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err, hba 4443 drivers/scsi/ufs/ufshcd.c ufshcd_init_pwr_info(hba); hba 4444 drivers/scsi/ufs/ufshcd.c ufshcd_print_pwr_info(hba); hba 4446 drivers/scsi/ufs/ufshcd.c if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) { hba 4447 drivers/scsi/ufs/ufshcd.c ret = ufshcd_disable_device_tx_lcc(hba); hba 4453 drivers/scsi/ufs/ufshcd.c ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE); hba 4457 drivers/scsi/ufs/ufshcd.c ret = ufshcd_make_hba_operational(hba); hba 4460 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "link startup failed %d\n", ret); hba 4461 drivers/scsi/ufs/ufshcd.c ufshcd_print_host_state(hba); hba 4462 drivers/scsi/ufs/ufshcd.c ufshcd_print_pwr_info(hba); hba 4463 drivers/scsi/ufs/ufshcd.c ufshcd_print_host_regs(hba); hba 4478 drivers/scsi/ufs/ufshcd.c static int ufshcd_verify_dev_init(struct ufs_hba *hba) hba 4483 drivers/scsi/ufs/ufshcd.c ufshcd_hold(hba, false); hba 4484 drivers/scsi/ufs/ufshcd.c mutex_lock(&hba->dev_cmd.lock); hba 4486 drivers/scsi/ufs/ufshcd.c err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, hba 4492 drivers/scsi/ufs/ufshcd.c dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); hba 4494 drivers/scsi/ufs/ufshcd.c mutex_unlock(&hba->dev_cmd.lock); hba 4495 drivers/scsi/ufs/ufshcd.c ufshcd_release(hba); hba 4498 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err); hba 4515 drivers/scsi/ufs/ufshcd.c struct ufs_hba *hba; hba 4517 drivers/scsi/ufs/ufshcd.c hba = shost_priv(sdev->host); hba 4519 drivers/scsi/ufs/ufshcd.c lun_qdepth = hba->nutrs; hba 4520 drivers/scsi/ufs/ufshcd.c ret = ufshcd_read_unit_desc_param(hba, hba 4531 drivers/scsi/ufs/ufshcd.c lun_qdepth = hba->nutrs; hba 4533 drivers/scsi/ufs/ufshcd.c lun_qdepth = min_t(int, lun_qdepth, hba->nutrs); hba 4535 drivers/scsi/ufs/ufshcd.c dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n", hba 4551 drivers/scsi/ufs/ufshcd.c static int ufshcd_get_lu_wp(struct ufs_hba *hba, hba 4567 drivers/scsi/ufs/ufshcd.c ret = ufshcd_read_unit_desc_param(hba, hba 4582 drivers/scsi/ufs/ufshcd.c static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba, hba 4585 drivers/scsi/ufs/ufshcd.c if (hba->dev_info.f_power_on_wp_en && hba 4586 drivers/scsi/ufs/ufshcd.c !hba->dev_info.is_lu_power_on_wp) { hba 4589 drivers/scsi/ufs/ufshcd.c if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun), hba 4592 drivers/scsi/ufs/ufshcd.c hba->dev_info.is_lu_power_on_wp = true; hba 4604 drivers/scsi/ufs/ufshcd.c struct ufs_hba *hba; hba 4606 drivers/scsi/ufs/ufshcd.c hba = shost_priv(sdev->host); hba 4622 drivers/scsi/ufs/ufshcd.c ufshcd_get_lu_power_on_wp_status(hba, sdev); hba 4636 drivers/scsi/ufs/ufshcd.c struct ufs_hba *hba = shost_priv(sdev->host); hba 4638 drivers/scsi/ufs/ufshcd.c if (depth > hba->nutrs) hba 4639 drivers/scsi/ufs/ufshcd.c depth = hba->nutrs; hba 4661 drivers/scsi/ufs/ufshcd.c struct ufs_hba *hba; hba 4663 drivers/scsi/ufs/ufshcd.c hba = shost_priv(sdev->host); hba 4668 
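
ufshcd_verify_dev_init() (hba 4478-4498) probes the freshly started device with NOP OUT device commands and accepts the first success, retrying quietly while the device finishes waking up. Sketch below; the retry budget here is an assumption of this model, and the stub succeeds on the second try:

#include <stdio.h>

#define NOP_OUT_RETRIES 10  /* assumed retry budget for the sketch */

static int tries;
static int exec_nop_out(void) { return ++tries < 2 ? -1 : 0; }

static int verify_dev_init(void)
{
    int err = -1;

    for (int retries = NOP_OUT_RETRIES; retries > 0 && err; retries--)
        err = exec_nop_out();
    if (err)
        fprintf(stderr, "NOP OUT failed %d\n", err);
    return err;
}

int main(void) { return verify_dev_init(); }
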
hba 4668 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags);
hba 4669 drivers/scsi/ufs/ufshcd.c hba->sdev_ufs_device = NULL;
hba 4670 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags);
hba 4717 drivers/scsi/ufs/ufshcd.c ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
hba 4729 drivers/scsi/ufs/ufshcd.c hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
hba 4757 drivers/scsi/ufs/ufshcd.c if (!hba->pm_op_in_progress &&
hba 4759 drivers/scsi/ufs/ufshcd.c schedule_work(&hba->eeh_work);
hba 4764 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev,
hba 4768 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev,
hba 4789 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev,
hba 4792 drivers/scsi/ufs/ufshcd.c ufshcd_print_host_regs(hba);
hba 4793 drivers/scsi/ufs/ufshcd.c ufshcd_print_host_state(hba);
hba 4797 drivers/scsi/ufs/ufshcd.c if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
hba 4798 drivers/scsi/ufs/ufshcd.c ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
hba 4807 drivers/scsi/ufs/ufshcd.c static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
hba 4809 drivers/scsi/ufs/ufshcd.c if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
hba 4810 drivers/scsi/ufs/ufshcd.c hba->active_uic_cmd->argument2 |=
hba 4811 drivers/scsi/ufs/ufshcd.c ufshcd_get_uic_cmd_result(hba);
hba 4812 drivers/scsi/ufs/ufshcd.c hba->active_uic_cmd->argument3 =
hba 4813 drivers/scsi/ufs/ufshcd.c ufshcd_get_dme_attr_val(hba);
hba 4814 drivers/scsi/ufs/ufshcd.c complete(&hba->active_uic_cmd->done);
hba 4817 drivers/scsi/ufs/ufshcd.c if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
hba 4818 drivers/scsi/ufs/ufshcd.c complete(hba->uic_async_done);
hba 4826 drivers/scsi/ufs/ufshcd.c static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
hba 4834 drivers/scsi/ufs/ufshcd.c for_each_set_bit(index, &completed_reqs, hba->nutrs) {
hba 4835 drivers/scsi/ufs/ufshcd.c lrbp = &hba->lrb[index];
hba 4838 drivers/scsi/ufs/ufshcd.c ufshcd_add_command_trace(hba, index, "complete");
hba 4839 drivers/scsi/ufs/ufshcd.c result = ufshcd_transfer_rsp_status(hba, lrbp);
hba 4844 drivers/scsi/ufs/ufshcd.c clear_bit_unlock(index, &hba->lrb_in_use);
hba 4847 drivers/scsi/ufs/ufshcd.c __ufshcd_release(hba);
hba 4850 drivers/scsi/ufs/ufshcd.c if (hba->dev_cmd.complete) {
hba 4851 drivers/scsi/ufs/ufshcd.c ufshcd_add_command_trace(hba, index,
hba 4853 drivers/scsi/ufs/ufshcd.c complete(hba->dev_cmd.complete);
hba 4856 drivers/scsi/ufs/ufshcd.c if (ufshcd_is_clkscaling_supported(hba))
hba 4857 drivers/scsi/ufs/ufshcd.c hba->clk_scaling.active_reqs--;
hba 4863 drivers/scsi/ufs/ufshcd.c hba->outstanding_reqs ^= completed_reqs;
hba 4865 drivers/scsi/ufs/ufshcd.c ufshcd_clk_scaling_update_busy(hba);
hba 4868 drivers/scsi/ufs/ufshcd.c wake_up(&hba->dev_cmd.tag_wq);
hba 4875 drivers/scsi/ufs/ufshcd.c static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
hba 4887 drivers/scsi/ufs/ufshcd.c if (ufshcd_is_intr_aggr_allowed(hba) &&
hba 4888 drivers/scsi/ufs/ufshcd.c !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
hba 4889 drivers/scsi/ufs/ufshcd.c ufshcd_reset_intr_aggr(hba);
hba 4891 drivers/scsi/ufs/ufshcd.c tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
hba 4892 drivers/scsi/ufs/ufshcd.c completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
hba 4894 drivers/scsi/ufs/ufshcd.c __ufshcd_transfer_req_compl(hba, completed_reqs);
hba 4907 drivers/scsi/ufs/ufshcd.c static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
hba 4912 drivers/scsi/ufs/ufshcd.c if (!(hba->ee_ctrl_mask & mask))
hba 4915 drivers/scsi/ufs/ufshcd.c val = hba->ee_ctrl_mask & ~mask;
hba 4917 drivers/scsi/ufs/ufshcd.c err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
hba 4920 drivers/scsi/ufs/ufshcd.c hba->ee_ctrl_mask &= ~mask;
hba 4935 drivers/scsi/ufs/ufshcd.c static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
hba 4940 drivers/scsi/ufs/ufshcd.c if (hba->ee_ctrl_mask & mask)
hba 4943 drivers/scsi/ufs/ufshcd.c val = hba->ee_ctrl_mask | mask;
hba 4945 drivers/scsi/ufs/ufshcd.c err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
hba 4948 drivers/scsi/ufs/ufshcd.c hba->ee_ctrl_mask |= mask;
hba 4964 drivers/scsi/ufs/ufshcd.c static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
hba 4968 drivers/scsi/ufs/ufshcd.c if (hba->auto_bkops_enabled)
hba 4971 drivers/scsi/ufs/ufshcd.c err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
hba 4974 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: failed to enable bkops %d\n",
hba 4979 drivers/scsi/ufs/ufshcd.c hba->auto_bkops_enabled = true;
hba 4980 drivers/scsi/ufs/ufshcd.c trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
hba 4983 drivers/scsi/ufs/ufshcd.c err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
hba 4985 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: failed to disable exception event %d\n",
hba 5003 drivers/scsi/ufs/ufshcd.c static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
hba 5007 drivers/scsi/ufs/ufshcd.c if (!hba->auto_bkops_enabled)
hba 5014 drivers/scsi/ufs/ufshcd.c err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
hba 5016 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: failed to enable exception event %d\n",
hba 5021 drivers/scsi/ufs/ufshcd.c err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
hba 5024 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: failed to disable bkops %d\n",
hba 5026 drivers/scsi/ufs/ufshcd.c ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
hba 5030 drivers/scsi/ufs/ufshcd.c hba->auto_bkops_enabled = false;
hba 5031 drivers/scsi/ufs/ufshcd.c trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
hba 5032 drivers/scsi/ufs/ufshcd.c hba->is_urgent_bkops_lvl_checked = false;
hba 5046 drivers/scsi/ufs/ufshcd.c static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
hba 5048 drivers/scsi/ufs/ufshcd.c if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
hba 5049 drivers/scsi/ufs/ufshcd.c hba->auto_bkops_enabled = false;
hba 5050 drivers/scsi/ufs/ufshcd.c hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
hba 5051 drivers/scsi/ufs/ufshcd.c ufshcd_enable_auto_bkops(hba);
hba 5053 drivers/scsi/ufs/ufshcd.c hba->auto_bkops_enabled = true;
hba 5054 drivers/scsi/ufs/ufshcd.c hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
hba 5055 drivers/scsi/ufs/ufshcd.c ufshcd_disable_auto_bkops(hba);
hba 5057 drivers/scsi/ufs/ufshcd.c hba->is_urgent_bkops_lvl_checked = false;
hba 5060 drivers/scsi/ufs/ufshcd.c static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
hba 5062 drivers/scsi/ufs/ufshcd.c return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
hba 5082 drivers/scsi/ufs/ufshcd.c static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
hba 5088 drivers/scsi/ufs/ufshcd.c err = ufshcd_get_bkops_status(hba, &curr_status);
hba 5090 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
hba 5094 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
hba 5101 drivers/scsi/ufs/ufshcd.c err = ufshcd_enable_auto_bkops(hba);
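The completion path above (ufshcd.c lines 4891-4894 and 4863) shows the doorbell XOR technique: a tag set in outstanding_reqs but already cleared from the transfer-request doorbell register has completed in hardware, so XOR yields exactly the finished slots. A self-contained model of the arithmetic (register values are made up for the demo):

    #include <stdint.h>
    #include <stdio.h>

    /* A bit set in outstanding but clear in the doorbell means the HW
     * finished that slot; XOR extracts the set of completed tags. */
    static uint32_t completed_slots(uint32_t tr_doorbell, uint32_t outstanding)
    {
        return tr_doorbell ^ outstanding;
    }

    int main(void)
    {
        uint32_t outstanding = 0x0000000f;  /* slots 0-3 were issued */
        uint32_t doorbell    = 0x00000005;  /* slots 0 and 2 still pending */
        uint32_t done = completed_slots(doorbell, outstanding);

        for (int tag = 0; tag < 32; tag++)
            if (done & (1u << tag))
                printf("slot %d completed\n", tag);  /* prints 1 and 3 */
        return 0;
    }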
hba 5103 drivers/scsi/ufs/ufshcd.c err = ufshcd_disable_auto_bkops(hba);
hba 5104 drivers/scsi/ufs/ufshcd.c hba->urgent_bkops_lvl = curr_status;
hba 5119 drivers/scsi/ufs/ufshcd.c static int ufshcd_urgent_bkops(struct ufs_hba *hba)
hba 5121 drivers/scsi/ufs/ufshcd.c return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
hba 5124 drivers/scsi/ufs/ufshcd.c static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
hba 5126 drivers/scsi/ufs/ufshcd.c return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
hba 5130 drivers/scsi/ufs/ufshcd.c static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
hba 5135 drivers/scsi/ufs/ufshcd.c if (hba->is_urgent_bkops_lvl_checked)
hba 5138 drivers/scsi/ufs/ufshcd.c err = ufshcd_get_bkops_status(hba, &curr_status);
hba 5140 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
hba 5152 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
hba 5155 drivers/scsi/ufs/ufshcd.c hba->urgent_bkops_lvl = curr_status;
hba 5156 drivers/scsi/ufs/ufshcd.c hba->is_urgent_bkops_lvl_checked = true;
hba 5160 drivers/scsi/ufs/ufshcd.c err = ufshcd_enable_auto_bkops(hba);
hba 5163 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
hba 5176 drivers/scsi/ufs/ufshcd.c struct ufs_hba *hba;
hba 5179 drivers/scsi/ufs/ufshcd.c hba = container_of(work, struct ufs_hba, eeh_work);
hba 5181 drivers/scsi/ufs/ufshcd.c pm_runtime_get_sync(hba->dev);
hba 5182 drivers/scsi/ufs/ufshcd.c scsi_block_requests(hba->host);
hba 5183 drivers/scsi/ufs/ufshcd.c err = ufshcd_get_ee_status(hba, &status);
hba 5185 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: failed to get exception status %d\n",
hba 5190 drivers/scsi/ufs/ufshcd.c status &= hba->ee_ctrl_mask;
hba 5193 drivers/scsi/ufs/ufshcd.c ufshcd_bkops_exception_event_handler(hba);
hba 5196 drivers/scsi/ufs/ufshcd.c scsi_unblock_requests(hba->host);
hba 5197 drivers/scsi/ufs/ufshcd.c pm_runtime_put_sync(hba->dev);
hba 5202 drivers/scsi/ufs/ufshcd.c static void ufshcd_complete_requests(struct ufs_hba *hba)
hba 5204 drivers/scsi/ufs/ufshcd.c ufshcd_transfer_req_compl(hba);
hba 5205 drivers/scsi/ufs/ufshcd.c ufshcd_tmc_handler(hba);
hba 5215 drivers/scsi/ufs/ufshcd.c static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
hba 5220 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags);
hba 5225 drivers/scsi/ufs/ufshcd.c if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
hba 5228 drivers/scsi/ufs/ufshcd.c if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
hba 5229 drivers/scsi/ufs/ufshcd.c ((hba->saved_err & UIC_ERROR) &&
hba 5230 drivers/scsi/ufs/ufshcd.c (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR))
hba 5233 drivers/scsi/ufs/ufshcd.c if ((hba->saved_err & UIC_ERROR) &&
hba 5234 drivers/scsi/ufs/ufshcd.c (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
hba 5239 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags);
hba 5241 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags);
hba 5247 drivers/scsi/ufs/ufshcd.c if ((hba->saved_err & INT_FATAL_ERRORS) ||
hba 5248 drivers/scsi/ufs/ufshcd.c ((hba->saved_err & UIC_ERROR) &&
hba 5249 drivers/scsi/ufs/ufshcd.c (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
hba 5259 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags);
hba 5260 drivers/scsi/ufs/ufshcd.c err = ufshcd_verify_dev_init(hba);
hba 5261 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags);
hba 5267 drivers/scsi/ufs/ufshcd.c if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
hba 5268 drivers/scsi/ufs/ufshcd.c hba->saved_err &= ~UIC_ERROR;
hba 5270 drivers/scsi/ufs/ufshcd.c hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
hba 5271 drivers/scsi/ufs/ufshcd.c if (!hba->saved_uic_err) {
hba 5277 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags);
hba 5287 drivers/scsi/ufs/ufshcd.c struct ufs_hba *hba;
hba 5295 drivers/scsi/ufs/ufshcd.c hba = container_of(work, struct ufs_hba, eh_work);
hba 5297 drivers/scsi/ufs/ufshcd.c pm_runtime_get_sync(hba->dev);
hba 5298 drivers/scsi/ufs/ufshcd.c ufshcd_hold(hba, false);
hba 5300 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags);
hba 5301 drivers/scsi/ufs/ufshcd.c if (hba->ufshcd_state == UFSHCD_STATE_RESET)
hba 5304 drivers/scsi/ufs/ufshcd.c hba->ufshcd_state = UFSHCD_STATE_RESET;
hba 5305 drivers/scsi/ufs/ufshcd.c ufshcd_set_eh_in_progress(hba);
hba 5308 drivers/scsi/ufs/ufshcd.c ufshcd_complete_requests(hba);
hba 5310 drivers/scsi/ufs/ufshcd.c if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
hba 5313 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags);
hba 5315 drivers/scsi/ufs/ufshcd.c ret = ufshcd_quirk_dl_nac_errors(hba);
hba 5316 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags);
hba 5320 drivers/scsi/ufs/ufshcd.c if ((hba->saved_err & INT_FATAL_ERRORS) ||
hba 5321 drivers/scsi/ufs/ufshcd.c (hba->saved_err & UFSHCD_UIC_HIBERN8_MASK) ||
hba 5322 drivers/scsi/ufs/ufshcd.c ((hba->saved_err & UIC_ERROR) &&
hba 5323 drivers/scsi/ufs/ufshcd.c (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
hba 5337 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags);
hba 5339 drivers/scsi/ufs/ufshcd.c for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
hba 5340 drivers/scsi/ufs/ufshcd.c if (ufshcd_clear_cmd(hba, tag)) {
hba 5347 drivers/scsi/ufs/ufshcd.c for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
hba 5348 drivers/scsi/ufs/ufshcd.c if (ufshcd_clear_tm_cmd(hba, tag)) {
hba 5355 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags);
hba 5358 drivers/scsi/ufs/ufshcd.c ufshcd_complete_requests(hba);
hba 5366 drivers/scsi/ufs/ufshcd.c unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
hba 5375 drivers/scsi/ufs/ufshcd.c if (hba->outstanding_reqs == max_doorbells)
hba 5376 drivers/scsi/ufs/ufshcd.c __ufshcd_transfer_req_compl(hba,
hba 5377 drivers/scsi/ufs/ufshcd.c (1UL << (hba->nutrs - 1)));
hba 5379 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags);
hba 5380 drivers/scsi/ufs/ufshcd.c err = ufshcd_reset_and_restore(hba);
hba 5381 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags);
hba 5383 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: reset and restore failed\n",
hba 5385 drivers/scsi/ufs/ufshcd.c hba->ufshcd_state = UFSHCD_STATE_ERROR;
hba 5391 drivers/scsi/ufs/ufshcd.c scsi_report_bus_reset(hba->host, 0);
hba 5392 drivers/scsi/ufs/ufshcd.c hba->saved_err = 0;
hba 5393 drivers/scsi/ufs/ufshcd.c hba->saved_uic_err = 0;
hba 5398 drivers/scsi/ufs/ufshcd.c hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
hba 5399 drivers/scsi/ufs/ufshcd.c if (hba->saved_err || hba->saved_uic_err)
hba 5400 drivers/scsi/ufs/ufshcd.c dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
hba 5401 drivers/scsi/ufs/ufshcd.c __func__, hba->saved_err, hba->saved_uic_err);
hba 5404 drivers/scsi/ufs/ufshcd.c ufshcd_clear_eh_in_progress(hba);
hba 5407 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags);
hba 5408 drivers/scsi/ufs/ufshcd.c ufshcd_scsi_unblock_requests(hba);
hba 5409 drivers/scsi/ufs/ufshcd.c ufshcd_release(hba);
hba 5410 drivers/scsi/ufs/ufshcd.c pm_runtime_put_sync(hba->dev);
hba 5417 drivers/scsi/ufs/ufshcd.c static void ufshcd_update_uic_error(struct ufs_hba *hba)
hba 5422 drivers/scsi/ufs/ufshcd.c reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
hba 5430 drivers/scsi/ufs/ufshcd.c dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
hba 5431 drivers/scsi/ufs/ufshcd.c ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg);
hba 5435 drivers/scsi/ufs/ufshcd.c reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
hba 5437 drivers/scsi/ufs/ufshcd.c ufshcd_update_reg_hist(&hba->ufs_stats.dl_err, reg);
hba 5440 drivers/scsi/ufs/ufshcd.c hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
hba 5441 drivers/scsi/ufs/ufshcd.c else if (hba->dev_quirks &
hba 5444 drivers/scsi/ufs/ufshcd.c hba->uic_error |=
hba 5447 drivers/scsi/ufs/ufshcd.c hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
hba 5451 drivers/scsi/ufs/ufshcd.c reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
hba 5453 drivers/scsi/ufs/ufshcd.c ufshcd_update_reg_hist(&hba->ufs_stats.nl_err, reg);
hba 5454 drivers/scsi/ufs/ufshcd.c hba->uic_error |= UFSHCD_UIC_NL_ERROR;
hba 5457 drivers/scsi/ufs/ufshcd.c reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
hba 5459 drivers/scsi/ufs/ufshcd.c ufshcd_update_reg_hist(&hba->ufs_stats.tl_err, reg);
hba 5460 drivers/scsi/ufs/ufshcd.c hba->uic_error |= UFSHCD_UIC_TL_ERROR;
hba 5463 drivers/scsi/ufs/ufshcd.c reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
hba 5465 drivers/scsi/ufs/ufshcd.c ufshcd_update_reg_hist(&hba->ufs_stats.dme_err, reg);
hba 5466 drivers/scsi/ufs/ufshcd.c hba->uic_error |= UFSHCD_UIC_DME_ERROR;
hba 5469 drivers/scsi/ufs/ufshcd.c dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
hba 5470 drivers/scsi/ufs/ufshcd.c __func__, hba->uic_error);
hba 5473 drivers/scsi/ufs/ufshcd.c static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
hba 5476 drivers/scsi/ufs/ufshcd.c if (!ufshcd_is_auto_hibern8_supported(hba) ||
hba 5477 drivers/scsi/ufs/ufshcd.c !ufshcd_is_auto_hibern8_enabled(hba))
hba 5483 drivers/scsi/ufs/ufshcd.c if (hba->active_uic_cmd &&
hba 5484 drivers/scsi/ufs/ufshcd.c (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
hba 5485 drivers/scsi/ufs/ufshcd.c hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
hba 5495 drivers/scsi/ufs/ufshcd.c static void ufshcd_check_errors(struct ufs_hba *hba)
hba 5499 drivers/scsi/ufs/ufshcd.c if (hba->errors & INT_FATAL_ERRORS) {
hba 5500 drivers/scsi/ufs/ufshcd.c ufshcd_update_reg_hist(&hba->ufs_stats.fatal_err, hba->errors);
hba 5504 drivers/scsi/ufs/ufshcd.c if (hba->errors & UIC_ERROR) {
hba 5505 drivers/scsi/ufs/ufshcd.c hba->uic_error = 0;
hba 5506 drivers/scsi/ufs/ufshcd.c ufshcd_update_uic_error(hba);
hba 5507 drivers/scsi/ufs/ufshcd.c if (hba->uic_error)
hba 5511 drivers/scsi/ufs/ufshcd.c if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
hba 5512 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev,
hba 5514 drivers/scsi/ufs/ufshcd.c __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
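The ufshcd_bkops_ctrl() hits (ufshcd.c lines 5082-5104) show the background-operations threshold rule: read bBackgroundOpStatus, reject out-of-range values, and enable auto-BKOPS only when the reported urgency has reached the requested level, otherwise disable it. A small model of that decision, with illustrative level values rather than the driver's enum:

    #include <stdio.h>

    /* Illustrative urgency levels; the driver's enum bkops_status differs. */
    enum { BKOPS_NON_CRITICAL = 1, BKOPS_PERF_IMPACT = 2, BKOPS_LEVEL_MAX = 3 };

    static int model_bkops_ctrl(unsigned int curr_status,
                                unsigned int wanted_level,
                                int *auto_bkops_enabled)
    {
        if (curr_status > BKOPS_LEVEL_MAX)
            return -1;                          /* invalid BKOPS status */
        /* enable iff the device has reached the requested urgency */
        *auto_bkops_enabled = (curr_status >= wanted_level);
        return 0;
    }

    int main(void)
    {
        int enabled;

        if (model_bkops_ctrl(BKOPS_PERF_IMPACT, BKOPS_PERF_IMPACT, &enabled))
            return 1;
        printf("auto bkops %s\n", enabled ? "enabled" : "disabled");
        return 0;
    }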
hba 5516 drivers/scsi/ufs/ufshcd.c hba->errors, ufshcd_get_upmcrs(hba));
hba 5517 drivers/scsi/ufs/ufshcd.c ufshcd_update_reg_hist(&hba->ufs_stats.auto_hibern8_err,
hba 5518 drivers/scsi/ufs/ufshcd.c hba->errors);
hba 5527 drivers/scsi/ufs/ufshcd.c hba->saved_err |= hba->errors;
hba 5528 drivers/scsi/ufs/ufshcd.c hba->saved_uic_err |= hba->uic_error;
hba 5531 drivers/scsi/ufs/ufshcd.c if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
hba 5533 drivers/scsi/ufs/ufshcd.c ufshcd_scsi_block_requests(hba);
hba 5535 drivers/scsi/ufs/ufshcd.c hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
hba 5538 drivers/scsi/ufs/ufshcd.c if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
hba 5539 drivers/scsi/ufs/ufshcd.c bool pr_prdt = !!(hba->saved_err &
hba 5542 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
hba 5543 drivers/scsi/ufs/ufshcd.c __func__, hba->saved_err,
hba 5544 drivers/scsi/ufs/ufshcd.c hba->saved_uic_err);
hba 5546 drivers/scsi/ufs/ufshcd.c ufshcd_print_host_regs(hba);
hba 5547 drivers/scsi/ufs/ufshcd.c ufshcd_print_pwr_info(hba);
hba 5548 drivers/scsi/ufs/ufshcd.c ufshcd_print_tmrs(hba, hba->outstanding_tasks);
hba 5549 drivers/scsi/ufs/ufshcd.c ufshcd_print_trs(hba, hba->outstanding_reqs,
hba 5552 drivers/scsi/ufs/ufshcd.c schedule_work(&hba->eh_work);
hba 5567 drivers/scsi/ufs/ufshcd.c static void ufshcd_tmc_handler(struct ufs_hba *hba)
hba 5571 drivers/scsi/ufs/ufshcd.c tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
hba 5572 drivers/scsi/ufs/ufshcd.c hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
hba 5573 drivers/scsi/ufs/ufshcd.c wake_up(&hba->tm_wq);
hba 5581 drivers/scsi/ufs/ufshcd.c static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
hba 5583 drivers/scsi/ufs/ufshcd.c hba->errors = UFSHCD_ERROR_MASK & intr_status;
hba 5585 drivers/scsi/ufs/ufshcd.c if (ufshcd_is_auto_hibern8_error(hba, intr_status))
hba 5586 drivers/scsi/ufs/ufshcd.c hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
hba 5588 drivers/scsi/ufs/ufshcd.c if (hba->errors)
hba 5589 drivers/scsi/ufs/ufshcd.c ufshcd_check_errors(hba);
hba 5592 drivers/scsi/ufs/ufshcd.c ufshcd_uic_cmd_compl(hba, intr_status);
hba 5595 drivers/scsi/ufs/ufshcd.c ufshcd_tmc_handler(hba);
hba 5598 drivers/scsi/ufs/ufshcd.c ufshcd_transfer_req_compl(hba);
hba 5613 drivers/scsi/ufs/ufshcd.c struct ufs_hba *hba = __hba;
hba 5614 drivers/scsi/ufs/ufshcd.c int retries = hba->nutrs;
hba 5616 drivers/scsi/ufs/ufshcd.c spin_lock(hba->host->host_lock);
hba 5617 drivers/scsi/ufs/ufshcd.c intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
hba 5627 drivers/scsi/ufs/ufshcd.c intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
hba 5629 drivers/scsi/ufs/ufshcd.c ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
hba 5631 drivers/scsi/ufs/ufshcd.c ufshcd_sl_intr(hba, enabled_intr_status);
hba 5635 drivers/scsi/ufs/ufshcd.c intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
hba 5638 drivers/scsi/ufs/ufshcd.c spin_unlock(hba->host->host_lock);
hba 5642 drivers/scsi/ufs/ufshcd.c static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
hba 5648 drivers/scsi/ufs/ufshcd.c if (!test_bit(tag, &hba->outstanding_tasks))
hba 5651 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags);
hba 5652 drivers/scsi/ufs/ufshcd.c ufshcd_utmrl_clear(hba, tag);
hba 5653 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags);
hba 5656 drivers/scsi/ufs/ufshcd.c err = ufshcd_wait_for_register(hba,
hba 5663 drivers/scsi/ufs/ufshcd.c static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
hba 5666 drivers/scsi/ufs/ufshcd.c struct Scsi_Host *host = hba->host;
hba 5675 drivers/scsi/ufs/ufshcd.c wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
hba 5676 drivers/scsi/ufs/ufshcd.c ufshcd_hold(hba, false);
hba 5679 drivers/scsi/ufs/ufshcd.c task_tag = hba->nutrs + free_slot;
hba 5683 drivers/scsi/ufs/ufshcd.c memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq));
hba 5684 drivers/scsi/ufs/ufshcd.c ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
hba 5687 drivers/scsi/ufs/ufshcd.c __set_bit(free_slot, &hba->outstanding_tasks);
hba 5692 drivers/scsi/ufs/ufshcd.c ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
hba 5698 drivers/scsi/ufs/ufshcd.c ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
hba 5701 drivers/scsi/ufs/ufshcd.c err = wait_event_timeout(hba->tm_wq,
hba 5702 drivers/scsi/ufs/ufshcd.c test_bit(free_slot, &hba->tm_condition),
hba 5705 drivers/scsi/ufs/ufshcd.c ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
hba 5706 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
hba 5708 drivers/scsi/ufs/ufshcd.c if (ufshcd_clear_tm_cmd(hba, free_slot))
hba 5709 drivers/scsi/ufs/ufshcd.c dev_WARN(hba->dev, "%s: unable clear tm cmd (slot %d) after timeout\n",
hba 5714 drivers/scsi/ufs/ufshcd.c memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
hba 5716 drivers/scsi/ufs/ufshcd.c ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
hba 5719 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags);
hba 5720 drivers/scsi/ufs/ufshcd.c __clear_bit(free_slot, &hba->outstanding_tasks);
hba 5721 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags);
hba 5723 drivers/scsi/ufs/ufshcd.c clear_bit(free_slot, &hba->tm_condition);
hba 5724 drivers/scsi/ufs/ufshcd.c ufshcd_put_tm_slot(hba, free_slot);
hba 5725 drivers/scsi/ufs/ufshcd.c wake_up(&hba->tm_tag_wq);
hba 5727 drivers/scsi/ufs/ufshcd.c ufshcd_release(hba);
hba 5741 drivers/scsi/ufs/ufshcd.c static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
hba 5763 drivers/scsi/ufs/ufshcd.c err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
hba 5769 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
hba 5794 drivers/scsi/ufs/ufshcd.c static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
hba 5808 drivers/scsi/ufs/ufshcd.c down_read(&hba->clk_scaling_lock);
hba 5810 drivers/scsi/ufs/ufshcd.c wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
hba 5813 drivers/scsi/ufs/ufshcd.c lrbp = &hba->lrb[tag];
hba 5822 drivers/scsi/ufs/ufshcd.c hba->dev_cmd.type = cmd_type;
hba 5824 drivers/scsi/ufs/ufshcd.c switch (hba->ufs_version) {
hba 5852 drivers/scsi/ufs/ufshcd.c hba->dev_cmd.complete = &wait;
hba 5856 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags);
hba 5857 drivers/scsi/ufs/ufshcd.c ufshcd_send_command(hba, tag);
hba 5858 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags);
hba 5865 drivers/scsi/ufs/ufshcd.c ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
hba 5878 drivers/scsi/ufs/ufshcd.c dev_warn(hba->dev, "rsp size is bigger than buffer");
hba 5884 drivers/scsi/ufs/ufshcd.c ufshcd_put_dev_cmd_tag(hba, tag);
hba 5885 drivers/scsi/ufs/ufshcd.c wake_up(&hba->dev_cmd.tag_wq);
hba 5886 drivers/scsi/ufs/ufshcd.c up_read(&hba->clk_scaling_lock);
hba 5905 drivers/scsi/ufs/ufshcd.c int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
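The __ufshcd_issue_tm_cmd() hits (ufshcd.c lines 5663-5727) trace a task-management slot's lifecycle: wait for a free slot, derive the task tag as nutrs + free_slot, copy the request into the TM descriptor list, set the outstanding bit, ring the TM doorbell, wait with a timeout, then clear and release the slot. A self-contained model of the slot bitmap part only, with made-up queue sizes (the driver sleeps on a waitqueue instead of returning -1 when slots are exhausted):

    #include <stdint.h>
    #include <stdio.h>

    #define NUTRS  32   /* transfer-request slots (illustrative) */
    #define NUTMRS 8    /* task-management slots (illustrative) */

    /* modeled on ufshcd_get_tm_free_slot(): find and claim a clear bit */
    static int get_tm_slot(uint32_t *in_use)
    {
        for (int slot = 0; slot < NUTMRS; slot++) {
            if (!(*in_use & (1u << slot))) {
                *in_use |= 1u << slot;
                return slot;
            }
        }
        return -1;      /* all busy; the driver would sleep here */
    }

    /* modeled on ufshcd_put_tm_slot(): release the claimed bit */
    static void put_tm_slot(uint32_t *in_use, int slot)
    {
        *in_use &= ~(1u << slot);
    }

    int main(void)
    {
        uint32_t in_use = 0;
        int slot = get_tm_slot(&in_use);

        /* TM tags live above the transfer-request tag space (line 5679) */
        printf("free_slot=%d task_tag=%d\n", slot, NUTRS + slot);
        put_tm_slot(&in_use, slot);
        return 0;
    }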
hba 5923 drivers/scsi/ufs/ufshcd.c ufshcd_hold(hba, false);
hba 5924 drivers/scsi/ufs/ufshcd.c mutex_lock(&hba->dev_cmd.lock);
hba 5925 drivers/scsi/ufs/ufshcd.c err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
hba 5928 drivers/scsi/ufs/ufshcd.c mutex_unlock(&hba->dev_cmd.lock);
hba 5929 drivers/scsi/ufs/ufshcd.c ufshcd_release(hba);
hba 5938 drivers/scsi/ufs/ufshcd.c err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
hba 5944 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
hba 5971 drivers/scsi/ufs/ufshcd.c struct ufs_hba *hba;
hba 5980 drivers/scsi/ufs/ufshcd.c hba = shost_priv(host);
hba 5983 drivers/scsi/ufs/ufshcd.c lrbp = &hba->lrb[tag];
hba 5984 drivers/scsi/ufs/ufshcd.c err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
hba 5992 drivers/scsi/ufs/ufshcd.c for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
hba 5993 drivers/scsi/ufs/ufshcd.c if (hba->lrb[pos].lun == lrbp->lun) {
hba 5994 drivers/scsi/ufs/ufshcd.c err = ufshcd_clear_cmd(hba, pos);
hba 6000 drivers/scsi/ufs/ufshcd.c ufshcd_transfer_req_compl(hba);
hba 6004 drivers/scsi/ufs/ufshcd.c hba->req_abort_count = 0;
hba 6005 drivers/scsi/ufs/ufshcd.c ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, (u32)err);
hba 6009 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
hba 6015 drivers/scsi/ufs/ufshcd.c static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
hba 6020 drivers/scsi/ufs/ufshcd.c for_each_set_bit(tag, &bitmap, hba->nutrs) {
hba 6021 drivers/scsi/ufs/ufshcd.c lrbp = &hba->lrb[tag];
hba 6041 drivers/scsi/ufs/ufshcd.c struct ufs_hba *hba;
hba 6051 drivers/scsi/ufs/ufshcd.c hba = shost_priv(host);
hba 6053 drivers/scsi/ufs/ufshcd.c lrbp = &hba->lrb[tag];
hba 6054 drivers/scsi/ufs/ufshcd.c if (!ufshcd_valid_tag(hba, tag)) {
hba 6055 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev,
hba 6071 drivers/scsi/ufs/ufshcd.c ufshcd_hold(hba, false);
hba 6072 drivers/scsi/ufs/ufshcd.c reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
hba 6074 drivers/scsi/ufs/ufshcd.c if (!(test_bit(tag, &hba->outstanding_reqs))) {
hba 6075 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev,
hba 6077 drivers/scsi/ufs/ufshcd.c __func__, tag, hba->outstanding_reqs, reg);
hba 6082 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev,
hba 6088 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
hba 6097 drivers/scsi/ufs/ufshcd.c scsi_print_command(hba->lrb[tag].cmd);
hba 6098 drivers/scsi/ufs/ufshcd.c if (!hba->req_abort_count) {
hba 6099 drivers/scsi/ufs/ufshcd.c ufshcd_update_reg_hist(&hba->ufs_stats.task_abort, 0);
hba 6100 drivers/scsi/ufs/ufshcd.c ufshcd_print_host_regs(hba);
hba 6101 drivers/scsi/ufs/ufshcd.c ufshcd_print_host_state(hba);
hba 6102 drivers/scsi/ufs/ufshcd.c ufshcd_print_pwr_info(hba);
hba 6103 drivers/scsi/ufs/ufshcd.c ufshcd_print_trs(hba, 1 << tag, true);
hba 6105 drivers/scsi/ufs/ufshcd.c ufshcd_print_trs(hba, 1 << tag, false);
hba 6107 drivers/scsi/ufs/ufshcd.c hba->req_abort_count++;
hba 6116 drivers/scsi/ufs/ufshcd.c err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
hba 6120 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
hba 6128 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
hba 6130 drivers/scsi/ufs/ufshcd.c reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
hba 6137 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
hba 6141 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev,
hba 6155 drivers/scsi/ufs/ufshcd.c err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
hba 6160 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
hba 6166 drivers/scsi/ufs/ufshcd.c err = ufshcd_clear_cmd(hba, tag);
hba 6168 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
hba 6176 drivers/scsi/ufs/ufshcd.c ufshcd_outstanding_req_clear(hba, tag);
hba 6177 drivers/scsi/ufs/ufshcd.c hba->lrb[tag].cmd = NULL;
hba 6180 drivers/scsi/ufs/ufshcd.c clear_bit_unlock(tag, &hba->lrb_in_use);
hba 6181 drivers/scsi/ufs/ufshcd.c wake_up(&hba->dev_cmd.tag_wq);
hba 6187 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
hba 6188 drivers/scsi/ufs/ufshcd.c ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
hba 6196 drivers/scsi/ufs/ufshcd.c ufshcd_release(hba);
hba 6210 drivers/scsi/ufs/ufshcd.c static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
hba 6219 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags);
hba 6220 drivers/scsi/ufs/ufshcd.c ufshcd_hba_stop(hba, false);
hba 6221 drivers/scsi/ufs/ufshcd.c hba->silence_err_logs = true;
hba 6222 drivers/scsi/ufs/ufshcd.c ufshcd_complete_requests(hba);
hba 6223 drivers/scsi/ufs/ufshcd.c hba->silence_err_logs = false;
hba 6224 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags);
hba 6227 drivers/scsi/ufs/ufshcd.c ufshcd_scale_clks(hba, true);
hba 6229 drivers/scsi/ufs/ufshcd.c err = ufshcd_hba_enable(hba);
hba 6234 drivers/scsi/ufs/ufshcd.c err = ufshcd_probe_hba(hba);
hba 6236 drivers/scsi/ufs/ufshcd.c if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
hba 6240 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
hba 6241 drivers/scsi/ufs/ufshcd.c ufshcd_update_reg_hist(&hba->ufs_stats.host_reset, (u32)err);
hba 6254 drivers/scsi/ufs/ufshcd.c static int ufshcd_reset_and_restore(struct ufs_hba *hba)
hba 6261 drivers/scsi/ufs/ufshcd.c ufshcd_vops_device_reset(hba);
hba 6263 drivers/scsi/ufs/ufshcd.c err = ufshcd_host_reset_and_restore(hba);
hba 6279 drivers/scsi/ufs/ufshcd.c struct ufs_hba *hba;
hba 6281 drivers/scsi/ufs/ufshcd.c hba = shost_priv(cmd->device->host);
hba 6283 drivers/scsi/ufs/ufshcd.c ufshcd_hold(hba, false);
hba 6291 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags);
hba 6292 drivers/scsi/ufs/ufshcd.c if (!(work_pending(&hba->eh_work) ||
hba 6293 drivers/scsi/ufs/ufshcd.c hba->ufshcd_state == UFSHCD_STATE_RESET ||
hba 6294 drivers/scsi/ufs/ufshcd.c hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED))
hba 6296 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags);
hba 6297 drivers/scsi/ufs/ufshcd.c dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
hba 6298 drivers/scsi/ufs/ufshcd.c flush_work(&hba->eh_work);
hba 6301 drivers/scsi/ufs/ufshcd.c hba->ufshcd_state = UFSHCD_STATE_RESET;
hba 6302 drivers/scsi/ufs/ufshcd.c ufshcd_set_eh_in_progress(hba);
hba 6303 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags);
hba 6305 drivers/scsi/ufs/ufshcd.c err = ufshcd_reset_and_restore(hba);
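The abort handler hits (ufshcd.c lines 6041-6196) show it cross-checking the tag against both outstanding_reqs and the live doorbell register before deciding how to recover. A self-contained model of that classification, using invented register values and message strings:

    #include <stdint.h>
    #include <stdio.h>

    /* A tag absent from outstanding_reqs already completed and was retired;
     * a tag set in outstanding_reqs but clear in the doorbell finished in
     * the device but its completion has not been processed yet; otherwise
     * the command is genuinely pending and an ABORT TASK TM is issued. */
    static const char *classify_abort(uint32_t doorbell, uint32_t outstanding,
                                      int tag)
    {
        uint32_t bit = 1u << tag;

        if (!(outstanding & bit))
            return "already completed and retired";
        if (!(doorbell & bit))
            return "completed in device, completion IRQ pending";
        return "still pending in device, issue ABORT TASK";
    }

    int main(void)
    {
        /* tag 0: outstanding but no longer in the doorbell */
        printf("%s\n", classify_abort(0x2, 0x3, 0));
        return 0;
    }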
hba 6307 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags);
hba 6310 drivers/scsi/ufs/ufshcd.c hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
hba 6313 drivers/scsi/ufs/ufshcd.c hba->ufshcd_state = UFSHCD_STATE_ERROR;
hba 6315 drivers/scsi/ufs/ufshcd.c ufshcd_clear_eh_in_progress(hba);
hba 6316 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags);
hba 6318 drivers/scsi/ufs/ufshcd.c ufshcd_release(hba);
hba 6376 drivers/scsi/ufs/ufshcd.c static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
hba 6381 drivers/scsi/ufs/ufshcd.c if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
hba 6382 drivers/scsi/ufs/ufshcd.c !hba->vreg_info.vccq2) {
hba 6383 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev,
hba 6389 drivers/scsi/ufs/ufshcd.c if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
hba 6391 drivers/scsi/ufs/ufshcd.c hba->vreg_info.vcc->max_uA,
hba 6395 drivers/scsi/ufs/ufshcd.c if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
hba 6397 drivers/scsi/ufs/ufshcd.c hba->vreg_info.vccq->max_uA,
hba 6401 drivers/scsi/ufs/ufshcd.c if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
hba 6403 drivers/scsi/ufs/ufshcd.c hba->vreg_info.vccq2->max_uA,
hba 6410 drivers/scsi/ufs/ufshcd.c static void ufshcd_init_icc_levels(struct ufs_hba *hba)
hba 6413 drivers/scsi/ufs/ufshcd.c int buff_len = hba->desc_size.pwr_desc;
hba 6420 drivers/scsi/ufs/ufshcd.c ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
hba 6422 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev,
hba 6428 drivers/scsi/ufs/ufshcd.c hba->init_prefetch_data.icc_level =
hba 6429 drivers/scsi/ufs/ufshcd.c ufshcd_find_max_sup_active_icc_level(hba,
hba 6431 drivers/scsi/ufs/ufshcd.c dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
hba 6432 drivers/scsi/ufs/ufshcd.c __func__, hba->init_prefetch_data.icc_level);
hba 6434 drivers/scsi/ufs/ufshcd.c ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
hba 6436 drivers/scsi/ufs/ufshcd.c &hba->init_prefetch_data.icc_level);
hba 6439 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev,
hba 6441 drivers/scsi/ufs/ufshcd.c __func__, hba->init_prefetch_data.icc_level , ret);
hba 6473 drivers/scsi/ufs/ufshcd.c static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
hba 6479 drivers/scsi/ufs/ufshcd.c hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
hba 6481 drivers/scsi/ufs/ufshcd.c if (IS_ERR(hba->sdev_ufs_device)) {
hba 6482 drivers/scsi/ufs/ufshcd.c ret = PTR_ERR(hba->sdev_ufs_device);
hba 6483 drivers/scsi/ufs/ufshcd.c hba->sdev_ufs_device = NULL;
hba 6486 drivers/scsi/ufs/ufshcd.c scsi_device_put(hba->sdev_ufs_device);
hba 6488 drivers/scsi/ufs/ufshcd.c sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
hba 6496 drivers/scsi/ufs/ufshcd.c sdev_boot = __scsi_add_device(hba->host, 0, 0,
hba 6499 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
hba 6505 drivers/scsi/ufs/ufshcd.c scsi_remove_device(hba->sdev_ufs_device);
hba 6510 drivers/scsi/ufs/ufshcd.c static int ufs_get_device_desc(struct ufs_hba *hba,
hba 6521 drivers/scsi/ufs/ufshcd.c buff_len = max_t(size_t, hba->desc_size.dev_desc,
hba 6529 drivers/scsi/ufs/ufshcd.c err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
hba 6531 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
hba 6544 drivers/scsi/ufs/ufshcd.c err = ufshcd_read_string_desc(hba, model_index,
hba 6547 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
hba 6569 drivers/scsi/ufs/ufshcd.c static void ufs_fixup_device_setup(struct ufs_hba *hba,
hba 6580 drivers/scsi/ufs/ufshcd.c hba->dev_quirks |= f->quirk;
hba 6595 drivers/scsi/ufs/ufshcd.c static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
hba 6600 drivers/scsi/ufs/ufshcd.c ret = ufshcd_dme_peer_get(hba,
hba 6612 drivers/scsi/ufs/ufshcd.c ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
hba 6630 drivers/scsi/ufs/ufshcd.c static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
hba 6636 drivers/scsi/ufs/ufshcd.c ret = ufshcd_dme_get(hba,
hba 6643 drivers/scsi/ufs/ufshcd.c ret = ufshcd_dme_peer_get(hba,
hba 6655 drivers/scsi/ufs/ufshcd.c ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
hba 6672 drivers/scsi/ufs/ufshcd.c static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
hba 6680 drivers/scsi/ufs/ufshcd.c ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
hba 6685 drivers/scsi/ufs/ufshcd.c ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
hba 6692 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
hba 6699 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
hba 6704 drivers/scsi/ufs/ufshcd.c ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
hba 6708 drivers/scsi/ufs/ufshcd.c ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
hba 6723 drivers/scsi/ufs/ufshcd.c ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
hba 6731 drivers/scsi/ufs/ufshcd.c static void ufshcd_tune_unipro_params(struct ufs_hba *hba,
hba 6734 drivers/scsi/ufs/ufshcd.c if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
hba 6735 drivers/scsi/ufs/ufshcd.c ufshcd_tune_pa_tactivate(hba);
hba 6736 drivers/scsi/ufs/ufshcd.c ufshcd_tune_pa_hibern8time(hba);
hba 6739 drivers/scsi/ufs/ufshcd.c if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
hba 6741 drivers/scsi/ufs/ufshcd.c ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
hba 6743 drivers/scsi/ufs/ufshcd.c if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
hba 6744 drivers/scsi/ufs/ufshcd.c ufshcd_quirk_tune_host_pa_tactivate(hba);
hba 6746 drivers/scsi/ufs/ufshcd.c ufshcd_vops_apply_dev_quirks(hba, card);
hba 6749 drivers/scsi/ufs/ufshcd.c static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
hba 6751 drivers/scsi/ufs/ufshcd.c hba->ufs_stats.hibern8_exit_cnt = 0;
hba 6752 drivers/scsi/ufs/ufshcd.c hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
hba 6753 drivers/scsi/ufs/ufshcd.c hba->req_abort_count = 0;
hba 6756 drivers/scsi/ufs/ufshcd.c static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
hba 6760 drivers/scsi/ufs/ufshcd.c err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
hba 6761 drivers/scsi/ufs/ufshcd.c &hba->desc_size.dev_desc);
hba 6763 drivers/scsi/ufs/ufshcd.c hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
hba 6765 drivers/scsi/ufs/ufshcd.c err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
hba 6766 drivers/scsi/ufs/ufshcd.c &hba->desc_size.pwr_desc);
hba 6768 drivers/scsi/ufs/ufshcd.c hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
hba 6770 drivers/scsi/ufs/ufshcd.c err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
hba 6771 drivers/scsi/ufs/ufshcd.c &hba->desc_size.interc_desc);
hba 6773 drivers/scsi/ufs/ufshcd.c hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
hba 6775 drivers/scsi/ufs/ufshcd.c err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
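The ufshcd_find_max_sup_active_icc_level() hits (ufshcd.c lines 6376-6403) show each regulator rail's max_uA constraining the active ICC level the device may be programmed with. A loose user-space model of the final cap only, assuming each rail's per-level limit has already been resolved to a level number (the real function derives these from the power descriptor):

    #include <stdio.h>

    /* each rail caps the usable ICC level; the device gets the minimum */
    static unsigned int max_icc_level(unsigned int vcc_lvl,
                                      unsigned int vccq_lvl,
                                      unsigned int vccq2_lvl)
    {
        unsigned int lvl = vcc_lvl;

        if (vccq_lvl < lvl)
            lvl = vccq_lvl;
        if (vccq2_lvl < lvl)
            lvl = vccq2_lvl;
        return lvl;
    }

    int main(void)
    {
        printf("icc_level 0x%x\n", max_icc_level(0xf, 0xa, 0xc)); /* 0xa */
        return 0;
    }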
hba 6776 drivers/scsi/ufs/ufshcd.c &hba->desc_size.conf_desc);
hba 6778 drivers/scsi/ufs/ufshcd.c hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
hba 6780 drivers/scsi/ufs/ufshcd.c err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
hba 6781 drivers/scsi/ufs/ufshcd.c &hba->desc_size.unit_desc);
hba 6783 drivers/scsi/ufs/ufshcd.c hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
hba 6785 drivers/scsi/ufs/ufshcd.c err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
hba 6786 drivers/scsi/ufs/ufshcd.c &hba->desc_size.geom_desc);
hba 6788 drivers/scsi/ufs/ufshcd.c hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
hba 6790 drivers/scsi/ufs/ufshcd.c err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
hba 6791 drivers/scsi/ufs/ufshcd.c &hba->desc_size.hlth_desc);
hba 6793 drivers/scsi/ufs/ufshcd.c hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
hba 6816 drivers/scsi/ufs/ufshcd.c void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
hba 6822 drivers/scsi/ufs/ufshcd.c hba->dev_ref_clk_freq =
hba 6825 drivers/scsi/ufs/ufshcd.c if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
hba 6826 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev,
hba 6830 drivers/scsi/ufs/ufshcd.c static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
hba 6834 drivers/scsi/ufs/ufshcd.c u32 freq = hba->dev_ref_clk_freq;
hba 6836 drivers/scsi/ufs/ufshcd.c err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
hba 6840 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
hba 6848 drivers/scsi/ufs/ufshcd.c err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
hba 6852 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
hba 6857 drivers/scsi/ufs/ufshcd.c dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
hba 6870 drivers/scsi/ufs/ufshcd.c static int ufshcd_probe_hba(struct ufs_hba *hba)
hba 6876 drivers/scsi/ufs/ufshcd.c ret = ufshcd_link_startup(hba);
hba 6881 drivers/scsi/ufs/ufshcd.c hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
hba 6882 drivers/scsi/ufs/ufshcd.c hba->is_urgent_bkops_lvl_checked = false;
hba 6885 drivers/scsi/ufs/ufshcd.c ufshcd_clear_dbg_ufs_stats(hba);
hba 6888 drivers/scsi/ufs/ufshcd.c ufshcd_set_link_active(hba);
hba 6890 drivers/scsi/ufs/ufshcd.c ret = ufshcd_verify_dev_init(hba);
hba 6894 drivers/scsi/ufs/ufshcd.c ret = ufshcd_complete_dev_init(hba);
hba 6899 drivers/scsi/ufs/ufshcd.c ufshcd_init_desc_sizes(hba);
hba 6901 drivers/scsi/ufs/ufshcd.c ret = ufs_get_device_desc(hba, &card);
hba 6903 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
hba 6908 drivers/scsi/ufs/ufshcd.c ufs_fixup_device_setup(hba, &card);
hba 6909 drivers/scsi/ufs/ufshcd.c ufshcd_tune_unipro_params(hba, &card);
hba 6913 drivers/scsi/ufs/ufshcd.c ufshcd_set_ufs_dev_active(hba);
hba 6914 drivers/scsi/ufs/ufshcd.c ufshcd_force_reset_auto_bkops(hba);
hba 6915 drivers/scsi/ufs/ufshcd.c hba->wlun_dev_clr_ua = true;
hba 6917 drivers/scsi/ufs/ufshcd.c if (ufshcd_get_max_pwr_mode(hba)) {
hba 6918 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev,
hba 6926 drivers/scsi/ufs/ufshcd.c if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
hba 6927 drivers/scsi/ufs/ufshcd.c ufshcd_set_dev_ref_clk(hba);
hba 6928 drivers/scsi/ufs/ufshcd.c ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
hba 6930 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
hba 6937 drivers/scsi/ufs/ufshcd.c hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
hba 6940 drivers/scsi/ufs/ufshcd.c ufshcd_auto_hibern8_enable(hba);
hba 6946 drivers/scsi/ufs/ufshcd.c if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
hba 6950 drivers/scsi/ufs/ufshcd.c memset(&hba->dev_info, 0, sizeof(hba->dev_info));
hba 6951 drivers/scsi/ufs/ufshcd.c if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
hba 6953 drivers/scsi/ufs/ufshcd.c hba->dev_info.f_power_on_wp_en = flag;
hba 6955 drivers/scsi/ufs/ufshcd.c if (!hba->is_init_prefetch)
hba 6956 drivers/scsi/ufs/ufshcd.c ufshcd_init_icc_levels(hba);
hba 6959 drivers/scsi/ufs/ufshcd.c ret = ufshcd_scsi_add_wlus(hba);
hba 6964 drivers/scsi/ufs/ufshcd.c if (ufshcd_is_clkscaling_supported(hba)) {
hba 6965 drivers/scsi/ufs/ufshcd.c memcpy(&hba->clk_scaling.saved_pwr_info.info,
hba 6966 drivers/scsi/ufs/ufshcd.c &hba->pwr_info,
hba 6968 drivers/scsi/ufs/ufshcd.c hba->clk_scaling.saved_pwr_info.is_valid = true;
hba 6969 drivers/scsi/ufs/ufshcd.c if (!hba->devfreq) {
hba 6970 drivers/scsi/ufs/ufshcd.c ret = ufshcd_devfreq_init(hba);
hba 6974 drivers/scsi/ufs/ufshcd.c hba->clk_scaling.is_allowed = true;
hba 6977 drivers/scsi/ufs/ufshcd.c ufs_bsg_probe(hba);
hba 6979 drivers/scsi/ufs/ufshcd.c scsi_scan_host(hba->host);
hba 6980 drivers/scsi/ufs/ufshcd.c pm_runtime_put_sync(hba->dev);
hba 6983 drivers/scsi/ufs/ufshcd.c if (!hba->is_init_prefetch)
hba 6984 drivers/scsi/ufs/ufshcd.c hba->is_init_prefetch = true;
hba 6991 drivers/scsi/ufs/ufshcd.c if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
hba 6992 drivers/scsi/ufs/ufshcd.c pm_runtime_put_sync(hba->dev);
hba 6993 drivers/scsi/ufs/ufshcd.c ufshcd_exit_clk_scaling(hba);
hba 6994 drivers/scsi/ufs/ufshcd.c ufshcd_hba_exit(hba);
hba 6997 drivers/scsi/ufs/ufshcd.c trace_ufshcd_init(dev_name(hba->dev), ret,
hba 6999 drivers/scsi/ufs/ufshcd.c hba->curr_dev_pwr_mode, hba->uic_link_state);
hba 7010 drivers/scsi/ufs/ufshcd.c struct ufs_hba *hba = (struct ufs_hba *)data;
hba 7012 drivers/scsi/ufs/ufshcd.c ufshcd_probe_hba(hba);
hba 7019 drivers/scsi/ufs/ufshcd.c struct ufs_hba *hba;
hba 7027 drivers/scsi/ufs/ufshcd.c hba = shost_priv(host);
hba 7028 drivers/scsi/ufs/ufshcd.c if (!hba)
hba 7033 drivers/scsi/ufs/ufshcd.c for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
hba 7034 drivers/scsi/ufs/ufshcd.c if (hba->lrb[index].cmd == scmd) {
hba 7106 drivers/scsi/ufs/ufshcd.c static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
hba 7109 drivers/scsi/ufs/ufshcd.c return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
hba 7112 drivers/scsi/ufs/ufshcd.c static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
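The ufshcd_init_desc_sizes() hits (ufshcd.c lines 6756-6793) repeat one pattern per descriptor: query the device for the descriptor's length and fall back to the spec-default size on error. A minimal sketch of that fallback, where read_desc_length() is a stub standing in for the query:

    #include <stdio.h>

    /* stub: pretend the device query failed */
    static int read_desc_length(int idn, int *len)
    {
        (void)idn;
        (void)len;
        return -1;
    }

    /* modeled on the per-descriptor pattern in ufshcd_init_desc_sizes() */
    static int desc_size_or_default(int idn, int def_size)
    {
        int len;

        if (read_desc_length(idn, &len))
            return def_size;    /* e.g. QUERY_DESC_DEVICE_DEF_SIZE */
        return len;
    }

    int main(void)
    {
        printf("dev_desc size = %d\n", desc_size_or_default(0, 64));
        return 0;
    }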
hba 7118 drivers/scsi/ufs/ufshcd.c return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
hba 7196 drivers/scsi/ufs/ufshcd.c static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
hba 7199 drivers/scsi/ufs/ufshcd.c struct device *dev = hba->dev;
hba 7200 drivers/scsi/ufs/ufshcd.c struct ufs_vreg_info *info = &hba->vreg_info;
hba 7223 drivers/scsi/ufs/ufshcd.c static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
hba 7225 drivers/scsi/ufs/ufshcd.c struct ufs_vreg_info *info = &hba->vreg_info;
hba 7227 drivers/scsi/ufs/ufshcd.c return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
hba 7247 drivers/scsi/ufs/ufshcd.c static int ufshcd_init_vreg(struct ufs_hba *hba)
hba 7250 drivers/scsi/ufs/ufshcd.c struct device *dev = hba->dev;
hba 7251 drivers/scsi/ufs/ufshcd.c struct ufs_vreg_info *info = &hba->vreg_info;
hba 7266 drivers/scsi/ufs/ufshcd.c static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
hba 7268 drivers/scsi/ufs/ufshcd.c struct ufs_vreg_info *info = &hba->vreg_info;
hba 7271 drivers/scsi/ufs/ufshcd.c return ufshcd_get_vreg(hba->dev, info->vdd_hba);
hba 7276 drivers/scsi/ufs/ufshcd.c static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
hba 7281 drivers/scsi/ufs/ufshcd.c struct list_head *head = &hba->clk_list_head;
hba 7295 drivers/scsi/ufs/ufshcd.c ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
hba 7309 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
hba 7317 drivers/scsi/ufs/ufshcd.c dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
hba 7328 drivers/scsi/ufs/ufshcd.c ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
hba 7340 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags);
hba 7341 drivers/scsi/ufs/ufshcd.c hba->clk_gating.state = CLKS_ON;
hba 7342 drivers/scsi/ufs/ufshcd.c trace_ufshcd_clk_gating(dev_name(hba->dev),
hba 7343 drivers/scsi/ufs/ufshcd.c hba->clk_gating.state);
hba 7344 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags);
hba 7348 drivers/scsi/ufs/ufshcd.c trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
hba 7354 drivers/scsi/ufs/ufshcd.c static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
hba 7356 drivers/scsi/ufs/ufshcd.c return __ufshcd_setup_clocks(hba, on, false);
hba 7359 drivers/scsi/ufs/ufshcd.c static int ufshcd_init_clocks(struct ufs_hba *hba)
hba 7363 drivers/scsi/ufs/ufshcd.c struct device *dev = hba->dev;
hba 7364 drivers/scsi/ufs/ufshcd.c struct list_head *head = &hba->clk_list_head;
hba 7387 drivers/scsi/ufs/ufshcd.c ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
hba 7392 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
hba 7406 drivers/scsi/ufs/ufshcd.c static int ufshcd_variant_hba_init(struct ufs_hba *hba)
hba 7410 drivers/scsi/ufs/ufshcd.c if (!hba->vops)
hba 7413 drivers/scsi/ufs/ufshcd.c err = ufshcd_vops_init(hba);
hba 7417 drivers/scsi/ufs/ufshcd.c err = ufshcd_vops_setup_regulators(hba, true);
hba 7424 drivers/scsi/ufs/ufshcd.c ufshcd_vops_exit(hba);
hba 7427 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s: variant %s init failed err %d\n",
hba 7428 drivers/scsi/ufs/ufshcd.c __func__, ufshcd_get_var_name(hba), err);
hba 7432 drivers/scsi/ufs/ufshcd.c static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
hba 7434 drivers/scsi/ufs/ufshcd.c if (!hba->vops)
hba 7437 drivers/scsi/ufs/ufshcd.c ufshcd_vops_setup_regulators(hba, false);
hba 7439 drivers/scsi/ufs/ufshcd.c ufshcd_vops_exit(hba);
hba 7442 drivers/scsi/ufs/ufshcd.c static int ufshcd_hba_init(struct ufs_hba *hba)
hba 7453 drivers/scsi/ufs/ufshcd.c err = ufshcd_init_hba_vreg(hba);
hba 7457 drivers/scsi/ufs/ufshcd.c err = ufshcd_setup_hba_vreg(hba, true);
hba 7461 drivers/scsi/ufs/ufshcd.c err = ufshcd_init_clocks(hba);
hba 7465 drivers/scsi/ufs/ufshcd.c err = ufshcd_setup_clocks(hba, true);
hba 7469 drivers/scsi/ufs/ufshcd.c err = ufshcd_init_vreg(hba);
hba 7473 drivers/scsi/ufs/ufshcd.c err = ufshcd_setup_vreg(hba, true);
hba 7477 drivers/scsi/ufs/ufshcd.c err = ufshcd_variant_hba_init(hba);
hba 7481 drivers/scsi/ufs/ufshcd.c hba->is_powered = true;
hba 7485 drivers/scsi/ufs/ufshcd.c ufshcd_setup_vreg(hba, false);
hba 7487 drivers/scsi/ufs/ufshcd.c ufshcd_setup_clocks(hba, false);
hba 7489 drivers/scsi/ufs/ufshcd.c ufshcd_setup_hba_vreg(hba, false);
hba 7494 drivers/scsi/ufs/ufshcd.c static void ufshcd_hba_exit(struct ufs_hba *hba)
hba 7496 drivers/scsi/ufs/ufshcd.c if (hba->is_powered) {
hba 7497 drivers/scsi/ufs/ufshcd.c ufshcd_variant_hba_exit(hba);
hba 7498 drivers/scsi/ufs/ufshcd.c ufshcd_setup_vreg(hba, false);
hba 7499 drivers/scsi/ufs/ufshcd.c ufshcd_suspend_clkscaling(hba);
hba 7500 drivers/scsi/ufs/ufshcd.c if (ufshcd_is_clkscaling_supported(hba))
hba 7501 drivers/scsi/ufs/ufshcd.c if (hba->devfreq)
hba 7502 drivers/scsi/ufs/ufshcd.c ufshcd_suspend_clkscaling(hba);
hba 7503 drivers/scsi/ufs/ufshcd.c ufshcd_setup_clocks(hba, false);
hba 7504 drivers/scsi/ufs/ufshcd.c ufshcd_setup_hba_vreg(hba, false);
hba 7505 drivers/scsi/ufs/ufshcd.c hba->is_powered = false;
hba 7510 drivers/scsi/ufs/ufshcd.c ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
hba 7547 drivers/scsi/ufs/ufshcd.c static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
hba 7556 drivers/scsi/ufs/ufshcd.c spin_lock_irqsave(hba->host->host_lock, flags);
hba 7557 drivers/scsi/ufs/ufshcd.c sdp = hba->sdev_ufs_device;
hba 7567 drivers/scsi/ufs/ufshcd.c spin_unlock_irqrestore(hba->host->host_lock, flags);
hba 7578 drivers/scsi/ufs/ufshcd.c hba->host->eh_noresume = 1;
hba 7579 drivers/scsi/ufs/ufshcd.c if (hba->wlun_dev_clr_ua) {
hba 7580 drivers/scsi/ufs/ufshcd.c ret = ufshcd_send_request_sense(hba, sdp);
hba 7584 drivers/scsi/ufs/ufshcd.c hba->wlun_dev_clr_ua = false;
hba 7605 drivers/scsi/ufs/ufshcd.c hba->curr_dev_pwr_mode = pwr_mode;
hba 7608 drivers/scsi/ufs/ufshcd.c hba->host->eh_noresume = 0;
hba 7612 drivers/scsi/ufs/ufshcd.c static int ufshcd_link_state_transition(struct ufs_hba *hba,
hba 7618 drivers/scsi/ufs/ufshcd.c if (req_link_state == hba->uic_link_state)
hba 7622 drivers/scsi/ufs/ufshcd.c ret = ufshcd_uic_hibern8_enter(hba);
hba 7624 drivers/scsi/ufs/ufshcd.c ufshcd_set_link_hibern8(hba);
hba 7634 drivers/scsi/ufs/ufshcd.c !hba->auto_bkops_enabled))) {
hba 7642 drivers/scsi/ufs/ufshcd.c ret = ufshcd_uic_hibern8_enter(hba);
hba 7649 drivers/scsi/ufs/ufshcd.c ufshcd_hba_stop(hba, true);
hba 7654 drivers/scsi/ufs/ufshcd.c ufshcd_set_link_off(hba);
hba 7661 drivers/scsi/ufs/ufshcd.c static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
hba 7669 drivers/scsi/ufs/ufshcd.c if (!ufshcd_is_link_active(hba) &&
hba 7670 drivers/scsi/ufs/ufshcd.c hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
hba 7685 drivers/scsi/ufs/ufshcd.c if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
hba 7686 drivers/scsi/ufs/ufshcd.c !hba->dev_info.is_lu_power_on_wp) {
hba 7687 drivers/scsi/ufs/ufshcd.c ufshcd_setup_vreg(hba, false);
hba 7688 drivers/scsi/ufs/ufshcd.c } else if (!ufshcd_is_ufs_dev_active(hba)) {
hba 7689 drivers/scsi/ufs/ufshcd.c ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
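The ufshcd_hba_init() hits (ufshcd.c lines 7442-7489) show the kernel's standard goto-based unwinding: each successful setup step must be undone if a later one fails, so the error path falls through cascading labels in reverse order. A stripped-down sketch of the idiom with stub steps (the real chain covers regulators, clocks, and the variant init):

    #include <stdio.h>

    static int  init_hba_vreg(void)   { return 0; }
    static int  setup_clocks(void)    { return 0; }
    static void teardown_vreg(void)   { puts("vreg off"); }

    /* modeled on ufshcd_hba_init(): unwind earlier steps on failure */
    static int hba_init(void)
    {
        int err;

        err = init_hba_vreg();
        if (err)
            goto out;

        err = setup_clocks();
        if (err)
            goto out_disable_vreg;

        return 0;    /* the driver sets is_powered = true here */

    out_disable_vreg:
        teardown_vreg();
    out:
        return err;
    }

    int main(void)
    {
        return hba_init();
    }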
hba 7690 drivers/scsi/ufs/ufshcd.c if (!ufshcd_is_link_active(hba)) {
hba 7691 drivers/scsi/ufs/ufshcd.c ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
hba 7692 drivers/scsi/ufs/ufshcd.c ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
hba 7697 drivers/scsi/ufs/ufshcd.c static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
hba 7701 drivers/scsi/ufs/ufshcd.c if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
hba 7702 drivers/scsi/ufs/ufshcd.c !hba->dev_info.is_lu_power_on_wp) {
hba 7703 drivers/scsi/ufs/ufshcd.c ret = ufshcd_setup_vreg(hba, true);
hba 7704 drivers/scsi/ufs/ufshcd.c } else if (!ufshcd_is_ufs_dev_active(hba)) {
hba 7705 drivers/scsi/ufs/ufshcd.c if (!ret && !ufshcd_is_link_active(hba)) {
hba 7706 drivers/scsi/ufs/ufshcd.c ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
hba 7709 drivers/scsi/ufs/ufshcd.c ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
hba 7713 drivers/scsi/ufs/ufshcd.c ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
hba 7718 drivers/scsi/ufs/ufshcd.c ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
hba 7720 drivers/scsi/ufs/ufshcd.c ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
hba 7725 drivers/scsi/ufs/ufshcd.c static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
hba 7727 drivers/scsi/ufs/ufshcd.c if (ufshcd_is_link_off(hba))
hba 7728 drivers/scsi/ufs/ufshcd.c ufshcd_setup_hba_vreg(hba, false);
hba 7731 drivers/scsi/ufs/ufshcd.c static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
hba 7733 drivers/scsi/ufs/ufshcd.c if (ufshcd_is_link_off(hba))
hba 7734 drivers/scsi/ufs/ufshcd.c ufshcd_setup_hba_vreg(hba, true);
hba 7753 drivers/scsi/ufs/ufshcd.c static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
hba 7760 drivers/scsi/ufs/ufshcd.c hba->pm_op_in_progress = 1;
hba 7763 drivers/scsi/ufs/ufshcd.c hba->rpm_lvl : hba->spm_lvl;
hba 7775 drivers/scsi/ufs/ufshcd.c ufshcd_hold(hba, false);
hba 7776 drivers/scsi/ufs/ufshcd.c hba->clk_gating.is_suspended = true;
hba 7778 drivers/scsi/ufs/ufshcd.c if (hba->clk_scaling.is_allowed) {
hba 7779 drivers/scsi/ufs/ufshcd.c cancel_work_sync(&hba->clk_scaling.suspend_work);
hba 7780 drivers/scsi/ufs/ufshcd.c cancel_work_sync(&hba->clk_scaling.resume_work);
hba 7781 drivers/scsi/ufs/ufshcd.c ufshcd_suspend_clkscaling(hba);
hba 7789 drivers/scsi/ufs/ufshcd.c if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
hba 7790 drivers/scsi/ufs/ufshcd.c (req_link_state == hba->uic_link_state))
hba 7794 drivers/scsi/ufs/ufshcd.c if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
hba 7800 drivers/scsi/ufs/ufshcd.c if (ufshcd_can_autobkops_during_suspend(hba)) {
hba 7806 drivers/scsi/ufs/ufshcd.c ret = ufshcd_urgent_bkops(hba);
hba 7811 drivers/scsi/ufs/ufshcd.c ufshcd_disable_auto_bkops(hba);
hba 7815 drivers/scsi/ufs/ufshcd.c if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
hba 7816 drivers/scsi/ufs/ufshcd.c ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
hba 7819 drivers/scsi/ufs/ufshcd.c ufshcd_disable_auto_bkops(hba);
hba 7820 drivers/scsi/ufs/ufshcd.c ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
hba 7825 drivers/scsi/ufs/ufshcd.c ret = ufshcd_link_state_transition(hba, req_link_state, 1);
hba 7829 drivers/scsi/ufs/ufshcd.c ufshcd_vreg_set_lpm(hba);
hba 7837 drivers/scsi/ufs/ufshcd.c ret = ufshcd_vops_suspend(hba, pm_op);
hba 7841 drivers/scsi/ufs/ufshcd.c if (!ufshcd_is_link_active(hba))
hba 7842 drivers/scsi/ufs/ufshcd.c ufshcd_setup_clocks(hba, false);
hba 7845 drivers/scsi/ufs/ufshcd.c __ufshcd_setup_clocks(hba, false, true);
hba 7847 drivers/scsi/ufs/ufshcd.c hba->clk_gating.state = CLKS_OFF;
hba 7848 drivers/scsi/ufs/ufshcd.c trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
hba 7853 drivers/scsi/ufs/ufshcd.c ufshcd_disable_irq(hba);
hba 7855 drivers/scsi/ufs/ufshcd.c ufshcd_hba_vreg_set_lpm(hba);
hba 7859 drivers/scsi/ufs/ufshcd.c if (hba->clk_scaling.is_allowed)
hba 7860 drivers/scsi/ufs/ufshcd.c ufshcd_resume_clkscaling(hba);
hba 7861 drivers/scsi/ufs/ufshcd.c ufshcd_vreg_set_hpm(hba);
hba 7862 drivers/scsi/ufs/ufshcd.c if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
hba 7863 drivers/scsi/ufs/ufshcd.c ufshcd_set_link_active(hba);
hba 7864 drivers/scsi/ufs/ufshcd.c else if (ufshcd_is_link_off(hba))
hba 7865 drivers/scsi/ufs/ufshcd.c ufshcd_host_reset_and_restore(hba);
hba 7867 drivers/scsi/ufs/ufshcd.c if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
hba 7868 drivers/scsi/ufs/ufshcd.c ufshcd_disable_auto_bkops(hba);
hba 7870 drivers/scsi/ufs/ufshcd.c if (hba->clk_scaling.is_allowed)
hba 7871 drivers/scsi/ufs/ufshcd.c ufshcd_resume_clkscaling(hba);
hba 7872 drivers/scsi/ufs/ufshcd.c hba->clk_gating.is_suspended = false;
hba 7873 drivers/scsi/ufs/ufshcd.c ufshcd_release(hba);
hba 7875 drivers/scsi/ufs/ufshcd.c hba->pm_op_in_progress = 0;
hba 7877 drivers/scsi/ufs/ufshcd.c ufshcd_update_reg_hist(&hba->ufs_stats.suspend_err, (u32)ret);
hba 7891 drivers/scsi/ufs/ufshcd.c static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
hba 7896 drivers/scsi/ufs/ufshcd.c hba->pm_op_in_progress = 1;
hba 7897 drivers/scsi/ufs/ufshcd.c old_link_state = hba->uic_link_state;
hba 7899 drivers/scsi/ufs/ufshcd.c ufshcd_hba_vreg_set_hpm(hba);
hba 7901 drivers/scsi/ufs/ufshcd.c ret = ufshcd_setup_clocks(hba, true);
hba 7906 drivers/scsi/ufs/ufshcd.c ret = ufshcd_enable_irq(hba);
hba 7910 drivers/scsi/ufs/ufshcd.c ret = ufshcd_vreg_set_hpm(hba);
hba 7919 drivers/scsi/ufs/ufshcd.c ret = ufshcd_vops_resume(hba, pm_op);
hba 7923 drivers/scsi/ufs/ufshcd.c if (ufshcd_is_link_hibern8(hba)) {
hba 7924 drivers/scsi/ufs/ufshcd.c ret = ufshcd_uic_hibern8_exit(hba);
hba 7926 drivers/scsi/ufs/ufshcd.c ufshcd_set_link_active(hba);
hba 7929 drivers/scsi/ufs/ufshcd.c } else if (ufshcd_is_link_off(hba)) {
hba 7930 drivers/scsi/ufs/ufshcd.c ret = ufshcd_host_reset_and_restore(hba);
hba 7935 drivers/scsi/ufs/ufshcd.c if (ret || !ufshcd_is_link_active(hba))
hba 7939 drivers/scsi/ufs/ufshcd.c if (!ufshcd_is_ufs_dev_active(hba)) {
hba 7940 drivers/scsi/ufs/ufshcd.c ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
hba 7945 drivers/scsi/ufs/ufshcd.c if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
hba 7946 drivers/scsi/ufs/ufshcd.c ufshcd_enable_auto_bkops(hba);
hba 7952 drivers/scsi/ufs/ufshcd.c ufshcd_urgent_bkops(hba);
hba 7954 drivers/scsi/ufs/ufshcd.c hba->clk_gating.is_suspended = false;
hba 7956 drivers/scsi/ufs/ufshcd.c if (hba->clk_scaling.is_allowed)
hba 7957 drivers/scsi/ufs/ufshcd.c ufshcd_resume_clkscaling(hba);
hba 7960 drivers/scsi/ufs/ufshcd.c ufshcd_auto_hibern8_enable(hba);
hba 7963 drivers/scsi/ufs/ufshcd.c ufshcd_release(hba);
hba 7968 drivers/scsi/ufs/ufshcd.c ufshcd_link_state_transition(hba, old_link_state, 0);
hba 7970 drivers/scsi/ufs/ufshcd.c ufshcd_vops_suspend(hba, pm_op);
hba 7972 drivers/scsi/ufs/ufshcd.c ufshcd_vreg_set_lpm(hba);
hba 7974 drivers/scsi/ufs/ufshcd.c ufshcd_disable_irq(hba);
hba 7975 drivers/scsi/ufs/ufshcd.c if (hba->clk_scaling.is_allowed)
hba 7976 drivers/scsi/ufs/ufshcd.c ufshcd_suspend_clkscaling(hba);
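The ufshcd_system_suspend() hits further below (ufshcd.c lines 7993-8027) show two guards before the common suspend path runs: skip entirely when the device already sits in the state the suspend level asks for, and runtime-resume a runtime-suspended device first so system suspend always starts from a known-active state. A hedged model of just that control flow, with invented state encodings:

    #include <stdio.h>

    static int stub_runtime_resume(void)
    {
        puts("runtime resume first");
        return 0;
    }

    /* modeled on the guards in ufshcd_system_suspend() */
    static int model_system_suspend(int cur_pwr, int cur_link,
                                    int tgt_pwr, int tgt_link,
                                    int runtime_suspended,
                                    int (*runtime_resume)(void))
    {
        if (cur_pwr == tgt_pwr && cur_link == tgt_link)
            return 0;                   /* already in the target state */

        if (runtime_suspended) {
            int ret = runtime_resume(); /* bring HW back before suspending */
            if (ret)
                return ret;
        }
        return 0;   /* the common ufshcd_suspend() path would run here */
    }

    int main(void)
    {
        return model_system_suspend(1, 1, 1, 1, 0, stub_runtime_resume);
    }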
hba 7891 drivers/scsi/ufs/ufshcd.c static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
hba 7896 drivers/scsi/ufs/ufshcd.c hba->pm_op_in_progress = 1;
hba 7897 drivers/scsi/ufs/ufshcd.c old_link_state = hba->uic_link_state;
hba 7899 drivers/scsi/ufs/ufshcd.c ufshcd_hba_vreg_set_hpm(hba);
hba 7901 drivers/scsi/ufs/ufshcd.c ret = ufshcd_setup_clocks(hba, true);
hba 7906 drivers/scsi/ufs/ufshcd.c ret = ufshcd_enable_irq(hba);
hba 7910 drivers/scsi/ufs/ufshcd.c ret = ufshcd_vreg_set_hpm(hba);
hba 7919 drivers/scsi/ufs/ufshcd.c ret = ufshcd_vops_resume(hba, pm_op);
hba 7923 drivers/scsi/ufs/ufshcd.c if (ufshcd_is_link_hibern8(hba)) {
hba 7924 drivers/scsi/ufs/ufshcd.c ret = ufshcd_uic_hibern8_exit(hba);
hba 7926 drivers/scsi/ufs/ufshcd.c ufshcd_set_link_active(hba);
hba 7929 drivers/scsi/ufs/ufshcd.c } else if (ufshcd_is_link_off(hba)) {
hba 7930 drivers/scsi/ufs/ufshcd.c ret = ufshcd_host_reset_and_restore(hba);
hba 7935 drivers/scsi/ufs/ufshcd.c if (ret || !ufshcd_is_link_active(hba))
hba 7939 drivers/scsi/ufs/ufshcd.c if (!ufshcd_is_ufs_dev_active(hba)) {
hba 7940 drivers/scsi/ufs/ufshcd.c ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
hba 7945 drivers/scsi/ufs/ufshcd.c if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
hba 7946 drivers/scsi/ufs/ufshcd.c ufshcd_enable_auto_bkops(hba);
hba 7952 drivers/scsi/ufs/ufshcd.c ufshcd_urgent_bkops(hba);
hba 7954 drivers/scsi/ufs/ufshcd.c hba->clk_gating.is_suspended = false;
hba 7956 drivers/scsi/ufs/ufshcd.c if (hba->clk_scaling.is_allowed)
hba 7957 drivers/scsi/ufs/ufshcd.c ufshcd_resume_clkscaling(hba);
hba 7960 drivers/scsi/ufs/ufshcd.c ufshcd_auto_hibern8_enable(hba);
hba 7963 drivers/scsi/ufs/ufshcd.c ufshcd_release(hba);
hba 7968 drivers/scsi/ufs/ufshcd.c ufshcd_link_state_transition(hba, old_link_state, 0);
hba 7970 drivers/scsi/ufs/ufshcd.c ufshcd_vops_suspend(hba, pm_op);
hba 7972 drivers/scsi/ufs/ufshcd.c ufshcd_vreg_set_lpm(hba);
hba 7974 drivers/scsi/ufs/ufshcd.c ufshcd_disable_irq(hba);
hba 7975 drivers/scsi/ufs/ufshcd.c if (hba->clk_scaling.is_allowed)
hba 7976 drivers/scsi/ufs/ufshcd.c ufshcd_suspend_clkscaling(hba);
hba 7977 drivers/scsi/ufs/ufshcd.c ufshcd_setup_clocks(hba, false);
hba 7979 drivers/scsi/ufs/ufshcd.c hba->pm_op_in_progress = 0;
hba 7981 drivers/scsi/ufs/ufshcd.c ufshcd_update_reg_hist(&hba->ufs_stats.resume_err, (u32)ret);
hba 7993 drivers/scsi/ufs/ufshcd.c int ufshcd_system_suspend(struct ufs_hba *hba)
hba 7998 drivers/scsi/ufs/ufshcd.c if (!hba || !hba->is_powered)
hba 8001 drivers/scsi/ufs/ufshcd.c if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
hba 8002 drivers/scsi/ufs/ufshcd.c hba->curr_dev_pwr_mode) &&
hba 8003 drivers/scsi/ufs/ufshcd.c (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
hba 8004 drivers/scsi/ufs/ufshcd.c hba->uic_link_state))
hba 8007 drivers/scsi/ufs/ufshcd.c if (pm_runtime_suspended(hba->dev)) {
hba 8016 drivers/scsi/ufs/ufshcd.c ret = ufshcd_runtime_resume(hba);
hba 8021 drivers/scsi/ufs/ufshcd.c ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
hba 8023 drivers/scsi/ufs/ufshcd.c trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
hba 8025 drivers/scsi/ufs/ufshcd.c hba->curr_dev_pwr_mode, hba->uic_link_state);
hba 8027 drivers/scsi/ufs/ufshcd.c hba->is_sys_suspended = true;
hba 8039 drivers/scsi/ufs/ufshcd.c int ufshcd_system_resume(struct ufs_hba *hba)
hba 8044 drivers/scsi/ufs/ufshcd.c if (!hba)
hba 8047 drivers/scsi/ufs/ufshcd.c if (!hba->is_powered || pm_runtime_suspended(hba->dev))
hba 8054 drivers/scsi/ufs/ufshcd.c ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
hba 8056 drivers/scsi/ufs/ufshcd.c trace_ufshcd_system_resume(dev_name(hba->dev), ret,
hba 8058 drivers/scsi/ufs/ufshcd.c hba->curr_dev_pwr_mode, hba->uic_link_state);
hba 8060 drivers/scsi/ufs/ufshcd.c hba->is_sys_suspended = false;
hba 8073 drivers/scsi/ufs/ufshcd.c int ufshcd_runtime_suspend(struct ufs_hba *hba)
hba 8078 drivers/scsi/ufs/ufshcd.c if (!hba)
hba 8081 drivers/scsi/ufs/ufshcd.c if (!hba->is_powered)
hba 8084 drivers/scsi/ufs/ufshcd.c ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
hba 8086 drivers/scsi/ufs/ufshcd.c trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
hba 8088 drivers/scsi/ufs/ufshcd.c hba->curr_dev_pwr_mode, hba->uic_link_state);
hba 8114 drivers/scsi/ufs/ufshcd.c int ufshcd_runtime_resume(struct ufs_hba *hba)
hba 8119 drivers/scsi/ufs/ufshcd.c if (!hba)
hba 8122 drivers/scsi/ufs/ufshcd.c if (!hba->is_powered)
hba 8125 drivers/scsi/ufs/ufshcd.c ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
hba 8127 drivers/scsi/ufs/ufshcd.c trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
hba 8129 drivers/scsi/ufs/ufshcd.c hba->curr_dev_pwr_mode, hba->uic_link_state);
hba 8134 drivers/scsi/ufs/ufshcd.c int ufshcd_runtime_idle(struct ufs_hba *hba)
hba 8148 drivers/scsi/ufs/ufshcd.c int ufshcd_shutdown(struct ufs_hba *hba)
hba 8152 drivers/scsi/ufs/ufshcd.c if (!hba->is_powered)
hba 8155 drivers/scsi/ufs/ufshcd.c if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
hba 8158 drivers/scsi/ufs/ufshcd.c if (pm_runtime_suspended(hba->dev)) {
hba 8159 drivers/scsi/ufs/ufshcd.c ret = ufshcd_runtime_resume(hba);
hba 8164 drivers/scsi/ufs/ufshcd.c ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
hba 8167 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
hba 8178 drivers/scsi/ufs/ufshcd.c void ufshcd_remove(struct ufs_hba *hba)
hba 8180 drivers/scsi/ufs/ufshcd.c ufs_bsg_remove(hba);
hba 8181 drivers/scsi/ufs/ufshcd.c ufs_sysfs_remove_nodes(hba->dev);
hba 8182 drivers/scsi/ufs/ufshcd.c scsi_remove_host(hba->host);
hba 8184 drivers/scsi/ufs/ufshcd.c ufshcd_disable_intr(hba, hba->intr_mask);
hba 8185 drivers/scsi/ufs/ufshcd.c ufshcd_hba_stop(hba, true);
hba 8187 drivers/scsi/ufs/ufshcd.c ufshcd_exit_clk_scaling(hba);
hba 8188 drivers/scsi/ufs/ufshcd.c ufshcd_exit_clk_gating(hba);
hba 8189 drivers/scsi/ufs/ufshcd.c if (ufshcd_is_clkscaling_supported(hba))
hba 8190 drivers/scsi/ufs/ufshcd.c device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
hba 8191 drivers/scsi/ufs/ufshcd.c ufshcd_hba_exit(hba);
hba 8199 drivers/scsi/ufs/ufshcd.c void ufshcd_dealloc_host(struct ufs_hba *hba)
hba 8201 drivers/scsi/ufs/ufshcd.c scsi_host_put(hba->host);
hba 8212 drivers/scsi/ufs/ufshcd.c static int ufshcd_set_dma_mask(struct ufs_hba *hba)
hba 8214 drivers/scsi/ufs/ufshcd.c if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
hba 8215 drivers/scsi/ufs/ufshcd.c if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
hba 8218 drivers/scsi/ufs/ufshcd.c return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
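Note: ufshcd_set_dma_mask() above encodes a common probe-time pattern: prefer a 64-bit DMA mask when the controller's capability register advertises it (MASK_64_ADDRESSING_SUPPORT), otherwise fall back to 32-bit. A standalone sketch of the same fallback logic, where try_mask() is a hypothetical stand-in for dma_set_mask_and_coherent():

    #include <stdint.h>
    #include <stdio.h>

    #define CAP_64BIT (1u << 24) /* hypothetical capability bit, not the real register layout */

    /* Stand-in for dma_set_mask_and_coherent(); here we pretend the
     * platform only accepts 32-bit masks, to exercise the fallback. */
    static int try_mask(uint64_t mask) { return mask <= UINT32_MAX ? 0 : -1; }

    /* Mirrors ufshcd_set_dma_mask(): wide mask first, narrow mask as fallback. */
    static int set_dma_mask_sketch(uint32_t capabilities)
    {
            if (capabilities & CAP_64BIT) {
                    if (try_mask(UINT64_MAX) == 0)
                            return 0; /* 64-bit accepted */
            }
            return try_mask(UINT32_MAX); /* fall back to 32-bit */
    }

    int main(void)
    {
            printf("ret = %d\n", set_dma_mask_sketch(CAP_64BIT));
            return 0;
    }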
hba 8230 drivers/scsi/ufs/ufshcd.c struct ufs_hba *hba;
hba 8247 drivers/scsi/ufs/ufshcd.c hba = shost_priv(host);
hba 8248 drivers/scsi/ufs/ufshcd.c hba->host = host;
hba 8249 drivers/scsi/ufs/ufshcd.c hba->dev = dev;
hba 8250 drivers/scsi/ufs/ufshcd.c *hba_handle = hba;
hba 8251 drivers/scsi/ufs/ufshcd.c hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
hba 8253 drivers/scsi/ufs/ufshcd.c INIT_LIST_HEAD(&hba->clk_list_head);
hba 8267 drivers/scsi/ufs/ufshcd.c int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba 8270 drivers/scsi/ufs/ufshcd.c struct Scsi_Host *host = hba->host;
hba 8271 drivers/scsi/ufs/ufshcd.c struct device *dev = hba->dev;
hba 8274 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev,
hba 8280 drivers/scsi/ufs/ufshcd.c hba->mmio_base = mmio_base;
hba 8281 drivers/scsi/ufs/ufshcd.c hba->irq = irq;
hba 8283 drivers/scsi/ufs/ufshcd.c err = ufshcd_hba_init(hba);
hba 8288 drivers/scsi/ufs/ufshcd.c ufshcd_hba_capabilities(hba);
hba 8291 drivers/scsi/ufs/ufshcd.c hba->ufs_version = ufshcd_get_ufs_version(hba);
hba 8293 drivers/scsi/ufs/ufshcd.c if ((hba->ufs_version != UFSHCI_VERSION_10) &&
hba 8294 drivers/scsi/ufs/ufshcd.c (hba->ufs_version != UFSHCI_VERSION_11) &&
hba 8295 drivers/scsi/ufs/ufshcd.c (hba->ufs_version != UFSHCI_VERSION_20) &&
hba 8296 drivers/scsi/ufs/ufshcd.c (hba->ufs_version != UFSHCI_VERSION_21))
hba 8297 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "invalid UFS version 0x%x\n",
hba 8298 drivers/scsi/ufs/ufshcd.c hba->ufs_version);
hba 8301 drivers/scsi/ufs/ufshcd.c hba->intr_mask = ufshcd_get_intr_mask(hba);
hba 8303 drivers/scsi/ufs/ufshcd.c err = ufshcd_set_dma_mask(hba);
hba 8305 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "set dma mask failed\n");
hba 8310 drivers/scsi/ufs/ufshcd.c err = ufshcd_memory_alloc(hba);
hba 8312 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "Memory allocation failed\n");
hba 8317 drivers/scsi/ufs/ufshcd.c ufshcd_host_memory_configure(hba);
hba 8319 drivers/scsi/ufs/ufshcd.c host->can_queue = hba->nutrs;
hba 8320 drivers/scsi/ufs/ufshcd.c host->cmd_per_lun = hba->nutrs;
hba 8327 drivers/scsi/ufs/ufshcd.c hba->max_pwr_info.is_valid = false;
hba 8330 drivers/scsi/ufs/ufshcd.c init_waitqueue_head(&hba->tm_wq);
hba 8331 drivers/scsi/ufs/ufshcd.c init_waitqueue_head(&hba->tm_tag_wq);
hba 8334 drivers/scsi/ufs/ufshcd.c INIT_WORK(&hba->eh_work, ufshcd_err_handler);
hba 8335 drivers/scsi/ufs/ufshcd.c INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
hba 8338 drivers/scsi/ufs/ufshcd.c mutex_init(&hba->uic_cmd_mutex);
hba 8341 drivers/scsi/ufs/ufshcd.c mutex_init(&hba->dev_cmd.lock);
hba 8343 drivers/scsi/ufs/ufshcd.c init_rwsem(&hba->clk_scaling_lock);
hba 8346 drivers/scsi/ufs/ufshcd.c init_waitqueue_head(&hba->dev_cmd.tag_wq);
hba 8348 drivers/scsi/ufs/ufshcd.c ufshcd_init_clk_gating(hba);
hba 8350 drivers/scsi/ufs/ufshcd.c ufshcd_init_clk_scaling(hba);
hba 8357 drivers/scsi/ufs/ufshcd.c ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
hba 8359 drivers/scsi/ufs/ufshcd.c ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
hba 8367 drivers/scsi/ufs/ufshcd.c err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
hba 8369 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "request irq failed\n");
hba 8372 drivers/scsi/ufs/ufshcd.c hba->is_irq_enabled = true;
hba 8375 drivers/scsi/ufs/ufshcd.c err = scsi_add_host(host, hba->dev);
hba 8377 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "scsi_add_host failed\n");
hba 8382 drivers/scsi/ufs/ufshcd.c ufshcd_vops_device_reset(hba);
hba 8385 drivers/scsi/ufs/ufshcd.c err = ufshcd_hba_enable(hba);
hba 8387 drivers/scsi/ufs/ufshcd.c dev_err(hba->dev, "Host controller enable failed\n");
hba 8388 drivers/scsi/ufs/ufshcd.c ufshcd_print_host_regs(hba);
hba 8389 drivers/scsi/ufs/ufshcd.c ufshcd_print_host_state(hba);
hba 8398 drivers/scsi/ufs/ufshcd.c hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
hba 8401 drivers/scsi/ufs/ufshcd.c hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
hba 8406 drivers/scsi/ufs/ufshcd.c if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
hba 8407 drivers/scsi/ufs/ufshcd.c hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
hba 8413 drivers/scsi/ufs/ufshcd.c atomic_set(&hba->scsi_block_reqs_cnt, 0);
hba 8420 drivers/scsi/ufs/ufshcd.c ufshcd_set_ufs_dev_active(hba);
hba 8422 drivers/scsi/ufs/ufshcd.c async_schedule(ufshcd_async_scan, hba);
hba 8423 drivers/scsi/ufs/ufshcd.c ufs_sysfs_add_nodes(hba->dev);
hba 8428 drivers/scsi/ufs/ufshcd.c scsi_remove_host(hba->host);
hba 8430 drivers/scsi/ufs/ufshcd.c ufshcd_exit_clk_scaling(hba);
hba 8431 drivers/scsi/ufs/ufshcd.c ufshcd_exit_clk_gating(hba);
hba 8433 drivers/scsi/ufs/ufshcd.c hba->is_irq_enabled = false;
hba 8434 drivers/scsi/ufs/ufshcd.c ufshcd_hba_exit(hba);
hba 121 drivers/scsi/ufs/ufshcd.h #define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE)
hba 122 drivers/scsi/ufs/ufshcd.h #define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \
hba 124 drivers/scsi/ufs/ufshcd.h #define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \
hba 126 drivers/scsi/ufs/ufshcd.h #define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE)
hba 127 drivers/scsi/ufs/ufshcd.h #define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \
hba 129 drivers/scsi/ufs/ufshcd.h #define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
hba 329 drivers/scsi/ufs/ufshcd.h void (*dbg_register_dump)(struct ufs_hba *hba);
hba 331 drivers/scsi/ufs/ufshcd.h void (*device_reset)(struct ufs_hba *hba);
hba 739 drivers/scsi/ufs/ufshcd.h static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
hba 741 drivers/scsi/ufs/ufshcd.h return hba->caps & UFSHCD_CAP_CLK_GATING;
hba 743 drivers/scsi/ufs/ufshcd.h static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
hba 745 drivers/scsi/ufs/ufshcd.h return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
hba 747 drivers/scsi/ufs/ufshcd.h static inline int ufshcd_is_clkscaling_supported(struct ufs_hba *hba)
hba 749 drivers/scsi/ufs/ufshcd.h return hba->caps & UFSHCD_CAP_CLK_SCALING;
hba 751 drivers/scsi/ufs/ufshcd.h static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
hba 753 drivers/scsi/ufs/ufshcd.h return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
hba 756 drivers/scsi/ufs/ufshcd.h static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
hba 760 drivers/scsi/ufs/ufshcd.h if ((hba->caps & UFSHCD_CAP_INTR_AGGR) &&
hba 761 drivers/scsi/ufs/ufshcd.h !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR))
hba 770 drivers/scsi/ufs/ufshcd.h static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
hba 772 drivers/scsi/ufs/ufshcd.h return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT);
hba 775 drivers/scsi/ufs/ufshcd.h static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba)
hba 777 drivers/scsi/ufs/ufshcd.h return FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, hba->ahit) ? true : false;
hba 780 drivers/scsi/ufs/ufshcd.h #define ufshcd_writel(hba, val, reg) \
hba 781 drivers/scsi/ufs/ufshcd.h writel((val), (hba)->mmio_base + (reg))
hba 782 drivers/scsi/ufs/ufshcd.h #define ufshcd_readl(hba, reg) \
hba 783 drivers/scsi/ufs/ufshcd.h readl((hba)->mmio_base + (reg))
hba 792 drivers/scsi/ufs/ufshcd.h static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
hba 796 drivers/scsi/ufs/ufshcd.h tmp = ufshcd_readl(hba, reg);
hba 799 drivers/scsi/ufs/ufshcd.h ufshcd_writel(hba, tmp, reg);
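Note: the ufshcd_readl()/ufshcd_writel() macros above are thin offsets into hba->mmio_base, and ufshcd_rmwl() layers a read-modify-write of one register field on top of them. A standalone model of that step, with a plain variable standing in for the MMIO register (illustrative names only):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the register behind (hba)->mmio_base + reg. */
    static uint32_t fake_reg = 0xffff0000;

    static uint32_t readl_sketch(void) { return fake_reg; }
    static void writel_sketch(uint32_t v) { fake_reg = v; }

    /* Models ufshcd_rmwl(): read, clear the masked field, insert the
     * new field value, write back. */
    static void rmwl_sketch(uint32_t mask, uint32_t val)
    {
            uint32_t tmp = readl_sketch();
            tmp &= ~mask;        /* clear the field selected by mask */
            tmp |= (val & mask); /* insert the new field value */
            writel_sketch(tmp);
    }

    int main(void)
    {
            rmwl_sketch(0x000000ff, 0x5a);
            printf("0x%08x\n", fake_reg); /* prints 0xffff005a */
            return 0;
    }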
hba 806 drivers/scsi/ufs/ufshcd.h int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
hba 809 drivers/scsi/ufs/ufshcd.h void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
hba 822 drivers/scsi/ufs/ufshcd.h static inline void ufshcd_set_variant(struct ufs_hba *hba, void *variant)
hba 824 drivers/scsi/ufs/ufshcd.h BUG_ON(!hba);
hba 825 drivers/scsi/ufs/ufshcd.h hba->priv = variant;
hba 832 drivers/scsi/ufs/ufshcd.h static inline void *ufshcd_get_variant(struct ufs_hba *hba)
hba 834 drivers/scsi/ufs/ufshcd.h BUG_ON(!hba);
hba 835 drivers/scsi/ufs/ufshcd.h return hba->priv;
hba 838 drivers/scsi/ufs/ufshcd.h struct ufs_hba *hba)
hba 840 drivers/scsi/ufs/ufshcd.h return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND;
hba 843 drivers/scsi/ufs/ufshcd.h extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
hba 844 drivers/scsi/ufs/ufshcd.h extern int ufshcd_runtime_resume(struct ufs_hba *hba);
hba 845 drivers/scsi/ufs/ufshcd.h extern int ufshcd_runtime_idle(struct ufs_hba *hba);
hba 846 drivers/scsi/ufs/ufshcd.h extern int ufshcd_system_suspend(struct ufs_hba *hba);
hba 847 drivers/scsi/ufs/ufshcd.h extern int ufshcd_system_resume(struct ufs_hba *hba);
hba 848 drivers/scsi/ufs/ufshcd.h extern int ufshcd_shutdown(struct ufs_hba *hba);
hba 849 drivers/scsi/ufs/ufshcd.h extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
hba 851 drivers/scsi/ufs/ufshcd.h extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
hba 853 drivers/scsi/ufs/ufshcd.h extern int ufshcd_config_pwr_mode(struct ufs_hba *hba,
hba 862 drivers/scsi/ufs/ufshcd.h static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
hba 865 drivers/scsi/ufs/ufshcd.h return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
hba 869 drivers/scsi/ufs/ufshcd.h static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel,
hba 872 drivers/scsi/ufs/ufshcd.h return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
hba 876 drivers/scsi/ufs/ufshcd.h static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel,
hba 879 drivers/scsi/ufs/ufshcd.h return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
hba 883 drivers/scsi/ufs/ufshcd.h static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel,
hba 886 drivers/scsi/ufs/ufshcd.h return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
hba 890 drivers/scsi/ufs/ufshcd.h static inline int ufshcd_dme_get(struct ufs_hba *hba,
hba 893 drivers/scsi/ufs/ufshcd.h return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
hba 896 drivers/scsi/ufs/ufshcd.h static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
hba 899 drivers/scsi/ufs/ufshcd.h return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
hba 911 drivers/scsi/ufs/ufshcd.h int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
hba 916 drivers/scsi/ufs/ufshcd.h int ufshcd_read_desc_param(struct ufs_hba *hba,
hba 922 drivers/scsi/ufs/ufshcd.h int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
hba 924 drivers/scsi/ufs/ufshcd.h int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
hba 927 drivers/scsi/ufs/ufshcd.h void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
hba 931 drivers/scsi/ufs/ufshcd.h int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
hba 934 drivers/scsi/ufs/ufshcd.h int ufshcd_hold(struct ufs_hba *hba, bool async);
hba 935 drivers/scsi/ufs/ufshcd.h void ufshcd_release(struct ufs_hba *hba);
hba 937 drivers/scsi/ufs/ufshcd.h int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
hba 940 drivers/scsi/ufs/ufshcd.h u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
hba 942 drivers/scsi/ufs/ufshcd.h int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
hba 944 drivers/scsi/ufs/ufshcd.h int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
hba 952 drivers/scsi/ufs/ufshcd.h static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
hba 954 drivers/scsi/ufs/ufshcd.h if (hba->vops)
hba 955 drivers/scsi/ufs/ufshcd.h return hba->vops->name;
hba 959 drivers/scsi/ufs/ufshcd.h static inline int ufshcd_vops_init(struct ufs_hba *hba)
hba 961 drivers/scsi/ufs/ufshcd.h if (hba->vops && hba->vops->init)
hba 962 drivers/scsi/ufs/ufshcd.h return hba->vops->init(hba);
hba 967 drivers/scsi/ufs/ufshcd.h static inline void ufshcd_vops_exit(struct ufs_hba *hba)
hba 969 drivers/scsi/ufs/ufshcd.h if (hba->vops && hba->vops->exit)
hba 970 drivers/scsi/ufs/ufshcd.h return hba->vops->exit(hba);
hba 973 drivers/scsi/ufs/ufshcd.h static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
hba 975 drivers/scsi/ufs/ufshcd.h if (hba->vops && hba->vops->get_ufs_hci_version)
hba 976 drivers/scsi/ufs/ufshcd.h return hba->vops->get_ufs_hci_version(hba);
hba 978 drivers/scsi/ufs/ufshcd.h return ufshcd_readl(hba, REG_UFS_VERSION);
hba 981 drivers/scsi/ufs/ufshcd.h static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
hba 984 drivers/scsi/ufs/ufshcd.h if (hba->vops && hba->vops->clk_scale_notify)
hba 985 drivers/scsi/ufs/ufshcd.h return hba->vops->clk_scale_notify(hba, up, status);
hba 989 drivers/scsi/ufs/ufshcd.h static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on,
hba 992 drivers/scsi/ufs/ufshcd.h if (hba->vops && hba->vops->setup_clocks)
hba 993 drivers/scsi/ufs/ufshcd.h return hba->vops->setup_clocks(hba, on, status);
hba 997 drivers/scsi/ufs/ufshcd.h static inline int ufshcd_vops_setup_regulators(struct ufs_hba *hba, bool status)
hba 999 drivers/scsi/ufs/ufshcd.h if (hba->vops && hba->vops->setup_regulators)
hba 1000 drivers/scsi/ufs/ufshcd.h return hba->vops->setup_regulators(hba, status);
hba 1005 drivers/scsi/ufs/ufshcd.h static inline int ufshcd_vops_hce_enable_notify(struct ufs_hba *hba,
hba 1008 drivers/scsi/ufs/ufshcd.h if (hba->vops && hba->vops->hce_enable_notify)
hba 1009 drivers/scsi/ufs/ufshcd.h return hba->vops->hce_enable_notify(hba, status);
hba 1013 drivers/scsi/ufs/ufshcd.h static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba,
hba 1016 drivers/scsi/ufs/ufshcd.h if (hba->vops && hba->vops->link_startup_notify)
hba 1017 drivers/scsi/ufs/ufshcd.h return hba->vops->link_startup_notify(hba, status);
hba 1022 drivers/scsi/ufs/ufshcd.h static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
hba 1027 drivers/scsi/ufs/ufshcd.h if (hba->vops && hba->vops->pwr_change_notify)
hba 1028 drivers/scsi/ufs/ufshcd.h return hba->vops->pwr_change_notify(hba, status,
hba 1034 drivers/scsi/ufs/ufshcd.h static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag,
hba 1037 drivers/scsi/ufs/ufshcd.h if (hba->vops && hba->vops->setup_xfer_req)
hba 1038 drivers/scsi/ufs/ufshcd.h return hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd);
hba 1041 drivers/scsi/ufs/ufshcd.h static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba,
hba 1044 drivers/scsi/ufs/ufshcd.h if (hba->vops && hba->vops->setup_task_mgmt)
hba 1045 drivers/scsi/ufs/ufshcd.h return hba->vops->setup_task_mgmt(hba, tag, tm_function);
hba 1048 drivers/scsi/ufs/ufshcd.h static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
hba 1052 drivers/scsi/ufs/ufshcd.h if (hba->vops && hba->vops->hibern8_notify)
hba 1053 drivers/scsi/ufs/ufshcd.h return hba->vops->hibern8_notify(hba, cmd, status);
hba 1056 drivers/scsi/ufs/ufshcd.h static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba,
hba 1059 drivers/scsi/ufs/ufshcd.h if (hba->vops && hba->vops->apply_dev_quirks)
hba 1060 drivers/scsi/ufs/ufshcd.h return hba->vops->apply_dev_quirks(hba, card);
hba 1064 drivers/scsi/ufs/ufshcd.h static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
hba 1066 drivers/scsi/ufs/ufshcd.h if (hba->vops && hba->vops->suspend)
hba 1067 drivers/scsi/ufs/ufshcd.h return hba->vops->suspend(hba, op);
hba 1072 drivers/scsi/ufs/ufshcd.h static inline int ufshcd_vops_resume(struct ufs_hba *hba, enum ufs_pm_op op)
hba 1074 drivers/scsi/ufs/ufshcd.h if (hba->vops && hba->vops->resume)
hba 1075 drivers/scsi/ufs/ufshcd.h return hba->vops->resume(hba, op);
hba 1080 drivers/scsi/ufs/ufshcd.h static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
hba 1082 drivers/scsi/ufs/ufshcd.h if (hba->vops && hba->vops->dbg_register_dump)
hba 1083 drivers/scsi/ufs/ufshcd.h hba->vops->dbg_register_dump(hba);
hba 1086 drivers/scsi/ufs/ufshcd.h static inline void ufshcd_vops_device_reset(struct ufs_hba *hba)
hba 1088 drivers/scsi/ufs/ufshcd.h if (hba->vops && hba->vops->device_reset)
hba 1089 drivers/scsi/ufs/ufshcd.h hba->vops->device_reset(hba);
hba 1109 drivers/scsi/ufs/ufshcd.h int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
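Note: every ufshcd_vops_*() wrapper above repeats the same guard, if (hba->vops && hba->vops->hook), so a platform variant may leave any hook NULL and the core treats the call as a successful no-op. A compilable model of that dispatch pattern (illustrative names, not the driver's real types):

    #include <stdio.h>

    struct hba_sketch;

    /* Per-variant hook table; any member may be left NULL. */
    struct variant_ops_sketch {
            int (*suspend)(struct hba_sketch *hba);
    };

    struct hba_sketch {
            const struct variant_ops_sketch *vops;
    };

    /* Mirrors the ufshcd_vops_*() wrappers: check the table, then the hook. */
    static int vops_suspend_sketch(struct hba_sketch *hba)
    {
            if (hba->vops && hba->vops->suspend)
                    return hba->vops->suspend(hba);
            return 0; /* no hook installed: treat as success */
    }

    static int my_suspend(struct hba_sketch *hba) { (void)hba; puts("variant suspend"); return 0; }
    static const struct variant_ops_sketch my_ops = { .suspend = my_suspend };

    int main(void)
    {
            struct hba_sketch with = { &my_ops }, without = { NULL };
            return vops_suspend_sketch(&with) | vops_suspend_sketch(&without);
    }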
hba 40 drivers/target/loopback/tcm_loop.c #define to_tcm_loop_hba(hba) container_of(hba, struct tcm_loop_hba, dev)
hba 2098 drivers/target/target_core_configfs.c struct se_hba *hba = dev->se_hba;
hba 2117 drivers/target/target_core_configfs.c config_item_name(&hba->hba_group.cg_item),
hba 2138 drivers/target/target_core_configfs.c struct se_hba *hba = dev->se_hba;
hba 2158 drivers/target/target_core_configfs.c config_item_name(&hba->hba_group.cg_item),
hba 2220 drivers/target/target_core_configfs.c struct se_hba *hba = dev->se_hba;
hba 2262 drivers/target/target_core_configfs.c config_item_name(&hba->hba_group.cg_item),
hba 2287 drivers/target/target_core_configfs.c config_item_name(&hba->hba_group.cg_item),
hba 2549 drivers/target/target_core_configfs.c struct se_hba *hba;
hba 2559 drivers/target/target_core_configfs.c hba = dev->se_hba;
hba 2562 drivers/target/target_core_configfs.c config_item_name(&hba->hba_group.cg_item),
hba 3149 drivers/target/target_core_configfs.c struct se_hba *hba = item_to_hba(hba_ci);
hba 3150 drivers/target/target_core_configfs.c struct target_backend *tb = hba->backend;
hba 3154 drivers/target/target_core_configfs.c ret = mutex_lock_interruptible(&hba->hba_access_mutex);
hba 3158 drivers/target/target_core_configfs.c dev = target_alloc_device(hba, name);
hba 3209 drivers/target/target_core_configfs.c mutex_unlock(&hba->hba_access_mutex);
hba 3215 drivers/target/target_core_configfs.c mutex_unlock(&hba->hba_access_mutex);
hba 3226 drivers/target/target_core_configfs.c struct se_hba *hba;
hba 3228 drivers/target/target_core_configfs.c hba = item_to_hba(&dev->se_hba->hba_group.cg_item);
hba 3230 drivers/target/target_core_configfs.c mutex_lock(&hba->hba_access_mutex);
hba 3247 drivers/target/target_core_configfs.c mutex_unlock(&hba->hba_access_mutex);
hba 3263 drivers/target/target_core_configfs.c struct se_hba *hba = to_hba(item);
hba 3266 drivers/target/target_core_configfs.c hba->hba_id, hba->backend->ops->name,
hba 3272 drivers/target/target_core_configfs.c struct se_hba *hba = to_hba(item);
hba 3275 drivers/target/target_core_configfs.c if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE)
hba 3284 drivers/target/target_core_configfs.c struct se_hba *hba = to_hba(item);
hba 3288 drivers/target/target_core_configfs.c if (hba->backend->ops->pmode_enable_hba == NULL)
hba 3297 drivers/target/target_core_configfs.c if (hba->dev_count) {
hba 3302 drivers/target/target_core_configfs.c ret = hba->backend->ops->pmode_enable_hba(hba, mode_flag);
hba 3306 drivers/target/target_core_configfs.c hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
hba 3308 drivers/target/target_core_configfs.c hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
hba 3318 drivers/target/target_core_configfs.c struct se_hba *hba = container_of(to_config_group(item),
hba 3320 drivers/target/target_core_configfs.c core_delete_hba(hba);
hba 3345 drivers/target/target_core_configfs.c struct se_hba *hba;
hba 3390 drivers/target/target_core_configfs.c hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0);
hba 3391 drivers/target/target_core_configfs.c if (IS_ERR(hba))
hba 3392 drivers/target/target_core_configfs.c return ERR_CAST(hba);
hba 3394 drivers/target/target_core_configfs.c config_group_init_type_name(&hba->hba_group, name,
hba 3397 drivers/target/target_core_configfs.c return &hba->hba_group;
hba 724 drivers/target/target_core_device.c struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
hba 729 drivers/target/target_core_device.c dev = hba->backend->ops->alloc_device(hba, name);
hba 733 drivers/target/target_core_device.c dev->se_hba = hba;
hba 734 drivers/target/target_core_device.c dev->transport = hba->backend->ops;
hba 736 drivers/target/target_core_device.c dev->hba_index = hba->hba_index;
hba 915 drivers/target/target_core_device.c struct se_hba *hba = dev->se_hba;
hba 971 drivers/target/target_core_device.c spin_lock(&hba->device_lock);
hba 972 drivers/target/target_core_device.c hba->dev_count++;
hba 973 drivers/target/target_core_device.c spin_unlock(&hba->device_lock);
hba 992 drivers/target/target_core_device.c struct se_hba *hba = dev->se_hba;
hba 1003 drivers/target/target_core_device.c spin_lock(&hba->device_lock);
hba 1004 drivers/target/target_core_device.c hba->dev_count--;
hba 1005 drivers/target/target_core_device.c spin_unlock(&hba->device_lock);
hba 1021 drivers/target/target_core_device.c struct se_hba *hba;
hba 1026 drivers/target/target_core_device.c hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
hba 1027 drivers/target/target_core_device.c if (IS_ERR(hba))
hba 1028 drivers/target/target_core_device.c return PTR_ERR(hba);
hba 1030 drivers/target/target_core_device.c dev = target_alloc_device(hba, "virt_lun0");
hba 1036 drivers/target/target_core_device.c hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));
hba 1042 drivers/target/target_core_device.c lun0_hba = hba;
hba 1049 drivers/target/target_core_device.c core_delete_hba(hba);
hba 1056 drivers/target/target_core_device.c struct se_hba *hba = lun0_hba;
hba 1058 drivers/target/target_core_device.c if (!hba)
hba 1063 drivers/target/target_core_device.c core_delete_hba(hba);
hba 36 drivers/target/target_core_file.c static int fd_attach_hba(struct se_hba *hba, u32 host_id)
hba 48 drivers/target/target_core_file.c hba->hba_ptr = fd_host;
hba 51 drivers/target/target_core_file.c " Target Core Stack %s\n", hba->hba_id, FD_VERSION,
hba 54 drivers/target/target_core_file.c hba->hba_id, fd_host->fd_host_id);
hba 59 drivers/target/target_core_file.c static void fd_detach_hba(struct se_hba *hba)
hba 61 drivers/target/target_core_file.c struct fd_host *fd_host = hba->hba_ptr;
hba 64 drivers/target/target_core_file.c " Target Core\n", hba->hba_id, fd_host->fd_host_id);
hba 67 drivers/target/target_core_file.c hba->hba_ptr = NULL;
hba 70 drivers/target/target_core_file.c static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name)
hba 73 drivers/target/target_core_file.c struct fd_host *fd_host = hba->hba_ptr;
hba 111 drivers/target/target_core_hba.c struct se_hba *hba;
hba 114 drivers/target/target_core_hba.c hba = kzalloc(sizeof(*hba), GFP_KERNEL);
hba 115 drivers/target/target_core_hba.c if (!hba) {
hba 120 drivers/target/target_core_hba.c spin_lock_init(&hba->device_lock);
hba 121 drivers/target/target_core_hba.c mutex_init(&hba->hba_access_mutex);
hba 123 drivers/target/target_core_hba.c hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
hba 124 drivers/target/target_core_hba.c hba->hba_flags |= hba_flags;
hba 126 drivers/target/target_core_hba.c hba->backend = core_get_backend(plugin_name);
hba 127 drivers/target/target_core_hba.c if (!hba->backend) {
hba 132 drivers/target/target_core_hba.c ret = hba->backend->ops->attach_hba(hba, plugin_dep_id);
hba 137 drivers/target/target_core_hba.c hba->hba_id = hba_id_counter++;
hba 138 drivers/target/target_core_hba.c list_add_tail(&hba->hba_node, &hba_list);
hba 142 drivers/target/target_core_hba.c " Core\n", hba->hba_id);
hba 144 drivers/target/target_core_hba.c return hba;
hba 147 drivers/target/target_core_hba.c module_put(hba->backend->ops->owner);
hba 148 drivers/target/target_core_hba.c hba->backend = NULL;
hba 150 drivers/target/target_core_hba.c kfree(hba);
hba 155 drivers/target/target_core_hba.c core_delete_hba(struct se_hba *hba)
hba 157 drivers/target/target_core_hba.c WARN_ON(hba->dev_count);
hba 159 drivers/target/target_core_hba.c hba->backend->ops->detach_hba(hba);
hba 162 drivers/target/target_core_hba.c list_del(&hba->hba_node);
hba 166 drivers/target/target_core_hba.c " Core\n", hba->hba_id);
hba 168 drivers/target/target_core_hba.c module_put(hba->backend->ops->owner);
hba 170 drivers/target/target_core_hba.c hba->backend = NULL;
hba 171 drivers/target/target_core_hba.c kfree(hba);
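Note: core_alloc_hba()/core_delete_hba() above show the target-core HBA lifecycle: allocate, look up the named backend, call its attach_hba() hook, and unwind (module_put(), kfree()) if anything fails; deletion runs detach_hba() before freeing. A simplified standalone model under those assumptions, without the locking, module refcounting, or configfs wiring:

    #include <stdlib.h>

    struct se_hba_sketch;

    /* Subset of the backend ops a target backend registers. */
    struct backend_ops_sketch {
            const char *name;
            int (*attach_hba)(struct se_hba_sketch *hba);
            void (*detach_hba)(struct se_hba_sketch *hba);
    };

    struct se_hba_sketch {
            const struct backend_ops_sketch *ops;
            void *hba_ptr; /* backend-private state, like hba->hba_ptr */
    };

    /* Toy "rd_mcp"-style backend: allocate private state on attach, free on detach. */
    static int rd_attach(struct se_hba_sketch *hba)
    {
            hba->hba_ptr = malloc(16);
            return hba->hba_ptr ? 0 : -1;
    }
    static void rd_detach(struct se_hba_sketch *hba)
    {
            free(hba->hba_ptr);
            hba->hba_ptr = NULL;
    }
    static const struct backend_ops_sketch rd_ops = { "rd_mcp", rd_attach, rd_detach };

    /* Mirrors core_alloc_hba(): allocate, attach, unwind on failure. */
    static struct se_hba_sketch *alloc_hba_sketch(const struct backend_ops_sketch *ops)
    {
            struct se_hba_sketch *hba = calloc(1, sizeof(*hba));
            if (!hba)
                    return NULL;
            hba->ops = ops;
            if (ops->attach_hba(hba)) {
                    free(hba);
                    return NULL;
            }
            return hba;
    }

    /* Mirrors core_delete_hba(): detach the backend, then free. */
    static void delete_hba_sketch(struct se_hba_sketch *hba)
    {
            hba->ops->detach_hba(hba);
            free(hba);
    }

    int main(void)
    {
            struct se_hba_sketch *hba = alloc_hba_sketch(&rd_ops);
            if (!hba)
                    return 1;
            delete_hba_sketch(hba);
            return 0;
    }

The per-backend attach/detach entries that follow (file, iblock, pscsi, rd, tcmu) are the concrete implementations of these two hooks.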
hba 42 drivers/target/target_core_iblock.c static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
hba 45 drivers/target/target_core_iblock.c " Generic Target Core Stack %s\n", hba->hba_id,
hba 50 drivers/target/target_core_iblock.c static void iblock_detach_hba(struct se_hba *hba)
hba 54 drivers/target/target_core_iblock.c static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
hba 86 drivers/target/target_core_internal.h struct se_device *target_alloc_device(struct se_hba *hba, const char *name);
hba 52 drivers/target/target_core_pscsi.c static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
hba 64 drivers/target/target_core_pscsi.c hba->hba_ptr = phv;
hba 67 drivers/target/target_core_pscsi.c " Generic Target Core Stack %s\n", hba->hba_id,
hba 70 drivers/target/target_core_pscsi.c hba->hba_id);
hba 75 drivers/target/target_core_pscsi.c static void pscsi_detach_hba(struct se_hba *hba)
hba 77 drivers/target/target_core_pscsi.c struct pscsi_hba_virt *phv = hba->hba_ptr;
hba 84 drivers/target/target_core_pscsi.c " Generic Target Core\n", hba->hba_id,
hba 89 drivers/target/target_core_pscsi.c " from Generic Target Core\n", hba->hba_id);
hba 92 drivers/target/target_core_pscsi.c hba->hba_ptr = NULL;
hba 95 drivers/target/target_core_pscsi.c static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
hba 97 drivers/target/target_core_pscsi.c struct pscsi_hba_virt *phv = hba->hba_ptr;
hba 110 drivers/target/target_core_pscsi.c " %s\n", hba->hba_id, (sh->hostt->name) ?
hba 131 drivers/target/target_core_pscsi.c hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown");
hba 336 drivers/target/target_core_pscsi.c static struct se_device *pscsi_alloc_device(struct se_hba *hba,
hba 428 drivers/target/target_core_pscsi.c struct se_hba *hba = dev->se_hba;
hba 469 drivers/target/target_core_pscsi.c if (hba->dev_count) {
hba 475 drivers/target/target_core_pscsi.c if (pscsi_pmode_enable_hba(hba, 1) != 1)
hba 479 drivers/target/target_core_pscsi.c hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
hba 523 drivers/target/target_core_pscsi.c pscsi_pmode_enable_hba(hba, 0);
hba 524 drivers/target/target_core_pscsi.c hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
hba 539 drivers/target/target_core_pscsi.c pscsi_pmode_enable_hba(hba, 0);
hba 540 drivers/target/target_core_pscsi.c hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
hba 33 drivers/target/target_core_rd.c static int rd_attach_hba(struct se_hba *hba, u32 host_id)
hba 43 drivers/target/target_core_rd.c hba->hba_ptr = rd_host;
hba 46 drivers/target/target_core_rd.c " Generic Target Core Stack %s\n", hba->hba_id,
hba 52 drivers/target/target_core_rd.c static void rd_detach_hba(struct se_hba *hba)
hba 54 drivers/target/target_core_rd.c struct rd_host *rd_host = hba->hba_ptr;
hba 57 drivers/target/target_core_rd.c " Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);
hba 60 drivers/target/target_core_rd.c hba->hba_ptr = NULL;
hba 270 drivers/target/target_core_rd.c static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
hba 273 drivers/target/target_core_rd.c struct rd_host *rd_host = hba->hba_ptr;
hba 53 drivers/target/target_core_stat.c struct se_hba *hba = to_stat_dev(item)->se_hba;
hba 55 drivers/target/target_core_stat.c return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
hba 103 drivers/target/target_core_stat.c struct se_hba *hba = to_stat_tgt_dev(item)->se_hba;
hba 105 drivers/target/target_core_stat.c return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
hba 201 drivers/target/target_core_stat.c struct se_hba *hba = to_stat_lu_dev(item)->se_hba;
hba 203 drivers/target/target_core_stat.c return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
hba 116 drivers/target/target_core_user.c struct se_hba *hba;
hba 1354 drivers/target/target_core_user.c static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
hba 1363 drivers/target/target_core_user.c hba->hba_ptr = tcmu_hba;
hba 1368 drivers/target/target_core_user.c static void tcmu_detach_hba(struct se_hba *hba)
hba 1370 drivers/target/target_core_user.c kfree(hba->hba_ptr);
hba 1371 drivers/target/target_core_user.c hba->hba_ptr = NULL;
hba 1374 drivers/target/target_core_user.c static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
hba 1389 drivers/target/target_core_user.c udev->hba = hba;
hba 1849 drivers/target/target_core_user.c struct tcmu_hba *hba = udev->hba->hba_ptr;
hba 1856 drivers/target/target_core_user.c str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s/%s", hba->host_id,
hba 1859 drivers/target/target_core_user.c str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s", hba->host_id,
hba 2728 mm/memory.c pgoff_t hba = holebegin >> PAGE_SHIFT;
hba 2736 mm/memory.c hlen = ULONG_MAX - hba + 1;
hba 2739 mm/memory.c unmap_mapping_pages(mapping, hba, hlen, even_cows);