Lines matching refs:hba in drivers/scsi/stex.c (the Promise SuperTrak EX SCSI driver). The left-hand numbers are that file's own line numbers; the trailing annotation names the enclosing function and how the identifier is used (argument or local).
373 static struct status_msg *stex_get_status(struct st_hba *hba) in stex_get_status() argument
375 struct status_msg *status = hba->status_buffer + hba->status_tail; in stex_get_status()
377 ++hba->status_tail; in stex_get_status()
378 hba->status_tail %= hba->sts_count+1; in stex_get_status()
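
Worth noting in stex_get_status(): the status ring actually holds sts_count+1 entries, so the tail wraps at that size. A minimal standalone sketch of the same index arithmetic (STS_COUNT is a made-up value here; the driver takes it from st_card_info):

#include <stdio.h>

#define STS_COUNT 7     /* hypothetical; the driver reads this from st_card_info */

static unsigned int status_tail;

/* Mirror of the tail advance in stex_get_status(). */
static unsigned int consume_status_slot(void)
{
        unsigned int slot = status_tail;

        ++status_tail;
        status_tail %= STS_COUNT + 1;   /* the ring holds sts_count+1 entries */
        return slot;
}

int main(void)
{
        for (int i = 0; i < 2 * (STS_COUNT + 1); i++)
                printf("slot %u\n", consume_status_slot());
        return 0;
}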
394 static struct req_msg *stex_alloc_req(struct st_hba *hba) in stex_alloc_req() argument
396 struct req_msg *req = hba->dma_mem + hba->req_head * hba->rq_size; in stex_alloc_req()
398 ++hba->req_head; in stex_alloc_req()
399 hba->req_head %= hba->rq_count+1; in stex_alloc_req()
404 static struct req_msg *stex_ss_alloc_req(struct st_hba *hba) in stex_ss_alloc_req() argument
406 return (struct req_msg *)(hba->dma_mem + in stex_ss_alloc_req()
407 hba->req_head * hba->rq_size + sizeof(struct st_msg_header)); in stex_ss_alloc_req()
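
The two allocators address the same ring differently: stex_alloc_req() hands out the head slot and advances the head itself, while stex_ss_alloc_req() returns the payload just past a st_msg_header prefix and leaves the head alone (stex_ss_send_cmd(), below, advances it later). A hedged userspace sketch of that addressing; the sizes and the header layout here are stand-ins, not the driver's:

#include <stdint.h>
#include <stdio.h>

#define RQ_COUNT 31             /* hypothetical */
#define RQ_SIZE  1024           /* hypothetical */

struct st_msg_header {          /* stand-in layout, not the driver's */
        uint64_t handle;
        uint32_t reserved[2];
};

static uint8_t dma_mem[(RQ_COUNT + 1) * RQ_SIZE];
static unsigned int req_head;

/* stex_alloc_req() shape: hand out the head slot, advance the head. */
static void *alloc_req(void)
{
        void *slot = dma_mem + req_head * RQ_SIZE;

        req_head = (req_head + 1) % (RQ_COUNT + 1);
        return slot;
}

/* stex_ss_alloc_req() shape: same slot past the header; head moves in send. */
static void *ss_alloc_req(void)
{
        return dma_mem + req_head * RQ_SIZE + sizeof(struct st_msg_header);
}

int main(void)
{
        printf("plain %p, ss %p\n", alloc_req(), ss_alloc_req());
        return 0;
}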
410 static int stex_map_sg(struct st_hba *hba, in stex_map_sg() argument
427 dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize); in stex_map_sg()
442 static int stex_ss_map_sg(struct st_hba *hba, in stex_ss_map_sg() argument
459 dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize); in stex_ss_map_sg()
475 static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb) in stex_controller_info() argument
480 p = hba->copy_buffer; in stex_controller_info()
483 *(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0); in stex_controller_info()
491 p->bus = hba->pdev->bus->number; in stex_controller_info()
492 p->slot = hba->pdev->devfn; in stex_controller_info()
494 p->irq_vec = hba->pdev->irq; in stex_controller_info()
495 p->id = hba->pdev->vendor << 16 | hba->pdev->device; in stex_controller_info()
497 hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device; in stex_controller_info()
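
stex_controller_info() reports the controller's PCI identity to the firmware, packing each vendor/device pair into a single 32-bit word. The packing, with invented example values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t vendor = 0x105a, device = 0x8350;      /* invented example IDs */
        uint32_t id = (uint32_t)vendor << 16 | device;  /* as in p->id above */

        printf("id = 0x%08x\n", id);
        return 0;
}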
503 stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag) in stex_send_cmd() argument
507 hba->ccb[tag].req = req; in stex_send_cmd()
508 hba->out_req_cnt++; in stex_send_cmd()
510 writel(hba->req_head, hba->mmio_base + IMR0); in stex_send_cmd()
511 writel(MU_INBOUND_DOORBELL_REQHEADCHANGED, hba->mmio_base + IDBL); in stex_send_cmd()
512 readl(hba->mmio_base + IDBL); /* flush */ in stex_send_cmd()
516 stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag) in stex_ss_send_cmd() argument
524 hba->ccb[tag].req = req; in stex_ss_send_cmd()
525 hba->out_req_cnt++; in stex_ss_send_cmd()
527 cmd = hba->ccb[tag].cmd; in stex_ss_send_cmd()
533 addr = hba->dma_handle + hba->req_head * hba->rq_size; in stex_ss_send_cmd()
534 addr += (hba->ccb[tag].sg_count+4)/11; in stex_ss_send_cmd()
537 ++hba->req_head; in stex_ss_send_cmd()
538 hba->req_head %= hba->rq_count+1; in stex_ss_send_cmd()
540 writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI); in stex_ss_send_cmd()
541 readl(hba->mmio_base + YH2I_REQ_HI); /* flush */ in stex_ss_send_cmd()
542 writel(addr, hba->mmio_base + YH2I_REQ); in stex_ss_send_cmd()
543 readl(hba->mmio_base + YH2I_REQ); /* flush */ in stex_ss_send_cmd()
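
Two idioms recur in these send paths. The trailing readl() of the register just written forces posted PCI writes out to the device before the CPU moves on, and (addr >> 16) >> 16 extracts the high 32 bits of a DMA address without shifting a possibly 32-bit dma_addr_t by 32 (which would be undefined). A minimal compilable sketch of the doorbell pattern, with stub accessors standing in for writel()/readl() and invented register indices:

#include <stdint.h>

#define IMR0 0
#define IDBL 1

/* Stub MMIO accessors standing in for the kernel's writel()/readl(). */
static volatile uint32_t fake_bar[2];

static void mmio_write32(volatile uint32_t *reg, uint32_t val) { *reg = val; }
static uint32_t mmio_read32(volatile uint32_t *reg) { return *reg; }

static void ring_doorbell(uint32_t req_head, uint32_t doorbell_bit)
{
        mmio_write32(&fake_bar[IMR0], req_head);        /* publish the new head */
        mmio_write32(&fake_bar[IDBL], doorbell_bit);    /* ring the doorbell */
        (void)mmio_read32(&fake_bar[IDBL]);             /* read back: flush posted writes */
}

int main(void)
{
        ring_doorbell(3, 1u << 0);
        return 0;
}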
559 struct st_hba *hba; in stex_queuecommand_lck() local
568 hba = (struct st_hba *) &host->hostdata[0]; in stex_queuecommand_lck()
570 if (unlikely(hba->mu_status == MU_STATE_RESETTING)) in stex_queuecommand_lck()
596 if (hba->cardtype == st_shasta || id == host->max_id - 1) { in stex_queuecommand_lck()
636 ver.host_no = hba->host->host_no; in stex_queuecommand_lck()
655 req = hba->alloc_rq(hba); in stex_queuecommand_lck()
670 hba->ccb[tag].cmd = cmd; in stex_queuecommand_lck()
671 hba->ccb[tag].sense_bufflen = SCSI_SENSE_BUFFERSIZE; in stex_queuecommand_lck()
672 hba->ccb[tag].sense_buffer = cmd->sense_buffer; in stex_queuecommand_lck()
674 if (!hba->map_sg(hba, req, &hba->ccb[tag])) { in stex_queuecommand_lck()
675 hba->ccb[tag].sg_count = 0; in stex_queuecommand_lck()
679 hba->send(hba, req, tag); in stex_queuecommand_lck()
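
The queuecommand path stays generation-agnostic by dispatching through per-card hooks (alloc_rq, map_sg, send) that probe binds from st_card_info. A minimal sketch of that dispatch shape; all types and names here are stand-ins:

#include <stdio.h>

struct hba;
struct req { int tag; int sg_count; };

/* Per-card ops, bound once at probe time. */
struct hba {
        struct req *(*alloc_rq)(struct hba *);
        int (*map_sg)(struct hba *, struct req *);
        void (*send)(struct hba *, struct req *, int tag);
};

static struct req slot;

static struct req *alloc_plain(struct hba *h) { (void)h; return &slot; }
static int map_plain(struct hba *h, struct req *r) { (void)h; (void)r; return 0; }
static void send_plain(struct hba *h, struct req *r, int tag)
{
        (void)h;
        printf("sending tag %d, sg_count %d\n", tag, r->sg_count);
}

static void queuecommand(struct hba *h, int tag)
{
        struct req *r = h->alloc_rq(h);

        r->tag = tag;
        r->sg_count = h->map_sg(h, r);  /* 0: no data phase, as in the driver */
        h->send(h, r, tag);
}

int main(void)
{
        struct hba h = { alloc_plain, map_plain, send_plain };

        queuecommand(&h, 5);
        return 0;
}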
742 static void stex_check_cmd(struct st_hba *hba, in stex_check_cmd() argument
751 static void stex_mu_intr(struct st_hba *hba, u32 doorbell) in stex_mu_intr() argument
753 void __iomem *base = hba->mmio_base; in stex_mu_intr()
763 hba->status_head = readl(base + OMR1); in stex_mu_intr()
764 if (unlikely(hba->status_head > hba->sts_count)) { in stex_mu_intr()
766 pci_name(hba->pdev)); in stex_mu_intr()
778 if (unlikely(hba->out_req_cnt <= 0 || in stex_mu_intr()
779 (hba->mu_status == MU_STATE_RESETTING && in stex_mu_intr()
780 hba->cardtype != st_yosemite))) { in stex_mu_intr()
781 hba->status_tail = hba->status_head; in stex_mu_intr()
785 while (hba->status_tail != hba->status_head) { in stex_mu_intr()
786 resp = stex_get_status(hba); in stex_mu_intr()
788 if (unlikely(tag >= hba->host->can_queue)) { in stex_mu_intr()
790 "(%s): invalid tag\n", pci_name(hba->pdev)); in stex_mu_intr()
794 hba->out_req_cnt--; in stex_mu_intr()
795 ccb = &hba->ccb[tag]; in stex_mu_intr()
796 if (unlikely(hba->wait_ccb == ccb)) in stex_mu_intr()
797 hba->wait_ccb = NULL; in stex_mu_intr()
800 "(%s): lagging req\n", pci_name(hba->pdev)); in stex_mu_intr()
808 pci_name(hba->pdev)); in stex_mu_intr()
820 if (hba->cardtype == st_yosemite) in stex_mu_intr()
821 stex_check_cmd(hba, ccb, resp); in stex_mu_intr()
825 stex_controller_info(hba, ccb); in stex_mu_intr()
834 writel(hba->status_head, base + IMR1); in stex_mu_intr()
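
stex_mu_intr() drains the status ring from tail to head and validates every firmware-supplied tag against can_queue before indexing hba->ccb[], logging and skipping bad ones rather than trusting them. A condensed, compilable sketch of the drain loop (sizes are hypothetical):

#include <stdio.h>

#define CAN_QUEUE 32    /* hypothetical */
#define STS_COUNT 7     /* hypothetical */

static unsigned int status_head, status_tail;
static unsigned int ring_tags[STS_COUNT + 1];

static void drain_status_ring(void)
{
        while (status_tail != status_head) {
                unsigned int tag = ring_tags[status_tail];

                ++status_tail;
                status_tail %= STS_COUNT + 1;

                if (tag >= CAN_QUEUE) {         /* defend against bad firmware data */
                        fprintf(stderr, "invalid tag %u\n", tag);
                        continue;
                }
                printf("completing tag %u\n", tag);
        }
}

int main(void)
{
        ring_tags[0] = 3;
        ring_tags[1] = 99;      /* deliberately invalid */
        status_head = 2;
        drain_status_ring();
        return 0;
}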
840 struct st_hba *hba = __hba; in stex_intr() local
841 void __iomem *base = hba->mmio_base; in stex_intr()
845 spin_lock_irqsave(hba->host->host_lock, flags); in stex_intr()
853 stex_mu_intr(hba, data); in stex_intr()
854 spin_unlock_irqrestore(hba->host->host_lock, flags); in stex_intr()
856 hba->cardtype == st_shasta)) in stex_intr()
857 queue_work(hba->work_q, &hba->reset_work); in stex_intr()
861 spin_unlock_irqrestore(hba->host->host_lock, flags); in stex_intr()
866 static void stex_ss_mu_intr(struct st_hba *hba) in stex_ss_mu_intr() argument
876 if (unlikely(hba->out_req_cnt <= 0 || in stex_ss_mu_intr()
877 hba->mu_status == MU_STATE_RESETTING)) in stex_ss_mu_intr()
880 while (count < hba->sts_count) { in stex_ss_mu_intr()
881 scratch = hba->scratch + hba->status_tail; in stex_ss_mu_intr()
886 resp = hba->status_buffer + hba->status_tail; in stex_ss_mu_intr()
889 ++hba->status_tail; in stex_ss_mu_intr()
890 hba->status_tail %= hba->sts_count+1; in stex_ss_mu_intr()
893 if (unlikely(tag >= hba->host->can_queue)) { in stex_ss_mu_intr()
895 "(%s): invalid tag\n", pci_name(hba->pdev)); in stex_ss_mu_intr()
899 hba->out_req_cnt--; in stex_ss_mu_intr()
900 ccb = &hba->ccb[tag]; in stex_ss_mu_intr()
901 if (unlikely(hba->wait_ccb == ccb)) in stex_ss_mu_intr()
902 hba->wait_ccb = NULL; in stex_ss_mu_intr()
905 "(%s): lagging req\n", pci_name(hba->pdev)); in stex_ss_mu_intr()
921 pci_name(hba->pdev)); in stex_ss_mu_intr()
928 stex_check_cmd(hba, ccb, resp); in stex_ss_mu_intr()
941 struct st_hba *hba = __hba; in stex_ss_intr() local
942 void __iomem *base = hba->mmio_base; in stex_ss_intr()
946 spin_lock_irqsave(hba->host->host_lock, flags); in stex_ss_intr()
952 stex_ss_mu_intr(hba); in stex_ss_intr()
953 spin_unlock_irqrestore(hba->host->host_lock, flags); in stex_ss_intr()
955 queue_work(hba->work_q, &hba->reset_work); in stex_ss_intr()
959 spin_unlock_irqrestore(hba->host->host_lock, flags); in stex_ss_intr()
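
Both ISRs share one discipline: take host_lock with interrupts saved, ack and process completions under the lock, and push anything slow (a controller reset) to the work queue only after unlocking. A stubbed sketch of that shape; the lock and doorbell helpers below are stand-ins, not kernel APIs:

#include <stdio.h>
#include <stdbool.h>

static void lock_irqsave(void)      { /* spin_lock_irqsave(host_lock, flags) */ }
static void unlock_irqrestore(void) { /* spin_unlock_irqrestore(...)        */ }
static void queue_reset_work(void)  { printf("reset deferred to workqueue\n"); }

static bool read_and_ack_doorbell(unsigned int *data)
{
        *data = 0x1;            /* pretend the device raised a status interrupt */
        return true;
}

static int isr(void)
{
        unsigned int data;

        lock_irqsave();
        if (!read_and_ack_doorbell(&data)) {
                unlock_irqrestore();
                return 0;                       /* IRQ_NONE: not ours */
        }
        /* drain completions here, still under the lock */
        unlock_irqrestore();

        if (data & 0x80000000)                  /* fatal bit: too slow for IRQ context */
                queue_reset_work();
        return 1;                               /* IRQ_HANDLED */
}

int main(void)
{
        return !isr();
}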
964 static int stex_common_handshake(struct st_hba *hba) in stex_common_handshake() argument
966 void __iomem *base = hba->mmio_base; in stex_common_handshake()
980 pci_name(hba->pdev)); in stex_common_handshake()
993 if (hba->host->can_queue > data) { in stex_common_handshake()
994 hba->host->can_queue = data; in stex_common_handshake()
995 hba->host->cmd_per_lun = data; in stex_common_handshake()
999 h = (struct handshake_frame *)hba->status_buffer; in stex_common_handshake()
1000 h->rb_phy = cpu_to_le64(hba->dma_handle); in stex_common_handshake()
1001 h->req_sz = cpu_to_le16(hba->rq_size); in stex_common_handshake()
1002 h->req_cnt = cpu_to_le16(hba->rq_count+1); in stex_common_handshake()
1004 h->status_cnt = cpu_to_le16(hba->sts_count+1); in stex_common_handshake()
1007 if (hba->extra_offset) { in stex_common_handshake()
1008 h->extra_offset = cpu_to_le32(hba->extra_offset); in stex_common_handshake()
1009 h->extra_size = cpu_to_le32(hba->dma_size - hba->extra_offset); in stex_common_handshake()
1013 status_phys = hba->dma_handle + (hba->rq_count+1) * hba->rq_size; in stex_common_handshake()
1030 pci_name(hba->pdev)); in stex_common_handshake()
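
The handshake frame describes the DMA layout to the firmware: counts go across as count+1 (matching the ring sizing everywhere else), and the status ring sits directly after the request ring in the same coherent buffer, hence status_phys = dma_handle + (rq_count+1) * rq_size. A sketch of that arithmetic with invented sizes:

#include <stdint.h>
#include <stdio.h>

#define RQ_COUNT  31    /* hypothetical */
#define RQ_SIZE   1024  /* hypothetical */
#define STS_COUNT 7     /* hypothetical */
#define STS_SIZE  64    /* hypothetical sizeof(struct status_msg) */

int main(void)
{
        uint64_t dma_handle = 0x10000000;       /* invented bus address */
        uint64_t req_phys   = dma_handle;
        uint64_t sts_phys   = dma_handle + (RQ_COUNT + 1) * (uint64_t)RQ_SIZE;
        uint64_t cp_offset  = (RQ_COUNT + 1) * (uint64_t)RQ_SIZE
                            + (STS_COUNT + 1) * (uint64_t)STS_SIZE;

        printf("req ring @ %#llx\n", (unsigned long long)req_phys);
        printf("sts ring @ %#llx\n", (unsigned long long)sts_phys);
        printf("copy buf @ +%#llx\n", (unsigned long long)cp_offset);
        return 0;
}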
1048 static int stex_ss_handshake(struct st_hba *hba) in stex_ss_handshake() argument
1050 void __iomem *base = hba->mmio_base; in stex_ss_handshake()
1063 pci_name(hba->pdev)); in stex_ss_handshake()
1069 msg_h = (struct st_msg_header *)hba->dma_mem; in stex_ss_handshake()
1070 msg_h->handle = cpu_to_le64(hba->dma_handle); in stex_ss_handshake()
1074 h->rb_phy = cpu_to_le64(hba->dma_handle); in stex_ss_handshake()
1075 h->req_sz = cpu_to_le16(hba->rq_size); in stex_ss_handshake()
1076 h->req_cnt = cpu_to_le16(hba->rq_count+1); in stex_ss_handshake()
1078 h->status_cnt = cpu_to_le16(hba->sts_count+1); in stex_ss_handshake()
1082 scratch_size = (hba->sts_count+1)*sizeof(u32); in stex_ss_handshake()
1088 writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI); in stex_ss_handshake()
1090 writel(hba->dma_handle, base + YH2I_REQ); in stex_ss_handshake()
1093 scratch = hba->scratch; in stex_ss_handshake()
1099 pci_name(hba->pdev)); in stex_ss_handshake()
1112 static int stex_handshake(struct st_hba *hba) in stex_handshake() argument
1118 err = (hba->cardtype == st_yel) ? in stex_handshake()
1119 stex_ss_handshake(hba) : stex_common_handshake(hba); in stex_handshake()
1120 spin_lock_irqsave(hba->host->host_lock, flags); in stex_handshake()
1121 mu_status = hba->mu_status; in stex_handshake()
1123 hba->req_head = 0; in stex_handshake()
1124 hba->req_tail = 0; in stex_handshake()
1125 hba->status_head = 0; in stex_handshake()
1126 hba->status_tail = 0; in stex_handshake()
1127 hba->out_req_cnt = 0; in stex_handshake()
1128 hba->mu_status = MU_STATE_STARTED; in stex_handshake()
1130 hba->mu_status = MU_STATE_FAILED; in stex_handshake()
1132 wake_up_all(&hba->reset_waitq); in stex_handshake()
1133 spin_unlock_irqrestore(hba->host->host_lock, flags); in stex_handshake()
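
stex_handshake() is also the synchronization point: under host_lock it rewinds every ring index, flips mu_status to STARTED (or FAILED), and wake_up_all()s reset_waitq so stex_do_reset() callers can re-check the state. A userspace analogue of that handoff using a mutex and condvar (build with -lpthread):

#include <pthread.h>
#include <stdio.h>

enum mu_state { MU_RESETTING, MU_STARTED, MU_FAILED };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t waitq = PTHREAD_COND_INITIALIZER;
static enum mu_state mu_status = MU_RESETTING;

static void *handshake(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        mu_status = MU_STARTED;         /* or MU_FAILED on handshake error */
        pthread_cond_broadcast(&waitq); /* wake_up_all(&hba->reset_waitq) */
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, handshake, NULL);
        pthread_mutex_lock(&lock);
        while (mu_status == MU_RESETTING)       /* wait_event() equivalent */
                pthread_cond_wait(&waitq, &lock);
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        printf("mu_status = %d\n", mu_status);
        return 0;
}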
1140 struct st_hba *hba = (struct st_hba *)host->hostdata; in stex_abort() local
1149 base = hba->mmio_base; in stex_abort()
1152 hba->ccb[tag].req && hba->ccb[tag].cmd == cmd) in stex_abort()
1153 hba->wait_ccb = &hba->ccb[tag]; in stex_abort()
1157 if (hba->cardtype == st_yel) { in stex_abort()
1163 stex_ss_mu_intr(hba); in stex_abort()
1172 stex_mu_intr(hba, data); in stex_abort()
1174 if (hba->wait_ccb == NULL) { in stex_abort()
1176 "(%s): lost interrupt\n", pci_name(hba->pdev)); in stex_abort()
1182 hba->wait_ccb->req = NULL; /* nullify the req's future return */ in stex_abort()
1183 hba->wait_ccb = NULL; in stex_abort()
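
The abort protocol is a flag handoff: stex_abort() publishes the target ccb as hba->wait_ccb, then runs the interrupt path by hand; if the completion lands in that window, the handler clears wait_ccb (the wait_ccb == ccb checks in both mu_intr variants) and the abort learns the command already finished (logged above as a lost interrupt). A minimal sketch of the handoff, single-threaded for clarity:

#include <stdio.h>

struct ccb { int done; };
static struct ccb *wait_ccb;

/* Completion path: if this is the ccb an abort is waiting on, clear the flag. */
static void complete(struct ccb *c)
{
        if (wait_ccb == c)
                wait_ccb = NULL;
        c->done = 1;
}

static int try_abort(struct ccb *c)
{
        wait_ccb = c;           /* announce interest */
        complete(c);            /* "poll" the interrupt path, as stex_abort() does */
        if (!wait_ccb)
                return 1;       /* completed under us: nothing left to abort */
        wait_ccb->done = -1;    /* the driver nullifies ->req instead */
        wait_ccb = NULL;
        return 0;
}

int main(void)
{
        struct ccb c = { 0 };

        printf("completed-before-abort=%d\n", try_abort(&c));
        return 0;
}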
1190 static void stex_hard_reset(struct st_hba *hba) in stex_hard_reset() argument
1198 pci_read_config_dword(hba->pdev, i * 4, in stex_hard_reset()
1199 &hba->pdev->saved_config_space[i]); in stex_hard_reset()
1203 bus = hba->pdev->bus; in stex_hard_reset()
1217 pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd); in stex_hard_reset()
1225 pci_write_config_dword(hba->pdev, i * 4, in stex_hard_reset()
1226 hba->pdev->saved_config_space[i]); in stex_hard_reset()
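
stex_hard_reset() brackets the bus reset with a manual save and restore of all 16 config-header dwords, since the reset wipes the device's PCI config space. The bracket shape, with stubbed config accessors:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_cfg[16];   /* stands in for the device's config header */

static void cfg_read(int i, uint32_t *v) { *v = fake_cfg[i]; }
static void cfg_write(int i, uint32_t v) { fake_cfg[i] = v; }

static void bus_reset(void)
{
        for (int i = 0; i < 16; i++)            /* the reset clobbers config space */
                fake_cfg[i] = 0;
}

int main(void)
{
        uint32_t saved[16];

        fake_cfg[0] = 0x8350105a;               /* invented vendor/device word */
        for (int i = 0; i < 16; i++)            /* save */
                cfg_read(i, &saved[i]);
        bus_reset();                            /* the reset itself */
        for (int i = 0; i < 16; i++)            /* restore */
                cfg_write(i, saved[i]);
        printf("restored id %#x\n", fake_cfg[0]);
        return 0;
}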
1229 static int stex_yos_reset(struct st_hba *hba) in stex_yos_reset() argument
1235 base = hba->mmio_base; in stex_yos_reset()
1239 while (hba->out_req_cnt > 0) { in stex_yos_reset()
1242 "(%s): reset timeout\n", pci_name(hba->pdev)); in stex_yos_reset()
1249 spin_lock_irqsave(hba->host->host_lock, flags); in stex_yos_reset()
1251 hba->mu_status = MU_STATE_FAILED; in stex_yos_reset()
1253 hba->mu_status = MU_STATE_STARTED; in stex_yos_reset()
1254 wake_up_all(&hba->reset_waitq); in stex_yos_reset()
1255 spin_unlock_irqrestore(hba->host->host_lock, flags); in stex_yos_reset()
1260 static void stex_ss_reset(struct st_hba *hba) in stex_ss_reset() argument
1262 writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT); in stex_ss_reset()
1263 readl(hba->mmio_base + YH2I_INT); in stex_ss_reset()
1267 static int stex_do_reset(struct st_hba *hba) in stex_do_reset() argument
1274 spin_lock_irqsave(hba->host->host_lock, flags); in stex_do_reset()
1275 if (hba->mu_status == MU_STATE_STARTING) { in stex_do_reset()
1276 spin_unlock_irqrestore(hba->host->host_lock, flags); in stex_do_reset()
1278 pci_name(hba->pdev)); in stex_do_reset()
1281 while (hba->mu_status == MU_STATE_RESETTING) { in stex_do_reset()
1282 spin_unlock_irqrestore(hba->host->host_lock, flags); in stex_do_reset()
1283 wait_event_timeout(hba->reset_waitq, in stex_do_reset()
1284 hba->mu_status != MU_STATE_RESETTING, in stex_do_reset()
1286 spin_lock_irqsave(hba->host->host_lock, flags); in stex_do_reset()
1287 mu_status = hba->mu_status; in stex_do_reset()
1291 spin_unlock_irqrestore(hba->host->host_lock, flags); in stex_do_reset()
1295 hba->mu_status = MU_STATE_RESETTING; in stex_do_reset()
1296 spin_unlock_irqrestore(hba->host->host_lock, flags); in stex_do_reset()
1298 if (hba->cardtype == st_yosemite) in stex_do_reset()
1299 return stex_yos_reset(hba); in stex_do_reset()
1301 if (hba->cardtype == st_shasta) in stex_do_reset()
1302 stex_hard_reset(hba); in stex_do_reset()
1303 else if (hba->cardtype == st_yel) in stex_do_reset()
1304 stex_ss_reset(hba); in stex_do_reset()
1306 spin_lock_irqsave(hba->host->host_lock, flags); in stex_do_reset()
1307 for (tag = 0; tag < hba->host->can_queue; tag++) { in stex_do_reset()
1308 ccb = &hba->ccb[tag]; in stex_do_reset()
1319 spin_unlock_irqrestore(hba->host->host_lock, flags); in stex_do_reset()
1321 if (stex_handshake(hba) == 0) in stex_do_reset()
1325 pci_name(hba->pdev)); in stex_do_reset()
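
After the hardware-level reset, stex_do_reset() sweeps every tag under host_lock, dropping the req pointer so no stale completion is delivered and failing any SCSI command still attached, then re-runs stex_handshake() to decide SUCCESS or FAILED. A compact sketch of the sweep; the driver's DID_RESET completion is reduced to a printf here:

#include <stdio.h>

#define CAN_QUEUE 32    /* hypothetical */

struct ccb { void *req; void *cmd; };
static struct ccb ccbs[CAN_QUEUE];

static void fail_inflight(void)
{
        for (int tag = 0; tag < CAN_QUEUE; tag++) {
                struct ccb *c = &ccbs[tag];

                if (!c->req)
                        continue;       /* slot idle */
                c->req = NULL;          /* no completion will be delivered */
                if (c->cmd) {
                        printf("failing tag %d\n", tag);
                        c->cmd = NULL;
                }
        }
}

int main(void)
{
        static int dummy;

        ccbs[4].req = &dummy;
        ccbs[4].cmd = &dummy;
        fail_inflight();
        return 0;
}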
1331 struct st_hba *hba; in stex_reset() local
1333 hba = (struct st_hba *) &cmd->device->host->hostdata[0]; in stex_reset()
1338 return stex_do_reset(hba) ? FAILED : SUCCESS; in stex_reset()
1343 struct st_hba *hba = container_of(work, struct st_hba, reset_work); in stex_reset_work() local
1345 stex_do_reset(hba); in stex_reset_work()
1486 static int stex_request_irq(struct st_hba *hba) in stex_request_irq() argument
1488 struct pci_dev *pdev = hba->pdev; in stex_request_irq()
1498 hba->msi_enabled = 1; in stex_request_irq()
1500 hba->msi_enabled = 0; in stex_request_irq()
1502 status = request_irq(pdev->irq, hba->cardtype == st_yel ? in stex_request_irq()
1503 stex_ss_intr : stex_intr, IRQF_SHARED, DRV_NAME, hba); in stex_request_irq()
1506 if (hba->msi_enabled) in stex_request_irq()
1512 static void stex_free_irq(struct st_hba *hba) in stex_free_irq() argument
1514 struct pci_dev *pdev = hba->pdev; in stex_free_irq()
1516 free_irq(pdev->irq, hba); in stex_free_irq()
1517 if (hba->msi_enabled) in stex_free_irq()
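
stex_request_irq() prefers MSI and falls back to legacy INTx, recording the outcome in msi_enabled so stex_free_irq() can undo exactly what was set up; note the error path also disables MSI if request_irq() fails after MSI came up. A stubbed sketch of the pairing:

#include <stdio.h>

static int msi_enabled;

static int try_enable_msi(void) { return -1; }  /* pretend MSI is unavailable */
static void disable_msi(void)   { }
static int request_line(void)   { return 0; }   /* request_irq() stand-in */
static void free_line(void)     { }

static int setup_irq(void)
{
        msi_enabled = try_enable_msi() == 0;    /* fall back to INTx on failure */
        if (request_line() != 0) {
                if (msi_enabled)
                        disable_msi();          /* undo on error, as the driver does */
                return -1;
        }
        return 0;
}

static void teardown_irq(void)
{
        free_line();
        if (msi_enabled)
                disable_msi();
}

int main(void)
{
        if (setup_irq() == 0) {
                printf("irq up, msi=%d\n", msi_enabled);
                teardown_irq();
        }
        return 0;
}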
1523 struct st_hba *hba; in stex_probe() local
1544 hba = (struct st_hba *)host->hostdata; in stex_probe()
1545 memset(hba, 0, sizeof(struct st_hba)); in stex_probe()
1554 hba->mmio_base = pci_ioremap_bar(pdev, 0); in stex_probe()
1555	if (!hba->mmio_base) { in stex_probe()
1569 hba->cardtype = (unsigned int) id->driver_data; in stex_probe()
1570 ci = &stex_card_info[hba->cardtype]; in stex_probe()
1572 if (hba->cardtype == st_yel) in stex_probe()
1575 hba->dma_size = cp_offset + sizeof(struct st_frame); in stex_probe()
1576 if (hba->cardtype == st_seq || in stex_probe()
1577 (hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) { in stex_probe()
1578 hba->extra_offset = hba->dma_size; in stex_probe()
1579 hba->dma_size += ST_ADDITIONAL_MEM; in stex_probe()
1581 hba->dma_mem = dma_alloc_coherent(&pdev->dev, in stex_probe()
1582 hba->dma_size, &hba->dma_handle, GFP_KERNEL); in stex_probe()
1583 if (!hba->dma_mem) { in stex_probe()
1585 if (hba->cardtype == st_seq || in stex_probe()
1586 (hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) { in stex_probe()
1590 hba->dma_size = hba->extra_offset in stex_probe()
1592 hba->dma_mem = dma_alloc_coherent(&pdev->dev, in stex_probe()
1593 hba->dma_size, &hba->dma_handle, GFP_KERNEL); in stex_probe()
1596 if (!hba->dma_mem) { in stex_probe()
1604 hba->ccb = kcalloc(ci->rq_count, sizeof(struct st_ccb), GFP_KERNEL); in stex_probe()
1605 if (!hba->ccb) { in stex_probe()
1612 if (hba->cardtype == st_yel) in stex_probe()
1613 hba->scratch = (__le32 *)(hba->dma_mem + scratch_offset); in stex_probe()
1614 hba->status_buffer = (struct status_msg *)(hba->dma_mem + sts_offset); in stex_probe()
1615 hba->copy_buffer = hba->dma_mem + cp_offset; in stex_probe()
1616 hba->rq_count = ci->rq_count; in stex_probe()
1617 hba->rq_size = ci->rq_size; in stex_probe()
1618 hba->sts_count = ci->sts_count; in stex_probe()
1619 hba->alloc_rq = ci->alloc_rq; in stex_probe()
1620 hba->map_sg = ci->map_sg; in stex_probe()
1621 hba->send = ci->send; in stex_probe()
1622 hba->mu_status = MU_STATE_STARTING; in stex_probe()
1624 if (hba->cardtype == st_yel) in stex_probe()
1636 hba->host = host; in stex_probe()
1637 hba->pdev = pdev; in stex_probe()
1638 init_waitqueue_head(&hba->reset_waitq); in stex_probe()
1640 snprintf(hba->work_q_name, sizeof(hba->work_q_name), in stex_probe()
1642 hba->work_q = create_singlethread_workqueue(hba->work_q_name); in stex_probe()
1643 if (!hba->work_q) { in stex_probe()
1649 INIT_WORK(&hba->reset_work, stex_reset_work); in stex_probe()
1651 err = stex_request_irq(hba); in stex_probe()
1658 err = stex_handshake(hba); in stex_probe()
1669 pci_set_drvdata(pdev, hba); in stex_probe()
1683 stex_free_irq(hba); in stex_probe()
1685 destroy_workqueue(hba->work_q); in stex_probe()
1687 kfree(hba->ccb); in stex_probe()
1689 dma_free_coherent(&pdev->dev, hba->dma_size, in stex_probe()
1690 hba->dma_mem, hba->dma_handle); in stex_probe()
1692 iounmap(hba->mmio_base); in stex_probe()
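
The probe failure labels above form the usual reverse-order goto ladder: each label releases exactly one resource, so a failure at any step unwinds only what was actually acquired (irq, then workqueue, then ccb array, then the coherent DMA buffer, then the mapping). The shape of the ladder, reduced to three stub steps:

#include <stdio.h>

static int step(const char *what, int fail) { printf("setup %s\n", what); return fail; }
static void undo(const char *what)          { printf("undo  %s\n", what); }

static int probe(void)
{
        if (step("dma", 0)) goto out;
        if (step("ccb", 0)) goto free_dma;
        if (step("irq", 1)) goto free_ccb;      /* pretend IRQ setup fails */
        return 0;

free_ccb:
        undo("ccb");
free_dma:
        undo("dma");
out:
        return -1;
}

int main(void)
{
        return probe() ? 1 : 0;
}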
1703 static void stex_hba_stop(struct st_hba *hba) in stex_hba_stop() argument
1711 spin_lock_irqsave(hba->host->host_lock, flags); in stex_hba_stop()
1712 req = hba->alloc_rq(hba); in stex_hba_stop()
1713 if (hba->cardtype == st_yel) { in stex_hba_stop()
1715 memset(msg_h, 0, hba->rq_size); in stex_hba_stop()
1717 memset(req, 0, hba->rq_size); in stex_hba_stop()
1719 if (hba->cardtype == st_yosemite || hba->cardtype == st_yel) { in stex_hba_stop()
1730 hba->ccb[tag].cmd = NULL; in stex_hba_stop()
1731 hba->ccb[tag].sg_count = 0; in stex_hba_stop()
1732 hba->ccb[tag].sense_bufflen = 0; in stex_hba_stop()
1733 hba->ccb[tag].sense_buffer = NULL; in stex_hba_stop()
1734 hba->ccb[tag].req_type = PASSTHRU_REQ_TYPE; in stex_hba_stop()
1736 hba->send(hba, req, tag); in stex_hba_stop()
1737 spin_unlock_irqrestore(hba->host->host_lock, flags); in stex_hba_stop()
1740 while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) { in stex_hba_stop()
1742 hba->ccb[tag].req_type = 0; in stex_hba_stop()
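
stex_hba_stop() sends a PASSTHRU stop request and then busy-waits on the ccb's req_type outside the lock: the completion path clears the PASSTHRU bit, and on timeout the driver simply forces req_type to 0 and carries on. A toy version of that poll (tick counts stand in for msleep-based timing):

#include <stdio.h>

#define PASSTHRU_REQ_TYPE 0x1
#define TIMEOUT_TICKS     5     /* hypothetical */

static unsigned int req_type = PASSTHRU_REQ_TYPE;

static void tick(int t)
{
        if (t == 3)
                req_type &= ~PASSTHRU_REQ_TYPE; /* pretend the firmware acked */
}

int main(void)
{
        for (int t = 0; req_type & PASSTHRU_REQ_TYPE; t++) {
                if (t >= TIMEOUT_TICKS) {
                        req_type = 0;           /* give up, as the timeout path does */
                        break;
                }
                tick(t);                        /* msleep(1) + re-check in the driver */
        }
        printf("stopped\n");
        return 0;
}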
1749 static void stex_hba_free(struct st_hba *hba) in stex_hba_free() argument
1751 stex_free_irq(hba); in stex_hba_free()
1753 destroy_workqueue(hba->work_q); in stex_hba_free()
1755 iounmap(hba->mmio_base); in stex_hba_free()
1757 pci_release_regions(hba->pdev); in stex_hba_free()
1759 kfree(hba->ccb); in stex_hba_free()
1761 dma_free_coherent(&hba->pdev->dev, hba->dma_size, in stex_hba_free()
1762 hba->dma_mem, hba->dma_handle); in stex_hba_free()
1767 struct st_hba *hba = pci_get_drvdata(pdev); in stex_remove() local
1769 scsi_remove_host(hba->host); in stex_remove()
1771 stex_hba_stop(hba); in stex_remove()
1773 stex_hba_free(hba); in stex_remove()
1775 scsi_host_put(hba->host); in stex_remove()
1782 struct st_hba *hba = pci_get_drvdata(pdev); in stex_shutdown() local
1784 stex_hba_stop(hba); in stex_shutdown()