Lines matching refs: hw — every line in the Chelsio csiostor driver's interrupt-handling code that references the hw pointer (struct csio_hw *). The number on the left is the source line, the trailing "in <function>()" names the enclosing function, and "local"/"argument" records whether hw is a local variable or a parameter there.

47 struct csio_hw *hw = (struct csio_hw *) dev_id; in csio_nondata_isr() local
51 if (unlikely(!hw)) in csio_nondata_isr()
54 if (unlikely(pci_channel_offline(hw->pdev))) { in csio_nondata_isr()
55 CSIO_INC_STATS(hw, n_pcich_offline); in csio_nondata_isr()
59 spin_lock_irqsave(&hw->lock, flags); in csio_nondata_isr()
60 csio_hw_slow_intr_handler(hw); in csio_nondata_isr()
61 rv = csio_mb_isr_handler(hw); in csio_nondata_isr()
63 if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) { in csio_nondata_isr()
64 hw->flags |= CSIO_HWF_FWEVT_PENDING; in csio_nondata_isr()
65 spin_unlock_irqrestore(&hw->lock, flags); in csio_nondata_isr()
66 schedule_work(&hw->evtq_work); in csio_nondata_isr()
69 spin_unlock_irqrestore(&hw->lock, flags); in csio_nondata_isr()
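
The csio_nondata_isr() lines above show the driver's slow-path pattern: under hw->lock the handler runs the slow-interrupt and mailbox handlers, and if firmware-event work is not already pending it sets CSIO_HWF_FWEVT_PENDING and schedules hw->evtq_work so the events are processed outside hard-IRQ context. The same flag-and-schedule_work sequence recurs in csio_fwevt_handler() and csio_fcoe_isr() further down. Below is a minimal, self-contained sketch of that deferral pattern; struct demo_hw, DEMO_FWEVT_PENDING and demo_nondata_isr() are placeholder names for illustration, not the driver's API.

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#define DEMO_FWEVT_PENDING      0x1     /* placeholder for CSIO_HWF_FWEVT_PENDING */

struct demo_hw {
        spinlock_t              lock;
        unsigned long           flags;
        struct work_struct      evtq_work;      /* bottom half for firmware events */
};

/* Hard-IRQ handler: do the minimum, defer firmware-event processing. */
static irqreturn_t demo_nondata_isr(int irq, void *dev_id)
{
        struct demo_hw *hw = dev_id;
        unsigned long flags;

        if (unlikely(!hw))
                return IRQ_NONE;

        spin_lock_irqsave(&hw->lock, flags);
        /* ...slow-interrupt and mailbox handling would run here... */
        if (!(hw->flags & DEMO_FWEVT_PENDING)) {
                hw->flags |= DEMO_FWEVT_PENDING;
                spin_unlock_irqrestore(&hw->lock, flags);
                schedule_work(&hw->evtq_work);  /* process events in process context */
                return IRQ_HANDLED;
        }
        spin_unlock_irqrestore(&hw->lock, flags);
        return IRQ_HANDLED;
}

The pending flag guarantees at most one instance of the event work is queued at a time; in this sketch the work function would be expected to clear it once the event queue has been drained.
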
81 csio_fwevt_handler(struct csio_hw *hw) in csio_fwevt_handler() argument
86 rv = csio_fwevtq_handler(hw); in csio_fwevt_handler()
88 spin_lock_irqsave(&hw->lock, flags); in csio_fwevt_handler()
89 if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) { in csio_fwevt_handler()
90 hw->flags |= CSIO_HWF_FWEVT_PENDING; in csio_fwevt_handler()
91 spin_unlock_irqrestore(&hw->lock, flags); in csio_fwevt_handler()
92 schedule_work(&hw->evtq_work); in csio_fwevt_handler()
95 spin_unlock_irqrestore(&hw->lock, flags); in csio_fwevt_handler()
110 struct csio_hw *hw = (struct csio_hw *) dev_id; in csio_fwevt_isr() local
112 if (unlikely(!hw)) in csio_fwevt_isr()
115 if (unlikely(pci_channel_offline(hw->pdev))) { in csio_fwevt_isr()
116 CSIO_INC_STATS(hw, n_pcich_offline); in csio_fwevt_isr()
120 csio_fwevt_handler(hw); in csio_fwevt_isr()
131 csio_fwevt_intx_handler(struct csio_hw *hw, void *wr, uint32_t len, in csio_fwevt_intx_handler() argument
134 csio_fwevt_handler(hw); in csio_fwevt_intx_handler()
146 csio_process_scsi_cmpl(struct csio_hw *hw, void *wr, uint32_t len, in csio_process_scsi_cmpl() argument
155 ioreq = csio_scsi_cmpl_handler(hw, wr, len, flb, NULL, &scsiwr); in csio_process_scsi_cmpl()
162 csio_dbg(hw, "%s cmpl recvd ioreq:%p status:%d\n", in csio_process_scsi_cmpl()
166 spin_lock_irqsave(&hw->lock, flags); in csio_process_scsi_cmpl()
188 spin_unlock_irqrestore(&hw->lock, flags); in csio_process_scsi_cmpl()
191 csio_put_scsi_ioreq_lock(hw, in csio_process_scsi_cmpl()
192 csio_hw_to_scsim(hw), ioreq); in csio_process_scsi_cmpl()
194 spin_lock_irqsave(&hw->lock, flags); in csio_process_scsi_cmpl()
196 spin_unlock_irqrestore(&hw->lock, flags); in csio_process_scsi_cmpl()
214 struct csio_hw *hw = (struct csio_hw *)iq->owner; in csio_scsi_isr_handler() local
221 scm = csio_hw_to_scsim(hw); in csio_scsi_isr_handler()
223 if (unlikely(csio_wr_process_iq(hw, iq, csio_process_scsi_cmpl, in csio_scsi_isr_handler()
231 ioreq->io_cbfn(hw, ioreq); in csio_scsi_isr_handler()
234 csio_put_scsi_ddp_list_lock(hw, scm, &ioreq->gen_list, in csio_scsi_isr_handler()
240 csio_put_scsi_ioreq_list_lock(hw, scm, &cbfn_q, in csio_scsi_isr_handler()
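
csio_process_scsi_cmpl() and csio_scsi_isr_handler() above show the fast path: completed work requests pulled off the ingress queue are looked up via csio_scsi_cmpl_handler() and handled under hw->lock, and ioreqs gathered on a local cbfn_q list get their io_cbfn() completion callbacks invoked after the queue walk, with the ioreqs and DDP buffers then returned to the SCSI module's free lists. A generic sketch of that "collect under the lock, call back outside it" shape follows; the demo_* names and request layout are placeholders, not the driver's structures.

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_ioreq {
        struct list_head sm_list;               /* link on a completion list */
        void (*io_cbfn)(void *hw, struct demo_ioreq *req);
};

/*
 * Move completed requests onto a private list under the lock, then run the
 * per-request callbacks with the lock dropped, mirroring the way
 * csio_scsi_isr_handler() walks cbfn_q after csio_wr_process_iq().
 */
static void demo_run_completions(void *hw, spinlock_t *lock,
                                 struct list_head *done_q)
{
        struct demo_ioreq *req, *tmp;
        unsigned long flags;
        LIST_HEAD(cbfn_q);

        spin_lock_irqsave(lock, flags);
        list_splice_init(done_q, &cbfn_q);      /* take ownership of completions */
        spin_unlock_irqrestore(lock, flags);

        list_for_each_entry_safe(req, tmp, &cbfn_q, sm_list) {
                list_del_init(&req->sm_list);
                req->io_cbfn(hw, req);          /* completion callback, lock not held */
        }
}
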
259 struct csio_hw *hw; in csio_scsi_isr() local
264 hw = (struct csio_hw *)iq->owner; in csio_scsi_isr()
266 if (unlikely(pci_channel_offline(hw->pdev))) { in csio_scsi_isr()
267 CSIO_INC_STATS(hw, n_pcich_offline); in csio_scsi_isr()
285 csio_scsi_intx_handler(struct csio_hw *hw, void *wr, uint32_t len, in csio_scsi_intx_handler() argument
304 struct csio_hw *hw = (struct csio_hw *) dev_id; in csio_fcoe_isr() local
310 if (unlikely(!hw)) in csio_fcoe_isr()
313 if (unlikely(pci_channel_offline(hw->pdev))) { in csio_fcoe_isr()
314 CSIO_INC_STATS(hw, n_pcich_offline); in csio_fcoe_isr()
319 if (hw->intr_mode == CSIO_IM_INTX) in csio_fcoe_isr()
320 csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI_A)); in csio_fcoe_isr()
326 if (csio_hw_slow_intr_handler(hw)) in csio_fcoe_isr()
330 intx_q = csio_get_q(hw, hw->intr_iq_idx); in csio_fcoe_isr()
335 if (likely(csio_wr_process_iq(hw, intx_q, NULL, NULL) == 0)) in csio_fcoe_isr()
338 spin_lock_irqsave(&hw->lock, flags); in csio_fcoe_isr()
339 rv = csio_mb_isr_handler(hw); in csio_fcoe_isr()
340 if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) { in csio_fcoe_isr()
341 hw->flags |= CSIO_HWF_FWEVT_PENDING; in csio_fcoe_isr()
342 spin_unlock_irqrestore(&hw->lock, flags); in csio_fcoe_isr()
343 schedule_work(&hw->evtq_work); in csio_fcoe_isr()
346 spin_unlock_irqrestore(&hw->lock, flags); in csio_fcoe_isr()
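
csio_fcoe_isr() is the handler installed for INTx and MSI. It bails out early on a dead PCI channel (counting n_pcich_offline), clears the PF interrupt-cause register when running in INTx mode, lets csio_hw_slow_intr_handler() claim error interrupts, processes the dedicated interrupt-forwarding queue, then checks the mailbox and defers firmware events exactly as above. Below is a reduced sketch of that ordering and of the IRQ_NONE/IRQ_HANDLED bookkeeping a shared line requires; struct demo_adap and the demo_* helpers are placeholder stubs, not the driver's functions.

#include <linux/interrupt.h>
#include <linux/pci.h>

struct demo_adap {
        struct pci_dev *pdev;
};

/* Stubs standing in for the driver's slow-path and ingress-queue handlers. */
static int demo_slow_intr(struct demo_adap *adap) { return 0; }
static int demo_process_intx_queue(struct demo_adap *adap) { return 0; }

static irqreturn_t demo_fcoe_isr(int irq, void *dev_id)
{
        struct demo_adap *adap = dev_id;
        irqreturn_t ret = IRQ_NONE;

        if (unlikely(!adap))
                return IRQ_NONE;

        if (unlikely(pci_channel_offline(adap->pdev)))
                return IRQ_NONE;                /* EEH recovery in progress */

        /* In INTx mode the PF interrupt-cause register would be cleared here. */

        if (demo_slow_intr(adap))               /* error / slow-path interrupt? */
                ret = IRQ_HANDLED;

        if (demo_process_intx_queue(adap) == 0) /* forwarded fast-path work */
                ret = IRQ_HANDLED;

        /* Mailbox check + firmware-event deferral as in the earlier sketch. */
        return ret;
}

Returning IRQ_NONE when nothing was claimed matters on a shared INTx line: it lets the kernel detect spurious-interrupt storms caused by other devices on the same line.
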
352 csio_add_msix_desc(struct csio_hw *hw) in csio_add_msix_desc() argument
355 struct csio_msix_entries *entryp = &hw->msix_entries[0]; in csio_add_msix_desc()
358 int cnt = hw->num_sqsets + k; in csio_add_msix_desc()
363 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw)); in csio_add_msix_desc()
368 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw)); in csio_add_msix_desc()
375 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), in csio_add_msix_desc()
376 CSIO_PCI_FUNC(hw), i - CSIO_EXTRA_VECS); in csio_add_msix_desc()
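
csio_add_msix_desc() builds a readable name for each MSI-X vector — one for the non-data interrupt, one for firmware events, then one per SCSI queue set — embedding the PCI bus/device/function so the lines are easy to pick out in /proc/interrupts. A small sketch of that step; the format strings, DEMO_DESC_LEN and demo_msix_entry are placeholders rather than the driver's actual strings and sizes.

#include <linux/kernel.h>       /* snprintf() */
#include <linux/pci.h>

#define DEMO_DESC_LEN   32      /* placeholder for the driver's desc[] size */

struct demo_msix_entry {
        char desc[DEMO_DESC_LEN];
};

/* Label the two extra vectors, then one vector per SCSI queue set. */
static void demo_add_msix_desc(struct pci_dev *pdev,
                               struct demo_msix_entry *entries, int num_sqsets)
{
        int i;

        snprintf(entries[0].desc, DEMO_DESC_LEN, "csio-%02x:%02x:%x-nondata",
                 pdev->bus->number, PCI_SLOT(pdev->devfn),
                 PCI_FUNC(pdev->devfn));
        snprintf(entries[1].desc, DEMO_DESC_LEN, "csio-%02x:%02x:%x-fwevt",
                 pdev->bus->number, PCI_SLOT(pdev->devfn),
                 PCI_FUNC(pdev->devfn));

        for (i = 0; i < num_sqsets; i++)
                snprintf(entries[2 + i].desc, DEMO_DESC_LEN,
                         "csio-%02x:%02x:%x-scsi%d",
                         pdev->bus->number, PCI_SLOT(pdev->devfn),
                         PCI_FUNC(pdev->devfn), i);
}
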
381 csio_request_irqs(struct csio_hw *hw) in csio_request_irqs() argument
384 struct csio_msix_entries *entryp = &hw->msix_entries[0]; in csio_request_irqs()
387 if (hw->intr_mode != CSIO_IM_MSIX) { in csio_request_irqs()
388 rv = request_irq(hw->pdev->irq, csio_fcoe_isr, in csio_request_irqs()
389 (hw->intr_mode == CSIO_IM_MSI) ? in csio_request_irqs()
391 KBUILD_MODNAME, hw); in csio_request_irqs()
393 if (hw->intr_mode == CSIO_IM_MSI) in csio_request_irqs()
394 pci_disable_msi(hw->pdev); in csio_request_irqs()
395 csio_err(hw, "Failed to allocate interrupt line.\n"); in csio_request_irqs()
403 csio_add_msix_desc(hw); in csio_request_irqs()
406 entryp[k].desc, hw); in csio_request_irqs()
408 csio_err(hw, "IRQ request failed for vec %d err:%d\n", in csio_request_irqs()
413 entryp[k++].dev_id = (void *)hw; in csio_request_irqs()
416 entryp[k].desc, hw); in csio_request_irqs()
418 csio_err(hw, "IRQ request failed for vec %d err:%d\n", in csio_request_irqs()
423 entryp[k++].dev_id = (void *)hw; in csio_request_irqs()
426 for (i = 0; i < hw->num_pports; i++) { in csio_request_irqs()
427 info = &hw->scsi_cpu_info[i]; in csio_request_irqs()
429 struct csio_scsi_qset *sqset = &hw->sqset[i][j]; in csio_request_irqs()
430 struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx]; in csio_request_irqs()
435 csio_err(hw, in csio_request_irqs()
447 hw->flags |= CSIO_HWF_HOST_INTR_ENABLED; in csio_request_irqs()
453 entryp = &hw->msix_entries[i]; in csio_request_irqs()
456 pci_disable_msix(hw->pdev); in csio_request_irqs()
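
csio_request_irqs() covers both cases: for MSI/INTx it issues a single request_irq() on pdev->irq (IRQF_SHARED only for INTx) and disables MSI again on failure; for MSI-X it walks msix_entries[], requesting the non-data, firmware-event and per-queue-set vectors in turn with hw recorded as the dev_id, and on any failure frees what was already requested before disabling MSI-X. The sketch below condenses the MSI-X branch and its unwind path; it uses a single handler and a placeholder demo_vec array, whereas the driver installs a different handler per vector class.

#include <linux/interrupt.h>

struct demo_vec {
        unsigned int    vector;         /* Linux IRQ number behind this MSI-X entry */
        char            desc[32];       /* name shown in /proc/interrupts */
};

/*
 * Request every MSI-X vector with the same dev_id (the hw pointer); on any
 * failure, free what was already requested so MSI-X can be disabled cleanly.
 */
static int demo_request_msix(struct demo_vec *vecs, int nvecs,
                             irq_handler_t handler, void *hw)
{
        int i, rv;

        for (i = 0; i < nvecs; i++) {
                rv = request_irq(vecs[i].vector, handler, 0, vecs[i].desc, hw);
                if (rv) {
                        while (--i >= 0)
                                free_irq(vecs[i].vector, hw);   /* unwind */
                        return rv;
                }
        }
        return 0;
}

The dev_id passed to request_irq() must be the one later passed to free_irq(), which is why the listing shows the driver stashing hw into each msix_entries[] slot.
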
462 csio_disable_msix(struct csio_hw *hw, bool free) in csio_disable_msix() argument
466 int cnt = hw->num_sqsets + CSIO_EXTRA_VECS; in csio_disable_msix()
470 entryp = &hw->msix_entries[i]; in csio_disable_msix()
474 pci_disable_msix(hw->pdev); in csio_disable_msix()
479 csio_reduce_sqsets(struct csio_hw *hw, int cnt) in csio_reduce_sqsets() argument
484 while (cnt < hw->num_sqsets) { in csio_reduce_sqsets()
485 for (i = 0; i < hw->num_pports; i++) { in csio_reduce_sqsets()
486 info = &hw->scsi_cpu_info[i]; in csio_reduce_sqsets()
489 hw->num_sqsets--; in csio_reduce_sqsets()
490 if (hw->num_sqsets <= cnt) in csio_reduce_sqsets()
496 csio_dbg(hw, "Reduced sqsets to %d\n", hw->num_sqsets); in csio_reduce_sqsets()
500 csio_enable_msix(struct csio_hw *hw) in csio_enable_msix() argument
508 min = hw->num_pports + extra; in csio_enable_msix()
509 cnt = hw->num_sqsets + extra; in csio_enable_msix()
512 if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || !csio_is_hw_master(hw)) in csio_enable_msix()
513 cnt = min_t(uint8_t, hw->cfg_niq, cnt); in csio_enable_msix()
522 csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt); in csio_enable_msix()
524 cnt = pci_enable_msix_range(hw->pdev, entries, min, cnt); in csio_enable_msix()
530 if (cnt < (hw->num_sqsets + extra)) { in csio_enable_msix()
531 csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra); in csio_enable_msix()
532 csio_reduce_sqsets(hw, cnt - extra); in csio_enable_msix()
537 entryp = &hw->msix_entries[i]; in csio_enable_msix()
543 csio_set_nondata_intr_idx(hw, entries[k].entry); in csio_enable_msix()
544 csio_set_mb_intr_idx(csio_hw_to_mbm(hw), entries[k++].entry); in csio_enable_msix()
545 csio_set_fwevt_intr_idx(hw, entries[k++].entry); in csio_enable_msix()
547 for (i = 0; i < hw->num_pports; i++) { in csio_enable_msix()
548 info = &hw->scsi_cpu_info[i]; in csio_enable_msix()
550 for (j = 0; j < hw->num_scsi_msix_cpus; j++) { in csio_enable_msix()
552 hw->sqset[i][j].intr_idx = entries[n].entry; in csio_enable_msix()
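
csio_enable_msix() asks for hw->num_sqsets + CSIO_EXTRA_VECS vectors, caps the request at the firmware's cfg_niq when using soft parameters or when this function is not the master, and allocates them with pci_enable_msix_range() so a partial allocation can still succeed; if fewer vectors come back, csio_reduce_sqsets() trims the SCSI queue sets to fit before the entry indices are handed to the non-data, mailbox, firmware-event and per-queue-set consumers. A sketch of the range request and the shrink-to-fit fallback; DEMO_EXTRA_VECS, demo_enable_msix() and the reduce callback are placeholders, and the real minimum is one queue set per port rather than one overall.

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>

#define DEMO_EXTRA_VECS 2       /* one non-data/mailbox vector + one firmware-event vector */

/* Returns the number of per-queue-set vectors usable, or a negative errno. */
static int demo_enable_msix(struct pci_dev *pdev, int num_sqsets,
                            void (*reduce_sqsets)(int new_cnt))
{
        struct msix_entry *entries;
        int min = 1 + DEMO_EXTRA_VECS;          /* survive with a single queue set */
        int want = num_sqsets + DEMO_EXTRA_VECS;
        int i, got;

        entries = kcalloc(want, sizeof(*entries), GFP_KERNEL);
        if (!entries)
                return -ENOMEM;

        for (i = 0; i < want; i++)
                entries[i].entry = i;           /* consecutive MSI-X table entries */

        /* May return fewer vectors than 'want', but never fewer than 'min'. */
        got = pci_enable_msix_range(pdev, entries, min, want);
        if (got < 0) {
                kfree(entries);
                return got;                     /* caller falls back to MSI/INTx */
        }

        if (got < want)
                reduce_sqsets(got - DEMO_EXTRA_VECS);   /* shrink queue sets to fit */

        /* entries[i].vector / entries[i].entry would be recorded per consumer here. */
        kfree(entries);
        return got - DEMO_EXTRA_VECS;
}
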
563 csio_intr_enable(struct csio_hw *hw) in csio_intr_enable() argument
565 hw->intr_mode = CSIO_IM_NONE; in csio_intr_enable()
566 hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED; in csio_intr_enable()
569 if ((csio_msi == 2) && !csio_enable_msix(hw)) in csio_intr_enable()
570 hw->intr_mode = CSIO_IM_MSIX; in csio_intr_enable()
573 if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || in csio_intr_enable()
574 !csio_is_hw_master(hw)) { in csio_intr_enable()
577 if (hw->cfg_niq < (hw->num_sqsets + extra)) { in csio_intr_enable()
578 csio_dbg(hw, "Reducing sqsets to %d\n", in csio_intr_enable()
579 hw->cfg_niq - extra); in csio_intr_enable()
580 csio_reduce_sqsets(hw, hw->cfg_niq - extra); in csio_intr_enable()
584 if ((csio_msi == 1) && !pci_enable_msi(hw->pdev)) in csio_intr_enable()
585 hw->intr_mode = CSIO_IM_MSI; in csio_intr_enable()
587 hw->intr_mode = CSIO_IM_INTX; in csio_intr_enable()
590 csio_dbg(hw, "Using %s interrupt mode.\n", in csio_intr_enable()
591 (hw->intr_mode == CSIO_IM_MSIX) ? "MSIX" : in csio_intr_enable()
592 ((hw->intr_mode == CSIO_IM_MSI) ? "MSI" : "INTx")); in csio_intr_enable()
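
csio_intr_enable() picks the mode from the csio_msi module parameter: MSI-X is attempted when csio_msi is 2, MSI when it is 1, and legacy INTx is the final fallback; in the non-MSI-X path it also trims the queue sets when the firmware exposes fewer ingress queues (cfg_niq) than the queue sets plus extra vectors need. A compact sketch of that ladder; the enum, demo_msi and demo_try_msix() are placeholders, and the queue-set trimming is only noted in a comment.

#include <linux/errno.h>
#include <linux/pci.h>

enum demo_intr_mode { DEMO_IM_NONE, DEMO_IM_INTX, DEMO_IM_MSI, DEMO_IM_MSIX };

static int demo_msi = 2;        /* 2 = prefer MSI-X, 1 = MSI, otherwise INTx */

/* Placeholder standing in for the MSI-X setup in the previous sketch. */
static int demo_try_msix(struct pci_dev *pdev)
{
        return -ENOSYS;         /* pretend MSI-X is unavailable */
}

static enum demo_intr_mode demo_intr_enable(struct pci_dev *pdev)
{
        if (demo_msi == 2 && demo_try_msix(pdev) == 0)
                return DEMO_IM_MSIX;

        /* Queue-set trimming against the firmware's niq limit would go here. */

        if (demo_msi == 1 && pci_enable_msi(pdev) == 0)
                return DEMO_IM_MSI;

        return DEMO_IM_INTX;    /* legacy line interrupt */
}
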
596 csio_intr_disable(struct csio_hw *hw, bool free) in csio_intr_disable() argument
598 csio_hw_intr_disable(hw); in csio_intr_disable()
600 switch (hw->intr_mode) { in csio_intr_disable()
602 csio_disable_msix(hw, free); in csio_intr_disable()
606 free_irq(hw->pdev->irq, hw); in csio_intr_disable()
607 pci_disable_msi(hw->pdev); in csio_intr_disable()
611 free_irq(hw->pdev->irq, hw); in csio_intr_disable()
616 hw->intr_mode = CSIO_IM_NONE; in csio_intr_disable()
617 hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED; in csio_intr_disable()
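
csio_intr_disable() is the mirror image: it first masks interrupts in hardware via csio_hw_intr_disable(), then, depending on intr_mode, frees the MSI-X vectors and disables MSI-X, or frees the single irq line and disables MSI, or just frees the INTx line, and finally clears intr_mode and the host-interrupt flag. A matching teardown sketch, reusing the placeholder names from the sketch above.

#include <linux/interrupt.h>
#include <linux/pci.h>

enum demo_intr_mode { DEMO_IM_NONE, DEMO_IM_INTX, DEMO_IM_MSI, DEMO_IM_MSIX };

static void demo_intr_disable(struct pci_dev *pdev, enum demo_intr_mode mode,
                              void *dev_id)
{
        switch (mode) {
        case DEMO_IM_MSIX:
                /* free_irq() on each requested vector would come first, then: */
                pci_disable_msix(pdev);
                break;
        case DEMO_IM_MSI:
                free_irq(pdev->irq, dev_id);
                pci_disable_msi(pdev);
                break;
        case DEMO_IM_INTX:
                free_irq(pdev->irq, dev_id);
                break;
        default:
                break;
        }
}
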