Lines Matching refs:ha
102 static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
104 static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
107 static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
109 static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
175 struct qla_hw_data *ha = vha->hw; in qlt_find_host_by_d_id() local
184 BUG_ON(ha->tgt.tgt_vp_map == NULL); in qlt_find_host_by_d_id()
185 vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx; in qlt_find_host_by_d_id()
186 if (likely(test_bit(vp_idx, ha->vp_idx_map))) in qlt_find_host_by_d_id()
187 return ha->tgt.tgt_vp_map[vp_idx].vha; in qlt_find_host_by_d_id()
196 struct qla_hw_data *ha = vha->hw; in qlt_find_host_by_vp_idx() local
201 BUG_ON(ha->tgt.tgt_vp_map == NULL); in qlt_find_host_by_vp_idx()
202 if (likely(test_bit(vp_idx, ha->vp_idx_map))) in qlt_find_host_by_vp_idx()
203 return ha->tgt.tgt_vp_map[vp_idx].vha; in qlt_find_host_by_vp_idx()
398 struct qla_hw_data *ha = vha->hw; in qlt_free_session_done() local
436 ha->tgt.tgt_ops->free_session(sess); in qlt_free_session_done()
456 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_free_session_done()
464 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_free_session_done()
498 struct qla_hw_data *ha = vha->hw; in qlt_reset() local
512 if (!list_empty(&ha->tgt.qla_tgt->sess_list)) { in qlt_reset()
513 sess = list_entry(ha->tgt.qla_tgt->sess_list.next, in qlt_reset()
537 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id); in qlt_reset()
564 uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5; in qlt_schedule_sess_for_deletion()
617 struct qla_hw_data *ha = vha->hw; in qla24xx_get_loop_id() local
624 gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), in qla24xx_get_loop_id()
629 vha->vp_idx, qla2x00_gid_list_size(ha)); in qla24xx_get_loop_id()
654 id_iter += ha->gid_list_info_size; in qla24xx_get_loop_id()
658 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), in qla24xx_get_loop_id()
677 struct qla_hw_data *ha = vha->hw; in qlt_del_sess_work_fn() local
681 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_del_sess_work_fn()
694 ha->tgt.tgt_ops->shutdown_sess(sess); in qlt_del_sess_work_fn()
695 ha->tgt.tgt_ops->put_sess(sess); in qlt_del_sess_work_fn()
702 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_del_sess_work_fn()
714 struct qla_hw_data *ha = vha->hw; in qlt_create_sess() local
720 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_create_sess()
735 spin_unlock_irqrestore(&ha->hardware_lock, in qlt_create_sess()
744 ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, in qlt_create_sess()
752 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_create_sess()
757 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_create_sess()
795 if (ha->tgt.tgt_ops->check_initiator_node_acl(vha, in qlt_create_sess()
810 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_create_sess()
814 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_create_sess()
831 struct qla_hw_data *ha = vha->hw; in qlt_fc_port_added() local
845 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_fc_port_added()
847 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_fc_port_added()
852 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_fc_port_added()
858 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_fc_port_added()
861 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_fc_port_added()
878 ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, in qlt_fc_port_added()
889 ha->tgt.tgt_ops->put_sess(sess); in qlt_fc_port_added()
890 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_fc_port_added()
934 struct qla_hw_data *ha = tgt->ha; in test_tgt_sess_count() local
941 spin_lock_irqsave(&ha->hardware_lock, flags); in test_tgt_sess_count()
946 spin_unlock_irqrestore(&ha->hardware_lock, flags); in test_tgt_sess_count()
955 struct qla_hw_data *ha = tgt->ha; in qlt_stop_phase1() local
987 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_stop_phase1()
990 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_stop_phase1()
1014 if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha)) in qlt_stop_phase1()
1026 struct qla_hw_data *ha = tgt->ha; in qlt_stop_phase2() local
1027 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); in qlt_stop_phase2()
1042 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_stop_phase2()
1044 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_stop_phase2()
1046 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_stop_phase2()
1050 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_stop_phase2()
1114 struct qla_hw_data *ha = vha->hw; in qlt_send_notify_ack() local
1118 ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha); in qlt_send_notify_ack()
1174 struct qla_hw_data *ha = vha->hw; in qlt_24xx_send_abts_resp() local
1181 ha, abts, status); in qlt_24xx_send_abts_resp()
1363 struct qla_hw_data *ha = vha->hw; in __qlt_24xx_handle_abts() local
1414 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK, in __qlt_24xx_handle_abts()
1433 struct qla_hw_data *ha = vha->hw; in qlt_24xx_handle_abts() local
1465 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); in qlt_24xx_handle_abts()
1497 static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha, in qlt_24xx_send_task_mgmt_ctio() argument
1504 ql_dbg(ql_dbg_tgt, ha, 0xe008, in qlt_24xx_send_task_mgmt_ctio()
1506 ha, atio, resp_code); in qlt_24xx_send_task_mgmt_ctio()
1509 if (qlt_issue_marker(ha, 1) != QLA_SUCCESS) in qlt_24xx_send_task_mgmt_ctio()
1512 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL); in qlt_24xx_send_task_mgmt_ctio()
1514 ql_dbg(ql_dbg_tgt, ha, 0xe04c, in qlt_24xx_send_task_mgmt_ctio()
1516 "request packet\n", ha->vp_idx, __func__); in qlt_24xx_send_task_mgmt_ctio()
1525 ctio->vp_index = ha->vp_idx; in qlt_24xx_send_task_mgmt_ctio()
1541 qla2x00_start_iocbs(ha, ha->req); in qlt_24xx_send_task_mgmt_ctio()
1554 struct qla_hw_data *ha = vha->hw; in qlt_xmit_tm_rsp() local
1561 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_xmit_tm_rsp()
1563 if (qla2x00_reset_active(vha) || mcmd->reset_count != ha->chip_reset) { in qlt_xmit_tm_rsp()
1571 ha->chip_reset); in qlt_xmit_tm_rsp()
1572 ha->tgt.tgt_ops->free_mcmd(mcmd); in qlt_xmit_tm_rsp()
1573 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_xmit_tm_rsp()
1596 ha->tgt.tgt_ops->free_mcmd(mcmd); in qlt_xmit_tm_rsp()
1597 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_xmit_tm_rsp()
1609 prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg, in qlt_pci_map_calc_cnt()
1636 prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev, in qlt_pci_map_calc_cnt()
1664 struct qla_hw_data *ha = vha->hw; in qlt_unmap_sg() local
1669 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); in qlt_unmap_sg()
1673 pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt, in qlt_unmap_sg()
1677 qla2x00_clean_dsd_pool(ha, NULL, cmd); in qlt_unmap_sg()
1680 dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma); in qlt_unmap_sg()
1730 struct qla_hw_data *ha = vha->hw; in qlt_make_handle() local
1733 h = ha->tgt.current_handle; in qlt_make_handle()
1739 if (h == ha->tgt.current_handle) { in qlt_make_handle()
1742 "empty cmd slots in ha %p\n", vha->vp_idx, ha); in qlt_make_handle()
1748 (ha->tgt.cmds[h-1] != NULL)); in qlt_make_handle()
1751 ha->tgt.current_handle = h; in qlt_make_handle()
1762 struct qla_hw_data *ha = vha->hw; in qlt_24xx_build_ctio_pkt() local
1783 ha->tgt.cmds[h-1] = prm->cmd; in qlt_24xx_build_ctio_pkt()
1922 struct qla_hw_data *ha = vha->hw; in qlt_pre_xmit_response() local
1970 (IS_FWI2_CAPABLE(ha) && in qlt_pre_xmit_response()
1981 static inline int qlt_need_explicit_conf(struct qla_hw_data *ha, in qlt_need_explicit_conf() argument
1984 if (ha->tgt.enable_class_2) in qlt_need_explicit_conf()
1990 return ha->tgt.enable_explicit_conf && in qlt_need_explicit_conf()
2089 if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) { in qlt_24xx_init_ctio_to_isp()
2099 if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) { in qlt_24xx_init_ctio_to_isp()
2129 "lost", prm->tgt->ha->vp_idx, in qlt_24xx_init_ctio_to_isp()
2267 struct qla_hw_data *ha; in qlt_build_ctio_crc2_pkt() local
2277 ha = vha->hw; in qlt_build_ctio_crc2_pkt()
2318 else if (IS_PI_UNINIT_CAPABLE(ha)) { in qlt_build_ctio_crc2_pkt()
2361 ha->tgt.cmds[h-1] = prm->cmd; in qlt_build_ctio_crc2_pkt()
2396 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma); in qlt_build_ctio_crc2_pkt()
2443 if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd, in qlt_build_ctio_crc2_pkt()
2446 } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd, in qlt_build_ctio_crc2_pkt()
2455 if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd, in qlt_build_ctio_crc2_pkt()
2476 struct qla_hw_data *ha = vha->hw; in qlt_xmit_response() local
2483 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_xmit_response()
2491 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_xmit_response()
2494 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_xmit_response()
2511 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_xmit_response()
2513 if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) { in qlt_xmit_response()
2523 ha->chip_reset); in qlt_xmit_response()
2524 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_xmit_response()
2560 if (qlt_need_explicit_conf(ha, cmd, 0)) { in qlt_xmit_response()
2616 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_xmit_response()
2622 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_xmit_response()
2632 struct qla_hw_data *ha = vha->hw; in qlt_rdy_to_xfer() local
2652 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_rdy_to_xfer()
2654 if (qla2x00_reset_active(vha) || (cmd->reset_count != ha->chip_reset) || in qlt_rdy_to_xfer()
2665 ha->chip_reset); in qlt_rdy_to_xfer()
2666 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_rdy_to_xfer()
2697 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_rdy_to_xfer()
2703 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_rdy_to_xfer()
2851 struct qla_hw_data *ha = vha->hw; in __qlt_send_term_imm_notif() local
2856 "Sending TERM ELS CTIO (ha=%p)\n", ha); in __qlt_send_term_imm_notif()
2935 struct qla_hw_data *ha = vha->hw; in __qlt_send_term_exchange() local
2940 ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha); in __qlt_send_term_exchange()
3227 struct qla_hw_data *ha = vha->hw; in qlt_get_cmd() local
3230 if (ha->tgt.cmds[handle] != NULL) { in qlt_get_cmd()
3231 struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle]; in qlt_get_cmd()
3232 ha->tgt.cmds[handle] = NULL; in qlt_get_cmd()
3282 struct qla_hw_data *ha = vha->hw; in qlt_abort_cmd_on_host_reset() local
3301 ha->tgt.tgt_ops->handle_data(cmd); in qlt_abort_cmd_on_host_reset()
3314 ha->tgt.tgt_ops->free_cmd(cmd); in qlt_abort_cmd_on_host_reset()
3318 qlt_host_reset_handler(struct qla_hw_data *ha) in qlt_host_reset_handler() argument
3322 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); in qlt_host_reset_handler()
3340 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_host_reset_handler()
3349 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_host_reset_handler()
3359 struct qla_hw_data *ha = vha->hw; in qlt_do_ctio_completion() local
3437 ha->tgt.tgt_ops->handle_dif_err(cmd); in qlt_do_ctio_completion()
3480 ha->tgt.tgt_ops->handle_data(cmd); in qlt_do_ctio_completion()
3499 ha->tgt.tgt_ops->free_cmd(cmd); in qlt_do_ctio_completion()
3542 struct qla_hw_data *ha = vha->hw; in __qlt_do_work() local
3585 ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length, in __qlt_do_work()
3592 spin_lock_irqsave(&ha->hardware_lock, flags); in __qlt_do_work()
3593 ha->tgt.tgt_ops->put_sess(sess); in __qlt_do_work()
3594 spin_unlock_irqrestore(&ha->hardware_lock, flags); in __qlt_do_work()
3604 spin_lock_irqsave(&ha->hardware_lock, flags); in __qlt_do_work()
3609 ha->tgt.tgt_ops->put_sess(sess); in __qlt_do_work()
3610 spin_unlock_irqrestore(&ha->hardware_lock, flags); in __qlt_do_work()
3667 struct qla_hw_data *ha = vha->hw; in qlt_create_sess_from_atio() local
3709 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_create_sess_from_atio()
3711 ha->tgt.tgt_ops->put_sess(sess); in qlt_create_sess_from_atio()
3712 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_create_sess_from_atio()
3725 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_create_sess_from_atio()
3727 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_create_sess_from_atio()
3736 struct qla_hw_data *ha = vha->hw; in qlt_handle_cmd_for_atio() local
3747 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id); in qlt_handle_cmd_for_atio()
3784 ha->tgt.tgt_ops->put_sess(sess); in qlt_handle_cmd_for_atio()
3806 struct qla_hw_data *ha = vha->hw; in qlt_issue_task_mgmt() local
3899 res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0); in qlt_issue_task_mgmt()
3915 struct qla_hw_data *ha = vha->hw; in qlt_handle_task_mgmt() local
3925 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, in qlt_handle_task_mgmt()
3948 struct qla_hw_data *ha = vha->hw; in __qlt_abort_task() local
3970 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK, in __qlt_abort_task()
3987 struct qla_hw_data *ha = vha->hw; in qlt_abort_task() local
3991 loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb); in qlt_abort_task()
3993 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id); in qlt_abort_task()
4138 struct qla_hw_data *ha = vha->hw; in qlt_24xx_handle_els() local
4268 if (ha->current_topology != ISP_CFG_F) { in qlt_24xx_handle_els()
4442 struct qla_hw_data *ha = vha->hw; in qlt_handle_srr() local
4458 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_handle_srr()
4461 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_handle_srr()
4484 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_handle_srr()
4487 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_handle_srr()
4516 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_handle_srr()
4519 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_handle_srr()
4547 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_handle_srr()
4559 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_handle_srr()
4565 struct qla_hw_data *ha = vha->hw; in qlt_reject_free_srr_imm() local
4570 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_reject_free_srr_imm()
4580 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_reject_free_srr_imm()
4759 struct qla_hw_data *ha = vha->hw; in qlt_handle_imm_notify() local
4850 GET_TARGET_ID(ha, (struct atio_from_isp *)iocb), in qlt_handle_imm_notify()
4899 struct qla_hw_data *ha = vha->hw; in __qlt_send_busy() local
4903 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, in __qlt_send_busy()
4957 struct qla_hw_data *ha = vha->hw; in qlt_alloc_qfull_cmd() local
4985 sess = ha->tgt.tgt_ops->find_sess_by_s_id in qlt_alloc_qfull_cmd()
5042 struct qla_hw_data *ha = vha->hw; in qlt_free_qfull_cmds() local
5048 if (list_empty(&ha->tgt.q_full_list)) in qlt_free_qfull_cmds()
5055 if (list_empty(&ha->tgt.q_full_list)) { in qlt_free_qfull_cmds()
5060 list_for_each_entry_safe(cmd, tcmd, &ha->tgt.q_full_list, cmd_list) { in qlt_free_qfull_cmds()
5118 struct qla_hw_data *ha = vha->hw; in qlt_chk_qfull_thresh_hold() local
5121 if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha)) in qlt_chk_qfull_thresh_hold()
5134 struct qla_hw_data *ha = vha->hw; in qlt_24xx_atio_pkt() local
5140 "ATIO pkt, but no tgt (ha %p)", ha); in qlt_24xx_atio_pkt()
5227 struct qla_hw_data *ha = vha->hw; in qlt_response_pkt() local
5233 "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha); in qlt_response_pkt()
5413 struct qla_hw_data *ha = vha->hw; in qlt_async_event() local
5417 if (!ha->tgt.tgt_ops) in qlt_async_event()
5422 "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha); in qlt_async_event()
5427 IS_QLA2100(ha)) in qlt_async_event()
5586 struct qla_hw_data *ha = vha->hw; in qlt_abort_work() local
5593 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_abort_work()
5602 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, in qlt_abort_work()
5605 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_abort_work()
5612 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_abort_work()
5631 ha->tgt.tgt_ops->put_sess(sess); in qlt_abort_work()
5632 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_abort_work()
5638 ha->tgt.tgt_ops->put_sess(sess); in qlt_abort_work()
5639 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_abort_work()
5647 struct qla_hw_data *ha = vha->hw; in qlt_tmr_work() local
5656 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_tmr_work()
5662 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); in qlt_tmr_work()
5664 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_tmr_work()
5671 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_tmr_work()
5692 ha->tgt.tgt_ops->put_sess(sess); in qlt_tmr_work()
5693 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_tmr_work()
5699 ha->tgt.tgt_ops->put_sess(sess); in qlt_tmr_work()
5700 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_tmr_work()
5745 int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) in qlt_add_target() argument
5752 if (!IS_TGT_MODE_CAPABLE(ha)) { in qlt_add_target()
5759 "Registering target for host %ld(%p).\n", base_vha->host_no, ha); in qlt_add_target()
5773 tgt->ha = ha; in qlt_add_target()
5811 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha) in qlt_remove_target() argument
5829 vha->host_no, ha); in qlt_remove_target()
5870 struct qla_hw_data *ha; in qlt_lport_register() local
5879 ha = vha->hw; in qlt_lport_register()
5888 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_lport_register()
5892 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_lport_register()
5898 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_lport_register()
5901 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_lport_register()
5935 struct qla_hw_data *ha = vha->hw; in qlt_lport_deregister() local
5941 ha->tgt.tgt_ops = NULL; in qlt_lport_deregister()
5952 struct qla_hw_data *ha = vha->hw; in qlt_set_mode() local
5966 if (ha->tgt.ini_mode_force_reverse) in qlt_set_mode()
5973 struct qla_hw_data *ha = vha->hw; in qlt_clear_mode() local
5989 if (ha->tgt.ini_mode_force_reverse) in qlt_clear_mode()
6001 struct qla_hw_data *ha = vha->hw; in qlt_enable_vha() local
6004 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); in qlt_enable_vha()
6014 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_enable_vha()
6017 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_enable_vha()
6037 struct qla_hw_data *ha = vha->hw; in qlt_disable_vha() local
6049 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_disable_vha()
6051 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_disable_vha()
6064 qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha) in qlt_vport_create() argument
6082 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; in qlt_vport_create()
6084 qlt_add_target(ha, vha); in qlt_vport_create()
6115 struct qla_hw_data *ha = vha->hw; in qlt_init_atio_q_entries() local
6117 struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring; in qlt_init_atio_q_entries()
6122 for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) { in qlt_init_atio_q_entries()
6136 struct qla_hw_data *ha = vha->hw; in qlt_24xx_process_atio_queue() local
6143 while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) { in qlt_24xx_process_atio_queue()
6144 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; in qlt_24xx_process_atio_queue()
6150 ha->tgt.atio_ring_index++; in qlt_24xx_process_atio_queue()
6151 if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) { in qlt_24xx_process_atio_queue()
6152 ha->tgt.atio_ring_index = 0; in qlt_24xx_process_atio_queue()
6153 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring; in qlt_24xx_process_atio_queue()
6155 ha->tgt.atio_ring_ptr++; in qlt_24xx_process_atio_queue()
6158 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; in qlt_24xx_process_atio_queue()
6164 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index); in qlt_24xx_process_atio_queue()
6171 struct qla_hw_data *ha = vha->hw; in qlt_24xx_config_rings() local
6179 if (IS_ATIO_MSIX_CAPABLE(ha)) { in qlt_24xx_config_rings()
6180 struct qla_msix_entry *msix = &ha->msix_entries[2]; in qlt_24xx_config_rings()
6181 struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb; in qlt_24xx_config_rings()
6193 struct qla_hw_data *ha = vha->hw; in qlt_24xx_config_nvram_stage1() local
6196 if (!ha->tgt.saved_set) { in qlt_24xx_config_nvram_stage1()
6198 ha->tgt.saved_exchange_count = nv->exchange_count; in qlt_24xx_config_nvram_stage1()
6199 ha->tgt.saved_firmware_options_1 = in qlt_24xx_config_nvram_stage1()
6201 ha->tgt.saved_firmware_options_2 = in qlt_24xx_config_nvram_stage1()
6203 ha->tgt.saved_firmware_options_3 = in qlt_24xx_config_nvram_stage1()
6205 ha->tgt.saved_set = 1; in qlt_24xx_config_nvram_stage1()
6233 if (ha->tgt.saved_set) { in qlt_24xx_config_nvram_stage1()
6234 nv->exchange_count = ha->tgt.saved_exchange_count; in qlt_24xx_config_nvram_stage1()
6236 ha->tgt.saved_firmware_options_1; in qlt_24xx_config_nvram_stage1()
6238 ha->tgt.saved_firmware_options_2; in qlt_24xx_config_nvram_stage1()
6240 ha->tgt.saved_firmware_options_3; in qlt_24xx_config_nvram_stage1()
6248 if (ha->tgt.enable_class_2) { in qlt_24xx_config_nvram_stage1()
6266 struct qla_hw_data *ha = vha->hw; in qlt_24xx_config_nvram_stage2() local
6268 if (ha->tgt.node_name_set) { in qlt_24xx_config_nvram_stage2()
6269 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); in qlt_24xx_config_nvram_stage2()
6277 struct qla_hw_data *ha = vha->hw; in qlt_81xx_config_nvram_stage1() local
6283 if (!ha->tgt.saved_set) { in qlt_81xx_config_nvram_stage1()
6285 ha->tgt.saved_exchange_count = nv->exchange_count; in qlt_81xx_config_nvram_stage1()
6286 ha->tgt.saved_firmware_options_1 = in qlt_81xx_config_nvram_stage1()
6288 ha->tgt.saved_firmware_options_2 = in qlt_81xx_config_nvram_stage1()
6290 ha->tgt.saved_firmware_options_3 = in qlt_81xx_config_nvram_stage1()
6292 ha->tgt.saved_set = 1; in qlt_81xx_config_nvram_stage1()
6320 if (ha->tgt.saved_set) { in qlt_81xx_config_nvram_stage1()
6321 nv->exchange_count = ha->tgt.saved_exchange_count; in qlt_81xx_config_nvram_stage1()
6323 ha->tgt.saved_firmware_options_1; in qlt_81xx_config_nvram_stage1()
6325 ha->tgt.saved_firmware_options_2; in qlt_81xx_config_nvram_stage1()
6327 ha->tgt.saved_firmware_options_3; in qlt_81xx_config_nvram_stage1()
6335 if (ha->tgt.enable_class_2) { in qlt_81xx_config_nvram_stage1()
6353 struct qla_hw_data *ha = vha->hw; in qlt_81xx_config_nvram_stage2() local
6358 if (ha->tgt.node_name_set) { in qlt_81xx_config_nvram_stage2()
6359 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); in qlt_81xx_config_nvram_stage2()
6365 qlt_83xx_iospace_config(struct qla_hw_data *ha) in qlt_83xx_iospace_config() argument
6370 ha->msix_count += 1; /* For ATIO Q */ in qlt_83xx_iospace_config()
6401 qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) in qlt_probe_one_stage1() argument
6406 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { in qlt_probe_one_stage1()
6407 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in; in qlt_probe_one_stage1()
6408 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out; in qlt_probe_one_stage1()
6410 ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in; in qlt_probe_one_stage1()
6411 ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out; in qlt_probe_one_stage1()
6424 struct qla_hw_data *ha; in qla83xx_msix_atio_q() local
6428 ha = rsp->hw; in qla83xx_msix_atio_q()
6429 vha = pci_get_drvdata(ha->pdev); in qla83xx_msix_atio_q()
6431 spin_lock_irqsave(&ha->hardware_lock, flags); in qla83xx_msix_atio_q()
6436 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla83xx_msix_atio_q()
6442 qlt_mem_alloc(struct qla_hw_data *ha) in qlt_mem_alloc() argument
6447 ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) * in qlt_mem_alloc()
6449 if (!ha->tgt.tgt_vp_map) in qlt_mem_alloc()
6452 ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev, in qlt_mem_alloc()
6453 (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp), in qlt_mem_alloc()
6454 &ha->tgt.atio_dma, GFP_KERNEL); in qlt_mem_alloc()
6455 if (!ha->tgt.atio_ring) { in qlt_mem_alloc()
6456 kfree(ha->tgt.tgt_vp_map); in qlt_mem_alloc()
6463 qlt_mem_free(struct qla_hw_data *ha) in qlt_mem_free() argument
6468 if (ha->tgt.atio_ring) { in qlt_mem_free()
6469 dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) * in qlt_mem_free()
6470 sizeof(struct atio_from_isp), ha->tgt.atio_ring, in qlt_mem_free()
6471 ha->tgt.atio_dma); in qlt_mem_free()
6473 kfree(ha->tgt.tgt_vp_map); in qlt_mem_free()
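
Nearly every match above follows the same access pattern: the per-HBA data is reached through the ha = vha->hw alias, ha->hardware_lock guards the target-mode state, and upper-layer work is dispatched through the ha->tgt.tgt_ops callback table (put_sess, handle_cmd, handle_tmr, free_cmd, ...). The fragment below is only an illustrative sketch of that pattern, assuming the qla2xxx target-mode headers are available; the helper name example_put_sess_locked() is hypothetical and not part of the driver.

/*
 * Illustrative sketch only (hypothetical helper, not in the driver):
 * derive the qla_hw_data alias from the scsi_qla_host, take the
 * hardware_lock, and call through the tgt_ops callback table, mirroring
 * the put_sess call sites listed above (e.g. __qlt_do_work, qlt_abort_work).
 */
static void example_put_sess_locked(struct scsi_qla_host *vha,
	struct qla_tgt_sess *sess)
{
	struct qla_hw_data *ha = vha->hw;	/* per-HBA data, as in the matches above */
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->tgt.tgt_ops->put_sess(sess);	/* callbacks registered by the fabric module */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}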