Lines Matching refs:ha. These are references to the struct qla_hw_data *ha adapter-data pointer in the qla2xxx target-mode code (the qlt_* functions of drivers/scsi/qla2xxx/qla_target.c). Each entry lists the source line number, the matching code, and the enclosing function; a trailing "local" marks a line that declares ha as a local variable and "argument" marks it as a function parameter.
102 static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
104 static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
107 static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
109 static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
175 struct qla_hw_data *ha = vha->hw; in qlt_find_host_by_d_id() local
184 BUG_ON(ha->tgt.tgt_vp_map == NULL); in qlt_find_host_by_d_id()
185 vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx; in qlt_find_host_by_d_id()
186 if (likely(test_bit(vp_idx, ha->vp_idx_map))) in qlt_find_host_by_d_id()
187 return ha->tgt.tgt_vp_map[vp_idx].vha; in qlt_find_host_by_d_id()
196 struct qla_hw_data *ha = vha->hw; in qlt_find_host_by_vp_idx() local
201 BUG_ON(ha->tgt.tgt_vp_map == NULL); in qlt_find_host_by_vp_idx()
202 if (likely(test_bit(vp_idx, ha->vp_idx_map))) in qlt_find_host_by_vp_idx()
203 return ha->tgt.tgt_vp_map[vp_idx].vha; in qlt_find_host_by_vp_idx()
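The two lookups above (qlt_find_host_by_d_id, qlt_find_host_by_vp_idx) resolve an FC destination ID or a virtual-port index to its scsi_qla_host through ha->tgt.tgt_vp_map, but only when that index is flagged live in the ha->vp_idx_map bitmap. A minimal user-space sketch of the pattern follows; the types and sizes (vhost, hw_data, MAX_VPORTS) are invented stand-ins, not the driver's real structures.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define MAX_VPORTS 256

struct vhost;					/* stand-in for scsi_qla_host */

struct vp_map_entry {
	uint8_t idx;				/* d_id byte -> vp index */
	struct vhost *vha;			/* vp index -> host */
};

struct hw_data {				/* stand-in for qla_hw_data */
	unsigned long vp_idx_map[MAX_VPORTS / (8 * sizeof(unsigned long))];
	struct vp_map_entry tgt_vp_map[MAX_VPORTS];
};

static bool vp_idx_live(const struct hw_data *ha, unsigned int idx)
{
	return ha->vp_idx_map[idx / (8 * sizeof(unsigned long))] &
	       (1UL << (idx % (8 * sizeof(unsigned long))));
}

/* Resolve a destination ID (only one byte of it is used as the map key in
 * this sketch) to the owning virtual host, or NULL if the virtual port is
 * not currently registered. */
static struct vhost *find_host_by_d_id(struct hw_data *ha, const uint8_t d_id[3])
{
	unsigned int vp_idx = ha->tgt_vp_map[d_id[2]].idx;

	if (vp_idx_live(ha, vp_idx))
		return ha->tgt_vp_map[vp_idx].vha;
	return NULL;
}

static struct vhost *find_host_by_vp_idx(struct hw_data *ha, unsigned int vp_idx)
{
	if (vp_idx < MAX_VPORTS && vp_idx_live(ha, vp_idx))
		return ha->tgt_vp_map[vp_idx].vha;
	return NULL;
}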
398 struct qla_hw_data *ha = vha->hw; in qlt_free_session_done() local
436 ha->tgt.tgt_ops->free_session(sess); in qlt_free_session_done()
456 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_free_session_done()
464 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_free_session_done()
498 struct qla_hw_data *ha = vha->hw; in qlt_reset() local
512 if (!list_empty(&ha->tgt.qla_tgt->sess_list)) { in qlt_reset()
513 sess = list_entry(ha->tgt.qla_tgt->sess_list.next, in qlt_reset()
537 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id); in qlt_reset()
564 uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5; in qlt_schedule_sess_for_deletion()
617 struct qla_hw_data *ha = vha->hw; in qla24xx_get_loop_id() local
624 gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), in qla24xx_get_loop_id()
629 vha->vp_idx, qla2x00_gid_list_size(ha)); in qla24xx_get_loop_id()
654 id_iter += ha->gid_list_info_size; in qla24xx_get_loop_id()
658 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), in qla24xx_get_loop_id()
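qla24xx_get_loop_id above allocates a coherent GID-list buffer, walks it entry by entry with a per-chip stride (ha->gid_list_info_size), and frees the buffer on every exit path. The sketch below models only the stride-based walk; the gid_entry layout and field names are hypothetical, and plain stack memory stands in for dma_alloc_coherent()/dma_free_coherent().

#include <stddef.h>
#include <stdint.h>

/* Hypothetical GID entry: 3-byte port ID plus a loop ID.  The real firmware
 * layout and the per-entry size differ by ISP generation. */
struct gid_entry {
	uint8_t  al_pa;
	uint8_t  area;
	uint8_t  domain;
	uint8_t  reserved;
	uint16_t loop_id;
};

/* Walk a packed GID list of 'entries' records, each 'entry_size' bytes, and
 * return the loop ID whose 24-bit port ID matches, or -1 if there is none. */
static int lookup_loop_id(const void *gid_list, size_t entries,
			  size_t entry_size, uint8_t domain,
			  uint8_t area, uint8_t al_pa)
{
	const uint8_t *iter = gid_list;
	size_t i;

	for (i = 0; i < entries; i++, iter += entry_size) {
		const struct gid_entry *e = (const void *)iter;

		if (e->domain == domain && e->area == area && e->al_pa == al_pa)
			return e->loop_id;
	}
	return -1;			/* not found: caller treats this as an error */
}

int main(void)
{
	struct gid_entry list[2] = {
		{ .al_pa = 0x01, .area = 0x02, .domain = 0x03, .loop_id = 5 },
		{ .al_pa = 0xef, .area = 0x00, .domain = 0x01, .loop_id = 9 },
	};

	/* In the driver the buffer comes from dma_alloc_coherent() and is
	 * freed with dma_free_coherent() before returning. */
	return lookup_loop_id(list, 2, sizeof(list[0]), 0x01, 0x00, 0xef) == 9 ? 0 : 1;
}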
677 struct qla_hw_data *ha = vha->hw; in qlt_del_sess_work_fn() local
681 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_del_sess_work_fn()
694 ha->tgt.tgt_ops->shutdown_sess(sess); in qlt_del_sess_work_fn()
695 ha->tgt.tgt_ops->put_sess(sess); in qlt_del_sess_work_fn()
702 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_del_sess_work_fn()
714 struct qla_hw_data *ha = vha->hw; in qlt_create_sess() local
720 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_create_sess()
735 spin_unlock_irqrestore(&ha->hardware_lock, in qlt_create_sess()
744 ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, in qlt_create_sess()
752 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_create_sess()
757 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_create_sess()
795 if (ha->tgt.tgt_ops->check_initiator_node_acl(vha, in qlt_create_sess()
810 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_create_sess()
814 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_create_sess()
831 struct qla_hw_data *ha = vha->hw; in qlt_fc_port_added() local
845 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_fc_port_added()
847 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_fc_port_added()
852 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_fc_port_added()
858 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_fc_port_added()
861 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_fc_port_added()
878 ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, in qlt_fc_port_added()
889 ha->tgt.tgt_ops->put_sess(sess); in qlt_fc_port_added()
890 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_fc_port_added()
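qlt_create_sess and qlt_fc_port_added look a session up under ha->hardware_lock, update its d_id/loop_id if it already exists, and otherwise drop the lock, allocate, then retake the lock to insert, re-checking for a racing creator. A simplified pthread model of that update-or-insert discipline, with invented sess/host types; the real code additionally handles ACL checks, put_sess reference counting, and resurrecting sessions marked for deletion.

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

struct sess {
	uint32_t port_id;		/* 24-bit d_id */
	uint16_t loop_id;
	int	 refcount;
	struct sess *next;
};

struct host {
	pthread_mutex_t hardware_lock;	/* models ha->hardware_lock */
	struct sess *sess_list;
};

/* Find-or-create a session for (port_id, loop_id).  Everything that touches
 * the list happens with hardware_lock held; allocations and anything else
 * that may sleep happen with the lock dropped, which is why the driver
 * releases and retakes it in qlt_create_sess(). */
static struct sess *upsert_sess(struct host *h, uint32_t port_id, uint16_t loop_id)
{
	struct sess *s, *e;

	pthread_mutex_lock(&h->hardware_lock);
	for (e = h->sess_list; e; e = e->next) {
		if (e->port_id == port_id) {
			e->loop_id = loop_id;		/* update path */
			e->refcount++;
			pthread_mutex_unlock(&h->hardware_lock);
			return e;
		}
	}
	pthread_mutex_unlock(&h->hardware_lock);

	s = calloc(1, sizeof(*s));			/* allocate outside the lock */
	if (!s)
		return NULL;
	s->port_id = port_id;
	s->loop_id = loop_id;
	s->refcount = 1;

	pthread_mutex_lock(&h->hardware_lock);
	for (e = h->sess_list; e; e = e->next) {
		if (e->port_id == port_id) {		/* lost a race: reuse */
			e->loop_id = loop_id;
			e->refcount++;
			pthread_mutex_unlock(&h->hardware_lock);
			free(s);
			return e;
		}
	}
	s->next = h->sess_list;				/* insert path */
	h->sess_list = s;
	pthread_mutex_unlock(&h->hardware_lock);
	return s;
}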
934 struct qla_hw_data *ha = tgt->ha; in test_tgt_sess_count() local
941 spin_lock_irqsave(&ha->hardware_lock, flags); in test_tgt_sess_count()
946 spin_unlock_irqrestore(&ha->hardware_lock, flags); in test_tgt_sess_count()
955 struct qla_hw_data *ha = tgt->ha; in qlt_stop_phase1() local
987 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_stop_phase1()
990 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_stop_phase1()
1014 if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha)) in qlt_stop_phase1()
1026 struct qla_hw_data *ha = tgt->ha; in qlt_stop_phase2() local
1027 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); in qlt_stop_phase2()
1042 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_stop_phase2()
1044 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_stop_phase2()
1046 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_stop_phase2()
1050 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_stop_phase2()
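test_tgt_sess_count samples tgt->sess_count under ha->hardware_lock, and qlt_stop_phase1 waits until it reaches zero before the target is torn down (the driver uses wait_event() on a wait queue for this). A condition-variable model of that drain-and-wait, with hypothetical fields:

#include <pthread.h>

struct tgt {
	pthread_mutex_t hardware_lock;	/* models ha->hardware_lock */
	pthread_cond_t	waitq;		/* models the driver's wait queue */
	int		sess_count;
};

/* Called whenever a session is freed; wakes up anyone waiting in stop(). */
static void tgt_sess_put(struct tgt *t)
{
	pthread_mutex_lock(&t->hardware_lock);
	if (--t->sess_count == 0)
		pthread_cond_broadcast(&t->waitq);
	pthread_mutex_unlock(&t->hardware_lock);
}

/* Phase-1 style stop: block until every session has been released. */
static void tgt_stop_wait(struct tgt *t)
{
	pthread_mutex_lock(&t->hardware_lock);
	while (t->sess_count != 0)
		pthread_cond_wait(&t->waitq, &t->hardware_lock);
	pthread_mutex_unlock(&t->hardware_lock);
}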
1114 struct qla_hw_data *ha = vha->hw; in qlt_send_notify_ack() local
1118 ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha); in qlt_send_notify_ack()
1174 struct qla_hw_data *ha = vha->hw; in qlt_24xx_send_abts_resp() local
1181 ha, abts, status); in qlt_24xx_send_abts_resp()
1364 struct qla_hw_data *ha = vha->hw; in __qlt_24xx_handle_abts() local
1415 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK, in __qlt_24xx_handle_abts()
1434 struct qla_hw_data *ha = vha->hw; in qlt_24xx_handle_abts() local
1466 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); in qlt_24xx_handle_abts()
1498 static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha, in qlt_24xx_send_task_mgmt_ctio() argument
1505 ql_dbg(ql_dbg_tgt, ha, 0xe008, in qlt_24xx_send_task_mgmt_ctio()
1507 ha, atio, resp_code); in qlt_24xx_send_task_mgmt_ctio()
1510 if (qlt_issue_marker(ha, 1) != QLA_SUCCESS) in qlt_24xx_send_task_mgmt_ctio()
1513 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL); in qlt_24xx_send_task_mgmt_ctio()
1515 ql_dbg(ql_dbg_tgt, ha, 0xe04c, in qlt_24xx_send_task_mgmt_ctio()
1517 "request packet\n", ha->vp_idx, __func__); in qlt_24xx_send_task_mgmt_ctio()
1526 ctio->vp_index = ha->vp_idx; in qlt_24xx_send_task_mgmt_ctio()
1543 qla2x00_start_iocbs(ha, ha->req); in qlt_24xx_send_task_mgmt_ctio()
1556 struct qla_hw_data *ha = vha->hw; in qlt_xmit_tm_rsp() local
1563 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_xmit_tm_rsp()
1565 if (qla2x00_reset_active(vha) || mcmd->reset_count != ha->chip_reset) { in qlt_xmit_tm_rsp()
1573 ha->chip_reset); in qlt_xmit_tm_rsp()
1574 ha->tgt.tgt_ops->free_mcmd(mcmd); in qlt_xmit_tm_rsp()
1575 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_xmit_tm_rsp()
1598 ha->tgt.tgt_ops->free_mcmd(mcmd); in qlt_xmit_tm_rsp()
1599 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_xmit_tm_rsp()
1611 prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg, in qlt_pci_map_calc_cnt()
1638 prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev, in qlt_pci_map_calc_cnt()
1666 struct qla_hw_data *ha = vha->hw; in qlt_unmap_sg() local
1671 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); in qlt_unmap_sg()
1675 pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt, in qlt_unmap_sg()
1679 qla2x00_clean_dsd_pool(ha, NULL, cmd); in qlt_unmap_sg()
1682 dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma); in qlt_unmap_sg()
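qlt_pci_map_calc_cnt maps the command's scatter-gather list for DMA and derives from the returned segment count how many request-queue entries the CTIO will need (the first IOCB holds only a few data-segment descriptors, each continuation IOCB holds a few more); qlt_unmap_sg undoes the mapping and releases the DIF/CRC context. The arithmetic below is a hedged sketch of that packet-count calculation; DSDS_PER_CMD and DSDS_PER_CONT are assumed placeholder capacities, not the driver's per-chip constants.

#include <stdio.h>

/* Hypothetical capacities: descriptors that fit into the first CTIO IOCB and
 * into each continuation IOCB. */
#define DSDS_PER_CMD	1
#define DSDS_PER_CONT	5

/* How many request-queue entries does a command with 'seg_cnt' mapped DMA
 * segments need?  One for the CTIO itself plus enough continuations to hold
 * the remaining descriptors.  seg_cnt <= 0 means the DMA mapping failed. */
static int calc_req_cnt(int seg_cnt)
{
	int req_cnt = 1;

	if (seg_cnt <= 0)
		return -1;			/* mapping failure */
	if (seg_cnt > DSDS_PER_CMD)
		req_cnt += (seg_cnt - DSDS_PER_CMD + DSDS_PER_CONT - 1) /
			   DSDS_PER_CONT;
	return req_cnt;
}

int main(void)
{
	/* 1 segment fits in the CTIO; 6 segments need one continuation;
	 * 7 segments need two continuations (three entries in total). */
	printf("%d %d %d\n", calc_req_cnt(1), calc_req_cnt(6), calc_req_cnt(7));
	return 0;
}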
1732 struct qla_hw_data *ha = vha->hw; in qlt_make_handle() local
1735 h = ha->tgt.current_handle; in qlt_make_handle()
1741 if (h == ha->tgt.current_handle) { in qlt_make_handle()
1744 "empty cmd slots in ha %p\n", vha->vp_idx, ha); in qlt_make_handle()
1750 (ha->tgt.cmds[h-1] != NULL)); in qlt_make_handle()
1753 ha->tgt.current_handle = h; in qlt_make_handle()
1764 struct qla_hw_data *ha = vha->hw; in qlt_24xx_build_ctio_pkt() local
1785 ha->tgt.cmds[h-1] = prm->cmd; in qlt_24xx_build_ctio_pkt()
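qlt_make_handle scans ha->tgt.cmds[] for a free command handle, starting just after ha->tgt.current_handle and giving up after one full lap, and qlt_24xx_build_ctio_pkt then stores the command at cmds[h-1]; the qlt_get_cmd entries further down show the completion side fetching and clearing that slot. A self-contained sketch of that handle ring, with an invented MAX_CMDS and a bounded scan standing in for the driver's wrap-back-to-start test:

#include <stddef.h>
#include <stdint.h>

#define MAX_CMDS	1024		/* hypothetical ring size */
#define NULL_HANDLE	0		/* handle 0 is reserved */

struct cmd;				/* opaque per-command state */

struct tgt_hw {
	uint32_t    current_handle;	/* last handle given out */
	struct cmd *cmds[MAX_CMDS];	/* slot h-1 owns handle h */
};

/* Find the next free command handle, scanning at most one full lap of the
 * ring.  Returns NULL_HANDLE when every slot is busy, which the caller must
 * treat as a temporary out-of-resources condition. */
static uint32_t make_handle(struct tgt_hw *hw)
{
	uint32_t h = hw->current_handle;
	uint32_t tries;

	for (tries = 0; tries < MAX_CMDS; tries++) {
		if (++h > MAX_CMDS)
			h = 1;			/* wrap, skipping handle 0 */
		if (hw->cmds[h - 1] == NULL) {
			hw->current_handle = h;
			return h;
		}
	}
	return NULL_HANDLE;			/* every slot is busy */
}

/* Completion side: translate the handle carried back by the hardware into
 * the command and release the slot for reuse. */
static struct cmd *get_cmd(struct tgt_hw *hw, uint32_t handle)
{
	struct cmd *c;

	if (handle == NULL_HANDLE || handle > MAX_CMDS)
		return NULL;
	c = hw->cmds[handle - 1];
	hw->cmds[handle - 1] = NULL;
	return c;
}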
1924 struct qla_hw_data *ha = vha->hw; in qlt_pre_xmit_response() local
1973 (IS_FWI2_CAPABLE(ha) && in qlt_pre_xmit_response()
1984 static inline int qlt_need_explicit_conf(struct qla_hw_data *ha, in qlt_need_explicit_conf() argument
1987 if (ha->tgt.enable_class_2) in qlt_need_explicit_conf()
1993 return ha->tgt.enable_explicit_conf && in qlt_need_explicit_conf()
2093 if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) { in qlt_24xx_init_ctio_to_isp()
2103 if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) { in qlt_24xx_init_ctio_to_isp()
2133 "lost", prm->tgt->ha->vp_idx, in qlt_24xx_init_ctio_to_isp()
2272 struct qla_hw_data *ha; in qlt_build_ctio_crc2_pkt() local
2283 ha = vha->hw; in qlt_build_ctio_crc2_pkt()
2324 else if (IS_PI_UNINIT_CAPABLE(ha)) { in qlt_build_ctio_crc2_pkt()
2367 ha->tgt.cmds[h-1] = prm->cmd; in qlt_build_ctio_crc2_pkt()
2402 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma); in qlt_build_ctio_crc2_pkt()
2449 if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd, in qlt_build_ctio_crc2_pkt()
2452 } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd, in qlt_build_ctio_crc2_pkt()
2461 if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd, in qlt_build_ctio_crc2_pkt()
2482 struct qla_hw_data *ha = vha->hw; in qlt_xmit_response() local
2489 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_xmit_response()
2497 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_xmit_response()
2500 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_xmit_response()
2517 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_xmit_response()
2519 if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) { in qlt_xmit_response()
2529 ha->chip_reset); in qlt_xmit_response()
2530 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_xmit_response()
2566 if (qlt_need_explicit_conf(ha, cmd, 0)) { in qlt_xmit_response()
2622 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_xmit_response()
2628 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_xmit_response()
2638 struct qla_hw_data *ha = vha->hw; in qlt_rdy_to_xfer() local
2658 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_rdy_to_xfer()
2660 if (qla2x00_reset_active(vha) || (cmd->reset_count != ha->chip_reset) || in qlt_rdy_to_xfer()
2671 ha->chip_reset); in qlt_rdy_to_xfer()
2672 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_rdy_to_xfer()
2703 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_rdy_to_xfer()
2709 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_rdy_to_xfer()
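qlt_xmit_response, qlt_rdy_to_xfer and qlt_xmit_tm_rsp all compare cmd->reset_count against ha->chip_reset (and check qla2x00_reset_active()) with the hardware lock held before queueing anything to firmware; a chip reset in between means the exchange no longer exists and the command must be failed locally. A simplified generation-counter model of that check; note that the real driver keeps the lock held across both the check and the IOCB submission, which this sketch only points out in a comment.

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

struct hw {
	pthread_mutex_t hardware_lock;
	uint32_t chip_reset;		/* bumped on every chip/ISP reset */
	bool	 reset_active;
};

struct cmd {
	uint32_t reset_count;		/* snapshot taken when the command arrived */
};

/* Snapshot the reset generation when the command is first seen. */
static void cmd_init(struct hw *hw, struct cmd *c)
{
	pthread_mutex_lock(&hw->hardware_lock);
	c->reset_count = hw->chip_reset;
	pthread_mutex_unlock(&hw->hardware_lock);
}

/* Before queueing an IOCB for this command, verify the adapter was not reset
 * since the command arrived.  In the driver this check and the subsequent
 * submission happen under one hardware_lock hold so a reset cannot slip in
 * between; a caller of this sketch would have to do the same. */
static bool cmd_still_valid(struct hw *hw, const struct cmd *c)
{
	bool ok;

	pthread_mutex_lock(&hw->hardware_lock);
	ok = !hw->reset_active && c->reset_count == hw->chip_reset;
	pthread_mutex_unlock(&hw->hardware_lock);
	return ok;
}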
2858 struct qla_hw_data *ha = vha->hw; in __qlt_send_term_imm_notif() local
2863 "Sending TERM ELS CTIO (ha=%p)\n", ha); in __qlt_send_term_imm_notif()
2942 struct qla_hw_data *ha = vha->hw; in __qlt_send_term_exchange() local
2947 ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha); in __qlt_send_term_exchange()
3234 struct qla_hw_data *ha = vha->hw; in qlt_get_cmd() local
3237 if (ha->tgt.cmds[handle] != NULL) { in qlt_get_cmd()
3238 struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle]; in qlt_get_cmd()
3239 ha->tgt.cmds[handle] = NULL; in qlt_get_cmd()
3289 struct qla_hw_data *ha = vha->hw; in qlt_abort_cmd_on_host_reset() local
3308 ha->tgt.tgt_ops->handle_data(cmd); in qlt_abort_cmd_on_host_reset()
3321 ha->tgt.tgt_ops->free_cmd(cmd); in qlt_abort_cmd_on_host_reset()
3325 qlt_host_reset_handler(struct qla_hw_data *ha) in qlt_host_reset_handler() argument
3329 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); in qlt_host_reset_handler()
3347 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_host_reset_handler()
3356 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_host_reset_handler()
3366 struct qla_hw_data *ha = vha->hw; in qlt_do_ctio_completion() local
3446 ha->tgt.tgt_ops->handle_dif_err(cmd); in qlt_do_ctio_completion()
3493 ha->tgt.tgt_ops->handle_data(cmd); in qlt_do_ctio_completion()
3511 ha->tgt.tgt_ops->free_cmd(cmd); in qlt_do_ctio_completion()
3554 struct qla_hw_data *ha = vha->hw; in __qlt_do_work() local
3597 ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length, in __qlt_do_work()
3604 spin_lock_irqsave(&ha->hardware_lock, flags); in __qlt_do_work()
3605 ha->tgt.tgt_ops->put_sess(sess); in __qlt_do_work()
3606 spin_unlock_irqrestore(&ha->hardware_lock, flags); in __qlt_do_work()
3616 spin_lock_irqsave(&ha->hardware_lock, flags); in __qlt_do_work()
3621 ha->tgt.tgt_ops->put_sess(sess); in __qlt_do_work()
3622 spin_unlock_irqrestore(&ha->hardware_lock, flags); in __qlt_do_work()
3679 struct qla_hw_data *ha = vha->hw; in qlt_create_sess_from_atio() local
3721 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_create_sess_from_atio()
3723 ha->tgt.tgt_ops->put_sess(sess); in qlt_create_sess_from_atio()
3724 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_create_sess_from_atio()
3737 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_create_sess_from_atio()
3739 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_create_sess_from_atio()
3748 struct qla_hw_data *ha = vha->hw; in qlt_handle_cmd_for_atio() local
3759 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id); in qlt_handle_cmd_for_atio()
3796 ha->tgt.tgt_ops->put_sess(sess); in qlt_handle_cmd_for_atio()
3818 struct qla_hw_data *ha = vha->hw; in qlt_issue_task_mgmt() local
3911 res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0); in qlt_issue_task_mgmt()
3927 struct qla_hw_data *ha = vha->hw; in qlt_handle_task_mgmt() local
3938 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, in qlt_handle_task_mgmt()
3961 struct qla_hw_data *ha = vha->hw; in __qlt_abort_task() local
3983 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK, in __qlt_abort_task()
4000 struct qla_hw_data *ha = vha->hw; in qlt_abort_task() local
4004 loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb); in qlt_abort_task()
4006 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id); in qlt_abort_task()
4151 struct qla_hw_data *ha = vha->hw; in qlt_24xx_handle_els() local
4281 if (ha->current_topology != ISP_CFG_F) { in qlt_24xx_handle_els()
4453 struct qla_hw_data *ha = vha->hw; in qlt_handle_srr() local
4469 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_handle_srr()
4472 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_handle_srr()
4495 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_handle_srr()
4498 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_handle_srr()
4528 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_handle_srr()
4531 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_handle_srr()
4561 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_handle_srr()
4573 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_handle_srr()
4579 struct qla_hw_data *ha = vha->hw; in qlt_reject_free_srr_imm() local
4583 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_reject_free_srr_imm()
4591 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_reject_free_srr_imm()
4770 struct qla_hw_data *ha = vha->hw; in qlt_handle_imm_notify() local
4861 GET_TARGET_ID(ha, (struct atio_from_isp *)iocb), in qlt_handle_imm_notify()
4910 struct qla_hw_data *ha = vha->hw; in __qlt_send_busy() local
4914 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, in __qlt_send_busy()
4968 struct qla_hw_data *ha = vha->hw; in qlt_alloc_qfull_cmd() local
4996 sess = ha->tgt.tgt_ops->find_sess_by_s_id in qlt_alloc_qfull_cmd()
5053 struct qla_hw_data *ha = vha->hw; in qlt_free_qfull_cmds() local
5059 if (list_empty(&ha->tgt.q_full_list)) in qlt_free_qfull_cmds()
5066 if (list_empty(&ha->tgt.q_full_list)) { in qlt_free_qfull_cmds()
5071 list_for_each_entry_safe(cmd, tcmd, &ha->tgt.q_full_list, cmd_list) { in qlt_free_qfull_cmds()
5129 struct qla_hw_data *ha = vha->hw; in qlt_chk_qfull_thresh_hold() local
5132 if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha)) in qlt_chk_qfull_thresh_hold()
5145 struct qla_hw_data *ha = vha->hw; in qlt_24xx_atio_pkt() local
5151 "ATIO pkt, but no tgt (ha %p)", ha); in qlt_24xx_atio_pkt()
5238 struct qla_hw_data *ha = vha->hw; in qlt_response_pkt() local
5244 "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha); in qlt_response_pkt()
5424 struct qla_hw_data *ha = vha->hw; in qlt_async_event() local
5428 if (!ha->tgt.tgt_ops) in qlt_async_event()
5433 "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha); in qlt_async_event()
5438 IS_QLA2100(ha)) in qlt_async_event()
5597 struct qla_hw_data *ha = vha->hw; in qlt_abort_work() local
5604 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_abort_work()
5613 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, in qlt_abort_work()
5616 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_abort_work()
5623 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_abort_work()
5642 ha->tgt.tgt_ops->put_sess(sess); in qlt_abort_work()
5643 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_abort_work()
5649 ha->tgt.tgt_ops->put_sess(sess); in qlt_abort_work()
5650 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_abort_work()
5658 struct qla_hw_data *ha = vha->hw; in qlt_tmr_work() local
5667 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_tmr_work()
5673 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); in qlt_tmr_work()
5675 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_tmr_work()
5682 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_tmr_work()
5704 ha->tgt.tgt_ops->put_sess(sess); in qlt_tmr_work()
5705 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_tmr_work()
5711 ha->tgt.tgt_ops->put_sess(sess); in qlt_tmr_work()
5712 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_tmr_work()
5757 int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) in qlt_add_target() argument
5764 if (!IS_TGT_MODE_CAPABLE(ha)) { in qlt_add_target()
5771 "Registering target for host %ld(%p).\n", base_vha->host_no, ha); in qlt_add_target()
5785 tgt->ha = ha; in qlt_add_target()
5823 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha) in qlt_remove_target() argument
5841 vha->host_no, ha); in qlt_remove_target()
5882 struct qla_hw_data *ha; in qlt_lport_register() local
5891 ha = vha->hw; in qlt_lport_register()
5900 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_lport_register()
5904 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_lport_register()
5910 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_lport_register()
5913 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_lport_register()
5947 struct qla_hw_data *ha = vha->hw; in qlt_lport_deregister() local
5953 ha->tgt.tgt_ops = NULL; in qlt_lport_deregister()
5964 struct qla_hw_data *ha = vha->hw; in qlt_set_mode() local
5978 if (ha->tgt.ini_mode_force_reverse) in qlt_set_mode()
5985 struct qla_hw_data *ha = vha->hw; in qlt_clear_mode() local
6001 if (ha->tgt.ini_mode_force_reverse) in qlt_clear_mode()
6013 struct qla_hw_data *ha = vha->hw; in qlt_enable_vha() local
6016 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); in qlt_enable_vha()
6026 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_enable_vha()
6029 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_enable_vha()
6049 struct qla_hw_data *ha = vha->hw; in qlt_disable_vha() local
6061 spin_lock_irqsave(&ha->hardware_lock, flags); in qlt_disable_vha()
6063 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qlt_disable_vha()
6076 qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha) in qlt_vport_create() argument
6094 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; in qlt_vport_create()
6096 qlt_add_target(ha, vha); in qlt_vport_create()
6127 struct qla_hw_data *ha = vha->hw; in qlt_init_atio_q_entries() local
6129 struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring; in qlt_init_atio_q_entries()
6134 for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) { in qlt_init_atio_q_entries()
6148 struct qla_hw_data *ha = vha->hw; in qlt_24xx_process_atio_queue() local
6155 while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) { in qlt_24xx_process_atio_queue()
6156 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; in qlt_24xx_process_atio_queue()
6162 ha->tgt.atio_ring_index++; in qlt_24xx_process_atio_queue()
6163 if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) { in qlt_24xx_process_atio_queue()
6164 ha->tgt.atio_ring_index = 0; in qlt_24xx_process_atio_queue()
6165 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring; in qlt_24xx_process_atio_queue()
6167 ha->tgt.atio_ring_ptr++; in qlt_24xx_process_atio_queue()
6170 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; in qlt_24xx_process_atio_queue()
6176 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index); in qlt_24xx_process_atio_queue()
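qlt_init_atio_q_entries pre-fills every ATIO ring slot with the ATIO_PROCESSED signature, and qlt_24xx_process_atio_queue then consumes entries until it reaches a slot still carrying that signature, wrapping the ring index and finally writing the new position to ISP_ATIO_Q_OUT. A small model of that consumer loop; the ring length, sentinel value and entry layout are made up for the sketch.

#include <stdint.h>

#define ATIO_Q_LEN	64		/* hypothetical ring length */
#define ATIO_PROCESSED	0xDEADDEADu	/* sentinel the consumer writes back */

struct atio_entry {
	uint32_t signature;		/* firmware posts something != sentinel */
	uint8_t  payload[60];
};

struct atio_ring {
	struct atio_entry entries[ATIO_Q_LEN];
	uint32_t	  index;	/* consumer's current position */
};

static void handle_atio(const struct atio_entry *e) { (void)e; }

/* Drain everything the firmware has posted: consume entries until one still
 * carries the PROCESSED sentinel, wrapping the index at the end of the ring,
 * then report how far we got (the driver writes this to ISP_ATIO_Q_OUT). */
static uint32_t process_atio_queue(struct atio_ring *r)
{
	while (r->entries[r->index].signature != ATIO_PROCESSED) {
		struct atio_entry *e = &r->entries[r->index];

		handle_atio(e);
		e->signature = ATIO_PROCESSED;	/* hand the slot back */

		if (++r->index == ATIO_Q_LEN)
			r->index = 0;		/* wrap around */
	}
	return r->index;
}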
6182 struct qla_hw_data *ha = vha->hw; in qlt_24xx_config_rings() local
6190 if (IS_ATIO_MSIX_CAPABLE(ha)) { in qlt_24xx_config_rings()
6191 struct qla_msix_entry *msix = &ha->msix_entries[2]; in qlt_24xx_config_rings()
6192 struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb; in qlt_24xx_config_rings()
6204 struct qla_hw_data *ha = vha->hw; in qlt_24xx_config_nvram_stage1() local
6207 if (!ha->tgt.saved_set) { in qlt_24xx_config_nvram_stage1()
6209 ha->tgt.saved_exchange_count = nv->exchange_count; in qlt_24xx_config_nvram_stage1()
6210 ha->tgt.saved_firmware_options_1 = in qlt_24xx_config_nvram_stage1()
6212 ha->tgt.saved_firmware_options_2 = in qlt_24xx_config_nvram_stage1()
6214 ha->tgt.saved_firmware_options_3 = in qlt_24xx_config_nvram_stage1()
6216 ha->tgt.saved_set = 1; in qlt_24xx_config_nvram_stage1()
6244 if (ha->tgt.saved_set) { in qlt_24xx_config_nvram_stage1()
6245 nv->exchange_count = ha->tgt.saved_exchange_count; in qlt_24xx_config_nvram_stage1()
6247 ha->tgt.saved_firmware_options_1; in qlt_24xx_config_nvram_stage1()
6249 ha->tgt.saved_firmware_options_2; in qlt_24xx_config_nvram_stage1()
6251 ha->tgt.saved_firmware_options_3; in qlt_24xx_config_nvram_stage1()
6259 if (ha->tgt.enable_class_2) { in qlt_24xx_config_nvram_stage1()
6277 struct qla_hw_data *ha = vha->hw; in qlt_24xx_config_nvram_stage2() local
6279 if (ha->tgt.node_name_set) { in qlt_24xx_config_nvram_stage2()
6280 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); in qlt_24xx_config_nvram_stage2()
6288 struct qla_hw_data *ha = vha->hw; in qlt_81xx_config_nvram_stage1() local
6294 if (!ha->tgt.saved_set) { in qlt_81xx_config_nvram_stage1()
6296 ha->tgt.saved_exchange_count = nv->exchange_count; in qlt_81xx_config_nvram_stage1()
6297 ha->tgt.saved_firmware_options_1 = in qlt_81xx_config_nvram_stage1()
6299 ha->tgt.saved_firmware_options_2 = in qlt_81xx_config_nvram_stage1()
6301 ha->tgt.saved_firmware_options_3 = in qlt_81xx_config_nvram_stage1()
6303 ha->tgt.saved_set = 1; in qlt_81xx_config_nvram_stage1()
6332 if (ha->tgt.saved_set) { in qlt_81xx_config_nvram_stage1()
6333 nv->exchange_count = ha->tgt.saved_exchange_count; in qlt_81xx_config_nvram_stage1()
6335 ha->tgt.saved_firmware_options_1; in qlt_81xx_config_nvram_stage1()
6337 ha->tgt.saved_firmware_options_2; in qlt_81xx_config_nvram_stage1()
6339 ha->tgt.saved_firmware_options_3; in qlt_81xx_config_nvram_stage1()
6347 if (ha->tgt.enable_class_2) { in qlt_81xx_config_nvram_stage1()
6365 struct qla_hw_data *ha = vha->hw; in qlt_81xx_config_nvram_stage2() local
6370 if (ha->tgt.node_name_set) { in qlt_81xx_config_nvram_stage2()
6371 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); in qlt_81xx_config_nvram_stage2()
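Both qlt_24xx_config_nvram_stage1 and qlt_81xx_config_nvram_stage1 stash the initiator-mode NVRAM values (exchange_count, firmware_options_1..3) exactly once before rewriting them for target mode, and restore the stash when target mode is not enabled. A sketch of that save-once/restore-on-disable pattern; the nvram fields shown and the specific values written are placeholders, not the driver's actual option bits.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical slice of the NVRAM image that the target code touches. */
struct nvram {
	uint16_t exchange_count;
	uint32_t firmware_options_1;
	uint32_t firmware_options_2;
	uint32_t firmware_options_3;
};

struct tgt_cfg {
	bool	 saved_set;		/* have the originals been stashed yet? */
	uint16_t saved_exchange_count;
	uint32_t saved_firmware_options_1;
	uint32_t saved_firmware_options_2;
	uint32_t saved_firmware_options_3;
};

/* Stage-1 NVRAM fixup: when target mode is enabled, stash the initiator-mode
 * values exactly once and then rewrite them; when it is disabled, put the
 * stashed values back so initiator mode sees its original configuration. */
static void config_nvram_stage1(struct tgt_cfg *cfg, struct nvram *nv,
				bool tgt_mode_enabled)
{
	if (tgt_mode_enabled) {
		if (!cfg->saved_set) {
			cfg->saved_exchange_count     = nv->exchange_count;
			cfg->saved_firmware_options_1 = nv->firmware_options_1;
			cfg->saved_firmware_options_2 = nv->firmware_options_2;
			cfg->saved_firmware_options_3 = nv->firmware_options_3;
			cfg->saved_set = true;
		}
		nv->exchange_count = 0;		/* placeholder target-mode value */
		nv->firmware_options_1 |= 0x4;	/* placeholder target-mode bit */
	} else if (cfg->saved_set) {
		nv->exchange_count     = cfg->saved_exchange_count;
		nv->firmware_options_1 = cfg->saved_firmware_options_1;
		nv->firmware_options_2 = cfg->saved_firmware_options_2;
		nv->firmware_options_3 = cfg->saved_firmware_options_3;
	}
}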
6377 qlt_83xx_iospace_config(struct qla_hw_data *ha) in qlt_83xx_iospace_config() argument
6382 ha->msix_count += 1; /* For ATIO Q */ in qlt_83xx_iospace_config()
6413 qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) in qlt_probe_one_stage1() argument
6418 if (ha->mqenable || IS_QLA83XX(ha)) { in qlt_probe_one_stage1()
6419 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in; in qlt_probe_one_stage1()
6420 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out; in qlt_probe_one_stage1()
6422 ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in; in qlt_probe_one_stage1()
6423 ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out; in qlt_probe_one_stage1()
6436 struct qla_hw_data *ha; in qla83xx_msix_atio_q() local
6440 ha = rsp->hw; in qla83xx_msix_atio_q()
6441 vha = pci_get_drvdata(ha->pdev); in qla83xx_msix_atio_q()
6443 spin_lock_irqsave(&ha->hardware_lock, flags); in qla83xx_msix_atio_q()
6448 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla83xx_msix_atio_q()
6454 qlt_mem_alloc(struct qla_hw_data *ha) in qlt_mem_alloc() argument
6459 ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) * in qlt_mem_alloc()
6461 if (!ha->tgt.tgt_vp_map) in qlt_mem_alloc()
6464 ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev, in qlt_mem_alloc()
6465 (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp), in qlt_mem_alloc()
6466 &ha->tgt.atio_dma, GFP_KERNEL); in qlt_mem_alloc()
6467 if (!ha->tgt.atio_ring) { in qlt_mem_alloc()
6468 kfree(ha->tgt.tgt_vp_map); in qlt_mem_alloc()
6475 qlt_mem_free(struct qla_hw_data *ha) in qlt_mem_free() argument
6480 if (ha->tgt.atio_ring) { in qlt_mem_free()
6481 dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) * in qlt_mem_free()
6482 sizeof(struct atio_from_isp), ha->tgt.atio_ring, in qlt_mem_free()
6483 ha->tgt.atio_dma); in qlt_mem_free()
6485 kfree(ha->tgt.tgt_vp_map); in qlt_mem_free()
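qlt_mem_alloc allocates the tgt_vp_map array with kzalloc and the ATIO ring with dma_alloc_coherent, freeing the first if the second fails, and qlt_mem_free releases both while tolerating a partially set up state. A user-space sketch of that allocate-with-unwind pattern, with calloc/free standing in for the kernel allocators and invented sizes:

#include <stdlib.h>

#define MAX_VPORTS	256
#define ATIO_Q_LEN	64

struct vp_map_entry { void *vha; unsigned char idx; };
struct atio_entry   { unsigned char raw[64]; };

struct tgt_mem {
	struct vp_map_entry *tgt_vp_map;	/* kzalloc()'d in the driver */
	struct atio_entry   *atio_ring;		/* dma_alloc_coherent()'d */
};

/* Allocate both target-mode buffers, unwinding the first allocation if the
 * second one fails so the caller never sees a half-initialised state. */
static int tgt_mem_alloc(struct tgt_mem *m)
{
	m->tgt_vp_map = calloc(MAX_VPORTS, sizeof(*m->tgt_vp_map));
	if (!m->tgt_vp_map)
		return -1;

	m->atio_ring = calloc(ATIO_Q_LEN + 1, sizeof(*m->atio_ring));
	if (!m->atio_ring) {
		free(m->tgt_vp_map);		/* unwind on partial failure */
		m->tgt_vp_map = NULL;
		return -1;
	}
	return 0;
}

/* Teardown mirrors the allocation and tolerates a never-allocated or
 * partially-allocated state. */
static void tgt_mem_free(struct tgt_mem *m)
{
	free(m->atio_ring);
	m->atio_ring = NULL;
	free(m->tgt_vp_map);
	m->tgt_vp_map = NULL;
}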